repo_name (string, 6–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---|
sushmit0109/ASSC | [
"8beda6f3d055a35fff9ae2ff417b38a38e2a7fa5"
] | [
"codes/alt2train.py"
] | [
"from __future__ import print_function, absolute_import, division\nimport tensorflow as tf\n# from keras.backend.tensorflow_backend import set_session\n# config = tf.ConfigProto()\n# config.gpu_options.per_process_gpu_memory_fraction = 0.9\n# set_session(tf.Session(config=config))\nimport numpy as np\nfrom collections import Counter\nnp.random.seed(1)\nfrom tensorflow import set_random_seed\n\nset_random_seed(1)\nfrom datetime import datetime\nimport argparse\nimport os\nimport tables\nfrom keras.utils import to_categorical, plot_model\nfrom keras.layers import Flatten, Dense\nfrom keras.models import Model\nfrom keras.optimizers import Adamax as opt\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger\nimport pandas as pd\n\nfrom modules import *\nfrom utils import *\n\n\nimport xgboost as xgb\n\nif __name__ == '__main__':\n\n ############ পারসার এই ক্যাচাল এইখান থেকে শুরু #######################\n\n ########## পারসার ডেফিনিশন ########\n parser = argparse.ArgumentParser(description='')\n parser.add_argument(\"fold\",\n help=\"csvfile to use\")\n parser.add_argument(\"--seed\", type=int,\n help=\"Random seed\")\n parser.add_argument(\"--loadmodel\",\n help=\"load previous model checkpoint for retraining (Enter absolute path)\")\n parser.add_argument(\"--epochs\", type=int,\n help=\"Number of epochs for training\")\n parser.add_argument(\"--batch_size\", type=int,\n help=\"number of minibatches to take during each backwardpass preferably multiple of 2\")\n parser.add_argument(\"--verbose\", type=int, choices=[1, 2],\n help=\"Verbosity mode. 1 = progress bar, 2 = one line per epoch (default 2)\")\n parser.add_argument(\"--classweights\", type=bool,\n help=\"if True, class weights are added\")\n parser.add_argument(\"--comment\",\n help=\"Add comments to the log files\")\n\n args = parser.parse_args()\n print(\"%s selected\" % (args.fold))\n foldname = args.fold\n\n ###### পারসার থেকে নিয়ে ভ্যারিয়েবল গুলাতে মান বসানো, মান না থাকলে ডিফল্ট কত হবে সেইগুলাও বসান ########\n\n if args.seed: # if random seed is specified\n print(\"Random seed specified as %d\" % (args.seed))\n random_seed = args.seed\n else:\n random_seed = 1\n\n if args.loadmodel: # If a previously trained model is loaded for retraining\n load_path = args.loadmodel #### path to model to be loaded\n\n idx = load_path.find(\"weights\")\n initial_epoch = int(load_path[idx + 8:idx + 8 + 4])\n\n print(\"%s model loaded\\nInitial epoch is %d\" % (args.loadmodel, initial_epoch))\n else:\n print(\"no model specified, using initializer to initialize weights\")\n initial_epoch = 0\n load_path = False\n\n if args.epochs: # if number of training epochs is specified\n print(\"Training for %d epochs\" % (args.epochs))\n epochs = args.epochs\n else:\n epochs = 200\n print(\"Training for %d epochs\" % (epochs))\n\n if args.batch_size: # if batch_size is specified\n print(\"Training with %d samples per minibatch\" % (args.batch_size))\n batch_size = args.batch_size\n else:\n batch_size = 1024\n print(\"Training with %d minibatches\" % (batch_size))\n\n if args.verbose:\n verbose = args.verbose\n print(\"Verbosity level %d\" % (verbose))\n else:\n verbose = 2\n if args.comment:\n comment = args.comment\n else:\n comment = None\n\n ######## এই পর্যন্ত শুধু পারসার থেকে মান নিয়ে ভ্যারিয়েবল গুলা তে বসেছে। ###############\n\n ### ডিরেক্টরি ডিফাইন করা জেনারেলাইজ করে ####\n\n # model_dir = os.path.join(os.getcwd(), '..', 'models').replace('\\\\', '/')\n # fold_dir = os.path.join(os.getcwd(), '..', 'data').replace('\\\\', '/')\n # 
log_dir = os.path.join(os.getcwd(), '..', 'logs').replace('\\\\', '/')\n # log_name = foldname + '_' + str(datetime.now()).replace(' ', '').replace('\\\\', '/')\n # print(os.path.join(model_dir, log_name))\n # if not os.path.exists(os.path.join(model_dir, log_name)):\n # new_dir = (os.path.join(os.getcwd(), '..', 'models', log_name)).replace('\\\\', '/').replace(':', '')\n # print(new_dir)\n # os.makedirs(new_dir)\n # checkpoint_name = os.path.join(model_dir, log_name,\n # 'weights.{epoch04d}-{val_acc.4f}.hdf5'.replace(':', '')) # make sure separate\n # # folder for each log_name\n # results_file = os.path.join(os.getcwd().replace('\\\\', '/'), '..', 'results.csv')\n\n model_dir = os.path.join(os.getcwd(),'..','models').replace('\\\\', '/')\n fold_dir = os.path.join(os.getcwd(),'..','data').replace('\\\\', '/')\n log_dir = os.path.join(os.getcwd(),'..','logs').replace('\\\\', '/')\n log_name = foldname + '_' + str(datetime.now()).replace(':','-')\n if not os.path.exists(os.path.join(model_dir, log_name).replace('\\\\', '/')):\n new_dir = (os.path.join(model_dir, log_name).replace('\\\\', '/'))\n print(new_dir)\n os.makedirs(new_dir)\n if not os.path.exists(os.path.join(log_dir, log_name).replace('\\\\', '/')):\n new_dir = os.path.join(log_dir, log_name).replace('\\\\', '/')\n print(new_dir)\n os.makedirs(new_dir)\n checkpoint_name = os.path.join(model_dir,log_name,'weights.{epoch:04d}-{val_acc:.4f}.hdf5').replace('\\\\', '/') # make sure separate\n # folder for each log_name\n results_file = os.path.join(os.getcwd(), '..', 'results.csv').replace('\\\\','/')\n\n ##### ডিরেক্টরি ক্যাচাল শেষ #####\n\n\n\n\n\n\n\n\n ##### ডিরেক্টরি ক্যাচাল শেষ #####\n\n ##### প্যারামস হচ্ছে একটা লিস্ট যেটা নেটওয়ার্ক কে খাওয়াতে হবে। এই লিস্টে সব হাইপারপ্যারামিটার থেকে শুরু করে ফোল্ডারের নাম সব থাকবে। এটা শিখলাম। কাজ করানো টা শিখতে হবে ################\n\n params = { # still not universal\n\n 'num_classes': 7, ### automate; number of classes depends on data fold\n 'batch_size': batch_size,\n 'epochs': epochs,\n 'foldname': foldname,\n 'random_seed': random_seed,\n 'load_path': load_path,\n 'shuffle': True,\n 'initial_epoch': initial_epoch,\n 'eeg_length': 3000,\n 'kernel_size': 16,\n 'bias': True,\n 'maxnorm': 4.,\n 'dropout_rate': 0.5,\n 'dropout_rate_dense': 0.,\n 'padding': 'valid',\n 'activation_function': 'relu',\n 'subsam': 2,\n 'trainable': True,\n 'lr': .0001,\n 'lr_decay': 1e-5,\n }\n\n\n\n\n\n ########### Data Prep ################\n\n # mat_cont = tables.open_file(os.path.join(fold_dir,foldname))\n\n # Elegant one\n # df2 = pd.read_csv( (os.path.join(fold_dir,foldname),header=None)\n # কামলা কাউন্টারপার্ট\n df2 = pd.read_csv('E:/SleepWell/ASSC-master/data/purifiedallDataChannel2.csv', header=None)\n df2.rename({3000: 'hyp', 3001: 'epoch', 3002: 'patID'}, axis=\"columns\", inplace=True)\n\n trainX, valX, trainY, valY, pat_train, pat_val = patientSplitter('randomizedIDs.csv', df2, 0.7)\n print(\"Dataframe has been loaded\")\n\n\n\n\n #####এই স্প্লিটিং করা যাবে না। লিখতে হবে। পেশেন্ট আইডি এর উপর বেইজ করে স্প্লিট করতে হবে।\n\n ######পেশেন্ট আইডী বেজ করে স্প্লিট করা নিয়ে কাজ করতে হবে এইখানে। ১৫ জনের ডাটা যাবে ট্রেনিং এ, বাকি দের ডেটা যাবে ভ্যালিডেশনে। ##############\n\n dummytrainY = trainY.astype(int)\n dummytrainY= dummytrainY\n print(Counter(trainY))\n trainY = to_categorical(trainY-1, params['num_classes'])\n valY = to_categorical(valY-1, params['num_classes'])\n trainX = np.expand_dims(trainX,axis=-1)\n valX = np.expand_dims(valX, axis=-1)\n\n\n ########### Create Model ################\n ###### 
মডেলের শুরুর দিকের লেয়ার গুলা মডুলস নামের পাই ফাইলে আছে। এখানে শুধু ডেন্স লেয়ার গুলা আলাদা করে জইন করা হচ্ছে, কারণ এইগুলা তেই চেঞ্জ আসবে। ######\n ##### হাইপারপ্যারামিটার গুলা ট্রেইন করার জন্য শুধু হাইপার প্যারামিটার গুলা সম্ভলিত অংশ আলাদা করে লেখা হচ্ছে। ##################\n # শিখলাম ব্যপারটা। #\n\n eeg_length = 3000\n kernel_size= 16\n bias = False\n maxnorm=4\n\n\n eps = 1.1e-5\n\n\n\n K.clear_session()\n # প্রথমে ইইজি নেট টা কে তৈরি করা, সব প্যারামিটার তাকে বলে দিয়ে ###\n top_model = eegnet(**params) # might have bugs; sub modules need kwargs integration\n # top_model = eegnet()\n\n # এর পরে সেটার আউটপুট কে ফ্ল্যাটেন করা, যাতে করে ডেন্স লেয়ার এড করা যায়। ফ্ল্যাটেন করার পরে প্রথম ডেন্স লেয়ার এড করা। আরো বেশি ডেন্স লেয়ার এড করা যেতে পারে ব্যপার টা তে। পরে চেষ্টা করে দেখতে হবে বিভিন্ন কনফিগারেশন। ####\n # এখানে যেমন প্রথম ডেন্স লেয়ারের সাথেই সফটম্যাক্স করে আঊটপুট দেওয়া। এমন না করে আরেকটা ডেন্স লেয়ার রেখে সেইটা তে সফট্ম্যাএক্স করা উচিত। রান দেওার পরে সেই কাজ করতে হবে ###\n x = Flatten()(top_model.output)\n x = Dense(params['num_classes'], activation='softmax', kernel_initializer=initializers.he_normal(seed=random_seed),\n kernel_constraint=max_norm(params['maxnorm']), use_bias=True)(x) ##\n\n model = Model(top_model.input, x) # এখানে দুইটা মডেল জোড়া লেগে যাচ্ছে। টপ মডেল, আর পরের ডেন্স করার পরের অংশ - এই দুইটা।\n\n\n # model = Model(inputs=EEG_input, outputs=x)\n\n\n model.summary() # মডেলের সামারি\n if load_path: # If path for loading model was specified\n model.load_weights(filepath=load_path, by_name=False)\n #plot_model(model, to_file='model.png', show_shapes=True) # মডেল কে ইমেজ ফাইলে আঁকা\n model_json = model.to_json() # জেসন ফাইলে লেখা হচ্ছে মডেল টা কে। সব ধরনের প্রিকশন নিয়ে রাখা, আর কি।\n with open(os.path.join(model_dir, log_name, 'model.json').replace('\\\\','/'), \"w\") as json_file:\n json_file.write(model_json)\n model.compile(optimizer=opt(lr=0.001, epsilon=None, decay=0.0), loss='categorical_crossentropy', metrics=['accuracy']) # মডেল কম্পাইলেশন। টেক্সটবুক আচরণ, অবশেষে\n print(\"model compilation: Done\")\n ####### Define Callbacks #######\n\n ### ভ্যালিডেশন একুরেসির উপর বেজ করে চেজপয়েন্ট নিয়ে রাখা মডেল সেভ করার জন্য #########\n modelcheckpnt = ModelCheckpoint(filepath=checkpoint_name,\n monitor='val_acc', save_best_only=False, mode='max')\n print(\"model Checkpoints: Loaded\")\n\n ### টেন্সরবোরড ইন্সট্যান্স কল করা ######\n\n ######added to solve the issue of the model not running When applied callback\n\n tensbd = TensorBoard(log_dir=os.path.join(log_dir, log_name).replace('\\\\','/'),\n batch_size=batch_size, histogram_freq=1,\n write_grads=True,\n # embeddings_freq=99,\n # embeddings_layer_names=embedding_layer_names,\n # embeddings_data=x_val,\n # embeddings_metadata=metadata_file, write_image=True\n )\n print(\"Tensorboard initialization: Done\")\n\n ##### সিএসভি লগারের ইন্সট্যান্স তৈরি করা, লগ সেইভ করার জন্য ###########\n trainingCSVdirectory = log_dir+'/'+log_name+'/'+'training.csv'\n csv_logger = CSVLogger(trainingCSVdirectory)\n # with open(trainingCSVdirectory.replace('\\\\','/'), \"w\") as my_empty_csv:\n # # now you have an empty file already\n # pass\n\n #with open(os.path.join(log_dir, log_name, 'training.csv').replace('\\\\', '/'), \"w\") as csvfile:\n # csv_logger = CSVLogger(csvfile)\n\n #\n print(\"csv logger: Activated\")\n #class_weight= compute_weight(trainY, np.unique(trainY))\n\n if args.classweights:\n params['class_weight'] = compute_weight(trainY, np.unique(trainY))\n else:\n params['class_weight'] = 
dict(zip(np.r_[0:params['num_classes']], np.ones(params['num_classes']))) # weighted 1\n\n ####### Train #######\n\n #trainX, valX, trainY, valY, pat_train, pat_val\n\n\n print(\"model dot fit: Started\")\n try:\n\n model.fit(trainX, trainY, validation_data=(valX, valY), callbacks=[modelcheckpnt, log_metrics(valX, valY, pat_val), csv_logger], batch_size=128, epochs=1) # might have bugs\n #plot_model(moodel, fo_file=log_dir + log_name + '/model.png', show_shapes=True)\n results_log(results_file= results_file, log_dir=log_dir, log_name= log_name, params=params)\n\n except KeyboardInterrupt:\n print(\"Keyboard Interrupt\")\n results_log(results_file, params)\n #plot_model(moodel, fo_file=log_dir + log_name + '/model.png', show_shapes=True)\n results_log(results_file= results_file, log_dir=log_dir, log_name= log_name, params=params)\n"
] | [
[
"pandas.read_csv",
"numpy.expand_dims",
"numpy.random.seed",
"numpy.unique",
"numpy.ones",
"tensorflow.set_random_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
awoo424/algotrading | [
"0ae284e40fd3f6bd9a88a73047b13473a0abe580",
"0ae284e40fd3f6bd9a88a73047b13473a0abe580",
"0ae284e40fd3f6bd9a88a73047b13473a0abe580"
] | [
"code/integrated-strategy/baseline.py",
"code/macroeconomic-analysis/webscrap_midland.py",
"code/integrated-strategy/LSTM-train_price-only_wrapper.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib as mpl\nimport pandas as pd\nimport sys\n\nsys.path.append(\"..\")\nsys.path.append(\"../technical-analysis_python/\")\nmpl.use('tkagg') # issues with Big Sur\n\n# technical analysis\nfrom strategy.macd_crossover import macdCrossover\nfrom backtest import Backtest\nfrom evaluate import PortfolioReturn, SharpeRatio, MaxDrawdown, CAGR\n\n# macroeconomic analysis\nfrom filters.macro_analysis import GetSensitivity, GetMacrodata\n\n# sentiment analysis\nfrom filters.sentiment_analysis import SentimentFilter\n\n\"\"\"\nTechnical analysis\n-\nGenerate signals with MACD crossover strategy\n\"\"\"\n\n# load price data\ndf_whole = pd.read_csv('../../database/microeconomic_data/hkex_ticks_day/hkex_0001.csv', header=0, index_col='Date', parse_dates=True)\n\n# select time range (for trading)\nstart_date = pd.Timestamp('2017-01-01')\nend_date = pd.Timestamp('2021-01-01')\n#start_date = pd.Timestamp('2017-01-01')\n#end_date = pd.Timestamp('2019-02-05')\ndf = df_whole.loc[start_date:end_date]\n\n# get filtered df for macro analysis\nfiltered_df = df_whole.loc[:end_date]\n\nticker = \"0005.HK\"\n\n# apply MACD crossover strategy\nmacd_cross = macdCrossover(df)\nmacd_fig = macd_cross.plot_MACD()\nplt.close() # hide figure\n\nsignals = macd_cross.gen_signals()\nprint(signals.head())\nsignal_fig = macd_cross.plot_signals(signals)\nplt.close() # hide figure\n\n\"\"\"\nMacroecnomic analysis\n-\nAdjust bias in signals with macroeconomic data\n\"\"\"\n# get ticker's sensitivity to macro data\ns_gdp, s_unemploy, s_property = GetSensitivity(filtered_df)\n\n# append signals with macro data\nsignals = GetMacrodata(signals)\n\n# calculate adjusting factor\nsignals['macro_factor'] = s_gdp * signals['GDP'] + s_unemploy * signals['Unemployment rate'] + s_property * signals['Property price']\nsignals['signal'] = signals['signal'] + signals['macro_factor']\n\n# round off signals['signal'] to the nearest integer\nsignals['signal'] = signals['signal'].round(0)\n\n\"\"\"\nSentiment analysis\n- \nFilter out signals that contrast with the sentiment label\n\"\"\"\nfiltered_signals = SentimentFilter(ticker, signals)\n\n\"\"\"\nBacktesting & evaluation\n\"\"\"\nportfolio, backtest_fig = Backtest(ticker, filtered_signals, df)\nplt.close() # hide figure\nprint(\"Final total value: {value:.4f} \".format(value=portfolio['total'][-1]))\nprint(\"Total return: {value:.4f}%\".format(value=(((portfolio['total'][-1] - portfolio['total'][0])/portfolio['total'][0]) * 100))) # for analysis\nprint(\"No. of trade: {value}\".format(value=len(signals[signals.positions == 1])))\n\n\n\"\"\"\nPlotting figures\n\"\"\"\nbacktest_fig.suptitle('Baseline - Portfolio value', fontsize=14)\n#backtest_fig.savefig('./figures/baseline_portfolio-value')\nplt.show()\n\n# Evaluate strategy\n\n# 1. Portfolio return\nreturns_fig = PortfolioReturn(portfolio)\nreturns_fig.suptitle('Baseline - Portfolio return')\n#returns_fig.savefig('./figures/baseline_portfolo-return')\nplt.show()\n\n# 2. Sharpe ratio\nsharpe_ratio = SharpeRatio(portfolio)\nprint(\"Sharpe ratio: {ratio:.4f} \".format(ratio = sharpe_ratio))\n\n# 3. Maximum drawdown\nmaxDrawdown_fig, max_daily_drawdown, daily_drawdown = MaxDrawdown(df)\nmaxDrawdown_fig.suptitle('Baseline - Maximum drawdown', fontsize=14)\n#maxDrawdown_fig.savefig('./figures/baseline_maximum-drawdown')\nplt.show()\n\n# 4. Compound Annual Growth Rate\ncagr = CAGR(portfolio)\nprint(\"CAGR: {cagr:.4f} \".format(cagr = cagr))\n\n",
"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport json\nimport math\nimport time\n\nregion_hk = [\n [\"Chai_wan\", 100404],\n [\"Heng_fa_chuen\", 100407],\n [\"Shau_kei_wan\", 100406],\n [\"Sai_wan_ho_tai_koo\", 100405],\n [\"Quarry_bay\", 100403],\n [\"North_point_fortress_hill\", 100401],\n [\"Braemar_hill_north_point_mid_level\", 100402],\n [\"Jardines_lookout_tai_hang\", 100201],\n [\"Happy_valley_mid_level_east\", 100202],\n [\"Wan_chai_causeway_bay\", 100203],\n [\"Tin_hau\", 100204],\n [\"Central_mid_level_admiralty\", 100101],\n [\"Sheung_wan_central\", 100104],\n [\"Hong_kong_west\", 100102],\n [\"Western_mid_levels\", 100103],\n [\"The_peak\", 100105],\n [\"Residence_bel_air_pokfulam\", 100303],\n [\"Ap_lei_chau\", 100305],\n [\"Aberdeen_wong_chuk_hang\", 100304],\n [\"Repulse_bay_shou_son_hill\", 100301],\n [\"Tai_tam_shek_o\", 100306],\n [\"Stanley\", 100302]\n]\n\nregion_kowloon = [\n [\"Tsim_sha_tsui\", 200501],\n [\"Kowloon_station\", 200504],\n [\"Yau_ma_tei\", 200507],\n [\"Kingspark\", 200503],\n [\"Mongkok\", 200502],\n [\"Tai_kok_tsui\", 200506],\n [\"Olympic\", 200505],\n [\"Lai_chi_kok\", 200601],\n [\"Mei_foo\", 200604],\n [\"Cheung_sha_wan_sham_shui_po\", 200602],\n [\"Yau_yat_tsuen\", 200603],\n [\"Kowloon_tong_beacon_hill\", 200903],\n [\"Ho_man_tin\", 200902],\n [\"Hung_hum\", 200901],\n [\"To_kwa_wan\", 200904],\n [\"Kai_tak\", 200906],\n [\"Kowloon_city\", 200905],\n [\"Wong_tai_sin_lok_fu\", 200801],\n [\"Diamond_hill_san_po_kong_ngau_chi_wan\", 200802],\n [\"Kowloon_bay\", 200701],\n [\"Kwun_tong\", 200703],\n [\"Lam_tin_yau_tong\", 200702],\n [\"Lohas_park\", 201005],\n [\"Tiu_keng_leng\", 201004],\n [\"Hang_hau\", 201001],\n [\"Po_lam_tseung_kwan_o_station\", 201002]\n]\n\nregion_new_territory = [\n [\"Sai_kung_clear_water_bay\", 301003],\n [\"Shatin\", 301702],\n [\"Kau_to_shan_fotan\", 301701],\n [\"Ma_on_shan\", 301703],\n [\"Tai_po\", 301601],\n [\"North\", 301502],\n [\"Sheung_shui_fanling\", 301501],\n [\"Hung_shui_kiu\", 301403],\n [\"Fairview_palm_springs_the_vineyard\", 301404],\n [\"Tin_shui_wai\", 301401],\n [\"Yuen_long\", 301402],\n [\"Tuen_mun\", 301301],\n [\"Tsuen_wan\", 301101],\n [\"Sham_tseng\", 301102],\n [\"Ma_wan\", 301103],\n [\"Kwai_chung\", 301201],\n [\"Tsing_yi\", 301202],\n [\"Discovery_bay\", 301802],\n [\"Tung_chung\", 301803],\n [\"Lan_tau_island\", 301801]\n]\n\nurl = \"https://data.midland.com.hk/search/v1/transactions?\"\n\nheaders = {\n 'Host': 'data.midland.com.hk',\n 'Origin': 'https://www.midland.com.hk',\n 'authorization': 'Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJndWlkIjoibXItMjAyMS0wMS0wNy1IR0dISy15QlFtYWZRM1hvYjZuMWJ2M2xsa0prVGk4aW9Yd3REOGp6VzBDYTZkLVNZQ0M3M2VYNXJwZUtNYWQzNll1T0RXei1JSSIsImlhdCI6MTYxMDAxMjA2MCwiaXNzIjoiZGF0YS5taWRsYW5kLmNvbS5oayJ9.cCMBWDgWiYriWQqcbnvjjV4c7GaleBcA5rQ9a6alKsSgEwOlCX--fwSt2WsPSHMhNMPVqL58t_zqodmntNOKqZiV4baYXyxpj8AdSL4KufmB5xatdIFKY02mSm-4prcUzBDpNTv0u26hrMQP5wJxx1L4Sag_jx0llqU7WSGKXPKUXHopNvoPb0M05MnjWSnh537yOWRfeWSLmtIdAOWtk3BdlTs8drfuzF969e5dyMCOMSqgz9yOY9liDQfehQsN-9sZSNEU1nyR4EsGW8Nn4yjtppEu9FuYAzrrz2X2NJMO2oagQvsNJoqWw83ktPpf4Tpike5bWkdFCS6g-bz7IMN7X4hslcYd8wmzkIg7Ga5HWqzLU5Ns-1fVkXbulI2HvH109Cn9KlLSPp4Ya2ZCVt5ey5DRMkvQ3jxzJv05CoCfmWVKvxrbOma65t7TPmdYX0OgGH4tl9QRwJZrEoWh7st99cAabs4SKdYO1eKydugP6LXN33fnjayUEH4ouciv0QMRyjocPgGYZSVTBCmS_ks1YHmUB7nm6XzkuuLtzmvBo-PsGTcvfNIIZuonkz4fdTJfZniaU9g-Yp5Ike7517scMLMQLmCJVfDjqbDJ3GQnT_uPEBEiKLpq1in3-xh_o-a0w8wXbmlpJQ488cI67ulc3G8558W9357U1ZbXRvg',\n 'Referer': 'https://www.midland.com.hk',\n 'User-Agent': 
'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Mobile Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest'\n}\n\nparams = {\n 'hash': 'true',\n 'lang': 'en',\n 'currency': 'HKD',\n 'unit': 'feet',\n 'search_behavior': 'normal',\n 'dist_ids': '100404',\n 'tx_type': 'S',\n 'tx_date': '3year',\n 'page': '1',\n 'limit': '50',\n}\n\n\ndef get_property_list(region_name, region_list, reg_period):\n\n params['tx_date'] = reg_period\n\n for region in region_list:\n file_name = \"midland/\" + region_name + \"/\" + region[0] + \".csv\"\n print(\"File name: \", file_name)\n\n # open file to be updated\n original_df = pd.read_csv(file_name, index_col=0)\n last_updated_date = original_df[\"tx_date\"].iloc[1]\n print(\"Last updated date: \", last_updated_date)\n\n params['dist_ids'] = region[1]\n\n req = requests.get(url, headers=headers, params=params)\n soup = BeautifulSoup(req.content, 'html.parser')\n # time to load the data\n time.sleep(1)\n json_data = json.loads(soup.text)\n\n total_no = json_data[\"count\"]\n print(\"Total no. of records: \",total_no)\n\n property_list = []\n total_page_no = math.ceil(total_no/50)\n page = 1\n\n while(page < (total_page_no+1)):\n print(\"page: \", page)\n\n params['page'] = page\n req = requests.get(url, headers=headers, params=params)\n soup = BeautifulSoup(req.content, 'html.parser')\n # time to load the data\n time.sleep(1)\n json_data = json.loads(soup.text)\n items = json_data[\"result\"]\n\n for item in items:\n tx_date = item['tx_date'][:10]\n if tx_date == last_updated_date:\n break\n\n item_list = []\n\n #region\n item_list.append(item['region']['name'])\n #subregion\n item_list.append(item['subregion']['name'])\n #district\n item_list.append(item['district']['name'])\n #estate\n item_list.append(item['estate']['name'])\n #building\n try:\n item_list.append(item['building']['name'])\n except:\n item_list.append(None)\n #first_op_date\n item_list.append(item['building']['first_op_date'][:10])\n #floor_level\n try:\n item_list.append(item['floor_level']['id'])\n except:\n item_list.append(None)\n #bedroom\n item_list.append(item['bedroom'])\n #sitting_room\n item_list.append(item['sitting_room'])\n #floor\n try:\n item_list.append(item['floor'])\n except:\n item_list.append(None)\n #flat\n item_list.append(item['flat'])\n #area\n item_list.append(item['area'])\n #net_area\n item_list.append(item['net_area'])\n #price\n item_list.append(item['price'])\n #tx_date\n item_list.append(tx_date)\n #last_tx_date\n item_list.append(item['last_tx_date'][:10])\n #last_price\n item_list.append(item['last_price'])\n #gain\n item_list.append(item['gain'])\n #lat\n try:\n item_list.append(item['location']['lat'])\n except:\n item_list.append(None)\n #lon\n try:\n item_list.append(item['location']['lon'])\n except:\n item_list.append(None)\n\n property_list.append(item_list)\n\n else:\n page += 1\n continue\n break\n\n if len(property_list) > 1:\n dataFrame = pd.DataFrame(data=property_list)\n dataFrame.columns = ['region', 'subregion', 'district', 'estate', 'building', 'first_op_date',\n 'floor_level', 'bedroom', 'sitting_room', 'floor', 'flat', 'area', 'net_area',\n 'price', 'tx_date', 'last_tx_date', 'last_price', 'gain', 'lat', 'lon']\n dataFrame = dataFrame.append(original_df, ignore_index=True)\n dataFrame.to_csv(file_name)\n print(\"updated\")\n else:\n print(\"up to date\")\n print(\"---------------------------------------\")\n\n# get_property_list(region_name, region_list, reg_period (30days, 
90days, 180days, 1year, 3year))\nget_property_list(\"hk_island\", region_hk, '90days')\nget_property_list(\"kowloon\", region_kowloon, '90days')\nget_property_list(\"new_territory\", region_new_territory, '90days')",
"\"\"\"\nTrading signal prediction with LSTM\n-\nInput: dataframe with daily stock tick\nOutput: buy (+1) / sell (-1) / neutral (0) signals\n\n\nCode reference:\nhttps://www.kaggle.com/taronzakaryan/predicting-stock-price-using-lstm-model-pytorch\n\"\"\"\n\nimport numpy as np\nimport random\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\n\nimport datetime\nimport math, time\nimport itertools\nfrom math import sqrt\nfrom operator import itemgetter\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom models.LSTM import LSTM, predict_price\nfrom utils import read_data, load_data, visualise, gen_signal\n\n\ndef LSTM_predict(symbol):\n\n data_dir = \"../../database/microeconomic_data/hkex_ticks_day/\"\n\n # select date range\n dates = pd.date_range('2010-01-02','2016-12-31',freq='B')\n test_dates = pd.date_range('2017-01-03','2021-03-03',freq='B')\n\n # select ticker\n symbol = symbol\n\n # load data\n df = read_data(data_dir, symbol, dates)\n df_test = read_data(data_dir, symbol, test_dates)\n\n scaler = MinMaxScaler(feature_range=(-1, 1))\n\n df['Close'] = scaler.fit_transform(df['Close'].values.reshape(-1,1))\n df_test['Close'] = scaler.fit_transform(df_test['Close'].values.reshape(-1,1))\n\n look_back = 60 # choose sequence length\n\n x_train, y_train, x_test, y_test = load_data(df, look_back)\n print('x_train.shape = ',x_train.shape)\n print('y_train.shape = ',y_train.shape)\n print('x_test.shape = ',x_test.shape)\n print('y_test.shape = ',y_test.shape)\n\n # make training and test sets in torch\n x_train = torch.from_numpy(x_train).type(torch.Tensor)\n x_test = torch.from_numpy(x_test).type(torch.Tensor)\n y_train = torch.from_numpy(y_train).type(torch.Tensor)\n y_test = torch.from_numpy(y_test).type(torch.Tensor)\n\n n_steps = look_back - 1\n batch_size = 32\n num_epochs = 100 # n_iters / (len(train_X) / batch_size)\n\n train = torch.utils.data.TensorDataset(x_train,y_train)\n test = torch.utils.data.TensorDataset(x_test,y_test)\n\n train_loader = torch.utils.data.DataLoader(dataset=train, \n batch_size=batch_size, \n shuffle=False)\n\n test_loader = torch.utils.data.DataLoader(dataset=test, \n batch_size=batch_size, \n shuffle=False)\n\n # Hyperparameters\n input_dim = 1\n hidden_dim = 32\n num_layers = 2 \n output_dim = 1\n torch.manual_seed(1) # set seed\n\n model = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)\n\n loss_fn = torch.nn.MSELoss()\n optimiser = torch.optim.Adam(model.parameters(), lr=0.01)\n\n # check dimensions\n # print(model)\n # print(len(list(model.parameters())))\n # for i in range(len(list(model.parameters()))):\n # print(list(model.parameters())[i].size())\n\n hist = np.zeros(num_epochs)\n\n # Number of steps to unroll\n seq_dim = look_back - 1 \n\n # Train model\n for t in range(num_epochs): \n # Forward pass\n y_train_pred = model(x_train)\n loss = loss_fn(y_train_pred, y_train)\n\n if t % 10 == 0 and t !=0:\n print(\"Epoch \", t, \"MSE: \", loss.item())\n\n hist[t] = loss.item()\n\n # Zero out gradient, else they will accumulate between epochs\n optimiser.zero_grad()\n\n # Backward pass\n loss.backward()\n\n # Update parameters\n optimiser.step()\n \n # plt.plot(hist, label=\"Training loss\")\n # plt.legend()\n # plt.show()\n # plt.savefig('output/0001_training_loss.png')\n\n # Make predictions\n 
y_test_pred = model(x_test)\n\n # Invert predictions\n y_train_pred = scaler.inverse_transform(y_train_pred.detach().numpy())\n y_train = scaler.inverse_transform(y_train.detach().numpy())\n y_test_pred = scaler.inverse_transform(y_test_pred.detach().numpy())\n y_test = scaler.inverse_transform(y_test.detach().numpy())\n\n # Calculate root mean squared error\n trainScore = math.sqrt(mean_squared_error(y_train[:,0], y_train_pred[:,0]))\n print('Train Score: %.2f RMSE' % (trainScore))\n testScore = math.sqrt(mean_squared_error(y_test[:,0], y_test_pred[:,0]))\n print('Test Score: %.2f RMSE' % (testScore))\n\n # Plot predictions\n pred_filename = 'LSTM-price-only_output/' + symbol + '_pred.png'\n visualise(df, y_test, y_test_pred, pred_filename)\n\n # Inferencing\n y_inf_pred, y_inf = predict_price(df_test, model, scaler)\n signal = gen_signal(y_inf_pred, y_inf, df_test.index)\n\n # Save signals as csv file\n output_df = pd.DataFrame()\n output_df['signal'] = signal\n output_df.index.name = \"Date\"\n\n output_filename = 'LSTM-price-only_output/' + symbol + '_output.csv'\n output_df.to_csv(output_filename)\n\n # Plot inferencing results\n inf_filename = 'LSTM-price-only_output/' + symbol + '_inf.png'\n visualise(df_test, y_inf, y_inf_pred, inf_filename)\n\n\n\ndef main():\n ticker_list = ['0001', '0002', '0003', '0004', '0005', '0016', '0019', '0113', '0168', '0175', '0386', '0388', '0669', '0700',\n '0762', '0823', '0857', '0868', '0883', '0939', '0941', '0968', '1211', '1299', '1818', '2319', '2382', '2688', '2689', '2899']\n \n for ticker in ticker_list:\n\n print(\"############ Ticker: \" + ticker + \" ############\")\n LSTM_predict(ticker)\n print('\\n')\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.read_csv",
"pandas.Timestamp",
"matplotlib.use",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show"
],
[
"pandas.read_csv",
"pandas.DataFrame"
],
[
"torch.nn.MSELoss",
"torch.manual_seed",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"pandas.DataFrame",
"sklearn.metrics.mean_squared_error",
"pandas.date_range",
"numpy.zeros",
"matplotlib.pyplot.style.use",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
pnickl/mimo | [
"81c4bbd2594e2136445009eae752ab8a1602a1cf"
] | [
"examples/ilr/robot/wam/evaluate_wam_ilr.py"
] | [
"import os\n\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\nimport argparse\n\nimport numpy as np\nimport numpy.random as npr\n\nimport mimo\nfrom mimo.distributions import NormalWishart\nfrom mimo.distributions import GaussianWithNormalWishart\n\nfrom mimo.distributions import MatrixNormalWishart\nfrom mimo.distributions import LinearGaussianWithMatrixNormalWishart\n\nfrom mimo.distributions import StickBreaking\nfrom mimo.distributions import CategoricalWithStickBreaking\n\nfrom mimo.distributions import Dirichlet\nfrom mimo.distributions import CategoricalWithDirichlet\n\nfrom mimo.mixtures import BayesianMixtureOfLinearGaussians\n\nfrom tqdm import tqdm\n\nimport pathos\nfrom pathos.pools import ProcessPool as Pool\nnb_cores = pathos.multiprocessing.cpu_count()\n\n\ndef _job(kwargs):\n args = kwargs.pop('arguments')\n seed = kwargs.pop('seed')\n\n input = kwargs.pop('input')\n target = kwargs.pop('target')\n\n input_transform = kwargs.pop('input_transform')\n target_transform = kwargs.pop('target_transform')\n\n input_dim = input.shape[-1]\n target_dim = target.shape[-1]\n\n # set random seed\n np.random.seed(seed)\n\n nb_params = input_dim\n if args.affine:\n nb_params += 1\n\n basis_prior = []\n models_prior = []\n\n # initialize Normal\n psi_nw = 1e1\n kappa = 1e-2\n\n # initialize Matrix-Normal\n psi_mnw = 1e2\n K = 1e-2\n\n for n in range(args.nb_models):\n basis_hypparams = dict(mu=np.zeros((input_dim, )),\n psi=np.eye(input_dim) * psi_nw,\n kappa=kappa, nu=input_dim + 1)\n\n aux = NormalWishart(**basis_hypparams)\n basis_prior.append(aux)\n\n models_hypparams = dict(M=np.zeros((target_dim, nb_params)),\n K=K * np.eye(nb_params), nu=target_dim + 1,\n psi=np.eye(target_dim) * psi_mnw)\n\n aux = MatrixNormalWishart(**models_hypparams)\n models_prior.append(aux)\n\n # define gating\n if args.prior == 'stick-breaking':\n gating_hypparams = dict(K=args.nb_models, gammas=np.ones((args.nb_models,)),\n deltas=np.ones((args.nb_models,)) * args.alpha)\n gating_prior = StickBreaking(**gating_hypparams)\n\n ilr = BayesianMixtureOfLinearGaussians(gating=CategoricalWithStickBreaking(gating_prior),\n basis=[GaussianWithNormalWishart(basis_prior[i]) for i in range(args.nb_models)],\n models=[LinearGaussianWithMatrixNormalWishart(models_prior[i], affine=args.affine)\n for i in range(args.nb_models)])\n\n else:\n gating_hypparams = dict(K=args.nb_models, alphas=np.ones((args.nb_models,)) * args.alpha)\n gating_prior = Dirichlet(**gating_hypparams)\n\n ilr = BayesianMixtureOfLinearGaussians(gating=CategoricalWithDirichlet(gating_prior),\n basis=[GaussianWithNormalWishart(basis_prior[i]) for i in range(args.nb_models)],\n models=[LinearGaussianWithMatrixNormalWishart(models_prior[i], affine=args.affine)\n for i in range(args.nb_models)])\n\n ilr.add_data(target, input, whiten=True,\n transform_type='PCA',\n target_transform=target_transform,\n input_transform=input_transform)\n\n # Gibbs sampling\n ilr.resample(maxiter=args.gibbs_iters,\n progprint=args.verbose)\n\n for i in range(args.super_iters):\n if args.stochastic:\n # Stochastic meanfield VI\n ilr.meanfield_stochastic_descent(maxiter=args.svi_iters,\n stepsize=args.svi_stepsize,\n batchsize=args.svi_batchsize)\n if args.deterministic:\n # Meanfield VI\n ilr.meanfield_coordinate_descent(tol=args.earlystop,\n maxiter=args.meanfield_iters,\n progprint=args.verbose)\n\n if args.super_iters > 1 and i + 1 < args.super_iters:\n ilr.gating.prior = ilr.gating.posterior\n for i in range(ilr.size):\n ilr.basis[i].prior = ilr.basis[i].posterior\n 
ilr.models[i].prior = ilr.models[i].posterior\n\n return ilr\n\n\ndef parallel_dpglm_inference(nb_jobs=50, **kwargs):\n kwargs_list = []\n for n in range(nb_jobs):\n kwargs['seed'] = npr.randint(1337, 6174)\n kwargs_list.append(kwargs.copy())\n\n with Pool(processes=min(nb_jobs, nb_cores),\n initializer=tqdm.set_lock,\n initargs=(tqdm.get_lock(),)) as p:\n res = p.map(_job, kwargs_list)\n\n return res\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Evaluate ilr with a Stick-breaking prior')\n parser.add_argument('--datapath', help='path to dataset', default=os.path.abspath(mimo.__file__ + '/../../datasets'))\n parser.add_argument('--evalpath', help='path to evaluation', default=os.path.abspath(mimo.__file__ + '/../../evaluation/robot'))\n parser.add_argument('--nb_seeds', help='number of seeds', default=1, type=int)\n parser.add_argument('--prior', help='prior type', default='stick-breaking')\n parser.add_argument('--alpha', help='concentration parameter', default=1000, type=float)\n parser.add_argument('--nb_models', help='max number of models', default=1000, type=int)\n parser.add_argument('--affine', help='affine functions', action='store_true', default=True)\n parser.add_argument('--no_affine', help='non-affine functions', dest='affine', action='store_false')\n parser.add_argument('--super_iters', help='interleaving Gibbs/VI iterations', default=1, type=int)\n parser.add_argument('--gibbs_iters', help='Gibbs iterations', default=1, type=int)\n parser.add_argument('--deterministic', help='use deterministic VI', action='store_true', default=True)\n parser.add_argument('--no_deterministic', help='do not use deterministic VI', dest='deterministic', action='store_false')\n parser.add_argument('--stochastic', help='use stochastic VI', action='store_true', default=False)\n parser.add_argument('--no_stochastic', help='do not use stochastic VI', dest='stochastic', action='store_false')\n parser.add_argument('--meanfield_iters', help='max VI iterations', default=10, type=int)\n parser.add_argument('--earlystop', help='stopping criterion for VI', default=1e-2, type=float)\n parser.add_argument('--svi_iters', help='SVI iterations', default=1000, type=int)\n parser.add_argument('--svi_stepsize', help='SVI step size', default=1e-3, type=float)\n parser.add_argument('--svi_batchsize', help='SVI batch size', default=1024, type=int)\n parser.add_argument('--prediction', help='prediction w/ mode or average', default='average')\n parser.add_argument('--verbose', help='show learning progress', action='store_true', default=True)\n parser.add_argument('--mute', help='show no output', dest='verbose', action='store_false')\n parser.add_argument('--seed', help='choose seed', default=1337, type=int)\n\n args = parser.parse_args()\n\n import json\n print(json.dumps(vars(args), indent=4))\n\n np.random.seed(args.seed)\n\n train_input = np.load(args.datapath + '/ourwam4dof/wam_inv_train.npz')['input']\n train_target = np.load(args.datapath + '/ourwam4dof/wam_inv_train.npz')['target']\n\n test_input = np.load(args.datapath + '/ourwam4dof/wam_inv_test.npz')['input']\n test_target = np.load(args.datapath + '/ourwam4dof/wam_inv_test.npz')['target']\n\n input_data = np.vstack((train_input, test_input))\n target_data = np.vstack((train_target, test_target))\n\n # scale data\n from sklearn.decomposition import PCA\n input_transform = PCA(n_components=12, whiten=True)\n target_transform = PCA(n_components=4, whiten=True)\n\n input_transform.fit(input_data)\n 
target_transform.fit(target_data)\n\n ilrs = parallel_dpglm_inference(nb_jobs=args.nb_seeds,\n input=train_input,\n target=train_target,\n input_transform=input_transform,\n target_transform=target_transform,\n arguments=args)\n\n from sklearn.metrics import mean_squared_error, r2_score\n\n test_mse, test_smse, test_nlpd, nb_models = [], [], [], []\n for ilr in ilrs:\n _nb_models = len(ilr.used_labels)\n\n _train_mu, _, _, _train_nlpd = \\\n ilr.meanfield_prediction(x=train_input,\n y=train_target,\n prediction=args.prediction)\n\n _train_mse = mean_squared_error(train_target, _train_mu)\n _train_smse = 1. - r2_score(train_target, _train_mu)\n\n print('TRAIN - MSE:', _train_mse, 'SMSE:', _train_smse,\n 'NLPD:', _train_nlpd.mean(), 'Compnents:', _nb_models)\n\n _test_mu, _, _, _test_nlpd =\\\n ilr.meanfield_prediction(x=test_input,\n y=test_target,\n prediction=args.prediction)\n\n _test_mse = mean_squared_error(test_target, _test_mu)\n _test_smse = 1. - r2_score(test_target, _test_mu)\n\n print('TEST - MSE:', _test_mse, 'SMSE:', _test_smse,\n 'NLPD:', _test_nlpd.mean(), 'Compnents:', _nb_models)\n\n test_mse.append(_test_mse)\n test_smse.append(_test_smse)\n test_nlpd.append(_test_nlpd.mean())\n nb_models.append(_nb_models)\n\n mean_mse = np.mean(test_mse)\n std_mse = np.std(test_mse)\n\n mean_smse = np.mean(test_smse)\n std_smse = np.std(test_smse)\n\n mean_nlpd = np.mean(test_nlpd)\n std_nlpd = np.std(test_nlpd)\n\n mean_nb_models = np.mean(nb_models)\n std_nb_models = np.std(nb_models)\n\n arr = np.array([mean_mse, std_mse,\n mean_smse, std_smse,\n mean_nlpd, std_nlpd,\n mean_nb_models, std_nb_models])\n\n import pandas as pd\n dt = pd.DataFrame(data=arr, index=['mse_avg', 'mse_std',\n 'smse_avg', 'smse_std',\n 'nlpd_avg', 'nlpd_std',\n 'models_avg', 'models_std'])\n\n dt.to_csv('wam_' + str(args.prior) +\n '_alpha_' + str(args.alpha) + '.csv',\n mode='a', index=True)\n"
] | [
[
"sklearn.metrics.r2_score",
"numpy.random.seed",
"numpy.eye",
"pandas.DataFrame",
"sklearn.metrics.mean_squared_error",
"numpy.ones",
"numpy.std",
"numpy.mean",
"numpy.load",
"numpy.array",
"numpy.zeros",
"sklearn.decomposition.PCA",
"numpy.vstack",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ryangillard/artificial_intelligence | [
"f7c21af221f366b075d351deeeb00a1b266ac3e3",
"f7c21af221f366b075d351deeeb00a1b266ac3e3",
"f7c21af221f366b075d351deeeb00a1b266ac3e3"
] | [
"machine_learning/gan/pgan/tf_pgan/pgan_module/trainer/discriminator.py",
"machine_learning/gan/pg_anogan_sim_enc/tf_pg_anogan_sim_enc/pg_anogan_sim_enc_module/trainer/train.py",
"machine_learning/gan/wgan_gp/tf_wgan_gp/wgan_gp_module/trainer/critic.py"
] | [
"import tensorflow as tf\n\nfrom . import regularization\nfrom .print_object import print_obj\n\n\nclass Discriminator(object):\n \"\"\"Discriminator that takes image input and outputs logits.\n\n Fields:\n name: str, name of `Discriminator`.\n kernel_regularizer: `l1_l2_regularizer` object, regularizar for kernel\n variables.\n bias_regularizer: `l1_l2_regularizer` object, regularizar for bias\n variables.\n from_rgb_conv_layers: list, fromRGB 1x1 `Conv2D` layers.\n conv_layer_blocks: list, lists of `Conv2D` block layers for each\n block.\n transition_downsample_layers: list, `AveragePooling2D` layers for\n downsampling shrinking transition paths.\n flatten_layer: `Flatten` layer prior to logits layer.\n logits_layer: `Dense` layer for logits.\n build_discriminator_tensors: list, tensors used to build layer\n internals.\n \"\"\"\n def __init__(self, kernel_regularizer, bias_regularizer, params, name):\n \"\"\"Instantiates and builds discriminator network.\n\n Args:\n kernel_regularizer: `l1_l2_regularizer` object, regularizar for\n kernel variables.\n bias_regularizer: `l1_l2_regularizer` object, regularizar for bias\n variables.\n params: dict, user passed parameters.\n name: str, name of discriminator.\n \"\"\"\n # Set name of discriminator.\n self.name = name\n\n # Regularizer for kernel weights.\n self.kernel_regularizer = kernel_regularizer\n\n # Regularizer for bias weights.\n self.bias_regularizer = bias_regularizer\n\n # Instantiate discriminator layers.\n (self.from_rgb_conv_layers,\n self.conv_layer_blocks,\n self.transition_downsample_layers,\n self.flatten_layer,\n self.logits_layer) = self.instantiate_discriminator_layers(\n params\n )\n\n # Build discriminator layer internals.\n self.build_discriminator_tensors = self.build_discriminator_layers(\n params\n )\n\n def instantiate_discriminator_from_rgb_layers(self, params):\n \"\"\"Instantiates discriminator fromRGB layers of 1x1 convs.\n\n Args:\n params: dict, user passed parameters.\n\n Returns:\n List of fromRGB 1x1 Conv2D layers.\n \"\"\"\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Get fromRGB layer properties.\n from_rgb = [\n params[\"discriminator_from_rgb_layers\"][i][0][:]\n for i in range(len(params[\"discriminator_from_rgb_layers\"]))\n ]\n\n # Create list to hold toRGB 1x1 convs.\n from_rgb_conv_layers = [\n tf.layers.Conv2D(\n filters=from_rgb[i][3],\n kernel_size=from_rgb[i][0:2],\n strides=from_rgb[i][4:6],\n padding=\"same\",\n activation=tf.nn.leaky_relu,\n kernel_initializer=\"he_normal\",\n kernel_regularizer=self.kernel_regularizer,\n bias_regularizer=self.bias_regularizer,\n name=\"{}_from_rgb_layers_conv2d_{}_{}x{}_{}_{}\".format(\n self.name,\n i,\n from_rgb[i][0],\n from_rgb[i][1],\n from_rgb[i][2],\n from_rgb[i][3]\n )\n )\n for i in range(len(from_rgb))\n ]\n print_obj(\n \"\\ninstantiate_discriminator_from_rgb_layers\",\n \"from_rgb_conv_layers\",\n from_rgb_conv_layers\n )\n\n return from_rgb_conv_layers\n\n def instantiate_discriminator_base_conv_layer_block(self, params):\n \"\"\"Instantiates discriminator base conv layer block.\n\n Args:\n params: dict, user passed parameters.\n\n Returns:\n List of base conv layers.\n \"\"\"\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Get conv block layer properties.\n conv_block = params[\"discriminator_base_conv_blocks\"][0]\n\n # Create list of base conv layers.\n base_conv_layers = [\n tf.layers.Conv2D(\n filters=conv_block[i][3],\n kernel_size=conv_block[i][0:2],\n 
strides=conv_block[i][4:6],\n padding=\"same\",\n activation=tf.nn.leaky_relu,\n kernel_initializer=\"he_normal\",\n kernel_regularizer=self.kernel_regularizer,\n bias_regularizer=self.bias_regularizer,\n name=\"{}_base_layers_conv2d_{}_{}x{}_{}_{}\".format(\n self.name,\n i,\n conv_block[i][0],\n conv_block[i][1],\n conv_block[i][2],\n conv_block[i][3]\n )\n )\n for i in range(len(conv_block) - 1)\n ]\n\n # Have valid padding for layer just before flatten and logits.\n base_conv_layers.append(\n tf.layers.Conv2D(\n filters=conv_block[-1][3],\n kernel_size=conv_block[-1][0:2],\n strides=conv_block[-1][4:6],\n padding=\"valid\",\n activation=tf.nn.leaky_relu,\n kernel_initializer=\"he_normal\",\n kernel_regularizer=self.kernel_regularizer,\n bias_regularizer=self.bias_regularizer,\n name=\"{}_base_layers_conv2d_{}_{}x{}_{}_{}\".format(\n self.name,\n len(conv_block) - 1,\n conv_block[-1][0],\n conv_block[-1][1],\n conv_block[-1][2],\n conv_block[-1][3]\n )\n )\n )\n print_obj(\n \"\\ninstantiate_discriminator_base_conv_layer_block\",\n \"base_conv_layers\",\n base_conv_layers\n )\n\n return base_conv_layers\n\n def instantiate_discriminator_growth_layer_block(self, params, block_idx):\n \"\"\"Instantiates discriminator growth block layers.\n\n Args:\n params: dict, user passed parameters.\n block_idx: int, the current growth block's index.\n\n Returns:\n List of growth block layers.\n \"\"\"\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Get conv block layer properties.\n conv_block = params[\"discriminator_growth_conv_blocks\"][block_idx]\n\n # Create new inner convolutional layers.\n conv_layers = [\n tf.layers.Conv2D(\n filters=conv_block[i][3],\n kernel_size=conv_block[i][0:2],\n strides=conv_block[i][4:6],\n padding=\"same\",\n activation=tf.nn.leaky_relu,\n kernel_initializer=\"he_normal\",\n kernel_regularizer=self.kernel_regularizer,\n bias_regularizer=self.bias_regularizer,\n name=\"{}_growth_layers_conv2d_{}_{}_{}x{}_{}_{}\".format(\n self.name,\n block_idx,\n i,\n conv_block[i][0],\n conv_block[i][1],\n conv_block[i][2],\n conv_block[i][3]\n )\n )\n for i in range(len(conv_block))\n ]\n print_obj(\n \"\\ninstantiate_discriminator_growth_layer_block\",\n \"conv_layers\",\n conv_layers\n )\n\n # Down sample from 2s X 2s to s X s image.\n downsampled_image_layer = tf.layers.AveragePooling2D(\n pool_size=(2, 2),\n strides=(2, 2),\n name=\"{}_growth_downsampled_image_{}\".format(\n self.name,\n block_idx\n )\n )\n print_obj(\n \"instantiate_discriminator_growth_layer_block\",\n \"downsampled_image_layer\",\n downsampled_image_layer\n )\n\n return conv_layers + [downsampled_image_layer]\n\n def instantiate_discriminator_growth_transition_downsample_layers(\n self, params):\n \"\"\"Instantiates discriminator growth transition downsample layers.\n\n Args:\n params: dict, user passed parameters.\n\n Returns:\n List of growth transition downsample layers.\n \"\"\"\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Down sample from 2s X 2s to s X s image.\n downsample_layers = [\n tf.layers.AveragePooling2D(\n pool_size=(2, 2),\n strides=(2, 2),\n name=\"{}_growth_transition_downsample_layer_{}\".format(\n self.name,\n layer_idx\n )\n )\n for layer_idx in range(\n 1 + len(params[\"discriminator_growth_conv_blocks\"])\n )\n ]\n print_obj(\n \"\\ninstantiate_discriminator_growth_transition_downsample_layers\",\n \"downsample_layers\",\n downsample_layers\n )\n\n return downsample_layers\n\n def 
instantiate_discriminator_logits_layer(self):\n \"\"\"Instantiates discriminator flatten and logits layers.\n\n Returns:\n Flatten and logits layers of discriminator.\n \"\"\"\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Flatten layer to ready final block conv tensor for dense layer.\n flatten_layer = tf.layers.Flatten(\n name=\"{}_flatten_layer\".format(self.name)\n )\n print_obj(\n \"\\ncreate_discriminator_logits_layer\",\n \"flatten_layer\",\n flatten_layer\n )\n\n # Final linear layer for logits.\n logits_layer = tf.layers.Dense(\n units=1,\n activation=None,\n kernel_regularizer=self.kernel_regularizer,\n bias_regularizer=self.bias_regularizer,\n name=\"{}_layers_dense_logits\".format(self.name)\n )\n print_obj(\n \"create_growth_transition_discriminator_network\",\n \"logits_layer\",\n logits_layer\n )\n\n return flatten_layer, logits_layer\n\n def instantiate_discriminator_layers(self, params):\n \"\"\"Instantiates layers of discriminator network.\n\n Args:\n params: dict, user passed parameters.\n\n Returns:\n from_rgb_conv_layers: list, fromRGB 1x1 `Conv2D` layers.\n conv_layer_blocks: list, lists of `Conv2D` block layers for each\n block.\n transition_downsample_layers: list, `AveragePooling2D` layers for\n downsampling shrinking transition paths.\n flatten_layer: `Flatten` layer prior to logits layer.\n logits_layer: `Dense` layer for logits.\n \"\"\"\n # Instantiate fromRGB 1x1 `Conv2D` layers.\n from_rgb_conv_layers = self.instantiate_discriminator_from_rgb_layers(\n params=params\n )\n print_obj(\n \"instantiate_discriminator_layers\",\n \"from_rgb_conv_layers\",\n from_rgb_conv_layers\n )\n\n # Instantiate base conv block's `Conv2D` layers, for post-growth.\n conv_layer_blocks = [\n self.instantiate_discriminator_base_conv_layer_block(\n params=params\n )\n ]\n\n # Instantiate growth `Conv2D` layer blocks.\n conv_layer_blocks.extend(\n [\n self.instantiate_discriminator_growth_layer_block(\n params=params,\n block_idx=block_idx\n )\n for block_idx in range(\n len(params[\"discriminator_growth_conv_blocks\"])\n )\n ]\n )\n print_obj(\n \"instantiate_discriminator_layers\",\n \"conv_layer_blocks\",\n conv_layer_blocks\n )\n\n # Instantiate transition downsample `AveragePooling2D` layers.\n transition_downsample_layers = (\n self.instantiate_discriminator_growth_transition_downsample_layers(\n params=params\n )\n )\n print_obj(\n \"instantiate_discriminator_layers\",\n \"transition_downsample_layers\",\n transition_downsample_layers\n )\n\n # Instantiate `Flatten` and `Dense` logits layers.\n (flatten_layer,\n logits_layer) = self.instantiate_discriminator_logits_layer()\n print_obj(\n \"instantiate_discriminator_layers\",\n \"flatten_layer\",\n flatten_layer\n )\n print_obj(\n \"instantiate_discriminator_layers\",\n \"logits_layer\",\n logits_layer\n )\n\n return (from_rgb_conv_layers,\n conv_layer_blocks,\n transition_downsample_layers,\n flatten_layer,\n logits_layer)\n\n ##########################################################################\n ##########################################################################\n ##########################################################################\n\n def build_discriminator_from_rgb_layers(self, params):\n \"\"\"Creates discriminator fromRGB layers of 1x1 convs.\n\n Args:\n params: dict, user passed parameters.\n\n Returns:\n List of tensors from fromRGB 1x1 `Conv2D` layers.\n \"\"\"\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Get fromRGB layer properties.\n 
from_rgb = [\n params[\"discriminator_from_rgb_layers\"][i][0][:]\n for i in range(len(params[\"discriminator_from_rgb_layers\"]))\n ]\n\n # Create list to hold fromRGB 1x1 convs.\n from_rgb_conv_tensors = [\n self.from_rgb_conv_layers[i](\n inputs=tf.zeros(\n shape=[1] + from_rgb[i][0:3], dtype=tf.float32\n )\n )\n for i in range(len(from_rgb))\n ]\n print_obj(\n \"\\nbuild_discriminator_from_rgb_layers\",\n \"from_rgb_conv_tensors\",\n from_rgb_conv_tensors\n )\n\n return from_rgb_conv_tensors\n\n def build_discriminator_base_conv_layer_block(self, params):\n \"\"\"Creates discriminator base conv layer block.\n\n Args:\n params: dict, user passed parameters.\n\n Returns:\n List of tensors from base `Conv2D` layers.\n \"\"\"\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Get conv block layer properties.\n conv_block = params[\"discriminator_base_conv_blocks\"][0]\n\n # The base conv block is always the 0th one.\n base_conv_layer_block = self.conv_layer_blocks[0]\n\n # Minibatch stddev comes before first base conv layer,\n # creating 1 extra feature map.\n if params[\"use_minibatch_stddev\"]:\n # Therefore, the number of input channels will be 1 higher\n # for first base conv block.\n num_in_channels = conv_block[0][3] + 1\n else:\n num_in_channels = conv_block[0][3]\n\n # Get first base conv layer from list.\n first_base_conv_layer = base_conv_layer_block[0]\n\n # Build first layer with bigger tensor.\n base_conv_tensors = [\n first_base_conv_layer(\n inputs=tf.zeros(\n shape=[1] + conv_block[0][0:2] + [num_in_channels],\n dtype=tf.float32\n )\n )\n ]\n\n # Now build the rest of the base conv block layers, store in list.\n base_conv_tensors.extend(\n [\n base_conv_layer_block[i](\n inputs=tf.zeros(\n shape=[1] + conv_block[i][0:3], dtype=tf.float32\n )\n )\n for i in range(1, len(conv_block))\n ]\n )\n print_obj(\n \"\\nbuild_discriminator_base_conv_layer_block\",\n \"base_conv_tensors\",\n base_conv_tensors\n )\n\n return base_conv_tensors\n\n def build_discriminator_growth_layer_block(self, params, block_idx):\n \"\"\"Creates discriminator growth block.\n\n Args:\n params: dict, user passed parameters.\n block_idx: int, the current growth block's index.\n\n Returns:\n List of tensors from growth block `Conv2D` layers.\n \"\"\"\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Get conv block layer properties.\n conv_block = params[\"discriminator_growth_conv_blocks\"][block_idx]\n\n # Create new inner convolutional layers.\n conv_tensors = [\n self.conv_layer_blocks[1 + block_idx][i](\n inputs=tf.zeros(\n shape=[1] + conv_block[i][0:3], dtype=tf.float32\n )\n )\n for i in range(len(conv_block))\n ]\n print_obj(\n \"\\nbuild_discriminator_growth_layer_block\",\n \"conv_tensors\",\n conv_tensors\n )\n\n return conv_tensors\n\n def build_discriminator_logits_layer(self, params):\n \"\"\"Builds flatten and logits layer internals using call.\n\n Args:\n params: dict, user passed parameters.\n\n Returns:\n Final logits tensor of discriminator.\n \"\"\"\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n block_conv_size = params[\"discriminator_base_conv_blocks\"][-1][-1][3]\n\n # Flatten final block conv tensor.\n block_conv_flat = self.flatten_layer(\n inputs=tf.zeros(\n shape=[1, 1, 1, block_conv_size],\n dtype=tf.float32\n )\n )\n print_obj(\n \"\\nbuild_discriminator_logits_layer\",\n \"block_conv_flat\",\n block_conv_flat\n )\n\n # Final linear layer for logits.\n logits = 
self.logits_layer(inputs=block_conv_flat)\n print_obj(\"build_discriminator_logits_layer\", \"logits\", logits)\n\n return logits\n\n def build_discriminator_layers(self, params):\n \"\"\"Builds discriminator layer internals.\n\n Args:\n params: dict, user passed parameters.\n\n Returns:\n Logits tensor.\n \"\"\"\n # Build fromRGB 1x1 `Conv2D` layers internals through call.\n from_rgb_conv_tensors = self.build_discriminator_from_rgb_layers(\n params=params\n )\n print_obj(\n \"\\nbuild_discriminator_layers\",\n \"from_rgb_conv_tensors\",\n from_rgb_conv_tensors\n )\n\n with tf.control_dependencies(control_inputs=from_rgb_conv_tensors):\n # Create base convolutional block's layer internals using call.\n conv_block_tensors = [\n self.build_discriminator_base_conv_layer_block(\n params=params\n )\n ]\n\n # Build growth `Conv2D` layer block internals through call.\n conv_block_tensors.extend(\n [\n self.build_discriminator_growth_layer_block(\n params=params, block_idx=block_idx\n )\n for block_idx in range(\n len(params[\"discriminator_growth_conv_blocks\"])\n )\n ]\n )\n\n # Flatten conv block tensor lists of lists into list.\n conv_block_tensors = [\n item for sublist in conv_block_tensors for item in sublist\n ]\n print_obj(\n \"build_discriminator_layers\",\n \"conv_block_tensors\",\n conv_block_tensors\n )\n\n with tf.control_dependencies(control_inputs=conv_block_tensors):\n # Build logits layer internals using call.\n logits_tensor = self.build_discriminator_logits_layer(\n params=params\n )\n print_obj(\n \"build_discriminator_layers\",\n \"logits_tensor\",\n logits_tensor\n )\n\n return logits_tensor\n\n ##########################################################################\n ##########################################################################\n ##########################################################################\n\n def minibatch_stddev_common(\n self,\n variance,\n tile_multiples,\n params,\n caller):\n \"\"\"Adds minibatch stddev feature map to image using grouping.\n\n This is the code that is common between the grouped and ungroup\n minibatch stddev functions.\n\n Args:\n variance: tensor, variance of minibatch or minibatch groups.\n tile_multiples: list, length 4, used to tile input to final shape\n input_dims[i] * mutliples[i].\n params: dict, user passed parameters.\n caller: str, name of the calling function.\n\n Returns:\n Minibatch standard deviation feature map image added to\n channels of shape\n [cur_batch_size, image_size, image_size, 1].\n \"\"\"\n with tf.variable_scope(\n \"{}/{}_minibatch_stddev\".format(self.name, caller)):\n # Calculate standard deviation over the group plus small epsilon.\n # shape = (\n # {\"grouped\": cur_batch_size / group_size, \"ungrouped\": 1},\n # image_size,\n # image_size,\n # num_channels\n # )\n stddev = tf.sqrt(\n x=variance + 1e-8, name=\"{}_stddev\".format(caller)\n )\n print_obj(\n \"minibatch_stddev_common\", \"{}_stddev\".format(caller), stddev\n )\n\n # Take average over feature maps and pixels.\n if params[\"minibatch_stddev_averaging\"]:\n # grouped shape = (cur_batch_size / group_size, 1, 1, 1)\n # ungrouped shape = (1, 1, 1, 1)\n stddev = tf.reduce_mean(\n input_tensor=stddev,\n axis=[1, 2, 3],\n keepdims=True,\n name=\"{}_stddev_average\".format(caller)\n )\n print_obj(\n \"minibatch_stddev_common\",\n \"{}_stddev_average\".format(caller),\n stddev\n )\n\n # Replicate over group and pixels.\n # shape = (\n # cur_batch_size,\n # image_size,\n # image_size,\n # 1\n # )\n stddev_feature_map = tf.tile(\n 
input=stddev,\n multiples=tile_multiples,\n name=\"{}_stddev_feature_map\".format(caller)\n )\n print_obj(\n \"minibatch_stddev_common\",\n \"{}_stddev_feature_map\".format(caller),\n stddev_feature_map\n )\n\n return stddev_feature_map\n\n def grouped_minibatch_stddev(\n self,\n X,\n cur_batch_size,\n static_image_shape,\n params,\n group_size):\n \"\"\"Adds minibatch stddev feature map to image using grouping.\n\n Args:\n X: tf.float32 tensor, image of shape\n [cur_batch_size, image_size, image_size, num_channels].\n cur_batch_size: tf.int64 tensor, the dynamic batch size (in case\n of partial batch).\n static_image_shape: list, the static shape of each image.\n params: dict, user passed parameters.\n group_size: int, size of image groups.\n\n Returns:\n Minibatch standard deviation feature map image added to\n channels of shape\n [cur_batch_size, image_size, image_size, 1].\n \"\"\"\n with tf.variable_scope(\n \"{}/grouped_minibatch_stddev\".format(self.name)):\n # The group size should be less than or equal to the batch size.\n # shape = ()\n group_size = tf.minimum(\n x=group_size, y=cur_batch_size, name=\"group_size\"\n )\n print_obj(\"grouped_minibatch_stddev\", \"group_size\", group_size)\n\n # Split minibatch into M groups of size group_size, rank 5 tensor.\n # shape = (\n # group_size,\n # cur_batch_size / group_size,\n # image_size,\n # image_size,\n # num_channels\n # )\n grouped_image = tf.reshape(\n tensor=X,\n shape=[group_size, -1] + static_image_shape,\n name=\"grouped_image\"\n )\n print_obj(\n \"grouped_minibatch_stddev\",\n \"grouped_image\",\n grouped_image\n )\n\n # Find the mean of each group.\n # shape = (\n # 1,\n # cur_batch_size / group_size,\n # image_size,\n # image_size,\n # num_channels\n # )\n grouped_mean = tf.reduce_mean(\n input_tensor=grouped_image,\n axis=0,\n keepdims=True,\n name=\"grouped_mean\"\n )\n print_obj(\n \"grouped_minibatch_stddev\", \"grouped_mean\", grouped_mean\n )\n\n # Center each group using the mean.\n # shape = (\n # group_size,\n # cur_batch_size / group_size,\n # image_size,\n # image_size,\n # num_channels\n # )\n centered_grouped_image = tf.subtract(\n x=grouped_image, y=grouped_mean, name=\"centered_grouped_image\"\n )\n print_obj(\n \"grouped_minibatch_stddev\",\n \"centered_grouped_image\",\n centered_grouped_image\n )\n\n # Calculate variance over group.\n # shape = (\n # cur_batch_size / group_size,\n # image_size,\n # image_size,\n # num_channels\n # )\n grouped_variance = tf.reduce_mean(\n input_tensor=tf.square(x=centered_grouped_image),\n axis=0,\n name=\"grouped_variance\"\n )\n print_obj(\n \"grouped_minibatch_stddev\",\n \"grouped_variance\",\n grouped_variance\n )\n\n # Get stddev image using ops common to both grouped & ungrouped.\n stddev_feature_map = self.minibatch_stddev_common(\n variance=grouped_variance,\n tile_multiples=[group_size] + static_image_shape[0:2] + [1],\n params=params,\n caller=\"grouped\"\n )\n print_obj(\n \"grouped_minibatch_stddev\",\n \"stddev_feature_map\",\n stddev_feature_map\n )\n\n return stddev_feature_map\n\n def ungrouped_minibatch_stddev(\n self,\n X,\n cur_batch_size,\n static_image_shape,\n params):\n \"\"\"Adds minibatch stddev feature map added to image channels.\n\n Args:\n X: tensor, image of shape\n [cur_batch_size, image_size, image_size, num_channels].\n cur_batch_size: tf.int64 tensor, the dynamic batch size (in case\n of partial batch).\n static_image_shape: list, the static shape of each image.\n params: dict, user passed parameters.\n\n Returns:\n Minibatch standard 
deviation feature map image added to\n channels of shape\n [cur_batch_size, image_size, image_size, 1].\n \"\"\"\n with tf.variable_scope(\n \"{}/ungrouped_minibatch_stddev\".format(self.name)):\n # Find the mean of each group.\n # shape = (\n # 1,\n # image_size,\n # image_size,\n # num_channels\n # )\n mean = tf.reduce_mean(\n input_tensor=X, axis=0, keepdims=True, name=\"mean\"\n )\n print_obj(\"ungrouped_minibatch_stddev\", \"mean\", mean)\n\n # Center each group using the mean.\n # shape = (\n # cur_batch_size,\n # image_size,\n # image_size,\n # num_channels\n # )\n centered_image = tf.subtract(\n x=X, y=mean, name=\"centered_image\"\n )\n print_obj(\n \"ungrouped_minibatch_stddev\",\n \"centered_image\",\n centered_image\n )\n\n # Calculate variance over group.\n # shape = (\n # 1,\n # image_size,\n # image_size,\n # num_channels\n # )\n variance = tf.reduce_mean(\n input_tensor=tf.square(x=centered_image),\n axis=0,\n keepdims=True,\n name=\"variance\"\n )\n print_obj(\n \"ungrouped_minibatch_stddev\",\n \"variance\",\n variance\n )\n\n # Get stddev image using ops common to both grouped & ungrouped.\n stddev_feature_map = self.minibatch_stddev_common(\n variance=variance,\n tile_multiples=[cur_batch_size] + static_image_shape[0:2] + [1],\n params=params,\n caller=\"ungrouped\"\n )\n print_obj(\n \"ungrouped_minibatch_stddev\",\n \"stddev_feature_map\",\n stddev_feature_map\n )\n\n return stddev_feature_map\n\n def minibatch_stddev(self, X, params, group_size=4):\n \"\"\"Adds minibatch stddev feature map added to image.\n\n Args:\n X: tensor, image of shape\n [cur_batch_size, image_size, image_size, num_channels].\n params: dict, user passed parameters.\n group_size: int, size of image groups.\n\n Returns:\n Image with minibatch standard deviation feature map added to\n channels of shape\n [cur_batch_size, image_size, image_size, num_channels + 1].\n \"\"\"\n with tf.variable_scope(\"{}/minibatch_stddev\".format(self.name)):\n # Get dynamic shape of image.\n # shape = (4,)\n dynamic_image_shape = tf.shape(\n input=X, name=\"dynamic_image_shape\"\n )\n print_obj(\n \"\\nminibatch_stddev\",\n \"dynamic_image_shape\",\n dynamic_image_shape\n )\n\n # Extract current batch size (in case this is a partial batch).\n cur_batch_size = dynamic_image_shape[0]\n\n # Get static shape of image.\n # shape = (3,)\n static_image_shape = params[\"generator_projection_dims\"]\n print_obj(\n \"minibatch_stddev\", \"static_image_shape\", static_image_shape\n )\n\n # cur_batch_size must be divisible by or smaller than group_size.\n divisbility_condition = tf.equal(\n x=tf.mod(x=cur_batch_size, y=group_size),\n y=0,\n name=\"divisbility_condition\"\n )\n\n less_than_condition = tf.less(\n x=cur_batch_size, y=group_size, name=\"less_than_condition\"\n )\n\n any_condition = tf.reduce_any(\n input_tensor=[divisbility_condition, less_than_condition],\n name=\"any_condition\"\n )\n\n # Get minibatch stddev feature map image from grouped or\n # ungrouped branch.\n stddev_feature_map = tf.cond(\n pred=any_condition,\n true_fn=lambda: self.grouped_minibatch_stddev(\n X=X,\n cur_batch_size=cur_batch_size,\n static_image_shape=static_image_shape,\n params=params,\n group_size=group_size\n ),\n false_fn=lambda: self.ungrouped_minibatch_stddev(\n X=X,\n cur_batch_size=cur_batch_size,\n static_image_shape=static_image_shape,\n params=params\n ),\n name=\"stddev_feature_map_cond\"\n )\n\n # Append to image as new feature map.\n # shape = (\n # cur_batch_size,\n # image_size,\n # image_size,\n # num_channels + 1\n # 
)\n appended_image = tf.concat(\n values=[X, stddev_feature_map],\n axis=-1,\n name=\"appended_image\"\n )\n print_obj(\n \"minibatch_stddev_common\",\n \"appended_image\",\n appended_image\n )\n\n return appended_image\n\n def use_discriminator_logits_layer(self, block_conv, params):\n \"\"\"Uses flatten and logits layers to get logits tensor.\n\n Args:\n block_conv: tensor, output of last conv layer of discriminator.\n flatten_layer: `Flatten` layer.\n logits_layer: `Dense` layer for logits.\n params: dict, user passed parameters.\n\n Returns:\n Final logits tensor of discriminator.\n \"\"\"\n print_obj(\n \"\\nuse_discriminator_logits_layer\", \"block_conv\", block_conv\n )\n # Set shape to remove ambiguity for dense layer.\n height, width = params[\"generator_projection_dims\"][0:2]\n valid_kernel_size = (\n params[\"discriminator_base_conv_blocks\"][0][-1][0]\n )\n block_conv.set_shape(\n [\n block_conv.get_shape()[0],\n height - valid_kernel_size + 1,\n width - valid_kernel_size + 1,\n block_conv.get_shape()[-1]]\n )\n print_obj(\"use_discriminator_logits_layer\", \"block_conv\", block_conv)\n\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Flatten final block conv tensor.\n block_conv_flat = self.flatten_layer(inputs=block_conv)\n print_obj(\n \"use_discriminator_logits_layer\",\n \"block_conv_flat\",\n block_conv_flat\n )\n\n # Final linear layer for logits.\n logits = self.logits_layer(inputs=block_conv_flat)\n print_obj(\"use_discriminator_logits_layer\", \"logits\", logits)\n\n return logits\n\n def create_base_discriminator_network(self, X, params):\n \"\"\"Creates base discriminator network.\n\n Args:\n X: tensor, input image to discriminator.\n params: dict, user passed parameters.\n\n Returns:\n Final logits tensor of discriminator.\n \"\"\"\n print_obj(\"\\ncreate_base_discriminator_network\", \"X\", X)\n with tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Only need the first fromRGB conv layer & block for base network.\n from_rgb_conv_layer = self.from_rgb_conv_layers[0]\n block_layers = self.conv_layer_blocks[0]\n\n # Pass inputs through layer chain.\n from_rgb_conv = from_rgb_conv_layer(inputs=X)\n print_obj(\n \"create_base_discriminator_network\",\n \"from_rgb_conv\",\n from_rgb_conv\n )\n\n if params[\"use_minibatch_stddev\"]:\n block_conv = self.minibatch_stddev(\n X=from_rgb_conv,\n params=params,\n group_size=params[\"minibatch_stddev_group_size\"]\n )\n else:\n block_conv = from_rgb_conv\n\n for i in range(len(block_layers)):\n block_conv = block_layers[i](inputs=block_conv)\n print_obj(\n \"create_base_discriminator_network\",\n \"block_conv\",\n block_conv\n )\n\n # Get logits now.\n logits = self.use_discriminator_logits_layer(\n block_conv=block_conv,\n params=params\n )\n print_obj(\"create_base_discriminator_network\", \"logits\", logits)\n\n return logits\n\n def create_growth_transition_discriminator_network(\n self, X, alpha_var, params, trans_idx):\n \"\"\"Creates growth transition discriminator network.\n\n Args:\n X: tensor, input image to discriminator.\n alpha_var: variable, alpha for weighted sum of fade-in of layers.\n params: dict, user passed parameters.\n trans_idx: int, index of current growth transition.\n\n Returns:\n Final logits tensor of discriminator.\n \"\"\"\n print_obj(\n \"\\nEntered create_growth_transition_discriminator_network\",\n \"trans_idx\",\n trans_idx\n )\n print_obj(\"create_growth_transition_discriminator_network\", \"X\", X)\n with 
tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Growing side chain.\n growing_from_rgb_conv_layer = self.from_rgb_conv_layers[trans_idx + 1]\n growing_block_layers = self.conv_layer_blocks[trans_idx + 1]\n\n # Pass inputs through layer chain.\n growing_block_conv = growing_from_rgb_conv_layer(inputs=X)\n print_obj(\n \"\\ncreate_growth_transition_discriminator_network\",\n \"growing_block_conv\",\n growing_block_conv\n )\n for i in range(len(growing_block_layers)):\n growing_block_conv = growing_block_layers[i](\n inputs=growing_block_conv\n )\n print_obj(\n \"create_growth_transition_discriminator_network\",\n \"growing_block_conv\",\n growing_block_conv\n )\n\n # Shrinking side chain.\n transition_downsample_layer = self.transition_downsample_layers[trans_idx]\n shrinking_from_rgb_conv_layer = self.from_rgb_conv_layers[trans_idx]\n\n # Pass inputs through layer chain.\n transition_downsample = transition_downsample_layer(inputs=X)\n print_obj(\n \"create_growth_transition_discriminator_network\",\n \"transition_downsample\",\n transition_downsample\n )\n shrinking_from_rgb_conv = shrinking_from_rgb_conv_layer(\n inputs=transition_downsample\n )\n print_obj(\n \"create_growth_transition_discriminator_network\",\n \"shrinking_from_rgb_conv\",\n shrinking_from_rgb_conv\n )\n\n # Weighted sum.\n weighted_sum = tf.add(\n x=growing_block_conv * alpha_var,\n y=shrinking_from_rgb_conv * (1.0 - alpha_var),\n name=\"{}_growth_transition_weighted_sum_{}\".format(\n self.name, trans_idx\n )\n )\n print_obj(\n \"create_growth_transition_discriminator_network\",\n \"weighted_sum\",\n weighted_sum\n )\n\n # Permanent blocks.\n permanent_blocks = self.conv_layer_blocks[0:trans_idx + 1]\n\n # Reverse order of blocks and flatten.\n permanent_block_layers = [\n item for sublist in permanent_blocks[::-1] for item in sublist\n ]\n\n # Pass inputs through layer chain.\n block_conv = weighted_sum\n\n # Find number of permanent growth conv layers.\n num_perm_growth_conv_layers = len(permanent_block_layers)\n num_perm_growth_conv_layers -= len(params[\"conv_num_filters\"][0])\n\n # Loop through only the permanent growth conv layers.\n for i in range(num_perm_growth_conv_layers):\n block_conv = permanent_block_layers[i](inputs=block_conv)\n print_obj(\n \"create_growth_transition_discriminator_network\",\n \"block_conv_{}\".format(i),\n block_conv\n )\n\n if params[\"use_minibatch_stddev\"]:\n block_conv = self.minibatch_stddev(\n X=block_conv,\n params=params,\n group_size=params[\"minibatch_stddev_group_size\"]\n )\n print_obj(\n \"create_growth_transition_discriminator_network\",\n \"minibatch_stddev_block_conv\",\n block_conv\n )\n\n # Loop through only the permanent base conv layers now.\n for i in range(\n num_perm_growth_conv_layers, len(permanent_block_layers)):\n block_conv = permanent_block_layers[i](inputs=block_conv)\n print_obj(\n \"create_growth_transition_discriminator_network\",\n \"block_conv_{}\".format(i),\n block_conv\n )\n\n # Get logits now.\n logits = self.use_discriminator_logits_layer(\n block_conv=block_conv, params=params\n )\n print_obj(\n \"create_growth_transition_discriminator_network\",\n \"logits\",\n logits\n )\n\n return logits\n\n def create_final_discriminator_network(self, X, params):\n \"\"\"Creates final discriminator network.\n\n Args:\n X: tensor, input image to discriminator.\n params: dict, user passed parameters.\n\n Returns:\n Final logits tensor of discriminator.\n \"\"\"\n print_obj(\"\\ncreate_final_discriminator_network\", \"X\", X)\n with 
tf.variable_scope(name_or_scope=self.name, reuse=tf.AUTO_REUSE):\n # Only need the last fromRGB conv layer.\n from_rgb_conv_layer = self.from_rgb_conv_layers[-1]\n\n # Reverse order of blocks.\n reversed_blocks = self.conv_layer_blocks[::-1]\n\n # Flatten list of lists block layers into list.\n block_layers = [\n item for sublist in reversed_blocks for item in sublist\n ]\n\n # Pass inputs through layer chain.\n block_conv = from_rgb_conv_layer(inputs=X)\n print_obj(\n \"\\ncreate_final_discriminator_network\",\n \"block_conv\",\n block_conv\n )\n\n # Find number of permanent growth conv layers.\n num_growth_conv_layers = len(block_layers)\n num_growth_conv_layers -= len(params[\"conv_num_filters\"][0])\n\n # Loop through only the permanent growth conv layers.\n for i in range(num_growth_conv_layers):\n block_conv = block_layers[i](inputs=block_conv)\n print_obj(\n \"create_final_discriminator_network\",\n \"block_conv_{}\".format(i),\n block_conv\n )\n\n if params[\"use_minibatch_stddev\"]:\n block_conv = self.minibatch_stddev(\n X=block_conv,\n params=params,\n group_size=params[\"minibatch_stddev_group_size\"]\n )\n print_obj(\n \"create_final_discriminator_network\",\n \"minibatch_stddev_block_conv\",\n block_conv\n )\n\n # Loop through only the permanent base conv layers now.\n for i in range(num_growth_conv_layers, len(block_layers)):\n block_conv = block_layers[i](inputs=block_conv)\n print_obj(\n \"create_final_discriminator_network\",\n \"block_conv_{}\".format(i),\n block_conv\n )\n\n # Get logits now.\n logits = self.use_discriminator_logits_layer(\n block_conv=block_conv,\n params=params\n )\n print_obj(\"create_final_discriminator_network\", \"logits\", logits)\n\n return logits\n\n ##########################################################################\n ##########################################################################\n ##########################################################################\n\n def switch_case_discriminator_logits(\n self, X, alpha_var, params, growth_index):\n \"\"\"Uses switch case to use the correct network to get logits.\n\n Args:\n X: tensor, image tensors of shape\n [cur_batch_size, image_size, image_size, depth].\n alpha_var: variable, alpha for weighted sum of fade-in of layers.\n params: dict, user passed parameters.\n growth_index: int, current growth stage.\n\n Returns:\n Logits tensor of shape [cur_batch_size, 1].\n \"\"\"\n # Switch to case based on number of steps to get logits.\n logits = tf.switch_case(\n branch_index=growth_index,\n branch_fns=[\n # 4x4\n lambda: self.create_base_discriminator_network(\n X=X, params=params\n ),\n # 8x8\n lambda: self.create_growth_transition_discriminator_network(\n X=X,\n alpha_var=alpha_var,\n params=params,\n trans_idx=min(0, len(params[\"conv_num_filters\"]) - 2)\n ),\n # 16x16\n lambda: self.create_growth_transition_discriminator_network(\n X=X,\n alpha_var=alpha_var,\n params=params,\n trans_idx=min(1, len(params[\"conv_num_filters\"]) - 2)\n ),\n # 32x32\n lambda: self.create_growth_transition_discriminator_network(\n X=X,\n alpha_var=alpha_var,\n params=params,\n trans_idx=min(2, len(params[\"conv_num_filters\"]) - 2)\n ),\n # 64x64\n lambda: self.create_growth_transition_discriminator_network(\n X=X,\n alpha_var=alpha_var,\n params=params,\n trans_idx=min(3, len(params[\"conv_num_filters\"]) - 2)\n ),\n # 128x128\n lambda: self.create_growth_transition_discriminator_network(\n X=X,\n alpha_var=alpha_var,\n params=params,\n trans_idx=min(4, len(params[\"conv_num_filters\"]) - 2)\n ),\n 
# 256x256\n lambda: self.create_growth_transition_discriminator_network(\n X=X,\n alpha_var=alpha_var,\n params=params,\n trans_idx=min(5, len(params[\"conv_num_filters\"]) - 2)\n ),\n # 512x512\n lambda: self.create_growth_transition_discriminator_network(\n X=X,\n alpha_var=alpha_var,\n params=params,\n trans_idx=min(6, len(params[\"conv_num_filters\"]) - 2)\n ),\n # 1024x1024\n lambda: self.create_growth_transition_discriminator_network(\n X=X,\n alpha_var=alpha_var,\n params=params,\n trans_idx=min(7, len(params[\"conv_num_filters\"]) - 2)\n ),\n # 1024x1024\n lambda: self.create_final_discriminator_network(\n X=X, params=params\n )\n ],\n name=\"{}_switch_case_logits\".format(self.name)\n )\n\n return logits\n\n ##########################################################################\n ##########################################################################\n ##########################################################################\n\n def get_discriminator_logits(self, X, alpha_var, params):\n \"\"\"Uses discriminator network and returns logits for train/eval.\n\n Args:\n X: tensor, image tensors of shape\n [cur_batch_size, image_size, image_size, depth].\n alpha_var: variable, alpha for weighted sum of fade-in of layers.\n params: dict, user passed parameters.\n\n Returns:\n Logits tensor of shape [cur_batch_size, 1].\n \"\"\"\n print_obj(\"\\nget_discriminator_logits\", \"X\", X)\n\n # Get discriminator's logits tensor.\n train_steps = params[\"train_steps\"] + params[\"prev_train_steps\"]\n num_steps_until_growth = params[\"num_steps_until_growth\"]\n num_stages = train_steps // num_steps_until_growth\n if (num_stages <= 0 or len(params[\"conv_num_filters\"]) == 1):\n print(\n \"\\nget_discriminator_logits: NOT GOING TO GROW, SKIP SWITCH CASE!\"\n )\n # If never going to grow, no sense using the switch case.\n # 4x4\n logits = self.create_base_discriminator_network(\n X=X, params=params\n )\n else:\n # Find growth index based on global step and growth frequency.\n growth_index = tf.cast(\n x=tf.floordiv(\n x=tf.train.get_or_create_global_step(),\n y=params[\"num_steps_until_growth\"],\n name=\"{}_global_step_floordiv\".format(self.name)\n ),\n dtype=tf.int32,\n name=\"{}_growth_index\".format(self.name)\n )\n\n # Switch to case based on number of steps for logits.\n logits = self.switch_case_discriminator_logits(\n X=X,\n alpha_var=alpha_var,\n params=params,\n growth_index=growth_index\n )\n\n print_obj(\n \"\\nget_discriminator_logits\", \"logits\", logits\n )\n\n # Wrap logits in a control dependency for the build discriminator\n # tensors to ensure discriminator internals are built.\n with tf.control_dependencies(\n control_inputs=[self.build_discriminator_tensors]):\n logits = tf.identity(\n input=logits, name=\"{}_logits_identity\".format(self.name)\n )\n\n return logits\n\n ##########################################################################\n ##########################################################################\n ##########################################################################\n\n def get_gradient_penalty_loss(\n self,\n cur_batch_size,\n fake_images,\n real_images,\n alpha_var,\n params):\n \"\"\"Gets discriminator gradient penalty loss.\n\n Args:\n cur_batch_size: tensor, in case of a partial batch instead of\n using the user passed int.\n fake_images: tensor, images generated by the generator from random\n noise of shape [cur_batch_size, image_size, image_size, 3].\n real_images: tensor, real images from input of shape\n [cur_batch_size, 
image_size, image_size, 3].\n alpha_var: variable, alpha for weighted sum of fade-in of layers.\n params: dict, user passed parameters.\n\n Returns:\n Discriminator's gradient penalty loss of shape [].\n \"\"\"\n func_name = \"get_gradient_penalty_loss\"\n\n with tf.name_scope(name=\"{}/gradient_penalty\".format(self.name)):\n # Get a random uniform number rank 4 tensor.\n random_uniform_num = tf.random.uniform(\n shape=[cur_batch_size, 1, 1, 1],\n minval=0., maxval=1.,\n dtype=tf.float32,\n name=\"random_uniform_num\"\n )\n print_obj(\n \"\\n\" + func_name, \"random_uniform_num\", random_uniform_num\n )\n\n # Find the element-wise difference between images.\n image_difference = fake_images - real_images\n print_obj(func_name, \"image_difference\", image_difference)\n\n # Get random samples from this mixed image distribution.\n mixed_images = random_uniform_num * image_difference\n mixed_images += real_images\n print_obj(func_name, \"mixed_images\", mixed_images)\n\n # Send to the discriminator to get logits.\n mixed_logits = self.get_discriminator_logits(\n X=mixed_images, alpha_var=alpha_var, params=params\n )\n print_obj(func_name, \"mixed_logits\", mixed_logits)\n\n # Get the mixed loss.\n mixed_loss = tf.reduce_sum(\n input_tensor=mixed_logits,\n name=\"mixed_loss\"\n )\n print_obj(func_name, \"mixed_loss\", mixed_loss)\n\n # Get gradient from returned list of length 1.\n mixed_gradients = tf.gradients(\n ys=mixed_loss,\n xs=[mixed_images],\n name=\"gradients\"\n )[0]\n print_obj(func_name, \"mixed_gradients\", mixed_gradients)\n\n # Get gradient's L2 norm.\n mixed_norms = tf.sqrt(\n x=tf.reduce_sum(\n input_tensor=tf.square(\n x=mixed_gradients,\n name=\"squared_grads\"\n ),\n axis=[1, 2, 3]\n ) + 1e-8\n )\n print_obj(func_name, \"mixed_norms\", mixed_norms)\n\n # Get squared difference from target of 1.0.\n squared_difference = tf.square(\n x=mixed_norms - 1.0,\n name=\"squared_difference\"\n )\n print_obj(func_name, \"squared_difference\", squared_difference)\n\n # Get gradient penalty scalar.\n gradient_penalty = tf.reduce_mean(\n input_tensor=squared_difference, name=\"gradient_penalty\"\n )\n print_obj(func_name, \"gradient_penalty\", gradient_penalty)\n\n # Multiply with lambda to get gradient penalty loss.\n gradient_penalty_loss = tf.multiply(\n x=params[\"discriminator_gradient_penalty_coefficient\"],\n y=gradient_penalty,\n name=\"gradient_penalty_loss\"\n )\n\n return gradient_penalty_loss\n\n def get_discriminator_loss(\n self,\n cur_batch_size,\n fake_images,\n real_images,\n fake_logits,\n real_logits,\n alpha_var,\n params):\n \"\"\"Gets discriminator loss.\n\n Args:\n cur_batch_size: tensor, in case of a partial batch instead of\n using the user passed int.\n fake_images: tensor, images generated by the generator from random\n noise of shape [cur_batch_size, image_size, image_size, 3].\n real_images: tensor, real images from input of shape\n [cur_batch_size, image_size, image_size, 3].\n fake_logits: tensor, shape of [cur_batch_size, 1] that came from\n discriminator having processed generator's output image.\n fake_logits: tensor, shape of [cur_batch_size, 1] that came from\n discriminator having processed real image.\n alpha_var: variable, alpha for weighted sum of fade-in of layers.\n params: dict, user passed parameters.\n\n Returns:\n Discriminator's total loss tensor of shape [].\n \"\"\"\n # Calculate base discriminator loss.\n discriminator_real_loss = tf.reduce_mean(\n input_tensor=real_logits,\n name=\"{}_real_loss\".format(self.name)\n )\n print_obj(\n 
\"\\nget_discriminator_loss\",\n \"discriminator_real_loss\",\n discriminator_real_loss\n )\n\n discriminator_generated_loss = tf.reduce_mean(\n input_tensor=fake_logits,\n name=\"{}_generated_loss\".format(self.name)\n )\n print_obj(\n \"get_discriminator_loss\",\n \"discriminator_generated_loss\",\n discriminator_generated_loss\n )\n\n discriminator_loss = tf.add(\n x=discriminator_real_loss, y=-discriminator_generated_loss,\n name=\"{}_loss\".format(self.name)\n )\n print_obj(\n \"get_discriminator_loss\",\n \"discriminator_loss\",\n discriminator_loss\n )\n\n # Get discriminator gradient penalty loss.\n discriminator_gradient_penalty = self.get_gradient_penalty_loss(\n cur_batch_size=cur_batch_size,\n fake_images=fake_images,\n real_images=real_images,\n alpha_var=alpha_var,\n params=params\n )\n print_obj(\n \"get_discriminator_loss\",\n \"discriminator_gradient_penalty\",\n discriminator_gradient_penalty\n )\n\n # Get discriminator epsilon drift penalty.\n epsilon_drift_penalty = tf.multiply(\n x=params[\"epsilon_drift\"],\n y=tf.reduce_mean(input_tensor=tf.square(x=real_logits)),\n name=\"epsilon_drift_penalty\"\n )\n print_obj(\n \"get_discriminator_loss\",\n \"epsilon_drift_penalty\",\n epsilon_drift_penalty\n )\n\n # Get discriminator Wasserstein GP loss.\n discriminator_wasserstein_gp_loss = tf.add_n(\n inputs=[\n discriminator_loss,\n discriminator_gradient_penalty,\n epsilon_drift_penalty\n ],\n name=\"{}_wasserstein_gp_loss\".format(self.name)\n )\n print_obj(\n \"get_discriminator_loss\",\n \"discriminator_wasserstein_gp_loss\",\n discriminator_wasserstein_gp_loss\n )\n\n # Get discriminator regularization losses.\n discriminator_reg_loss = regularization.get_regularization_loss(\n lambda1=params[\"discriminator_l1_regularization_scale\"],\n lambda2=params[\"discriminator_l2_regularization_scale\"],\n scope=self.name\n )\n print_obj(\n \"get_discriminator_loss\",\n \"discriminator_reg_loss\",\n discriminator_reg_loss\n )\n\n # Combine losses for total losses.\n discriminator_total_loss = tf.add(\n x=discriminator_wasserstein_gp_loss,\n y=discriminator_reg_loss,\n name=\"{}_total_loss\".format(self.name)\n )\n print_obj(\n \"get_discriminator_loss\",\n \"discriminator_total_loss\",\n discriminator_total_loss\n )\n\n return discriminator_total_loss\n",
"import tensorflow as tf\n\nfrom .print_object import print_obj\n\n\ndef get_gradients(loss, global_step, params, scope):\n \"\"\"Returns the gradients and variables of the current training step.\n\n Args:\n loss: tensor, shape of [].\n global_step: tensor, the current training step or batch in the\n training loop.\n params: dict, user passed parameters.\n scope: str, the network's name to find its variables to train.\n\n Returns:\n Gradient tensors.\n \"\"\"\n print_obj(\"\\nget_gradients_and_variables\", \"loss\", loss)\n print_obj(\"get_gradients_and_variables\", \"global_step\", global_step)\n print_obj(\"get_gradients_and_variables\", \"scope\", scope)\n\n # Get trainable variables.\n variables = tf.trainable_variables(scope=scope)\n print_obj(\"\\nget_gradients_and_variables\", \"variables\", variables)\n\n # Get gradients.\n gradients = tf.gradients(\n ys=loss,\n xs=variables,\n name=\"{}_gradients\".format(scope)\n )\n print_obj(\"\\nget_gradients_and_variables\", \"gradients\", gradients)\n\n # Clip gradients.\n if params[\"{}_clip_gradients\".format(scope)]:\n gradients, _ = tf.clip_by_global_norm(\n t_list=gradients,\n clip_norm=params[\"{}_clip_gradients\".format(scope)],\n name=\"{}_clip_by_global_norm_gradients\".format(scope)\n )\n print_obj(\"\\nget_gradients_and_variables\", \"gradients\", gradients)\n\n return gradients\n\n\ndef get_optimizer(params, scope):\n \"\"\"Returns instance of chosen `Optimizer` class.\n\n Args:\n params: dict, user passed parameters.\n scope: str, the current network's scope.\n\n Returns:\n Instance of chosen `Optimizer` class.\n \"\"\"\n # Create optimizer map.\n optimizers = {\n \"Adam\": tf.train.AdamOptimizer,\n \"Adadelta\": tf.train.AdadeltaOptimizer,\n \"AdagradDA\": tf.train.AdagradDAOptimizer,\n \"Adagrad\": tf.train.AdagradOptimizer,\n \"Ftrl\": tf.train.FtrlOptimizer,\n \"GradientDescent\": tf.train.GradientDescentOptimizer,\n \"Momentum\": tf.train.MomentumOptimizer,\n \"ProximalAdagrad\": tf.train.ProximalAdagradOptimizer,\n \"ProximalGradientDescent\": tf.train.ProximalGradientDescentOptimizer,\n \"RMSProp\": tf.train.RMSPropOptimizer\n }\n\n # Get optimizer and instantiate it.\n optimizer_name = params[\"{}_optimizer\".format(scope)]\n learning_rate = params[\"{}_learning_rate\".format(scope)]\n\n optimizer = optimizers[optimizer_name](learning_rate=learning_rate)\n print_obj(\"\\nget_optimizer\", \"optimizer\", optimizer)\n\n # If using TPU, wrap optimizer to use an allreduce to aggregate gradients\n # and broadcast the result to each shard.\n if params[\"use_tpu\"]:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(opt=optimizer)\n print_obj(\"get_optimizer\", \"optimizer\", optimizer)\n\n return optimizer\n\n\ndef jointly_train_generator_encoder(\n generator_loss,\n encoder_loss,\n global_step,\n params,\n generator_scope,\n encoder_scope,\n discriminator_scope):\n \"\"\"Returns generator's/encoder's combined objects needed for training.\n\n Args:\n generator_loss: tensor, generator's loss with shape [].\n encoder_loss: tensor, encoder's loss with shape [].\n global_step: tensor, the current training step or batch in the\n training loop.\n params: dict, user passed parameters.\n generator_scope: str, the generator's name to find its variables.\n encoder_scope: str, the encoder's name to find its variables.\n discriminator_scope: str, the discriminator's name to find its\n variables.\n\n Returns:\n Loss tensor anddict of gradient tensors.\n \"\"\"\n # Add generator and encoder losses together.\n loss = tf.add(\n 
x=generator_loss,\n y=encoder_loss,\n name=\"jointly_train_generator_encoder_add_loss\"\n )\n print_obj(\"\\njointly_train_generator_encoder\", \"loss\", loss)\n\n # Get generator gradients.\n generator_gradients = get_gradients(\n loss=generator_loss,\n global_step=global_step,\n params=params,\n scope=generator_scope\n )\n print_obj(\n \"\\njointly_train_generator_encoder\",\n \"generator_gradients\",\n generator_gradients\n )\n\n # Get encoder gradients.\n encoder_gradients = get_gradients(\n loss=encoder_loss,\n global_step=global_step,\n params=params,\n scope=encoder_scope\n )\n print_obj(\n \"\\njointly_train_generator_encoder\",\n \"encoder_gradients\",\n encoder_gradients\n )\n\n # Get discriminator variables and set gradients to zero.\n discriminator_variables = tf.trainable_variables(scope=\"discriminator\")\n discriminator_gradients = [\n tf.zeros_like(tensor=v, dtype=tf.float32)\n for v in discriminator_variables\n ]\n\n # Combine gradients into a dictionary.\n gradients = {\n generator_scope: generator_gradients,\n encoder_scope: encoder_gradients,\n discriminator_scope: discriminator_gradients\n }\n print_obj(\"\\njointly_train_generator_encoder\", \"gradients\", gradients)\n\n return loss, gradients\n\n\ndef train_discriminator(\n discriminator_loss,\n global_step,\n params,\n generator_scope,\n encoder_scope,\n discriminator_scope):\n \"\"\"Returns discriminator's objects needed for training.\n\n Args:\n discriminator_loss: tensor, discriminator's loss with shape [].\n global_step: tensor, the current training step or batch in the\n training loop.\n params: dict, user passed parameters.\n generator_scope: str, the generator's name to find its variables.\n encoder_scope: str, the encoder's name to find its variables.\n discriminator_scope: str, the discriminator's name to find its\n variables.\n\n Returns:\n Loss tensor and dict of gradient tensors.\n \"\"\"\n # The loss is just the discriminator loss.\n loss = discriminator_loss\n\n # Get generator variables and set gradients to zero.\n generator_variables = tf.trainable_variables(scope=generator_scope)\n generator_gradients = [\n tf.zeros_like(tensor=v, dtype=tf.float32)\n for v in generator_variables\n ]\n\n # Get encoder variables and set gradients to zero.\n encoder_variables = tf.trainable_variables(scope=encoder_scope)\n encoder_gradients = [0. 
for _ in encoder_variables]\n encoder_gradients = [\n tf.zeros_like(tensor=v, dtype=tf.float32)\n for v in encoder_variables\n ]\n\n # Get discriminator gradients.\n discriminator_gradients = get_gradients(\n loss=discriminator_loss,\n global_step=global_step,\n params=params,\n scope=discriminator_scope\n )\n print_obj(\n \"\\ntrain_discriminator\",\n \"discriminator_gradients\",\n discriminator_gradients\n )\n\n # Combine gradients into a dictionary.\n gradients = {\n generator_scope: generator_gradients,\n encoder_scope: encoder_gradients,\n discriminator_scope: discriminator_gradients\n }\n print_obj(\"\\ntrain_discriminator\", \"gradients\", gradients)\n\n return loss, gradients\n\n\ndef get_train_op(gradients, global_step, params, scope):\n \"\"\"Returns train op of applying gradients with optimizer to variables.\n\n Args:\n gradients: list, gradient tensors for in scope trainable variables.\n global_step: tensor, the current training step or batch in the\n training loop.\n params: dict, user passed parameters.\n scope: str, the network's name to find its variables to train.\n\n Returns:\n Training op.\n \"\"\"\n print_obj(\"\\nget_train_op\", \"gradients\", gradients)\n print_obj(\"\\nget_train_op\", \"global_step\", global_step)\n print_obj(\"get_train_op\", \"scope\", scope)\n\n # Get trainable variables from scope.\n variables = tf.trainable_variables(scope=scope)\n\n # Zip together gradients and variables.\n grads_and_vars = zip(gradients, variables)\n print_obj(\"\\nget_train_op\", \"grads_and_vars\", grads_and_vars)\n\n # Get optimizers.\n optimizer = get_optimizer(params=params, scope=scope)\n print_obj(\"get_train_op\", \"optimizer\", optimizer)\n\n # Create train op by applying gradients to variables and incrementing\n # global step.\n train_op = optimizer.apply_gradients(\n grads_and_vars=grads_and_vars,\n global_step=global_step,\n name=\"{}_apply_gradients\".format(scope)\n )\n print_obj(\"get_train_op\", \"train_op\", train_op)\n\n return train_op\n\n\ndef get_train_ops(gradients, global_step, params):\n \"\"\"Returns train op of applying gradients with optimizer to variables.\n\n Args:\n gradients: list, gradient tensors for in scope trainable variables.\n global_step: tensor, the current training step or batch in the\n training loop.\n params: dict, user passed parameters.\n\n Returns:\n Training op.\n \"\"\"\n print_obj(\"\\nget_train_ops\", \"gradients\", gradients)\n print_obj(\"\\nget_train_ops\", \"global_step\", global_step)\n\n # Create list of train ops.\n train_ops = [\n get_train_op(\n gradients=g, global_step=global_step, params=params, scope=s\n )\n for s, g in gradients.items()\n ]\n print_obj(\"\\nget_train_op\", \"train_ops\", train_ops)\n\n # Group together train ops.\n train_op = tf.group(\n train_ops,\n name=\"jointly_train_generator_encoder_group_train_op\"\n )\n print_obj(\"\\nget_train_ops\", \"train_op\", train_op)\n\n return train_op\n\n\ndef update_alpha(global_step, alpha_var, params):\n \"\"\"Returns update op for alpha variable.\n\n Args:\n global_step: tensor, the current training step or batch in the\n training loop.\n alpha_var: variable, alpha for weighted sum of fade-in of layers.\n params: dict, user passed parameters.\n\n Returns:\n Alpha variable update operation.\n \"\"\"\n # If never grow, then no need to update alpha since it is not used.\n if len(params[\"conv_num_filters\"]) > 1:\n # Update alpha var to linearly scale from 0 to 1 based on steps.\n alpha_var_update_op = tf.assign(\n 
ref=alpha_var,\n value=tf.divide(\n x=tf.cast(\n x=tf.mod(\n x=global_step, y=params[\"num_steps_until_growth\"]\n ),\n dtype=tf.float32\n ),\n y=params[\"num_steps_until_growth\"]\n ),\n name=\"alpha_var_update_op_assign\"\n )\n else:\n alpha_var_update_op = tf.no_op(name=\"alpha_var_update_op_no_op\")\n print_obj(\n \"update_alpha\", \"alpha_var_update_op\", alpha_var_update_op\n )\n\n return alpha_var_update_op\n\n\ndef get_loss_and_train_op(\n generator_total_loss,\n encoder_total_loss,\n discriminator_total_loss,\n alpha_var,\n params):\n \"\"\"Returns loss and train op for train mode.\n\n Args:\n generator_total_loss: tensor, scalar total loss of generator.\n encoder_total_loss: tensor, scalar total loss of encoder.\n discriminator_total_loss: tensor, scalar total loss of discriminator.\n alpha_var: variable, alpha for weighted sum of fade-in of layers.\n params: dict, user passed parameters.\n\n Returns:\n Loss scalar tensor and train_op to be used by the EstimatorSpec.\n \"\"\"\n # Get global step.\n global_step = tf.train.get_or_create_global_step()\n\n # Determine if it is time to train generator or discriminator.\n cycle_step = tf.mod(\n x=global_step,\n y=tf.cast(\n x=tf.add(\n x=params[\"generator_train_steps\"],\n y=params[\"discriminator_train_steps\"]\n ),\n dtype=tf.int64\n ),\n name=\"get_loss_and_train_op_cycle_step\"\n )\n\n # Create choose generator condition.\n condition = tf.less(\n x=cycle_step,\n y=params[\"generator_train_steps\"],\n name=\"get_loss_and_train_op_condition\"\n )\n\n # Needed for batch normalization, but has no effect otherwise.\n update_ops = tf.get_collection(key=tf.GraphKeys.UPDATE_OPS)\n\n with tf.control_dependencies(control_inputs=update_ops):\n # Conditionally choose to train generator/encoder or discriminator.\n loss, gradients = tf.cond(\n pred=condition,\n true_fn=lambda: jointly_train_generator_encoder(\n generator_loss=generator_total_loss,\n encoder_loss=encoder_total_loss,\n global_step=global_step,\n params=params,\n generator_scope=\"generator\",\n encoder_scope=\"encoder\",\n discriminator_scope=\"discriminator\"\n ),\n false_fn=lambda: train_discriminator(\n discriminator_loss=discriminator_total_loss,\n global_step=global_step,\n params=params,\n generator_scope=\"generator\",\n encoder_scope=\"encoder\",\n discriminator_scope=\"discriminator\"\n ),\n name=\"get_loss_and_train_op_cond\"\n )\n\n print_obj(\"\\nget_loss_and_train_op\", \"gradients\", gradients)\n\n # Crete train_op with whatever was returned from conditional branch.\n train_op = get_train_ops(gradients, global_step, params)\n\n # Get update op for the alpha variable.\n alpha_var_update_op = update_alpha(global_step, alpha_var, params)\n\n # Ensure alpha variable gets updated.\n with tf.control_dependencies(control_inputs=[alpha_var_update_op]):\n loss = tf.identity(\n input=loss,\n name=\"get_loss_and_train_op_loss_identity\"\n )\n\n return loss, train_op\n",
"import tensorflow as tf\n\nfrom .print_object import print_obj\n\n\nclass Critic(object):\n \"\"\"Critic that takes image input and outputs logits.\n Fields:\n name: str, name of `Critic`.\n kernel_regularizer: `l1_l2_regularizer` object, regularizar for kernel\n variables.\n bias_regularizer: `l1_l2_regularizer` object, regularizar for bias\n variables.\n \"\"\"\n def __init__(self, kernel_regularizer, bias_regularizer, name):\n \"\"\"Instantiates and builds critic network.\n Args:\n kernel_regularizer: `l1_l2_regularizer` object, regularizar for\n kernel variables.\n bias_regularizer: `l1_l2_regularizer` object, regularizar for bias\n variables.\n name: str, name of critic.\n \"\"\"\n # Set name of critic.\n self.name = name\n\n # Regularizer for kernel weights.\n self.kernel_regularizer = kernel_regularizer\n\n # Regularizer for bias weights.\n self.bias_regularizer = bias_regularizer\n\n def get_critic_logits(self, X, params):\n \"\"\"Creates critic network and returns logits.\n\n Args:\n X: tensor, image tensors of shape\n [cur_batch_size, height, width, depth].\n params: dict, user passed parameters.\n\n Returns:\n Logits tensor of shape [cur_batch_size, 1].\n \"\"\"\n func_name = \"get_critic_logits\"\n # Create the input layer to our CNN.\n # shape = (cur_batch_size, height * width * depth)\n network = X\n print_obj(\"\\n\" + func_name, \"network\", network)\n\n with tf.variable_scope(\"critic\", reuse=tf.AUTO_REUSE):\n # Iteratively build downsampling layers.\n for i in range(len(params[\"critic_num_filters\"])):\n # Add convolutional layers with given params per layer.\n # shape = (\n # cur_batch_size,\n # critic_kernel_sizes[i - 1] / critic_strides[i],\n # critic_kernel_sizes[i - 1] / critic_strides[i],\n # critic_num_filters[i]\n # )\n network = tf.layers.conv2d(\n inputs=network,\n filters=params[\"critic_num_filters\"][i],\n kernel_size=params[\"critic_kernel_sizes\"][i],\n strides=params[\"critic_strides\"][i],\n padding=\"same\",\n activation=None,\n kernel_regularizer=self.kernel_regularizer,\n bias_regularizer=self.bias_regularizer,\n name=\"layers_conv2d_{}\".format(i)\n )\n print_obj(func_name, \"network\", network)\n\n network = tf.nn.leaky_relu(\n features=network,\n alpha=params[\"critic_leaky_relu_alpha\"],\n name=\"leaky_relu_{}\".format(i)\n )\n print_obj(func_name, \"network\", network)\n\n if params[\"critic_use_layer_normalization\"]:\n # Normalize layer.\n network = tf.contrib.layers.layer_norm(inputs=network)\n\n # Flatten network output.\n # shape = (\n # cur_batch_size,\n # (critic_kernel_sizes[-2] / critic_strides[-1]) ** 2 * critic_num_filters[-1]\n # )\n network_flat = tf.layers.Flatten()(inputs=network)\n print_obj(func_name, \"network_flat\", network_flat)\n\n # Final linear layer for logits.\n # shape = (cur_batch_size, 1)\n logits = tf.layers.dense(\n inputs=network_flat,\n units=1,\n activation=None,\n kernel_regularizer=self.kernel_regularizer,\n bias_regularizer=self.bias_regularizer,\n name=\"layers_dense_logits\"\n )\n print_obj(func_name, \"logits\", logits)\n\n return logits\n\n def get_gradient_penalty_loss(\n self, cur_batch_size, fake_images, real_images, params):\n \"\"\"Gets critic gradient penalty loss.\n\n Args:\n cur_batch_size: tensor, in case of a partial batch instead of\n using the user passed int.\n fake_images: tensor, images generated by the generator from random\n noise of shape [cur_batch_size, image_size, image_size, 3].\n real_images: tensor, real images from input of shape\n [cur_batch_size, image_size, image_size, 3].\n 
params: dict, user passed parameters.\n\n Returns:\n Critic's gradient penalty loss of shape [].\n \"\"\"\n func_name = \"get_gradient_penalty_loss\"\n with tf.name_scope(name=\"critic/gradient_penalty\"):\n # Get a random uniform number rank 4 tensor.\n random_uniform_num = tf.random.uniform(\n shape=[cur_batch_size, 1, 1, 1],\n minval=0., maxval=1.,\n dtype=tf.float32,\n name=\"random_uniform_num\"\n )\n print_obj(\n \"\\n\" + func_name, \"random_uniform_num\", random_uniform_num\n )\n\n # Find the element-wise difference between images.\n image_difference = fake_images - real_images\n print_obj(func_name, \"image_difference\", image_difference)\n\n # Get random samples from this mixed image distribution.\n mixed_images = random_uniform_num * image_difference\n mixed_images += real_images\n print_obj(func_name, \"mixed_images\", mixed_images)\n\n # Send to the critic to get logits.\n mixed_logits = self.get_critic_logits(\n X=mixed_images, params=params\n )\n print_obj(func_name, \"mixed_logits\", mixed_logits)\n\n # Get the mixed loss.\n mixed_loss = tf.reduce_sum(\n input_tensor=mixed_logits, name=\"mixed_loss\"\n )\n print_obj(func_name, \"mixed_loss\", mixed_loss)\n\n # Get gradient from returned list of length 1.\n mixed_gradients = tf.gradients(\n ys=mixed_loss, xs=[mixed_images], name=\"gradients\"\n )[0]\n print_obj(func_name, \"mixed_gradients\", mixed_gradients)\n\n # Get gradient's L2 norm.\n mixed_norms = tf.sqrt(\n x=tf.reduce_sum(\n input_tensor=tf.square(\n x=mixed_gradients,\n name=\"squared_grads\"\n ),\n axis=[1, 2, 3]\n ) + 1e-8\n )\n print_obj(func_name, \"mixed_norms\", mixed_norms)\n\n # Get squared difference from target of 1.0.\n squared_difference = tf.square(\n x=mixed_norms - 1.0, name=\"squared_difference\"\n )\n print_obj(func_name, \"squared_difference\", squared_difference)\n\n # Get gradient penalty scalar.\n gradient_penalty = tf.reduce_mean(\n input_tensor=squared_difference, name=\"gradient_penalty\"\n )\n print_obj(func_name, \"gradient_penalty\", gradient_penalty)\n\n # Multiply with lambda to get gradient penalty loss.\n gradient_penalty_loss = tf.multiply(\n x=params[\"critic_gradient_penalty_coefficient\"],\n y=gradient_penalty,\n name=\"gradient_penalty_loss\"\n )\n\n return gradient_penalty_loss\n\n def get_critic_loss(\n self,\n cur_batch_size,\n fake_images,\n real_images,\n fake_logits,\n real_logits,\n params):\n \"\"\"Gets critic's total loss.\n\n Args:\n cur_batch_size: tensor, in case of a partial batch instead of\n using the user passed int.\n fake_images: tensor, images generated by the generator from random\n noise of shape [cur_batch_size, image_size, image_size, 3].\n real_images: tensor, real images from input of shape\n [cur_batch_size, image_size, image_size, 3].\n fake_logits: tensor, shape of [cur_batch_size, 1] that came from\n critic having processed generator's output image.\n fake_logits: tensor, shape of [cur_batch_size, 1] that came from\n critic having processed real image.\n params: dict, user passed parameters.\n\n Returns:\n Critic's total loss tensor of shape [].\n \"\"\"\n func_name = \"get_critic_loss\"\n # Calculate base critic loss.\n critic_real_loss = tf.reduce_mean(\n input_tensor=real_logits, name=\"critic_real_loss\"\n )\n print_obj(\"\\n\" + func_name, \"critic_real_loss\", critic_real_loss)\n\n critic_fake_loss = tf.reduce_mean(\n input_tensor=fake_logits, name=\"critic_fake_loss\"\n )\n print_obj(\n func_name, \"critic_fake_loss\", critic_fake_loss\n )\n\n critic_loss = tf.subtract(\n x=critic_fake_loss, 
y=critic_real_loss, name=\"critic_loss\"\n )\n print_obj(func_name, \"critic_loss\", critic_loss)\n\n # Get critic gradient penalty loss.\n gradient_penalty_loss = self.get_gradient_penalty_loss(\n cur_batch_size=cur_batch_size,\n fake_images=fake_images,\n real_images=real_images,\n params=params\n )\n print_obj(func_name, \"gradient_penalty_loss\", gradient_penalty_loss)\n\n # Get critic Wasserstein GP loss.\n critic_wasserstein_gp_loss = tf.add(\n x=critic_loss,\n y=gradient_penalty_loss,\n name=\"critic_wasserstein_gp_loss\"\n )\n print_obj(\n func_name,\n \"critic_wasserstein_gp_loss\",\n critic_wasserstein_gp_loss\n )\n\n # Get regularization losses.\n critic_reg_loss = tf.losses.get_regularization_loss(\n scope=\"critic\", name=\"critic_reg_loss\"\n )\n print_obj(func_name, \"critic_reg_loss\", critic_reg_loss)\n\n # Combine losses for total losses.\n critic_total_loss = tf.math.add(\n x=critic_wasserstein_gp_loss,\n y=critic_reg_loss,\n name=\"critic_total_loss\"\n )\n print_obj(func_name, \"critic_total_loss\", critic_total_loss)\n\n # Add summaries for TensorBoard.\n tf.summary.scalar(\n name=\"critic_real_loss\", tensor=critic_real_loss, family=\"losses\"\n )\n tf.summary.scalar(\n name=\"critic_fake_loss\", tensor=critic_fake_loss, family=\"losses\"\n )\n tf.summary.scalar(\n name=\"critic_loss\", tensor=critic_loss, family=\"losses\"\n )\n tf.summary.scalar(\n name=\"gradient_penalty_loss\",\n tensor=gradient_penalty_loss,\n family=\"losses\"\n )\n tf.summary.scalar(\n name=\"critic_wasserstein_gp_loss\",\n tensor=critic_wasserstein_gp_loss,\n family=\"losses\"\n )\n tf.summary.scalar(\n name=\"critic_reg_loss\", tensor=critic_reg_loss, family=\"losses\"\n )\n tf.summary.scalar(\n name=\"critic_total_loss\",\n tensor=critic_total_loss,\n family=\"total_losses\"\n )\n\n return critic_total_loss\n"
] | [
[
"tensorflow.multiply",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.reduce_any",
"tensorflow.reshape",
"tensorflow.minimum",
"tensorflow.random.uniform",
"tensorflow.reduce_sum",
"tensorflow.subtract",
"tensorflow.gradients",
"tensorflow.zeros",
"tensorflow.mod",
"tensorflow.train.get_or_create_global_step",
"tensorflow.square",
"tensorflow.variable_scope"
],
[
"tensorflow.control_dependencies",
"tensorflow.less",
"tensorflow.get_collection",
"tensorflow.identity",
"tensorflow.train.get_or_create_global_step",
"tensorflow.mod",
"tensorflow.contrib.tpu.CrossShardOptimizer",
"tensorflow.zeros_like",
"tensorflow.add",
"tensorflow.no_op",
"tensorflow.trainable_variables",
"tensorflow.group"
],
[
"tensorflow.math.add",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.losses.get_regularization_loss",
"tensorflow.reduce_sum",
"tensorflow.random.uniform",
"tensorflow.gradients",
"tensorflow.layers.dense",
"tensorflow.subtract",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.layers.Flatten",
"tensorflow.variable_scope",
"tensorflow.summary.scalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
rodosha98/FRPGitHomework | [
"0905c79ccc28d33f9385c09c03e8e18d8c720787",
"0905c79ccc28d33f9385c09c03e8e18d8c720787"
] | [
"robopy/tests/test_common.py",
"robopy/base/serial_link.py"
] | [
"# Created by: Aditya Dua\n# 25 July, 2017\n\"\"\"\nThis module contains all common helpful methods used in testing toolbox functionality\n\"\"\"\nimport numpy as np\nimport numpy.testing as npt\n\n\ndef matrix_mismatch_string_builder(rec_mat, exp_mat):\n expected_mat_str = np.array2string(np.asarray(exp_mat))\n received_mat_str = np.array2string(np.asarray(rec_mat))\n output_str = str(\"\\n----------------------\\n\"\n + \" Expected Output \"\n + \"\\n----------------------\\n\"\n + expected_mat_str\n + \"\\n----------------------\\n\"\n + \" Received Output \"\n + \"\\n----------------------\\n\"\n + received_mat_str)\n return output_str\n\n\ndef matrices_equal(rec_mat, exp_mat, decimal=10):\n equal = True\n try:\n npt.assert_almost_equal(rec_mat, exp_mat, decimal=decimal)\n except AssertionError:\n equal = False\n return equal\n",
"# Created by: Aditya Dua\n# 30 September 2017\nfrom __future__ import print_function\nfrom abc import ABC\nimport math\nfrom math import pi\nimport numpy as np\nimport vtk\nfrom . import transforms\nfrom .graphics import VtkPipeline\nfrom .graphics import axesCube\nfrom .graphics import axesCubeFloor\nfrom .graphics import vtk_named_colors\nimport pkg_resources\nfrom scipy.optimize import minimize\n\n\nclass SerialLink:\n \"\"\"\n SerialLink object class.\n \"\"\"\n\n def __init__(self, links, name=None, base=None, tool=None, stl_files=None, q=None, colors=None, param=None):\n \"\"\"\n Creates a SerialLink object.\n :param links: a list of links that will constitute SerialLink object.\n :param name: name property of the object.\n :param base: base transform applied to the SerialLink object.\n :param stl_files: STL file names to associate with links. Only works for pre-implemented models in model module.\n :param q: initial angles for link joints.\n :param colors: colors of STL files.\n \"\"\"\n self.pipeline = None\n self.links = links\n if q is None:\n self.q = np.matrix([0 for each in links])\n if base is None:\n self.base = np.asmatrix(np.eye(4, 4))\n else:\n assert (type(base) is np.matrix) and (base.shape == (4, 4))\n self.base = base\n if tool is None:\n self.tool = np.asmatrix(np.eye(4, 4))\n else:\n assert (type(tool) is np.matrix) and (tool.shape == (4, 4))\n self.tool = tool\n # Following arguments initialised by plot function and animate functions only\n if stl_files is None:\n # Default stick figure model code goes here\n pass\n else:\n self.stl_files = stl_files\n if name is None:\n self.name = ''\n else:\n self.name = name\n if colors is None:\n self.colors = vtk_named_colors([\"Grey\"] * len(stl_files))\n else:\n self.colors = colors\n if param is None:\n # If model deosn't pass params, then use these default ones\n self.param = {\n \"cube_axes_x_bounds\": np.matrix([[-1.5, 1.5]]),\n \"cube_axes_y_bounds\": np.matrix([[-1.5, 1.5]]),\n \"cube_axes_z_bounds\": np.matrix([[-1.5, 1.5]]),\n \"floor_position\": np.matrix([[0, -1.5, 0]])\n }\n else:\n self.param = param\n\n def __iter__(self):\n return (each for each in self.links)\n\n @property\n def length(self):\n \"\"\"\n length property\n :return: int\n \"\"\"\n return len(self.links)\n\n def fkine(self, stance, unit='rad', apply_stance=False, actor_list=None, timer=None):\n \"\"\"\n Calculates forward kinematics for a list of joint angles.\n :param stance: stance is list of joint angles.\n :param unit: unit of input angles.\n :param apply_stance: If True, then applied tp actor_list.\n :param actor_list: Passed to apply transformations computed by fkine.\n :param timer: internal use only (for animation).\n :return: homogeneous transformation matrix.\n \"\"\"\n if type(stance) is np.ndarray:\n stance = np.asmatrix(stance)\n if unit == 'deg':\n stance = stance * pi / 180\n if timer is None:\n timer = 0\n t = self.base\n for i in range(self.length):\n if apply_stance:\n actor_list[i].SetUserMatrix(transforms.np2vtk(t))\n t = t * self.links[i].A(stance[timer, i])\n t = t * self.tool\n if apply_stance:\n actor_list[self.length].SetUserMatrix(transforms.np2vtk(t))\n return t\n\n def ikine(self, T, q0=None, unit='rad'):\n \"\"\"\n Calculates inverse kinematics for homogeneous transformation matrix using numerical optimisation method.\n :param T: homogeneous transformation matrix.\n :param q0: initial list of joint angles for optimisation.\n :param unit: preferred unit for returned joint angles. 
Allowed values: 'rad' or 'deg'.\n :return: a list of 6 joint angles.\n \"\"\"\n assert T.shape == (4, 4)\n bounds = [(link.qlim[0], link.qlim[1]) for link in self]\n reach = 0\n for link in self:\n reach += abs(link.a) + abs(link.d)\n omega = np.diag([1, 1, 1, 3 / reach])\n if q0 is None:\n q0 = np.asmatrix(np.zeros((1, self.length)))\n\n def objective(x):\n return (\n np.square(((np.linalg.lstsq(T, self.fkine(x))[0]) - np.asmatrix(np.eye(4, 4))) * omega)).sum()\n\n sol = minimize(objective, x0=q0, bounds=bounds)\n if unit == 'deg':\n return np.asmatrix(sol.x * 180 / pi)\n else:\n return np.asmatrix(sol.x)\n\n def plot(self, stance, unit='rad'):\n \"\"\"\n Plots the SerialLink object in a desired stance.\n :param stance: list of joint angles for SerialLink object.\n :param unit: unit of input angles.\n :return: null.\n \"\"\"\n\n assert type(stance) is np.matrix\n\n if unit == 'deg':\n stance = stance * (pi / 180)\n\n self.pipeline = VtkPipeline()\n self.pipeline.reader_list, self.pipeline.actor_list, self.pipeline.mapper_list = self.__setup_pipeline_objs()\n\n self.fkine(stance, apply_stance=True, actor_list=self.pipeline.actor_list)\n\n cube_axes = axesCubeFloor(self.pipeline.ren,\n self.param.get(\"cube_axes_x_bounds\"),\n self.param.get(\"cube_axes_y_bounds\"),\n self.param.get(\"cube_axes_z_bounds\"),\n self.param.get(\"floor_position\"))\n\n self.pipeline.add_actor(cube_axes)\n\n for each in self.pipeline.actor_list:\n each.SetScale(self.scale)\n\n self.pipeline.render()\n\n def __setup_pipeline_objs(self):\n \"\"\"\n Internal function to initialise vtk objects.\n :return: reader_list, actor_list, mapper_list\n \"\"\"\n reader_list = [0] * len(self.stl_files)\n actor_list = [0] * len(self.stl_files)\n mapper_list = [0] * len(self.stl_files)\n for i in range(len(self.stl_files)):\n reader_list[i] = vtk.vtkSTLReader()\n loc = pkg_resources.resource_filename(\"robopy\", '/'.join(('media', self.name, self.stl_files[i])))\n reader_list[i].SetFileName(loc)\n mapper_list[i] = vtk.vtkPolyDataMapper()\n mapper_list[i].SetInputConnection(reader_list[i].GetOutputPort())\n actor_list[i] = vtk.vtkActor()\n actor_list[i].SetMapper(mapper_list[i])\n actor_list[i].GetProperty().SetColor(self.colors[i]) # (R,G,B)\n\n return reader_list, actor_list, mapper_list\n\n @staticmethod\n def _setup_file_names(num):\n file_names = []\n for i in range(0, num):\n file_names.append('link' + str(i) + '.stl')\n\n return file_names\n\n def animate(self, stances, unit='rad', frame_rate=25, gif=None):\n \"\"\"\n Animates SerialLink object over nx6 dimensional input matrix, with each row representing list of 6 joint angles.\n :param stances: nx6 dimensional input matrix.\n :param unit: unit of input angles. Allowed values: 'rad' or 'deg'\n :param frame_rate: frame_rate for animation. Could be any integer more than 1. 
Higher value runs through stances faster.\n :return: null\n \"\"\"\n if unit == 'deg':\n stances = stances * (pi / 180)\n\n self.pipeline = VtkPipeline(total_time_steps=stances.shape[0] - 1, gif_file=gif)\n self.pipeline.reader_list, self.pipeline.actor_list, self.pipeline.mapper_list = self.__setup_pipeline_objs()\n self.fkine(stances, apply_stance=True, actor_list=self.pipeline.actor_list)\n self.pipeline.add_actor(axesCube(self.pipeline.ren))\n\n def execute(obj, event):\n nonlocal stances\n self.pipeline.timer_tick()\n\n self.fkine(stances, apply_stance=True, actor_list=self.pipeline.actor_list, timer=self.pipeline.timer_count)\n self.pipeline.iren = obj\n self.pipeline.iren.GetRenderWindow().Render()\n\n self.pipeline.iren.AddObserver('TimerEvent', execute)\n self.pipeline.animate()\n\n\nclass Link(ABC):\n \"\"\"\n Link object class.\n \"\"\"\n\n def __init__(self, j, theta, d, a, alpha, offset=None, kind='', mdh=0, flip=None, qlim=None):\n \"\"\"\n initialises the link object.\n :param j:\n :param theta:\n :param d:\n :param a:\n :param alpha:\n :param offset:\n :param kind: 'r' or 'p' as input. 'r' for Revolute. 'p' for Prismatic.\n :param mdh:\n :param flip:\n :param qlim:\n \"\"\"\n self.theta = theta\n self.d = d\n # self.j = j\n self.a = a\n self.alpha = alpha\n self.offset = offset\n self.kind = kind\n self.mdh = mdh\n self.flip = flip\n self.qlim = qlim\n\n def A(self, q):\n sa = math.sin(self.alpha)\n ca = math.cos(self.alpha)\n if self.flip:\n q = -q + self.offset\n else:\n q = q + self.offset\n st = 0\n ct = 0\n d = 0\n if self.kind == 'r':\n st = math.sin(q)\n ct = math.cos(q)\n d = self.d\n elif self.kind == 'p':\n st = math.sin(self.theta)\n ct = math.cos(self.theta)\n d = q\n\n se3_np = 0\n if self.mdh == 0:\n se3_np = np.matrix([[ct, -st * ca, st * sa, self.a * ct],\n [st, ct * ca, -ct * sa, self.a * st],\n [0, sa, ca, d],\n [0, 0, 0, 1]])\n\n return se3_np\n\n\nclass Revolute(Link):\n \"\"\"\n Revolute object class.\n \"\"\"\n\n def __init__(self, j, theta, d, a, alpha, offset, qlim):\n \"\"\"\n Initialised revolute object.\n :param j:\n :param theta:\n :param d:\n :param a:\n :param alpha:\n :param offset:\n :param qlim:\n \"\"\"\n super().__init__(j=j, theta=theta, d=d, a=a, alpha=alpha, offset=offset, kind='r', qlim=qlim)\n pass\n\n\nclass Prismatic(Link):\n \"\"\"\n Prismatic object class.\n \"\"\"\n\n def __init__(self, j, theta, d, a, alpha, offset, qlim):\n \"\"\"\n Initialises prismatic object.\n :param j:\n :param theta:\n :param d:\n :param a:\n :param alpha:\n :param offset:\n :param qlim:\n \"\"\"\n super().__init__(j=j, theta=theta, d=d, a=a, alpha=alpha, offset=offset, kind='p', qlim=qlim)\n pass\n\n pass\n"
] | [
[
"numpy.asarray",
"numpy.testing.assert_almost_equal"
],
[
"numpy.diag",
"numpy.matrix",
"numpy.eye",
"numpy.asmatrix",
"scipy.optimize.minimize",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
yc4ny/eft | [
"3e94efd9982d4ee25ffcfed9254590631264d94c"
] | [
"demo/visEFTFit.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport os\nfrom os.path import join\nfrom os import listdir\nfrom cv2 import FONT_HERSHEY_COMPLEX\n# import json\nimport numpy as np\n\nimport cv2\nimport pickle\nimport torch\n# from smplx import SMPL\nfrom eft.models import SMPL_19\n\nfrom eft.utils.imutils import crop, crop_bboxInfo\nfrom eft.utils.imutils import convert_smpl_to_bbox, convert_bbox_to_oriIm, conv_bboxinfo_bboxXYXY\nfrom eft.utils.geometry import weakProjection\nfrom renderer import viewer2D#, glViewer, glRenderer\nfrom renderer import meshRenderer #screen less opengl renderer\nfrom renderer import glViewer #gui mode opengl renderer\nfrom renderer import denseposeRenderer #densepose renderer\n\n\nfrom tqdm import tqdm\nimport argparse\nimport json\n\n## Constant\nBBOX_IMG_RES = 224\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--img_dir',default=\"/run/media/hjoo/disk/data/mpii_human_pose_v1/images\", type=str , help='Folder path where input image files exist')\nparser.add_argument('--fit_data',default=\"eft_fit/MPII_ver01.json\", type=str, help='EFT data json fortmat')\nparser.add_argument('--smpl_dir',default=\"./extradata/smpl\", type=str , help='Folder path where smpl pkl files exist')\nparser.add_argument('--onbbox',action=\"store_true\", help=\"Show the 3D pose on bbox space\")\nparser.add_argument('--rendermode',default=\"geo\", help=\"Choose among geo, normal, densepose\")\nparser.add_argument('--multi_bbox',default=True, help=\"If ture, show multi-bbox computed from mesh vertices\")\nparser.add_argument('--render_dir',default=\"render_eft\", help=\"Folder to save rendered images\")\nparser.add_argument('--waitforkeys',action=\"store_true\", help=\"If true, it will pasue after each visualizing each sample, waiting for any key pressed\")\nparser.add_argument('--turntable',action=\"store_true\", help=\"If true, show turn table views\")\nparser.add_argument('--multi',action=\"store_true\", help='If True, show all available fitting people per image. Default, visualize a single person at each time')\nargs = parser.parse_args()\n\ndef getRenderer(ren_type='geo'):\n \"\"\"\n Choose renderer type\n geo: phong-shading (silver color)\n colorgeo: phong-shading with color (need color infor. Default silver color)\n denspose: densepose IUV\n normal: normal map\n torch3d: via pytorch3d TODO\n \"\"\"\n if ren_type=='geo':\n renderer = meshRenderer.meshRenderer()\n renderer.setRenderMode('geo')\n\n elif ren_type=='colorgeo':\n renderer = meshRenderer.meshRenderer()\n renderer.setRenderMode('colorgeo')\n \n elif ren_type=='normal':\n renderer = meshRenderer.meshRenderer()\n renderer.setRenderMode('normal')\n\n elif ren_type=='densepose':\n renderer = denseposeRenderer.denseposeRenderer()\n\n # elif ren_type=='torch3d':\n # renderer = torch3dRenderer.torch3dRenderer()\n else:\n assert False\n\n renderer.offscreenMode(True)\n # renderer.bAntiAliasing= False\n return renderer\n\ndef conv_3djoint_2djoint(smpl_joints_3d_vis, imgshape):\n\n smpl_joints_2d_vis = smpl_joints_3d_vis[:,:2] #3D is in camera comaera coordinate with origin on the image center\n smpl_joints_2d_vis[:,0] += imgshape[1]*0.5 #Offset to move the origin on the top left\n smpl_joints_2d_vis[:,1] += imgshape[0]*0.5\n\n return smpl_joints_2d_vis\n \n\ndef visEFT_singleSubject(renderer):\n\n MAGNIFY_RATIO = 3 #onbbox only. 
To magnify the rendered image size \n\n bStopForEachSample = args.waitforkeys #if True, it will wait for any key pressed to move to the next sample\n bShowTurnTable = args.turntable\n\n args.fit_data = \"eft_fit/COCO2014-Part-ver01.json\"\n args.img_dir = \"data/coco/train2014\"\n inputData = args.fit_data\n imgDir = args.img_dir\n\n #Load SMPL model\n smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'\n smpl = SMPL_19(smplModelPath, batch_size=1, create_transl=False)\n \n #Load EFT fitting data\n print(f\"Loading EFT data from {inputData}\")\n if os.path.exists(inputData):\n with open(inputData,'r') as f:\n eft_data = json.load(f)\n print(\"EFT data: ver {}\".format(eft_data['ver']))\n eft_data_all = eft_data['data']\n else:\n print(f\"ERROR:: Cannot find EFT data: {inputData}\")\n assert False\n\n for n in range (1,100) :\n knn = getkthNearestNeighbors(eft_data_all, 5, n) # getkthNearestNeighbors(eft_data_all,k,baseIndex)\n base = resize_image(cv2.imread('a/' + str(knn[0][2]) +'.jpg'),1000,1000)\n first = resize_image(cv2.imread('a/' + str(knn[1][2]) +'.jpg'),1000,1000)\n second = resize_image(cv2.imread('a/' + str(knn[2][2]) +'.jpg'),1000,1000) \n third = resize_image(cv2.imread('a/' + str(knn[3][2]) +'.jpg'),1000,1000) \n fourth = resize_image(cv2.imread('a/' + str(knn[4][2]) +'.jpg'),1000,1000) \n fifth = resize_image(cv2.imread('a/' + str(knn[5][2]) +'.jpg'),1000,1000) \n row1 = np.concatenate((base, first,second), axis=1)\n row2 = np.concatenate((third, fourth, fifth), axis=1)\n final = np.concatenate((row1,row2), axis=0)\n cv2.imwrite('knn/'+str(n)+'.jpg',final)\n \n #Visualize each EFT Fitting output\n for idx, eft_data in enumerate(tqdm(eft_data_all)):\n \n #Get raw image path\n imgFullPath = eft_data['imageName']\n # imgName = os.path.basename(imgFullPath)\n imgName = imgFullPath\n imgFullPath =os.path.join(imgDir, imgName)\n if os.path.exists(imgFullPath) ==False:\n print(f\"Img path is not valid: {imgFullPath}\")\n assert False\n rawImg = cv2.imread(imgFullPath)\n print(f'Input image: {imgFullPath}')\n\n #EFT data\n bbox_scale = eft_data['bbox_scale']\n bbox_center = eft_data['bbox_center']\n\n pred_camera = np.array(eft_data['parm_cam'])\n pred_betas = np.reshape(np.array( eft_data['parm_shape'], dtype=np.float32), (1,10) ) #(10,)\n pred_betas = torch.from_numpy(pred_betas)\n\n pred_pose_rotmat = np.reshape( np.array( eft_data['parm_pose'], dtype=np.float32), (1,24,3,3) ) #(24,3,3)\n pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)\n\n keypoint_2d_validity = eft_data['joint_validity_openpose18']\n\n #COCO only. 
Annotation index\n if 'annotId' in eft_data.keys():\n print(\"COCO annotId: {}\".format(eft_data['annotId']))\n\n\n #Get SMPL mesh and joints from SMPL parameters\n smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,[0]], pose2rot=False)\n smpl_vertices = smpl_output.vertices.detach().cpu().numpy()[0]\n smpl_joints_3d = smpl_output.joints.detach().cpu().numpy()[0]\n\n #Crop image using cropping information\n croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg, bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )\n\n\n if MAGNIFY_RATIO>1:\n croppedImg = cv2.resize(croppedImg, (croppedImg.shape[1]*MAGNIFY_RATIO, croppedImg.shape[0]*MAGNIFY_RATIO) )\n\n ########################\n # Visualization\n ########################\n\n # Visualize 2D image\n if True:\n viewer2D.ImShow(rawImg, name='rawImg', waitTime=1) #You should press any key \n viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)\n\n #Convert bbox_center, bbox_scale --> bbox_xyxy\n bbox_xyxy = conv_bboxinfo_bboxXYXY(bbox_scale,bbox_center)\n img_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg.copy(),bbox_xyxy[:2], bbox_xyxy[2:])\n viewer2D.ImShow(img_bbox, name='img_bbox', waitTime=1)\n\n # Visualization Mesh\n if True: \n camParam_scale = pred_camera[0]\n camParam_trans = pred_camera[1:]\n pred_vert_vis = smpl_vertices\n smpl_joints_3d_vis = smpl_joints_3d\n\n if args.onbbox:\n pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)\n smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)\n renderer.setBackgroundTexture(croppedImg)\n renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])\n\n pred_vert_vis *=MAGNIFY_RATIO\n else:\n #Covert SMPL to BBox first\n pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)\n smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)\n\n #From cropped space to original\n pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) \n smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])\n renderer.setBackgroundTexture(rawImg)\n renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])\n\n #In orthographic model. 
XY of 3D is just 2D projection\n smpl_joints_2d_vis = conv_3djoint_2djoint(smpl_joints_3d_vis,rawImg.shape )\n # image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_smpl45(smpl_joints_2d_vis, image=rawImg.copy(),color=(0,255,255))\n image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(smpl_joints_2d_vis, image=rawImg.copy(),color=(255,0,0)) #All 2D joint\n image_2dkeypoint_pred = viewer2D.Vis_Skeleton_2D_Openpose18(smpl_joints_2d_vis, pt2d_visibility=keypoint_2d_validity, image=image_2dkeypoint_pred,color=(0,255,255)) #Only valid\n viewer2D.ImShow(image_2dkeypoint_pred, name='keypoint_2d_pred', waitTime=1)\n\n pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}\n v = pred_meshes['ver'] \n f = pred_meshes['f']\n\n #Visualize in the original image space\n renderer.set_mesh(v,f)\n renderer.showBackground(True)\n renderer.setWorldCenterBySceneCenter()\n renderer.setCameraViewMode(\"cam\")\n\n #Set image size for rendering\n if args.onbbox:\n renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])\n else:\n renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])\n \n renderer.display()\n renderImg = renderer.get_screen_color_ibgr()\n viewer2D.ImShow(renderImg,waitTime=1)\n \n # Visualize multi-level cropped bbox\n if args.multi_bbox:\n from demo.multi_bbox_gen import multilvel_bbox_crop_gen\n \n bbox_list = multilvel_bbox_crop_gen(rawImg, pred_vert_vis, bbox_center, bbox_scale)\n\n #Visualize BBox\n for b_idx, b in enumerate(bbox_list):\n # bbox_xyxy= conv_bboxinfo_centerscale_to_bboxXYXY(b['center'], b['scale'])\n bbox_xyxy= b['bbox_xyxy']\n if b_idx==0:\n img_multi_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox_xyxy[:2], bbox_xyxy[2:] ,color=(0,255,0))\n else:\n img_multi_bbox = viewer2D.Vis_Bbox_minmaxPt(rawImg, bbox_xyxy[:2], bbox_xyxy[2:] ,color=(0,255,255))\n viewer2D.ImShow(img_multi_bbox, name='multi_bbox', waitTime=1)\n # for bbox in bbox_list:\n\n\n # Visualization Mesh on side view\n if True:\n renderer.showBackground(False)\n renderer.setWorldCenterBySceneCenter()\n # renderer.setCameraViewMode(\"side\") #To show the object in side vie\n renderer.setCameraViewMode(\"free\") \n renderer.setViewAngle(90,20)\n\n #Set image size for rendering\n if args.onbbox:\n renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])\n else:\n renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])\n renderer.display()\n sideImg = renderer.get_screen_color_ibgr() #Overwite on rawImg\n viewer2D.ImShow(sideImg,waitTime=1)\n \n sideImg = cv2.resize(sideImg, (renderImg.shape[1], renderImg.shape[0]) )\n # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )\n \n # Visualization Mesh on side view\n if True:\n renderer.showBackground(False)\n renderer.setWorldCenterBySceneCenter()\n # renderer.setCameraViewMode(\"side\") #To show the object in side vie\n renderer.setCameraViewMode(\"free\") \n renderer.setViewAngle(-60,50)\n\n #Set image size for rendering\n if args.onbbox:\n renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])\n else:\n renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])\n renderer.display()\n sideImg_2 = renderer.get_screen_color_ibgr() #Overwite on rawImg\n viewer2D.ImShow(sideImg_2,waitTime=1)\n \n sideImg_2 = cv2.resize(sideImg_2, (renderImg.shape[1], renderImg.shape[0]) )\n # renderImg = cv2.resize(renderImg, (sideImg.shape[1], sideImg.shape[0]) )\n\n\n #Visualize camera view and side view\n saveImg = np.concatenate( (renderImg,sideImg), axis =1)\n # saveImg = np.concatenate( (croppedImg, renderImg,sideImg, 
sideImg_2), axis =1)\n\n if bStopForEachSample:\n viewer2D.ImShow(saveImg,waitTime=0) #waitTime=0 means that it will wait for any key pressed\n else:\n viewer2D.ImShow(saveImg,waitTime=1)\n \n #Render Mesh on the rotating view\n if bShowTurnTable:\n renderer.showBackground(False)\n renderer.setWorldCenterBySceneCenter()\n renderer.setCameraViewMode(\"free\")\n for i in range(90):\n renderer.setViewAngle(i*4,0)\n renderer.display()\n sideImg = renderer.get_screen_color_ibgr() #Overwite on rawImg\n viewer2D.ImShow(sideImg,waitTime=1,name=\"turn_table\")\n\n if False: #If you want to save this into files\n render_output_path = args.render_dir + '/turntable_{}_{:08d}.jpg'.format(os.path.basename(imgName),i)\n cv2.imwrite(render_output_path, sideImg)\n\n #Save the rendered image to files\n if True: \n if os.path.exists(args.render_dir) == False:\n os.mkdir(args.render_dir)\n #render_output_path = args.render_dir + '/render_{}_eft{:08d}.jpg'.format(imgName[:-4],idx)\n render_output_path = 'a/{}.jpg'.format(idx)\n print(f\"Save to {render_output_path}\")\n cv2.imwrite(render_output_path, saveImg)\n\n#Calculating distance between 2 SMPL body shape parameters \ndef calculateDistance(parm1, parm2): \n \n #Convert each image to numpy arrays (for calculation)\n arr1 = np.array(parm1)\n arr1 = np.resize(arr1, (1,216)) # 24*3*3 = 216\n\n arr2 = np.array(parm2)\n arr2 = np.resize(arr2, (1,216))\n \n #Calculate the Euclidian Distance\n dist = np.linalg.norm(arr1-arr2)\n \n return dist\n\ndef getkthNearestNeighbors(eft_data_all,k,baseIndex): #k for kth nearest neighbor\n dis = np.zeros((28062,2), dtype= object)\n for n in range(0,28062):\n dis[n][0] = calculateDistance(eft_data_all[baseIndex]['parm_pose'],eft_data_all[n]['parm_pose'])\n dis[n][1] = n\n \n # res = sorted(dis.items(), key=lambda x:x[1]) #Sort by size\n res = dis[dis[:, 0].argsort()]\n #del res[0] #Delete closest since it is itself\n \n array = np.zeros((k+1,3), dtype= object)\n for n in range(0,k+1):\n array[n][0] = eft_data_all[res[n][1]]['imageName']\n array[n][1] = res[n][0]\n array[n][2] = res[n][1]\n\n return array\n\ndef resize_image(img,IMG_COL, IMG_ROW) :\n border_v = 0\n border_h = 0\n if (IMG_COL/IMG_ROW) >= (img.shape[0]/img.shape[1]):\n border_v = int((((IMG_COL/IMG_ROW)*img.shape[1])-img.shape[0])/2)\n else:\n border_h = int((((IMG_ROW/IMG_COL)*img.shape[0])-img.shape[1])/2)\n img = cv2.copyMakeBorder(img, border_v, border_v, border_h, border_h, cv2.BORDER_CONSTANT, 0)\n img = cv2.resize(img, (IMG_ROW, IMG_COL)) \n return img \n\ndef visEFT_multiSubjects(renderer):\n\n bStopForEachSample = args.waitforkeys #if True, it will wait for any key pressed to move to the next sample\n bShowTurnTable = args.turntable\n \n # inputDir = args.fit_dir\n inputData = args.fit_data\n imgDir = args.img_dir\n smplModelPath = args.smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'\n smpl = SMPL(smplModelPath, batch_size=1, create_transl=False)\n\n if os.path.exists(inputData):\n with open(inputData,'r') as f:\n eft_data = json.load(f)\n print(\"EFT data: ver {}\".format(eft_data['ver']))\n eft_data_all = eft_data['data']\n else:\n print(f\"ERROR:: Cannot find EFT data: {inputData}\")\n assert False\n\n #Aggregate all efl per image\n eft_perimage ={}\n for idx, eft_data in enumerate(eft_data_all):\n #Load\n imageName = eft_data['imageName']\n if imageName not in eft_perimage.keys():\n eft_perimage[imageName] =[]\n\n eft_perimage[imageName].append(eft_data)\n\n\n for imgName in tqdm(eft_perimage):\n eft_data_perimage = eft_perimage[imgName]\n 
\n renderer.clear_mesh()\n\n for idx,eft_data in enumerate(eft_data_perimage):\n \n #Get raw image path\n imgFullPath = eft_data['imageName']\n imgName = os.path.basename(imgFullPath)\n imgFullPath =os.path.join(imgDir, imgName)\n if os.path.exists(imgFullPath) ==False:\n print(f\"Img path is not valid: {imgFullPath}\")\n assert False\n rawImg = cv2.imread(imgFullPath)\n print(f'Input image: {imgFullPath}')\n\n bbox_scale = eft_data['bbox_scale']\n bbox_center = eft_data['bbox_center']\n\n pred_camera = np.array(eft_data['parm_cam'])\n pred_betas = np.reshape(np.array( eft_data['parm_shape'], dtype=np.float32), (1,10) ) #(10,)\n pred_betas = torch.from_numpy(pred_betas)\n\n pred_pose_rotmat = np.reshape( np.array( eft_data['parm_pose'], dtype=np.float32), (1,24,3,3) ) #(24,3,3)\n pred_pose_rotmat = torch.from_numpy(pred_pose_rotmat)\n \n # gt_keypoint_2d = np.reshape( np.array(eft_data['gt_keypoint_2d']), (-1,3)) #(49,3)\n keypoint_2d_validity = eft_data['joint_validity_openpose18']\n\n #COCO only. Annotation index\n print(\"COCO annotId: {}\".format(eft_data['annotId']))\n\n #Obtain skeleton and smpl data\n smpl_output = smpl(betas=pred_betas, body_pose=pred_pose_rotmat[:,1:], global_orient=pred_pose_rotmat[:,0].unsqueeze(1), pose2rot=False )\n smpl_vertices = smpl_output.vertices.detach().cpu().numpy() \n smpl_joints_3d = smpl_output.joints.detach().cpu().numpy() \n\n #Crop image\n croppedImg, boxScale_o2n, bboxTopLeft = crop_bboxInfo(rawImg.copy(), bbox_center, bbox_scale, (BBOX_IMG_RES, BBOX_IMG_RES) )\n\n ########################\n # Visualize\n # Visualize 2D image\n if False:\n viewer2D.ImShow(rawImg, name='rawImg', waitTime=1) #You should press any key \n viewer2D.ImShow(croppedImg, name='croppedImg', waitTime=1)\n\n # Visualization Mesh on raw images\n if True: \n camParam_scale = pred_camera[0]\n camParam_trans = pred_camera[1:]\n pred_vert_vis = smpl_vertices[0]\n smpl_joints_3d_vis = smpl_joints_3d[0]\n\n if False:#args.onbbox: #Always in the original image\n pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)\n smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)\n renderer.setBackgroundTexture(croppedImg)\n renderer.setViewportSize(croppedImg.shape[1], croppedImg.shape[0])\n else:\n #Covert SMPL to BBox first\n pred_vert_vis = convert_smpl_to_bbox(pred_vert_vis, camParam_scale, camParam_trans)\n smpl_joints_3d_vis = convert_smpl_to_bbox(smpl_joints_3d_vis, camParam_scale, camParam_trans)\n\n #From cropped space to original\n pred_vert_vis = convert_bbox_to_oriIm(pred_vert_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0]) \n smpl_joints_3d_vis = convert_bbox_to_oriIm(smpl_joints_3d_vis, boxScale_o2n, bboxTopLeft, rawImg.shape[1], rawImg.shape[0])\n renderer.setBackgroundTexture(rawImg)\n renderer.setViewportSize(rawImg.shape[1], rawImg.shape[0])\n\n pred_meshes = {'ver': pred_vert_vis, 'f': smpl.faces}\n v = pred_meshes['ver'] \n f = pred_meshes['f']\n\n #Visualize in the original image spaceq\n # renderer.set_mesh(v,f)\n renderer.add_mesh(v,f)\n\n #Render Mesh on the camera view\n renderer.showBackground(True)\n renderer.setWorldCenterBySceneCenter()\n renderer.setCameraViewMode(\"cam\")\n renderer.display()\n overlaid = renderer.get_screen_color_ibgr() #Overwite on rawImg\n # viewer2D.ImShow(overlaid,waitTime=1,name=\"overlaid\")\n\n if bStopForEachSample:\n viewer2D.ImShow(overlaid,waitTime=0,name=\"overlaid\") #waitTime=0 means that it will wait for any key pressed\n else:\n 
viewer2D.ImShow(overlaid,waitTime=1,name=\"overlaid\")\n\n #Render Mesh on the rotating view\n if bShowTurnTable:\n renderer.showBackground(False)\n renderer.setWorldCenterBySceneCenter()\n renderer.setCameraViewMode(\"free\")\n for i in range(90):\n renderer.setViewAngle(i*4,0)\n renderer.display()\n sideImg = renderer.get_screen_color_ibgr() #Overwite on rawImg\n viewer2D.ImShow(sideImg,waitTime=1,name=\"turn_table\")\n \n if True: #Save the rendered image to files\n if os.path.exists(args.render_dir) == False:\n os.mkdir(args.render_dir)\n render_output_path = args.render_dir + '/render_{}.jpg'.format(imgName)\n print(f\"Save to {render_output_path}\")\n cv2.imwrite(render_output_path, rawImg)\n\nif __name__ == '__main__':\n renderer = getRenderer(args.rendermode)\n\n if args.multi:\n visEFT_multiSubjects(renderer)\n else:\n visEFT_singleSubject(renderer)\n"
] | [
[
"numpy.resize",
"numpy.linalg.norm",
"torch.from_numpy",
"numpy.concatenate",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Sanjana12111994/dataprep | [
"e94aae5ee73a650f86825432a8c8be04d46012d7"
] | [
"dataprep/eda/missing/compute.py"
] | [
"\"\"\"\n This module implements the plot_missing(df) function's\n calculating intermediate part\n\"\"\"\nfrom typing import Optional, Tuple, Union, List\n\nimport dask\nimport dask.array as da\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import rv_histogram\n\nfrom ...errors import UnreachableError\nfrom ..utils import to_dask\nfrom ..intermediate import Intermediate, ColumnsMetadata\nfrom ..dtypes import is_categorical, is_numerical, is_pandas_categorical\n\n__all__ = [\"compute_missing\"]\n\nLABELS = [\"Origin\", \"DropMissing\"]\n\n\ndef histogram(\n srs: dd.Series,\n bins: Optional[int] = None,\n return_edges: bool = True,\n range: Optional[Tuple[int, int]] = None, # pylint: disable=redefined-builtin\n) -> Union[Tuple[da.Array, da.Array], Tuple[da.Array, da.Array, da.Array]]:\n \"\"\"\n Calculate histogram for both numerical and categorical\n \"\"\"\n\n if is_numerical(srs.dtype):\n if range is not None:\n minimum, maximum = range\n else:\n minimum, maximum = srs.min(axis=0), srs.max(axis=0)\n minimum, maximum = dask.compute(minimum, maximum)\n\n assert (\n bins is not None\n ), \"num_bins cannot be None if calculating numerical histograms\"\n\n counts, edges = da.histogram(\n srs.to_dask_array(), bins, range=[minimum, maximum]\n )\n centers = (edges[:-1] + edges[1:]) / 2\n\n if not return_edges:\n return counts, centers\n return counts, centers, edges\n elif is_categorical(srs.dtype):\n value_counts = srs.value_counts()\n counts = value_counts.to_dask_array()\n\n # Dask array dones't understand the pandas dtypes such as categorical type.\n # We convert these types into str before calling into `to_dask_array`.\n\n if is_pandas_categorical(value_counts.index.dtype):\n centers = value_counts.index.astype(\"str\").to_dask_array()\n else:\n centers = value_counts.index.to_dask_array()\n return (counts, centers)\n else:\n raise UnreachableError()\n\n\ndef missing_perc_blockwise(block: np.ndarray) -> np.ndarray:\n \"\"\"\n Compute the missing percentage in a block\n \"\"\"\n return block.sum(axis=0, keepdims=True) / len(block)\n\n\ndef missing_spectrum(df: dd.DataFrame, bins: int, ncols: int) -> Intermediate:\n \"\"\"\n Calculate a missing spectrum for each column\n \"\"\"\n # pylint: disable=too-many-locals\n num_bins = min(bins, len(df) - 1)\n\n df = df.iloc[:, :ncols]\n cols = df.columns[:ncols]\n ncols = len(cols)\n nrows = len(df)\n chunk_size = len(df) // num_bins\n\n data = df.isnull().to_dask_array()\n data.compute_chunk_sizes()\n data = data.rechunk((chunk_size, None))\n\n (notnull_counts,) = dd.compute(data.sum(axis=0) / data.shape[0])\n missing_percent = {col: notnull_counts[idx] for idx, col in enumerate(cols)}\n\n missing_percs = data.map_blocks(missing_perc_blockwise, dtype=float).compute()\n locs0 = np.arange(len(missing_percs)) * chunk_size\n locs1 = np.minimum(locs0 + chunk_size, nrows)\n locs_middle = locs0 + chunk_size / 2\n\n df = pd.DataFrame(\n {\n \"column\": np.repeat(cols.values, len(missing_percs)),\n \"location\": np.tile(locs_middle, ncols),\n \"missing_rate\": missing_percs.T.ravel(),\n \"loc_start\": np.tile(locs0, ncols),\n \"loc_end\": np.tile(locs1, ncols),\n }\n )\n return Intermediate(\n data=df, missing_percent=missing_percent, visual_type=\"missing_spectrum\"\n )\n\n\ndef missing_impact_1vn( # pylint: disable=too-many-locals\n df: dd.DataFrame, x: str, bins: int\n) -> Intermediate:\n \"\"\"\n Calculate the distribution change on other columns when\n the missing values in x is dropped.\n \"\"\"\n df0 = df\n df1 = 
df.dropna(subset=[x])\n cols = [col for col in df.columns if col != x]\n\n hists = {}\n\n for col in cols:\n range = None # pylint: disable=redefined-builtin\n if is_numerical(df0[col].dtype):\n range = (df0[col].min(axis=0), df0[col].max(axis=0))\n\n hists[col] = [\n histogram(df[col], bins=bins, return_edges=True, range=range)\n for df in [df0, df1]\n ]\n (hists,) = dd.compute(hists)\n\n dfs = {}\n\n meta = ColumnsMetadata()\n\n for col, hists_ in hists.items():\n counts, xs, *edges = zip(*hists_)\n\n labels = np.repeat(LABELS, [len(x) for x in xs])\n\n data = {\n \"x\": np.concatenate(xs),\n \"count\": np.concatenate(counts),\n \"label\": labels,\n }\n\n if edges:\n lower_bound: List[float] = []\n upper_bound: List[float] = []\n\n for edge in edges[0]:\n lower_bound.extend(edge[:-1])\n upper_bound.extend(edge[1:])\n\n data[\"lower_bound\"] = lower_bound\n data[\"upper_bound\"] = upper_bound\n\n df = pd.DataFrame(data)\n\n # If the cardinality of a categorical column is too large,\n # we show the top `num_bins` values, sorted by their count before drop\n if len(counts[0]) > bins and is_categorical(df0[col].dtype):\n sortidx = np.argsort(-counts[0])\n selected_xs = xs[0][sortidx[:bins]]\n df = df[df[\"x\"].isin(selected_xs)]\n meta[col, \"partial\"] = (bins, len(counts[0]))\n else:\n meta[col, \"partial\"] = (len(counts[0]), len(counts[0]))\n\n meta[col, \"dtype\"] = df0[col].dtype\n dfs[col] = df\n\n return Intermediate(data=dfs, x=x, meta=meta, visual_type=\"missing_impact_1vn\")\n\n\ndef missing_impact_1v1( # pylint: disable=too-many-locals\n df: dd.DataFrame, x: str, y: str, bins: int, ndist_sample: int\n) -> Intermediate:\n \"\"\"\n Calculate the distribution change on another column y when\n the missing values in x is dropped.\n \"\"\"\n\n df0 = df[[x, y]]\n df1 = df.dropna(subset=[x])\n\n srs0, srs1 = df0[y], df1[y]\n minimum, maximum = srs0.min(), srs0.max()\n\n hists = [histogram(srs, bins=bins, return_edges=True) for srs in [srs0, srs1]]\n hists = da.compute(*hists)\n\n meta = ColumnsMetadata()\n meta[\"y\", \"dtype\"] = df[y].dtype\n\n if is_numerical(df[y].dtype):\n dists = [rv_histogram((hist[0], hist[2])) for hist in hists] # type: ignore\n xs = np.linspace(minimum, maximum, ndist_sample)\n\n pdfs = [dist.pdf(xs) for dist in dists]\n cdfs = [dist.cdf(xs) for dist in dists]\n\n distdf = pd.DataFrame(\n {\n \"x\": np.tile(xs, 2),\n \"pdf\": np.concatenate(pdfs),\n \"cdf\": np.concatenate(cdfs),\n \"label\": np.repeat(LABELS, ndist_sample),\n }\n )\n\n counts, xs, edges = zip(*hists)\n\n lower_bounds: List[float] = []\n upper_bounds: List[float] = []\n\n for edge in edges:\n lower_bounds.extend(edge[:-1])\n upper_bounds.extend(edge[1:])\n\n histdf = pd.DataFrame(\n {\n \"x\": np.concatenate(xs),\n \"count\": np.concatenate(counts),\n \"label\": np.repeat(LABELS, [len(count) for count in counts]),\n \"lower_bound\": lower_bounds,\n \"upper_bound\": upper_bounds,\n }\n )\n\n quantiles = [\n [srs.quantile(q) for q in [0, 0.25, 0.5, 0.75, 1]] for srs in [srs0, srs1]\n ]\n quantiles = dd.compute(*quantiles)\n\n boxdf = pd.DataFrame(quantiles)\n boxdf.columns = [\"min\", \"q1\", \"q2\", \"q3\", \"max\"]\n\n iqr = boxdf[\"q3\"] - boxdf[\"q1\"]\n boxdf[\"upper\"] = np.minimum(boxdf[\"q3\"] + 1.5 * iqr, boxdf[\"max\"])\n boxdf[\"lower\"] = np.maximum(boxdf[\"q3\"] - 1.5 * iqr, boxdf[\"min\"])\n boxdf[\"label\"] = LABELS\n\n itmdt = Intermediate(\n dist=distdf,\n hist=histdf,\n box=boxdf,\n meta=meta[\"y\"],\n x=x,\n y=y,\n visual_type=\"missing_impact_1v1\",\n )\n return itmdt\n 
else:\n\n counts, xs = zip(*hists)\n\n df = pd.DataFrame(\n {\n \"x\": np.concatenate(xs, axis=0),\n \"count\": np.concatenate(counts, axis=0),\n \"label\": np.repeat(LABELS, [len(count) for count in counts]),\n }\n )\n\n # If the cardinality of a categorical column is too large,\n # we show the top `num_bins` values, sorted by their count before drop\n if len(counts[0]) > bins:\n sortidx = np.argsort(-counts[0])\n selected_xs = xs[0][sortidx[:bins]]\n df = df[df[\"x\"].isin(selected_xs)]\n partial = (bins, len(counts[0]))\n else:\n partial = (len(counts[0]), len(counts[0]))\n\n meta[\"y\", \"partial\"] = partial\n\n itmdt = Intermediate(\n hist=df, x=x, y=y, meta=meta[\"y\"], visual_type=\"missing_impact_1v1\",\n )\n return itmdt\n\n\ndef compute_missing(\n # pylint: disable=too-many-arguments\n df: Union[pd.DataFrame, dd.DataFrame],\n x: Optional[str] = None,\n y: Optional[str] = None,\n *,\n bins: int = 30,\n ncols: int = 30,\n ndist_sample: int = 100,\n) -> Intermediate:\n \"\"\"\n This function is designed to deal with missing values\n There are three functions: plot_missing(df), plot_missing(df, x)\n plot_missing(df, x, y)\n\n Parameters\n ----------\n df\n the pandas data_frame for which plots are calculated for each column\n x\n a valid column name of the data frame\n y\n a valid column name of the data frame\n ncols\n The number of columns in the figure\n bins\n The number of rows in the figure\n ndist_sample\n The number of sample points\n\n Examples\n ----------\n >>> from dataprep.eda.missing.computation import plot_missing\n >>> import pandas as pd\n >>> df = pd.read_csv(\"suicide-rate.csv\")\n >>> plot_missing(df, \"HDI_for_year\")\n >>> plot_missing(df, \"HDI_for_year\", \"population\")\n \"\"\"\n\n df = to_dask(df)\n\n # pylint: disable=no-else-raise\n if x is None and y is not None:\n raise ValueError(\"x cannot be None while y has value\")\n elif x is not None and y is None:\n return missing_impact_1vn(df, x=x, bins=bins)\n elif x is not None and y is not None:\n return missing_impact_1v1(df, x=x, y=y, bins=bins, ndist_sample=ndist_sample)\n else:\n return missing_spectrum(df, bins=bins, ncols=ncols)\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.linspace",
"numpy.tile",
"pandas.DataFrame",
"scipy.stats.rv_histogram",
"numpy.concatenate",
"numpy.argsort",
"numpy.repeat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
skeshaw/LoReNMT | [
"32ffd83f38258dfffd324f811695a44ad33954f5",
"32ffd83f38258dfffd324f811695a44ad33954f5",
"32ffd83f38258dfffd324f811695a44ad33954f5",
"32ffd83f38258dfffd324f811695a44ad33954f5"
] | [
"fairseq/fairseq/criterions/nat_loss.py",
"fairseq/models/fconv_self_att.py",
"fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py",
"fairseq/optim/adamax.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport math\r\n\r\nimport torch.nn.functional as F\r\nfrom fairseq import utils\r\nimport torch\r\nfrom torch import Tensor\r\n\r\nfrom . import FairseqCriterion, register_criterion\r\n\r\n\r\n@register_criterion(\"nat_loss\")\r\nclass LabelSmoothedDualImitationCriterion(FairseqCriterion):\r\n @staticmethod\r\n def add_args(parser):\r\n \"\"\"Add criterion-specific arguments to the parser.\"\"\"\r\n # fmt: off\r\n parser.add_argument(\r\n '--label-smoothing',\r\n default=0.,\r\n type=float,\r\n metavar='D',\r\n help='epsilon for label smoothing, 0 means no label smoothing')\r\n # fmt: on\r\n\r\n def _compute_loss(\r\n self, outputs, targets, masks=None, label_smoothing=0.0, name=\"loss\", factor=1.0\r\n ):\r\n \"\"\"\r\n outputs: batch x len x d_model\r\n targets: batch x len\r\n masks: batch x len\r\n\r\n policy_logprob: if there is some policy\r\n depends on the likelihood score as rewards.\r\n \"\"\"\r\n\r\n def mean_ds(x: Tensor, dim=None) -> Tensor:\r\n return (\r\n x.float().mean().type_as(x)\r\n if dim is None\r\n else x.float().mean(dim).type_as(x)\r\n )\r\n if masks is not None:\r\n outputs, targets = outputs[masks], targets[masks]\r\n\r\n if not masks.any():\r\n nll_loss = torch.tensor(0)\r\n loss = nll_loss\r\n else:\r\n logits = F.log_softmax(outputs, dim=-1)\r\n if targets.dim() == 1:\r\n losses = F.nll_loss(logits, targets.to(logits.device), reduction='none')\r\n\r\n else: # soft-labels\r\n losses = F.kl_div(logits, targets.to(logits.device), reduction='none')\r\n losses = losses.sum(-1)\r\n\r\n nll_loss = mean_ds(losses)\r\n if label_smoothing > 0:\r\n loss = nll_loss * (\r\n 1 - label_smoothing) - mean_ds(logits) * label_smoothing\r\n else:\r\n loss = nll_loss\r\n\r\n loss = loss * factor\r\n return {\"name\": name, \"loss\": loss, \"nll_loss\": nll_loss, \"factor\": factor}\r\n\r\n def _custom_loss(self, loss, name=\"loss\"):\r\n return {\"name\": name, \"loss\": loss, \"factor\": 1}\r\n\r\n def forward(self, model, sample, reduce=True):\r\n \"\"\"Compute the loss for the given sample.\r\n Returns a tuple with three elements:\r\n 1) the loss\r\n 2) the sample size, which is used as the denominator for the gradient\r\n 3) logging outputs to display while training\r\n \"\"\"\r\n nsentences, ntokens = sample[\"nsentences\"], sample[\"ntokens\"]\r\n\r\n # B x T\r\n src_tokens, src_lengths = (\r\n sample[\"net_input\"][\"src_tokens\"],\r\n sample[\"net_input\"][\"src_lengths\"],\r\n )\r\n tgt_tokens, prev_output_tokens = sample[\"target\"], sample[\"prev_target\"]\r\n\r\n outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)\r\n losses = []\r\n if \"mask_ins_out\" in outputs:\r\n mask_ins_losses = self._compute_loss(\r\n outputs[\"mask_ins_out\"],\r\n outputs[\"mask_ins_tgt\"],\r\n outputs[\"mask_ins_mask\"],\r\n name=\"m_ins-loss\",\r\n factor=1 if \"mask_ins_w\" not in outputs else outputs[\"mask_ins_w\"],\r\n )\r\n losses += [mask_ins_losses]\r\n\r\n if \"word_ins_out\" in outputs:\r\n word_ins_losses = self._compute_loss(\r\n outputs[\"word_ins_out\"],\r\n outputs[\"word_ins_tgt\"],\r\n outputs[\"word_ins_mask\"],\r\n self.args.label_smoothing,\r\n name=\"w_ins-loss\",\r\n factor=1 if \"word_ins_w\" not in outputs else outputs[\"word_ins_w\"],\r\n )\r\n\r\n losses += [word_ins_losses]\r\n nll_loss = word_ins_losses[\"nll_loss\"]\r\n\r\n if \"word_del_out\" in 
outputs:\r\n word_del_losses = self._compute_loss(\r\n outputs[\"word_del_out\"],\r\n outputs[\"word_del_tgt\"],\r\n outputs[\"word_del_mask\"],\r\n 0.01,\r\n name=\"w_del-loss\",\r\n factor=1 if \"word_del_w\" not in outputs else outputs[\"word_del_w\"],\r\n )\r\n\r\n losses += [word_del_losses]\r\n\r\n if \"length_out\" in outputs:\r\n length_losses = self._compute_loss(\r\n outputs[\"length_out\"],\r\n outputs[\"length_tgt\"],\r\n name=\"len-loss\",\r\n factor=1 if \"length_w\" not in outputs else outputs[\"length_w\"],\r\n )\r\n\r\n losses += [length_losses]\r\n\r\n for w in outputs:\r\n if \"-loss\" in w:\r\n losses += [self._custom_loss(outputs[w], w)]\r\n\r\n loss = sum(l[\"loss\"] for l in losses)\r\n\r\n # NOTE: as we are summing up per token mlm loss and per sentence nsp loss\r\n # we don't need to use sample_size as denominator for the gradient\r\n # here sample_size is just used for logging\r\n sample_size = 1\r\n logging_output = {\r\n \"loss\": utils.item(loss.data) if reduce else loss.data,\r\n \"nll_loss\": utils.item(nll_loss.data) if reduce else nll_loss.data,\r\n \"ntokens\": ntokens,\r\n \"nsentences\": nsentences,\r\n \"sample_size\": sample_size,\r\n }\r\n\r\n for l in losses:\r\n logging_output[l[\"name\"]] = (\r\n utils.item(l[\"loss\"].data / l[\"factor\"])\r\n if reduce\r\n else l[[\"loss\"]].data / l[\"factor\"]\r\n )\r\n\r\n return loss, sample_size, logging_output\r\n\r\n @staticmethod\r\n def aggregate_logging_outputs(logging_outputs):\r\n \"\"\"Aggregate logging outputs from data parallel training.\"\"\"\r\n ntokens = sum(log.get(\"ntokens\", 0) for log in logging_outputs)\r\n nsentences = sum(log.get(\"nsentences\", 0) for log in logging_outputs)\r\n sample_size = sum(log.get(\"sample_size\", 0) for log in logging_outputs)\r\n loss = sum(log.get(\"loss\", 0) for log in logging_outputs)\r\n nll_loss = sum(log.get(\"nll_loss\", 0) for log in logging_outputs)\r\n\r\n results = {\r\n \"loss\": loss / sample_size / math.log(2) if sample_size > 0 else 0.0,\r\n \"nll_loss\": nll_loss / sample_size / math.log(2)\r\n if sample_size > 0\r\n else 0.0,\r\n \"ntokens\": ntokens,\r\n \"nsentences\": nsentences,\r\n \"sample_size\": sample_size,\r\n }\r\n\r\n for key in logging_outputs[0]:\r\n if key[-5:] == \"-loss\":\r\n results[key[:-5]] = (\r\n sum(log.get(key, 0) for log in logging_outputs)\r\n / sample_size\r\n / math.log(2)\r\n if sample_size > 0\r\n else 0.0\r\n )\r\n\r\n return results\r\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport math\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom fairseq import checkpoint_utils\r\nfrom fairseq.models import (\r\n CompositeEncoder,\r\n FairseqDecoder,\r\n FairseqEncoder,\r\n FairseqEncoderDecoderModel,\r\n register_model,\r\n register_model_architecture,\r\n)\r\nfrom fairseq.modules import (\r\n DownsampledMultiHeadAttention,\r\n GradMultiply,\r\n LayerNorm,\r\n LearnedPositionalEmbedding,\r\n LinearizedConvolution,\r\n)\r\n\r\n\r\n@register_model('fconv_self_att')\r\nclass FConvModelSelfAtt(FairseqEncoderDecoderModel):\r\n\r\n @classmethod\r\n def hub_models(cls):\r\n return {\r\n 'conv.stories': 'https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.bz2',\r\n # Test set containing dictionaries\r\n 'data.stories': 'https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2',\r\n }\r\n\r\n def __init__(self, encoder, decoder, pretrained_encoder=None):\r\n super().__init__(encoder, decoder)\r\n self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention)\r\n self.pretrained_encoder = pretrained_encoder\r\n if self.pretrained_encoder is None:\r\n encoders = {'encoder': encoder}\r\n else:\r\n encoders = {'encoder': encoder, 'pretrained': self.pretrained_encoder}\r\n # for fusion model, CompositeEncoder contains both pretrained and training encoders\r\n # these are forwarded and then combined in the decoder\r\n self.encoder = CompositeEncoder(encoders)\r\n\r\n @staticmethod\r\n def add_args(parser):\r\n \"\"\"Add model-specific arguments to the parser.\"\"\"\r\n # fmt: off\r\n parser.add_argument('--dropout', type=float, metavar='D',\r\n help='dropout probability')\r\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\r\n help='encoder embedding dimension')\r\n parser.add_argument('--encoder-layers', type=str, metavar='EXPR',\r\n help='encoder layers [(dim, kernel_size), ...]')\r\n parser.add_argument('--decoder-embed-dim', type=int, metavar='N',\r\n help='decoder embedding dimension')\r\n parser.add_argument('--decoder-layers', type=str, metavar='EXPR',\r\n help='decoder layers [(dim, kernel_size), ...]')\r\n parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',\r\n help='decoder output embedding dimension')\r\n parser.add_argument('--decoder-attention', type=str, metavar='EXPR',\r\n help='decoder attention [True, ...]')\r\n parser.add_argument('--self-attention', type=str, metavar='EXPR',\r\n help='decoder self-attention layers, ex: [True] + [False]*5')\r\n parser.add_argument('--multihead-attention-nheads', type=int,\r\n help='Number of heads to use in attention')\r\n parser.add_argument('--multihead-self-attention-nheads', type=int,\r\n help='Number of heads to use in self-attention')\r\n parser.add_argument('--encoder-attention', type=str, metavar='EXPR',\r\n help='encoder attention [True, ...]')\r\n parser.add_argument('--encoder-attention-nheads', type=int,\r\n help='Number of heads to use in encoder attention')\r\n parser.add_argument('--project-input', type=str, metavar='EXPR',\r\n help='Use projections in self-attention [True, ...]')\r\n parser.add_argument('--gated-attention', type=str, metavar='EXPR',\r\n help='Use GLU layers in self-attention projections [True, ...]')\r\n parser.add_argument('--downsample', type=str, metavar='EXPR',\r\n help='Use 
downsampling in self-attention [True, ...]')\r\n parser.add_argument('--pretrained-checkpoint', metavar='DIR',\r\n help='path to load checkpoint from pretrained model')\r\n parser.add_argument('--pretrained', type=str, metavar='EXPR',\r\n help='use pretrained model when training [True, ...]')\r\n # fmt: on\r\n\r\n @classmethod\r\n def build_model(cls, args, task):\r\n \"\"\"Build a new model instance.\"\"\"\r\n trained_encoder, trained_decoder = None, None\r\n pretrained = eval(args.pretrained)\r\n if pretrained:\r\n print(\"| loading pretrained model\")\r\n trained_model = checkpoint_utils.load_model_ensemble(\r\n filenames=[args.pretrained_checkpoint],\r\n task=task,\r\n )[0][0]\r\n trained_decoder = list(trained_model.children())[1]\r\n trained_encoder = list(trained_model.children())[0]\r\n\r\n # freeze pretrained model\r\n for param in trained_decoder.parameters():\r\n param.requires_grad = False\r\n for param in trained_encoder.parameters():\r\n param.requires_grad = False\r\n\r\n encoder = FConvEncoder(\r\n task.source_dictionary,\r\n embed_dim=args.encoder_embed_dim,\r\n convolutions=eval(args.encoder_layers),\r\n dropout=args.dropout,\r\n max_positions=args.max_source_positions,\r\n attention=eval(args.encoder_attention),\r\n attention_nheads=args.encoder_attention_nheads\r\n )\r\n\r\n decoder = FConvDecoder(\r\n task.target_dictionary,\r\n embed_dim=args.decoder_embed_dim,\r\n convolutions=eval(args.decoder_layers),\r\n out_embed_dim=args.decoder_out_embed_dim,\r\n attention=eval(args.decoder_attention),\r\n dropout=args.dropout,\r\n max_positions=args.max_target_positions,\r\n selfattention=eval(args.self_attention),\r\n attention_nheads=args.multihead_attention_nheads,\r\n selfattention_nheads=args.multihead_self_attention_nheads,\r\n project_input=eval(args.project_input),\r\n gated_attention=eval(args.gated_attention),\r\n downsample=eval(args.downsample),\r\n pretrained=pretrained,\r\n trained_decoder=trained_decoder\r\n )\r\n model = FConvModelSelfAtt(encoder, decoder, trained_encoder)\r\n\r\n return model\r\n\r\n @property\r\n def pretrained(self):\r\n return self.pretrained_encoder is not None\r\n\r\n\r\nclass FConvEncoder(FairseqEncoder):\r\n \"\"\"Convolutional encoder\"\"\"\r\n def __init__(\r\n self, dictionary, embed_dim=512, max_positions=1024,\r\n convolutions=((512, 3),) * 20, dropout=0.1, attention=False,\r\n attention_nheads=1,\r\n ):\r\n super().__init__(dictionary)\r\n self.dropout = dropout\r\n self.num_attention_layers = None\r\n\r\n num_embeddings = len(dictionary)\r\n self.padding_idx = dictionary.pad()\r\n self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)\r\n self.embed_positions = PositionalEmbedding(\r\n max_positions,\r\n embed_dim,\r\n self.padding_idx,\r\n )\r\n\r\n def expand_bool_array(val):\r\n if isinstance(val, bool):\r\n # expand True into [True, True, ...] 
and do the same with False\r\n return [val] * len(convolutions)\r\n return val\r\n\r\n attention = expand_bool_array(attention)\r\n\r\n in_channels = convolutions[0][0]\r\n self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)\r\n self.projections = nn.ModuleList()\r\n self.convolutions = nn.ModuleList()\r\n self.attention = nn.ModuleList()\r\n self.attproj = nn.ModuleList()\r\n for i, (out_channels, kernel_size) in enumerate(convolutions):\r\n self.projections.append(\r\n Linear(in_channels, out_channels) if in_channels != out_channels else None\r\n )\r\n self.convolutions.append(\r\n ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout)\r\n )\r\n\r\n self.attention.append(\r\n SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None\r\n )\r\n in_channels = out_channels\r\n\r\n self.fc2 = Linear(in_channels, embed_dim)\r\n\r\n def forward(self, src_tokens, src_lengths):\r\n # embed tokens and positions\r\n x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n input_embedding = x.transpose(0, 1)\r\n\r\n # project to size of convolution\r\n x = self.fc1(x)\r\n\r\n encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B\r\n if not encoder_padding_mask.any():\r\n encoder_padding_mask = None\r\n\r\n # B x T x C -> T x B x C\r\n x = x.transpose(0, 1)\r\n\r\n # temporal convolutions\r\n for proj, conv, attention in zip(self.projections, self.convolutions, self.attention):\r\n residual = x if proj is None else proj(x)\r\n\r\n if encoder_padding_mask is not None:\r\n x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)\r\n\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n padding_l = (conv.kernel_size[0] - 1) // 2\r\n padding_r = conv.kernel_size[0] // 2\r\n x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))\r\n x = conv(x)\r\n x = F.glu(x, dim=2)\r\n if attention is not None:\r\n x = attention(x)\r\n x = (x + residual) * math.sqrt(0.5)\r\n\r\n # T x B x C -> B x T x C\r\n x = x.transpose(1, 0)\r\n\r\n # project back to size of embedding\r\n x = self.fc2(x)\r\n\r\n if encoder_padding_mask is not None:\r\n encoder_padding_mask = encoder_padding_mask.t() # -> B x T\r\n x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)\r\n\r\n # scale gradients (this only affects backward, not forward)\r\n x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))\r\n\r\n # add output to input embedding for attention\r\n y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5)\r\n\r\n return {\r\n 'encoder_out': (x, y),\r\n 'encoder_padding_mask': encoder_padding_mask, # B x T\r\n }\r\n\r\n def reorder_encoder_out(self, encoder_out, new_order):\r\n encoder_out['encoder_out'] = tuple(\r\n eo.index_select(0, new_order) for eo in encoder_out['encoder_out']\r\n )\r\n\r\n if encoder_out['encoder_padding_mask'] is not None:\r\n encoder_out['encoder_padding_mask'] = \\\r\n encoder_out['encoder_padding_mask'].index_select(0, new_order)\r\n\r\n if 'pretrained' in encoder_out:\r\n encoder_out['pretrained']['encoder_out'] = tuple(\r\n eo.index_select(0, new_order)\r\n for eo in encoder_out['pretrained']['encoder_out']\r\n )\r\n\r\n return encoder_out\r\n\r\n def max_positions(self):\r\n \"\"\"Maximum input length supported by the encoder.\"\"\"\r\n return self.embed_positions.max_positions()\r\n\r\n\r\nclass FConvDecoder(FairseqDecoder):\r\n \"\"\"Convolutional decoder\"\"\"\r\n def __init__(\r\n self, dictionary, embed_dim=512, out_embed_dim=256, 
max_positions=1024,\r\n convolutions=((512, 3),) * 8, attention=True, dropout=0.1,\r\n selfattention=False, attention_nheads=1, selfattention_nheads=1,\r\n project_input=False, gated_attention=False, downsample=False,\r\n pretrained=False, trained_decoder=None,\r\n ):\r\n super().__init__(dictionary)\r\n self.register_buffer('version', torch.Tensor([2]))\r\n self.pretrained = pretrained\r\n self.pretrained_decoder = trained_decoder\r\n self.dropout = dropout\r\n self.need_attn = True\r\n in_channels = convolutions[0][0]\r\n\r\n def expand_bool_array(val):\r\n if isinstance(val, bool):\r\n # expand True into [True, True, ...] and do the same with False\r\n return [val] * len(convolutions)\r\n return val\r\n\r\n attention = expand_bool_array(attention)\r\n selfattention = expand_bool_array(selfattention)\r\n\r\n if not isinstance(attention, list) or len(attention) != len(convolutions):\r\n raise ValueError('Attention is expected to be a list of booleans of '\r\n 'length equal to the number of layers.')\r\n\r\n num_embeddings = len(dictionary)\r\n padding_idx = dictionary.pad()\r\n self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)\r\n\r\n self.embed_positions = PositionalEmbedding(\r\n max_positions,\r\n embed_dim,\r\n padding_idx,\r\n )\r\n\r\n self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)\r\n self.projections = nn.ModuleList()\r\n self.convolutions = nn.ModuleList()\r\n self.attention = nn.ModuleList()\r\n self.selfattention = nn.ModuleList()\r\n self.attproj = nn.ModuleList()\r\n for i, (out_channels, kernel_size) in enumerate(convolutions):\r\n self.projections.append(\r\n Linear(in_channels, out_channels) if in_channels != out_channels else None\r\n )\r\n self.convolutions.append(\r\n LinearizedConv1d(\r\n in_channels, out_channels * 2, kernel_size,\r\n padding=(kernel_size - 1), dropout=dropout,\r\n )\r\n )\r\n\r\n self.attention.append(\r\n DownsampledMultiHeadAttention(\r\n out_channels, embed_dim, attention_nheads,\r\n project_input=project_input, gated=False, downsample=False,\r\n ) if attention[i] else None\r\n )\r\n\r\n self.attproj.append(\r\n Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None\r\n )\r\n self.selfattention.append(\r\n SelfAttention(\r\n out_channels, embed_dim, selfattention_nheads,\r\n project_input=project_input, gated=gated_attention,\r\n downsample=downsample,\r\n ) if selfattention[i] else None\r\n )\r\n in_channels = out_channels\r\n\r\n self.fc2 = Linear(in_channels, out_embed_dim)\r\n self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)\r\n\r\n # model fusion\r\n if self.pretrained:\r\n # independent gates are learned from the concatenated input\r\n self.gate1 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid())\r\n self.gate2 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid())\r\n # pretrained and trained models are joined\r\n self.joining = nn.Sequential(\r\n Linear(out_embed_dim*2, out_embed_dim*2),\r\n LayerNorm(out_embed_dim*2),\r\n nn.GLU(),\r\n Linear(out_embed_dim, out_embed_dim*2),\r\n LayerNorm(out_embed_dim*2),\r\n nn.GLU(),\r\n Linear(out_embed_dim, out_embed_dim),\r\n LayerNorm(out_embed_dim)\r\n )\r\n # pretrained model contains an output layer that is nhid -> vocab size\r\n # but the models are combined in their hidden state\r\n # the hook stores the output of the pretrained model forward\r\n self.pretrained_outputs = {}\r\n\r\n def save_output():\r\n def hook(a, b, output):\r\n self.pretrained_outputs[\"out\"] = output\r\n return 
hook\r\n\r\n self.pretrained_decoder.fc2.register_forward_hook(save_output())\r\n\r\n def forward(self, prev_output_tokens, encoder_out):\r\n trained_encoder_out = encoder_out['pretrained'] if self.pretrained else None\r\n encoder_out = encoder_out['encoder']['encoder_out']\r\n\r\n encoder_a, encoder_b = self._split_encoder_out(encoder_out)\r\n\r\n # embed positions\r\n positions = self.embed_positions(prev_output_tokens)\r\n\r\n # embed tokens and positions\r\n x = self.embed_tokens(prev_output_tokens) + positions\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n target_embedding = x.transpose(0, 1)\r\n\r\n # project to size of convolution\r\n x = self.fc1(x)\r\n\r\n # B x T x C -> T x B x C\r\n x = x.transpose(0, 1)\r\n\r\n # temporal convolutions\r\n avg_attn_scores = None\r\n for proj, conv, attention, selfattention, attproj in zip(\r\n self.projections, self.convolutions, self.attention, self.selfattention, self.attproj\r\n ):\r\n residual = x if proj is None else proj(x)\r\n\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n x = conv(x)\r\n x = F.glu(x, dim=2)\r\n\r\n # attention\r\n if attention is not None:\r\n r = x\r\n x, attn_scores = attention(attproj(x) + target_embedding, encoder_a, encoder_b)\r\n x = x + r\r\n if not self.training and self.need_attn:\r\n if avg_attn_scores is None:\r\n avg_attn_scores = attn_scores\r\n else:\r\n avg_attn_scores.add_(attn_scores)\r\n\r\n if selfattention is not None:\r\n x = selfattention(x)\r\n\r\n x = (x + residual) * math.sqrt(0.5)\r\n\r\n # T x B x C -> B x T x C\r\n x = x.transpose(0, 1)\r\n\r\n # project back to size of vocabulary\r\n x = self.fc2(x)\r\n x = F.dropout(x, p=self.dropout, training=self.training)\r\n if not self.pretrained:\r\n x = self.fc3(x)\r\n\r\n # fusion gating\r\n if self.pretrained:\r\n trained_x, _ = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out)\r\n y = torch.cat([x, self.pretrained_outputs[\"out\"]], dim=-1)\r\n gate1 = self.gate1(y)\r\n gate2 = self.gate2(y)\r\n gated_x1 = gate1 * x\r\n gated_x2 = gate2 * self.pretrained_outputs[\"out\"]\r\n fusion = torch.cat([gated_x1, gated_x2], dim=-1)\r\n fusion = self.joining(fusion)\r\n fusion_output = self.fc3(fusion)\r\n return fusion_output, avg_attn_scores\r\n else:\r\n return x, avg_attn_scores\r\n\r\n def max_positions(self):\r\n \"\"\"Maximum output length supported by the decoder.\"\"\"\r\n return self.embed_positions.max_positions()\r\n\r\n def make_generation_fast_(self, need_attn=False, **kwargs):\r\n self.need_attn = need_attn\r\n\r\n def _split_encoder_out(self, encoder_out):\r\n \"\"\"Split and transpose encoder outputs.\"\"\"\r\n # transpose only once to speed up attention layers\r\n encoder_a, encoder_b = encoder_out\r\n encoder_a = encoder_a.transpose(0, 1).contiguous()\r\n encoder_b = encoder_b.transpose(0, 1).contiguous()\r\n result = (encoder_a, encoder_b)\r\n return result\r\n\r\n\r\nclass SelfAttention(nn.Module):\r\n\r\n def __init__(self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False):\r\n super().__init__()\r\n self.attention = DownsampledMultiHeadAttention(\r\n out_channels, embed_dim, num_heads, dropout=0, bias=True,\r\n project_input=project_input, gated=gated, downsample=downsample,\r\n )\r\n self.in_proj_q = Linear(out_channels, embed_dim)\r\n self.in_proj_k = Linear(out_channels, embed_dim)\r\n self.in_proj_v = Linear(out_channels, embed_dim)\r\n self.ln = LayerNorm(out_channels)\r\n\r\n def forward(self, x):\r\n residual = x\r\n query = 
self.in_proj_q(x)\r\n key = self.in_proj_k(x)\r\n value = self.in_proj_v(x)\r\n x, _ = self.attention(query, key, value, mask_future_timesteps=True, use_scalar_bias=True)\r\n return self.ln(x + residual)\r\n\r\n\r\ndef Embedding(num_embeddings, embedding_dim, padding_idx):\r\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\r\n m.weight.data.normal_(0, 0.1)\r\n return m\r\n\r\n\r\ndef PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):\r\n m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)\r\n m.weight.data.normal_(0, 0.1)\r\n return m\r\n\r\n\r\ndef Linear(in_features, out_features, dropout=0.):\r\n \"\"\"Weight-normalized Linear layer (input: N x T x C)\"\"\"\r\n m = nn.Linear(in_features, out_features)\r\n m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))\r\n m.bias.data.zero_()\r\n return m\r\n\r\n\r\ndef LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0., **kwargs):\r\n \"\"\"Weight-normalized Conv1d layer optimized for decoding\"\"\"\r\n m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)\r\n std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\r\n m.weight.data.normal_(mean=0, std=std)\r\n m.bias.data.zero_()\r\n return m\r\n\r\n\r\ndef ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):\r\n \"\"\"Weight-normalized Conv1d layer\"\"\"\r\n from fairseq.modules import ConvTBC\r\n m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)\r\n std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\r\n m.weight.data.normal_(mean=0, std=std)\r\n m.bias.data.zero_()\r\n return m\r\n\r\n\r\n@register_model_architecture('fconv_self_att', 'fconv_self_att')\r\ndef base_architecture(args):\r\n args.dropout = getattr(args, 'dropout', 0.1)\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\r\n args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 3')\r\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\r\n args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 8')\r\n args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)\r\n args.decoder_attention = getattr(args, 'decoder_attention', 'True')\r\n args.self_attention = getattr(args, 'self_attention', 'False')\r\n args.encoder_attention = getattr(args, 'encoder_attention', 'False')\r\n args.multihead_attention_nheads = getattr(args, 'multihead_attention_nheads', 1)\r\n args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 1)\r\n args.encoder_attention_nheads = getattr(args, 'encoder_attention_nheads', 1)\r\n args.project_input = getattr(args, 'project_input', 'False')\r\n args.gated_attention = getattr(args, 'gated_attention', 'False')\r\n args.downsample = getattr(args, 'downsample', 'False')\r\n args.pretrained_checkpoint = getattr(args, 'pretrained_checkpoint', '')\r\n args.pretrained = getattr(args, 'pretrained', 'False')\r\n\r\n\r\n@register_model_architecture('fconv_self_att', 'fconv_self_att_wp')\r\ndef fconv_self_att_wp(args):\r\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)\r\n args.encoder_layers = getattr(args, 'encoder_layers', '[(128, 3)] * 2 + [(512,3)] * 1')\r\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)\r\n args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1')\r\n args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)\r\n 
args.self_attention = getattr(args, 'self_attention', 'True')\r\n args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 4)\r\n args.project_input = getattr(args, 'project_input', 'True')\r\n args.gated_attention = getattr(args, 'gated_attention', 'True')\r\n args.downsample = getattr(args, 'downsample', 'True')\r\n base_architecture(args)\r\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport json\r\nimport os\r\n\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom fairseq.data import (\r\n data_utils,\r\n Dictionary,\r\n encoders,\r\n IdDataset,\r\n ListDataset,\r\n NestedDictionaryDataset,\r\n NumSamplesDataset,\r\n NumelDataset,\r\n RawLabelDataset,\r\n RightPadDataset,\r\n SortDataset,\r\n)\r\nfrom fairseq.tasks import FairseqTask, register_task\r\n\r\n\r\n@register_task('commonsense_qa')\r\nclass CommonsenseQATask(FairseqTask):\r\n \"\"\"Task to finetune RoBERTa for Commonsense QA.\"\"\"\r\n\r\n @staticmethod\r\n def add_args(parser):\r\n \"\"\"Add task-specific arguments to the parser.\"\"\"\r\n parser.add_argument('data', metavar='DIR',\r\n help='path to data directory; we load <split>.jsonl')\r\n parser.add_argument('--init-token', type=int, default=None,\r\n help='add token at the beginning of each batch item')\r\n parser.add_argument('--num-classes', type=int, default=5)\r\n\r\n def __init__(self, args, vocab):\r\n super().__init__(args)\r\n self.vocab = vocab\r\n self.mask = vocab.add_symbol('<mask>')\r\n\r\n self.bpe = encoders.build_bpe(args)\r\n\r\n @classmethod\r\n def load_dictionary(cls, filename):\r\n \"\"\"Load the dictionary from the filename\r\n\r\n Args:\r\n filename (str): the filename\r\n \"\"\"\r\n dictionary = Dictionary.load(filename)\r\n dictionary.add_symbol('<mask>')\r\n return dictionary\r\n\r\n @classmethod\r\n def setup_task(cls, args, **kwargs):\r\n assert args.criterion == 'sentence_ranking', 'Must set --criterion=sentence_ranking'\r\n\r\n # load data and label dictionaries\r\n vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))\r\n print('| dictionary: {} types'.format(len(vocab)))\r\n\r\n return cls(args, vocab)\r\n\r\n def load_dataset(self, split, epoch=0, combine=False, data_path=None, return_only=False, **kwargs):\r\n \"\"\"Load a given dataset split.\r\n\r\n Args:\r\n split (str): name of the split (e.g., train, valid, test)\r\n \"\"\"\r\n\r\n def binarize(s, append_bos=False):\r\n if self.bpe is not None:\r\n s = self.bpe.encode(s)\r\n tokens = self.vocab.encode_line(\r\n s, append_eos=True, add_if_not_exist=False,\r\n ).long()\r\n if append_bos and self.args.init_token is not None:\r\n tokens = torch.cat([tokens.new([self.args.init_token]), tokens])\r\n return tokens\r\n\r\n if data_path is None:\r\n data_path = os.path.join(self.args.data, split + '.jsonl')\r\n if not os.path.exists(data_path):\r\n raise FileNotFoundError('Cannot find data: {}'.format(data_path))\r\n\r\n src_tokens = [[] for i in range(self.args.num_classes)]\r\n src_lengths = [[] for i in range(self.args.num_classes)]\r\n labels = []\r\n\r\n with open(data_path) as h:\r\n for line in h:\r\n example = json.loads(line.strip())\r\n if 'answerKey' in example:\r\n label = ord(example['answerKey']) - ord('A')\r\n labels.append(label)\r\n question = example['question']['stem']\r\n assert len(example['question']['choices']) == self.args.num_classes\r\n # format: `<s> Q: Where would I not want a fox? 
</s> A: hen house </s>`\r\n question = 'Q: ' + question\r\n question_toks = binarize(question, append_bos=True)\r\n for i, choice in enumerate(example['question']['choices']):\r\n src = 'A: ' + choice['text']\r\n src_bin = torch.cat([question_toks, binarize(src)])\r\n src_tokens[i].append(src_bin)\r\n src_lengths[i].append(len(src_bin))\r\n assert all(len(src_tokens[0]) == len(src_tokens[i]) for i in range(self.args.num_classes))\r\n assert len(src_tokens[0]) == len(src_lengths[0])\r\n assert len(labels) == 0 or len(labels) == len(src_tokens[0])\r\n\r\n for i in range(self.args.num_classes):\r\n src_lengths[i] = np.array(src_lengths[i])\r\n src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])\r\n src_lengths[i] = ListDataset(src_lengths[i])\r\n\r\n dataset = {\r\n 'id': IdDataset(),\r\n 'nsentences': NumSamplesDataset(),\r\n 'ntokens': NumelDataset(src_tokens[0], reduce=True),\r\n }\r\n\r\n for i in range(self.args.num_classes):\r\n dataset.update({\r\n 'net_input{}'.format(i + 1): {\r\n 'src_tokens': RightPadDataset(\r\n src_tokens[i],\r\n pad_idx=self.source_dictionary.pad(),\r\n ),\r\n 'src_lengths': src_lengths[i],\r\n }\r\n })\r\n\r\n if len(labels) > 0:\r\n dataset.update({'target': RawLabelDataset(labels)})\r\n\r\n dataset = NestedDictionaryDataset(\r\n dataset,\r\n sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],\r\n )\r\n\r\n with data_utils.numpy_seed(self.args.seed):\r\n dataset = SortDataset(\r\n dataset,\r\n # shuffle\r\n sort_order=[np.random.permutation(len(dataset))],\r\n )\r\n\r\n print('| Loaded {} with {} samples'.format(split, len(dataset)))\r\n\r\n self.datasets[split] = dataset\r\n return self.datasets[split]\r\n\r\n def build_model(self, args):\r\n from fairseq import models\r\n model = models.build_model(args, self)\r\n\r\n model.register_classification_head(\r\n 'sentence_classification_head',\r\n num_classes=1,\r\n )\r\n\r\n return model\r\n\r\n @property\r\n def source_dictionary(self):\r\n return self.vocab\r\n\r\n @property\r\n def target_dictionary(self):\r\n return self.vocab\r\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\r\n#\r\n# This source code is licensed under the MIT license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport torch\r\nimport torch.optim\r\n\r\nfrom . import FairseqOptimizer, register_optimizer\r\n\r\n\r\n@register_optimizer('adamax')\r\nclass FairseqAdamax(FairseqOptimizer):\r\n def __init__(self, args, params):\r\n super().__init__(args)\r\n self._optimizer = Adamax(params, **self.optimizer_config)\r\n\r\n @staticmethod\r\n def add_args(parser):\r\n \"\"\"Add optimizer-specific arguments to the parser.\"\"\"\r\n # fmt: off\r\n parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B',\r\n help='betas for Adam optimizer')\r\n parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D',\r\n help='epsilon for Adam optimizer')\r\n parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',\r\n help='weight decay')\r\n parser.add_argument('--no-bias-correction', default=False, action='store_true',\r\n help='disable bias correction')\r\n # fmt: on\r\n\r\n @property\r\n def optimizer_config(self):\r\n \"\"\"\r\n Return a kwarg dictionary that will be used to override optimizer\r\n args stored in checkpoints. This allows us to load a checkpoint and\r\n resume training using a different set of optimizer args, e.g., with a\r\n different learning rate.\r\n \"\"\"\r\n return {\r\n 'lr': self.args.lr[0],\r\n 'betas': eval(self.args.adamax_betas),\r\n 'eps': self.args.adamax_eps,\r\n 'weight_decay': self.args.weight_decay,\r\n 'bias_correction': not self.args.no_bias_correction,\r\n }\r\n\r\n\r\nclass Adamax(torch.optim.Optimizer):\r\n \"\"\"Implements Adamax algorithm (a variant of Adam based on infinity norm).\r\n\r\n It has been proposed in `Adam: A Method for Stochastic Optimization`__.\r\n\r\n Compared to the version in PyTorch, this version implements a fix for weight decay.\r\n\r\n Arguments:\r\n params (iterable): iterable of parameters to optimize or dicts defining\r\n parameter groups\r\n lr (float, optional): learning rate (default: 2e-3)\r\n betas (Tuple[float, float], optional): coefficients used for computing\r\n running averages of gradient and its square\r\n eps (float, optional): term added to the denominator to improve\r\n numerical stability (default: 1e-8)\r\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\r\n bias_correction (bool, optional): enable bias correction (default: True)\r\n\r\n __ https://arxiv.org/abs/1412.6980\r\n \"\"\"\r\n\r\n def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,\r\n weight_decay=0, bias_correction=True):\r\n if not 0.0 <= lr:\r\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\r\n if not 0.0 <= eps:\r\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\r\n if not 0.0 <= betas[0] < 1.0:\r\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\r\n if not 0.0 <= betas[1] < 1.0:\r\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\r\n if not 0.0 <= weight_decay:\r\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\r\n\r\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,\r\n bias_correction=bias_correction)\r\n super(Adamax, self).__init__(params, defaults)\r\n\r\n @property\r\n def supports_memory_efficient_fp16(self):\r\n return True\r\n\r\n def step(self, closure=None):\r\n \"\"\"Performs a single optimization step.\r\n\r\n Arguments:\r\n closure 
(callable, optional): A closure that reevaluates the model\r\n and returns the loss.\r\n \"\"\"\r\n loss = None\r\n if closure is not None:\r\n loss = closure()\r\n\r\n for group in self.param_groups:\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n grad = p.grad.data.float()\r\n if grad.is_sparse:\r\n raise RuntimeError('Adamax does not support sparse gradients')\r\n\r\n p_data_fp32 = p.data.float()\r\n\r\n state = self.state[p]\r\n\r\n # State initialization\r\n if len(state) == 0:\r\n state['step'] = 0\r\n state['exp_avg'] = torch.zeros_like(p_data_fp32)\r\n state['exp_inf'] = torch.zeros_like(p_data_fp32)\r\n else:\r\n state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\r\n state['exp_inf'] = state['exp_inf'].type_as(p_data_fp32)\r\n\r\n exp_avg, exp_inf = state['exp_avg'], state['exp_inf']\r\n beta1, beta2 = group['betas']\r\n eps = group['eps']\r\n\r\n state['step'] += 1\r\n\r\n # Update biased first moment estimate.\r\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\r\n\r\n # Update the exponentially weighted infinity norm.\r\n torch.max(\r\n exp_inf.mul_(beta2),\r\n grad.abs_(),\r\n out=exp_inf,\r\n )\r\n\r\n step_size = group['lr']\r\n if group['bias_correction']:\r\n bias_correction = 1 - beta1 ** state['step']\r\n step_size /= bias_correction\r\n\r\n if group['weight_decay'] != 0:\r\n p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\r\n\r\n p_data_fp32.addcdiv_(-step_size, exp_avg, exp_inf.add(eps))\r\n\r\n p.data.copy_(p_data_fp32)\r\n\r\n return loss\r\n"
] | [
[
"torch.nn.functional.log_softmax",
"torch.tensor"
],
[
"torch.nn.GLU",
"torch.nn.functional.glu",
"torch.Tensor",
"torch.nn.functional.dropout",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Embedding",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.functional.pad"
],
[
"numpy.array",
"numpy.maximum.reduce"
],
[
"torch.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
torlenor/word_embeddings | [
"74e9b4eb3ad4c89fa0c9319b07ff6c3e10f632d8"
] | [
"main.py"
] | [
"import glob\nfrom itertools import cycle\nfrom os import path\n\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objs as go\nimport streamlit as st\nfrom gensim.models import KeyedVectors, Word2Vec\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom sklearn.neighbors import KDTree\nfrom wordcloud import WordCloud\n\n\[email protected]\ndef clustering_on_wordvecs(word_vectors, num_clusters):\n kmeans_clustering = KMeans(n_clusters=num_clusters, init=\"k-means++\")\n kmeans_clustering.fit_predict(word_vectors)\n\n return kmeans_clustering.cluster_centers_\n\n\[email protected]\ndef get_top_words(index2word, k, centers, wordvecs):\n tree = KDTree(wordvecs)\n closest_points = [tree.query(np.reshape(x, (1, -1)), k=k) for x in centers]\n\n closest_words_idxs = [x[1] for x in closest_points]\n closest_words = {}\n for i in range(0, len(closest_words_idxs)):\n closest_words[\"Cluster #\" + str(i)] = [\n str(index2word[j]) for j in closest_words_idxs[i][0]\n ]\n df = pd.DataFrame(closest_words)\n df.index = df.index + 1\n\n return df\n\n\[email protected]\ndef generate_cloud(cluster_num, cmap, top_words, background_color=\"black\"):\n wc = WordCloud(\n width=800,\n height=400,\n background_color=background_color,\n max_words=2000,\n max_font_size=80,\n colormap=cmap,\n stopwords=[],\n include_numbers=True,\n )\n return wc.generate(\n \" \".join([word for word in top_words[\"Cluster #\" + str(cluster_num)]])\n )\n\n\[email protected]\ndef get_cloud(cluster_num, cmap, top_words, background_color=\"black\"):\n wordcloud = generate_cloud(cluster_num, cmap, top_words, background_color)\n fig = px.imshow(wordcloud)\n fig.update_layout(width=800, height=400, margin=dict(l=0, r=0, b=0, t=0))\n fig.update_layout(coloraxis_showscale=False)\n fig.update_xaxes(showticklabels=False)\n fig.update_yaxes(showticklabels=False)\n return fig\n\n\ndef display_cloud(cluster_num, cmap, top_words, background_color=\"black\"):\n st.write(\"Cluster #\" + str(cluster_num + 1))\n st.plotly_chart(get_cloud(cluster_num, cmap, top_words, background_color))\n\n\[email protected]\ndef generate_cloud_from_frequency(words, cmap, background_color=\"black\"):\n wc = WordCloud(\n width=800,\n height=400,\n background_color=background_color,\n max_words=2000,\n max_font_size=80,\n colormap=cmap,\n relative_scaling=0.5,\n )\n return wc.generate_from_frequencies(words)\n\n\ndef display_cloud_from_frequency(words, cmap, background_color=\"black\"):\n wordcloud = generate_cloud_from_frequency(words, cmap, background_color)\n fig = px.imshow(wordcloud)\n fig.update_layout(width=800, height=400, margin=dict(l=0, r=0, b=0, t=0))\n fig.update_layout(coloraxis_showscale=False)\n fig.update_xaxes(showticklabels=False)\n fig.update_yaxes(showticklabels=False)\n st.plotly_chart(fig)\n\n\[email protected]\ndef append_list(sim_words, words):\n list_of_words = []\n\n for i in range(len(sim_words)):\n sim_words_list = list(sim_words[i])\n sim_words_list.append(words)\n sim_words_tuple = tuple(sim_words_list)\n list_of_words.append(sim_words_tuple)\n\n return list_of_words\n\n\[email protected]\ndef calc_3d_pca(word_vectors):\n return PCA(random_state=0).fit_transform(word_vectors)[:, :3]\n\n\[email protected]\ndef calc_3d_tsne(word_vectors, perplexity, learning_rate, n_iter):\n return TSNE(\n n_components=3,\n random_state=0,\n perplexity=perplexity,\n learning_rate=learning_rate,\n n_iter=n_iter,\n ).fit_transform(word_vectors)[:, :3]\n\n\[email 
protected]\ndef generate_scatterplot_3D(\n word_vectors,\n user_input=None,\n words=None,\n annotation=\"On\",\n dim_red=\"PCA\",\n perplexity=0,\n learning_rate=0,\n iteration=0,\n topn=0,\n):\n if dim_red == \"PCA\":\n three_dim = calc_3d_pca(word_vectors)\n else:\n three_dim = calc_3d_tsne(word_vectors, perplexity, learning_rate, iteration)\n\n color = \"blue\"\n quiver = go.Cone(\n x=[0, 0, 0],\n y=[0, 0, 0],\n z=[0, 0, 0],\n u=[1.5, 0, 0],\n v=[0, 1.5, 0],\n w=[0, 0, 1.5],\n anchor=\"tail\",\n colorscale=[[0, color], [1, color]],\n showscale=False,\n )\n\n data = [quiver]\n\n count = 0\n for i in range(len(user_input)):\n\n trace = go.Scatter3d(\n x=three_dim[count : count + topn, 0],\n y=three_dim[count : count + topn, 1],\n z=three_dim[count : count + topn, 2],\n text=words[count : count + topn] if annotation == \"On\" else \"\",\n name=user_input[i],\n textposition=\"top center\",\n textfont_size=30,\n mode=\"markers+text\",\n marker={\"size\": 10, \"opacity\": 0.8, \"color\": 2},\n )\n\n data.append(trace)\n count = count + topn\n\n trace_input = go.Scatter3d(\n x=three_dim[count:, 0],\n y=three_dim[count:, 1],\n z=three_dim[count:, 2],\n text=words[count:],\n name=\"input words\",\n textposition=\"top center\",\n textfont_size=30,\n mode=\"markers+text\",\n marker={\"size\": 10, \"opacity\": 1, \"color\": \"black\"},\n )\n\n data.append(trace_input)\n\n # Configure the layout.\n layout = go.Layout(\n margin={\"l\": 0, \"r\": 0, \"b\": 0, \"t\": 0},\n showlegend=True,\n legend=dict(\n x=1, y=0.5, font=dict(family=\"Courier New\", size=24, color=\"black\")\n ),\n font=dict(family=\" Courier New \", size=15),\n autosize=False,\n width=800,\n height=600,\n )\n\n return go.Figure(data=data, layout=layout)\n\n\ndef display_scatterplot_3D(\n model,\n user_input=None,\n words=None,\n annotation=\"On\",\n dim_red=\"PCA\",\n perplexity=0,\n learning_rate=0,\n iteration=0,\n topn=0,\n):\n plot_figure = generate_scatterplot_3D(\n np.array([model[w] for w in words]),\n user_input,\n words,\n annotation,\n dim_red,\n perplexity,\n learning_rate,\n iteration,\n topn,\n )\n\n st.plotly_chart(plot_figure)\n\n\[email protected]\ndef calc_2d_pca(word_vectors):\n return PCA(random_state=0).fit_transform(word_vectors)[:, :2]\n\n\[email protected]\ndef calc_2d_tsne(word_vectors, perplexity, learning_rate, n_iter):\n return TSNE(\n random_state=0,\n perplexity=perplexity,\n learning_rate=learning_rate,\n n_iter=n_iter,\n ).fit_transform(word_vectors)[:, :2]\n\n\[email protected]\ndef generate_scatterplot_2D(\n word_vectors,\n user_input=None,\n words=None,\n annotation=\"On\",\n dim_red=\"PCA\",\n perplexity=0,\n learning_rate=0,\n iteration=0,\n topn=0,\n):\n if dim_red == \"PCA\":\n two_dim = calc_2d_pca(word_vectors)\n else:\n two_dim = calc_2d_tsne(word_vectors, perplexity, learning_rate, iteration)\n\n data = []\n count = 0\n for i in range(len(user_input)):\n\n trace = go.Scatter(\n x=two_dim[count : count + topn, 0],\n y=two_dim[count : count + topn, 1],\n text=words[count : count + topn] if annotation == \"On\" else \"\",\n name=user_input[i],\n textposition=\"top center\",\n textfont_size=20,\n mode=\"markers+text\",\n marker={\"size\": 15, \"opacity\": 0.8, \"color\": 2},\n )\n\n data.append(trace)\n count = count + topn\n\n trace_input = go.Scatter(\n x=two_dim[count:, 0],\n y=two_dim[count:, 1],\n text=words[count:],\n name=\"input words\",\n textposition=\"top center\",\n textfont_size=20,\n mode=\"markers+text\",\n marker={\"size\": 25, \"opacity\": 1, \"color\": \"black\"},\n )\n\n 
data.append(trace_input)\n\n # Configure the layout.\n layout = go.Layout(\n margin={\"l\": 0, \"r\": 0, \"b\": 0, \"t\": 0},\n showlegend=True,\n hoverlabel=dict(bgcolor=\"white\", font_size=20, font_family=\"Courier New\"),\n legend=dict(\n x=1, y=0.5, font=dict(family=\"Courier New\", size=24, color=\"black\")\n ),\n font=dict(family=\" Courier New \", size=15),\n autosize=False,\n width=800,\n height=600,\n )\n\n return go.Figure(data=data, layout=layout)\n\n\ndef display_scatterplot_2D(\n model,\n user_input=None,\n words=None,\n annotation=\"On\",\n dim_red=\"PCA\",\n perplexity=0,\n learning_rate=0,\n iteration=0,\n topn=0,\n):\n plot_figure = generate_scatterplot_2D(\n np.array([model[w] for w in words[: len(words) - len(user_input)]]),\n user_input,\n words,\n annotation,\n dim_red,\n perplexity,\n learning_rate,\n iteration,\n topn,\n )\n\n st.plotly_chart(plot_figure)\n\n\[email protected]()\ndef generate_horizontal_bar_plot(word, similarity):\n similarity = [round(elem, 2) for elem in similarity]\n\n data = go.Bar(\n x=similarity,\n y=word,\n orientation=\"h\",\n text=similarity,\n marker_color=4,\n textposition=\"auto\",\n )\n\n layout = go.Layout(\n font=dict(size=20),\n xaxis=dict(showticklabels=False, automargin=True),\n yaxis=dict(showticklabels=True, automargin=True, autorange=\"reversed\"),\n margin=dict(t=20, b=20, r=10),\n )\n\n return go.Figure(data=data, layout=layout)\n\n\ndef horizontal_bar(word, similarity):\n fig = generate_horizontal_bar_plot(word, similarity)\n st.plotly_chart(fig)\n\n\[email protected](allow_output_mutation=True)\ndef get_model(gensim_model, model_types, models_path, limit_vectors):\n if model_types[gensim_model] == \"full Gensim model\":\n return Word2Vec.load(models_path + gensim_model + \".model\", mmap=\"r\").wv\n elif model_types[gensim_model] == \"word2vec binary model\":\n if limit_vectors is None or limit_vectors == \"Yes\":\n return KeyedVectors.load_word2vec_format(\n models_path + gensim_model + \".bin\", binary=True, limit=500000\n )\n else:\n return KeyedVectors.load_word2vec_format(\n models_path + gensim_model + \".bin\", binary=True\n )\n elif model_types[gensim_model] == \"Gensim kv model\":\n return KeyedVectors.load(models_path + gensim_model + \".kv\", mmap=\"r\")\n\n\n# Seems to be faster without cache, probably because of model passing model\ndef get_similarity_matrix_figure(model, similarity_matrix_input):\n x = similarity_matrix_input\n y = similarity_matrix_input\n\n z = []\n # TODO: Performance optimization necessary\n for xx in x:\n row = []\n for yy in x:\n row.append(model.similarity(xx, yy))\n z.append(row)\n\n fig = px.imshow(z, labels=dict(x=\"Word\", y=\"Word\", color=\"Similarity\"), x=x, y=y)\n fig.update_xaxes(side=\"top\")\n fig.update_layout(width=800, height=600, margin=dict(l=0, r=0, b=0, t=0))\n\n return fig\n\n\ndef main():\n models_path = \"./models/\"\n\n models = [\n model.rstrip(\".model\").lstrip(models_path)\n for model in glob.glob(models_path + \"*.model\")\n ]\n\n bin_models = [\n model.rstrip(\".bin\").lstrip(models_path)\n for model in glob.glob(models_path + \"*.bin\")\n ]\n\n kv_models = [\n model.rstrip(\".kv\").lstrip(models_path)\n for model in glob.glob(models_path + \"*.kv\")\n ]\n\n model_descriptions = {}\n model_types = {}\n for model in models:\n model_types[model] = \"full Gensim model\"\n description_file = models_path + model + \".txt\"\n if path.isfile(description_file):\n with open(description_file, \"r\") as file:\n description = file.read()\n model_descriptions[model] = 
description.rstrip(\"\\n\")\n else:\n model_descriptions[model] = f\"No model description found for **{model}**\"\n\n for model in bin_models:\n model_types[model] = \"word2vec binary model\"\n description_file = models_path + model + \".txt\"\n if path.isfile(description_file):\n with open(description_file, \"r\") as file:\n description = file.read()\n model_descriptions[model] = description.rstrip(\"\\n\")\n else:\n model_descriptions[model] = f\"No model description found for **{model}**\"\n\n for model in kv_models:\n model_types[model] = \"Gensim kv model\"\n description_file = models_path + model + \".txt\"\n if path.isfile(description_file):\n with open(description_file, \"r\") as file:\n description = file.read()\n model_descriptions[model] = description.rstrip(\"\\n\")\n else:\n model_descriptions[model] = f\"No model description found for **{model}**\"\n\n models += bin_models + kv_models\n\n gensim_model = st.sidebar.selectbox(\"Word2Vec model to use:\", models)\n\n if model_types[gensim_model] == \"word2vec binary model\":\n limit_vectors = st.sidebar.radio(\n \"Limit the amount of vectors loaded. Warning: If turned off may need tons of memory!\",\n (\"Yes\", \"No\"),\n )\n else:\n limit_vectors = \"No\"\n\n model = get_model(gensim_model, model_types, models_path, limit_vectors)\n\n most_similar_method = st.sidebar.selectbox(\n \"Similarity method:\", (\"Cosine\", \"3CosMul\")\n )\n\n user_input = st.sidebar.text_input(\n \"Type the word that you want to investigate. Separate more than one word by semicolon (;). You can also group words like [woman, king, -man] where words with '-' in front count negatively.\",\n \"\",\n )\n\n top_n = st.sidebar.slider(\n \"Select the amount of words associated with the input words you want to visualize:\",\n 5,\n 100,\n (5),\n )\n\n dimension = st.sidebar.radio(\"Dimension of the visualization:\", (\"2D\", \"3D\"))\n\n dim_red = st.sidebar.selectbox(\"Dimension reduction method:\", (\"PCA\", \"t-SNE\"))\n\n if dim_red == \"t-SNE\":\n perplexity = st.sidebar.slider(\n \"t-SNE - Perplexity (It says (loosely) how to balance attention between local and global aspects of your data. 
Larger datasets usually require a larger perplexity):\",\n 5,\n 50,\n (30),\n )\n\n learning_rate = st.sidebar.slider(\"t-SNE - Learning rate:\", 10, 1000, (200))\n\n iteration = st.sidebar.slider(\n \"t-SNE - Number of iteration:\", 250, 100000, (1000)\n )\n\n else:\n perplexity = 0\n learning_rate = 0\n iteration = 0\n\n annotation = st.sidebar.radio(\n \"Enable or disable the annotation on the visualization:\", (\"On\", \"Off\")\n )\n\n word_cloud_background_color = st.sidebar.radio(\n \"Word cloud background color:\", (\"black\", \"white\")\n )\n\n show_top_5_most_similar_words = st.sidebar.radio(\n \"Enable or disable showing the top 5 most similar words and similarity word clouds for the words you entered:\",\n (\"On\", \"Off\"),\n )\n show_kmeans_cluster_word_clouds = st.sidebar.radio(\n \"Enable or disable k-means cluster calculation:\", (\"On\", \"Off\")\n )\n\n if show_kmeans_cluster_word_clouds == \"On\":\n n_kmeans_clusters = st.sidebar.slider(\n \"k-means - Number of clusters:\", 1, 20, (5)\n )\n n_words_per_kmeans_cluster = st.sidebar.slider(\n \"k-means - Number of words per cluster to show:\", 1, 20, (10)\n )\n\n show_similarity_matrix = st.sidebar.radio(\n \"Enable or disable similarity matrix:\", (\"On\", \"Off\"), index=1\n )\n\n if show_similarity_matrix == \"On\":\n similarity_matrix_input = st.sidebar.text_input(\n \"Type the words that you want to have in the similarity matrix. Separate more than one word by comma (,).\",\n )\n if similarity_matrix_input.strip() != \"\":\n similarity_matrix_input = [\n x.strip() for x in similarity_matrix_input.strip().split(\",\")\n ]\n else:\n similarity_matrix_input = []\n else:\n similarity_matrix_input = []\n\n st.title(\"Visualization of word embeddings\")\n\n model_description = (\n \"**\"\n + gensim_model\n + \"** (\"\n + model_types[gensim_model]\n + \"): \"\n + model_descriptions[gensim_model]\n )\n\n if user_input == \"\":\n similar_word = None\n\n st.markdown(\n \"Word embedding, in natural language processing, is a representation of the meaning of words. It can be obtained using a set of language modeling and feature learning techniques where words or phrases from the vocabulary are mapped to vectors of real numbers. For more details visit https://en.wikipedia.org/wiki/Word_embedding\"\n )\n\n st.markdown(\n \"You can use the sidebar to chose between different models, visualizations and options.\"\n )\n\n st.markdown(\n \"You can type the words you want to investigate into the sidebar. Separate more than one word by semicolon (;). You can also group words like [woman, king, -man] where words with '-' in front count negatively.\"\n )\n\n st.markdown(\n \"With the slider in the sidebar, you can pick the amount of words associated with the input word you want to visualize. The words to show is determined by either the cosine or 3CosMul similarity between the word vectors.\"\n )\n\n st.markdown(\n \"There is also the option to generate a similarity matrix in the sidebar. 
When enabled you can enter a separate set of words and a heatmap visualizing the similarties between the words is shown.\"\n )\n\n st.header(\"Description of selected model\")\n st.markdown(model_description)\n\n else:\n st.header(\"Description of selected model\")\n st.markdown(model_description)\n\n user_input = [x.strip() for x in user_input.split(\";\")]\n input_groups = {}\n for input in user_input:\n if input[0] == \"[\" and input[len(input) - 1] == \"]\":\n input_splitted = input.replace(\"[\", \"\").replace(\"]\", \"\").split(\",\")\n all = [x.strip() for x in input_splitted]\n positive = []\n negative = []\n for one in all:\n if one[0] == \"+\":\n positive.append(one.strip(\"+\"))\n elif one[0] == \"-\":\n negative.append(one.strip(\"-\"))\n else:\n positive.append(one)\n input_groups[input] = {\"positive\": positive, \"negative\": negative}\n else:\n input_groups[input] = {\"positive\": [input], \"negative\": []}\n result_word = []\n\n # TODO: Kmeans cluster visualization in a plot?\n\n for words in user_input:\n\n if most_similar_method == \"3CosMul\":\n sim_words = model.most_similar_cosmul(\n positive=input_groups[words][\"positive\"],\n negative=input_groups[words][\"negative\"],\n topn=top_n,\n )\n else:\n sim_words = model.most_similar(\n positive=input_groups[words][\"positive\"],\n negative=input_groups[words][\"negative\"],\n topn=top_n,\n )\n sim_words = append_list(sim_words, words)\n\n result_word.extend(sim_words)\n\n similar_word = [word[0] for word in result_word]\n similarity = [word[1] for word in result_word]\n similar_word.extend(user_input)\n\n if show_kmeans_cluster_word_clouds == \"On\":\n Z = model.vectors\n\n centers = clustering_on_wordvecs(Z, n_kmeans_clusters)\n\n top_words = get_top_words(\n model.index2word, n_words_per_kmeans_cluster, centers, Z\n )\n\n if dimension == \"2D\":\n st.header(\"2D \" + dim_red + \" Visualization\")\n if dim_red != \"PCA\":\n st.markdown(\n \"Try playing around with the t-SNE hyperparameters in the sidebar, they really matter!\"\n )\n display_scatterplot_2D(\n model,\n user_input,\n similar_word,\n annotation,\n dim_red,\n perplexity,\n learning_rate,\n iteration,\n top_n,\n )\n else:\n st.header(\"3D \" + dim_red + \" Visualization\")\n if dim_red != \"PCA\":\n st.markdown(\n \"Try playing around with the t-SNE hyperparameters in the sidebar, they really matter!\"\n )\n display_scatterplot_3D(\n model,\n user_input,\n similar_word,\n annotation,\n dim_red,\n perplexity,\n learning_rate,\n iteration,\n top_n,\n )\n\n cmaps = cycle(\n [\n \"flag\",\n \"prism\",\n \"ocean\",\n \"gist_earth\",\n \"terrain\",\n \"gist_stern\",\n \"gnuplot\",\n \"gnuplot2\",\n \"CMRmap\",\n \"cubehelix\",\n \"brg\",\n \"hsv\",\n \"gist_rainbow\",\n \"rainbow\",\n \"jet\",\n \"nipy_spectral\",\n \"gist_ncar\",\n ]\n )\n\n if show_top_5_most_similar_words == \"On\":\n st.header(\"The Top 5 Most Similar Words for Each Input\")\n count = 0\n for i in range(len(user_input)):\n\n st.markdown(\n \"The most similar words to *\"\n + str(user_input[i])\n + \"* and their similarity are:\"\n )\n horizontal_bar(\n similar_word[count : count + 5], similarity[count : count + 5]\n )\n\n count = count + top_n\n\n st.header(\"Word clouds of the most similar words for each input\")\n count = 0\n for i in range(len(user_input)):\n st.write(\"Word cloud for *\" + str(user_input[i]) + \"*\")\n words = []\n sum = 0\n for i in range(count, count + top_n):\n words += [(similar_word[i], similarity[i])]\n sum += similarity[i]\n df = pd.DataFrame(words, 
columns=[\"word\", \"similarity\"])\n df[\"similarity\"] = df[\"similarity\"] * 100\n df[\"similarity\"] = df[\"similarity\"].astype(\"int\")\n words = {}\n for index, r in df.iterrows():\n words[str(r[\"word\"])] = r[\"similarity\"]\n display_cloud_from_frequency(\n words, next(cmaps), word_cloud_background_color\n )\n\n count = count + top_n\n\n if show_kmeans_cluster_word_clouds == \"On\":\n st.header(\"k-means clusters\")\n st.write(\n \"This visualization shows the n closest words to the determined k-means cluster centers for each cluster. Use the sidebar to adjust the number of clusters and the number of words per cluster to show.\"\n )\n\n for i in range(n_kmeans_clusters):\n col = next(cmaps)\n display_cloud(i, col, top_words, word_cloud_background_color)\n\n if show_similarity_matrix == \"On\":\n st.header(\"Similarity matrix\")\n st.write(\n \"This visualization shows a heatmap representation of the similarities between the entered words.\"\n )\n if len(similarity_matrix_input) > 0:\n fig = get_similarity_matrix_figure(model, similarity_matrix_input)\n st.plotly_chart(fig)\n else:\n st.write(\"Use the sidebar to enter words for the similarity matrix.\")\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.cluster.KMeans",
"numpy.reshape",
"sklearn.neighbors.KDTree",
"pandas.DataFrame",
"sklearn.manifold.TSNE",
"numpy.array",
"sklearn.decomposition.PCA"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
creativeautomaton/OpenPrompt | [
"bd9ea544ab144d94af32d245101ba35c9d5a5a65"
] | [
"tutorial/3.1_LMBFF.py"
] | [
"# %% [markdown]\n# ## Text Classification with LM-BFF.\n# In this tutorial, we do sentiment analysis with automatic template and verbalizer generation. We use SST-2 as an example.\n\n# %% [markdown]\n# ### 1. load dataset\n\n# %%\n# import argparse\n# parser = argparse.ArgumentParser(\"\")\n# parser.add_argument(\"--lr\", type=float, default=5e-5)\n# args = parser.parse_args()\nfrom openprompt.data_utils.text_classification_dataset import SST2Processor\ndataset = {}\ndataset['train'] = SST2Processor().get_train_examples(\"./datasets/TextClassification/SST-2/16-shot/16-13\")\ndataset['validation'] = SST2Processor().get_dev_examples(\"./datasets/TextClassification/SST-2/16-shot/16-13\")\ndataset['test'] = SST2Processor().get_test_examples(\"./datasets/TextClassification/SST-2/16-shot/16-13\")\n\n# %% [markdown]\n# ### 2. build initial verbalizer and template\n# - note that if you wish to do automaitc label word generation, the verbalizer is not the final verbalizer, and is only used for template generation.\n# - note that if you wish to do automatic template generation, the template text may desirably include `{\"meta\":\"labelword\"}` so that label word can be used and remember to use `LMBFFTemplateGenerationTemplate` class so that \"labelword\" can be handled properly. Else you can just use `ManualTemplate`\n# - below is a template that expects plain text generation at each \"mask\" token position\n\n# %%\nprint('load model...')\nfrom openprompt.plms import load_plm\n# load mlm model for main tasks\nplm, tokenizer, model_config, WrapperClass = load_plm(\"roberta\", \"roberta-large\")\n\n# load generation model for template generation\ntemplate_generate_model, template_generate_tokenizer, template_generate_model_config, template_tokenizer_wrapper = load_plm('t5', 't5-large')\n\nfrom openprompt.prompts import ManualVerbalizer, ManualTemplate\nverbalizer = ManualVerbalizer(tokenizer=tokenizer, num_classes=2, label_words=[['terrible'],['great']])\n\nfrom openprompt.prompts.prompt_generator import LMBFFTemplateGenerationTemplate\ntemplate = LMBFFTemplateGenerationTemplate(tokenizer=template_generate_tokenizer, verbalizer=verbalizer, text='{\"placeholder\":\"text_a\"} {\"mask\"} {\"meta\":\"labelword\"} {\"mask\"}.')\n# template = ManualTemplate(tokenizer=tokenizer, text='{\"placeholder\":\"text_a\"} It is {\"mask\"}.')\n\n# view wrapped example\nwrapped_example = template.wrap_one_example(dataset['train'][0]) \nprint(wrapped_example)\n\n# %%\n# parameter setting\ncuda = True\nauto_t = True # whether to perform automatic template generation\nauto_v = True # whether to perform automatic label word generation\n\n\n# %%\n# train util function\nfrom openprompt.plms import load_plm\nfrom openprompt.prompts.prompt_generator import T5TemplateGenerator\nfrom openprompt.pipeline_base import PromptDataLoader, PromptForClassification\nfrom openprompt.prompts import ManualTemplate\nfrom openprompt.trainer import ClassificationRunner\nimport copy\nimport torch\nfrom transformers import AdamW, get_linear_schedule_with_warmup\n\n\ndef fit(model, train_dataloader, val_dataloader, loss_func, optimizer):\n best_score = 0.0\n for epoch in range(10):\n train_epoch(model, train_dataloader, loss_func, optimizer)\n score = evaluate(model, val_dataloader)\n if score > best_score:\n best_score = score\n return best_score\n \n\ndef train_epoch(model, train_dataloader, loss_func, optimizer):\n model.train()\n for step, inputs in enumerate(train_dataloader):\n if cuda:\n inputs = inputs.cuda()\n logits = model(inputs)\n 
labels = inputs['label']\n loss = loss_func(logits, labels)\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\ndef evaluate(model, val_dataloader):\n model.eval()\n allpreds = []\n alllabels = []\n with torch.no_grad():\n for step, inputs in enumerate(val_dataloader):\n if cuda:\n inputs = inputs.cuda()\n logits = model(inputs)\n labels = inputs['label']\n alllabels.extend(labels.cpu().tolist())\n allpreds.extend(torch.argmax(logits, dim=-1).cpu().tolist())\n acc = sum([int(i==j) for i,j in zip(allpreds, alllabels)])/len(allpreds)\n return acc\n\n\n# %% [markdown]\n# ### 3. automatic template and verbalizer generation\n\n# %%\nfrom tqdm import tqdm\n# template generation\nif auto_t:\n print('performing auto_t...')\n\n if cuda:\n template_generate_model = template_generate_model.cuda()\n template_generator = T5TemplateGenerator(template_generate_model, template_generate_tokenizer, template_tokenizer_wrapper, verbalizer, beam_width=5) # beam_width is set to 5 here for efficiency, to improve performance, try a larger number.\n\n\n dataloader = PromptDataLoader(dataset['train'], template, template_generate_tokenizer, template_tokenizer_wrapper, batch_size=len(dataset['train']), decoder_max_length=128) # register all data at once\n for data in dataloader:\n if cuda:\n data = data.cuda()\n template_generator._register_buffer(data)\n \n template_generate_model.eval()\n print('generating...')\n template_texts = template_generator._get_templates()\n\n original_template = template.text\n template_texts = [template_generator.convert_template(template_text, original_template) for template_text in template_texts]\n # template_generator._show_template()\n template_generator.release_memory()\n # generate a number of candidate template text\n print(template_texts)\n # iterate over each candidate and select the best one\n best_metrics = 0.0\n best_template_text = None\n for template_text in tqdm(template_texts):\n template = ManualTemplate(tokenizer, template_text)\n\n train_dataloader = PromptDataLoader(dataset['train'], template, tokenizer, WrapperClass)\n valid_dataloader = PromptDataLoader(dataset['validation'], template, tokenizer, WrapperClass)\n\n model = PromptForClassification(copy.deepcopy(plm), template, verbalizer)\n\n loss_func = torch.nn.CrossEntropyLoss()\n no_decay = ['bias', 'LayerNorm.weight']\n # it's always good practice to set no decay to biase and LayerNorm parameters\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=1e-4)\n if cuda:\n model = model.cuda()\n score = fit(model, train_dataloader, valid_dataloader, loss_func, optimizer)\n\n if score > best_metrics:\n print('best score:', score)\n print('template:', template_text)\n best_metrics = score\n best_template_text = template_text\n # use the best template\n template = ManualTemplate(tokenizer, text=best_template_text)\n print(best_template_text)\n\n# %%\n# verbalizer generation\nfrom openprompt.prompts.prompt_generator import RobertaVerbalizerGenerator\nif auto_v:\n print('performing auto_v...')\n # load generation model for template generation\n if cuda:\n plm = plm.cuda()\n verbalizer_generator = RobertaVerbalizerGenerator(model=plm, tokenizer=tokenizer, candidate_num=20, label_word_num_per_class=20)\n # to improve performace , try larger 
numbers\n\n dataloader = PromptDataLoader(dataset['train'], template, tokenizer, WrapperClass, batch_size=32)\n for data in dataloader:\n if cuda:\n data = data.cuda()\n verbalizer_generator.register_buffer(data)\n label_words_list = verbalizer_generator.generate()\n verbalizer_generator.release_memory()\n\n # iterate over each candidate and select the best one\n current_verbalizer = copy.deepcopy(verbalizer)\n best_metrics = 0.0\n best_label_words = None\n for label_words in tqdm(label_words_list):\n current_verbalizer.label_words = label_words\n train_dataloader = PromptDataLoader(dataset['train'], template, tokenizer, WrapperClass)\n valid_dataloader = PromptDataLoader(dataset['validation'], template, tokenizer, WrapperClass)\n\n model = PromptForClassification(copy.deepcopy(plm), template, current_verbalizer)\n\n loss_func = torch.nn.CrossEntropyLoss()\n no_decay = ['bias', 'LayerNorm.weight']\n # it's always good practice to set no decay to biase and LayerNorm parameters\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=1e-4)\n if cuda:\n model = model.cuda()\n score = fit(model, train_dataloader, valid_dataloader, loss_func, optimizer)\n\n if score > best_metrics:\n best_metrics = score\n best_label_words = label_words\n # use the best verbalizer\n print(best_label_words)\n verbalizer = ManualVerbalizer(tokenizer, num_classes=2, label_words=best_label_words)\n\n# %% [markdown]\n# ### 4. main training loop\n\n# %%\n# main training loop\ntrain_dataloader = PromptDataLoader(dataset['train'], template, tokenizer, WrapperClass)\nvalid_dataloader = PromptDataLoader(dataset['validation'], template, tokenizer, WrapperClass)\ntest_dataloader = PromptDataLoader(dataset['test'], template, tokenizer, WrapperClass)\n\n\nmodel = PromptForClassification(copy.deepcopy(plm), template, verbalizer)\nloss_func = torch.nn.CrossEntropyLoss()\nno_decay = ['bias', 'LayerNorm.weight']\n# it's always good practice to set no decay to biase and LayerNorm parameters\noptimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n]\n\noptimizer = AdamW(optimizer_grouped_parameters, lr=1e-4)\nif cuda:\n model = model.cuda()\nscore = fit(model, train_dataloader, valid_dataloader, loss_func, optimizer)\ntest_score = evaluate(model, test_dataloader)\nprint(test_score)\n\n\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.no_grad",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
guitarmind/HPA-competition-solutions | [
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d",
"547d53aaca148fdb5f4585526ad7364dfa47967d"
] | [
"wienerschnitzelgemeinschaft/src/shai/fastai/other_ensemble_scripts/enstw41.py",
"one_more_layer_of_stacking/src/models/pd_bninception_v3.py",
"wienerschnitzelgemeinschaft/src/shai/fastai/other_ensemble_scripts/enstw39c.py",
"wienerschnitzelgemeinschaft/src/Christof/models/GAPNet/5crop_1024/train1_ur.py",
"wienerschnitzelgemeinschaft/src/Christof/models/ResNet34/19/train.py",
"bestfitting/src/data_process/s5_train_match_external.py",
"one_more_layer_of_stacking/src/sf_stack.py",
"wienerschnitzelgemeinschaft/src/Christof/models/GAPNet/9_crop/1/train1_lm.py",
"wienerschnitzelgemeinschaft/src/Christof/models/GAPNet/13_ext/train4_2.py",
"bestfitting/src/data_process/s6_test_match_test.py",
"wienerschnitzelgemeinschaft/src/Christof/models/ResNet34/tests/21/train_0.py",
"wienerschnitzelgemeinschaft/src/Christof/models/GAPNet/11_tests_on_clr/train_russ_schedule.py",
"wienerschnitzelgemeinschaft/src/Christof/models/GAPNet/5_filtered/train.py"
] | [
"# individual nan corrected\n# Final nan matches highest probable label (optional)\n\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nSAMPLE = '../input/sample_submission.csv'\n\nlabel_names = {\n 0: \"Nucleoplasm\",\n 1: \"Nuclear membrane\",\n 2: \"Nucleoli\",\n 3: \"Nucleoli fibrillar center\",\n 4: \"Nuclear speckles\",\n 5: \"Nuclear bodies\",\n 6: \"Endoplasmic reticulum\",\n 7: \"Golgi apparatus\",\n 8: \"Peroxisomes\",\n 9: \"Endosomes\",\n 10: \"Lysosomes\",\n 11: \"Intermediate filaments\",\n 12: \"Actin filaments\",\n 13: \"Focal adhesion sites\",\n 14: \"Microtubules\",\n 15: \"Microtubule ends\",\n 16: \"Cytokinetic bridge\",\n 17: \"Mitotic spindle\",\n 18: \"Microtubule organizing center\",\n 19: \"Centrosome\",\n 20: \"Lipid droplets\",\n 21: \"Plasma membrane\",\n 22: \"Cell junctions\",\n 23: \"Mitochondria\",\n 24: \"Aggresome\",\n 25: \"Cytosol\",\n 26: \"Cytoplasmic bodies\",\n 27: \"Rods & rings\"\n }\n\ncolumn_sum = []\nsub_name = []\n\ndef expand(csv):\n sub = pd.read_csv(csv)\n print(csv, sub.isna().sum())\n\n sub = sub.replace(pd.np.nan, '101')\n sub[f'target_vec'] = sub['Predicted'].map(lambda x: list(map(int, x.strip().split())))\n for i in range(28):\n sub[f'{label_names[i]}'] = sub['Predicted'].map(\n lambda x: 1 if str(i) in x.strip().split() else 0)\n sub = sub.values\n sub = np.delete(sub, [1, 2], axis=1)\n\n a = sub[:, 1:]\n unique, counts = np.unique(a, return_counts=True)\n print('Unique counts:',np.asarray((unique, counts)).T)\n print('Total labels:{} Class-wise:{}'.format(a.sum(), a.sum(axis=0)))\n column_sum.append( a.sum(axis=0))\n sub_name.append(csv)\n return sub\n\n#======================================================================================================================\n# Input submissions\n#====================================================================================================================\nsub_dir = 'sub_dir_team/'\n\n#enstw39b\ndf_1 = expand('sub_dir_team/leak_brian_tommy_en_res34swa_re50xt_re101xtswa_wrn_4.8_562.csv') #1 +1\ndf_2 = expand( 'sub_dir_team/Christof_blend_4_580.csv') #2 +3\ndf_3 = expand('sub_dir_team/ens85bd_russ_616.csv') # 3 +3\ndf_4 = expand('sub_dir_team/enspreds103_12mdl_512-256_wtth0.45_leak_shai_593.csv') # 2+2\ndf_5 = expand('sub_dir_team/hill_m94d_dmytro_627.csv') # 3\ndf_6 = expand('sub_dir_team/voted_5_d_kevin_602.csv') #2 +2\ndf_7 = expand('sub_dir_team/hill_b93d_l2_615update.csv') #1\ndf_8 = expand('sub_dir_team/submission_loss_5fold_mean_2_GAP_chrs_602.csv') #2\n\ndf_9 = expand('sub_dir_team/hill_b92d_l2_615.csv') #1\ndf_10 = expand('sub_dir_team/hill_m92d_dmytro_617.csv') # 3\n\n# enstw36\n# df_1 = expand('sub_dir_team/leak_brian_tommy_en_res34swa_re50xt_re101xtswa_wrn_4.8_562.csv') #1\n# df_2 = expand( 'sub_dir_team/Christof_blend_4_580.csv') #3\n# df_3 = expand('sub_dir_team/ens85bd_russ_616.csv') #3\n# df_4 = expand('sub_dir_team/enspreds103_12mdl_512-256_wtth0.45_leak_shai_593.csv') #2\n# df_5 = expand('sub_dir_team/hill_m92d_dmytro_617.csv') # 3\n# df_6 = expand('sub_dir_team/voted_5_d_kevin_602.csv') #2\n# df_7 = expand('sub_dir_team/hill_b92d_l2_615.csv') #1\n#=======================================================================================================================\n# Visualize distribution\n#=======================================================================================================================\n# list =[0,1,2,3,4,5,6,7]\n# colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'orange']\n# w=0\n# for i in 
list:\n# x = np.arange(0, 28, 1)\n# plt.bar(x+w, column_sum[i],width = 0.08, color = colors[i], label=sub_name[i], )\n# w=w+0.09\n# plt.legend()\n# plt.grid(True)\n# plt.yscale('log')\n# plt.show()\n\n#=======================================================================================================================\n#=======================================================================================================================\nsum = df_1[:, 1:]*2 + \\\n df_2[:, 1:]*5 + \\\n df_3[:, 1:]*6 + \\\n df_4[:, 1:]*4 + \\\n df_5[:, 1:]*3 + \\\n df_6[:, 1:]*4 + \\\n df_7[:, 1:]*1 + \\\n df_8[:, 1:]*2 + \\\n df_9[:, 1:]*1 + \\\n df_10[:, 1:]*3\n\nvote = 15 #7 15/31\n\n#=======================================================================================================================\n# Selecting most probable label for nan rows\n#=======================================================================================================================\n# sum_tmp = sum.copy()\n# for i,row in enumerate(sum):\n# #print (str(row))\n# #print(max(row))\n# #print(row.argmax(axis=0))\n# row_max_idx = row.argmax(axis=0)\n# if max(row)<vote:\n# #row[row_max_idx] = vote\n# sum[i,row_max_idx] = vote\n# #print(str(row))\n# diff = sum-sum_tmp\n#=======================================================================================================================\n\nvote_sub0 = np.where(sum[:,0] >= vote, 1, 0) #high\nvote_sub1 = np.where(sum[:,1] >= vote, 1, 0)\nvote_sub2 = np.where(sum[:,2] >= vote, 1, 0)\nvote_sub3 = np.where(sum[:,3] >= vote, 1, 0)\nvote_sub4 = np.where(sum[:,4] >= vote, 1, 0)\nvote_sub5 = np.where(sum[:,5] >= vote, 1, 0)\nvote_sub6 = np.where(sum[:,6] >= vote, 1, 0)\nvote_sub7 = np.where(sum[:,7] >= vote, 1, 0)\nvote_sub8 = np.where(sum[:,8] >= vote, 1, 0) #low\nvote_sub9 = np.where(sum[:,9] >= vote, 1, 0) #low\nvote_sub10 = np.where(sum[:,10] >= vote, 1, 0) #low\nvote_sub11 = np.where(sum[:,11] >= vote, 1, 0)\nvote_sub12 = np.where(sum[:,12] >= vote, 1, 0)\nvote_sub13 = np.where(sum[:,13] >= vote, 1, 0)\nvote_sub14 = np.where(sum[:,14] >= vote, 1, 0)\nvote_sub15 = np.where(sum[:,15] >= vote + 2, 1, 0) #low\nvote_sub16 = np.where(sum[:,16] >= vote, 1, 0)\nvote_sub17 = np.where(sum[:,17] >= vote, 1, 0)\nvote_sub18 = np.where(sum[:,18] >= vote, 1, 0)\nvote_sub19 = np.where(sum[:,19] >= vote, 1, 0)\nvote_sub20 = np.where(sum[:,20] >= vote, 1, 0)\nvote_sub21 = np.where(sum[:,21] >= vote, 1, 0)\nvote_sub22 = np.where(sum[:,22] >= vote, 1, 0)\nvote_sub23 = np.where(sum[:,23] >= vote, 1, 0)\nvote_sub24 = np.where(sum[:,24] >= vote, 1, 0)\nvote_sub25 = np.where(sum[:,25] >= vote, 1, 0) #high\nvote_sub26 = np.where(sum[:,26] >= vote, 1, 0)\nvote_sub27 = np.where(sum[:,27] >= vote, 1, 0) #low\n\nvote_sub = np.column_stack((vote_sub0, vote_sub1, vote_sub2, vote_sub3,\n vote_sub4, vote_sub5, vote_sub6, vote_sub7,\n vote_sub8, vote_sub9, vote_sub10, vote_sub11,\n vote_sub12, vote_sub13, vote_sub14, vote_sub15,\n vote_sub16, vote_sub17, vote_sub18, vote_sub19,\n vote_sub20, vote_sub21, vote_sub22, vote_sub23,\n vote_sub24, vote_sub25, vote_sub26, vote_sub27)\n )\n#======================================================================================================================\n# prepare submission format\n#======================================================================================================================\nsubmit = pd.read_csv(SAMPLE)\nprediction = []\n\nfor row in tqdm(range(submit.shape[0])):\n\n str_label = ''\n\n for col in range(vote_sub.shape[1]):\n if 
(vote_sub[row, col] < 1):\n str_label += ''\n else:\n str_label += str(col) + ' '\n prediction.append(str_label.strip())\n\nsubmit['Predicted'] = np.array(prediction)\n#submit.to_csv('sub_dir_team/test.csv', index=False)\nsubmit.to_csv('sub_dir_team/enstw40_642blend.csv', index=False)\n\n#=======================================================================================================================",
"\n# coding: utf-8\n\n# In[1]:\n\n\ndef pd_bninception_v3():\n import torch\n\n model_name = 'pd_bninception_v3'\n\n device = 'cuda:0'\n\n torch.backends.cudnn.benchmark = True\n\n\n # In[2]:\n\n\n import numpy as np\n import pandas as pd\n from sklearn.model_selection import KFold, StratifiedKFold\n from sklearn.utils import shuffle\n\n import matplotlib.pyplot as plt\n plt.style.use('seaborn-white')\n import seaborn as sns\n sns.set_style(\"white\")\n\n from skimage.transform import resize\n from skimage.color import rgb2gray, gray2rgb\n\n from sklearn.metrics import f1_score, precision_score, recall_score, roc_auc_score\n\n from tqdm import tqdm_notebook\n\n import gc\n import math\n import sys\n\n from fastai import *\n from fastai.vision import *\n\n np.random.seed(42)\n\n data_dir = '../input/'\n submit_l1_dir = \"../submits/\"\n weights_dir = \"weights/\"\n results_dir = '../results/'\n\n name_label_dict = {\n 0: 'Nucleoplasm',\n 1: 'Nuclear membrane',\n 2: 'Nucleoli',\n 3: 'Nucleoli fibrillar center',\n 4: 'Nuclear speckles',\n 5: 'Nuclear bodies',\n 6: 'Endoplasmic reticulum',\n 7: 'Golgi apparatus',\n 8: 'Peroxisomes',\n 9: 'Endosomes',\n 10: 'Lysosomes',\n 11: 'Intermediate filaments',\n 12: 'Actin filaments',\n 13: 'Focal adhesion sites',\n 14: 'Microtubules',\n 15: 'Microtubule ends',\n 16: 'Cytokinetic bridge',\n 17: 'Mitotic spindle',\n 18: 'Microtubule organizing center',\n 19: 'Centrosome',\n 20: 'Lipid droplets',\n 21: 'Plasma membrane',\n 22: 'Cell junctions',\n 23: 'Mitochondria',\n 24: 'Aggresome',\n 25: 'Cytosol',\n 26: 'Cytoplasmic bodies',\n 27: 'Rods & rings' }\n\n def twenty_kfold_threshold(y_true, y_pred):\n n_classes = len(name_label_dict)\n classes_thresholds = []\n classes_scores = []\n for i in range(n_classes):\n kf_class_thresholds = []\n for j in range(20):\n kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=239 + j*101)\n for _, tst_inx in kf.split(y_true,y_true[:,i]):\n t_min = np.min(y_pred[tst_inx,i])\n t_max = np.max(y_pred[tst_inx,i])\n thresholds = np.linspace(t_min, t_max, 50)\n scores = np.array([\n f1_score(y_true[tst_inx,i], np.int32(y_pred[tst_inx,i] >= threshold)) for threshold in thresholds\n ])\n threshold_best_index = np.argmax(scores)\n kf_class_thresholds.append(thresholds[threshold_best_index])\n threshold = np.mean(kf_class_thresholds)\n classes_thresholds.append(threshold)\n f1 = f1_score(y_true[:,i], np.int32(y_pred[:,i] >= threshold))\n classes_scores.append(f1)\n return classes_thresholds, classes_scores\n\n\n # In[3]:\n\n\n import pretrainedmodels\n\n pretrainedmodels.__dict__['model_names']\n\n\n # In[4]:\n\n\n import pretrainedmodels\n import pretrainedmodels.utils as pqutils\n\n _model_name = 'bninception'\n model = pretrainedmodels.__dict__[_model_name](num_classes=1000, pretrained='imagenet')\n tf_img = pqutils.TransformImage(model)\n tf_mean = list(map(float, tf_img.__dict__['mean']))\n tf_std = list(map(float, tf_img.__dict__['std']))\n model_stats = (tf_mean, tf_std)\n model_stats\n\n\n # In[5]:\n\n\n data_dir = '../input/'\n valid_df = pd.read_csv('../input/' + 'val_id.csv', header=None, names=['idx','Id'])\n train_df = pd.read_csv(data_dir + 'train.csv')\n len(train_df)\n\n\n # In[6]:\n\n\n from PIL import Image as QImage\n ids = []\n labels = []\n def file_jpg_to_png(path):\n global ids\n gclasses = set(list(range(28))) - set([0,25])\n f1 = '../input/new_data/' + path + '.jpg'\n f2 = '../input/train_png/' + path + '.png'\n xs = path.split('_')\n q = xs.index('classes') + 1\n xs = xs[q:]\n if len(gclasses & 
set([int(x) for x in xs])) == 0:\n return\n xs = ' '.join(xs)\n if not os.path.isfile(f2):\n try:\n im = QImage.open(f1)\n im = im.resize((512, 512), QImage.NEAREST)\n im.save(f2)\n ids.append(path)\n labels.append(xs)\n except:\n pass\n else:\n ids.append(path)\n labels.append(xs)\n\n need_to_prepare_extra = False\n if need_to_prepare_extra:\n for filename in tqdm_notebook(os.listdir('../input/new_data/'), total = 64447):\n if filename.endswith(\".jpg\"):\n file_jpg_to_png(filename[:-4])\n\n\n # In[7]:\n\n\n if need_to_prepare_extra:\n xtra_data = pd.DataFrame()\n xtra_data['Id'] = ids\n xtra_data['Target'] = labels\n xtra_data.to_csv(data_dir + 'xtra_train.csv', index=False)\n xtra_data.head(n=3)\n\n\n # In[8]:\n\n\n test_matches = pd.read_csv('../input/test_matches.csv')\n test_matches.Extra = test_matches['Extra'].apply(lambda x : \"_\".join(x.split(\"_\")[2:]))\n\n\n # In[9]:\n\n\n xtra_data = pd.read_csv(data_dir + 'xtra_train.csv')\n xtra_data['Extra'] = xtra_data.Id.apply(lambda x : x[:x.find(\"_classes\")])\n\n\n # In[10]:\n\n\n xtra_matches_ids = test_matches.Extra.values.tolist()\n xtra_data_train = xtra_data.loc[~xtra_data.Extra.isin(xtra_matches_ids),['Id','Target']].reset_index(drop=True)\n xtra_data_valid = xtra_data.loc[xtra_data.Extra.isin(xtra_matches_ids),['Id','Target']].reset_index(drop=True)\n\n\n # In[11]:\n\n\n data = xtra_data_train\n labels = np.zeros((data.shape[0], 28), dtype=np.int32)\n if \"Target\" in data:\n for i, lbls in data['Target'].str.split().iteritems():\n for j in map(int, lbls):\n labels[i, j] = 1\n for j in range(28):\n print(j,'\\t',name_label_dict[j], '\\t', labels[:,j].sum(), '\\t', labels[:,j].sum()/labels.shape[0])\n\n\n # In[12]:\n\n\n xtra_matches_ids = ['1054_E4_1_classes_25_16_0','1762_G4_5_classes_27','1335_C6_2_classes_3',\n '935_D5_2_classes_22_0','27_H9_2_classes_10','669_D8_1_classes_16_2',\n '1178_D4_2_classes_19_16_14','791_A9_1_classes_10_9','759_F9_9_classes_25_21_19_16',\n '1283_F10_2_classes_16_0','688_E7_10_classes_23','1772_F9_7_classes_25_17',\n '454_E5_1_classes_14_0','1020_C5_3_classes_23','1386_G4_2_classes_8',\n '681_G8_5_classes_13','1609_C4_2_classes_16_0','690_D3_5_classes_22_21_1_0',\n '1245_B2_2_classes_21_0','1335_C10_4_classes_16_0','693_A11_3_classes_23',\n '1139_A12_4_classes_23','916_F8_1_classes_25_2_0','694_C1_2_classes_18_1',\n '929_B8_1_classes_25_19','340_F5_3_classes_13','138_B12_1_classes_8',\n '932_G11_2_classes_25_16','28_H9_1_classes_10','924_F12_1_classes_27',\n '682_F12_2_classes_25_4','1147_D3_13_classes_16_0','346_A5_1_classes_12',\n '616_F1_4_classes_8','73_A10_1_classes_27_25','663_A9_2_classes_16_14',\n '859_C8_4_classes_16_14','933_C10_4_classes_22_21','1207_B10_7_classes_12',\n '694_F10_1_classes_25_21','908_E3_1_classes_4','1758_C9_4_classes_17_2',\n '1335_D2_2_classes_2_0','929_H2_2_classes_23','1717_G8_34_classes_25_17',\n '1150_H4_7_classes_13','1054_E4_2_classes_25_16_0','504_B1_3_classes_25_16_0',\n '747_B5_4_classes_10_9','1020_B1_7_classes_23_5','918_H10_2_classes_25_15',\n '532_H3_1_classes_25_16_0','757_C6_3_classes_16_2','1346_H6_3_classes_16_5_0',\n '496_D1_1_classes_16_0','1042_C3_3_classes_27','929_B12_1_classes_3',\n 
'684_C4_2_classes_23_0','696_C9_5_classes_25_21_0','1144_A10_4_classes_2','846_A8_2_classes_16_14','903_F12_2_classes_23_5','1264_G1_1_classes_27','925_H8_2_classes_1_0','121_C6_2_classes_10_9','1657_E10_3_classes_25_17','932_G11_1_classes_25_16','704_G4_1_classes_25_12','1039_C3_2_classes_19_16','906_H7_2_classes_25_6','19_H7_2_classes_8','725_G10_2_classes_16_14','681_B2_4_classes_4','697_A6_4_classes_19_0','1581_B12_2_classes_16_14','926_F7_2_classes_5_0','1770_D2_4_classes_21_17_4','1037_F4_3_classes_19','1413_F11_6_classes_21_16','694_A2_1_classes_2','1049_D11_2_classes_25_16_0','1276_C3_2_classes_21_0','346_B12_3_classes_14_0','1773_G12_3_classes_16_12','1183_F4_2_classes_15','1158_H11_8_classes_16_5','380_C6_1_classes_16_0','792_B6_7_classes_13_0','682_C9_6_classes_25_12_2','906_A9_4_classes_20_0','400_D3_2_classes_25_7','1237_G1_4_classes_21_6','793_B1_1_classes_25_22_0','1308_A5_4_classes_5','800_E1_1_classes_16_14','1421_G5_7_classes_17','906_A9_6_classes_20_0','1245_B2_3_classes_21_0','626_D7_6_classes_25_21_12','344_G2_4_classes_11','901_E12_1_classes_25_6_2','1050_F6_6_classes_16_0','240_G8_1_classes_8','933_C2_1_classes_23_2_0','556_B9_1_classes_25_18_0','1335_C10_2_classes_16_0','1125_F6_3_classes_4','1495_F7_3_classes_7_0','694_C1_1_classes_18_1','918_B3_4_classes_14','1762_E6_5_classes_7','915_C6_5_classes_4','820_G4_3_classes_10_9','927_F12_12_classes_18_0','901_D10_2_classes_12_0','1642_G7_34_classes_25_16','928_G1_2_classes_14_7','682_G9_1_classes_7_0','903_F2_1_classes_2_0','1645_E1_32_classes_16_14','685_G10_5_classes_12_0','927_A9_10_classes_25_5','957_G6_4_classes_16','757_C6_2_classes_16_2','1213_C4_2_classes_4','909_A6_1_classes_2','694_D6_2_classes_1_0','480_D6_3_classes_25_16','1050_F1_3_classes_25_16_0','692_A1_5_classes_25_14_0','1772_H1_5_classes_18_17_16_0','991_G6_7_classes_10_9','782_F8_2_classes_25_16','693_H4_1_classes_7','1259_A11_4_classes_19_16','1414_D12_2_classes_21_0','1139_D5_5_classes_5','930_H3_2_classes_1','901_G9_5_classes_25_19_0','1754_G2_34_classes_5','353_A9_1_classes_21_13','1179_H7_1_classes_25_16_0','1423_A4_2_classes_16_14','686_F4_2_classes_22_21','1693_E1_2_classes_23_16','400_H8_2_classes_23','1680_G4_4_classes_16','935_G3_1_classes_5','838_E8_1_classes_3','1030_D8_2_classes_7_0','684_D12_4_classes_18','812_C10_2_classes_13_0','1416_D10_6_classes_21_16_0','1293_E3_2_classes_1_0','480_D6_2_classes_25_16','700_H6_2_classes_25_2','1773_E10_4_classes_16_0','611_E10_1_classes_25_13','346_B12_4_classes_14_0','523_A9_4_classes_5','1581_B12_3_classes_16_14','684_D8_6_classes_25_12_0','927_F12_11_classes_18_0','353_E4_2_classes_5','556_C1_5_classes_25_22_16','1179_H7_2_classes_25_16_0','1711_B12_3_classes_26_21_4','449_G8_2_classes_4_2','544_A8_5_classes_22_21_7','1772_H1_3_classes_18_17_16_0','1772_G2_6_classes_25_19_16_0','909_C11_2_classes_2_0','930_C12_1_classes_18_14_6','690_C10_2_classes_13','1009_B6_2_classes_10_9','757_E10_5_classes_12','88_D7_2_classes_8','383_E8_7_classes_25_17','1432_F2_2_classes_6','505_C10_1_classes_25_15','1104_E7_2_classes_16_14','699_E8_1_classes_1','1213_C4_3_classes_4','690_H5_1_classes_4','1169_D3_6_classes_16_0','686_F4_1_classes_22_21','532_D1_1_classes_16_0','896_G8_3_classes_5_0','934_G4_3_classes_21','344_G2_1_classes_11','369_C9_1_classes_18_14_0','682_F12_1_classes_25_4','683_E1_2_classes_25_1_0','697_G3_6_classes_13_7','1772_A6_7_classes_5','933_C4_6_classes_5','1231_F9_5_classes_7','802_D5_9_classes_16_0','682_G10_1_classes_7','850_C1_9_classes_21_0','929_B12_2_classes_3','1339_D3_3_classes_2_1','8
58_D4_2_classes_4','334_B12_2_classes_4','622_F1_7_classes_8','908_G5_2_classes_2_0','778_G6_2_classes_25_16_14','1027_C4_1_classes_7','886_C10_5_classes_23_0','807_C2_3_classes_4','1314_D2_2_classes_25_16_0','1770_B5_1_classes_21_16_11','1105_F10_2_classes_16_0','1283_B2_10_classes_16_0','583_E11_1_classes_25_16','820_G4_7_classes_10_9','928_H3_2_classes_14_0','970_H1_4_classes_25_18','1751_A7_32_classes_27','701_H10_2_classes_25_14','1773_B6_11_classes_23_17_16','1736_G7_31_classes_25_16','928_H3_1_classes_14_0','1645_E5_34_classes_17','539_B3_1_classes_25_21_0','683_E1_1_classes_25_1_0','484_G6_3_classes_22','928_A1_1_classes_4','1773_B6_7_classes_23_17_16','1255_A3_4_classes_16_0','698_C6_2_classes_25_21_4','1773_D5_6_classes_17','681_G8_4_classes_13','935_H11_2_classes_22_0','1125_B9_4_classes_25_7','698_F11_1_classes_13_0','344_F7_1_classes_25_21','906_C11_1_classes_4','1656_F5_2_classes_19_17','1761_A10_3_classes_23_17_14','1772_H5_7_classes_17_7','910_B8_1_classes_12_0','1283_F10_4_classes_16_0','508_C10_1_classes_25_15','681_B2_3_classes_4','868_E8_2_classes_17_16_0','1339_B9_2_classes_16_0','856_A2_4_classes_2_0','700_C3_6_classes_21','869_B3_1_classes_16_0','701_B9_2_classes_21_13_0','1178_F9_6_classes_16_0','542_G1_1_classes_11_2_0']\n exclude_valid = ['5ae3db3a-bbc4-11e8-b2bc-ac1f6b6435d0',\n 'e6d0b648-bbbc-11e8-b2ba-ac1f6b6435d0',\n '3202385a-bbca-11e8-b2bc-ac1f6b6435d0',\n '0cf36c82-bbca-11e8-b2bc-ac1f6b6435d0',\n '7cb0006e-bbaf-11e8-b2ba-ac1f6b6435d0',\n '87b77dd2-bba2-11e8-b2b9-ac1f6b6435d0',\n '62c88efa-bbc8-11e8-b2bc-ac1f6b6435d0',\n '44d819c2-bbbb-11e8-b2ba-ac1f6b6435d0',\n 'b1ca2b40-bbbd-11e8-b2ba-ac1f6b6435d0',\n '8cd67266-bbbe-11e8-b2ba-ac1f6b6435d0',\n 'cead83ec-bb9a-11e8-b2b9-ac1f6b6435d0',\n 'a166d11a-bbca-11e8-b2bc-ac1f6b6435d0',\n '91a0a67e-bb9e-11e8-b2b9-ac1f6b6435d0',\n '2be24582-bbb1-11e8-b2ba-ac1f6b6435d0']\n exclude_train = ['7138c4aa-bb9b-11e8-b2b9-ac1f6b6435d0',\n '8a10533e-bba6-11e8-b2ba-ac1f6b6435d0',\n 'be92e108-bbb5-11e8-b2ba-ac1f6b6435d0',\n 'abfa727e-bba4-11e8-b2ba-ac1f6b6435d0',\n '2384acac-bbae-11e8-b2ba-ac1f6b6435d0',\n 'c7a7a462-bbb1-11e8-b2ba-ac1f6b6435d0',\n '559f7ce0-bbb2-11e8-b2ba-ac1f6b6435d0']\n\n\n # In[13]:\n\n\n xtra_data_train = xtra_data.loc[~xtra_data.Id.isin(xtra_matches_ids),['Id','Target']].reset_index(drop=True)\n xtra_data_valid = xtra_data.loc[xtra_data.Id.isin(xtra_matches_ids),['Id','Target']].reset_index(drop=True)\n\n\n # In[14]:\n\n\n valid_df = pd.read_csv('../input/' + 'val_id.csv', header=None, names=['idx','Id'])\n valid_df = valid_df.loc[~valid_df.Id.isin(exclude_valid),:]\n train_df = pd.read_csv(data_dir + 'train.csv')\n train_df = train_df.loc[~train_df.Id.isin(exclude_train),:]\n\n test_df = pd.read_csv('../input/' + \"sample_submission.csv\")\n train = train_df.loc[~train_df.Id.isin(valid_df.Id.values.tolist()),:].reset_index(drop=True)\n train = pd.concat([train,xtra_data_train], axis=0, sort=False)\n valid = train_df.loc[train_df.Id.isin(valid_df.Id.values.tolist()),:].reset_index(drop=True)\n valid = pd.concat([valid,xtra_data_valid], axis=0, sort=False)\n test = test_df\n\n\n # In[15]:\n\n\n train.shape\n\n\n # In[16]:\n\n\n def zero_25(x):\n return x in ['0', '25', '25 0', '0 25']\n\n train = train[~((train['Id'].str.contains('classes')) & (train['Target'].apply(zero_25)))]\n train.shape\n\n\n # In[17]:\n\n\n del train_df,valid_df,test_df,xtra_data_valid,xtra_data_train\n gc.collect()\n\n train_files = train.Id.apply(lambda s: '../input/' + 'train_png/'+s+'.png')\n train_labels = 
train.Target.astype(str).apply(lambda s : [name_label_dict[int(q)] for q in s.split(' ')])\n train_ds = ImageMultiDataset(fns = train_files, labels = train_labels, classes = list(name_label_dict.values()))\n del train_files, train_labels\n\n valid_files = valid.Id.apply(lambda s: '../input/' + 'train_png/'+s+'.png')\n valid_labels = valid.Target.astype(str).apply(lambda s : [name_label_dict[int(q)] for q in s.split(' ')])\n valid_ds = ImageMultiDataset(fns = valid_files, labels = valid_labels, classes = list(name_label_dict.values()))\n del valid_files, valid_labels\n\n test_files = test.Id.apply(lambda s: '../input/' + 'test_png/'+s+'.png')\n test_labels = test.Predicted.astype(str).apply(lambda s : [name_label_dict[int(q)] for q in s.split(' ')])\n test_ds = ImageMultiDataset(fns = test_files, labels = test_labels, classes = list(name_label_dict.values()))\n del test_files, test_labels\n\n xtra = [RandTransform(squish, {})]\n tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=180.0,\n max_zoom=1.25, max_lighting=0.25, max_warp=0.05,\n p_affine=0.9, p_lighting=0.7, xtra_tfms=xtra)\n data = ImageDataBunch.create(train_ds, valid_ds, test_ds, path=data_dir, device=device,\n size=512, bs=28, ds_tfms=tfms, padding_mode='zeros')\n data.normalize(model_stats)\n\n\n # In[18]:\n\n\n data.show_batch(rows=2, figsize=(12,8))\n\n\n # In[19]:\n\n\n class FocalLoss(nn.Module):\n def __init__(self, gamma=2):\n super().__init__()\n self.gamma = gamma\n\n def forward(self, input, target):\n if not (target.size() == input.size()):\n raise ValueError(\"Target size ({}) must be the same as input size ({})\"\n .format(target.size(), input.size()))\n\n max_val = (-input).clamp(min=0)\n loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()\n\n invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))\n loss = (invprobs * self.gamma).exp() * loss\n\n return loss.sum(dim=1).mean()\n\n\n # In[20]:\n\n\n def create_head(nf:int, nc:int, lin_ftrs:Optional[Collection[int]]=None, ps:Floats=0.5):\n lin_ftrs = [nf, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]\n ps = listify(ps)\n\n if len(ps)==1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps\n\n actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]\n layers = [AdaptiveConcatPool2d(), Flatten()]\n for ni,no,p,actn in zip(lin_ftrs[:-1],lin_ftrs[1:],ps,actns):\n layers += bn_drop_lin(ni,no,True,p,actn)\n\n return nn.Sequential(*layers)\n\n\n # In[21]:\n\n\n from pretrainedmodels import bninception\n\n class Stub(nn.Module):\n def __init__(self):\n super().__init__()\n def forward(self, x):\n return x.view(-1,1024, 16, 16)\n\n model = bninception()\n model.global_pool=Stub()\n model.last_linear=Stub()\n\n def create_cnn(data:DataBunch, arch:Callable, cut:Union[int,Callable]=None, pretrained:bool=True,\n lin_ftrs:Optional[Collection[int]]=None, ps:Floats=0.5,\n custom_head:Optional[nn.Module]=None, split_on:Optional[SplitFuncOrIdxList]=None,\n classification:bool=True, **kwargs:Any)->Learner:\n \"Build convnet style learners.\"\n assert classification, 'Regression CNN not implemented yet, bug us on the forums if you want this!'\n # meta = cnn_config(arch)\n body = create_body(arch, 0)\n nf = 2048\n head = custom_head or create_head(nf, data.c, lin_ftrs, ps)\n model = nn.Sequential(body,head)\n learner_cls = ifnone(data.learner_type(), ClassificationLearner)\n learn = learner_cls(data, model, **kwargs)\n learn.split(ifnone(split_on,(model[0],model[1])))\n if pretrained: learn.freeze()\n learn.freeze_to(0)\n 
apply_init(model[1], nn.init.kaiming_normal_)\n return learn\n\n\n # In[22]:\n\n\n from fastai.vision import models\n from sklearn import metrics\n import torchvision\n\n\n from fastai.vision.learner import cnn_config\n\n\n def body(pretrained = True):\n return pretrainedmodels.bninception(pretrained='imagenet').to(device)\n\n learner = create_cnn(data, arch=model, cut=-1,\n custom_head = create_head(2048, len(data.classes), ps=0.5))\n learner.loss_fn = FocalLoss()\n\n\n # In[23]:\n\n\n from fastai.torch_core import split_model_idx\n\n learner.split(split_model_idx(learner.model,idxs=[3,10,70,174,215,221]))\n layers = [0]*7\n\n\n # In[24]:\n\n\n def get_lrs(lr, base=16):\n return np.logspace(np.log(lr/base), np.log(lr), len(layers), base=np.e)\n\n\n # In[25]:\n\n\n learner.freeze()\n lr = 2e-3\n learner.fit_one_cycle(6, get_lrs(lr))\n\n\n # In[26]:\n\n\n learner.save('bninception-stage-1-1')\n\n\n # In[27]:\n\n\n learner.lr_find(num_it=1000)\n learner.recorder.plot()\n\n\n # In[28]:\n\n\n learner.load('bninception-stage-1-1')\n learner.unfreeze()\n lr = 1e-3\n learner.fit_one_cycle(16, max_lr=get_lrs(lr))\n\n\n # In[ ]:\n\n\n lr = 5e-4\n learner.fit_one_cycle(16, max_lr=get_lrs(lr))\n\n\n # In[30]:\n\n\n y_pred_solo, avg_preds1, y = learner.TTA(beta=None)\n y = y.cpu().numpy().copy()\n _, avg_preds2, _ = learner.TTA(beta=None)\n _, avg_preds3, _ = learner.TTA(beta=None)\n _, avg_preds4, _ = learner.TTA(beta=None)\n\n avg_preds = y_pred_solo.cpu().numpy().copy()*0.4+torch.stack([avg_preds1,avg_preds2,avg_preds3,avg_preds4]).mean(0).cpu().numpy().copy()*0.6\n\n\n # In[31]:\n\n\n classes_thresholds, classes_scores = twenty_kfold_threshold(y, avg_preds)\n n_classes = len(name_label_dict)\n yp = avg_preds.copy()\n for i in range(n_classes):\n yp[:,i] = avg_preds[:,i] >= classes_thresholds[i]\n yp = yp.astype(np.uint8)\n sc = f1_score(y,yp,average='macro')\n print('val F1 macro:', f1_score(y,yp,average='macro'))\n s = ''\n for i in range(n_classes):\n s += name_label_dict[i] + ':' + ('{:.4f}, {:.4f} ').format(classes_scores[i],classes_thresholds[i])\n\n learner.save(model_name+'_{:.4f}.pnt'.format(sc))\n\n\n # In[25]:\n\n\n learner = learner.load('pd_bninception_v3_0.6672.pnt')\n learner.unfreeze()\n\n\n # In[26]:\n\n\n for i, c in enumerate(learner.model.children()):\n if i == 0:\n continue\n\n for j, c1 in enumerate(c):\n if j == 3:\n c1.p = 0.6\n\n\n # In[27]:\n\n\n xtra = [RandTransform(squish, {})]\n tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=180.0,\n max_zoom=1.25, max_lighting=0.25, max_warp=0.05,\n p_affine=0.9, p_lighting=0.7, xtra_tfms=xtra)\n data = ImageDataBunch.create(train_ds, valid_ds, test_ds, path=data_dir, device=device,\n size=512, bs=28, ds_tfms=tfms, padding_mode='zeros')\n data.normalize(model_stats)\n\n learner.data = data\n\n lr = 1e-4\n learner.fit_one_cycle(16, max_lr=get_lrs(lr))\n\n\n # In[28]:\n\n\n y_pred_solo, avg_preds1, y = learner.TTA(beta=None)\n y = y.cpu().numpy().copy()\n _, avg_preds2, _ = learner.TTA(beta=None)\n _, avg_preds3, _ = learner.TTA(beta=None)\n _, avg_preds4, _ = learner.TTA(beta=None)\n\n avg_preds = y_pred_solo.cpu().numpy().copy()*0.4+torch.stack([avg_preds1,avg_preds2,avg_preds3,avg_preds4]).mean(0).cpu().numpy().copy()*0.6\n\n\n # In[29]:\n\n\n classes_thresholds, classes_scores = twenty_kfold_threshold(y, avg_preds)\n n_classes = len(name_label_dict)\n yp = avg_preds.copy()\n for i in range(n_classes):\n yp[:,i] = avg_preds[:,i] >= classes_thresholds[i]\n yp = yp.astype(np.uint8)\n sc = 
f1_score(y,yp,average='macro')\n print('val F1 macro:', f1_score(y,yp,average='macro'))\n s = ''\n for i in range(n_classes):\n s += name_label_dict[i] + ':' + ('{:.4f}, {:.4f} ').format(classes_scores[i],classes_thresholds[i])\n\n learner.save(model_name+'_{:.4f}.pnt'.format(sc))\n\n\n # In[30]:\n\n\n xtra = [RandTransform(squish, {})]\n tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=180.0,\n max_zoom=1.27, max_lighting=0.28, max_warp=0.07,\n p_affine=0.9, p_lighting=0.73, xtra_tfms=xtra)\n data = ImageDataBunch.create(train_ds, valid_ds, test_ds, path=data_dir, device=device,\n size=512, bs=28, ds_tfms=tfms, padding_mode='zeros')\n data.normalize(model_stats)\n\n learner.data = data\n\n lr = 1e-4\n learner.fit_one_cycle(8, max_lr=get_lrs(lr))\n\n\n # In[31]:\n\n\n lr = 1e-4\n learner.fit_one_cycle(8, max_lr=get_lrs(lr), div_factor=50)\n\n\n # In[32]:\n\n\n y_pred_solo, avg_preds1, y = learner.TTA(beta=None)\n y = y.cpu().numpy().copy()\n _, avg_preds2, _ = learner.TTA(beta=None)\n _, avg_preds3, _ = learner.TTA(beta=None)\n _, avg_preds4, _ = learner.TTA(beta=None)\n\n avg_preds = y_pred_solo.cpu().numpy().copy()*0.4+torch.stack([avg_preds1,avg_preds2,avg_preds3,avg_preds4]).mean(0).cpu().numpy().copy()*0.6\n\n\n # In[33]:\n\n\n classes_thresholds, classes_scores = twenty_kfold_threshold(y, avg_preds)\n n_classes = len(name_label_dict)\n yp = avg_preds.copy()\n for i in range(n_classes):\n yp[:,i] = avg_preds[:,i] >= classes_thresholds[i]\n yp = yp.astype(np.uint8)\n sc = f1_score(y,yp,average='macro')\n print('val F1 macro:', f1_score(y,yp,average='macro'))\n s = ''\n for i in range(n_classes):\n s += name_label_dict[i] + ':' + ('{:.4f}, {:.4f} ').format(classes_scores[i],classes_thresholds[i])\n\n learner.save(model_name+'_{:.4f}.pnt'.format(sc))\n\n\n # In[34]:\n\n\n lr = 5e-5\n learner.fit_one_cycle(8, max_lr=get_lrs(lr))\n\n\n # In[35]:\n\n\n y_pred_solo, avg_preds1, y = learner.TTA(beta=None)\n y = y.cpu().numpy().copy()\n _, avg_preds2, _ = learner.TTA(beta=None)\n _, avg_preds3, _ = learner.TTA(beta=None)\n _, avg_preds4, _ = learner.TTA(beta=None)\n\n avg_preds = y_pred_solo.cpu().numpy().copy()*0.4+torch.stack([avg_preds1,avg_preds2,avg_preds3,avg_preds4]).mean(0).cpu().numpy().copy()*0.6\n\n\n # In[36]:\n\n\n classes_thresholds, classes_scores = twenty_kfold_threshold(y, avg_preds)\n n_classes = len(name_label_dict)\n yp = avg_preds.copy()\n for i in range(n_classes):\n yp[:,i] = avg_preds[:,i] >= classes_thresholds[i]\n yp = yp.astype(np.uint8)\n sc = f1_score(y,yp,average='macro')\n print('val F1 macro:', f1_score(y,yp,average='macro'))\n s = ''\n for i in range(n_classes):\n s += name_label_dict[i] + ':' + ('{:.4f}, {:.4f} ').format(classes_scores[i],classes_thresholds[i])\n\n learner.save(model_name+'_{:.4f}.pnt'.format(sc))\n\n\n # In[42]:\n\n\n learner = learner.load(model_name+'_0.6750.pnt')\n\n\n # In[43]:\n\n\n xtra = [RandTransform(squish, {})]\n tfms = get_transforms(do_flip=True, flip_vert=True, max_rotate=180.0,\n max_zoom=1.25, max_lighting=0.25, max_warp=0.05,\n p_affine=0.9, p_lighting=0.7, xtra_tfms=xtra)\n data = ImageDataBunch.create(train_ds, valid_ds, test_ds, path=data_dir, device=device,\n size=512, bs=28, ds_tfms=tfms, padding_mode='zeros')\n data.normalize(model_stats)\n\n learner.data = data\n\n lr = 1e-4\n learner.fit_one_cycle(8, max_lr=get_lrs(lr))\n\n\n # In[44]:\n\n\n y_pred_solo, avg_preds1, y = learner.TTA(beta=None)\n y = y.cpu().numpy().copy()\n _, avg_preds2, _ = learner.TTA(beta=None)\n _, avg_preds3, _ = 
learner.TTA(beta=None)\n _, avg_preds4, _ = learner.TTA(beta=None)\n\n avg_preds = y_pred_solo.cpu().numpy().copy()*0.4+torch.stack([avg_preds1,avg_preds2,avg_preds3,avg_preds4]).mean(0).cpu().numpy().copy()*0.6\n\n\n # In[45]:\n\n\n classes_thresholds, classes_scores = twenty_kfold_threshold(y, avg_preds)\n n_classes = len(name_label_dict)\n yp = avg_preds.copy()\n for i in range(n_classes):\n yp[:,i] = avg_preds[:,i] >= classes_thresholds[i]\n yp = yp.astype(np.uint8)\n sc = f1_score(y,yp,average='macro')\n print('val F1 macro:', f1_score(y,yp,average='macro'))\n s = ''\n for i in range(n_classes):\n s += name_label_dict[i] + ':' + ('{:.4f}, {:.4f} ').format(classes_scores[i],classes_thresholds[i])\n\n learner.save(model_name+'_{:.4f}.pnt'.format(sc))\n\n\n # ^ saved\n\n # In[25]:\n\n\n learner = learner.load(model_name+'_0.6815.pnt')\n lr = 9e-5\n learner.fit_one_cycle(8, max_lr=get_lrs(lr))\n\n\n # In[26]:\n\n\n y_pred_solo, avg_preds1, y = learner.TTA(beta=None)\n y = y.cpu().numpy().copy()\n _, avg_preds2, _ = learner.TTA(beta=None)\n _, avg_preds3, _ = learner.TTA(beta=None)\n _, avg_preds4, _ = learner.TTA(beta=None)\n\n avg_preds = y_pred_solo.cpu().numpy().copy()*0.4+torch.stack([avg_preds1,avg_preds2,avg_preds3,avg_preds4]).mean(0).cpu().numpy().copy()*0.6\n\n\n # In[27]:\n\n\n classes_thresholds, classes_scores = twenty_kfold_threshold(y, avg_preds)\n n_classes = len(name_label_dict)\n yp = avg_preds.copy()\n for i in range(n_classes):\n yp[:,i] = avg_preds[:,i] >= classes_thresholds[i]\n yp = yp.astype(np.uint8)\n sc = f1_score(y,yp,average='macro')\n print('val F1 macro:', f1_score(y,yp,average='macro'))\n s = ''\n for i in range(n_classes):\n s += name_label_dict[i] + ':' + ('{:.4f}, {:.4f} ').format(classes_scores[i],classes_thresholds[i])\n\n learner.save(model_name+'_{:.4f}.pnt'.format(sc))\n\n\n # In[28]:\n\n\n v=len('../input/test_png/')\n\n ids = []\n dd = data.test_ds.ds.__dict__['x']\n for i in dd:\n ids.append(i[v:-4])\n\n\n # In[29]:\n\n\n avg_tests1 = learner.TTA(ds_type = DatasetType.Test, beta=0.4)\n avg_tests2 = learner.TTA(ds_type = DatasetType.Test, beta=0.4)\n avg_tests3 = learner.TTA(ds_type = DatasetType.Test, beta=0.4)\n avg_tests4 = learner.TTA(ds_type = DatasetType.Test, beta=0.4)\n\n\n # In[30]:\n\n\n preds = torch.stack([avg_tests1[0],avg_tests2[0],avg_tests3[0],avg_tests4[0]]).mean(0).cpu().numpy().copy()\n\n\n # In[31]:\n\n\n results_dir = '../results/'\n np.save(results_dir+model_name+'_test.npy', preds.copy())\n\n\n # In[32]:\n\n\n results_dir = '../results/'\n np.save(results_dir+model_name+'_y.npy', y)\n np.save(results_dir+model_name+'_ids.npy', valid.Id.values)\n np.save(results_dir+model_name+'_holdout_1.npy', avg_preds)\n\n",
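# --- Added illustration (not part of the original notebook above) ---
# The bninception script repeatedly calls `twenty_kfold_threshold`, which picks one decision
# threshold per class by running a 5-fold StratifiedKFold 20 times and averaging the per-fold
# best-F1 thresholds. The sketch below is a simplified, self-contained version of that idea
# (a single sweep over candidate thresholds, synthetic data instead of TTA predictions), so
# the mechanics can be checked without fastai or the competition data. All names here are
# invented for the example.
import numpy as np
from sklearn.metrics import f1_score

def per_class_thresholds(y_true, y_prob, n_candidates=50):
    """Pick, for every class independently, the threshold that maximises F1 on y_true."""
    n_classes = y_true.shape[1]
    thresholds = np.zeros(n_classes)
    for c in range(n_classes):
        lo, hi = y_prob[:, c].min(), y_prob[:, c].max()
        candidates = np.linspace(lo, hi, n_candidates)
        scores = [f1_score(y_true[:, c], (y_prob[:, c] >= t).astype(np.int32))
                  for t in candidates]
        thresholds[c] = candidates[int(np.argmax(scores))]
    return thresholds

if __name__ == '__main__':
    rng = np.random.RandomState(0)
    y_true = (rng.rand(500, 28) < 0.1).astype(np.int32)                 # synthetic multi-label targets
    y_prob = np.clip(y_true * 0.6 + rng.rand(500, 28) * 0.5, 0, 1)      # noisy "predictions"
    th = per_class_thresholds(y_true, y_prob)
    y_pred = (y_prob >= th).astype(np.int32)
    print('macro F1 with per-class thresholds:', f1_score(y_true, y_pred, average='macro'))
# The repeated-KFold version in the script averages the fold-wise thresholds, which mainly
# stabilises the choice for rare classes where a single split can be very noisy.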
"# individual nan corrected\n# Final nan matches highest probable label (optional)\n\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nSAMPLE = '../input/sample_submission.csv'\n\nlabel_names = {\n 0: \"Nucleoplasm\",\n 1: \"Nuclear membrane\",\n 2: \"Nucleoli\",\n 3: \"Nucleoli fibrillar center\",\n 4: \"Nuclear speckles\",\n 5: \"Nuclear bodies\",\n 6: \"Endoplasmic reticulum\",\n 7: \"Golgi apparatus\",\n 8: \"Peroxisomes\",\n 9: \"Endosomes\",\n 10: \"Lysosomes\",\n 11: \"Intermediate filaments\",\n 12: \"Actin filaments\",\n 13: \"Focal adhesion sites\",\n 14: \"Microtubules\",\n 15: \"Microtubule ends\",\n 16: \"Cytokinetic bridge\",\n 17: \"Mitotic spindle\",\n 18: \"Microtubule organizing center\",\n 19: \"Centrosome\",\n 20: \"Lipid droplets\",\n 21: \"Plasma membrane\",\n 22: \"Cell junctions\",\n 23: \"Mitochondria\",\n 24: \"Aggresome\",\n 25: \"Cytosol\",\n 26: \"Cytoplasmic bodies\",\n 27: \"Rods & rings\"\n }\n\ncolumn_sum = []\nsub_name = []\n\ndef expand(csv):\n sub = pd.read_csv(csv)\n print(csv, sub.isna().sum())\n\n sub = sub.replace(pd.np.nan, '101')\n sub[f'target_vec'] = sub['Predicted'].map(lambda x: list(map(int, x.strip().split())))\n for i in range(28):\n sub[f'{label_names[i]}'] = sub['Predicted'].map(\n lambda x: 1 if str(i) in x.strip().split() else 0)\n sub = sub.values\n sub = np.delete(sub, [1, 2], axis=1)\n\n a = sub[:, 1:]\n unique, counts = np.unique(a, return_counts=True)\n print('Unique counts:',np.asarray((unique, counts)).T)\n print('Total labels:{} Class-wise:{}'.format(a.sum(), a.sum(axis=0)))\n column_sum.append( a.sum(axis=0))\n sub_name.append(csv)\n return sub\n\n#======================================================================================================================\n# Input submissions\n#====================================================================================================================\nsub_dir = 'sub_dir_team/'\n\ndf_1 = expand('sub_dir_team/leak_brian_tommy_en_res34swa_re50xt_re101xtswa_wrn_4.8_562.csv')\ndf_2 = expand( 'sub_dir_team/Christof_blend_4_580.csv')\ndf_3 = expand('sub_dir_team/ens85bd_russ_616.csv')\ndf_4 = expand('sub_dir_team/enspreds103_12mdl_512-256_wtth0.45_leak_shai_593.csv')\ndf_5 = expand('sub_dir_team/hill_m94d_dmytro_627.csv')\ndf_6 = expand('sub_dir_team/voted_5_d_kevin_602.csv')\ndf_7 = expand('sub_dir_team/hill_b93d_l2_615update.csv')\ndf_8 = expand('sub_dir_team/submission_loss_5fold_mean_2_GAP_chrs_602.csv')\n\n#=======================================================================================================================\n# Visualize distribution\n#=======================================================================================================================\nlist =[0,1,2,3,4,5,6,7]\ncolors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'orange']\nw=0\nfor i in list:\n x = np.arange(0, 28, 1)\n plt.bar(x+w, column_sum[i],width = 0.08, color = colors[i], label=sub_name[i], )\n w=w+0.09\nplt.legend()\nplt.grid(True)\nplt.yscale('log')\nplt.show()\n\n#=======================================================================================================================\nsim = df_1*1 + df_2*1 + df_3*1 + df_4*1 + df_5*1 + df_6*1 + df_7*1 + df_8*1\nsim = sim[:, 1:]\nunique, counts = np.unique(sim, return_counts=True)\n#=======================================================================================================================\nsum = df_1[:, 1:]*1 + \\\n df_2[:, 1:]*2 + \\\n df_3[:, 1:]*3 + \\\n df_4[:, 1:]*2 + \\\n 
df_5[:, 1:]*3 + \\\n df_6[:, 1:]*2 + \\\n df_7[:, 1:]*1 + \\\n df_8[:, 1:]*2\n\nvote = 8 #7\n\n#=======================================================================================================================\n# Selecting most probable label for nan rows\n#=======================================================================================================================\n# sum_tmp = sum.copy()\n# for i,row in enumerate(sum):\n# #print (str(row))\n# #print(max(row))\n# #print(row.argmax(axis=0))\n# row_max_idx = row.argmax(axis=0)\n# if max(row)<vote:\n# #row[row_max_idx] = vote\n# sum[i,row_max_idx] = vote\n# #print(str(row))\n# diff = sum-sum_tmp\n#=======================================================================================================================\n\nvote_sub0 = np.where(sum[:,0] >= vote, 1, 0) #high\nvote_sub1 = np.where(sum[:,1] >= vote, 1, 0)\nvote_sub2 = np.where(sum[:,2] >= vote, 1, 0)\nvote_sub3 = np.where(sum[:,3] >= vote, 1, 0)\nvote_sub4 = np.where(sum[:,4] >= vote, 1, 0)\nvote_sub5 = np.where(sum[:,5] >= vote, 1, 0)\nvote_sub6 = np.where(sum[:,6] >= vote, 1, 0)\nvote_sub7 = np.where(sum[:,7] >= vote, 1, 0)\nvote_sub8 = np.where(sum[:,8] >= vote-2, 1, 0) #low\nvote_sub9 = np.where(sum[:,9] >= vote-2, 1, 0) #low\nvote_sub10 = np.where(sum[:,10] >= vote-2, 1, 0) #low\nvote_sub11 = np.where(sum[:,11] >= vote, 1, 0)\nvote_sub12 = np.where(sum[:,12] >= vote, 1, 0)\nvote_sub13 = np.where(sum[:,13] >= vote, 1, 0)\nvote_sub14 = np.where(sum[:,14] >= vote, 1, 0)\nvote_sub15 = np.where(sum[:,15] >= vote-2, 1, 0) #low\nvote_sub16 = np.where(sum[:,16] >= vote, 1, 0)\nvote_sub17 = np.where(sum[:,17] >= vote, 1, 0)\nvote_sub18 = np.where(sum[:,18] >= vote, 1, 0)\nvote_sub19 = np.where(sum[:,19] >= vote, 1, 0)\nvote_sub20 = np.where(sum[:,20] >= vote, 1, 0)\nvote_sub21 = np.where(sum[:,21] >= vote, 1, 0)\nvote_sub22 = np.where(sum[:,22] >= vote, 1, 0)\nvote_sub23 = np.where(sum[:,23] >= vote, 1, 0)\nvote_sub24 = np.where(sum[:,24] >= vote, 1, 0)\nvote_sub25 = np.where(sum[:,25] >= vote, 1, 0) #high\nvote_sub26 = np.where(sum[:,26] >= vote, 1, 0)\nvote_sub27 = np.where(sum[:,27] >= vote-2, 1, 0) #low\n\nvote_sub = np.column_stack((vote_sub0, vote_sub1, vote_sub2, vote_sub3,\n vote_sub4, vote_sub5, vote_sub6, vote_sub7,\n vote_sub8, vote_sub9, vote_sub10, vote_sub11,\n vote_sub12, vote_sub13, vote_sub14, vote_sub15,\n vote_sub16, vote_sub17, vote_sub18, vote_sub19,\n vote_sub20, vote_sub21, vote_sub22, vote_sub23,\n vote_sub24, vote_sub25, vote_sub26, vote_sub27)\n )\n#======================================================================================================================\n# prepare submission format\n#======================================================================================================================\nsubmit = pd.read_csv(SAMPLE)\nprediction = []\n\nfor row in tqdm(range(submit.shape[0])):\n\n str_label = ''\n\n for col in range(vote_sub.shape[1]):\n if (vote_sub[row, col] < 1):\n str_label += ''\n else:\n str_label += str(col) + ' '\n prediction.append(str_label.strip())\n\nsubmit['Predicted'] = np.array(prediction)\n#submit.to_csv('sub_dir_team/39test_nonan_mostproba.csv', index=False)\nsubmit.to_csv('sub_dir_team/enstw39c_low-2_1sh562_3ru616_2die580_2sh593_3dm627_2ke602_1l2.615updt_2die602_8.16_clswt.csv', index=False)\n\n#=======================================================================================================================",
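# --- Added illustration (not part of the original ensembling script above) ---
# The submission-blending script expands each CSV into a binary (n_samples x 28) matrix,
# sums the matrices with hand-picked weights, and keeps a label when the weighted vote
# reaches `vote` (or `vote - 2` for the rare classes 8, 9, 10, 15 and 27). This toy version
# isolates that voting rule; the weights and rare-class list mirror the script, while the
# random "submissions" are placeholders.
import numpy as np

def weighted_vote(preds, weights, vote, rare_classes, rare_relief=2):
    """preds: list of binary arrays of shape (n, n_classes); returns a binary label matrix."""
    stacked = sum(w * p for w, p in zip(weights, preds))
    cutoff = np.full(stacked.shape[1], vote, dtype=float)
    cutoff[list(rare_classes)] -= rare_relief        # lower bar for the rare classes
    return (stacked >= cutoff).astype(np.uint8)

if __name__ == '__main__':
    rng = np.random.RandomState(1)
    subs = [(rng.rand(100, 28) > 0.7).astype(np.uint8) for _ in range(8)]
    weights = [1, 2, 3, 2, 3, 2, 1, 2]               # same per-submission weights as above
    out = weighted_vote(subs, weights, vote=8, rare_classes=[8, 9, 10, 15, 27])
    print('labels kept per class:', out.sum(axis=0))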
"import os, sys\n#os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport skimage.io\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport PIL\nfrom PIL import Image\nimport cv2\nfrom sklearn.utils import class_weight, shuffle\nfrom ml_stratifiers import MultilabelStratifiedKFold\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom classification_models.resnet.models import ResNet18\nimport albumentations as A\n\nMODEL_PATH = 'Christof/models/GAPNet/5crop_1024/'\n\n# a) added batchnorm and cut out one Dense 256 layer\n# b) a) + added 16 size layer to GAP\n\nSIZE = 512\n\n# Load dataset info\ntile = 'ur'\nexp_suffix = f'_{tile}'\npath_to_train = f'Christof/assets/train_rgb_1024_9crop/{tile}/'\ndata = pd.read_csv('Christof/assets/train.csv')\n\nnormal_aug = A.Compose([#A.Rotate((0,30),p=0.75),\n A.RandomRotate90(p=1),\n A.HorizontalFlip(p=0.5),\n #A.RandomBrightness(0.05),\n #A.RandomContrast(0.05),\n A.IAAAffine(translate_percent=10,rotate=45,shear=10, scale=(0.9,1.1)),\n #A.RandomAffine(degrees=45, translate=(0.1,0.1), shear=10, scale=(0.9,1.1))\n A.Normalize(mean=(0.08165012, 0.0530909 , 0.05298166), std=(0.12806622 ,0.08622692, 0.13038702),\n max_pixel_value=255.)\n ])\n\nnormal_aug_ext = A.Compose([#A.Rotate((0,30),p=0.75),\n A.RandomRotate90(p=1),\n A.HorizontalFlip(p=0.5),\n #A.RandomBrightness(0.05),\n #A.RandomContrast(0.05),\n A.IAAAffine(translate_percent=10,rotate=45,shear=10, scale=(0.9,1.1)),\n #A.RandomAffine(degrees=45, translate=(0.1,0.1), shear=10, scale=(0.9,1.1))\n A.Normalize(mean=(0.11843426, 0.06886751, 0.06541236), std=(0.16149608, 0.0987589 , 0.16087747),\n max_pixel_value=255.)\n ])\n\nval_aug = A.Compose([A.HorizontalFlip(p=0.5),\n A.Normalize(mean=(0.08165012, 0.0530909 , 0.05298166), std=(0.12806622 ,0.08622692, 0.13038702),\n max_pixel_value=255.)])\nfrom torchvision import transforms\n\neps = 0.004\ndesired = {\n 0: 0.36239782,\n 1: 0.043841336,\n 2: 0.075268817,\n 3: 0.059322034,\n 4: 0.075268817,\n 5: 0.075268817,\n 6: 0.043841336,\n 7: 0.075268817,\n 8: eps,\n 9: eps,\n 10: eps,\n 11: 0.043841336,\n 12: 0.043841336,\n 13: 0.014198783,\n 14: 0.043841336,\n 15: eps,\n 16: 0.028806584,\n 17: 0.014198783,\n 18: 0.028806584,\n 19: 0.059322034,\n 20: eps,\n 21: 0.126126126,\n 22: 0.028806584,\n 23: 0.075268817,\n 24: eps,\n 25: 0.222493888,\n 26: 0.028806584,\n 27: eps\n}\n\nsampling_weights = [ 2.6473, 35.0588 , 8.2069 , 19.3439 , 16.0145 , 13.3245 , 32.8644,\n 10.607 , 551.3 , 501.1818 , 787.5714 , 25.8523 , 39.0301, 51.644,\n 30.0846 ,1470.1333 , 62.8262, 190.1034 , 39.3084 , 23.2126 , 170.9457\n, 8.2592, 33.2609 , 9.6889 , 92.2678 , 4.19 , 99.3333 ,3150.2857]\n\nsample_weights_ext = [ 2.6728, 41.1617 , 10.3068 , 42.4172 , 22.9729 , 21.9808 , 26.8267\n, 11.5358 , 474.8659 , 486.7375 , 492.8987 , 66.963 , 50.2763 , 82.7609,\n 45.0683, 1854.2381, 100.3582 , 319.1721 , 76.5762 , 33.424 , 272.3007,\n 7.3664 , 39.4319 , 10.239 , 734.6981 , 2.548 , 196.6616 , 638.3443]\n\n\ntrain_dataset_info = []\nfor name, labels in zip(data['Id'], data['Target'].str.split(' ')):\n path = os.path.join(path_to_train, name)\n labs = np.array([int(label) for label in labels])\n bucket_ind = np.argmin([desired[l] for l in labs])\n bucket = labs[bucket_ind]\n weight = sampling_weights[bucket]\n train_dataset_info.append({\n 'path': path,\n 'labels': labs,\n 'weight':weight})\ntrain_dataset_info = 
np.array(train_dataset_info)\n\ndata_ext1 = pd.read_csv('Christof/assets/train_ext1.csv')\npath_to_train_ext1 = f'Christof/assets/ext_tomomi_rgb_1024_9crop/{tile}/'\ntrain_dataset_info_ext1 = []\nfor name, labels in zip(data_ext1['Id'], data_ext1['Target'].str.split(' ')):\n path = os.path.join(path_to_train_ext1, name[:-5])\n labs = np.array([int(label) for label in labels])\n bucket_ind = np.argmin([desired[l] for l in labs])\n bucket = labs[bucket_ind]\n weight = sample_weights_ext[bucket]\n train_dataset_info_ext1.append({\n 'path':path,\n 'labels': labs,\n 'weight':weight})\ntrain_dataset_info_ext1 = np.array(train_dataset_info_ext1)\n\n\ncounts = np.zeros(28)\nfor item in train_dataset_info:\n for l in item['labels']:\n counts[l] = counts[l] + 1\n\ncounts = counts / len(train_dataset_info)\nrare_classes = np.where(counts < 0.005)\n\n#rare_dataset_info = np.array([item for item in train_dataset_info if np.isin(item['labels'], rare_classes).any()])\n#train_dataset_info = rare_dataset_info\nfrom torch.utils.data.sampler import WeightedRandomSampler\n\nfrom classification_models.resnet import preprocess_input\nclass data_generator:\n\n @staticmethod\n def create_train(dataset_info, batch_size, shape, augument=None, weighted_sample = True):\n assert shape[2] == 3\n\n if weighted_sample:\n p = np.array([item['weight'] for item in dataset_info])\n p = p/np.sum(p)\n else:\n p = None\n\n while True:\n #dataset_info = shuffle(dataset_info)\n for start in range(0, len(dataset_info), batch_size):\n #end = min(start + batch_size, len(dataset_info))\n batch_images = []\n X_train_batch = np.random.choice(dataset_info,batch_size,p=p)\n batch_labels = np.zeros((len(X_train_batch), 28))\n for i in range(len(X_train_batch)):\n image = data_generator.load_image(X_train_batch[i]['path'], shape)\n #image = preprocess_input(image)\n #rare = np.isin(X_train_batch[i]['labels'], rare_classes).any()\n\n if augument:\n image = data_generator.augment(augument,image)\n\n batch_images.append(image)\n batch_labels[i][X_train_batch[i]['labels']] = 1\n yield np.array(batch_images, np.float32), batch_labels\n\n @staticmethod\n def load_image(path, shape):\n image = cv2.imread(path + '.png', cv2.IMREAD_UNCHANGED)\n return image\n\n @staticmethod\n def augment(aug,image):\n image_aug = aug(image=image)['image']\n return image_aug\n\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D, Concatenate, Input, Conv2D\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import metrics\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nimport keras\nfrom keras.models import Model\n\n\nfrom keras.layers import Layer, InputSpec\nfrom keras import initializers\nfrom keras.constraints import Constraint\nimport keras.backend as K\n\nfrom keras.layers import Reshape, Permute, multiply\ndef squeeze_excite_block(input, ratio=16):\n init = input\n channel_axis = 1 if K.image_data_format() == \"channels_first\" else -1\n filters = init._keras_shape[channel_axis]\n se_shape = (1, 1, filters)\n\n se = GlobalAveragePooling2D()(init)\n se = Reshape(se_shape)(se)\n se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)\n se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)\n\n if K.image_data_format() == 'channels_first':\n se = Permute((3, 
1, 2))(se)\n\n x = multiply([init, se])\n return x\n\ndef encoder(backbone):\n\n c0 = backbone.get_layer('relu0').output\n\n c1 = backbone.get_layer('stage2_unit1_relu1').get_output_at(0) # 128\n c2 = backbone.get_layer('stage3_unit1_relu1').output # 63\n c3 = backbone.get_layer('stage4_unit1_relu1').output # 32\n enc_out = backbone.get_layer('relu1').output # 16\n #enc_out = backbone.output # 8\n\n short_cuts = [c0,c1,c2,c3]\n return enc_out, short_cuts\n\nfrom keras.layers import BatchNormalization\ndef create_model(input_shape, n_out):\n input_tensor = Input(shape=(SIZE, SIZE, 3))\n #bn = BatchNormalization()(input_tensor)\n #conv = Conv2D(3,(3,3),padding='same',activation='relu')(bn)\n base_model = ResNet18(include_top=False,\n weights='imagenet',\n input_shape=(SIZE, SIZE, 3),input_tensor=input_tensor)\n\n enc_out, short_cuts = encoder(base_model)\n x0 = GlobalAveragePooling2D()(squeeze_excite_block(enc_out))\n x1 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[0]))\n x2 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[1]))\n x3 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[2]))\n x4 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[3]))\n x = Concatenate()([x0,x1,x2,x3,x4])\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n x = Dense(256, activation='relu')(x)\n #x = BatchNormalization()(x)\n #x = Dropout(0.5)(x)\n #x = Dense(256, activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n output = Dense(n_out, activation='sigmoid')(x)\n\n model = Model(input_tensor, output)\n\n # transfer imagenet weights\n #res_img = ResNet34(include_top=False, weights='imagenet', input_shape=(SIZE, SIZE, 3))\n #offset = 2\n #for i, l in enumerate(base_model.layers[offset+1:]):\n # l.set_weights(res_img.layers[i + 1].get_weights())\n\n return model\n\n\n\n# create callbacks list\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\nfrom keras_callbacks import F1Metric\n#from keras_metrics import f1, f1_02\n#from keras_losses import f1_loss\nepochs = [20,150]\nbatch_size = 32\n\n\n# split data into train, valid\n\nmskf = MultilabelStratifiedKFold(n_splits=5,shuffle=True,random_state=18)\n\ny = np.zeros((len(train_dataset_info), 28))\nfor i in range(len(train_dataset_info)):\n y[i][train_dataset_info[i]['labels']] = 1\nmskf.get_n_splits(train_dataset_info, y)\nkf = mskf.split(train_dataset_info, y)\nfold_id = 1\nfor f in range(fold_id):\n train_indexes, valid_indexes = next(kf)\n\ntrain_indexes, valid_indexes = next(kf)\ntrain_generator_orig = data_generator.create_train(train_dataset_info[train_indexes],\n batch_size, (SIZE, SIZE, 3), augument=normal_aug)\ntrain_generator_ext1 = data_generator.create_train(train_dataset_info_ext1,\n batch_size, (SIZE, SIZE, 3), augument=normal_aug_ext)\nimport random\n\n\ndef gen():\n while True:\n x = random.random()\n if x > 0.5:\n batch = next(train_generator_orig)\n else:\n batch = next(train_generator_ext1)\n yield batch\n\ntrain_generator = gen()\nvalidation_generator = data_generator.create_train(train_dataset_info[valid_indexes],\n batch_size, (SIZE, SIZE, 3), augument=val_aug, weighted_sample=False)\n\ncheckpoint = ModelCheckpoint(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix), monitor='val_loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True)\ntensorboard = TensorBoard(MODEL_PATH + 'logs{}'.format(fold_id) + '{}'.format(exp_suffix) + '/')\n# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, 
patience=3,\n# verbose=1, mode='auto', epsilon=0.0001)\n# early = EarlyStopping(monitor=\"val_loss\",\n# mode=\"min\",\n# patience=6)\n#f1_metric = F1Metric(validation_generator2,2*len(valid_indexes)//batch_size,batch_size,28) #2 times val because of val_aug\n\nnb_epochs = epochs[0]\nnb_cycles = 1\ninit_lr = 0.0005\ndef _cosine_anneal_schedule(t):\n\n cos_inner = np.pi * (t % (nb_epochs // nb_cycles))\n cos_inner /= nb_epochs// nb_cycles\n cos_out = np.cos(cos_inner) + 1\n return float(init_lr / 2 * cos_out)\n\nlr_schedule = LearningRateScheduler(_cosine_anneal_schedule,verbose=True)\n\n\n\n\ncallbacks_list = [lr_schedule, tensorboard]\n\n\n# warm up model\nmodel = create_model(\n input_shape=(SIZE, SIZE, 3),\n n_out=28)\n\n\n\nPOS_WEIGHT = 10 # multiplier for positive targets, needs to be tuned\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as tfb\n\n\ndef weighted_binary_crossentropy(target, output):\n \"\"\"\n Weighted binary crossentropy between an output tensor\n and a target tensor. POS_WEIGHT is used as a multiplier\n for the positive targets.\n\n Combination of the following functions:\n * keras.losses.binary_crossentropy\n * keras.backend.tensorflow_backend.binary_crossentropy\n * tf.nn.weighted_cross_entropy_with_logits\n \"\"\"\n # transform back to logits\n _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)\n #_epsilon = K.epsilon()\n output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)\n output = tf.log(output / (1 - output))\n # compute weighted loss\n loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,\n logits=output,\n pos_weight=POS_WEIGHT)\n return tf.reduce_mean(loss, axis=-1)\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom functools import reduce\n\ndef binaryRound(x):\n \"\"\"\n Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},\n using the straight through estimator for the gradient.\n \"\"\"\n g = tf.get_default_graph()\n\n with ops.name_scope(\"BinaryRound\") as name:\n with g.gradient_override_map({\"Round\": \"Identity\"}):\n return tf.round(x, name=name)\n\n # For Tensorflow v0.11 and below use:\n #with g.gradient_override_map({\"Floor\": \"Identity\"}):\n # return tf.round(x, name=name)\n\ndef brian_f1(y_true, y_pred):\n y_pred = binaryRound(y_pred)\n tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)\n tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)\n fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)\n fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)\n\n p = tp / (tp + fp + K.epsilon())\n r = tp / (tp + fn + K.epsilon())\n\n f1 = 2*p*r / (p+r+K.epsilon())\n f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)\n return K.mean(f1)\n\ndef brian_f1_loss(y_true, y_pred):\n return 1- brian_f1(y_true, y_pred)\n\n\ndef custom_loss(y_true, y_pred):\n\n return 4*weighted_binary_crossentropy(y_true,y_pred) - K.log(brian_f1(y_true,y_pred))\n\n# train all layers\nfrom keras.metrics import binary_accuracy\nmodel.compile(loss=custom_loss,\n optimizer=Adam(lr=5e-4),\n metrics=[binary_accuracy,brian_f1])\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=np.ceil(float(2*len(train_indexes)) / float(batch_size)),\n #validation_data=validation_generator,\n #validation_steps=2*np.ceil(float(len(valid_indexes)) / float(batch_size)),\n epochs=epochs[0],\n verbose=1,\n callbacks=callbacks_list)\nmodel.save_weights(MODEL_PATH + 'model_loss{}{}.h5'.format(fold_id,exp_suffix))\nmodel.load_weights(MODEL_PATH + 
'model_loss{}{}.h5'.format(fold_id,exp_suffix))\n\nsubmit = pd.read_csv('Christof/assets/sample_submission.csv')\ntta = 8\n\n\n\ndraw_predict = np.zeros((len(submit['Id']), 28))\n\nfor i, name in tqdm(enumerate(submit['Id'])):\n path = os.path.join(f'Christof/assets/test_rgb_1024_9crop/{tile}/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n images = [data_generator.augment(normal_aug, image) for _ in range(tta)]\n tta_predicts = model.predict(np.array(images))\n draw_predict[i] = np.median(tta_predicts,axis = 0)\n\nnp.save(MODEL_PATH + f'pred{fold_id}{exp_suffix}.npy',draw_predict)\n\n\n# custom thresholds to match lb proportions\nthresholds = np.linspace(0.95, 0.05, 101)\npred = draw_predict.copy()\nfor j in tqdm(range(pred.shape[1])):\n for t in thresholds:\n pred[:, j] = (draw_predict[:, j] > t).astype(int)\n prop = np.mean(pred[:, j])\n if prop >= desired[j]: break\n print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )\n\nprint(pred[:5].astype(int))\n\nlabel_predict = [np.arange(28)[score_predict == 1] for score_predict in pred]\nstr_predict_label = [' '.join(str(l) for l in lp) for lp in label_predict]\n\nsubmit['Predicted'] = str_predict_label\n# np.save('draw_predict_InceptionV3.npy', score_predict)\nsubmit.to_csv(MODEL_PATH + 'submission_loss{}_lb_dist_adjusted_8tta.csv'.format(exp_suffix), index=False)\n\nfrom Christof.utils import f1_sub\n\nbest_sub = pd.read_csv('ens18.csv')\nf1_sub(best_sub,submit)\n\nbest_sub = pd.read_csv('ens56d.csv')\nf1_sub(best_sub,submit)\n\n# submit2 = pd.read_csv('Christof/models/GAPNet/11/submission_loss_0_lb_dist_adjusted_8tta.csv')\n# f1_sub(best_sub,submit2)\n#\n# submit2 = pd.read_csv('Christof/models/GAPNet/11_tests_on_clr/submission_loss_1in20_0005_2c_lb_dist_adjusted_8tta.csv')\n# f1_sub(best_sub,submit2)\n#\n# submit2 = pd.read_csv('Christof/models/GAPNet/11_tests_on_clr/submission_loss_1in20_0005_lb_dist_adjusted_8tta.csv')\n# f1_sub(best_sub,submit2)",
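# --- Added illustration (not part of the original GAPNet script above) ---
# The GAPNet training loop drives the learning rate with `_cosine_anneal_schedule`, i.e. a
# half-cosine decay from `init_lr` towards 0 within each cycle, wrapped in a Keras
# LearningRateScheduler. The standalone function below reproduces that formula so the shape
# of the schedule can be printed or plotted without Keras; the parameter values simply
# mirror the script (20 epochs, 1 cycle, init_lr = 5e-4).
import numpy as np

def cosine_anneal(epoch, nb_epochs=20, nb_cycles=1, init_lr=5e-4):
    cycle_len = nb_epochs // nb_cycles
    cos_inner = np.pi * (epoch % cycle_len) / cycle_len
    return float(init_lr / 2 * (np.cos(cos_inner) + 1))

if __name__ == '__main__':
    lrs = [cosine_anneal(e) for e in range(20)]
    print(['%.2e' % lr for lr in lrs])   # starts at 5e-4 and decays towards 0
# In the script this function is passed to keras.callbacks.LearningRateScheduler, so the
# rate is re-evaluated once per epoch.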
"import os, sys\n#os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport skimage.io\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport PIL\nfrom PIL import Image\nimport cv2\nfrom sklearn.utils import class_weight, shuffle\nfrom ml_stratifiers import MultilabelStratifiedKFold\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom classification_models.resnet.models import ResNet34\nimport albumentations as A\n\nMODEL_PATH = 'Christof/models/ResNet34/19/'\nexp_suffix = '_base'\n\nSIZE = 256\n\n# Load dataset info\npath_to_train = 'Christof/assets/train_rgb_256/'\ndata = pd.read_csv('Christof/assets/train.csv')\n\nnormal_aug = A.Compose([A.Rotate((0,30),p=0.75),\n A.RandomRotate90(p=1),\n A.HorizontalFlip(p=0.5),\n A.RandomBrightness(0.05),\n A.RandomContrast(0.05),\n ])\n\nval_aug = A.HorizontalFlip(p=0.5)\n\ntrain_dataset_info = []\nfor name, labels in zip(data['Id'], data['Target'].str.split(' ')):\n train_dataset_info.append({\n 'path': os.path.join(path_to_train, name),\n 'labels': np.array([int(label) for label in labels])})\ntrain_dataset_info = np.array(train_dataset_info)\n\ncounts = np.zeros(28)\nfor item in train_dataset_info:\n for l in item['labels']:\n counts[l] = counts[l] + 1\n\ncounts = counts / len(train_dataset_info)\nrare_classes = np.where(counts < 0.005)\n\n#rare_dataset_info = np.array([item for item in train_dataset_info if np.isin(item['labels'], rare_classes).any()])\n#train_dataset_info = rare_dataset_info\n\n\nfrom classification_models.resnet import preprocess_input\nclass data_generator:\n\n @staticmethod\n def create_train(dataset_info, batch_size, shape, augument=None, oversample_factor = 0):\n assert shape[2] == 3\n\n\n\n if oversample_factor > 0:\n\n rare_dataset_info = np.array([item for item in dataset_info if np.isin(item['labels'], rare_classes).any()])\n #rare_dataset_info = shuffle(rare_dataset_info)\n for i in range(oversample_factor):\n #dataset_info\n dataset_info = np.append(dataset_info,rare_dataset_info)\n while True:\n dataset_info = shuffle(dataset_info)\n for start in range(0, len(dataset_info), batch_size):\n end = min(start + batch_size, len(dataset_info))\n batch_images = []\n X_train_batch = dataset_info[start:end]\n batch_labels = np.zeros((len(X_train_batch), 28))\n for i in range(len(X_train_batch)):\n image = data_generator.load_image(X_train_batch[i]['path'], shape)\n #image = preprocess_input(image)\n #rare = np.isin(X_train_batch[i]['labels'], rare_classes).any()\n\n if augument:\n image = data_generator.augment(augument,image)\n\n batch_images.append(image)\n batch_labels[i][X_train_batch[i]['labels']] = 1\n yield np.array(batch_images, np.float32), batch_labels\n\n @staticmethod\n def load_image(path, shape):\n image = cv2.imread(path + '.png', cv2.IMREAD_UNCHANGED)\n return image\n\n @staticmethod\n def augment(aug,image):\n image_aug = aug(image=image)['image']\n return image_aug\n\n @staticmethod\n def heavy_augment(image):\n augment_img = iaa.Sequential([\n iaa.OneOf([\n iaa.Affine(scale=(0.5,2.0)),\n iaa.Affine(shear=15),\n iaa.Affine(rotate=0),\n iaa.Affine(rotate=35),\n iaa.Affine(rotate=90),\n iaa.Affine(rotate=180),\n iaa.Affine(rotate=270),\n iaa.Affine(translate_percent=0.1),\n iaa.Fliplr(0.5),\n iaa.Flipud(0.5),\n iaa.Noop()\n ])], random_order=True)\n\n image_aug = augment_img.augment_image(image)\n return image_aug\n\nfrom 
keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import metrics\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nimport keras\nfrom keras.models import Model\n\ndef create_model(input_shape, n_out):\n input_tensor = Input(shape=(SIZE, SIZE, 3))\n #bn = BatchNormalization()(input_tensor)\n #conv = Conv2D(3,(3,3),padding='same',activation='relu')(bn)\n base_model = ResNet34(include_top=False,\n weights='imagenet',\n input_shape=(SIZE, SIZE, 3),input_tensor=input_tensor)\n\n x = base_model.output\n x = Conv2D(32, kernel_size=(1, 1), activation='relu')(x)\n x = Flatten()(x)\n x = Dropout(0.5)(x)\n x = Dense(1024, activation='relu')(x)\n x = Dropout(0.5)(x)\n output = Dense(n_out, activation='sigmoid')(x)\n model = Model(input_tensor, output)\n\n # transfer imagenet weights\n #res_img = ResNet34(include_top=False, weights='imagenet', input_shape=(SIZE, SIZE, 3))\n #offset = 2\n #for i, l in enumerate(base_model.layers[offset+1:]):\n # l.set_weights(res_img.layers[i + 1].get_weights())\n\n return model\n\n\n\n# create callbacks list\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\nfrom keras_callbacks import F1Metric\n#from keras_metrics import f1, f1_02\n#from keras_losses import f1_loss\nepochs = [2,150]\nbatch_size = 32\n\n\n# split data into train, valid\n\nmskf = MultilabelStratifiedKFold(n_splits=5,shuffle=True,random_state=18)\n\ny = np.zeros((len(train_dataset_info), 28))\nfor i in range(len(train_dataset_info)):\n y[i][train_dataset_info[i]['labels']] = 1\nmskf.get_n_splits(train_dataset_info, y)\nkf = mskf.split(train_dataset_info, y)\nfold_id = 0\ntrain_indexes, valid_indexes = next(kf)\n\ntrain_generator = data_generator.create_train(train_dataset_info[train_indexes],\n batch_size, (SIZE, SIZE, 3), augument=normal_aug, oversample_factor=0)\nvalidation_generator = data_generator.create_train(train_dataset_info[valid_indexes],\n batch_size, (SIZE, SIZE, 3), augument=val_aug, oversample_factor=0)\nvalidation_generator2 = data_generator.create_train(train_dataset_info[valid_indexes],\n batch_size, (SIZE, SIZE, 3), augument=val_aug, oversample_factor=0)\n\ncheckpoint = ModelCheckpoint(MODEL_PATH + 'model_f1all{}.h5'.format(exp_suffix), monitor='val_f1_all', verbose=1,\n save_best_only=True, mode='max', save_weights_only=True)\ncheckpoint2 = ModelCheckpoint(MODEL_PATH + 'model_wbce{}.h5'.format(exp_suffix), monitor='val_loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True)\ntensorboard = TensorBoard(MODEL_PATH + 'logs{}'.format(fold_id) + '{}'.format(exp_suffix) + '/')\n# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,\n# verbose=1, mode='auto', epsilon=0.0001)\n# early = EarlyStopping(monitor=\"val_loss\",\n# mode=\"min\",\n# patience=6)\nf1_metric = F1Metric(validation_generator2,2*len(valid_indexes)//batch_size,batch_size,28) #2 times val because of val_aug\ncallbacks_list = [f1_metric, checkpoint, checkpoint2,tensorboard]\n\n\n# warm up model\nmodel = create_model(\n input_shape=(SIZE, SIZE, 3),\n n_out=28)\n\nfor layer in model.layers:\n layer.trainable = False\n#model.layers[2].trainable = True\nmodel.layers[-1].trainable = True\nmodel.layers[-2].trainable = 
True\nmodel.layers[-3].trainable = True\nmodel.layers[-4].trainable = True\nmodel.layers[-5].trainable = True\nmodel.layers[-6].trainable = True\n\n\nmodel.compile(\n loss='binary_crossentropy',\n optimizer=Adam(1e-03),\n metrics=['acc'])\n# model.summary()\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=np.ceil(float(len(train_indexes)) / float(batch_size)),\n validation_data=validation_generator,\n validation_steps=np.ceil(float(len(valid_indexes)) / float(batch_size)),\n epochs=epochs[0],\n verbose=1)\n\n\n\nPOS_WEIGHT = 10 # multiplier for positive targets, needs to be tuned\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as tfb\n\n\ndef weighted_binary_crossentropy(target, output):\n \"\"\"\n Weighted binary crossentropy between an output tensor\n and a target tensor. POS_WEIGHT is used as a multiplier\n for the positive targets.\n\n Combination of the following functions:\n * keras.losses.binary_crossentropy\n * keras.backend.tensorflow_backend.binary_crossentropy\n * tf.nn.weighted_cross_entropy_with_logits\n \"\"\"\n # transform back to logits\n _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)\n #_epsilon = K.epsilon()\n output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)\n output = tf.log(output / (1 - output))\n # compute weighted loss\n loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,\n logits=output,\n pos_weight=POS_WEIGHT)\n return tf.reduce_mean(loss, axis=-1)\n\ndef soft_f1_loss(logits, labels):\n __small_value=1e-6\n beta = 1\n #batch_size = logits.size()[0]\n p = logits\n l = labels\n num_pos = K.sum(p, 1) + __small_value\n num_pos_hat = K.sum(l, 1) + __small_value\n tp = K.sum(l * p, 1)\n precise = tp / num_pos\n recall = tp / num_pos_hat\n fs = (1 + beta * beta) * precise * recall / (beta * beta * precise + recall + __small_value)\n loss = K.mean(fs,axis=0)\n return (1 - loss)\n\n\n\n# train all layers\nfrom keras.metrics import binary_accuracy\nfor layer in model.layers:\n layer.trainable = True\nmodel.compile(loss=weighted_binary_crossentropy,\n optimizer=Adam(lr=1e-4),\n metrics=[binary_accuracy,soft_f1_loss])\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=np.ceil(float(len(train_indexes)) / float(batch_size)),\n validation_data=validation_generator,\n validation_steps=2*np.ceil(float(len(valid_indexes)) / float(batch_size)),\n epochs=epochs[1],\n verbose=1,\n callbacks=callbacks_list)\n\nmodel.load_weights(MODEL_PATH + 'model{}.h5'.format(exp_suffix))\npreds = np.zeros(shape=(len(valid_indexes),28))\ny_true= np.zeros(shape=(len(valid_indexes),28))\nfor i, info in tqdm(enumerate(train_dataset_info[valid_indexes])):\n image = data_generator.load_image(info['path'], (SIZE, SIZE, 3))\n preds[i] = model.predict(image[np.newaxis])[0]\n y_true[i][info['labels']]=1\n\nfrom sklearn.metrics import f1_score\n\nindividual_f1_scores = np.zeros(28)\nfor i in range(28):\n individual_f1_scores[i] = f1_score(y_true[:,i],preds[:,i])\nindividual_f1_scores = pd.DataFrame(individual_f1_scores,columns=['f1'])\nindividual_f1_scores.to_csv(MODEL_PATH + f'summary_f1_{exp_suffix}.csv',index=False)\n\n\n\nf1_res = f1_score(y_true, preds, average='macro')\nf1_res_05 = f1_score(y_true, preds_05, average='macro')\nprint(f1_res)\nprint(f1_res_05)\nSUBMISSION = True\n\nthresholds = np.linspace(0, 1, 1000)\nscore = 0.0\ntest_threshold=0.5*np.ones(28)\nbest_threshold=np.zeros(28)\nbest_val = np.zeros(28)\nfor i in range(28):\n for threshold in thresholds:\n test_threshold[i] = threshold\n max_val = np.max(preds)\n 
val_predict = (preds > test_threshold)\n score = f1_score(y_true > 0.5, val_predict, average='macro')\n if score > best_val[i]:\n best_threshold[i] = threshold\n best_val[i] = score\n\n print(\"Threshold[%d] %0.6f, F1: %0.6f\" % (i,best_threshold[i],best_val[i]))\n test_threshold[i] = best_threshold[i]\n\nprint(\"Best threshold: \")\nprint(best_threshold)\nprint(\"Best f1:\")\nprint(best_val)\n\n\n\nif SUBMISSION:\n\n submit = pd.read_csv('Christof/assets/sample_submission.csv')\n predicted = []\n draw_predict = []\n\n for name in tqdm(submit['Id']):\n path = os.path.join('Christof/assets/test_rgb_256/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n score_predict = model.predict(image[np.newaxis])[0]\n draw_predict.append(score_predict)\n\n thresh = max(score_predict[np.argsort(score_predict, axis=-1)[-5]],0.2)\n label_predict = np.arange(28)[score_predict >= thresh]\n str_predict_label = ' '.join(str(l) for l in label_predict)\n predicted.append(str_predict_label)\n\n submit['Predicted'] = predicted\n #np.save('draw_predict_InceptionV3.npy', score_predict)\n submit.to_csv(MODEL_PATH + 'submission{}_{:.4}.csv'.format(exp_suffix,f1_res), index=False)\n\n predicted = []\n draw_predict = []\n\n for name in tqdm(submit['Id']):\n path = os.path.join('Christof/assets/test_rgb_256/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n score_predict = model.predict(image[np.newaxis])[0]\n draw_predict.append(score_predict)\n\n thresh = max(score_predict[np.argsort(score_predict, axis=-1)[-5]],0.5)\n label_predict = np.arange(28)[score_predict >= thresh]\n str_predict_label = ' '.join(str(l) for l in label_predict)\n predicted.append(str_predict_label)\n\n submit['Predicted'] = predicted\n #np.save('draw_predict_InceptionV3.npy', score_predict)\n submit.to_csv(MODEL_PATH + 'submission{}_{:.4}.csv'.format(exp_suffix,f1_res_05), index=False)\n\n predicted = []\n draw_predict = []\n\n for name in tqdm(submit['Id']):\n path = os.path.join('Christof/assets/test_rgb_256/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n score_predict = model.predict(image[np.newaxis])[0]\n draw_predict.append(score_predict)\n\n #thresh = max(score_predict[np.argsort(score_predict, axis=-1)[-5]],0.5)\n label_predict = np.arange(28)[score_predict >= best_threshold]\n str_predict_label = ' '.join(str(l) for l in label_predict)\n predicted.append(str_predict_label)\n\n submit['Predicted'] = predicted\n #np.save('draw_predict_InceptionV3.npy', score_predict)\n submit.to_csv(MODEL_PATH + 'submission{}_best_val_{:.4}-{:.4}.csv'.format(exp_suffix,best_val[0],best_val[-1]), index=False)\n\n eps = 0.004\n desired = {\n 0: 0.36239782,\n 1: 0.043841336,\n 2: 0.075268817,\n 3: 0.059322034,\n 4: 0.075268817,\n 5: 0.075268817,\n 6: 0.043841336,\n 7: 0.075268817,\n 8: eps,\n 9: eps,\n 10: eps,\n 11: 0.043841336,\n 12: 0.043841336,\n 13: 0.014198783,\n 14: 0.043841336,\n 15: eps,\n 16: 0.028806584,\n 17: 0.014198783,\n 18: 0.028806584,\n 19: 0.059322034,\n 20: eps,\n 21: 0.126126126,\n 22: 0.028806584,\n 23: 0.075268817,\n 24: eps,\n 25: 0.222493888,\n 26: 0.028806584,\n 27: eps\n }\n\n\n draw_predict = np.zeros((len(submit['Id']),28))\n\n for i,name in tqdm(enumerate(submit['Id'])):\n path = os.path.join('Christof/assets/test_rgb_256/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n draw_predict[i] = model.predict(image[np.newaxis])[0]\n\n\n\n # custom thresholds to match lb proportions\n thresholds = np.linspace(0.95, 0.05, 101)\n pred = draw_predict.copy()\n 
for j in tqdm(range(pred.shape[1])):\n for t in thresholds:\n pred[:, j] = (draw_predict[:, j] > t).astype(int)\n prop = np.mean(pred[:, j])\n if prop >= desired[j]: break\n print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )\n\n print(pred[:5].astype(int))\n\n # thresh = max(score_predict[np.argsort(score_predict, axis=-1)[-5]],0.5)\n label_predict = [np.arange(28)[score_predict ==1] for score_predict in pred]\n str_predict_label = [' '.join(str(l) for l in lp) for lp in label_predict]\n\n submit['Predicted'] = str_predict_label\n #np.save('draw_predict_InceptionV3.npy', score_predict)\n submit.to_csv(MODEL_PATH + 'submission{}_lb_dist_adjusted.csv'.format(exp_suffix), index=False)\n",
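The per-class threshold sweep earlier in this script tunes one class at a time while the other 27 thresholds stay fixed and re-scores the full macro F1 on each candidate. A minimal standalone sketch of the simpler variant, tuning each class against its own binary F1 on synthetic arrays (probs and labels below are illustrative placeholders, not competition data):

# Minimal sketch, not from the original script: pick one threshold per class
# by maximizing that class's binary F1 independently, on synthetic data.
import numpy as np
from sklearn.metrics import f1_score

rng = np.random.RandomState(0)
n_samples, n_classes = 500, 28
probs = rng.rand(n_samples, n_classes)                        # stand-in model outputs
labels = (rng.rand(n_samples, n_classes) < 0.1).astype(int)   # stand-in ground truth

thresholds = np.linspace(0.05, 0.95, 19)
best_t = np.zeros(n_classes)
for c in range(n_classes):
    scores = [f1_score(labels[:, c], (probs[:, c] >= t).astype(int)) for t in thresholds]
    best_t[c] = thresholds[int(np.argmax(scores))]
print(best_t)

Tuning each class independently is cheaper; the coordinate-style sweep used above can additionally capture interactions between classes through the shared macro-F1 objective.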
"import sys\nsys.path.insert(0, '..')\nimport numpy as np\nimport pandas as pd\nimport cv2\nfrom PIL import Image\nimport imagehash\nfrom tqdm import tqdm\nimport pickle\nimport mlcrate as mlc\n\nfrom config.config import *\nfrom utils.common_util import *\n\ndef train_imread(img_dir, img_id, color):\n img = Image.open(opj(img_dir, '%s_%s.png' % (img_id, color)))\n return img\n\ndef external_imread(img_dir, img_id, color):\n img = cv2.imread(opj(img_dir, '%s_%s.jpg' % (img_id, color)), cv2.IMREAD_GRAYSCALE)\n img = Image.fromarray(img)\n return img\n\n# https://www.kaggle.com/c/human-protein-atlas-image-classification/discussion/72534\ndef generate_hash(img_dir, meta, colors, dataset='train', imread_func=None, is_update=False):\n meta = meta.copy()\n cache_fname = opj(DATA_DIR, 'meta', '%s_hash_maps.pkl' % dataset)\n if ope(cache_fname) and not is_update:\n with open(cache_fname, 'rb') as dbfile:\n hash_maps = pickle.load(dbfile)\n else:\n hash_maps = {}\n for color in colors:\n hash_maps[color] = []\n for idx in tqdm(range(len(meta)), desc='train %s' % color):\n img = imread_func(img_dir, meta.iloc[idx][ID], color)\n hash = imagehash.phash(img)\n hash_maps[color].append(hash)\n\n with open(cache_fname, 'wb') as dbfile:\n pickle.dump(hash_maps, dbfile)\n\n for color in colors:\n meta[color] = hash_maps[color]\n\n return meta\n\ndef calc_hash(params):\n color, th, base_external_hash, base_train_hash, train_ids, external_ids = params\n\n external_hash = base_external_hash.reshape(1, -1) # 1*m\n\n train_idxes_list = []\n external_idxes_list = []\n hash_list = []\n\n step = 5\n for train_idx in tqdm(range(0, len(base_train_hash), step), desc=color):\n train_hash = base_train_hash[train_idx:train_idx + step].reshape(-1, 1) # n*1\n hash = train_hash - external_hash # n*m\n train_idxes, external_idxes = np.where(hash <= th)\n hash = hash[train_idxes, external_idxes]\n\n train_idxes = train_idxes + train_idx\n\n train_idxes_list.extend(train_idxes.tolist())\n external_idxes_list.extend(external_idxes.tolist())\n hash_list.extend(hash.tolist())\n\n df = pd.DataFrame({\n 'Train': train_ids[train_idxes_list],\n 'Extra': external_ids[external_idxes_list],\n 'Sim%s' % color[:1].upper(): hash_list\n })\n return df\n\nif __name__ == '__main__':\n print('%s: calling main function ... 
' % os.path.basename(__file__))\n\n threshold = 12\n\n # train set images\n train_img_dir = opj(DATA_DIR, 'train', 'images')\n train_meta = pd.read_csv(opj(DATA_DIR, 'meta', 'train_meta.csv'))\n\n # external images\n external_img_dir = opj(DATA_DIR, 'train', 'external_v18_512')\n external_meta = pd.read_csv(opj(DATA_DIR, 'meta', 'external_meta.csv'))\n\n colors = ['red', 'green', 'blue']\n train_meta = generate_hash(train_img_dir, train_meta, colors,\n dataset='train', imread_func=train_imread, is_update=False)\n external_meta = generate_hash(external_img_dir, external_meta, colors,\n dataset='external', imread_func=external_imread, is_update=False)\n\n pool = mlc.SuperPool(3)\n params = []\n for color in colors:\n base_tran_hash = train_meta[color].values\n base_external_hash = external_meta[color].values\n\n train_ids = train_meta[ID].values\n external_ids = external_meta[ID].values\n\n params.append((color, threshold, base_external_hash, base_tran_hash, train_ids, external_ids))\n df_list = pool.map(calc_hash, params)\n\n df = None\n for temp_df, color in zip(df_list, colors):\n if df is None:\n df = temp_df\n else:\n df = pd.merge(df, temp_df, on=['Train', 'Extra'], how='inner')\n\n print(df.shape)\n df.to_csv(opj(DATA_DIR, 'meta', 'train_match_external.csv.gz'), index=False, compression='gzip')\n\n print('\\nsuccess!')\n",
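The matching script above relies on imagehash's perceptual hash, where subtracting two hashes yields their Hamming distance. A minimal sketch of that comparison for a single pair of images (the file names are assumptions for illustration only):

# Minimal sketch (file names are assumptions): two images count as likely
# duplicates when the Hamming distance between their perceptual hashes is small.
from PIL import Image
import imagehash

THRESHOLD = 12  # same cut-off as the script above

hash_a = imagehash.phash(Image.open('image_a.png'))
hash_b = imagehash.phash(Image.open('image_b.png'))
distance = hash_a - hash_b   # imagehash overloads '-' as the Hamming distance
print(distance, distance <= THRESHOLD)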
"def sf_stack():\n import importlib\n import sf_utils; importlib.reload(sf_utils)\n from sf_utils import *\n\n test_meta = pd.read_csv('input/test_meta.csv')\n test_meta.shape\n\n train_val_meta = pd.read_csv('input/train_meta.csv')\n new_data_meta = pd.read_csv('input/new_data_meta.csv')\n new_data_meta.columns = train_val_meta.columns\n val_meta = pd.concat([new_data_meta, train_val_meta], sort=False)\n val_meta.shape\n\n val_meta.head()\n model_stats = imagenet_stats\n\n import pretrainedmodels\n import pretrainedmodels.utils as pqutils\n #print(pretrainedmodels.__dict__.keys())\n _model_name = 'se_resnext50_32x4d'\n model = pretrainedmodels.__dict__[_model_name](num_classes=1000, pretrained='imagenet')\n tf_img = pqutils.TransformImage(model)\n model_stats = (tf_img.__dict__['mean'], tf_img.__dict__['std'])\n model_stats\n\n\n data_dir = 'input/'\n valid_df = pd.read_csv('input/' + 'val_id.csv', header=None, names=['idx','Id'])\n train_df = pd.read_csv(data_dir + 'train.csv')\n len(train_df)\n\n\n from PIL import Image as QImage\n ids = []\n labels = []\n def file_jpg_to_png(path):\n global ids\n gclasses = set(list(range(28))) - set([0,25])\n f1 = 'input/new_data/' + path + '.jpg'\n f2 = 'input/train_png/' + path + '.png'\n xs = path.split('_')\n q = xs.index('classes') + 1\n xs = xs[q:]\n if len(gclasses & set([int(x) for x in xs])) == 0:\n return\n xs = ' '.join(xs)\n if not os.path.isfile(f2):\n try:\n im = QImage.open(f1)\n im = im.resize((512, 512), QImage.NEAREST)\n im.save(f2)\n ids.append(path)\n labels.append(xs)\n except:\n pass\n else:\n ids.append(path)\n labels.append(xs)\n\n need_to_prepare_extra = False\n if need_to_prepare_extra:\n for filename in tqdm_notebook(os.listdir('input/new_data/'), total = 68628):\n if filename.endswith(\".jpg\"):\n file_jpg_to_png(filename[:-4])\n\n\n if need_to_prepare_extra:\n data_dir = 'input/'\n xtra_data = pd.DataFrame()\n xtra_data['Id'] = ids\n xtra_data['Target'] = labels\n xtra_data.to_csv(data_dir + 'xtra_train.csv', index=False)\n xtra_data.head(n=3)\n\n\n test_matches = pd.read_csv('test_matches.csv')\n test_matches.Extra = test_matches.Extra.apply(lambda x : \"_\".join(x.split(\"_\")[2:]))\n test_matches.head()\n\n xtra_data = pd.read_csv(data_dir + 'xtra_train.csv')\n xtra_data['Extra'] = xtra_data.Id.apply(lambda x : x[:x.find(\"_classes\")])\n xtra_data.head()\n\n xtra_matches_ids = test_matches.Extra.values.tolist()\n\n xtra_data_train = xtra_data.loc[~xtra_data.Extra.isin(xtra_matches_ids),['Id','Target']].reset_index(drop=True)\n xtra_data_valid = xtra_data.loc[xtra_data.Extra.isin(xtra_matches_ids),['Id','Target']].reset_index(drop=True)\n\n\n data = xtra_data_train\n labels = np.zeros((data.shape[0], 28), dtype=np.int32)\n if \"Target\" in data:\n for i, lbls in data['Target'].str.split().iteritems():\n for j in map(int, lbls):\n labels[i, j] = 1\n for j in range(28):\n print(j,'\\t',name_label_dict[j], '\\t', labels[:,j].sum(), '\\t', labels[:,j].sum()/labels.shape[0])\n\n xtra_matches_ids = ['1054_E4_1_classes_25_16_0','1762_G4_5_classes_27','1335_C6_2_classes_3',\n '935_D5_2_classes_22_0','27_H9_2_classes_10','669_D8_1_classes_16_2',\n '1178_D4_2_classes_19_16_14','791_A9_1_classes_10_9','759_F9_9_classes_25_21_19_16',\n '1283_F10_2_classes_16_0','688_E7_10_classes_23','1772_F9_7_classes_25_17',\n '454_E5_1_classes_14_0','1020_C5_3_classes_23','1386_G4_2_classes_8',\n '681_G8_5_classes_13','1609_C4_2_classes_16_0','690_D3_5_classes_22_21_1_0',\n 
'1245_B2_2_classes_21_0','1335_C10_4_classes_16_0','693_A11_3_classes_23',\n '1139_A12_4_classes_23','916_F8_1_classes_25_2_0','694_C1_2_classes_18_1',\n '929_B8_1_classes_25_19','340_F5_3_classes_13','138_B12_1_classes_8',\n '932_G11_2_classes_25_16','28_H9_1_classes_10','924_F12_1_classes_27',\n '682_F12_2_classes_25_4','1147_D3_13_classes_16_0','346_A5_1_classes_12',\n '616_F1_4_classes_8','73_A10_1_classes_27_25','663_A9_2_classes_16_14',\n '859_C8_4_classes_16_14','933_C10_4_classes_22_21','1207_B10_7_classes_12',\n '694_F10_1_classes_25_21','908_E3_1_classes_4','1758_C9_4_classes_17_2',\n '1335_D2_2_classes_2_0','929_H2_2_classes_23','1717_G8_34_classes_25_17',\n '1150_H4_7_classes_13','1054_E4_2_classes_25_16_0','504_B1_3_classes_25_16_0',\n '747_B5_4_classes_10_9','1020_B1_7_classes_23_5','918_H10_2_classes_25_15',\n '532_H3_1_classes_25_16_0','757_C6_3_classes_16_2','1346_H6_3_classes_16_5_0','496_D1_1_classes_16_0','1042_C3_3_classes_27','929_B12_1_classes_3','684_C4_2_classes_23_0','696_C9_5_classes_25_21_0','1144_A10_4_classes_2','846_A8_2_classes_16_14','903_F12_2_classes_23_5','1264_G1_1_classes_27','925_H8_2_classes_1_0','121_C6_2_classes_10_9','1657_E10_3_classes_25_17','932_G11_1_classes_25_16','704_G4_1_classes_25_12','1039_C3_2_classes_19_16','906_H7_2_classes_25_6','19_H7_2_classes_8','725_G10_2_classes_16_14','681_B2_4_classes_4','697_A6_4_classes_19_0','1581_B12_2_classes_16_14','926_F7_2_classes_5_0','1770_D2_4_classes_21_17_4','1037_F4_3_classes_19','1413_F11_6_classes_21_16','694_A2_1_classes_2','1049_D11_2_classes_25_16_0','1276_C3_2_classes_21_0','346_B12_3_classes_14_0','1773_G12_3_classes_16_12','1183_F4_2_classes_15','1158_H11_8_classes_16_5','380_C6_1_classes_16_0','792_B6_7_classes_13_0','682_C9_6_classes_25_12_2','906_A9_4_classes_20_0','400_D3_2_classes_25_7','1237_G1_4_classes_21_6','793_B1_1_classes_25_22_0','1308_A5_4_classes_5','800_E1_1_classes_16_14','1421_G5_7_classes_17','906_A9_6_classes_20_0','1245_B2_3_classes_21_0','626_D7_6_classes_25_21_12','344_G2_4_classes_11','901_E12_1_classes_25_6_2','1050_F6_6_classes_16_0','240_G8_1_classes_8','933_C2_1_classes_23_2_0','556_B9_1_classes_25_18_0','1335_C10_2_classes_16_0','1125_F6_3_classes_4','1495_F7_3_classes_7_0','694_C1_1_classes_18_1','918_B3_4_classes_14','1762_E6_5_classes_7','915_C6_5_classes_4','820_G4_3_classes_10_9','927_F12_12_classes_18_0','901_D10_2_classes_12_0','1642_G7_34_classes_25_16','928_G1_2_classes_14_7','682_G9_1_classes_7_0','903_F2_1_classes_2_0','1645_E1_32_classes_16_14','685_G10_5_classes_12_0','927_A9_10_classes_25_5','957_G6_4_classes_16','757_C6_2_classes_16_2','1213_C4_2_classes_4','909_A6_1_classes_2','694_D6_2_classes_1_0','480_D6_3_classes_25_16','1050_F1_3_classes_25_16_0','692_A1_5_classes_25_14_0','1772_H1_5_classes_18_17_16_0','991_G6_7_classes_10_9','782_F8_2_classes_25_16','693_H4_1_classes_7','1259_A11_4_classes_19_16','1414_D12_2_classes_21_0','1139_D5_5_classes_5','930_H3_2_classes_1','901_G9_5_classes_25_19_0','1754_G2_34_classes_5','353_A9_1_classes_21_13','1179_H7_1_classes_25_16_0','1423_A4_2_classes_16_14','686_F4_2_classes_22_21','1693_E1_2_classes_23_16','400_H8_2_classes_23','1680_G4_4_classes_16','935_G3_1_classes_5','838_E8_1_classes_3','1030_D8_2_classes_7_0','684_D12_4_classes_18','812_C10_2_classes_13_0','1416_D10_6_classes_21_16_0','1293_E3_2_classes_1_0','480_D6_2_classes_25_16','700_H6_2_classes_25_2','1773_E10_4_classes_16_0','611_E10_1_classes_25_13','346_B12_4_classes_14_0','523_A9_4_classes_5','1581_B12_3_classes_16_14','684_D8_6_classes_2
5_12_0','927_F12_11_classes_18_0','353_E4_2_classes_5','556_C1_5_classes_25_22_16','1179_H7_2_classes_25_16_0','1711_B12_3_classes_26_21_4','449_G8_2_classes_4_2','544_A8_5_classes_22_21_7','1772_H1_3_classes_18_17_16_0','1772_G2_6_classes_25_19_16_0','909_C11_2_classes_2_0','930_C12_1_classes_18_14_6','690_C10_2_classes_13','1009_B6_2_classes_10_9','757_E10_5_classes_12','88_D7_2_classes_8','383_E8_7_classes_25_17','1432_F2_2_classes_6','505_C10_1_classes_25_15','1104_E7_2_classes_16_14','699_E8_1_classes_1','1213_C4_3_classes_4','690_H5_1_classes_4','1169_D3_6_classes_16_0','686_F4_1_classes_22_21','532_D1_1_classes_16_0','896_G8_3_classes_5_0','934_G4_3_classes_21','344_G2_1_classes_11','369_C9_1_classes_18_14_0','682_F12_1_classes_25_4','683_E1_2_classes_25_1_0','697_G3_6_classes_13_7','1772_A6_7_classes_5','933_C4_6_classes_5','1231_F9_5_classes_7','802_D5_9_classes_16_0','682_G10_1_classes_7','850_C1_9_classes_21_0','929_B12_2_classes_3','1339_D3_3_classes_2_1','858_D4_2_classes_4','334_B12_2_classes_4','622_F1_7_classes_8','908_G5_2_classes_2_0','778_G6_2_classes_25_16_14','1027_C4_1_classes_7','886_C10_5_classes_23_0','807_C2_3_classes_4','1314_D2_2_classes_25_16_0','1770_B5_1_classes_21_16_11','1105_F10_2_classes_16_0','1283_B2_10_classes_16_0','583_E11_1_classes_25_16','820_G4_7_classes_10_9','928_H3_2_classes_14_0','970_H1_4_classes_25_18','1751_A7_32_classes_27','701_H10_2_classes_25_14','1773_B6_11_classes_23_17_16','1736_G7_31_classes_25_16','928_H3_1_classes_14_0','1645_E5_34_classes_17','539_B3_1_classes_25_21_0','683_E1_1_classes_25_1_0','484_G6_3_classes_22','928_A1_1_classes_4','1773_B6_7_classes_23_17_16','1255_A3_4_classes_16_0','698_C6_2_classes_25_21_4','1773_D5_6_classes_17','681_G8_4_classes_13','935_H11_2_classes_22_0','1125_B9_4_classes_25_7','698_F11_1_classes_13_0','344_F7_1_classes_25_21','906_C11_1_classes_4','1656_F5_2_classes_19_17','1761_A10_3_classes_23_17_14','1772_H5_7_classes_17_7','910_B8_1_classes_12_0','1283_F10_4_classes_16_0','508_C10_1_classes_25_15','681_B2_3_classes_4','868_E8_2_classes_17_16_0','1339_B9_2_classes_16_0','856_A2_4_classes_2_0','700_C3_6_classes_21','869_B3_1_classes_16_0','701_B9_2_classes_21_13_0','1178_F9_6_classes_16_0','542_G1_1_classes_11_2_0']\n\n xtra_data_train = xtra_data.loc[~xtra_data.Id.isin(xtra_matches_ids),['Id','Target']].reset_index(drop=True)\n xtra_data_valid = xtra_data.loc[xtra_data.Id.isin(xtra_matches_ids),['Id','Target']].reset_index(drop=True)\n xtra_data_train.shape\n\n exclude_valid = ['5ae3db3a-bbc4-11e8-b2bc-ac1f6b6435d0',\n 'e6d0b648-bbbc-11e8-b2ba-ac1f6b6435d0',\n '3202385a-bbca-11e8-b2bc-ac1f6b6435d0',\n '0cf36c82-bbca-11e8-b2bc-ac1f6b6435d0',\n '7cb0006e-bbaf-11e8-b2ba-ac1f6b6435d0',\n '87b77dd2-bba2-11e8-b2b9-ac1f6b6435d0',\n '62c88efa-bbc8-11e8-b2bc-ac1f6b6435d0',\n '44d819c2-bbbb-11e8-b2ba-ac1f6b6435d0',\n 'b1ca2b40-bbbd-11e8-b2ba-ac1f6b6435d0',\n '8cd67266-bbbe-11e8-b2ba-ac1f6b6435d0',\n 'cead83ec-bb9a-11e8-b2b9-ac1f6b6435d0',\n 'a166d11a-bbca-11e8-b2bc-ac1f6b6435d0',\n '91a0a67e-bb9e-11e8-b2b9-ac1f6b6435d0',\n '2be24582-bbb1-11e8-b2ba-ac1f6b6435d0']\n exclude_train = ['7138c4aa-bb9b-11e8-b2b9-ac1f6b6435d0',\n '8a10533e-bba6-11e8-b2ba-ac1f6b6435d0',\n 'be92e108-bbb5-11e8-b2ba-ac1f6b6435d0',\n 'abfa727e-bba4-11e8-b2ba-ac1f6b6435d0',\n '2384acac-bbae-11e8-b2ba-ac1f6b6435d0',\n 'c7a7a462-bbb1-11e8-b2ba-ac1f6b6435d0',\n '559f7ce0-bbb2-11e8-b2ba-ac1f6b6435d0']\n\n rem_val = pd.read_csv('input/remove_from_val.csv',header=None)\n rem_val.columns = ['Id']\n rem_val.head()\n\n data_dir = 'input/'\n 
valid_df = pd.read_csv('input/' + 'val_id.csv', header=None, names=['idx','Id'])\n valid_df = valid_df.loc[~valid_df.Id.isin(exclude_valid),:]\n train_df = pd.read_csv(data_dir + 'train.csv')\n train_df = train_df.loc[~train_df.Id.isin(exclude_train),:]\n\n test_df = pd.read_csv('input/' + \"sample_submission.csv\")\n train = train_df.loc[~train_df.Id.isin(valid_df.Id.values.tolist()),:].reset_index(drop=True)\n train = pd.concat([train,xtra_data_train], axis=0, sort=False)\n valid = train_df.loc[train_df.Id.isin(valid_df.Id.values.tolist()),:].reset_index(drop=True)\n valid = pd.concat([valid,xtra_data_valid], axis=0, sort=False)\n #valid = valid.loc[~valid.Id.isin(rem_val.Id.values),:].reset_index(drop=True)\n test = test_df\n del train_df,valid_df,test_df,xtra_data_valid,xtra_data_train\n gc.collect()\n\n\n set(valid.Id.apply(lambda x : x.split('_class')[0]).unique().tolist()) - set(val_meta.Id.unique().tolist())\n set(test.Id.values.tolist()) - set(test_meta.Id.values.tolist())\n\n qqdata = valid.copy()\n labels = np.zeros((qqdata.shape[0], 28), dtype=np.int32)\n if \"Target\" in qqdata:\n for i, lbls in qqdata['Target'].str.split().iteritems():\n for j in map(int, lbls):\n labels[i, j] = 1\n for j in range(28):\n print(j,'\\t',name_label_dict[j], '\\t', labels[:,j].sum(), '\\t', labels[:,j].sum()/labels.shape[0])\n\n def twenty_kfold_threshold(y_true, y_pred):\n n_classes = len(name_label_dict)\n classes_thresholds = []\n classes_scores = []\n for i in range(n_classes):\n for j in range(20):\n kf = StratifiedKFold(n_splits=5, shuffle=True, random_state=239 + j*101)\n kf_class_thresholds = []\n for _, tst_inx in kf.split(y_true,y_true[:,i]):\n t_min = np.min(y_pred[tst_inx,i])\n t_max = np.max(y_pred[tst_inx,i])\n thresholds = np.linspace(t_min, t_max, 100)\n scores = np.array([\n f1_score(y_true[tst_inx,i], np.int32(y_pred[tst_inx,i] >= threshold)) for threshold in thresholds\n ])\n threshold_best_index = np.argmax(scores)\n kf_class_thresholds.append(thresholds[threshold_best_index])\n threshold = np.mean(kf_class_thresholds)\n classes_thresholds.append(threshold)\n f1 = f1_score(y_true[:,i], np.int32(y_pred[:,i] >= threshold))\n classes_scores.append(f1)\n return classes_thresholds, classes_scores\n\n models = ['sf_fastai_inf_seresnext','sf_fastai_inf_incv4','pd_seresnext_v5','sf_fastai_inf_sev2',\n 'db_seresnext_v3_LSEP4','pd_seresnext_v6','pd_inceptionv4_v2',\n 'pd_seresnext_v7','pd_xception_v1', 'pd_seresnext_v10']\n results_dir = 'results/'\n\n def load_model(m):\n ids = np.load(results_dir+m+'_ids.npy')\n q = np.argsort(ids)\n y = np.load(results_dir+m+'_y.npy')\n avg_preds = np.load(results_dir+m+'_holdout.npy')\n preds = np.array([avg_preds[i] for i in q])\n ytrue = np.array([y[i] for i in q])\n avg_tests = np.load(results_dir+m+'_test.npy')\n return ytrue, preds, avg_tests\n\n holdouts = []\n tests = []\n ys = []\n for m in models:\n y,h,t = load_model(m)\n if len(ys) == 0:\n ys.append(y)\n holdouts.append(h)\n tests.append(t)\n\n valid.loc[:,'Id'] = valid.Id.apply(lambda x : x.split('_clas')[0])\n\n\n val_set = valid[['Id']].merge(val_meta, on='Id', how='left').sort_values('Id').reset_index(drop=True)\n for mod_ix in range(len(models)):\n for i in range(28):\n val_set[models[mod_ix]+'_'+str(i)] = holdouts[mod_ix][:,i]\n\n\n # In[254]:\n\n\n test_set = test[['Id']].merge(test_meta, on='Id', how='left').reset_index(drop=True)\n for mod_ix in range(len(models)):\n for i in range(28):\n test_set[models[mod_ix]+'_'+str(i)] = tests[mod_ix][:,i]\n\n train_df = pd.read_csv(data_dir 
+ 'train.csv')\n\n qqdata = train_df.copy()\n labels = np.zeros((qqdata.shape[0], 28), dtype=np.int32)\n if \"Target\" in qqdata:\n for i, lbls in qqdata['Target'].str.split().iteritems():\n for j in map(int, lbls):\n labels[i, j] = 1\n others_mult = []\n for j in range(28):\n others_mult.append((labels.shape[0]-labels[:,j].sum())/labels[:,j].sum())\n print(j,'\\t',name_label_dict[j], '\\t', labels[:,j].sum(), '\\t', labels.shape[0]/labels[:,j].sum())\n others_mult\n\n\n from sklearn.metrics import roc_auc_score\n\n import warnings\n warnings.filterwarnings(\"ignore\")\n\n from sklearn.metrics import roc_auc_score\n\n def fold_in_fold(train_set, y, val_set, test_set, num_folds=5, num_iters=5, class_weight = None):\n test_predict = np.zeros(test_set.shape[0])\n val_predict = np.zeros(val_set.shape[0])\n all_scores = []\n for kk in range(num_iters):\n kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=239 + kk*101)\n for ho_trn_inx, ho_val_inx in kf.split(y,y):\n evals_result = {}\n param = {\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n 'scale_pos_weight' : class_weight,\n 'metric' : 'auc',\n 'learning_rate': 0.01, # 0.1\n 'colsample': 0.6,\n 'num_leaves': 256,\n 'min_data_in_leaf': 15,\n 'max_depth': 9,\n 'max_bin': 255, # 255\n 'bagging_fraction': 0.9,\n 'bagging_freq': 5, # 0\n #'lambda_l1': 0.001,\n #'lambda_l2': 0.005\n }\n\n trn_data = lgb.Dataset(train_set[ho_trn_inx,:], label=y[ho_trn_inx])\n val_data = lgb.Dataset(train_set[ho_val_inx,:], label=y[ho_val_inx])\n\n clf = lgb.train(param, trn_data, num_boost_round=1000, early_stopping_rounds=200,\n valid_sets=[trn_data,val_data], valid_names=[\"train\",\"val\"], #feval = lgb_f1_score,\n evals_result=evals_result, verbose_eval=None)\n fold_pred = clf.predict(train_set[ho_val_inx,:], num_iteration=clf.best_iteration)\n\n t_min = np.min(fold_pred)\n t_max = np.max(fold_pred)\n thresholds = np.linspace(t_min, t_max, 1000)\n scores = np.array([\n f1_score(y[ho_val_inx], np.int32(fold_pred >= threshold), average='macro') for threshold in thresholds\n if (np.sum(np.int32(fold_pred >= threshold)) > 0)\n ])\n\n threshold_best_index = np.argmax(scores)\n all_scores.append(scores[np.argmax(scores)])\n test_predict += (clf.predict(test_set, num_iteration=clf.best_iteration) >= thresholds[threshold_best_index]).astype(float)/(num_folds*num_iters)\n val_predict += (clf.predict(val_set, num_iteration=clf.best_iteration) >= thresholds[threshold_best_index]).astype(float)/(num_folds*num_iters)\n #print(np.mean(all_scores))\n return val_predict, test_predict\n\n from sklearn import metrics\n import lightgbm as lgb\n from sklearn.linear_model import Ridge\n from sklearn.neural_network import MLPClassifier\n from sklearn.metrics import classification_report\n\n y = ys[0].copy()\n num_folds = 5\n test_lgb_prob2 = np.zeros((test_set.values.shape[0],28))\n classes_thresholds = []\n classes_scores = []\n KK_ITER = 3\n class_thresholds = np.zeros(28)\n\n np.random.seed(239)\n\n for i in range(28):\n print()\n for kk in range(KK_ITER):\n pos_class = np.sum(y[:,i])\n neg_class = int(pos_class*others_mult[i])\n neg_idxs = np.array(range(len(y)))[np.where(y[:,i]==0,True,False).tolist()]\n np.random.shuffle(neg_idxs)\n idxs = np.hstack([neg_idxs[:neg_class],np.array(range(len(y)))[np.where(y[:,i]==1,True,False).tolist()]])\n np.random.shuffle(idxs)\n y_true = y[idxs]\n class_val_set = val_set.loc[idxs,:].reset_index(drop=True)\n class_weight = neg_class / (pos_class + neg_class)\n kf = StratifiedKFold(n_splits=num_folds, shuffle=True, 
random_state=239 + kk * 57)\n fold_inx = 1\n tst_lgb_prob_fold = np.zeros(test_lgb_prob2.shape)\n lgb_prob = np.zeros((class_val_set.values.shape[0], 28))\n for ho_trn_inx, ho_val_inx in kf.split(y_true,y_true[:,i]):\n val_pred,test_pred, = fold_in_fold(class_val_set.values[ho_trn_inx,1:],\n y_true[ho_trn_inx,i],\n class_val_set.values[ho_val_inx,1:],\n test_set.values[:,1:],\n 3,5,class_weight\n )\n lgb_prob[ho_val_inx,i] = val_pred\n tst_lgb_prob_fold[:,i] += test_pred/num_folds\n fold_inx += 1\n thresholds = np.linspace(0, 1, 1000)\n scores = np.array([\n f1_score(y_true[:,i], np.int32(lgb_prob[:,i] >= threshold), average='macro') for threshold in thresholds \\\n if np.sum(np.int32(lgb_prob[:,i] >= threshold)) > 0\n ])\n threshold_best_index = np.argmax(scores)\n cl_thr = thresholds[threshold_best_index]\n #print(classification_report(y_true[:,i], np.int32(lgb_prob[:,i] >= cl_thr).astype(int)))\n class_thresholds[i] += cl_thr / (1.0 * KK_ITER)\n test_lgb_prob2[:,i] += tst_lgb_prob_fold[:,i] / (1.0*KK_ITER)\n print(\"Class {0} F1 score (0.5 threshold): {1:.5f}\".format(name_label_dict[i], f1_score(y_true[:,i],(lgb_prob[:,i]>=0.5).astype(int), average='macro')))\n print(\"Class {0} F1 score ({1:.3f} threshold): {2:.5f}\".format(name_label_dict[i], cl_thr, f1_score(y_true[:,i],(lgb_prob[:,i]>=cl_thr).astype(int), average='macro')))\n\n #patch 8 10\n\n iter_num = []\n num_outter_folds = []\n num_inner_folds = []\n for i in range(28):\n q = ys[0][:,i].sum()\n t = 3\n out_folds = 7\n in_folds = 7\n if q < 15:\n t = 20\n out_folds = 5\n in_folds = 3\n elif q < 110:\n t = 10\n out_folds = 5\n in_folds = 5\n elif q < 250:\n t = 7\n out_folds = 7\n in_folds = 5\n elif q < 1000:\n t = 5\n num_outter_folds.append(out_folds)\n num_inner_folds.append(in_folds)\n iter_num.append(t)\n print(i,q,t)\n\n from sklearn.metrics import roc_auc_score\n from sklearn.linear_model import Ridge\n from sklearn.preprocessing import StandardScaler\n\n feature_importances = {}\n for i in range(28):\n feature_importances[i] = []\n\n from sklearn.metrics import roc_curve, precision_recall_curve\n def threshold_search(y_true, y_proba, plot=False):\n precision, recall, thresholds = precision_recall_curve(y_true, y_proba)\n thresholds = np.append(thresholds, 1.001)\n F = 2 / (1/precision + 1/recall)\n best_score = np.max(F)\n best_th = thresholds[np.argmax(F)]\n if plot:\n plt.plot(thresholds, F, '-b')\n plt.plot([best_th], [best_score], '*r')\n plt.show()\n search_result = {'t': best_th , 'f1': best_score}\n return search_result\n\n def fold_in_fold(train_set, y, val_set, test_set, num_folds=5, num_iters=5,\n class_weight = None, class_id = None, use_ridge = True):\n global feature_importances\n test_predict = np.zeros(test_set.shape[0])\n val_predict = np.zeros(val_set.shape[0])\n all_scores = []\n for kk in range(num_iters):\n kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=239 + kk*101)\n ridge_train = np.zeros((train_set.shape[0],1))\n ridge_val = np.zeros((val_set.shape[0],1))\n ridge_test = np.zeros((test_set.shape[0],1))\n #print()\n if use_ridge:\n for ho_trn_inx, ho_val_inx in kf.split(y,y):\n scaler = StandardScaler()\n scaler.fit(train_set[ho_trn_inx,:])\n model = Ridge(alpha=0.1)\n model.fit(scaler.transform(train_set[ho_trn_inx,:]), y[ho_trn_inx])\n ridge_train[ho_val_inx,:] = model.predict(scaler.transform(train_set[ho_val_inx,:])).reshape(-1,1)\n res = threshold_search(y[ho_val_inx], ridge_train[ho_val_inx,:])\n\n #print('f1', res['f1'])\n\n # Предсказываем голосованием тест, сразу 
усредняя по внутреннему фолду и внутренней итерации.\n test_predict += ((model.predict(scaler.transform(test_set))\n >= res['t']).astype(float)/(num_folds*num_iters))\n\n # Предсказываем голосованием валидацию, сразу усредняя по внутреннему фолду и внутренней итерации.\n val_predict += ((model.predict(scaler.transform(val_set))\n >= res['t']).astype(float)/(num_folds*num_iters))\n else:\n for ho_trn_inx, ho_val_inx in kf.split(y,y):\n evals_result = {}\n param = {\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n #'scale_pos_weight' : class_weight,\n 'metric' : 'auc', # Я хз, почему именно AUC.\n 'learning_rate': 0.001,\n 'colsample': 0.6,\n 'num_leaves': 12,\n 'min_data_in_leaf': 150,\n 'max_depth': 4,\n 'l1':1,'l2':10,\n 'max_bin': 1000,\n 'bagging_fraction': 0.9,\n 'bagging_freq': 5,\n 'min_sum_hessian_in_leaf ':1000,\n }\n\n # Формируем LGB-шные объекты датасетов для трейна и валидации.\n trn_data = lgb.Dataset(train_set[ho_trn_inx,:], label=y[ho_trn_inx])\n val_data = lgb.Dataset(train_set[ho_val_inx,:], label=y[ho_val_inx])\n\n # Учимся с ES на 1000 деревьев. Возможно, есть смысл поставить 1500 на всякий случай?\n clf = lgb.train(param, trn_data, num_boost_round=1000, early_stopping_rounds=200,\n valid_sets=[trn_data,val_data], valid_names=[\"train\",\"val\"],\n evals_result=evals_result, verbose_eval=100)\n\n # Предсказываем валидационный фолд.\n fold_pred = clf.predict(train_set[ho_val_inx,:], num_iteration=clf.best_iteration)\n\n # Перебираем пороги для f1.\n t_min = np.min(fold_pred)\n t_max = np.max(fold_pred)\n thresholds = np.linspace(t_min, t_max, 1000)\n scores = np.array([\n f1_score(y[ho_val_inx], np.int32(fold_pred >= threshold))\n for threshold in thresholds\n if (np.sum(np.int32(fold_pred >= threshold)) > 0)\n ])\n\n # Выбираем лучший порог.\n threshold_best_index = np.argmax(scores)\n print('f1', scores[np.argmax(scores)])\n\n # Предсказываем голосованием тест, сразу усредняя по внутреннему фолду и внутренней итерации.\n test_predict += ((clf.predict(test_set, num_iteration=clf.best_iteration)\n >= thresholds[threshold_best_index]).astype(float)/(num_folds*num_iters))\n\n # Предсказываем голосованием валидацию, сразу усредняя по внутреннему фолду и внутренней итерации.\n val_predict += ((clf.predict(val_set, num_iteration=clf.best_iteration)\n >= thresholds[threshold_best_index]).astype(float)/(num_folds*num_iters))\n #print(np.mean(all_scores))\n return val_predict, test_predict\n\n # from sklearn import metrics\n import lightgbm as lgb\n from sklearn.linear_model import Ridge\n from sklearn.neural_network import MLPClassifier\n from sklearn.metrics import classification_report\n\n y = ys[0].copy()\n num_folds = 5\n test_lgb_prob2 = np.zeros((test_set.values.shape[0],28))\n classes_thresholds = []\n classes_scores = []\n\n class_thresholds = np.zeros(28)\n class_scores = np.zeros(28)\n\n np.random.seed(239)\n\n for i in [8,10]:\n print()\n KK_ITER = iter_num[i]\n num_folds = num_outter_folds[i]\n for kk in range(KK_ITER):\n pos_class = np.sum(y[:,i])\n neg_class = int(pos_class*others_mult[i])\n neg_idxs = np.array(range(len(y)))[np.where(y[:,i]==0,True,False).tolist()]\n np.random.shuffle(neg_idxs)\n idxs = np.hstack([neg_idxs[:neg_class],np.array(range(len(y)))[np.where(y[:,i]==1,True,False).tolist()]])\n np.random.shuffle(idxs)\n y_true = y[idxs]\n class_val_set = val_set.loc[idxs,:].reset_index(drop=True)\n class_weight = neg_class / pos_class\n kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=239 + kk * 57)\n tst_lgb_prob_fold = 
np.zeros(test_lgb_prob2.shape)\n lgb_prob = np.zeros((class_val_set.values.shape[0], 28))\n own_set = list(range(class_val_set.shape[1]))[1:]\n if np.sum(y[:,i]) < 15:\n own_set = list(set(list(range(1,15,1)) + [16+i+q*28 for q in range(len(models)-1)] + \\\n [16 + 28*3 + q for q in range(28)] + [16+11*28+q for q in range(10)] + [16+11*28+10+q for q in range(39)]))\n #own_set = list(set(list(range(1,15,1)) + [16+i+q*28 for q in range(len(models)-1)]))\n for ho_trn_inx, ho_val_inx in kf.split(y_true,y_true[:,i]):\n val_pred,test_pred, = fold_in_fold(class_val_set.values[ho_trn_inx,:][:,own_set],\n y_true[ho_trn_inx,i],\n class_val_set.values[ho_val_inx,:][:,own_set],\n test_set.values[:,:][:,own_set],\n num_inner_folds[i],iter_num[i],class_weight,i\n )\n lgb_prob[ho_val_inx,i] = val_pred\n tst_lgb_prob_fold[:,i] += test_pred/num_folds\n # Перебираем пороги для f1\n thresholds = np.linspace(0, 1, 1000)\n scores = np.array([\n f1_score(y_true[:,i], np.int32(lgb_prob[:,i] >= threshold), average='binary') # binary f1\n for threshold in thresholds\n if np.sum(np.int32(lgb_prob[:,i] >= threshold)) > 0 # Чтобы ворнинги не писались?\n ])\n\n # Выбираем трешхолд с лучшим f1.\n threshold_best_index = np.argmax(scores)\n cl_thr = thresholds[threshold_best_index]\n\n # Трешхолды усредняем по итерациям.\n class_thresholds[i] += cl_thr / (1.0 * KK_ITER)\n\n # Предикты тоже усредняем по итерациям.\n test_lgb_prob2[:,i] += tst_lgb_prob_fold[:,i] / (1.0*KK_ITER)\n\n # Печатаем скор валидации для порога, равного 0.5.\n print(\"Class {0} F1 score (0.5 threshold): {1:.5f}\"\n .format(\n name_label_dict[i],\n f1_score(y_true[:,i],(lgb_prob[:,i]>=0.5).astype(int), average='binary')))\n\n # Печатаем скор валидации для лучшего порога по этому фолду.\n print(\"Class {0} F1 score ({1:.3f} threshold): {2:.5f}\"\n .format(\n name_label_dict[i],\n cl_thr,\n f1_score(y_true[:,i],(lgb_prob[:,i]>=cl_thr).astype(int), average='binary')\n ))\n\n #patch 27\n models = ['sf_fastai_inf_seresnext','sf_fastai_inf_incv4','pd_seresnext_v5','sf_fastai_inf_sev2',\n 'db_seresnext_v3_LSEP4','pd_seresnext_v6','pd_inceptionv4_v2',\n 'pd_seresnext_v7','pd_xception_v1', 'pd_seresnext_v10','pd_bninception_v3','pd_seresnext_ovr_v3']\n results_dir = 'results/'\n\n holdouts = []\n tests = []\n ys = []\n for m in models:\n y,h,t = load_model(m)\n if len(ys) == 0:\n ys.append(y)\n holdouts.append(h)\n tests.append(t)\n valid.loc[:,'Id'] = valid.Id.apply(lambda x : x.split('_clas')[0])\n val_set = valid[['Id']].merge(val_meta, on='Id', how='left').sort_values('Id').reset_index(drop=True)\n for mod_ix in range(len(models)):\n for i in range(holdouts[mod_ix].shape[1]):\n val_set[models[mod_ix]+'_'+str(i)] = holdouts[mod_ix][:,i]\n test_set = test[['Id']].merge(test_meta, on='Id', how='left').reset_index(drop=True)\n for mod_ix in range(len(models)):\n for i in range(tests[mod_ix].shape[1]):\n test_set[models[mod_ix]+'_'+str(i)] = tests[mod_ix][:,i]\n\n def fold_in_fold(train_set, y, val_set, test_set, num_folds=5, num_iters=5,\n class_weight = None, class_id = None, use_ridge = True):\n global feature_importances\n test_predict = np.zeros(test_set.shape[0])\n val_predict = np.zeros(val_set.shape[0])\n all_scores = []\n for kk in range(num_iters):\n kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=239 + kk*101)\n ridge_train = np.zeros((train_set.shape[0],1))\n ridge_val = np.zeros((val_set.shape[0],1))\n ridge_test = np.zeros((test_set.shape[0],1))\n #print()\n if use_ridge:\n for ho_trn_inx, ho_val_inx in kf.split(y,y):\n scaler 
= StandardScaler()\n scaler.fit(train_set[ho_trn_inx,:])\n model = Ridge(alpha=.1) #, solver='svd'\n model.fit(scaler.transform(train_set[ho_trn_inx,:]), y[ho_trn_inx])\n ridge_train[ho_val_inx,:] = model.predict(scaler.transform(train_set[ho_val_inx,:])).reshape(-1,1)\n res = threshold_search(y[ho_val_inx], ridge_train[ho_val_inx,:])\n\n #print('f1', res['f1'])\n\n # Предсказываем голосованием тест, сразу усредняя по внутреннему фолду и внутренней итерации.\n test_predict += ((model.predict(scaler.transform(test_set))\n >= res['t']*1.05).astype(float)/(num_folds*num_iters))\n\n # Предсказываем голосованием валидацию, сразу усредняя по внутреннему фолду и внутренней итерации.\n val_predict += ((model.predict(scaler.transform(val_set))\n >= res['t']*1.05).astype(float)/(num_folds*num_iters))\n else:\n for ho_trn_inx, ho_val_inx in kf.split(y,y):\n evals_result = {}\n param = {\n 'boosting_type': 'gbdt',\n 'objective': 'binary',\n #'scale_pos_weight' : class_weight,\n 'metric' : 'auc', # Я хз, почему именно AUC.\n 'learning_rate': 0.001,\n 'colsample': 0.6,\n 'num_leaves': 12,\n 'min_data_in_leaf': 150,\n 'max_depth': 4,\n 'l1':1,'l2':10,\n 'max_bin': 1000,\n 'bagging_fraction': 0.9,\n 'bagging_freq': 5,\n 'min_sum_hessian_in_leaf ':1000,\n }\n\n # Формируем LGB-шные объекты датасетов для трейна и валидации.\n trn_data = lgb.Dataset(train_set[ho_trn_inx,:], label=y[ho_trn_inx])\n val_data = lgb.Dataset(train_set[ho_val_inx,:], label=y[ho_val_inx])\n\n # Учимся с ES на 1000 деревьев. Возможно, есть смысл поставить 1500 на всякий случай?\n clf = lgb.train(param, trn_data, num_boost_round=1000, early_stopping_rounds=200,\n valid_sets=[trn_data,val_data], valid_names=[\"train\",\"val\"],\n evals_result=evals_result, verbose_eval=100)\n\n # Предсказываем валидационный фолд.\n fold_pred = clf.predict(train_set[ho_val_inx,:], num_iteration=clf.best_iteration)\n\n # Перебираем пороги для f1.\n t_min = np.min(fold_pred)\n t_max = np.max(fold_pred)\n thresholds = np.linspace(t_min, t_max, 1000)\n scores = np.array([\n f1_score(y[ho_val_inx], np.int32(fold_pred >= threshold))\n for threshold in thresholds\n if (np.sum(np.int32(fold_pred >= threshold)) > 0)\n ])\n\n # Выбираем лучший порог.\n threshold_best_index = np.argmax(scores)\n print('f1', scores[np.argmax(scores)])\n\n # Предсказываем голосованием тест, сразу усредняя по внутреннему фолду и внутренней итерации.\n test_predict += ((clf.predict(test_set, num_iteration=clf.best_iteration)\n >= thresholds[threshold_best_index]).astype(float)/(num_folds*num_iters))\n\n # Предсказываем голосованием валидацию, сразу усредняя по внутреннему фолду и внутренней итерации.\n val_predict += ((clf.predict(val_set, num_iteration=clf.best_iteration)\n >= thresholds[threshold_best_index]).astype(float)/(num_folds*num_iters))\n #print(np.mean(all_scores))\n return val_predict, test_predict\n\n iter_num[27] = 40\n\n for i in [27]:\n print()\n KK_ITER = iter_num[i]\n num_folds = num_outter_folds[i]\n for kk in range(KK_ITER):\n pos_class = np.sum(y[:,i])\n neg_class = int(pos_class*others_mult[i])\n neg_idxs = np.array(range(len(y)))[np.where(y[:,i]==0,True,False).tolist()]\n np.random.shuffle(neg_idxs)\n idxs = np.hstack([neg_idxs[:neg_class],np.array(range(len(y)))[np.where(y[:,i]==1,True,False).tolist()]])\n np.random.shuffle(idxs)\n y_true = y[idxs]\n class_val_set = val_set.loc[idxs,:].reset_index(drop=True)\n class_weight = neg_class / pos_class\n kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=239 + kk * 57)\n tst_lgb_prob_fold = 
np.zeros(test_lgb_prob2.shape)\n lgb_prob = np.zeros((class_val_set.values.shape[0], 28))\n own_set = list(range(class_val_set.shape[1]))[1:]\n if np.sum(y[:,i]) < 15:\n #own_set = list(set(list(range(1,15,1)) + [16+i+q*28 for q in range(len(models)-1)] + \\\n # [16 + 28*3 + q for q in range(28)] + [16+11*28+q for q in range(10)] + [16+11*28+10+q for q in range(39)]))\n #own_set = list(set(list(range(1,15,1)) + [16+i+q*28 for q in range(len(models)-1)]))\n own_set = list(set([16+i+q*28 for q in range(len(models)-1)]))\n for ho_trn_inx, ho_val_inx in kf.split(y_true,y_true[:,i]):\n val_pred,test_pred, = fold_in_fold(class_val_set.values[ho_trn_inx,:][:,own_set],\n y_true[ho_trn_inx,i],\n class_val_set.values[ho_val_inx,:][:,own_set],\n test_set.values[:,:][:,own_set],\n num_inner_folds[i],iter_num[i],class_weight,i\n )\n lgb_prob[ho_val_inx,i] = val_pred\n tst_lgb_prob_fold[:,i] += test_pred/num_folds\n # Перебираем пороги для f1\n thresholds = np.linspace(0, 1, 1000)\n scores = np.array([\n f1_score(y_true[:,i], np.int32(lgb_prob[:,i] >= threshold), average='binary') # binary f1\n for threshold in thresholds\n if np.sum(np.int32(lgb_prob[:,i] >= threshold)) > 0 # Чтобы ворнинги не писались?\n ])\n\n # Выбираем трешхолд с лучшим f1.\n threshold_best_index = np.argmax(scores)\n cl_thr = thresholds[threshold_best_index]\n\n # Трешхолды усредняем по итерациям.\n class_thresholds[i] += cl_thr / (1.0 * KK_ITER)\n\n # Предикты тоже усредняем по итерациям.\n test_lgb_prob2[:,i] += tst_lgb_prob_fold[:,i] / (1.0*KK_ITER)\n\n # Печатаем скор валидации для порога, равного 0.5.\n print(\"Class {0} F1 score (0.5 threshold): {1:.5f}\"\n .format(\n name_label_dict[i],\n f1_score(y_true[:,i],(lgb_prob[:,i]>=0.5).astype(int), average='binary')))\n\n # Печатаем скор валидации для лучшего порога по этому фолду.\n print(\"Class {0} F1 score ({1:.3f} threshold): {2:.5f}\"\n .format(\n name_label_dict[i],\n cl_thr,\n f1_score(y_true[:,i],(lgb_prob[:,i]>=cl_thr).astype(int), average='binary')\n ))\n\n #submit\n ppreds = test_pred.copy()\n for i in range(28):\n ppreds[:,i] = ppreds[:,i] >= class_thresholds[i]\n\n mdict = {}\n for i,p in zip(ids,ppreds):\n mdict[i] = ' '.join([str(q) for q in np.argwhere(p).ravel()])\n sub = pd.DataFrame.from_dict(mdict,orient='index').reset_index()\n sub.columns = ['Id','Predicted']\n\n japanese_duplicates = pd.read_csv('input/leak_v4.csv')\n japanese_duplicates.columns = ['Test','C','H','Target']\n\n submit = pd.read_csv(data_dir + \"sample_submission.csv\").drop(['Predicted'], axis=1)\n submit = submit.merge(sub, on='Id',how='left')\n\n print(len(submit.loc[submit.Predicted == '',:]))\n\n for i, row in japanese_duplicates.iterrows():\n test_dup_id = row['Test']\n test_dup_classes = row['Target']\n submit.loc[submit['Id'] == test_dup_id, 'Predicted'] = test_dup_classes\n\n submit.to_csv('submits/' + 'submission.csv',index=False)\n submit.head(n=30)\n",
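The stacker's threshold_search helper picks the cut-off that maximizes F1 along the precision/recall curve. A self-contained sketch of the same idea on synthetic scores; note the added epsilon in the F1 formula is a guard against division by zero and is not part of the original helper:

# Minimal sketch of a precision/recall-curve F1 threshold search on synthetic scores.
import numpy as np
from sklearn.metrics import precision_recall_curve

rng = np.random.RandomState(1)
y_true = (rng.rand(1000) < 0.2).astype(int)
y_proba = np.clip(0.6 * y_true + 0.5 * rng.rand(1000), 0.0, 1.0)

precision, recall, thresholds = precision_recall_curve(y_true, y_proba)
thresholds = np.append(thresholds, 1.001)                   # align lengths, as in the script
f1 = 2 * precision * recall / (precision + recall + 1e-12)  # epsilon avoids 0/0
best = int(np.argmax(f1))
print({'t': float(thresholds[best]), 'f1': float(f1[best])})

Scanning the precision/recall curve visits only the thresholds at which predictions actually change, which is why it is faster than sweeping a fixed grid of 1000 candidate thresholds.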
"import os, sys\n#os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport skimage.io\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport PIL\nfrom PIL import Image\nimport cv2\nfrom sklearn.utils import class_weight, shuffle\nfrom ml_stratifiers import MultilabelStratifiedKFold\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom classification_models.resnet.models import ResNet18\nimport albumentations as A\n\nMODEL_PATH = 'Christof/models/GAPNet/9_crop/'\n\n# a) added batchnorm and cut out one Dense 256 layer\n# b) a) + added 16 size layer to GAP\nexp_suffix = '_lm'\nSIZE = 256\n\n# Load dataset info\npath_to_train = 'Christof/assets/train_rgb_512/'\ndata = pd.read_csv('Christof/assets/train.csv')\n\nnormal_aug = A.Compose([#A.Rotate((0,30),p=0.75),\n A.RandomRotate90(p=1),\n A.HorizontalFlip(p=0.5),\n #A.RandomBrightness(0.05),\n #A.RandomContrast(0.05),\n A.IAAAffine(translate_percent=10,rotate=45,shear=10, scale=(0.9,1.1)),\n #A.RandomAffine(degrees=45, translate=(0.1,0.1), shear=10, scale=(0.9,1.1))\n A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.1300, 0.0879, 0.1386),\n max_pixel_value=255.)\n ])\n\nnormal_aug_ext = A.Compose([#A.Rotate((0,30),p=0.75),\n A.RandomRotate90(p=1),\n A.HorizontalFlip(p=0.5),\n #A.RandomBrightness(0.05),\n #A.RandomContrast(0.05),\n A.IAAAffine(translate_percent=10,rotate=45,shear=10, scale=(0.9,1.1)),\n #A.RandomAffine(degrees=45, translate=(0.1,0.1), shear=10, scale=(0.9,1.1))\n A.Normalize(mean=(0.1174382, 0.06798691, 0.06592218), std=(0.16392466 ,0.10036821, 0.16703453),\n max_pixel_value=255.)\n ])\n\nval_aug = A.Compose([A.HorizontalFlip(p=0.5),\n A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.1300, 0.0879, 0.1386),\n max_pixel_value=255.)])\nfrom torchvision import transforms\n\neps = 0.004\ndesired = {\n 0: 0.36239782,\n 1: 0.043841336,\n 2: 0.075268817,\n 3: 0.059322034,\n 4: 0.075268817,\n 5: 0.075268817,\n 6: 0.043841336,\n 7: 0.075268817,\n 8: eps,\n 9: eps,\n 10: eps,\n 11: 0.043841336,\n 12: 0.043841336,\n 13: 0.014198783,\n 14: 0.043841336,\n 15: eps,\n 16: 0.028806584,\n 17: 0.014198783,\n 18: 0.028806584,\n 19: 0.059322034,\n 20: eps,\n 21: 0.126126126,\n 22: 0.028806584,\n 23: 0.075268817,\n 24: eps,\n 25: 0.222493888,\n 26: 0.028806584,\n 27: eps\n}\n\nsampling_weights = [ 2.6473, 35.0588 , 8.2069 , 19.3439 , 16.0145 , 13.3245 , 32.8644,\n 10.607 , 551.3 , 501.1818 , 787.5714 , 25.8523 , 39.0301, 51.644,\n 30.0846 ,1470.1333 , 62.8262, 190.1034 , 39.3084 , 23.2126 , 170.9457\n, 8.2592, 33.2609 , 9.6889 , 92.2678 , 4.19 , 99.3333 ,3150.2857]\n\nsample_weights_ext = [ 2.6728, 41.1617 , 10.3068 , 42.4172 , 22.9729 , 21.9808 , 26.8267\n, 11.5358 , 474.8659 , 486.7375 , 492.8987 , 66.963 , 50.2763 , 82.7609,\n 45.0683, 1854.2381, 100.3582 , 319.1721 , 76.5762 , 33.424 , 272.3007,\n 7.3664 , 39.4319 , 10.239 , 734.6981 , 2.548 , 196.6616 , 638.3443]\n\n\ntrain_dataset_info = []\nfor name, labels in zip(data['Id'], data['Target'].str.split(' ')):\n path = os.path.join(path_to_train, name)\n labs = np.array([int(label) for label in labels])\n bucket_ind = np.argmin([desired[l] for l in labs])\n bucket = labs[bucket_ind]\n weight = sampling_weights[bucket]\n train_dataset_info.append({\n 'path': path,\n 'labels': labs,\n 'weight':weight})\ntrain_dataset_info = np.array(train_dataset_info)\n\ndata_ext1 = 
pd.read_csv('Christof/assets/train_ext1.csv')\npath_to_train_ext1 = 'Christof/assets/ext_tomomi/'\ntrain_dataset_info_ext1 = []\nfor name, labels in zip(data_ext1['Id'], data_ext1['Target'].str.split(' ')):\n path = os.path.join(path_to_train_ext1, name[:-5])\n labs = np.array([int(label) for label in labels])\n bucket_ind = np.argmin([desired[l] for l in labs])\n bucket = labs[bucket_ind]\n weight = sample_weights_ext[bucket]\n train_dataset_info_ext1.append({\n 'path':path,\n 'labels': labs,\n 'weight':weight})\ntrain_dataset_info_ext1 = np.array(train_dataset_info_ext1)\n\n\ncounts = np.zeros(28)\nfor item in train_dataset_info:\n for l in item['labels']:\n counts[l] = counts[l] + 1\n\ncounts = counts / len(train_dataset_info)\nrare_classes = np.where(counts < 0.005)\n\n#rare_dataset_info = np.array([item for item in train_dataset_info if np.isin(item['labels'], rare_classes).any()])\n#train_dataset_info = rare_dataset_info\nfrom torch.utils.data.sampler import WeightedRandomSampler\n\nfrom classification_models.resnet import preprocess_input\nclass data_generator:\n\n @staticmethod\n def create_train(dataset_info, batch_size, shape, augument=None, weighted_sample = True):\n assert shape[2] == 3\n\n if weighted_sample:\n p = np.array([item['weight'] for item in dataset_info])\n p = p/np.sum(p)\n else:\n p = None\n\n while True:\n #dataset_info = shuffle(dataset_info)\n for start in range(0, len(dataset_info), batch_size):\n #end = min(start + batch_size, len(dataset_info))\n batch_images = []\n X_train_batch = np.random.choice(dataset_info,batch_size,p=p)\n batch_labels = np.zeros((len(X_train_batch), 28))\n for i in range(len(X_train_batch)):\n image = data_generator.load_image(X_train_batch[i]['path'], shape)\n #image = preprocess_input(image)\n #rare = np.isin(X_train_batch[i]['labels'], rare_classes).any()\n\n if augument:\n image = data_generator.augment(augument,image)\n\n batch_images.append(image)\n batch_labels[i][X_train_batch[i]['labels']] = 1\n yield np.array(batch_images, np.float32), batch_labels\n\n @staticmethod\n def load_image(path, shape):\n image = cv2.imread(path + '.png', cv2.IMREAD_UNCHANGED)\n image = image[128:384,:256,:]\n return image\n\n @staticmethod\n def augment(aug,image):\n image_aug = aug(image=image)['image']\n return image_aug\n\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D, Concatenate, Input, Conv2D\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import metrics\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nimport keras\nfrom keras.models import Model\n\n\nfrom keras.layers import Layer, InputSpec\nfrom keras import initializers\nfrom keras.constraints import Constraint\nimport keras.backend as K\n\nfrom keras.layers import Reshape, Permute, multiply\ndef squeeze_excite_block(input, ratio=16):\n init = input\n channel_axis = 1 if K.image_data_format() == \"channels_first\" else -1\n filters = init._keras_shape[channel_axis]\n se_shape = (1, 1, filters)\n\n se = GlobalAveragePooling2D()(init)\n se = Reshape(se_shape)(se)\n se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)\n se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)\n\n if K.image_data_format() == 'channels_first':\n se = Permute((3, 1, 2))(se)\n\n x = multiply([init, 
se])\n return x\n\ndef encoder(backbone):\n\n c0 = backbone.get_layer('relu0').output\n\n c1 = backbone.get_layer('stage2_unit1_relu1').get_output_at(0) # 128\n c2 = backbone.get_layer('stage3_unit1_relu1').output # 63\n c3 = backbone.get_layer('stage4_unit1_relu1').output # 32\n enc_out = backbone.get_layer('relu1').output # 16\n #enc_out = backbone.output # 8\n\n short_cuts = [c0,c1,c2,c3]\n return enc_out, short_cuts\n\nfrom keras.layers import BatchNormalization\ndef create_model(input_shape, n_out):\n input_tensor = Input(shape=(SIZE, SIZE, 3))\n #bn = BatchNormalization()(input_tensor)\n #conv = Conv2D(3,(3,3),padding='same',activation='relu')(bn)\n base_model = ResNet18(include_top=False,\n weights='imagenet',\n input_shape=(SIZE, SIZE, 3),input_tensor=input_tensor)\n\n enc_out, short_cuts = encoder(base_model)\n x0 = GlobalAveragePooling2D()(squeeze_excite_block(enc_out))\n x1 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[0]))\n x2 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[1]))\n x3 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[2]))\n x4 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[3]))\n x = Concatenate()([x0,x1,x2,x3,x4])\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n x = Dense(256, activation='relu')(x)\n #x = BatchNormalization()(x)\n #x = Dropout(0.5)(x)\n #x = Dense(256, activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n output = Dense(n_out, activation='sigmoid')(x)\n\n model = Model(input_tensor, output)\n\n # transfer imagenet weights\n #res_img = ResNet34(include_top=False, weights='imagenet', input_shape=(SIZE, SIZE, 3))\n #offset = 2\n #for i, l in enumerate(base_model.layers[offset+1:]):\n # l.set_weights(res_img.layers[i + 1].get_weights())\n\n return model\n\n\n\n# create callbacks list\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\nfrom keras_callbacks import F1Metric\n#from keras_metrics import f1, f1_02\n#from keras_losses import f1_loss\nepochs = [20,150]\nbatch_size = 32\n\n\n# split data into train, valid\n\nmskf = MultilabelStratifiedKFold(n_splits=5,shuffle=True,random_state=18)\n\ny = np.zeros((len(train_dataset_info), 28))\nfor i in range(len(train_dataset_info)):\n y[i][train_dataset_info[i]['labels']] = 1\nmskf.get_n_splits(train_dataset_info, y)\nkf = mskf.split(train_dataset_info, y)\nfold_id = 1\nfor f in range(fold_id):\n train_indexes, valid_indexes = next(kf)\n\ntrain_indexes, valid_indexes = next(kf)\ntrain_generator_orig = data_generator.create_train(train_dataset_info[train_indexes],\n batch_size, (SIZE, SIZE, 3), augument=normal_aug)\ntrain_generator_ext1 = data_generator.create_train(train_dataset_info_ext1,\n batch_size, (SIZE, SIZE, 3), augument=normal_aug_ext)\nimport random\n\n\ndef gen():\n while True:\n x = random.random()\n if x > 0.5:\n batch = next(train_generator_orig)\n else:\n batch = next(train_generator_ext1)\n yield batch\n\ntrain_generator = gen()\nvalidation_generator = data_generator.create_train(train_dataset_info[valid_indexes],\n batch_size, (SIZE, SIZE, 3), augument=val_aug, weighted_sample=False)\n\ncheckpoint = ModelCheckpoint(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix), monitor='val_loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True)\ntensorboard = TensorBoard(MODEL_PATH + 'logs{}'.format(fold_id) + '{}'.format(exp_suffix) + '/')\n# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,\n# verbose=1, 
mode='auto', epsilon=0.0001)\n# early = EarlyStopping(monitor=\"val_loss\",\n# mode=\"min\",\n# patience=6)\n#f1_metric = F1Metric(validation_generator2,2*len(valid_indexes)//batch_size,batch_size,28) #2 times val because of val_aug\n\nnb_epochs = epochs[0]\nnb_cycles = 1\ninit_lr = 0.0005\ndef _cosine_anneal_schedule(t):\n\n cos_inner = np.pi * (t % (nb_epochs // nb_cycles))\n cos_inner /= nb_epochs// nb_cycles\n cos_out = np.cos(cos_inner) + 1\n return float(init_lr / 2 * cos_out)\n\nlr_schedule = LearningRateScheduler(_cosine_anneal_schedule,verbose=True)\n\n\n\n\ncallbacks_list = [lr_schedule, tensorboard]\n\n\n# warm up model\nmodel = create_model(\n input_shape=(SIZE, SIZE, 3),\n n_out=28)\n\n\n\nPOS_WEIGHT = 10 # multiplier for positive targets, needs to be tuned\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as tfb\n\n\ndef weighted_binary_crossentropy(target, output):\n \"\"\"\n Weighted binary crossentropy between an output tensor\n and a target tensor. POS_WEIGHT is used as a multiplier\n for the positive targets.\n\n Combination of the following functions:\n * keras.losses.binary_crossentropy\n * keras.backend.tensorflow_backend.binary_crossentropy\n * tf.nn.weighted_cross_entropy_with_logits\n \"\"\"\n # transform back to logits\n _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)\n #_epsilon = K.epsilon()\n output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)\n output = tf.log(output / (1 - output))\n # compute weighted loss\n loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,\n logits=output,\n pos_weight=POS_WEIGHT)\n return tf.reduce_mean(loss, axis=-1)\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom functools import reduce\n\ndef binaryRound(x):\n \"\"\"\n Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},\n using the straight through estimator for the gradient.\n \"\"\"\n g = tf.get_default_graph()\n\n with ops.name_scope(\"BinaryRound\") as name:\n with g.gradient_override_map({\"Round\": \"Identity\"}):\n return tf.round(x, name=name)\n\n # For Tensorflow v0.11 and below use:\n #with g.gradient_override_map({\"Floor\": \"Identity\"}):\n # return tf.round(x, name=name)\n\ndef brian_f1(y_true, y_pred):\n y_pred = binaryRound(y_pred)\n tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)\n tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)\n fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)\n fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)\n\n p = tp / (tp + fp + K.epsilon())\n r = tp / (tp + fn + K.epsilon())\n\n f1 = 2*p*r / (p+r+K.epsilon())\n f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)\n return K.mean(f1)\n\ndef brian_f1_loss(y_true, y_pred):\n return 1- brian_f1(y_true, y_pred)\n\n\ndef custom_loss(y_true, y_pred):\n\n return 4*weighted_binary_crossentropy(y_true,y_pred) - K.log(brian_f1(y_true,y_pred))\n\n# train all layers\nfrom keras.metrics import binary_accuracy\n\n# model.compile(loss=custom_loss,\n# optimizer=Adam(lr=5e-4),\n# metrics=[binary_accuracy,brian_f1])\n# model.fit_generator(\n# train_generator,\n# steps_per_epoch=np.ceil(float(2*len(train_indexes)) / float(batch_size)),\n# #validation_data=validation_generator,\n# #validation_steps=2*np.ceil(float(len(valid_indexes)) / float(batch_size)),\n# epochs=epochs[0],\n# verbose=1,\n# callbacks=callbacks_list)\n# model.save_weights(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix))\nmodel.load_weights(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix))\n\n\nsubmit = 
pd.read_csv('Christof/assets/sample_submission.csv')\ntta = 8\n\n\n\ndraw_predict = np.zeros((len(submit['Id']), 28))\n\nfor i, name in tqdm(enumerate(submit['Id'])):\n path = os.path.join('Christof/assets/test_rgb_512/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n images = [data_generator.augment(normal_aug, image) for _ in range(tta)]\n tta_predicts = model.predict(np.array(images))\n draw_predict[i] = np.median(tta_predicts,axis = 0)\n\nnp.save(MODEL_PATH + f'pred{fold_id}{exp_suffix}.npy',draw_predict)\n\n\n# custom thresholds to match lb proportions\nthresholds = np.linspace(0.95, 0.05, 101)\npred = draw_predict.copy()\nfor j in tqdm(range(pred.shape[1])):\n for t in thresholds:\n pred[:, j] = (draw_predict[:, j] > t).astype(int)\n prop = np.mean(pred[:, j])\n if prop >= desired[j]: break\n print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )\n\nprint(pred[:5].astype(int))\n\nlabel_predict = [np.arange(28)[score_predict == 1] for score_predict in pred]\nstr_predict_label = [' '.join(str(l) for l in lp) for lp in label_predict]\n\nsubmit['Predicted'] = str_predict_label\n# np.save('draw_predict_InceptionV3.npy', score_predict)\nsubmit.to_csv(MODEL_PATH + 'submission_loss{}_lb_dist_adjusted_8tta.csv'.format(exp_suffix), index=False)\n\nfrom Christof.utils import f1_sub\n\nbest_sub = pd.read_csv('ens18.csv')\nf1_sub(best_sub,submit)\n\nbest_sub = pd.read_csv('ens56d.csv')\nf1_sub(best_sub,submit)\n\n# submit2 = pd.read_csv('Christof/models/GAPNet/11/submission_loss_0_lb_dist_adjusted_8tta.csv')\n# f1_sub(best_sub,submit2)\n#\n# submit2 = pd.read_csv('Christof/models/GAPNet/11_tests_on_clr/submission_loss_1in20_0005_2c_lb_dist_adjusted_8tta.csv')\n# f1_sub(best_sub,submit2)\n#\n# submit2 = pd.read_csv('Christof/models/GAPNet/11_tests_on_clr/submission_loss_1in20_0005_lb_dist_adjusted_8tta.csv')\n# f1_sub(best_sub,submit2)",
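The script above defines a one-cycle cosine-annealing schedule for Keras' LearningRateScheduler callback. A minimal sketch of that schedule as a plain function, using example values for nb_epochs, nb_cycles and init_lr rather than the tuned settings:

# Minimal sketch of the one-cycle cosine-annealing LR schedule (example values).
import numpy as np

def cosine_anneal(epoch, nb_epochs=20, nb_cycles=1, init_lr=5e-4):
    period = nb_epochs // nb_cycles
    cos_inner = np.pi * (epoch % period) / period
    return float(init_lr / 2 * (np.cos(cos_inner) + 1))

for epoch in range(0, 20, 5):
    print(epoch, round(cosine_anneal(epoch), 6))

The rate starts at init_lr, decays smoothly to near zero at the end of each cycle, and restarts at the next cycle boundary when nb_cycles > 1.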
"import os, sys\n#os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport skimage.io\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport PIL\nfrom PIL import Image\nimport cv2\nfrom sklearn.utils import class_weight, shuffle\nfrom ml_stratifiers import MultilabelStratifiedKFold\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom classification_models.resnet.models import ResNet18\nimport albumentations as A\n\nMODEL_PATH = 'Christof/models/GAPNet/13_ext/'\n\n# a) added batchnorm and cut out one Dense 256 layer\n# b) a) + added 16 size layer to GAP\nexp_suffix = '_4_2'\nSIZE = 512\n\n# Load dataset info\npath_to_train = 'Christof/assets/train_rgb_512/'\ndata = pd.read_csv('Christof/assets/train.csv')\n\nnormal_aug = A.Compose([#A.Rotate((0,30),p=0.75),\n A.RandomRotate90(p=1),\n A.HorizontalFlip(p=0.5),\n #A.RandomBrightness(0.05),\n #A.RandomContrast(0.05),\n A.IAAAffine(translate_percent=10,rotate=45,shear=10, scale=(0.9,1.1)),\n #A.RandomAffine(degrees=45, translate=(0.1,0.1), shear=10, scale=(0.9,1.1))\n A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.1300, 0.0879, 0.1386),\n max_pixel_value=255.)\n ])\n\nnormal_aug_ext = A.Compose([#A.Rotate((0,30),p=0.75),\n A.RandomRotate90(p=1),\n A.HorizontalFlip(p=0.5),\n #A.RandomBrightness(0.05),\n #A.RandomContrast(0.05),\n A.IAAAffine(translate_percent=10,rotate=45,shear=10, scale=(0.9,1.1)),\n #A.RandomAffine(degrees=45, translate=(0.1,0.1), shear=10, scale=(0.9,1.1))\n A.Normalize(mean=(0.1174382, 0.06798691, 0.06592218), std=(0.16392466 ,0.10036821, 0.16703453),\n max_pixel_value=255.)\n ])\n\nval_aug = A.Compose([A.HorizontalFlip(p=0.5),\n A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.1300, 0.0879, 0.1386),\n max_pixel_value=255.)])\nfrom torchvision import transforms\n\neps = 0.004\ndesired = {\n 0: 0.36239782,\n 1: 0.043841336,\n 2: 0.075268817,\n 3: 0.059322034,\n 4: 0.075268817,\n 5: 0.075268817,\n 6: 0.043841336,\n 7: 0.075268817,\n 8: eps,\n 9: eps,\n 10: eps,\n 11: 0.043841336,\n 12: 0.043841336,\n 13: 0.014198783,\n 14: 0.043841336,\n 15: eps,\n 16: 0.028806584,\n 17: 0.014198783,\n 18: 0.028806584,\n 19: 0.059322034,\n 20: eps,\n 21: 0.126126126,\n 22: 0.028806584,\n 23: 0.075268817,\n 24: eps,\n 25: 0.222493888,\n 26: 0.028806584,\n 27: eps\n}\n\nsampling_weights = [ 2.6473, 35.0588 , 8.2069 , 19.3439 , 16.0145 , 13.3245 , 32.8644,\n 10.607 , 551.3 , 501.1818 , 787.5714 , 25.8523 , 39.0301, 51.644,\n 30.0846 ,1470.1333 , 62.8262, 190.1034 , 39.3084 , 23.2126 , 170.9457\n, 8.2592, 33.2609 , 9.6889 , 92.2678 , 4.19 , 99.3333 ,3150.2857]\n\nsample_weights_ext = [ 2.6728, 41.1617 , 10.3068 , 42.4172 , 22.9729 , 21.9808 , 26.8267\n, 11.5358 , 474.8659 , 486.7375 , 492.8987 , 66.963 , 50.2763 , 82.7609,\n 45.0683, 1854.2381, 100.3582 , 319.1721 , 76.5762 , 33.424 , 272.3007,\n 7.3664 , 39.4319 , 10.239 , 734.6981 , 2.548 , 196.6616 , 638.3443]\n\n\ntrain_dataset_info = []\nfor name, labels in zip(data['Id'], data['Target'].str.split(' ')):\n path = os.path.join(path_to_train, name)\n labs = np.array([int(label) for label in labels])\n bucket_ind = np.argmin([desired[l] for l in labs])\n bucket = labs[bucket_ind]\n weight = sampling_weights[bucket]\n train_dataset_info.append({\n 'path': path,\n 'labels': labs,\n 'weight':weight})\ntrain_dataset_info = np.array(train_dataset_info)\n\ndata_ext1 = 
pd.read_csv('Christof/assets/train_ext1.csv')\npath_to_train_ext1 = 'Christof/assets/ext_tomomi/'\ntrain_dataset_info_ext1 = []\nfor name, labels in zip(data_ext1['Id'], data_ext1['Target'].str.split(' ')):\n path = os.path.join(path_to_train_ext1, name[:-5])\n labs = np.array([int(label) for label in labels])\n bucket_ind = np.argmin([desired[l] for l in labs])\n bucket = labs[bucket_ind]\n weight = sample_weights_ext[bucket]\n train_dataset_info_ext1.append({\n 'path':path,\n 'labels': labs,\n 'weight':weight})\ntrain_dataset_info_ext1 = np.array(train_dataset_info_ext1)\n\n\ncounts = np.zeros(28)\nfor item in train_dataset_info:\n for l in item['labels']:\n counts[l] = counts[l] + 1\n\ncounts = counts / len(train_dataset_info)\nrare_classes = np.where(counts < 0.005)\n\n#rare_dataset_info = np.array([item for item in train_dataset_info if np.isin(item['labels'], rare_classes).any()])\n#train_dataset_info = rare_dataset_info\nfrom torch.utils.data.sampler import WeightedRandomSampler\n\nfrom classification_models.resnet import preprocess_input\nclass data_generator:\n\n @staticmethod\n def create_train(dataset_info, batch_size, shape, augument=None, weighted_sample = True):\n assert shape[2] == 3\n\n if weighted_sample:\n p = np.array([item['weight'] for item in dataset_info])\n p = p/np.sum(p)\n else:\n p = None\n\n while True:\n #dataset_info = shuffle(dataset_info)\n for start in range(0, len(dataset_info), batch_size):\n #end = min(start + batch_size, len(dataset_info))\n batch_images = []\n X_train_batch = np.random.choice(dataset_info,batch_size,p=p)\n batch_labels = np.zeros((len(X_train_batch), 28))\n for i in range(len(X_train_batch)):\n image = data_generator.load_image(X_train_batch[i]['path'], shape)\n #image = preprocess_input(image)\n #rare = np.isin(X_train_batch[i]['labels'], rare_classes).any()\n\n if augument:\n image = data_generator.augment(augument,image)\n\n batch_images.append(image)\n batch_labels[i][X_train_batch[i]['labels']] = 1\n yield np.array(batch_images, np.float32), batch_labels\n\n @staticmethod\n def load_image(path, shape):\n image = cv2.imread(path + '.png', cv2.IMREAD_UNCHANGED)\n return image\n\n @staticmethod\n def augment(aug,image):\n image_aug = aug(image=image)['image']\n return image_aug\n\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D, Concatenate, Input, Conv2D\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import metrics\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nimport keras\nfrom keras.models import Model\n\n\nfrom keras.layers import Layer, InputSpec\nfrom keras import initializers\nfrom keras.constraints import Constraint\nimport keras.backend as K\n\nfrom keras.layers import Reshape, Permute, multiply\ndef squeeze_excite_block(input, ratio=16):\n init = input\n channel_axis = 1 if K.image_data_format() == \"channels_first\" else -1\n filters = init._keras_shape[channel_axis]\n se_shape = (1, 1, filters)\n\n se = GlobalAveragePooling2D()(init)\n se = Reshape(se_shape)(se)\n se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)\n se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)\n\n if K.image_data_format() == 'channels_first':\n se = Permute((3, 1, 2))(se)\n\n x = multiply([init, se])\n return x\n\ndef 
encoder(backbone):\n\n c0 = backbone.get_layer('relu0').output\n\n c1 = backbone.get_layer('stage2_unit1_relu1').get_output_at(0) # 128\n c2 = backbone.get_layer('stage3_unit1_relu1').output # 63\n c3 = backbone.get_layer('stage4_unit1_relu1').output # 32\n enc_out = backbone.get_layer('relu1').output # 16\n #enc_out = backbone.output # 8\n\n short_cuts = [c0,c1,c2,c3]\n return enc_out, short_cuts\n\nfrom keras.layers import BatchNormalization\ndef create_model(input_shape, n_out):\n input_tensor = Input(shape=(SIZE, SIZE, 3))\n #bn = BatchNormalization()(input_tensor)\n #conv = Conv2D(3,(3,3),padding='same',activation='relu')(bn)\n base_model = ResNet18(include_top=False,\n weights='imagenet',\n input_shape=(SIZE, SIZE, 3),input_tensor=input_tensor)\n\n enc_out, short_cuts = encoder(base_model)\n x0 = GlobalAveragePooling2D()(squeeze_excite_block(enc_out))\n x1 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[0]))\n x2 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[1]))\n x3 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[2]))\n x4 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[3]))\n x = Concatenate()([x0,x1,x2,x3,x4])\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n x = Dense(256, activation='relu')(x)\n #x = BatchNormalization()(x)\n #x = Dropout(0.5)(x)\n #x = Dense(256, activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dropout(0.5)(x)\n output = Dense(n_out, activation='sigmoid')(x)\n\n model = Model(input_tensor, output)\n\n # transfer imagenet weights\n #res_img = ResNet34(include_top=False, weights='imagenet', input_shape=(SIZE, SIZE, 3))\n #offset = 2\n #for i, l in enumerate(base_model.layers[offset+1:]):\n # l.set_weights(res_img.layers[i + 1].get_weights())\n\n return model\n\n\n\n# create callbacks list\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\nfrom keras_callbacks import F1Metric\n#from keras_metrics import f1, f1_02\n#from keras_losses import f1_loss\nepochs = [20,150]\nbatch_size = 32\n\n\n# split data into train, valid\n\nmskf = MultilabelStratifiedKFold(n_splits=5,shuffle=True,random_state=18)\n\ny = np.zeros((len(train_dataset_info), 28))\nfor i in range(len(train_dataset_info)):\n y[i][train_dataset_info[i]['labels']] = 1\nmskf.get_n_splits(train_dataset_info, y)\nkf = mskf.split(train_dataset_info, y)\nfold_id = 1\nfor f in range(fold_id):\n train_indexes, valid_indexes = next(kf)\n\ntrain_indexes, valid_indexes = next(kf)\ntrain_generator_orig = data_generator.create_train(train_dataset_info[train_indexes],\n batch_size, (SIZE, SIZE, 3), augument=normal_aug)\ntrain_generator_ext1 = data_generator.create_train(train_dataset_info_ext1,\n batch_size, (SIZE, SIZE, 3), augument=normal_aug_ext)\nimport random\n\n\ndef gen():\n while True:\n x = random.random()\n if x > 0.5:\n batch = next(train_generator_orig)\n else:\n batch = next(train_generator_ext1)\n yield batch\n\ntrain_generator = gen()\nvalidation_generator = data_generator.create_train(train_dataset_info[valid_indexes],\n batch_size, (SIZE, SIZE, 3), augument=val_aug, weighted_sample=False)\n\ncheckpoint = ModelCheckpoint(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix), monitor='val_loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True)\ntensorboard = TensorBoard(MODEL_PATH + 'logs{}'.format(fold_id) + '{}'.format(exp_suffix) + '/')\n# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,\n# verbose=1, mode='auto', epsilon=0.0001)\n# 
early = EarlyStopping(monitor=\"val_loss\",\n# mode=\"min\",\n# patience=6)\n#f1_metric = F1Metric(validation_generator2,2*len(valid_indexes)//batch_size,batch_size,28) #2 times val because of val_aug\n\nnb_epochs = epochs[0]\nnb_cycles = 1\ninit_lr = 0.0005\ndef _cosine_anneal_schedule(t):\n\n cos_inner = np.pi * (t % (nb_epochs // nb_cycles))\n cos_inner /= nb_epochs// nb_cycles\n cos_out = np.cos(cos_inner) + 1\n return float(init_lr / 2 * cos_out)\n\nlr_schedule = LearningRateScheduler(_cosine_anneal_schedule,verbose=True)\n\n\n\n\ncallbacks_list = [lr_schedule, checkpoint, tensorboard]\n\n\n# warm up model\nmodel = create_model(\n input_shape=(SIZE, SIZE, 3),\n n_out=28)\n\n\n\nPOS_WEIGHT = 10 # multiplier for positive targets, needs to be tuned\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as tfb\n\n\ndef weighted_binary_crossentropy(target, output):\n \"\"\"\n Weighted binary crossentropy between an output tensor\n and a target tensor. POS_WEIGHT is used as a multiplier\n for the positive targets.\n\n Combination of the following functions:\n * keras.losses.binary_crossentropy\n * keras.backend.tensorflow_backend.binary_crossentropy\n * tf.nn.weighted_cross_entropy_with_logits\n \"\"\"\n # transform back to logits\n _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)\n #_epsilon = K.epsilon()\n output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)\n output = tf.log(output / (1 - output))\n # compute weighted loss\n loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,\n logits=output,\n pos_weight=POS_WEIGHT)\n return tf.reduce_mean(loss, axis=-1)\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom functools import reduce\n\ndef binaryRound(x):\n \"\"\"\n Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},\n using the straight through estimator for the gradient.\n \"\"\"\n g = tf.get_default_graph()\n\n with ops.name_scope(\"BinaryRound\") as name:\n with g.gradient_override_map({\"Round\": \"Identity\"}):\n return tf.round(x, name=name)\n\n # For Tensorflow v0.11 and below use:\n #with g.gradient_override_map({\"Floor\": \"Identity\"}):\n # return tf.round(x, name=name)\n\ndef brian_f1(y_true, y_pred):\n y_pred = binaryRound(y_pred)\n tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)\n tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)\n fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)\n fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)\n\n p = tp / (tp + fp + K.epsilon())\n r = tp / (tp + fn + K.epsilon())\n\n f1 = 2*p*r / (p+r+K.epsilon())\n f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)\n return K.mean(f1)\n\ndef brian_f1_loss(y_true, y_pred):\n return 1- brian_f1(y_true, y_pred)\n\n\ndef custom_loss(y_true, y_pred):\n\n return 4*weighted_binary_crossentropy(y_true,y_pred) - K.log(brian_f1(y_true,y_pred))\n\n# train all layers\nfrom keras.metrics import binary_accuracy\n\nmodel.compile(loss=custom_loss,\n optimizer=Adam(lr=5e-4),\n metrics=[binary_accuracy,brian_f1])\nmodel.load_weights(MODEL_PATH + 'model_loss{}.h5'.format('_4'))\n\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=np.ceil(float(2*len(train_indexes)) / float(batch_size)),\n validation_data=validation_generator,\n validation_steps=2*np.ceil(float(len(valid_indexes)) / float(batch_size)),\n epochs=epochs[0],\n verbose=1,\n callbacks=callbacks_list)\nmodel.load_weights(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix))\n\n\nsubmit = 
pd.read_csv('Christof/assets/sample_submission.csv')\ntta = 8\n\n\n\ndraw_predict = np.zeros((len(submit['Id']), 28))\n\nfor i, name in tqdm(enumerate(submit['Id'])):\n path = os.path.join('Christof/assets/test_rgb_512/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n images = [data_generator.augment(normal_aug, image) for _ in range(tta)]\n tta_predicts = model.predict(np.array(images))\n draw_predict[i] = np.median(tta_predicts,axis = 0)\n\nnp.save(MODEL_PATH + f'pred{fold_id}{exp_suffix}.npy',draw_predict)\n\n\n# custom thresholds to match lb proportions\nthresholds = np.linspace(0.95, 0.05, 101)\npred = draw_predict.copy()\nfor j in tqdm(range(pred.shape[1])):\n for t in thresholds:\n pred[:, j] = (draw_predict[:, j] > t).astype(int)\n prop = np.mean(pred[:, j])\n if prop >= desired[j]: break\n print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )\n\nprint(pred[:5].astype(int))\n\nlabel_predict = [np.arange(28)[score_predict == 1] for score_predict in pred]\nstr_predict_label = [' '.join(str(l) for l in lp) for lp in label_predict]\n\nsubmit['Predicted'] = str_predict_label\n# np.save('draw_predict_InceptionV3.npy', score_predict)\nsubmit.to_csv(MODEL_PATH + 'submission_loss{}_lb_dist_adjusted_8tta.csv'.format(exp_suffix), index=False)\n\nfrom Christof.utils import f1_sub\n\nbest_sub = pd.read_csv('ens18.csv')\nf1_sub(best_sub,submit)\n\nbest_sub = pd.read_csv('ens56d.csv')\nf1_sub(best_sub,submit)\n\n# submit2 = pd.read_csv('Christof/models/GAPNet/11/submission_loss_0_lb_dist_adjusted_8tta.csv')\n# f1_sub(best_sub,submit2)\n#\n# submit2 = pd.read_csv('Christof/models/GAPNet/11_tests_on_clr/submission_loss_1in20_0005_2c_lb_dist_adjusted_8tta.csv')\n# f1_sub(best_sub,submit2)\n#\n# submit2 = pd.read_csv('Christof/models/GAPNet/11_tests_on_clr/submission_loss_1in20_0005_lb_dist_adjusted_8tta.csv')\n# f1_sub(best_sub,submit2)\n\nfrom keras.preprocessing.text import Tokenizer",
"import sys\nsys.path.insert(0, '..')\nimport numpy as np\nimport pandas as pd\nimport cv2\nfrom PIL import Image\nimport imagehash\nfrom tqdm import tqdm\nimport pickle\nimport mlcrate as mlc\n\nfrom config.config import *\nfrom utils.common_util import *\n\ndef test_imread(img_dir, img_id, color):\n img = Image.open(opj(img_dir, '%s_%s.png' % (img_id, color)))\n return img\n\n# https://www.kaggle.com/c/human-protein-atlas-image-classification/discussion/72534\ndef generate_hash(img_dir, meta, colors, dataset='train', imread_func=None, is_update=False):\n meta = meta.copy()\n cache_fname = opj(DATA_DIR, 'meta', '%s_hash_maps.pkl' % dataset)\n if ope(cache_fname) and not is_update:\n with open(cache_fname, 'rb') as dbfile:\n hash_maps = pickle.load(dbfile)\n else:\n hash_maps = {}\n for color in colors:\n hash_maps[color] = []\n for idx in tqdm(range(len(meta)), desc='train %s' % color):\n img = imread_func(img_dir, meta.iloc[idx][ID], color)\n hash = imagehash.phash(img)\n hash_maps[color].append(hash)\n\n with open(cache_fname, 'wb') as dbfile:\n pickle.dump(hash_maps, dbfile)\n\n for color in colors:\n meta[color] = hash_maps[color]\n\n return meta\n\ndef calc_hash(params):\n color, threshold, base_test_hash1, base_test_hash2, test_ids1, test_ids2 = params\n\n test_hash1 = base_test_hash1.reshape(1, -1) # 1*m\n\n test_idxes_list1 = []\n test_idxes_list2 = []\n hash_list = []\n\n step = 5\n for test_idx in tqdm(range(0, len(base_test_hash2), step), desc=color):\n test_hash2 = base_test_hash2[test_idx:test_idx + step].reshape(-1, 1) # n*1\n hash = test_hash2 - test_hash1 # n*m\n test_idxes2, test_idxes1 = np.where(hash <= threshold)\n hash = hash[test_idxes2, test_idxes1]\n\n test_idxes2 = test_idxes2 + test_idx\n\n test_idxes_list1.extend(test_idxes1.tolist())\n test_idxes_list2.extend(test_idxes2.tolist())\n hash_list.extend(hash.tolist())\n\n df = pd.DataFrame({\n 'Test1': test_ids1[test_idxes_list1],\n 'Test2': test_ids2[test_idxes_list2],\n 'Sim%s' % color[:1].upper(): hash_list\n })\n df = df[df['Test1'] != df['Test2']]\n return df\n\nif __name__ == '__main__':\n print('%s: calling main function ... ' % os.path.basename(__file__))\n\n # test set images\n test_img_dir = opj(DATA_DIR, 'test', 'images')\n test_meta = pd.read_csv(opj(DATA_DIR, 'raw', 'sample_submission.csv'))\n\n colors = ['red', 'green', 'blue']\n test_meta = generate_hash(test_img_dir, test_meta, colors,\n dataset='test', imread_func=test_imread, is_update=False)\n\n threshold = 12\n\n pool = mlc.SuperPool(3)\n params = []\n for color in colors:\n base_test_hash1 = test_meta[color].values\n base_test_hash2 = test_meta[color].values\n\n test_ids1 = test_meta[ID].values\n test_ids2 = test_meta[ID].values\n\n params.append((color, threshold, base_test_hash1, base_test_hash2, test_ids1, test_ids2))\n df_list = pool.map(calc_hash, params)\n\n df = None\n for temp_df, color in zip(df_list, colors):\n if df is None:\n df = temp_df\n else:\n df = pd.merge(df, temp_df, on=['Test1', 'Test2'], how='inner')\n\n print(df.shape)\n df.to_csv(opj(DATA_DIR, 'meta', 'test_match_test.csv.gz'), index=False, compression='gzip')\n\n print('\\nsuccess!')\n",
"import os, sys\n#os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport skimage.io\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport PIL\nfrom PIL import Image\nimport cv2\nfrom sklearn.utils import class_weight, shuffle\nfrom ml_stratifiers import MultilabelStratifiedKFold\nimport albumentations as A\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom classification_models.resnet.models import ResNet34\n\nMODEL_PATH = 'Christof/models/ResNet34/tests/21/'\nexp_suffix = '0'\n\nSIZE = 256\n\n# Load dataset info\npath_to_train = 'Christof/assets/train_rgb_256/'\ndata = pd.read_csv('Christof/assets/train.csv').sample(frac=0.3)\n\nnormal_aug = A.Compose([A.HorizontalFlip(p=0.5),\n A.VerticalFlip(p=0.5),\n A.Rotate((-180,180))])\n\ntrain_dataset_info = []\nfor name, labels in zip(data['Id'], data['Target'].str.split(' ')):\n train_dataset_info.append({\n 'path': os.path.join(path_to_train, name),\n 'labels': np.array([int(label) for label in labels])})\ntrain_dataset_info = np.array(train_dataset_info)\n\ncounts = np.zeros(28)\nfor item in train_dataset_info:\n for l in item['labels']:\n counts[l] = counts[l] + 1\n\ncounts = counts / len(train_dataset_info)\nrare_classes = np.where(counts < 0.005)\n\n#rare_dataset_info = np.array([item for item in train_dataset_info if np.isin(item['labels'], rare_classes).any()])\n#train_dataset_info = rare_dataset_info\n\n\nfrom classification_models.resnet import preprocess_input\nclass data_generator:\n\n @staticmethod\n def create_train(dataset_info, batch_size, shape, augument=True, heavy_augment_rares=True, oversample_factor = 0):\n assert shape[2] == 3\n\n\n\n if oversample_factor > 0:\n\n rare_dataset_info = np.array([item for item in dataset_info if np.isin(item['labels'], rare_classes).any()])\n #rare_dataset_info = shuffle(rare_dataset_info)\n for i in range(oversample_factor):\n #dataset_info\n dataset_info = np.append(dataset_info,rare_dataset_info)\n while True:\n dataset_info = shuffle(dataset_info)\n for start in range(0, len(dataset_info), batch_size):\n end = min(start + batch_size, len(dataset_info))\n batch_images = []\n X_train_batch = dataset_info[start:end]\n batch_labels = np.zeros((len(X_train_batch), 28))\n for i in range(len(X_train_batch)):\n image = data_generator.load_image(X_train_batch[i]['path'], shape)\n #image = preprocess_input(image)\n rare = np.isin(X_train_batch[i]['labels'], rare_classes).any()\n\n if augument:\n if heavy_augment_rares and rare:\n image = data_generator.heavy_augment(image)\n else:\n image = data_generator.augment(normal_aug,image)\n\n batch_images.append(image)\n batch_labels[i][X_train_batch[i]['labels']] = 1\n yield np.array(batch_images, np.float32), batch_labels\n\n @staticmethod\n def load_image(path, shape):\n image = cv2.imread(path + '.png', cv2.IMREAD_UNCHANGED)\n return image\n\n @staticmethod\n def augment(aug,image):\n image_aug = aug(image=image)['image']\n return image_aug\n\n @staticmethod\n def heavy_augment(image):\n augment_img = iaa.Sequential([\n iaa.OneOf([\n iaa.Affine(scale=(0.5,2.0)),\n iaa.Affine(shear=15),\n iaa.Affine(rotate=0),\n iaa.Affine(rotate=35),\n iaa.Affine(rotate=90),\n iaa.Affine(rotate=180),\n iaa.Affine(rotate=270),\n iaa.Affine(translate_percent=0.1),\n iaa.Fliplr(0.5),\n iaa.Flipud(0.5),\n iaa.Noop()\n ])], random_order=True)\n\n image_aug = augment_img.augment_image(image)\n 
return image_aug\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, GlobalMaxPooling2D, BatchNormalization, Input, Conv2D\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import metrics\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nimport keras\nfrom keras.models import Model\n\ndef create_model(input_shape, n_out):\n input_tensor = Input(shape=(SIZE, SIZE, 3))\n #bn = BatchNormalization()(input_tensor)\n #conv = Conv2D(3,(3,3),padding='same',activation='relu')(bn)\n base_model = ResNet34(include_top=False,\n weights='imagenet',\n input_shape=(SIZE, SIZE, 3),input_tensor=input_tensor)\n\n x = base_model.output\n x = Conv2D(32, kernel_size=(1, 1), activation='relu')(x)\n x = Flatten()(x)\n x = Dropout(0.5)(x)\n x = Dense(1024, activation='relu')(x)\n x = Dropout(0.5)(x)\n output = Dense(n_out, activation='sigmoid')(x)\n model = Model(input_tensor, output)\n\n # transfer imagenet weights\n #res_img = ResNet34(include_top=False, weights='imagenet', input_shape=(SIZE, SIZE, 3))\n #offset = 2\n #for i, l in enumerate(base_model.layers[offset+1:]):\n # l.set_weights(res_img.layers[i + 1].get_weights())\n\n return model\n\n\n\n# create callbacks list\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\nfrom keras_callbacks import F1Metric\nfrom keras_metrics import f1, f1_02\nfrom keras_losses import f1_loss\nepochs = [2,120]\nbatch_size = 16\n\n\n# split data into train, valid\n\nmskf = MultilabelStratifiedKFold(n_splits=5,shuffle=True,random_state=18)\n\ny = np.zeros((len(train_dataset_info), 28))\nfor i in range(len(train_dataset_info)):\n y[i][train_dataset_info[i]['labels']] = 1\nmskf.get_n_splits(train_dataset_info, y)\nkf = mskf.split(train_dataset_info, y)\nfold_id = 0\ntrain_indexes, valid_indexes = next(kf)\n\ntrain_generator = data_generator.create_train(train_dataset_info[train_indexes],\n batch_size, (SIZE, SIZE, 3), augument=False, heavy_augment_rares=False, oversample_factor=0)\nvalidation_generator = data_generator.create_train(train_dataset_info[valid_indexes],\n 1, (SIZE, SIZE, 3), augument=False,heavy_augment_rares=False, oversample_factor=0)\n\ncheckpoint = ModelCheckpoint(MODEL_PATH + 'model_{}.h5'.format(exp_suffix), monitor='val_f1_all', verbose=1,\n save_best_only=True, mode='max', save_weights_only=True)\ntensorboard = TensorBoard(MODEL_PATH + 'logs{}_'.format(fold_id) + '{}'.format(exp_suffix) + '/')\n# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,\n# verbose=1, mode='auto', epsilon=0.0001)\n# early = EarlyStopping(monitor=\"val_loss\",\n# mode=\"min\",\n# patience=6)\nf1_metric = F1Metric(validation_generator,len(valid_indexes)//1,1,28)\ncallbacks_list = [f1_metric, checkpoint, tensorboard]\n\n\n# warm up model\nmodel = create_model(\n input_shape=(SIZE, SIZE, 3),\n n_out=28)\n\nfor layer in model.layers:\n layer.trainable = False\n#model.layers[2].trainable = True\nmodel.layers[-1].trainable = True\nmodel.layers[-2].trainable = True\nmodel.layers[-3].trainable = True\nmodel.layers[-4].trainable = True\nmodel.layers[-5].trainable = True\nmodel.layers[-6].trainable = True\n\n\nmodel.compile(\n loss='binary_crossentropy',\n optimizer=Adam(1e-03),\n metrics=['acc',f1])\n# model.summary()\nmodel.fit_generator(\n train_generator,\n 
steps_per_epoch=np.ceil(float(len(train_indexes)) / float(batch_size)),\n validation_data=validation_generator,\n validation_steps=np.ceil(float(len(valid_indexes)) / float(batch_size)),\n epochs=epochs[0],\n verbose=1)\n\n\n# train all layers\nfor layer in model.layers:\n layer.trainable = True\nmodel.compile(loss='binary_crossentropy',\n optimizer=Adam(lr=1e-4),\n metrics=['acc',f1,f1_02])\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=np.ceil(float(len(train_indexes)) / float(batch_size)),\n validation_data=validation_generator,\n validation_steps=np.ceil(float(len(valid_indexes)) / float(batch_size)),\n epochs=epochs[1],\n verbose=1,\n callbacks=callbacks_list)\n\nmodel.load_weights(MODEL_PATH + 'model_{}.h5'.format(exp_suffix))\npreds = np.zeros(shape=(len(valid_indexes),28))\npreds_05 = np.zeros(shape=(len(valid_indexes),28))\ny_true= np.zeros(shape=(len(valid_indexes),28))\nfor i, info in tqdm(enumerate(train_dataset_info[valid_indexes])):\n image = data_generator.load_image(info['path'], (SIZE, SIZE, 3))\n score_predict = model.predict(image[np.newaxis])[0]\n thresh = max(score_predict[np.argsort(score_predict, axis=-1)[-5]], 0.2)\n preds[i][score_predict >= thresh] = 1\n preds_05[i][score_predict >= 0.5] = 1\n y_true[i][info['labels']]=1\n\nfrom sklearn.metrics import f1_score\n\nindividual_f1_scores = np.zeros(28)\nfor i in range(28):\n individual_f1_scores[i] = f1_score(y_true[:,i],preds[:,i])\nindividual_f1_scores = pd.DataFrame(individual_f1_scores,columns=['f1'])\nindividual_f1_scores.to_csv(MODEL_PATH + f'summary_f1_{exp_suffix}.csv',index=False)\n\n\n\nf1_res = f1_score(y_true, preds, average='macro')\nf1_res_05 = f1_score(y_true, preds_05, average='macro')\nprint(f1_res)\n\nSUBMISSION = False\nif SUBMISSION:\n\n submit = pd.read_csv('Christof/assets/sample_submission.csv')\n predicted = []\n draw_predict = []\n\n for name in tqdm(submit['Id']):\n path = os.path.join('Christof/assets/test_rgb_256/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n score_predict = model.predict(image[np.newaxis])[0]\n draw_predict.append(score_predict)\n\n thresh = max(score_predict[np.argsort(score_predict, axis=-1)[-5]],0.2)\n label_predict = np.arange(28)[score_predict >= thresh]\n str_predict_label = ' '.join(str(l) for l in label_predict)\n predicted.append(str_predict_label)\n\n submit['Predicted'] = predicted\n #np.save('draw_predict_InceptionV3.npy', score_predict)\n submit.to_csv(MODEL_PATH + 'debug_submissionb_{}_{:.4}.csv'.format(exp_suffix,f1_res), index=False)\n\n",
"import os, sys\n#os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport skimage.io\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport PIL\nfrom PIL import Image\nimport cv2\nfrom sklearn.utils import class_weight, shuffle\nfrom ml_stratifiers import MultilabelStratifiedKFold\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom classification_models.resnet.models import ResNet18\nimport albumentations as A\n\nMODEL_PATH = 'Christof/models/GAPNet/11_tests_on_clr/'\nexp_suffix = '_russ_schedule'\n\nSIZE = 512\n\n# Load dataset info\npath_to_train = 'Christof/assets/train_rgb_512/'\ndata = pd.read_csv('Christof/assets/train.csv')\n\nnormal_aug = A.Compose([A.Rotate((0,30),p=0.75),\n A.RandomRotate90(p=1),\n A.HorizontalFlip(p=0.5),\n A.RandomBrightness(0.05),\n A.RandomContrast(0.05),\n A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.1300, 0.0879, 0.1386),\n max_pixel_value=255.)\n ])\n\nval_aug = A.Compose([A.HorizontalFlip(p=0.5),\n A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.1300, 0.0879, 0.1386),\n max_pixel_value=255.)])\n\ntrain_dataset_info = []\nfor name, labels in zip(data['Id'], data['Target'].str.split(' ')):\n train_dataset_info.append({\n 'path': os.path.join(path_to_train, name),\n 'labels': np.array([int(label) for label in labels])})\ntrain_dataset_info = np.array(train_dataset_info)\n\ncounts = np.zeros(28)\nfor item in train_dataset_info:\n for l in item['labels']:\n counts[l] = counts[l] + 1\n\ncounts = counts / len(train_dataset_info)\nrare_classes = np.where(counts < 0.005)\n\n#rare_dataset_info = np.array([item for item in train_dataset_info if np.isin(item['labels'], rare_classes).any()])\n#train_dataset_info = rare_dataset_info\n\n\nfrom classification_models.resnet import preprocess_input\nclass data_generator:\n\n @staticmethod\n def create_train(dataset_info, batch_size, shape, augument=None, oversample_factor = 0):\n assert shape[2] == 3\n\n\n\n if oversample_factor > 0:\n\n rare_dataset_info = np.array([item for item in dataset_info if np.isin(item['labels'], rare_classes).any()])\n #rare_dataset_info = shuffle(rare_dataset_info)\n for i in range(oversample_factor):\n #dataset_info\n dataset_info = np.append(dataset_info,rare_dataset_info)\n while True:\n dataset_info = shuffle(dataset_info)\n for start in range(0, len(dataset_info), batch_size):\n end = min(start + batch_size, len(dataset_info))\n batch_images = []\n X_train_batch = dataset_info[start:end]\n batch_labels = np.zeros((len(X_train_batch), 28))\n for i in range(len(X_train_batch)):\n image = data_generator.load_image(X_train_batch[i]['path'], shape)\n #image = preprocess_input(image)\n #rare = np.isin(X_train_batch[i]['labels'], rare_classes).any()\n\n if augument:\n image = data_generator.augment(augument,image)\n\n batch_images.append(image)\n batch_labels[i][X_train_batch[i]['labels']] = 1\n yield np.array(batch_images, np.float32), batch_labels\n\n @staticmethod\n def load_image(path, shape):\n image = cv2.imread(path + '.png', cv2.IMREAD_UNCHANGED)\n return image\n\n @staticmethod\n def augment(aug,image):\n image_aug = aug(image=image)['image']\n return image_aug\n\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D, Concatenate, Input, Conv2D\nfrom 
keras.applications.inception_v3 import InceptionV3\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import metrics\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nimport keras\nfrom keras.models import Model\n\n\nfrom keras.layers import Layer, InputSpec\nfrom keras import initializers\nfrom keras.constraints import Constraint\nimport keras.backend as K\n\n\nfrom keras.layers import Reshape, Permute, multiply\ndef squeeze_excite_block(input, ratio=16):\n init = input\n channel_axis = 1 if K.image_data_format() == \"channels_first\" else -1\n filters = init._keras_shape[channel_axis]\n se_shape = (1, 1, filters)\n\n se = GlobalAveragePooling2D()(init)\n se = Reshape(se_shape)(se)\n se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)\n se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)\n\n if K.image_data_format() == 'channels_first':\n se = Permute((3, 1, 2))(se)\n\n x = multiply([init, se])\n return x\n\ndef encoder(backbone):\n\n c0 = backbone.get_layer('relu0').output\n\n c1 = backbone.get_layer('stage2_unit1_relu1').get_output_at(0) # 128\n c2 = backbone.get_layer('stage3_unit1_relu1').output # 63\n enc_out = backbone.get_layer('stage4_unit1_relu1').output # 32\n #c4 = backbone.get_layer('stage4_unit1_relu1').output # 16\n #enc_out = backbone.output # 8\n\n short_cuts = [c0,c1,c2]\n return enc_out, short_cuts\n\n\ndef create_model(input_shape, n_out):\n input_tensor = Input(shape=(SIZE, SIZE, 3))\n #bn = BatchNormalization()(input_tensor)\n #conv = Conv2D(3,(3,3),padding='same',activation='relu')(bn)\n base_model = ResNet18(include_top=False,\n weights='imagenet',\n input_shape=(SIZE, SIZE, 3),input_tensor=input_tensor)\n\n enc_out, short_cuts = encoder(base_model)\n x0 = GlobalAveragePooling2D()(squeeze_excite_block(enc_out))\n x1 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[0]))\n x2 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[1]))\n x3 = GlobalAveragePooling2D()(squeeze_excite_block(short_cuts[2]))\n x = Concatenate()([x0,x1,x2,x3])\n x = Dropout(0.3)(x)\n x = Dense(256, activation='relu')(x)\n x = Dropout(0.3)(x)\n x = Dense(256, activation='relu')(x)\n x = Dropout(0.3)(x)\n output = Dense(n_out, activation='sigmoid')(x)\n\n model = Model(input_tensor, output)\n\n # transfer imagenet weights\n #res_img = ResNet34(include_top=False, weights='imagenet', input_shape=(SIZE, SIZE, 3))\n #offset = 2\n #for i, l in enumerate(base_model.layers[offset+1:]):\n # l.set_weights(res_img.layers[i + 1].get_weights())\n\n return model\n\n\n\n# create callbacks list\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\nfrom keras_callbacks import F1Metric\n#from keras_metrics import f1, f1_02\n#from keras_losses import f1_loss\nepochs = [40,150]\nbatch_size = 32\n\np = {}\np['lr'] = 1e-4 # Learning rate\np['step_size'] = 5\np['gamma'] = 0.5\np['wd'] = 1e-4 # Weight decay\np['momentum'] = 0.9 # Momentum\np['epoch_size'] = 15 # How many epochs to change learning rate\np['patience'] = 30 # epochs to wait for early stopping\n\n# split data into train, valid\n\nmskf = MultilabelStratifiedKFold(n_splits=5,shuffle=True,random_state=18)\n\ny = np.zeros((len(train_dataset_info), 28))\nfor i in range(len(train_dataset_info)):\n y[i][train_dataset_info[i]['labels']] = 1\nmskf.get_n_splits(train_dataset_info, y)\nkf = mskf.split(train_dataset_info, y)\nfold_id = 1\nfor f in range(fold_id):\n 
train_indexes, valid_indexes = next(kf)\n\ntrain_indexes, valid_indexes = next(kf)\ntrain_generator = data_generator.create_train(train_dataset_info[train_indexes],\n batch_size, (SIZE, SIZE, 3), augument=normal_aug, oversample_factor=0)\nvalidation_generator = data_generator.create_train(train_dataset_info[valid_indexes],\n batch_size, (SIZE, SIZE, 3), augument=val_aug, oversample_factor=0)\n#validation_generator2 = data_generator.create_train(train_dataset_info[valid_indexes],\n# batch_size, (SIZE, SIZE, 3), augument=val_aug, oversample_factor=0)\n\n#checkpoint = ModelCheckpoint(MODEL_PATH + 'model_f1all{}.h5'.format(exp_suffix), monitor='val_f1_all', verbose=1,\n# save_best_only=True, mode='max', save_weights_only=True)\ncheckpoint = ModelCheckpoint(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix), monitor='val_loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True)\ntensorboard = TensorBoard(MODEL_PATH + 'logs{}'.format(fold_id) + '{}'.format(exp_suffix) + '/')\n# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,\n# verbose=1, mode='auto', epsilon=0.0001)\n# early = EarlyStopping(monitor=\"val_loss\",\n# mode=\"min\",\n# patience=6)\n#f1_metric = F1Metric(validation_generator2,2*len(valid_indexes)//batch_size,batch_size,28) #2 times val because of val_aug\n\n\nfrom torch.optim.lr_scheduler import StepLR\n\n\nbase_lr = 1e-4\ngamma = 0.5\nstep_size = 5\n\n#base_lr * gamma ** (t // step_size)\ndef step_schedule(t):\n lr = base_lr * gamma ** (t // step_size)\n return float(lr)\n\nlr_schedule = LearningRateScheduler(step_schedule,verbose=True)\n\ncallbacks_list = [lr_schedule, checkpoint, tensorboard]\n\n\n# warm up model\nmodel = create_model(\n input_shape=(SIZE, SIZE, 3),\n n_out=28)\n\n\n\nPOS_WEIGHT = 10 # multiplier for positive targets, needs to be tuned\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as tfb\n\n\ndef weighted_binary_crossentropy(target, output):\n \"\"\"\n Weighted binary crossentropy between an output tensor\n and a target tensor. 
POS_WEIGHT is used as a multiplier\n for the positive targets.\n\n Combination of the following functions:\n * keras.losses.binary_crossentropy\n * keras.backend.tensorflow_backend.binary_crossentropy\n * tf.nn.weighted_cross_entropy_with_logits\n \"\"\"\n # transform back to logits\n _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)\n #_epsilon = K.epsilon()\n output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)\n output = tf.log(output / (1 - output))\n # compute weighted loss\n loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,\n logits=output,\n pos_weight=POS_WEIGHT)\n return tf.reduce_mean(loss, axis=-1)\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom functools import reduce\n\ndef binaryRound(x):\n \"\"\"\n Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},\n using the straight through estimator for the gradient.\n \"\"\"\n g = tf.get_default_graph()\n\n with ops.name_scope(\"BinaryRound\") as name:\n with g.gradient_override_map({\"Round\": \"Identity\"}):\n return tf.round(x, name=name)\n\n # For Tensorflow v0.11 and below use:\n #with g.gradient_override_map({\"Floor\": \"Identity\"}):\n # return tf.round(x, name=name)\n\ndef brian_f1(y_true, y_pred):\n y_pred = binaryRound(y_pred)\n tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)\n tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)\n fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)\n fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)\n\n p = tp / (tp + fp + K.epsilon())\n r = tp / (tp + fn + K.epsilon())\n\n f1 = 2*p*r / (p+r+K.epsilon())\n f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)\n return K.mean(f1)\n\ndef brian_f1_loss(y_true, y_pred):\n return 1- brian_f1(y_true, y_pred)\n\n\ndef custom_loss(y_true, y_pred):\n\n return 4*weighted_binary_crossentropy(y_true,y_pred) - K.log(brian_f1(y_true,y_pred))\n\n# train all layers\nfrom keras.metrics import binary_accuracy\n\nfrom keras_optimizers import AdamW\n\nmodel.compile(loss=custom_loss,\n optimizer=AdamW(lr=1e-4,weight_decay=1e-4,batch_size=32,\n samples_per_epoch=np.ceil(float(len(train_indexes)) / float(batch_size)),\n epochs=epochs[0]),\n metrics=[binary_accuracy,brian_f1])\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=np.ceil(float(len(train_indexes)) / float(batch_size)),\n validation_data=validation_generator,\n validation_steps=2*np.ceil(float(len(valid_indexes)) / float(batch_size)),\n epochs=epochs[0],\n verbose=1,\n callbacks=callbacks_list)\nmodel.load_weights(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix))\neps = 0.004\ndesired = {\n 0: 0.36239782,\n 1: 0.043841336,\n 2: 0.075268817,\n 3: 0.059322034,\n 4: 0.075268817,\n 5: 0.075268817,\n 6: 0.043841336,\n 7: 0.075268817,\n 8: eps,\n 9: eps,\n 10: eps,\n 11: 0.043841336,\n 12: 0.043841336,\n 13: 0.014198783,\n 14: 0.043841336,\n 15: eps,\n 16: 0.028806584,\n 17: 0.014198783,\n 18: 0.028806584,\n 19: 0.059322034,\n 20: eps,\n 21: 0.126126126,\n 22: 0.028806584,\n 23: 0.075268817,\n 24: eps,\n 25: 0.222493888,\n 26: 0.028806584,\n 27: eps\n}\n\nsubmit = pd.read_csv('Christof/assets/sample_submission.csv')\ntta = 8\n\n\n\ndraw_predict = np.zeros((len(submit['Id']), 28))\n\nfor i, name in tqdm(enumerate(submit['Id'])):\n path = os.path.join('Christof/assets/test_rgb_512/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n images = [data_generator.augment(normal_aug, image) for _ in range(tta)]\n tta_predicts = model.predict(np.array(images))\n draw_predict[i] = 
np.median(tta_predicts,axis = 0)\n\n\n\n# custom thresholds to match lb proportions\nthresholds = np.linspace(0.95, 0.05, 101)\npred = draw_predict.copy()\nfor j in tqdm(range(pred.shape[1])):\n for t in thresholds:\n pred[:, j] = (draw_predict[:, j] > t).astype(int)\n prop = np.mean(pred[:, j])\n if prop >= desired[j]: break\n print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )\n\nprint(pred[:5].astype(int))\n\nlabel_predict = [np.arange(28)[score_predict == 1] for score_predict in pred]\nstr_predict_label = [' '.join(str(l) for l in lp) for lp in label_predict]\n\nsubmit['Predicted'] = str_predict_label\n# np.save('draw_predict_InceptionV3.npy', score_predict)\nsubmit.to_csv(MODEL_PATH + 'submission_loss{}_lb_dist_adjusted_8tta.csv'.format(exp_suffix), index=False)\n\nfrom Christof.utils import f1_sub\n\nbest_sub = pd.read_csv('ens18.csv')\nf1_sub(best_sub,submit)",
"import os, sys\n#os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport skimage.io\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport PIL\nfrom PIL import Image\nimport cv2\nfrom sklearn.utils import class_weight, shuffle\nfrom ml_stratifiers import MultilabelStratifiedKFold\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom classification_models.resnet.models import ResNet34\nimport albumentations as A\n\nMODEL_PATH = 'Christof/models/GAPNet/5_filtered/'\nexp_suffix = '_base'\n\nSIZE = 256\n\n# Load dataset info\npath_to_train = 'Christof/assets/train_rgb_256/'\ndata = pd.read_csv('Christof/assets/train.csv')\n\nnormal_aug = A.Compose([A.Rotate((0,30),p=0.75),\n A.RandomRotate90(p=1),\n A.HorizontalFlip(p=0.5),\n A.RandomBrightness(0.05),\n A.RandomContrast(0.05),\n A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.13704, 0.10145, 0.15313),\n max_pixel_value=255.)\n ])\n\nval_aug = A.Compose([A.HorizontalFlip(p=0.5),\n A.Normalize(mean=(0.08069, 0.05258, 0.05487), std=(0.13704, 0.10145, 0.15313),\n max_pixel_value=255.)])\n\ntrain_dataset_info = []\nfor name, labels in zip(data['Id'], data['Target'].str.split(' ')):\n train_dataset_info.append({\n 'path': os.path.join(path_to_train, name),\n 'labels': np.array([int(label) for label in labels])})\ntrain_dataset_info = np.array(train_dataset_info)\n\ncounts = np.zeros(28)\nfor item in train_dataset_info:\n for l in item['labels']:\n counts[l] = counts[l] + 1\n\ncounts = counts / len(train_dataset_info)\nrare_classes = np.where(counts < 0.005)\n\n#rare_dataset_info = np.array([item for item in train_dataset_info if np.isin(item['labels'], rare_classes).any()])\n#train_dataset_info = rare_dataset_info\n\n\nfrom classification_models.resnet import preprocess_input\nclass data_generator:\n\n @staticmethod\n def create_train(dataset_info, batch_size, shape, augument=None, oversample_factor = 0):\n assert shape[2] == 3\n\n\n\n if oversample_factor > 0:\n\n rare_dataset_info = np.array([item for item in dataset_info if np.isin(item['labels'], rare_classes).any()])\n #rare_dataset_info = shuffle(rare_dataset_info)\n for i in range(oversample_factor):\n #dataset_info\n dataset_info = np.append(dataset_info,rare_dataset_info)\n while True:\n dataset_info = shuffle(dataset_info)\n for start in range(0, len(dataset_info), batch_size):\n end = min(start + batch_size, len(dataset_info))\n batch_images = []\n X_train_batch = dataset_info[start:end]\n batch_labels = np.zeros((len(X_train_batch), 28))\n for i in range(len(X_train_batch)):\n image = data_generator.load_image(X_train_batch[i]['path'], shape)\n #image = preprocess_input(image)\n #rare = np.isin(X_train_batch[i]['labels'], rare_classes).any()\n\n if augument:\n image = data_generator.augment(augument,image)\n\n batch_images.append(image)\n batch_labels[i][X_train_batch[i]['labels']] = 1\n yield np.array(batch_images, np.float32), batch_labels\n\n @staticmethod\n def load_image(path, shape):\n image = cv2.imread(path + '.png', cv2.IMREAD_UNCHANGED)\n return image\n\n @staticmethod\n def augment(aug,image):\n image_aug = aug(image=image)['image']\n return image_aug\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Activation, Dropout, Flatten, Dense, GlobalAveragePooling2D, Concatenate, Input, Conv2D\nfrom 
keras.applications.inception_v3 import InceptionV3\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import metrics\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nimport keras\nfrom keras.models import Model\n\n\nfrom keras.layers import Layer, InputSpec\nfrom keras import initializers\nfrom keras.constraints import Constraint\nimport keras.backend as K\n\ndef encoder(backbone):\n\n c1 = backbone.get_layer('relu0').get_output_at(0) # 128\n c2 = backbone.get_layer('stage2_unit1_relu1').output # 63\n enc_out = backbone.get_layer('stage3_unit1_relu1').output # 32\n #c4 = backbone.get_layer('stage4_unit1_relu1').output # 16\n #enc_out = backbone.output # 8\n\n short_cuts = [c1,c2]\n return enc_out, short_cuts\n\n\ndef create_model(input_shape, n_out):\n input_tensor = Input(shape=(SIZE, SIZE, 3))\n #bn = BatchNormalization()(input_tensor)\n #conv = Conv2D(3,(3,3),padding='same',activation='relu')(bn)\n base_model = ResNet34(include_top=False,\n weights='imagenet',\n input_shape=(SIZE, SIZE, 3),input_tensor=input_tensor)\n\n enc_out, short_cuts = encoder(base_model)\n x0 = GlobalAveragePooling2D()(enc_out)\n x1 = GlobalAveragePooling2D()(short_cuts[0])\n x2 = GlobalAveragePooling2D()(short_cuts[1])\n x = Concatenate()([x0,x1,x2])\n x = Dropout(0.3)(x)\n x = Dense(256, activation='relu')(x)\n x = Dropout(0.3)(x)\n x = Dense(256, activation='relu')(x)\n x = Dropout(0.3)(x)\n output = Dense(n_out, activation='sigmoid')(x)\n\n model = Model(input_tensor, output)\n\n # transfer imagenet weights\n #res_img = ResNet34(include_top=False, weights='imagenet', input_shape=(SIZE, SIZE, 3))\n #offset = 2\n #for i, l in enumerate(base_model.layers[offset+1:]):\n # l.set_weights(res_img.layers[i + 1].get_weights())\n\n return model\n\n\n\n# create callbacks list\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\nfrom keras_callbacks import F1Metric\n#from keras_metrics import f1, f1_02\n#from keras_losses import f1_loss\nepochs = [10,150]\nbatch_size = 32\n\n\n# split data into train, valid\n\nmskf = MultilabelStratifiedKFold(n_splits=5,shuffle=True,random_state=18)\n\ny = np.zeros((len(train_dataset_info), 28))\nfor i in range(len(train_dataset_info)):\n y[i][train_dataset_info[i]['labels']] = 1\nmskf.get_n_splits(train_dataset_info, y)\nkf = mskf.split(train_dataset_info, y)\nfold_id = 0\ntrain_indexes, valid_indexes = next(kf)\n\n\ntrain_ds = train_dataset_info[train_indexes]\n# print(train_ds.shape)\n# train_ds = np.array([item for item in train_ds if not np.array_equal(item['labels'],np.array([0]))])\n# print(train_ds.shape)\n# train_ds = np.array([item for item in train_ds if not np.array_equal(item['labels'],np.array([25]))])\n# print(train_ds.shape)\n# train_ds = np.array([item for item in train_ds if not np.array_equal(item['labels'],np.array([0,25]))])\n# print(train_ds.shape)\n# train_ds = np.array([item for item in train_ds if not np.array_equal(item['labels'],np.array([25,0]))])\n# print(train_ds.shape)\n\ntrain_generator = data_generator.create_train(train_ds,\n batch_size, (SIZE, SIZE, 3), augument=normal_aug, oversample_factor=0)\nvalidation_generator = data_generator.create_train(train_dataset_info[valid_indexes],\n batch_size, (SIZE, SIZE, 3), augument=val_aug, oversample_factor=0)\n\ncheckpoint = ModelCheckpoint(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix), monitor='val_loss', verbose=1,\n save_best_only=True, mode='min', save_weights_only=True)\ntensorboard = TensorBoard(MODEL_PATH + 
'logs{}'.format(fold_id) + '{}'.format(exp_suffix) + '/')\n# reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3,\n# verbose=1, mode='auto', epsilon=0.0001)\n# early = EarlyStopping(monitor=\"val_loss\",\n# mode=\"min\",\n# patience=6)\n\nnb_epochs = epochs[1]\nnb_cycles = 15\ninit_lr = 0.001\ndef _cosine_anneal_schedule(t):\n\n cos_inner = np.pi * (t % (nb_epochs // nb_cycles))\n cos_inner /= nb_epochs// nb_cycles\n cos_out = np.cos(cos_inner) + 1\n return float(init_lr / 2 * cos_out)\n\nlr_schedule = LearningRateScheduler(_cosine_anneal_schedule,verbose=True)\n\n\n\n\ncallbacks_list = [lr_schedule, checkpoint, tensorboard]\n\n\n# warm up model\nmodel = create_model(\n input_shape=(SIZE, SIZE, 3),\n n_out=28)\n\n\n\nPOS_WEIGHT = 10 # multiplier for positive targets, needs to be tuned\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as tfb\n\n\ndef weighted_binary_crossentropy(target, output):\n \"\"\"\n Weighted binary crossentropy between an output tensor\n and a target tensor. POS_WEIGHT is used as a multiplier\n for the positive targets.\n\n Combination of the following functions:\n * keras.losses.binary_crossentropy\n * keras.backend.tensorflow_backend.binary_crossentropy\n * tf.nn.weighted_cross_entropy_with_logits\n \"\"\"\n # transform back to logits\n _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)\n #_epsilon = K.epsilon()\n output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)\n output = tf.log(output / (1 - output))\n # compute weighted loss\n loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,\n logits=output,\n pos_weight=POS_WEIGHT)\n return tf.reduce_mean(loss, axis=-1)\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom functools import reduce\n\ndef binaryRound(x):\n \"\"\"\n Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},\n using the straight through estimator for the gradient.\n \"\"\"\n g = tf.get_default_graph()\n\n with ops.name_scope(\"BinaryRound\") as name:\n with g.gradient_override_map({\"Round\": \"Identity\"}):\n return tf.round(x, name=name)\n\n # For Tensorflow v0.11 and below use:\n #with g.gradient_override_map({\"Floor\": \"Identity\"}):\n # return tf.round(x, name=name)\n\ndef brian_f1(y_true, y_pred):\n y_pred = binaryRound(y_pred)\n tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)\n tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)\n fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)\n fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)\n\n p = tp / (tp + fp + K.epsilon())\n r = tp / (tp + fn + K.epsilon())\n\n f1 = 2*p*r / (p+r+K.epsilon())\n f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)\n return K.mean(f1)\n\ndef brian_f1_loss(y_true, y_pred):\n return 1- brian_f1(y_true, y_pred)\n\n\ndef custom_loss(y_true, y_pred):\n\n return 4*weighted_binary_crossentropy(y_true,y_pred) - K.log(brian_f1(y_true,y_pred))\n\n# train all layers\nfrom keras.metrics import binary_accuracy\n\nmodel.compile(loss=custom_loss,\n optimizer=Adam(lr=5e-4),\n metrics=[binary_accuracy,brian_f1])\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=np.ceil(float(len(train_ds)) / float(batch_size)),\n validation_data=validation_generator,\n validation_steps=2*np.ceil(float(len(valid_indexes)) / float(batch_size)),\n epochs=epochs[0],\n verbose=1,\n callbacks=callbacks_list)\n\n\n\ny = np.zeros((len(train_ds), 28))\npreds = np.zeros((len(train_ds), 28))\n\nfor i, item in tqdm(enumerate(train_ds)):\n 
y[i][train_ds[i]['labels']] = 1\n image = data_generator.load_image(train_ds[i]['path'], '')\n images = [data_generator.augment(normal_aug, image) for _ in range(2)]\n tta_predicts = model.predict(np.array(images))\n preds[i] = np.median(tta_predicts,axis = 0)\n\nconf = 2*np.abs(0.5-preds)\nimg_conf = np.min(conf,axis = 1)\n\nimg_conf.mean()\nimg_conf.max()\n\nlen(img_conf[img_conf>0.9])\n\nmodel.load_weights(MODEL_PATH + 'model_loss{}.h5'.format(exp_suffix))\neps = 0.004\ndesired = {\n 0: 0.36239782,\n 1: 0.043841336,\n 2: 0.075268817,\n 3: 0.059322034,\n 4: 0.075268817,\n 5: 0.075268817,\n 6: 0.043841336,\n 7: 0.075268817,\n 8: eps,\n 9: eps,\n 10: eps,\n 11: 0.043841336,\n 12: 0.043841336,\n 13: 0.014198783,\n 14: 0.043841336,\n 15: eps,\n 16: 0.028806584,\n 17: 0.014198783,\n 18: 0.028806584,\n 19: 0.059322034,\n 20: eps,\n 21: 0.126126126,\n 22: 0.028806584,\n 23: 0.075268817,\n 24: eps,\n 25: 0.222493888,\n 26: 0.028806584,\n 27: eps\n}\n\nsubmit = pd.read_csv('Christof/assets/sample_submission.csv')\ntta = 8\n\n\n\ndraw_predict = np.zeros((len(submit['Id']), 28))\n\nfor i, name in tqdm(enumerate(submit['Id'])):\n path = os.path.join('Christof/assets/test_rgb_256/', name)\n image = data_generator.load_image(path, (SIZE, SIZE, 3))\n images = [data_generator.augment(normal_aug, image) for _ in range(tta)]\n tta_predicts = model.predict(np.array(images))\n draw_predict[i] = np.median(tta_predicts,axis = 0)\n\n\n\n# custom thresholds to match lb proportions\nthresholds = np.linspace(0.95, 0.05, 101)\npred = draw_predict.copy()\nfor j in tqdm(range(pred.shape[1])):\n for t in thresholds:\n pred[:, j] = (draw_predict[:, j] > t).astype(int)\n prop = np.mean(pred[:, j])\n if prop >= desired[j]: break\n print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )\n\nprint(pred[:5].astype(int))\n\nlabel_predict = [np.arange(28)[score_predict == 1] for score_predict in pred]\nstr_predict_label = [' '.join(str(l) for l in lp) for lp in label_predict]\n\nsubmit['Predicted'] = str_predict_label\n# np.save('draw_predict_InceptionV3.npy', score_predict)\nsubmit.to_csv(MODEL_PATH + 'submission_loss{}_lb_dist_adjusted_8tta.csv'.format(exp_suffix), index=False)\n\nfrom Christof.utils import f1_sub\n\nbest_sub = pd.read_csv('ens18.csv')\nf1_sub(best_sub,submit)"
] | [
[
"pandas.read_csv",
"numpy.unique",
"numpy.asarray",
"numpy.delete",
"numpy.column_stack",
"numpy.array",
"numpy.where"
],
[
"pandas.concat",
"pandas.read_csv",
"numpy.log",
"numpy.random.seed",
"numpy.min",
"numpy.linspace",
"numpy.int32",
"numpy.save",
"pandas.DataFrame",
"sklearn.model_selection.StratifiedKFold",
"numpy.max",
"numpy.argmax",
"numpy.mean",
"torch.stack",
"sklearn.metrics.f1_score",
"numpy.zeros",
"matplotlib.pyplot.style.use"
],
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"numpy.unique",
"numpy.asarray",
"numpy.arange",
"matplotlib.pyplot.yscale",
"numpy.delete",
"matplotlib.pyplot.grid",
"numpy.column_stack",
"matplotlib.pyplot.bar",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.where"
],
[
"tensorflow.is_nan",
"numpy.linspace",
"numpy.argmin",
"numpy.mean",
"tensorflow.get_default_graph",
"numpy.where",
"pandas.read_csv",
"numpy.arange",
"numpy.save",
"numpy.zeros",
"numpy.random.choice",
"numpy.median",
"tensorflow.zeros_like",
"numpy.array",
"tensorflow.round",
"numpy.sum",
"tensorflow.clip_by_value",
"tensorflow.reduce_mean",
"numpy.cos",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.log",
"tensorflow.nn.weighted_cross_entropy_with_logits"
],
[
"numpy.linspace",
"pandas.DataFrame",
"numpy.max",
"numpy.mean",
"sklearn.metrics.f1_score",
"numpy.where",
"pandas.read_csv",
"numpy.arange",
"numpy.zeros",
"numpy.isin",
"numpy.append",
"numpy.argsort",
"numpy.array",
"tensorflow.clip_by_value",
"tensorflow.reduce_mean",
"sklearn.utils.shuffle",
"numpy.ones",
"tensorflow.log",
"tensorflow.nn.weighted_cross_entropy_with_logits"
],
[
"pandas.merge",
"numpy.where"
],
[
"sklearn.linear_model.Ridge",
"sklearn.preprocessing.StandardScaler",
"sklearn.metrics.precision_recall_curve"
],
[
"tensorflow.is_nan",
"numpy.linspace",
"numpy.argmin",
"numpy.mean",
"tensorflow.get_default_graph",
"numpy.where",
"pandas.read_csv",
"numpy.arange",
"numpy.save",
"numpy.zeros",
"numpy.random.choice",
"numpy.median",
"tensorflow.zeros_like",
"numpy.array",
"tensorflow.round",
"numpy.sum",
"tensorflow.clip_by_value",
"tensorflow.reduce_mean",
"numpy.cos",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.log",
"tensorflow.nn.weighted_cross_entropy_with_logits"
],
[
"tensorflow.is_nan",
"numpy.linspace",
"numpy.argmin",
"numpy.mean",
"tensorflow.get_default_graph",
"numpy.where",
"pandas.read_csv",
"numpy.arange",
"numpy.save",
"numpy.zeros",
"numpy.random.choice",
"numpy.median",
"tensorflow.zeros_like",
"numpy.array",
"tensorflow.round",
"numpy.sum",
"tensorflow.clip_by_value",
"tensorflow.reduce_mean",
"numpy.cos",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.log",
"tensorflow.nn.weighted_cross_entropy_with_logits"
],
[
"pandas.merge",
"numpy.where"
],
[
"pandas.read_csv",
"sklearn.utils.shuffle",
"numpy.arange",
"pandas.DataFrame",
"numpy.append",
"numpy.argsort",
"sklearn.metrics.f1_score",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.isin"
],
[
"tensorflow.is_nan",
"numpy.linspace",
"numpy.mean",
"tensorflow.get_default_graph",
"numpy.where",
"pandas.read_csv",
"numpy.arange",
"numpy.zeros",
"numpy.isin",
"numpy.median",
"tensorflow.zeros_like",
"numpy.append",
"tensorflow.round",
"numpy.array",
"tensorflow.clip_by_value",
"tensorflow.reduce_mean",
"sklearn.utils.shuffle",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.log",
"tensorflow.nn.weighted_cross_entropy_with_logits"
],
[
"tensorflow.is_nan",
"numpy.linspace",
"numpy.mean",
"tensorflow.get_default_graph",
"numpy.where",
"pandas.read_csv",
"numpy.arange",
"numpy.zeros",
"numpy.isin",
"numpy.min",
"numpy.median",
"tensorflow.zeros_like",
"numpy.append",
"numpy.array",
"tensorflow.round",
"tensorflow.clip_by_value",
"numpy.abs",
"tensorflow.reduce_mean",
"sklearn.utils.shuffle",
"numpy.cos",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.log",
"tensorflow.nn.weighted_cross_entropy_with_logits"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
MustafaAbbas110/FinalProject | [
"30d371f06a8a1875285cfd4a8940ca3610ec1274",
"901ac307b68486d8289105c159ca702318bea5b0"
] | [
"Lib/site-packages/sklearn/metrics/tests/test_pairwise.py",
"Lib/site-packages/sklearn/neighbors/tests/test_neighbors.py"
] | [
"from types import GeneratorType\n\nimport numpy as np\nfrom numpy import linalg\n\nfrom scipy.sparse import dok_matrix, csr_matrix, issparse\nfrom scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski\nfrom scipy.spatial.distance import cdist, pdist, squareform\n\nimport pytest\n\nfrom sklearn import config_context\n\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import assert_almost_equal\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import ignore_warnings\n\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.metrics.pairwise import nan_euclidean_distances\nfrom sklearn.metrics.pairwise import manhattan_distances\nfrom sklearn.metrics.pairwise import haversine_distances\nfrom sklearn.metrics.pairwise import linear_kernel\nfrom sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel\nfrom sklearn.metrics.pairwise import polynomial_kernel\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom sklearn.metrics.pairwise import laplacian_kernel\nfrom sklearn.metrics.pairwise import sigmoid_kernel\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.metrics.pairwise import cosine_distances\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.metrics.pairwise import pairwise_distances_chunked\nfrom sklearn.metrics.pairwise import pairwise_distances_argmin_min\nfrom sklearn.metrics.pairwise import pairwise_distances_argmin\nfrom sklearn.metrics.pairwise import pairwise_kernels\nfrom sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS\nfrom sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS\nfrom sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS\nfrom sklearn.metrics.pairwise import PAIRED_DISTANCES\nfrom sklearn.metrics.pairwise import check_pairwise_arrays\nfrom sklearn.metrics.pairwise import check_paired_arrays\nfrom sklearn.metrics.pairwise import paired_distances\nfrom sklearn.metrics.pairwise import paired_euclidean_distances\nfrom sklearn.metrics.pairwise import paired_manhattan_distances\nfrom sklearn.metrics.pairwise import _euclidean_distances_upcast\nfrom sklearn.preprocessing import normalize\nfrom sklearn.exceptions import DataConversionWarning\n\n\ndef test_pairwise_distances():\n # Test the pairwise_distance helper function.\n rng = np.random.RandomState(0)\n\n # Euclidean distance should be equivalent to calling the function.\n X = rng.random_sample((5, 4))\n S = pairwise_distances(X, metric=\"euclidean\")\n S2 = euclidean_distances(X)\n assert_array_almost_equal(S, S2)\n\n # Euclidean distance, with Y != X.\n Y = rng.random_sample((2, 4))\n S = pairwise_distances(X, Y, metric=\"euclidean\")\n S2 = euclidean_distances(X, Y)\n assert_array_almost_equal(S, S2)\n # Check to ensure NaNs work with pairwise_distances.\n X_masked = rng.random_sample((5, 4))\n Y_masked = rng.random_sample((2, 4))\n X_masked[0, 0] = np.nan\n Y_masked[0, 0] = np.nan\n S_masked = pairwise_distances(X_masked, Y_masked, metric=\"nan_euclidean\")\n S2_masked = nan_euclidean_distances(X_masked, Y_masked)\n assert_array_almost_equal(S_masked, S2_masked)\n # Test with tuples as X and Y\n X_tuples = tuple([tuple([v for v in row]) for row in X])\n Y_tuples = tuple([tuple([v for v in row]) for row in Y])\n S2 = pairwise_distances(X_tuples, Y_tuples, metric=\"euclidean\")\n assert_array_almost_equal(S, S2)\n\n # Test haversine distance\n # The data should be valid latitude and 
longitude\n X = rng.random_sample((5, 2))\n X[:, 0] = (X[:, 0] - 0.5) * 2 * np.pi/2\n X[:, 1] = (X[:, 1] - 0.5) * 2 * np.pi\n S = pairwise_distances(X, metric=\"haversine\")\n S2 = haversine_distances(X)\n assert_array_almost_equal(S, S2)\n\n # Test haversine distance, with Y != X\n Y = rng.random_sample((2, 2))\n Y[:, 0] = (Y[:, 0] - 0.5)*2*np.pi/2\n Y[:, 1] = (Y[:, 1] - 0.5)*2*np.pi\n S = pairwise_distances(X, Y, metric=\"haversine\")\n S2 = haversine_distances(X, Y)\n assert_array_almost_equal(S, S2)\n\n # \"cityblock\" uses scikit-learn metric, cityblock (function) is\n # scipy.spatial.\n S = pairwise_distances(X, metric=\"cityblock\")\n S2 = pairwise_distances(X, metric=cityblock)\n assert S.shape[0] == S.shape[1]\n assert S.shape[0] == X.shape[0]\n assert_array_almost_equal(S, S2)\n\n # The manhattan metric should be equivalent to cityblock.\n S = pairwise_distances(X, Y, metric=\"manhattan\")\n S2 = pairwise_distances(X, Y, metric=cityblock)\n assert S.shape[0] == X.shape[0]\n assert S.shape[1] == Y.shape[0]\n assert_array_almost_equal(S, S2)\n\n # Test cosine as a string metric versus cosine callable\n # The string \"cosine\" uses sklearn.metric,\n # while the function cosine is scipy.spatial\n S = pairwise_distances(X, Y, metric=\"cosine\")\n S2 = pairwise_distances(X, Y, metric=cosine)\n assert S.shape[0] == X.shape[0]\n assert S.shape[1] == Y.shape[0]\n assert_array_almost_equal(S, S2)\n\n # Test with sparse X and Y,\n # currently only supported for Euclidean, L1 and cosine.\n X_sparse = csr_matrix(X)\n Y_sparse = csr_matrix(Y)\n S = pairwise_distances(X_sparse, Y_sparse, metric=\"euclidean\")\n S2 = euclidean_distances(X_sparse, Y_sparse)\n assert_array_almost_equal(S, S2)\n S = pairwise_distances(X_sparse, Y_sparse, metric=\"cosine\")\n S2 = cosine_distances(X_sparse, Y_sparse)\n assert_array_almost_equal(S, S2)\n S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric=\"manhattan\")\n S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())\n assert_array_almost_equal(S, S2)\n S2 = manhattan_distances(X, Y)\n assert_array_almost_equal(S, S2)\n\n # Test with scipy.spatial.distance metric, with a kwd\n kwds = {\"p\": 2.0}\n S = pairwise_distances(X, Y, metric=\"minkowski\", **kwds)\n S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)\n assert_array_almost_equal(S, S2)\n\n # same with Y = None\n kwds = {\"p\": 2.0}\n S = pairwise_distances(X, metric=\"minkowski\", **kwds)\n S2 = pairwise_distances(X, metric=minkowski, **kwds)\n assert_array_almost_equal(S, S2)\n\n # Test that scipy distance metrics throw an error if sparse matrix given\n with pytest.raises(TypeError):\n pairwise_distances(X_sparse, metric=\"minkowski\")\n with pytest.raises(TypeError):\n pairwise_distances(X, Y_sparse, metric=\"minkowski\")\n\n # Test that a value error is raised if the metric is unknown\n with pytest.raises(ValueError):\n pairwise_distances(X, Y, metric=\"blah\")\n\n\[email protected]('metric', PAIRWISE_BOOLEAN_FUNCTIONS)\ndef test_pairwise_boolean_distance(metric):\n # test that we convert to boolean arrays for boolean distances\n rng = np.random.RandomState(0)\n X = rng.randn(5, 4)\n Y = X.copy()\n Y[0, 0] = 1 - Y[0, 0]\n\n # ignore conversion to boolean in pairwise_distances\n with ignore_warnings(category=DataConversionWarning):\n for Z in [Y, None]:\n res = pairwise_distances(X, Z, metric=metric)\n res[np.isnan(res)] = 0\n assert np.sum(res != 0) == 0\n\n # non-boolean arrays are converted to boolean for boolean\n # distance metrics with a data conversion warning\n msg = \"Data 
was converted to boolean for metric %s\" % metric\n with pytest.warns(DataConversionWarning, match=msg):\n pairwise_distances(X, metric=metric)\n\n # Check that the warning is raised if X is boolean by Y is not boolean:\n with pytest.warns(DataConversionWarning, match=msg):\n pairwise_distances(X.astype(bool), Y=Y, metric=metric)\n\n # Check that no warning is raised if X is already boolean and Y is None:\n with pytest.warns(None) as records:\n pairwise_distances(X.astype(bool), metric=metric)\n assert len(records) == 0\n\n\ndef test_no_data_conversion_warning():\n # No warnings issued if metric is not a boolean distance function\n rng = np.random.RandomState(0)\n X = rng.randn(5, 4)\n with pytest.warns(None) as records:\n pairwise_distances(X, metric=\"minkowski\")\n assert len(records) == 0\n\n\[email protected]('func', [pairwise_distances, pairwise_kernels])\ndef test_pairwise_precomputed(func):\n # Test correct shape\n with pytest.raises(ValueError, match='.* shape .*'):\n func(np.zeros((5, 3)), metric='precomputed')\n # with two args\n with pytest.raises(ValueError, match='.* shape .*'):\n func(np.zeros((5, 3)), np.zeros((4, 4)), metric='precomputed')\n # even if shape[1] agrees (although thus second arg is spurious)\n with pytest.raises(ValueError, match='.* shape .*'):\n func(np.zeros((5, 3)), np.zeros((4, 3)), metric='precomputed')\n\n # Test not copied (if appropriate dtype)\n S = np.zeros((5, 5))\n S2 = func(S, metric=\"precomputed\")\n assert S is S2\n # with two args\n S = np.zeros((5, 3))\n S2 = func(S, np.zeros((3, 3)), metric=\"precomputed\")\n assert S is S2\n\n # Test always returns float dtype\n S = func(np.array([[1]], dtype='int'), metric='precomputed')\n assert 'f' == S.dtype.kind\n\n # Test converts list to array-like\n S = func([[1.]], metric='precomputed')\n assert isinstance(S, np.ndarray)\n\n\ndef test_pairwise_precomputed_non_negative():\n # Test non-negative values\n with pytest.raises(ValueError, match='.* non-negative values.*'):\n pairwise_distances(np.full((5, 5), -1), metric='precomputed')\n\n\n_wminkowski_kwds = {'w': np.arange(1, 5).astype('double', copy=False), 'p': 1}\n\n\ndef callable_rbf_kernel(x, y, **kwds):\n # Callable version of pairwise.rbf_kernel.\n K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)\n return K\n\n\[email protected](\n 'func, metric, kwds',\n [(pairwise_distances, 'euclidean', {}),\n (pairwise_distances, wminkowski, _wminkowski_kwds),\n (pairwise_distances, 'wminkowski', _wminkowski_kwds),\n (pairwise_kernels, 'polynomial', {'degree': 1}),\n (pairwise_kernels, callable_rbf_kernel, {'gamma': .1})])\[email protected]('array_constr', [np.array, csr_matrix])\[email protected]('dtype', [np.float64, int])\ndef test_pairwise_parallel(func, metric, kwds, array_constr, dtype):\n rng = np.random.RandomState(0)\n X = array_constr(5 * rng.random_sample((5, 4)), dtype=dtype)\n Y = array_constr(5 * rng.random_sample((3, 4)), dtype=dtype)\n\n try:\n S = func(X, metric=metric, n_jobs=1, **kwds)\n except (TypeError, ValueError) as exc:\n # Not all metrics support sparse input\n # ValueError may be triggered by bad callable\n if array_constr is csr_matrix:\n with pytest.raises(type(exc)):\n func(X, metric=metric, n_jobs=2, **kwds)\n return\n else:\n raise\n S2 = func(X, metric=metric, n_jobs=2, **kwds)\n assert_allclose(S, S2)\n\n S = func(X, Y, metric=metric, n_jobs=1, **kwds)\n S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)\n assert_allclose(S, S2)\n\n\ndef test_pairwise_callable_nonstrict_metric():\n # paired_distances should allow 
callable metric where metric(x, x) != 0\n # Knowing that the callable is a strict metric would allow the diagonal to\n # be left uncalculated and set to 0.\n assert pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0] == 5\n\n\n# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.\[email protected](\n 'metric',\n [\"rbf\", \"laplacian\", \"sigmoid\", \"polynomial\", \"linear\",\n \"chi2\", \"additive_chi2\"])\ndef test_pairwise_kernels(metric):\n # Test the pairwise_kernels helper function.\n\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n Y = rng.random_sample((2, 4))\n function = PAIRWISE_KERNEL_FUNCTIONS[metric]\n # Test with Y=None\n K1 = pairwise_kernels(X, metric=metric)\n K2 = function(X)\n assert_array_almost_equal(K1, K2)\n # Test with Y=Y\n K1 = pairwise_kernels(X, Y=Y, metric=metric)\n K2 = function(X, Y=Y)\n assert_array_almost_equal(K1, K2)\n # Test with tuples as X and Y\n X_tuples = tuple([tuple([v for v in row]) for row in X])\n Y_tuples = tuple([tuple([v for v in row]) for row in Y])\n K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)\n assert_array_almost_equal(K1, K2)\n\n # Test with sparse X and Y\n X_sparse = csr_matrix(X)\n Y_sparse = csr_matrix(Y)\n if metric in [\"chi2\", \"additive_chi2\"]:\n # these don't support sparse matrices yet\n with pytest.raises(ValueError):\n pairwise_kernels(X_sparse, Y=Y_sparse,\n metric=metric)\n return\n K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)\n assert_array_almost_equal(K1, K2)\n\n\ndef test_pairwise_kernels_callable():\n # Test the pairwise_kernels helper function\n # with a callable function, with given keywords.\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n Y = rng.random_sample((2, 4))\n\n metric = callable_rbf_kernel\n kwds = {'gamma': 0.1}\n K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)\n K2 = rbf_kernel(X, Y=Y, **kwds)\n assert_array_almost_equal(K1, K2)\n\n # callable function, X=Y\n K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)\n K2 = rbf_kernel(X, Y=X, **kwds)\n assert_array_almost_equal(K1, K2)\n\n\ndef test_pairwise_kernels_filter_param():\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n Y = rng.random_sample((2, 4))\n K = rbf_kernel(X, Y, gamma=0.1)\n params = {\"gamma\": 0.1, \"blabla\": \":)\"}\n K2 = pairwise_kernels(X, Y, metric=\"rbf\", filter_params=True, **params)\n assert_array_almost_equal(K, K2)\n\n with pytest.raises(TypeError):\n pairwise_kernels(X, Y, \"rbf\", **params)\n\n\[email protected]('metric, func', PAIRED_DISTANCES.items())\ndef test_paired_distances(metric, func):\n # Test the pairwise_distance helper function.\n rng = np.random.RandomState(0)\n # Euclidean distance should be equivalent to calling the function.\n X = rng.random_sample((5, 4))\n # Euclidean distance, with Y != X.\n Y = rng.random_sample((5, 4))\n\n S = paired_distances(X, Y, metric=metric)\n S2 = func(X, Y)\n assert_array_almost_equal(S, S2)\n S3 = func(csr_matrix(X), csr_matrix(Y))\n assert_array_almost_equal(S, S3)\n if metric in PAIRWISE_DISTANCE_FUNCTIONS:\n # Check the pairwise_distances implementation\n # gives the same value\n distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)\n distances = np.diag(distances)\n assert_array_almost_equal(distances, S)\n\n\ndef test_paired_distances_callable():\n # Test the pairwise_distance helper function\n # with the callable implementation\n rng = np.random.RandomState(0)\n # Euclidean distance should be equivalent to calling the function.\n X = rng.random_sample((5, 4))\n 
# Euclidean distance, with Y != X.\n Y = rng.random_sample((5, 4))\n\n S = paired_distances(X, Y, metric='manhattan')\n S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))\n assert_array_almost_equal(S, S2)\n\n # Test that a value error is raised when the lengths of X and Y should not\n # differ\n Y = rng.random_sample((3, 4))\n with pytest.raises(ValueError):\n paired_distances(X, Y)\n\n\ndef test_pairwise_distances_argmin_min():\n # Check pairwise minimum distances computation for any metric\n X = [[0], [1]]\n Y = [[-2], [3]]\n\n Xsp = dok_matrix(X)\n Ysp = csr_matrix(Y, dtype=np.float32)\n\n expected_idx = [0, 1]\n expected_vals = [2, 2]\n expected_vals_sq = [4, 4]\n\n # euclidean metric\n idx, vals = pairwise_distances_argmin_min(X, Y, metric=\"euclidean\")\n idx2 = pairwise_distances_argmin(X, Y, metric=\"euclidean\")\n assert_array_almost_equal(idx, expected_idx)\n assert_array_almost_equal(idx2, expected_idx)\n assert_array_almost_equal(vals, expected_vals)\n # sparse matrix case\n idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric=\"euclidean\")\n assert_array_almost_equal(idxsp, expected_idx)\n assert_array_almost_equal(valssp, expected_vals)\n # We don't want np.matrix here\n assert type(idxsp) == np.ndarray\n assert type(valssp) == np.ndarray\n\n # euclidean metric squared\n idx, vals = pairwise_distances_argmin_min(X, Y, metric=\"euclidean\",\n metric_kwargs={\"squared\": True})\n assert_array_almost_equal(idx, expected_idx)\n assert_array_almost_equal(vals, expected_vals_sq)\n\n # Non-euclidean scikit-learn metric\n idx, vals = pairwise_distances_argmin_min(X, Y, metric=\"manhattan\")\n idx2 = pairwise_distances_argmin(X, Y, metric=\"manhattan\")\n assert_array_almost_equal(idx, expected_idx)\n assert_array_almost_equal(idx2, expected_idx)\n assert_array_almost_equal(vals, expected_vals)\n # sparse matrix case\n idxsp, valssp = pairwise_distances_argmin_min(Xsp, Ysp, metric=\"manhattan\")\n assert_array_almost_equal(idxsp, expected_idx)\n assert_array_almost_equal(valssp, expected_vals)\n\n # Non-euclidean Scipy distance (callable)\n idx, vals = pairwise_distances_argmin_min(X, Y, metric=minkowski,\n metric_kwargs={\"p\": 2})\n assert_array_almost_equal(idx, expected_idx)\n assert_array_almost_equal(vals, expected_vals)\n\n # Non-euclidean Scipy distance (string)\n idx, vals = pairwise_distances_argmin_min(X, Y, metric=\"minkowski\",\n metric_kwargs={\"p\": 2})\n assert_array_almost_equal(idx, expected_idx)\n assert_array_almost_equal(vals, expected_vals)\n\n # Compare with naive implementation\n rng = np.random.RandomState(0)\n X = rng.randn(97, 149)\n Y = rng.randn(111, 149)\n\n dist = pairwise_distances(X, Y, metric=\"manhattan\")\n dist_orig_ind = dist.argmin(axis=0)\n dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]\n\n dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(\n X, Y, axis=0, metric=\"manhattan\")\n np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)\n np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)\n\n\ndef _reduce_func(dist, start):\n return dist[:, :100]\n\n\ndef test_pairwise_distances_chunked_reduce():\n rng = np.random.RandomState(0)\n X = rng.random_sample((400, 4))\n # Reduced Euclidean distance\n S = pairwise_distances(X)[:, :100]\n S_chunks = pairwise_distances_chunked(X, None, reduce_func=_reduce_func,\n working_memory=2 ** -16)\n assert isinstance(S_chunks, GeneratorType)\n S_chunks = list(S_chunks)\n assert len(S_chunks) > 1\n # atol 
is for diagonal where S is explicitly zeroed on the diagonal\n assert_allclose(np.vstack(S_chunks), S, atol=1e-7)\n\n\[email protected]('good_reduce', [\n lambda D, start: list(D),\n lambda D, start: np.array(D),\n lambda D, start: csr_matrix(D),\n lambda D, start: (list(D), list(D)),\n lambda D, start: (dok_matrix(D), np.array(D), list(D)),\n ])\ndef test_pairwise_distances_chunked_reduce_valid(good_reduce):\n X = np.arange(10).reshape(-1, 1)\n S_chunks = pairwise_distances_chunked(X, None, reduce_func=good_reduce,\n working_memory=64)\n next(S_chunks)\n\n\[email protected](('bad_reduce', 'err_type', 'message'), [\n (lambda D, s: np.concatenate([D, D[-1:]]), ValueError,\n r'length 11\\..* input: 10\\.'),\n (lambda D, s: (D, np.concatenate([D, D[-1:]])), ValueError,\n r'length \\(10, 11\\)\\..* input: 10\\.'),\n (lambda D, s: (D[:9], D), ValueError,\n r'length \\(9, 10\\)\\..* input: 10\\.'),\n (lambda D, s: 7, TypeError,\n r'returned 7\\. Expected sequence\\(s\\) of length 10\\.'),\n (lambda D, s: (7, 8), TypeError,\n r'returned \\(7, 8\\)\\. Expected sequence\\(s\\) of length 10\\.'),\n (lambda D, s: (np.arange(10), 9), TypeError,\n r', 9\\)\\. Expected sequence\\(s\\) of length 10\\.'),\n])\ndef test_pairwise_distances_chunked_reduce_invalid(bad_reduce, err_type,\n message):\n X = np.arange(10).reshape(-1, 1)\n S_chunks = pairwise_distances_chunked(X, None, reduce_func=bad_reduce,\n working_memory=64)\n with pytest.raises(err_type, match=message):\n next(S_chunks)\n\n\ndef check_pairwise_distances_chunked(X, Y, working_memory, metric='euclidean'):\n gen = pairwise_distances_chunked(X, Y, working_memory=working_memory,\n metric=metric)\n assert isinstance(gen, GeneratorType)\n blockwise_distances = list(gen)\n Y = X if Y is None else Y\n min_block_mib = len(Y) * 8 * 2 ** -20\n\n for block in blockwise_distances:\n memory_used = block.nbytes\n assert memory_used <= max(working_memory, min_block_mib) * 2 ** 20\n\n blockwise_distances = np.vstack(blockwise_distances)\n S = pairwise_distances(X, Y, metric=metric)\n assert_array_almost_equal(blockwise_distances, S)\n\n\[email protected](\n 'metric',\n ('euclidean', 'l2', 'sqeuclidean'))\ndef test_pairwise_distances_chunked_diagonal(metric):\n rng = np.random.RandomState(0)\n X = rng.normal(size=(1000, 10), scale=1e10)\n chunks = list(pairwise_distances_chunked(X, working_memory=1,\n metric=metric))\n assert len(chunks) > 1\n assert_array_almost_equal(np.diag(np.vstack(chunks)), 0, decimal=10)\n\n\[email protected](\n 'metric',\n ('euclidean', 'l2', 'sqeuclidean'))\ndef test_parallel_pairwise_distances_diagonal(metric):\n rng = np.random.RandomState(0)\n X = rng.normal(size=(1000, 10), scale=1e10)\n distances = pairwise_distances(X, metric=metric, n_jobs=2)\n assert_allclose(np.diag(distances), 0, atol=1e-10)\n\n\n@ignore_warnings\ndef test_pairwise_distances_chunked():\n # Test the pairwise_distance helper function.\n rng = np.random.RandomState(0)\n # Euclidean distance should be equivalent to calling the function.\n X = rng.random_sample((200, 4))\n check_pairwise_distances_chunked(X, None, working_memory=1,\n metric='euclidean')\n # Test small amounts of memory\n for power in range(-16, 0):\n check_pairwise_distances_chunked(X, None, working_memory=2 ** power,\n metric='euclidean')\n # X as list\n check_pairwise_distances_chunked(X.tolist(), None, working_memory=1,\n metric='euclidean')\n # Euclidean distance, with Y != X.\n Y = rng.random_sample((100, 4))\n check_pairwise_distances_chunked(X, Y, working_memory=1,\n metric='euclidean')\n 
check_pairwise_distances_chunked(X.tolist(), Y.tolist(), working_memory=1,\n metric='euclidean')\n # absurdly large working_memory\n check_pairwise_distances_chunked(X, Y, working_memory=10000,\n metric='euclidean')\n # \"cityblock\" uses scikit-learn metric, cityblock (function) is\n # scipy.spatial.\n check_pairwise_distances_chunked(X, Y, working_memory=1,\n metric='cityblock')\n # Test that a value error is raised if the metric is unknown\n with pytest.raises(ValueError):\n next(pairwise_distances_chunked(X, Y, metric=\"blah\"))\n\n # Test precomputed returns all at once\n D = pairwise_distances(X)\n gen = pairwise_distances_chunked(D,\n working_memory=2 ** -16,\n metric='precomputed')\n assert isinstance(gen, GeneratorType)\n assert next(gen) is D\n with pytest.raises(StopIteration):\n next(gen)\n\n\[email protected](\"x_array_constr\", [np.array, csr_matrix],\n ids=[\"dense\", \"sparse\"])\[email protected](\"y_array_constr\", [np.array, csr_matrix],\n ids=[\"dense\", \"sparse\"])\ndef test_euclidean_distances_known_result(x_array_constr, y_array_constr):\n # Check the pairwise Euclidean distances computation on known result\n X = x_array_constr([[0]])\n Y = y_array_constr([[1], [2]])\n D = euclidean_distances(X, Y)\n assert_allclose(D, [[1., 2.]])\n\n\[email protected](\"dtype\", [np.float32, np.float64])\[email protected](\"y_array_constr\", [np.array, csr_matrix],\n ids=[\"dense\", \"sparse\"])\ndef test_euclidean_distances_with_norms(dtype, y_array_constr):\n # check that we still get the right answers with {X,Y}_norm_squared\n # and that we get a wrong answer with wrong {X,Y}_norm_squared\n rng = np.random.RandomState(0)\n X = rng.random_sample((10, 10)).astype(dtype, copy=False)\n Y = rng.random_sample((20, 10)).astype(dtype, copy=False)\n\n # norms will only be used if their dtype is float64\n X_norm_sq = (X.astype(np.float64) ** 2).sum(axis=1).reshape(1, -1)\n Y_norm_sq = (Y.astype(np.float64) ** 2).sum(axis=1).reshape(1, -1)\n\n Y = y_array_constr(Y)\n\n D1 = euclidean_distances(X, Y)\n D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)\n D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)\n D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,\n Y_norm_squared=Y_norm_sq)\n assert_allclose(D2, D1)\n assert_allclose(D3, D1)\n assert_allclose(D4, D1)\n\n # check we get the wrong answer with wrong {X,Y}_norm_squared\n wrong_D = euclidean_distances(X, Y,\n X_norm_squared=np.zeros_like(X_norm_sq),\n Y_norm_squared=np.zeros_like(Y_norm_sq))\n with pytest.raises(AssertionError):\n assert_allclose(wrong_D, D1)\n\n\[email protected](\"dtype\", [np.float32, np.float64])\[email protected](\"x_array_constr\", [np.array, csr_matrix],\n ids=[\"dense\", \"sparse\"])\[email protected](\"y_array_constr\", [np.array, csr_matrix],\n ids=[\"dense\", \"sparse\"])\ndef test_euclidean_distances(dtype, x_array_constr, y_array_constr):\n # check that euclidean distances gives same result as scipy cdist\n # when X and Y != X are provided\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10)).astype(dtype, copy=False)\n X[X < 0.8] = 0\n Y = rng.random_sample((10, 10)).astype(dtype, copy=False)\n Y[Y < 0.8] = 0\n\n expected = cdist(X, Y)\n\n X = x_array_constr(X)\n Y = y_array_constr(Y)\n distances = euclidean_distances(X, Y)\n\n # the default rtol=1e-7 is too close to the float32 precision\n # and fails due too rounding errors.\n assert_allclose(distances, expected, rtol=1e-6)\n assert distances.dtype == dtype\n\n\[email protected](\"dtype\", [np.float32, np.float64])\[email 
protected](\"x_array_constr\", [np.array, csr_matrix],\n ids=[\"dense\", \"sparse\"])\ndef test_euclidean_distances_sym(dtype, x_array_constr):\n # check that euclidean distances gives same result as scipy pdist\n # when only X is provided\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10)).astype(dtype, copy=False)\n X[X < 0.8] = 0\n\n expected = squareform(pdist(X))\n\n X = x_array_constr(X)\n distances = euclidean_distances(X)\n\n # the default rtol=1e-7 is too close to the float32 precision\n # and fails due too rounding errors.\n assert_allclose(distances, expected, rtol=1e-6)\n assert distances.dtype == dtype\n\n\[email protected](\"batch_size\", [None, 5, 7, 101])\[email protected](\"x_array_constr\", [np.array, csr_matrix],\n ids=[\"dense\", \"sparse\"])\[email protected](\"y_array_constr\", [np.array, csr_matrix],\n ids=[\"dense\", \"sparse\"])\ndef test_euclidean_distances_upcast(batch_size, x_array_constr,\n y_array_constr):\n # check batches handling when Y != X (#13910)\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10)).astype(np.float32)\n X[X < 0.8] = 0\n Y = rng.random_sample((10, 10)).astype(np.float32)\n Y[Y < 0.8] = 0\n\n expected = cdist(X, Y)\n\n X = x_array_constr(X)\n Y = y_array_constr(Y)\n distances = _euclidean_distances_upcast(X, Y=Y, batch_size=batch_size)\n distances = np.sqrt(np.maximum(distances, 0))\n\n # the default rtol=1e-7 is too close to the float32 precision\n # and fails due too rounding errors.\n assert_allclose(distances, expected, rtol=1e-6)\n\n\[email protected](\"batch_size\", [None, 5, 7, 101])\[email protected](\"x_array_constr\", [np.array, csr_matrix],\n ids=[\"dense\", \"sparse\"])\ndef test_euclidean_distances_upcast_sym(batch_size, x_array_constr):\n # check batches handling when X is Y (#13910)\n rng = np.random.RandomState(0)\n X = rng.random_sample((100, 10)).astype(np.float32)\n X[X < 0.8] = 0\n\n expected = squareform(pdist(X))\n\n X = x_array_constr(X)\n distances = _euclidean_distances_upcast(X, Y=X, batch_size=batch_size)\n distances = np.sqrt(np.maximum(distances, 0))\n\n # the default rtol=1e-7 is too close to the float32 precision\n # and fails due too rounding errors.\n assert_allclose(distances, expected, rtol=1e-6)\n\n\[email protected](\n \"dtype, eps, rtol\",\n [(np.float32, 1e-4, 1e-5),\n pytest.param(\n np.float64, 1e-8, 0.99,\n marks=pytest.mark.xfail(reason='failing due to lack of precision'))])\[email protected](\"dim\", [1, 1000000])\ndef test_euclidean_distances_extreme_values(dtype, eps, rtol, dim):\n # check that euclidean distances is correct with float32 input thanks to\n # upcasting. On float64 there are still precision issues.\n X = np.array([[1.] * dim], dtype=dtype)\n Y = np.array([[1. 
+ eps] * dim], dtype=dtype)\n\n distances = euclidean_distances(X, Y)\n expected = cdist(X, Y)\n\n assert_allclose(distances, expected, rtol=1e-5)\n\n\[email protected](\"squared\", [True, False])\ndef test_nan_euclidean_distances_equal_to_euclidean_distance(squared):\n # with no nan values\n rng = np.random.RandomState(1337)\n X = rng.randn(3, 4)\n Y = rng.randn(4, 4)\n\n normal_distance = euclidean_distances(X, Y=Y, squared=squared)\n nan_distance = nan_euclidean_distances(X, Y=Y, squared=squared)\n assert_allclose(normal_distance, nan_distance)\n\n\[email protected](\n \"X\", [np.array([[np.inf, 0]]), np.array([[0, -np.inf]])])\[email protected](\n \"Y\", [np.array([[np.inf, 0]]), np.array([[0, -np.inf]]), None])\ndef test_nan_euclidean_distances_infinite_values(X, Y):\n\n with pytest.raises(ValueError) as excinfo:\n nan_euclidean_distances(X, Y=Y)\n\n exp_msg = (\"Input contains infinity or a value too large for \"\n \"dtype('float64').\")\n assert exp_msg == str(excinfo.value)\n\n\[email protected](\"X, X_diag, missing_value\", [\n (np.array([[0, 1], [1, 0]]), np.sqrt(2), np.nan),\n (np.array([[0, 1], [1, np.nan]]), np.sqrt(2), np.nan),\n (np.array([[np.nan, 1], [1, np.nan]]), np.nan, np.nan),\n (np.array([[np.nan, 1], [np.nan, 0]]), np.sqrt(2), np.nan),\n (np.array([[0, np.nan], [1, np.nan]]), np.sqrt(2), np.nan),\n (np.array([[0, 1], [1, 0]]), np.sqrt(2), -1),\n (np.array([[0, 1], [1, -1]]), np.sqrt(2), -1),\n (np.array([[-1, 1], [1, -1]]), np.nan, -1),\n (np.array([[-1, 1], [-1, 0]]), np.sqrt(2), -1),\n (np.array([[0, -1], [1, -1]]), np.sqrt(2), -1)\n])\ndef test_nan_euclidean_distances_2x2(X, X_diag, missing_value):\n\n exp_dist = np.array([[0., X_diag], [X_diag, 0]])\n\n dist = nan_euclidean_distances(X, missing_values=missing_value)\n assert_allclose(exp_dist, dist)\n\n dist_sq = nan_euclidean_distances(\n X, squared=True, missing_values=missing_value)\n assert_allclose(exp_dist**2, dist_sq)\n\n dist_two = nan_euclidean_distances(X, X, missing_values=missing_value)\n assert_allclose(exp_dist, dist_two)\n\n dist_two_copy = nan_euclidean_distances(\n X, X.copy(), missing_values=missing_value)\n assert_allclose(exp_dist, dist_two_copy)\n\n\[email protected](\"missing_value\", [np.nan, -1])\ndef test_nan_euclidean_distances_complete_nan(missing_value):\n X = np.array([[missing_value, missing_value], [0, 1]])\n\n exp_dist = np.array([[np.nan, np.nan], [np.nan, 0]])\n\n dist = nan_euclidean_distances(X, missing_values=missing_value)\n assert_allclose(exp_dist, dist)\n\n dist = nan_euclidean_distances(\n X, X.copy(), missing_values=missing_value)\n assert_allclose(exp_dist, dist)\n\n\[email protected](\"missing_value\", [np.nan, -1])\ndef test_nan_euclidean_distances_not_trival(missing_value):\n X = np.array([[1., missing_value, 3., 4., 2.],\n [missing_value, 4., 6., 1., missing_value],\n [3., missing_value, missing_value, missing_value, 1.]])\n\n Y = np.array([[missing_value, 7., 7., missing_value, 2.],\n [missing_value, missing_value, 5., 4., 7.],\n [missing_value, missing_value, missing_value, 4., 5.]])\n\n # Check for symmetry\n D1 = nan_euclidean_distances(X, Y, missing_values=missing_value)\n D2 = nan_euclidean_distances(Y, X, missing_values=missing_value)\n\n assert_almost_equal(D1, D2.T)\n\n # Check with explicit formula and squared=True\n assert_allclose(\n nan_euclidean_distances(\n X[:1], Y[:1], squared=True, missing_values=missing_value),\n [[5.0 / 2.0 * ((7 - 3)**2 + (2 - 2)**2)]])\n\n # Check with explicit formula and squared=False\n assert_allclose(\n 
nan_euclidean_distances(\n X[1:2], Y[1:2], squared=False, missing_values=missing_value),\n [[np.sqrt(5.0 / 2.0 * ((6 - 5)**2 + (1 - 4)**2))]])\n\n # Check when Y = X is explicitly passed\n D3 = nan_euclidean_distances(X, missing_values=missing_value)\n D4 = nan_euclidean_distances(X, X, missing_values=missing_value)\n D5 = nan_euclidean_distances(X, X.copy(), missing_values=missing_value)\n assert_allclose(D3, D4)\n assert_allclose(D4, D5)\n\n # Check copy = True against copy = False\n D6 = nan_euclidean_distances(X, Y, copy=True)\n D7 = nan_euclidean_distances(X, Y, copy=False)\n assert_allclose(D6, D7)\n\n\[email protected](\"missing_value\", [np.nan, -1])\ndef test_nan_euclidean_distances_one_feature_match_positive(missing_value):\n # First feature is the only feature that is non-nan and in both\n # samples. The result of `nan_euclidean_distances` with squared=True\n # should be non-negative. The non-squared version should all be close to 0.\n X = np.array([[-122.27, 648., missing_value, 37.85],\n [-122.27, missing_value, 2.34701493, missing_value]])\n\n dist_squared = nan_euclidean_distances(X, missing_values=missing_value,\n squared=True)\n assert np.all(dist_squared >= 0)\n\n dist = nan_euclidean_distances(X, missing_values=missing_value,\n squared=False)\n assert_allclose(dist, 0.0)\n\n\ndef test_cosine_distances():\n # Check the pairwise Cosine distances computation\n rng = np.random.RandomState(1337)\n x = np.abs(rng.rand(910))\n XA = np.vstack([x, x])\n D = cosine_distances(XA)\n assert_array_almost_equal(D, [[0., 0.], [0., 0.]])\n # check that all elements are in [0, 2]\n assert np.all(D >= 0.)\n assert np.all(D <= 2.)\n # check that diagonal elements are equal to 0\n assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])\n\n XB = np.vstack([x, -x])\n D2 = cosine_distances(XB)\n # check that all elements are in [0, 2]\n assert np.all(D2 >= 0.)\n assert np.all(D2 <= 2.)\n # check that diagonal elements are equal to 0 and non diagonal to 2\n assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])\n\n # check large random matrix\n X = np.abs(rng.rand(1000, 5000))\n D = cosine_distances(X)\n # check that diagonal elements are equal to 0\n assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] 
* D.shape[0])\n assert np.all(D >= 0.)\n assert np.all(D <= 2.)\n\n\ndef test_haversine_distances():\n # Check haversine distance with distances computation\n def slow_haversine_distances(x, y):\n diff_lat = y[0] - x[0]\n diff_lon = y[1] - x[1]\n a = np.sin(diff_lat / 2) ** 2 + (\n np.cos(x[0]) * np.cos(y[0]) * np.sin(diff_lon/2) ** 2\n )\n c = 2 * np.arcsin(np.sqrt(a))\n return c\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 2))\n Y = rng.random_sample((10, 2))\n D1 = np.array([[slow_haversine_distances(x, y) for y in Y] for x in X])\n D2 = haversine_distances(X, Y)\n assert_array_almost_equal(D1, D2)\n # Test haversine distance does not accept X where n_feature != 2\n X = rng.random_sample((10, 3))\n err_msg = \"Haversine distance only valid in 2 dimensions\"\n with pytest.raises(ValueError, match=err_msg):\n haversine_distances(X)\n\n\n# Paired distances\n\ndef test_paired_euclidean_distances():\n # Check the paired Euclidean distances computation\n X = [[0], [0]]\n Y = [[1], [2]]\n D = paired_euclidean_distances(X, Y)\n assert_array_almost_equal(D, [1., 2.])\n\n\ndef test_paired_manhattan_distances():\n # Check the paired manhattan distances computation\n X = [[0], [0]]\n Y = [[1], [2]]\n D = paired_manhattan_distances(X, Y)\n assert_array_almost_equal(D, [1., 2.])\n\n\ndef test_chi_square_kernel():\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n Y = rng.random_sample((10, 4))\n K_add = additive_chi2_kernel(X, Y)\n gamma = 0.1\n K = chi2_kernel(X, Y, gamma=gamma)\n assert K.dtype == np.float\n for i, x in enumerate(X):\n for j, y in enumerate(Y):\n chi2 = -np.sum((x - y) ** 2 / (x + y))\n chi2_exp = np.exp(gamma * chi2)\n assert_almost_equal(K_add[i, j], chi2)\n assert_almost_equal(K[i, j], chi2_exp)\n\n # check diagonal is ones for data with itself\n K = chi2_kernel(Y)\n assert_array_equal(np.diag(K), 1)\n # check off-diagonal is < 1 but > 0:\n assert np.all(K > 0)\n assert np.all(K - np.diag(np.diag(K)) < 1)\n # check that float32 is preserved\n X = rng.random_sample((5, 4)).astype(np.float32)\n Y = rng.random_sample((10, 4)).astype(np.float32)\n K = chi2_kernel(X, Y)\n assert K.dtype == np.float32\n\n # check integer type gets converted,\n # check that zeros are handled\n X = rng.random_sample((10, 4)).astype(np.int32)\n K = chi2_kernel(X, X)\n assert np.isfinite(K).all()\n assert K.dtype == np.float\n\n # check that kernel of similar things is greater than dissimilar ones\n X = [[.3, .7], [1., 0]]\n Y = [[0, 1], [.9, .1]]\n K = chi2_kernel(X, Y)\n assert K[0, 0] > K[0, 1]\n assert K[1, 1] > K[1, 0]\n\n # test negative input\n with pytest.raises(ValueError):\n chi2_kernel([[0, -1]])\n with pytest.raises(ValueError):\n chi2_kernel([[0, -1]], [[-1, -1]])\n with pytest.raises(ValueError):\n chi2_kernel([[0, 1]], [[-1, -1]])\n\n # different n_features in X and Y\n with pytest.raises(ValueError):\n chi2_kernel([[0, 1]], [[.2, .2, .6]])\n\n # sparse matrices\n with pytest.raises(ValueError):\n chi2_kernel(csr_matrix(X), csr_matrix(Y))\n with pytest.raises(ValueError):\n additive_chi2_kernel(csr_matrix(X), csr_matrix(Y))\n\n\[email protected](\n 'kernel',\n (linear_kernel, polynomial_kernel, rbf_kernel,\n laplacian_kernel, sigmoid_kernel, cosine_similarity))\ndef test_kernel_symmetry(kernel):\n # Valid kernels should be symmetric\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n K = kernel(X, X)\n assert_array_almost_equal(K, K.T, 15)\n\n\[email protected](\n 'kernel',\n (linear_kernel, polynomial_kernel, rbf_kernel,\n laplacian_kernel, 
sigmoid_kernel, cosine_similarity))\ndef test_kernel_sparse(kernel):\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n X_sparse = csr_matrix(X)\n K = kernel(X, X)\n K2 = kernel(X_sparse, X_sparse)\n assert_array_almost_equal(K, K2)\n\n\ndef test_linear_kernel():\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n K = linear_kernel(X, X)\n # the diagonal elements of a linear kernel are their squared norm\n assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])\n\n\ndef test_rbf_kernel():\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n K = rbf_kernel(X, X)\n # the diagonal elements of a rbf kernel are 1\n assert_array_almost_equal(K.flat[::6], np.ones(5))\n\n\ndef test_laplacian_kernel():\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n K = laplacian_kernel(X, X)\n # the diagonal elements of a laplacian kernel are 1\n assert_array_almost_equal(np.diag(K), np.ones(5))\n\n # off-diagonal elements are < 1 but > 0:\n assert np.all(K > 0)\n assert np.all(K - np.diag(np.diag(K)) < 1)\n\n\[email protected]('metric, pairwise_func',\n [('linear', linear_kernel),\n ('cosine', cosine_similarity)])\ndef test_pairwise_similarity_sparse_output(metric, pairwise_func):\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n Y = rng.random_sample((3, 4))\n Xcsr = csr_matrix(X)\n Ycsr = csr_matrix(Y)\n\n # should be sparse\n K1 = pairwise_func(Xcsr, Ycsr, dense_output=False)\n assert issparse(K1)\n\n # should be dense, and equal to K1\n K2 = pairwise_func(X, Y, dense_output=True)\n assert not issparse(K2)\n assert_array_almost_equal(K1.todense(), K2)\n\n # show the kernel output equal to the sparse.todense()\n K3 = pairwise_kernels(X, Y=Y, metric=metric)\n assert_array_almost_equal(K1.todense(), K3)\n\n\ndef test_cosine_similarity():\n # Test the cosine_similarity.\n\n rng = np.random.RandomState(0)\n X = rng.random_sample((5, 4))\n Y = rng.random_sample((3, 4))\n Xcsr = csr_matrix(X)\n Ycsr = csr_matrix(Y)\n\n for X_, Y_ in ((X, None), (X, Y),\n (Xcsr, None), (Xcsr, Ycsr)):\n # Test that the cosine is kernel is equal to a linear kernel when data\n # has been previously normalized by L2-norm.\n K1 = pairwise_kernels(X_, Y=Y_, metric=\"cosine\")\n X_ = normalize(X_)\n if Y_ is not None:\n Y_ = normalize(Y_)\n K2 = pairwise_kernels(X_, Y=Y_, metric=\"linear\")\n assert_array_almost_equal(K1, K2)\n\n\ndef test_check_dense_matrices():\n # Ensure that pairwise array check works for dense matrices.\n # Check that if XB is None, XB is returned as reference to XA\n XA = np.resize(np.arange(40), (5, 8))\n XA_checked, XB_checked = check_pairwise_arrays(XA, None)\n assert XA_checked is XB_checked\n assert_array_equal(XA, XA_checked)\n\n\ndef test_check_XB_returned():\n # Ensure that if XA and XB are given correctly, they return as equal.\n # Check that if XB is not None, it is returned equal.\n # Note that the second dimension of XB is the same as XA.\n XA = np.resize(np.arange(40), (5, 8))\n XB = np.resize(np.arange(32), (4, 8))\n XA_checked, XB_checked = check_pairwise_arrays(XA, XB)\n assert_array_equal(XA, XA_checked)\n assert_array_equal(XB, XB_checked)\n\n XB = np.resize(np.arange(40), (5, 8))\n XA_checked, XB_checked = check_paired_arrays(XA, XB)\n assert_array_equal(XA, XA_checked)\n assert_array_equal(XB, XB_checked)\n\n\ndef test_check_different_dimensions():\n # Ensure an error is raised if the dimensions are different.\n XA = np.resize(np.arange(45), (5, 9))\n XB = np.resize(np.arange(32), (4, 8))\n with 
pytest.raises(ValueError):\n check_pairwise_arrays(XA, XB)\n\n XB = np.resize(np.arange(4 * 9), (4, 9))\n with pytest.raises(ValueError):\n check_paired_arrays(XA, XB)\n\n\ndef test_check_invalid_dimensions():\n # Ensure an error is raised on 1D input arrays.\n # The modified tests are not 1D. In the old test, the array was internally\n # converted to 2D anyways\n XA = np.arange(45).reshape(9, 5)\n XB = np.arange(32).reshape(4, 8)\n with pytest.raises(ValueError):\n check_pairwise_arrays(XA, XB)\n XA = np.arange(45).reshape(9, 5)\n XB = np.arange(32).reshape(4, 8)\n with pytest.raises(ValueError):\n check_pairwise_arrays(XA, XB)\n\n\ndef test_check_sparse_arrays():\n # Ensures that checks return valid sparse matrices.\n rng = np.random.RandomState(0)\n XA = rng.random_sample((5, 4))\n XA_sparse = csr_matrix(XA)\n XB = rng.random_sample((5, 4))\n XB_sparse = csr_matrix(XB)\n XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)\n # compare their difference because testing csr matrices for\n # equality with '==' does not work as expected.\n assert issparse(XA_checked)\n assert abs(XA_sparse - XA_checked).sum() == 0\n assert issparse(XB_checked)\n assert abs(XB_sparse - XB_checked).sum() == 0\n\n XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)\n assert issparse(XA_checked)\n assert abs(XA_sparse - XA_checked).sum() == 0\n assert issparse(XA_2_checked)\n assert abs(XA_2_checked - XA_checked).sum() == 0\n\n\ndef tuplify(X):\n # Turns a numpy matrix (any n-dimensional array) into tuples.\n s = X.shape\n if len(s) > 1:\n # Tuplify each sub-array in the input.\n return tuple(tuplify(row) for row in X)\n else:\n # Single dimension input, just return tuple of contents.\n return tuple(r for r in X)\n\n\ndef test_check_tuple_input():\n # Ensures that checks return valid tuples.\n rng = np.random.RandomState(0)\n XA = rng.random_sample((5, 4))\n XA_tuples = tuplify(XA)\n XB = rng.random_sample((5, 4))\n XB_tuples = tuplify(XB)\n XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)\n assert_array_equal(XA_tuples, XA_checked)\n assert_array_equal(XB_tuples, XB_checked)\n\n\ndef test_check_preserve_type():\n # Ensures that type float32 is preserved.\n XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)\n XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)\n\n XA_checked, XB_checked = check_pairwise_arrays(XA, None)\n assert XA_checked.dtype == np.float32\n\n # both float32\n XA_checked, XB_checked = check_pairwise_arrays(XA, XB)\n assert XA_checked.dtype == np.float32\n assert XB_checked.dtype == np.float32\n\n # mismatched A\n XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),\n XB)\n assert XA_checked.dtype == np.float\n assert XB_checked.dtype == np.float\n\n # mismatched B\n XA_checked, XB_checked = check_pairwise_arrays(XA,\n XB.astype(np.float))\n assert XA_checked.dtype == np.float\n assert XB_checked.dtype == np.float\n\n\[email protected](\"n_jobs\", [1, 2])\[email protected](\"metric\", [\"seuclidean\", \"mahalanobis\"])\[email protected](\"dist_function\",\n [pairwise_distances, pairwise_distances_chunked])\[email protected](\"y_is_x\", [True, False], ids=[\"Y is X\", \"Y is not X\"])\ndef test_pairwise_distances_data_derived_params(n_jobs, metric, dist_function,\n y_is_x):\n # check that pairwise_distances give the same result in sequential and\n # parallel, when metric has data-derived parameters.\n with config_context(working_memory=0.1): # to have more than 1 chunk\n rng = np.random.RandomState(0)\n X = 
rng.random_sample((100, 10))\n\n if y_is_x:\n Y = X\n expected_dist_default_params = squareform(pdist(X, metric=metric))\n if metric == \"seuclidean\":\n params = {'V': np.var(X, axis=0, ddof=1)}\n else:\n params = {'VI': np.linalg.inv(np.cov(X.T)).T}\n else:\n Y = rng.random_sample((100, 10))\n expected_dist_default_params = cdist(X, Y, metric=metric)\n if metric == \"seuclidean\":\n params = {'V': np.var(np.vstack([X, Y]), axis=0, ddof=1)}\n else:\n params = {'VI': np.linalg.inv(np.cov(np.vstack([X, Y]).T)).T}\n\n expected_dist_explicit_params = cdist(X, Y, metric=metric, **params)\n dist = np.vstack(tuple(dist_function(X, Y,\n metric=metric, n_jobs=n_jobs)))\n\n assert_allclose(dist, expected_dist_explicit_params)\n assert_allclose(dist, expected_dist_default_params)\n",
"from itertools import product\n\nimport pytest\nimport numpy as np\nfrom scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,\n dok_matrix, lil_matrix, issparse)\n\nfrom sklearn import metrics\nfrom sklearn import neighbors, datasets\nfrom sklearn.base import clone\nfrom sklearn.exceptions import DataConversionWarning\nfrom sklearn.exceptions import EfficiencyWarning\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import VALID_METRICS_SPARSE, VALID_METRICS\nfrom sklearn.neighbors._base import _is_sorted_by_data, _check_precomputed\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.utils._testing import assert_array_almost_equal\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.utils._testing import assert_raises\nfrom sklearn.utils._testing import assert_raises_regex\nfrom sklearn.utils._testing import assert_warns\nfrom sklearn.utils._testing import assert_warns_message\nfrom sklearn.utils._testing import assert_raise_message\nfrom sklearn.utils._testing import ignore_warnings\nfrom sklearn.utils.validation import check_random_state\n\nimport joblib\n\nrng = np.random.RandomState(0)\n# load and shuffle iris dataset\niris = datasets.load_iris()\nperm = rng.permutation(iris.target.size)\niris.data = iris.data[perm]\niris.target = iris.target[perm]\n\n# load and shuffle digits\ndigits = datasets.load_digits()\nperm = rng.permutation(digits.target.size)\ndigits.data = digits.data[perm]\ndigits.target = digits.target[perm]\n\nSPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,\n lil_matrix)\nSPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)\n\nALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')\nP = (1, 2, 3, 4, np.inf)\nJOBLIB_BACKENDS = list(joblib.parallel.BACKENDS.keys())\n\n# Filter deprecation warnings.\nneighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)\nneighbors.radius_neighbors_graph = ignore_warnings(\n neighbors.radius_neighbors_graph)\n\n\ndef _weight_func(dist):\n \"\"\" Weight function to replace lambda d: d ** -2.\n The lambda function is not valid because:\n if d==0 then 0^-2 is not valid. \"\"\"\n\n # Dist could be multidimensional, flatten it so all values\n # can be looped\n with np.errstate(divide='ignore'):\n retval = 1. 
/ dist\n return retval ** 2\n\n\ndef test_unsupervised_kneighbors(n_samples=20, n_features=5,\n n_query_pts=2, n_neighbors=5):\n # Test unsupervised neighbors methods\n X = rng.rand(n_samples, n_features)\n\n test = rng.rand(n_query_pts, n_features)\n\n for p in P:\n results_nodist = []\n results = []\n\n for algorithm in ALGORITHMS:\n neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,\n algorithm=algorithm,\n p=p)\n neigh.fit(X)\n\n results_nodist.append(neigh.kneighbors(test,\n return_distance=False))\n results.append(neigh.kneighbors(test, return_distance=True))\n\n for i in range(len(results) - 1):\n assert_array_almost_equal(results_nodist[i], results[i][1])\n assert_array_almost_equal(results[i][0], results[i + 1][0])\n assert_array_almost_equal(results[i][1], results[i + 1][1])\n\n\ndef test_unsupervised_inputs():\n # test the types of valid input into NearestNeighbors\n X = rng.random_sample((10, 3))\n\n nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)\n nbrs_fid.fit(X)\n\n dist1, ind1 = nbrs_fid.kneighbors(X)\n\n nbrs = neighbors.NearestNeighbors(n_neighbors=1)\n\n for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):\n nbrs.fit(input)\n dist2, ind2 = nbrs.kneighbors(X)\n\n assert_array_almost_equal(dist1, dist2)\n assert_array_almost_equal(ind1, ind2)\n\n\ndef test_n_neighbors_datatype():\n # Test to check whether n_neighbors is integer\n X = [[1, 1], [1, 1], [1, 1]]\n expected_msg = \"n_neighbors does not take .*float.* \" \\\n \"value, enter integer value\"\n msg = \"Expected n_neighbors > 0. Got -3\"\n\n neighbors_ = neighbors.NearestNeighbors(n_neighbors=3.)\n assert_raises_regex(TypeError, expected_msg, neighbors_.fit, X)\n assert_raises_regex(ValueError, msg,\n neighbors_.kneighbors, X=X, n_neighbors=-3)\n assert_raises_regex(TypeError, expected_msg,\n neighbors_.kneighbors, X=X, n_neighbors=3.)\n\n\ndef test_not_fitted_error_gets_raised():\n X = [[1]]\n neighbors_ = neighbors.NearestNeighbors()\n assert_raises(NotFittedError, neighbors_.kneighbors_graph, X)\n assert_raises(NotFittedError, neighbors_.radius_neighbors_graph, X)\n\n\n@ignore_warnings(category=EfficiencyWarning)\ndef check_precomputed(make_train_test, estimators):\n \"\"\"Tests unsupervised NearestNeighbors with a distance matrix.\"\"\"\n # Note: smaller samples may result in spurious test success\n rng = np.random.RandomState(42)\n X = rng.random_sample((10, 4))\n Y = rng.random_sample((3, 4))\n DXX, DYX = make_train_test(X, Y)\n for method in ['kneighbors', ]:\n # TODO: also test radius_neighbors, but requires different assertion\n\n # As a feature matrix (n_samples by n_features)\n nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)\n nbrs_X.fit(X)\n dist_X, ind_X = getattr(nbrs_X, method)(Y)\n\n # As a dense distance matrix (n_samples by n_samples)\n nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',\n metric='precomputed')\n nbrs_D.fit(DXX)\n dist_D, ind_D = getattr(nbrs_D, method)(DYX)\n assert_array_almost_equal(dist_X, dist_D)\n assert_array_almost_equal(ind_X, ind_D)\n\n # Check auto works too\n nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',\n metric='precomputed')\n nbrs_D.fit(DXX)\n dist_D, ind_D = getattr(nbrs_D, method)(DYX)\n assert_array_almost_equal(dist_X, dist_D)\n assert_array_almost_equal(ind_X, ind_D)\n\n # Check X=None in prediction\n dist_X, ind_X = getattr(nbrs_X, method)(None)\n dist_D, ind_D = getattr(nbrs_D, method)(None)\n assert_array_almost_equal(dist_X, dist_D)\n assert_array_almost_equal(ind_X, ind_D)\n\n # Must 
raise a ValueError if the matrix is not of correct shape\n assert_raises(ValueError, getattr(nbrs_D, method), X)\n\n target = np.arange(X.shape[0])\n for Est in estimators:\n est = Est(metric='euclidean')\n est.radius = est.n_neighbors = 1\n pred_X = est.fit(X, target).predict(Y)\n est.metric = 'precomputed'\n pred_D = est.fit(DXX, target).predict(DYX)\n assert_array_almost_equal(pred_X, pred_D)\n\n\ndef test_precomputed_dense():\n def make_train_test(X_train, X_test):\n return (metrics.pairwise_distances(X_train),\n metrics.pairwise_distances(X_test, X_train))\n\n estimators = [\n neighbors.KNeighborsClassifier, neighbors.KNeighborsRegressor,\n neighbors.RadiusNeighborsClassifier, neighbors.RadiusNeighborsRegressor\n ]\n check_precomputed(make_train_test, estimators)\n\n\[email protected]('fmt', ['csr', 'lil'])\ndef test_precomputed_sparse_knn(fmt):\n def make_train_test(X_train, X_test):\n nn = neighbors.NearestNeighbors(n_neighbors=3 + 1).fit(X_train)\n return (nn.kneighbors_graph(X_train, mode='distance').asformat(fmt),\n nn.kneighbors_graph(X_test, mode='distance').asformat(fmt))\n\n # We do not test RadiusNeighborsClassifier and RadiusNeighborsRegressor\n # since the precomputed neighbors graph is built with k neighbors only.\n estimators = [\n neighbors.KNeighborsClassifier,\n neighbors.KNeighborsRegressor,\n ]\n check_precomputed(make_train_test, estimators)\n\n\[email protected]('fmt', ['csr', 'lil'])\ndef test_precomputed_sparse_radius(fmt):\n def make_train_test(X_train, X_test):\n nn = neighbors.NearestNeighbors(radius=1).fit(X_train)\n return (nn.radius_neighbors_graph(X_train,\n mode='distance').asformat(fmt),\n nn.radius_neighbors_graph(X_test,\n mode='distance').asformat(fmt))\n\n # We do not test KNeighborsClassifier and KNeighborsRegressor\n # since the precomputed neighbors graph is built with a radius.\n estimators = [\n neighbors.RadiusNeighborsClassifier,\n neighbors.RadiusNeighborsRegressor,\n ]\n check_precomputed(make_train_test, estimators)\n\n\ndef test_is_sorted_by_data():\n # Test that _is_sorted_by_data works as expected. 
In CSR sparse matrix,\n # entries in each row can be sorted by indices, by data, or unsorted.\n # _is_sorted_by_data should return True when entries are sorted by data,\n # and False in all other cases.\n\n # Test with sorted 1D array\n X = csr_matrix(np.arange(10))\n assert _is_sorted_by_data(X)\n # Test with unsorted 1D array\n X[0, 2] = 5\n assert not _is_sorted_by_data(X)\n\n # Test when the data is sorted in each sample, but not necessarily\n # between samples\n X = csr_matrix([[0, 1, 2], [3, 0, 0], [3, 4, 0], [1, 0, 2]])\n assert _is_sorted_by_data(X)\n\n # Test with duplicates entries in X.indptr\n data, indices, indptr = [0, 4, 2, 2], [0, 1, 1, 1], [0, 2, 2, 4]\n X = csr_matrix((data, indices, indptr), shape=(3, 3))\n assert _is_sorted_by_data(X)\n\n\n@ignore_warnings(category=EfficiencyWarning)\ndef test_check_precomputed():\n # Test that _check_precomputed returns a graph sorted by data\n X = csr_matrix(np.abs(np.random.RandomState(42).randn(10, 10)))\n assert not _is_sorted_by_data(X)\n Xt = _check_precomputed(X)\n assert _is_sorted_by_data(Xt)\n\n # est with a different number of nonzero entries for each sample\n mask = np.random.RandomState(42).randint(2, size=(10, 10))\n X = X.toarray()\n X[mask == 1] = 0\n X = csr_matrix(X)\n assert not _is_sorted_by_data(X)\n Xt = _check_precomputed(X)\n assert _is_sorted_by_data(Xt)\n\n\n@ignore_warnings(category=EfficiencyWarning)\ndef test_precomputed_sparse_invalid():\n dist = np.array([[0., 2., 1.], [2., 0., 3.], [1., 3., 0.]])\n dist_csr = csr_matrix(dist)\n neigh = neighbors.NearestNeighbors(n_neighbors=1, metric=\"precomputed\")\n neigh.fit(dist_csr)\n neigh.kneighbors(None, n_neighbors=1)\n neigh.kneighbors(np.array([[0., 0., 0.]]), n_neighbors=2)\n\n # Ensures enough number of nearest neighbors\n dist = np.array([[0., 2., 0.], [2., 0., 3.], [0., 3., 0.]])\n dist_csr = csr_matrix(dist)\n neigh.fit(dist_csr)\n msg = \"2 neighbors per samples are required, but some samples have only 1\"\n assert_raises_regex(ValueError, msg, neigh.kneighbors, None, n_neighbors=1)\n\n # Checks error with inconsistent distance matrix\n dist = np.array([[5., 2., 1.], [-2., 0., 3.], [1., 3., 0.]])\n dist_csr = csr_matrix(dist)\n msg = \"Negative values in data passed to precomputed distance matrix.\"\n assert_raises_regex(ValueError, msg, neigh.kneighbors, dist_csr,\n n_neighbors=1)\n\n\ndef test_precomputed_cross_validation():\n # Ensure array is split correctly\n rng = np.random.RandomState(0)\n X = rng.rand(20, 2)\n D = pairwise_distances(X, metric='euclidean')\n y = rng.randint(3, size=20)\n for Est in (neighbors.KNeighborsClassifier,\n neighbors.RadiusNeighborsClassifier,\n neighbors.KNeighborsRegressor,\n neighbors.RadiusNeighborsRegressor):\n metric_score = cross_val_score(Est(), X, y)\n precomp_score = cross_val_score(Est(metric='precomputed'), D, y)\n assert_array_equal(metric_score, precomp_score)\n\n\ndef test_unsupervised_radius_neighbors(n_samples=20, n_features=5,\n n_query_pts=2, radius=0.5,\n random_state=0):\n # Test unsupervised radius-based query\n rng = np.random.RandomState(random_state)\n\n X = rng.rand(n_samples, n_features)\n\n test = rng.rand(n_query_pts, n_features)\n\n for p in P:\n results = []\n\n for algorithm in ALGORITHMS:\n neigh = neighbors.NearestNeighbors(radius=radius,\n algorithm=algorithm,\n p=p)\n neigh.fit(X)\n\n ind1 = neigh.radius_neighbors(test, return_distance=False)\n\n # sort the results: this is not done automatically for\n # radius searches\n dist, ind = neigh.radius_neighbors(test, return_distance=True)\n 
for (d, i, i1) in zip(dist, ind, ind1):\n j = d.argsort()\n d[:] = d[j]\n i[:] = i[j]\n i1[:] = i1[j]\n results.append((dist, ind))\n\n assert_array_almost_equal(np.concatenate(list(ind)),\n np.concatenate(list(ind1)))\n\n for i in range(len(results) - 1):\n assert_array_almost_equal(np.concatenate(list(results[i][0])),\n np.concatenate(list(results[i + 1][0]))),\n assert_array_almost_equal(np.concatenate(list(results[i][1])),\n np.concatenate(list(results[i + 1][1])))\n\n\ndef test_kneighbors_classifier(n_samples=40,\n n_features=5,\n n_test_pts=10,\n n_neighbors=5,\n random_state=0):\n # Test k-neighbors classification\n rng = np.random.RandomState(random_state)\n X = 2 * rng.rand(n_samples, n_features) - 1\n y = ((X ** 2).sum(axis=1) < .5).astype(np.int)\n y_str = y.astype(str)\n\n weight_func = _weight_func\n\n for algorithm in ALGORITHMS:\n for weights in ['uniform', 'distance', weight_func]:\n knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,\n weights=weights,\n algorithm=algorithm)\n knn.fit(X, y)\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\n y_pred = knn.predict(X[:n_test_pts] + epsilon)\n assert_array_equal(y_pred, y[:n_test_pts])\n # Test prediction with y_str\n knn.fit(X, y_str)\n y_pred = knn.predict(X[:n_test_pts] + epsilon)\n assert_array_equal(y_pred, y_str[:n_test_pts])\n\n\ndef test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,\n n_test_pts=10, n_neighbors=5,\n random_state=0):\n # Test k-neighbors classification\n rng = np.random.RandomState(random_state)\n X = 2 * rng.rand(n_samples, n_features) - 1\n y = ((X ** 2).sum(axis=1) < .5).astype(np.int)\n\n knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)\n knn.fit(X, y.astype(np.float))\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\n y_pred = knn.predict(X[:n_test_pts] + epsilon)\n assert_array_equal(y_pred, y[:n_test_pts])\n\n\ndef test_kneighbors_classifier_predict_proba():\n # Test KNeighborsClassifier.predict_proba() method\n X = np.array([[0, 2, 0],\n [0, 2, 1],\n [2, 0, 0],\n [2, 2, 0],\n [0, 0, 2],\n [0, 0, 1]])\n y = np.array([4, 4, 5, 5, 1, 1])\n cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist\n cls.fit(X, y)\n y_prob = cls.predict_proba(X)\n real_prob = np.array([[0, 2. / 3, 1. / 3],\n [1. / 3, 2. / 3, 0],\n [1. / 3, 0, 2. / 3],\n [0, 1. / 3, 2. / 3],\n [2. / 3, 1. / 3, 0],\n [2. / 3, 1. 
/ 3, 0]])\n assert_array_equal(real_prob, y_prob)\n # Check that it also works with non integer labels\n cls.fit(X, y.astype(str))\n y_prob = cls.predict_proba(X)\n assert_array_equal(real_prob, y_prob)\n # Check that it works with weights='distance'\n cls = neighbors.KNeighborsClassifier(\n n_neighbors=2, p=1, weights='distance')\n cls.fit(X, y)\n y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))\n real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])\n assert_array_almost_equal(real_prob, y_prob)\n\n\ndef test_radius_neighbors_classifier(n_samples=40,\n n_features=5,\n n_test_pts=10,\n radius=0.5,\n random_state=0):\n # Test radius-based classification\n rng = np.random.RandomState(random_state)\n X = 2 * rng.rand(n_samples, n_features) - 1\n y = ((X ** 2).sum(axis=1) < .5).astype(np.int)\n y_str = y.astype(str)\n\n weight_func = _weight_func\n\n for algorithm in ALGORITHMS:\n for weights in ['uniform', 'distance', weight_func]:\n neigh = neighbors.RadiusNeighborsClassifier(radius=radius,\n weights=weights,\n algorithm=algorithm)\n neigh.fit(X, y)\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\n y_pred = neigh.predict(X[:n_test_pts] + epsilon)\n assert_array_equal(y_pred, y[:n_test_pts])\n neigh.fit(X, y_str)\n y_pred = neigh.predict(X[:n_test_pts] + epsilon)\n assert_array_equal(y_pred, y_str[:n_test_pts])\n\n\ndef test_radius_neighbors_classifier_when_no_neighbors():\n # Test radius-based classifier when no neighbors found.\n # In this case it should rise an informative exception\n\n X = np.array([[1.0, 1.0], [2.0, 2.0]])\n y = np.array([1, 2])\n radius = 0.1\n\n z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers\n z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier\n\n weight_func = _weight_func\n\n for outlier_label in [0, -1, None]:\n for algorithm in ALGORITHMS:\n for weights in ['uniform', 'distance', weight_func]:\n rnc = neighbors.RadiusNeighborsClassifier\n clf = rnc(radius=radius, weights=weights, algorithm=algorithm,\n outlier_label=outlier_label)\n clf.fit(X, y)\n assert_array_equal(np.array([1, 2]),\n clf.predict(z1))\n if outlier_label is None:\n assert_raises(ValueError, clf.predict, z2)\n\n\ndef test_radius_neighbors_classifier_outlier_labeling():\n # Test radius-based classifier when no neighbors found and outliers\n # are labeled.\n\n X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99],\n [0.98, 0.98], [2.01, 2.01]])\n y = np.array([1, 2, 1, 1, 2])\n radius = 0.1\n\n z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers\n z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]]) # one outlier\n correct_labels1 = np.array([1, 2])\n correct_labels2 = np.array([-1, 1, 2])\n outlier_proba = np.array([0, 0])\n\n weight_func = _weight_func\n\n for algorithm in ALGORITHMS:\n for weights in ['uniform', 'distance', weight_func]:\n clf = neighbors.RadiusNeighborsClassifier(radius=radius,\n weights=weights,\n algorithm=algorithm,\n outlier_label=-1)\n clf.fit(X, y)\n assert_array_equal(correct_labels1, clf.predict(z1))\n assert_array_equal(correct_labels2, clf.predict(z2))\n assert_array_equal(outlier_proba, clf.predict_proba(z2)[0])\n\n # test outlier_labeling of using predict_proba()\n RNC = neighbors.RadiusNeighborsClassifier\n X = np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])\n y = np.array([0, 2, 2, 1, 1, 1, 3, 3, 3, 3])\n\n # test outlier_label scalar verification\n def check_array_exception():\n clf = RNC(radius=1, outlier_label=[[5]])\n clf.fit(X, y)\n assert_raises(TypeError, check_array_exception)\n\n # test invalid outlier_label 
dtype\n def check_dtype_exception():\n clf = RNC(radius=1, outlier_label='a')\n clf.fit(X, y)\n assert_raises(TypeError, check_dtype_exception)\n\n # test most frequent\n clf = RNC(radius=1, outlier_label='most_frequent')\n clf.fit(X, y)\n proba = clf.predict_proba([[1], [15]])\n assert_array_equal(proba[1, :], [0, 0, 0, 1])\n\n # test manual label in y\n clf = RNC(radius=1, outlier_label=1)\n clf.fit(X, y)\n proba = clf.predict_proba([[1], [15]])\n assert_array_equal(proba[1, :], [0, 1, 0, 0])\n pred = clf.predict([[1], [15]])\n assert_array_equal(pred, [2, 1])\n\n # test manual label out of y warning\n def check_warning():\n clf = RNC(radius=1, outlier_label=4)\n clf.fit(X, y)\n clf.predict_proba([[1], [15]])\n assert_warns(UserWarning, check_warning)\n\n # test multi output same outlier label\n y_multi = [[0, 1], [2, 1], [2, 2], [1, 2], [1, 2],\n [1, 3], [3, 3], [3, 3], [3, 0], [3, 0]]\n clf = RNC(radius=1, outlier_label=1)\n clf.fit(X, y_multi)\n proba = clf.predict_proba([[7], [15]])\n assert_array_equal(proba[1][1, :], [0, 1, 0, 0])\n pred = clf.predict([[7], [15]])\n assert_array_equal(pred[1, :], [1, 1])\n\n # test multi output different outlier label\n y_multi = [[0, 0], [2, 2], [2, 2], [1, 1], [1, 1],\n [1, 1], [3, 3], [3, 3], [3, 3], [3, 3]]\n clf = RNC(radius=1, outlier_label=[0, 1])\n clf.fit(X, y_multi)\n proba = clf.predict_proba([[7], [15]])\n assert_array_equal(proba[0][1, :], [1, 0, 0, 0])\n assert_array_equal(proba[1][1, :], [0, 1, 0, 0])\n pred = clf.predict([[7], [15]])\n assert_array_equal(pred[1, :], [0, 1])\n\n # test inconsistent outlier label list length\n def check_exception():\n clf = RNC(radius=1, outlier_label=[0, 1, 2])\n clf.fit(X, y_multi)\n assert_raises(ValueError, check_exception)\n\n\ndef test_radius_neighbors_classifier_zero_distance():\n # Test radius-based classifier, when distance to a sample is zero.\n\n X = np.array([[1.0, 1.0], [2.0, 2.0]])\n y = np.array([1, 2])\n radius = 0.1\n\n z1 = np.array([[1.01, 1.01], [2.0, 2.0]])\n correct_labels1 = np.array([1, 2])\n\n weight_func = _weight_func\n\n for algorithm in ALGORITHMS:\n for weights in ['uniform', 'distance', weight_func]:\n clf = neighbors.RadiusNeighborsClassifier(radius=radius,\n weights=weights,\n algorithm=algorithm)\n clf.fit(X, y)\n assert_array_equal(correct_labels1, clf.predict(z1))\n\n\ndef test_neighbors_regressors_zero_distance():\n # Test radius-based regressor, when distance to a sample is zero.\n\n X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])\n y = np.array([1.0, 1.5, 2.0, 0.0])\n radius = 0.2\n z = np.array([[1.1, 1.1], [2.0, 2.0]])\n\n rnn_correct_labels = np.array([1.25, 2.0])\n\n knn_correct_unif = np.array([1.25, 1.0])\n knn_correct_dist = np.array([1.25, 2.0])\n\n for algorithm in ALGORITHMS:\n # we don't test for weights=_weight_func since user will be expected\n # to handle zero distances themselves in the function.\n for weights in ['uniform', 'distance']:\n rnn = neighbors.RadiusNeighborsRegressor(radius=radius,\n weights=weights,\n algorithm=algorithm)\n rnn.fit(X, y)\n assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))\n\n for weights, corr_labels in zip(['uniform', 'distance'],\n [knn_correct_unif, knn_correct_dist]):\n knn = neighbors.KNeighborsRegressor(n_neighbors=2,\n weights=weights,\n algorithm=algorithm)\n knn.fit(X, y)\n assert_array_almost_equal(corr_labels, knn.predict(z))\n\n\ndef test_radius_neighbors_boundary_handling():\n \"\"\"Test whether points lying on boundary are handled consistently\n\n Also ensures that even with 
only one query point, an object array\n is returned rather than a 2d array.\n \"\"\"\n\n X = np.array([[1.5], [3.0], [3.01]])\n radius = 3.0\n\n for algorithm in ALGORITHMS:\n nbrs = neighbors.NearestNeighbors(radius=radius,\n algorithm=algorithm).fit(X)\n results = nbrs.radius_neighbors([[0.0]], return_distance=False)\n assert results.shape == (1,)\n assert results.dtype == object\n assert_array_equal(results[0], [0, 1])\n\n\ndef test_RadiusNeighborsClassifier_multioutput():\n # Test k-NN classifier on multioutput data\n rng = check_random_state(0)\n n_features = 2\n n_samples = 40\n n_output = 3\n\n X = rng.rand(n_samples, n_features)\n y = rng.randint(0, 3, (n_samples, n_output))\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n weights = [None, 'uniform', 'distance', _weight_func]\n\n for algorithm, weights in product(ALGORITHMS, weights):\n # Stack single output prediction\n y_pred_so = []\n for o in range(n_output):\n rnn = neighbors.RadiusNeighborsClassifier(weights=weights,\n algorithm=algorithm)\n rnn.fit(X_train, y_train[:, o])\n y_pred_so.append(rnn.predict(X_test))\n\n y_pred_so = np.vstack(y_pred_so).T\n assert y_pred_so.shape == y_test.shape\n\n # Multioutput prediction\n rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,\n algorithm=algorithm)\n rnn_mo.fit(X_train, y_train)\n y_pred_mo = rnn_mo.predict(X_test)\n\n assert y_pred_mo.shape == y_test.shape\n assert_array_almost_equal(y_pred_mo, y_pred_so)\n\n\ndef test_kneighbors_classifier_sparse(n_samples=40,\n n_features=5,\n n_test_pts=10,\n n_neighbors=5,\n random_state=0):\n # Test k-NN classifier on sparse matrices\n # Like the above, but with various types of sparse matrices\n rng = np.random.RandomState(random_state)\n X = 2 * rng.rand(n_samples, n_features) - 1\n X *= X > .2\n y = ((X ** 2).sum(axis=1) < .5).astype(np.int)\n\n for sparsemat in SPARSE_TYPES:\n knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,\n algorithm='auto')\n knn.fit(sparsemat(X), y)\n epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)\n for sparsev in SPARSE_TYPES + (np.asarray,):\n X_eps = sparsev(X[:n_test_pts] + epsilon)\n y_pred = knn.predict(X_eps)\n assert_array_equal(y_pred, y[:n_test_pts])\n\n\ndef test_KNeighborsClassifier_multioutput():\n # Test k-NN classifier on multioutput data\n rng = check_random_state(0)\n n_features = 5\n n_samples = 50\n n_output = 3\n\n X = rng.rand(n_samples, n_features)\n y = rng.randint(0, 3, (n_samples, n_output))\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n weights = [None, 'uniform', 'distance', _weight_func]\n\n for algorithm, weights in product(ALGORITHMS, weights):\n # Stack single output prediction\n y_pred_so = []\n y_pred_proba_so = []\n for o in range(n_output):\n knn = neighbors.KNeighborsClassifier(weights=weights,\n algorithm=algorithm)\n knn.fit(X_train, y_train[:, o])\n y_pred_so.append(knn.predict(X_test))\n y_pred_proba_so.append(knn.predict_proba(X_test))\n\n y_pred_so = np.vstack(y_pred_so).T\n assert y_pred_so.shape == y_test.shape\n assert len(y_pred_proba_so) == n_output\n\n # Multioutput prediction\n knn_mo = neighbors.KNeighborsClassifier(weights=weights,\n algorithm=algorithm)\n knn_mo.fit(X_train, y_train)\n y_pred_mo = knn_mo.predict(X_test)\n\n assert y_pred_mo.shape == y_test.shape\n assert_array_almost_equal(y_pred_mo, y_pred_so)\n\n # Check proba\n y_pred_proba_mo = knn_mo.predict_proba(X_test)\n assert len(y_pred_proba_mo) == n_output\n\n for proba_mo, proba_so in zip(y_pred_proba_mo, 
y_pred_proba_so):\n assert_array_almost_equal(proba_mo, proba_so)\n\n\ndef test_kneighbors_regressor(n_samples=40,\n n_features=5,\n n_test_pts=10,\n n_neighbors=3,\n random_state=0):\n # Test k-neighbors regression\n rng = np.random.RandomState(random_state)\n X = 2 * rng.rand(n_samples, n_features) - 1\n y = np.sqrt((X ** 2).sum(1))\n y /= y.max()\n\n y_target = y[:n_test_pts]\n\n weight_func = _weight_func\n\n for algorithm in ALGORITHMS:\n for weights in ['uniform', 'distance', weight_func]:\n knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,\n weights=weights,\n algorithm=algorithm)\n knn.fit(X, y)\n epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)\n y_pred = knn.predict(X[:n_test_pts] + epsilon)\n assert np.all(abs(y_pred - y_target) < 0.3)\n\n\ndef test_KNeighborsRegressor_multioutput_uniform_weight():\n # Test k-neighbors in multi-output regression with uniform weight\n rng = check_random_state(0)\n n_features = 5\n n_samples = 40\n n_output = 4\n\n X = rng.rand(n_samples, n_features)\n y = rng.rand(n_samples, n_output)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):\n knn = neighbors.KNeighborsRegressor(weights=weights,\n algorithm=algorithm)\n knn.fit(X_train, y_train)\n\n neigh_idx = knn.kneighbors(X_test, return_distance=False)\n y_pred_idx = np.array([np.mean(y_train[idx], axis=0)\n for idx in neigh_idx])\n\n y_pred = knn.predict(X_test)\n\n assert y_pred.shape == y_test.shape\n assert y_pred_idx.shape == y_test.shape\n assert_array_almost_equal(y_pred, y_pred_idx)\n\n\ndef test_kneighbors_regressor_multioutput(n_samples=40,\n n_features=5,\n n_test_pts=10,\n n_neighbors=3,\n random_state=0):\n # Test k-neighbors in multi-output regression\n rng = np.random.RandomState(random_state)\n X = 2 * rng.rand(n_samples, n_features) - 1\n y = np.sqrt((X ** 2).sum(1))\n y /= y.max()\n y = np.vstack([y, y]).T\n\n y_target = y[:n_test_pts]\n\n weights = ['uniform', 'distance', _weight_func]\n for algorithm, weights in product(ALGORITHMS, weights):\n knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,\n weights=weights,\n algorithm=algorithm)\n knn.fit(X, y)\n epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)\n y_pred = knn.predict(X[:n_test_pts] + epsilon)\n assert y_pred.shape == y_target.shape\n\n assert np.all(np.abs(y_pred - y_target) < 0.3)\n\n\ndef test_radius_neighbors_regressor(n_samples=40,\n n_features=3,\n n_test_pts=10,\n radius=0.5,\n random_state=0):\n # Test radius-based neighbors regression\n rng = np.random.RandomState(random_state)\n X = 2 * rng.rand(n_samples, n_features) - 1\n y = np.sqrt((X ** 2).sum(1))\n y /= y.max()\n\n y_target = y[:n_test_pts]\n\n weight_func = _weight_func\n\n for algorithm in ALGORITHMS:\n for weights in ['uniform', 'distance', weight_func]:\n neigh = neighbors.RadiusNeighborsRegressor(radius=radius,\n weights=weights,\n algorithm=algorithm)\n neigh.fit(X, y)\n epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)\n y_pred = neigh.predict(X[:n_test_pts] + epsilon)\n assert np.all(abs(y_pred - y_target) < radius / 2)\n\n # test that nan is returned when no nearby observations\n for weights in ['uniform', 'distance']:\n neigh = neighbors.RadiusNeighborsRegressor(radius=radius,\n weights=weights,\n algorithm='auto')\n neigh.fit(X, y)\n X_test_nan = np.full((1, n_features), -1.)\n empty_warning_msg = (\"One or more samples have no neighbors \"\n \"within specified radius; predicting NaN.\")\n pred = 
assert_warns_message(UserWarning,\n empty_warning_msg,\n neigh.predict,\n X_test_nan)\n assert np.all(np.isnan(pred))\n\n\ndef test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():\n # Test radius neighbors in multi-output regression (uniform weight)\n\n rng = check_random_state(0)\n n_features = 5\n n_samples = 40\n n_output = 4\n\n X = rng.rand(n_samples, n_features)\n y = rng.rand(n_samples, n_output)\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):\n\n rnn = neighbors. RadiusNeighborsRegressor(weights=weights,\n algorithm=algorithm)\n rnn.fit(X_train, y_train)\n\n neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)\n y_pred_idx = np.array([np.mean(y_train[idx], axis=0)\n for idx in neigh_idx])\n\n y_pred_idx = np.array(y_pred_idx)\n y_pred = rnn.predict(X_test)\n\n assert y_pred_idx.shape == y_test.shape\n assert y_pred.shape == y_test.shape\n assert_array_almost_equal(y_pred, y_pred_idx)\n\n\ndef test_RadiusNeighborsRegressor_multioutput(n_samples=40,\n n_features=5,\n n_test_pts=10,\n n_neighbors=3,\n random_state=0):\n # Test k-neighbors in multi-output regression with various weight\n rng = np.random.RandomState(random_state)\n X = 2 * rng.rand(n_samples, n_features) - 1\n y = np.sqrt((X ** 2).sum(1))\n y /= y.max()\n y = np.vstack([y, y]).T\n\n y_target = y[:n_test_pts]\n weights = ['uniform', 'distance', _weight_func]\n\n for algorithm, weights in product(ALGORITHMS, weights):\n rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,\n weights=weights,\n algorithm=algorithm)\n rnn.fit(X, y)\n epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)\n y_pred = rnn.predict(X[:n_test_pts] + epsilon)\n\n assert y_pred.shape == y_target.shape\n assert np.all(np.abs(y_pred - y_target) < 0.3)\n\n\n@ignore_warnings(category=EfficiencyWarning)\ndef test_kneighbors_regressor_sparse(n_samples=40,\n n_features=5,\n n_test_pts=10,\n n_neighbors=5,\n random_state=0):\n # Test radius-based regression on sparse matrices\n # Like the above, but with various types of sparse matrices\n rng = np.random.RandomState(random_state)\n X = 2 * rng.rand(n_samples, n_features) - 1\n y = ((X ** 2).sum(axis=1) < .25).astype(np.int)\n\n for sparsemat in SPARSE_TYPES:\n knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,\n algorithm='auto')\n knn.fit(sparsemat(X), y)\n\n knn_pre = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,\n metric='precomputed')\n knn_pre.fit(pairwise_distances(X, metric='euclidean'), y)\n\n for sparsev in SPARSE_OR_DENSE:\n X2 = sparsev(X)\n assert np.mean(knn.predict(X2).round() == y) > 0.95\n\n X2_pre = sparsev(pairwise_distances(X, metric='euclidean'))\n assert np.mean(knn_pre.predict(X2_pre).round() == y) > 0.95\n\n\ndef test_neighbors_iris():\n # Sanity checks on the iris dataset\n # Puts three points of each label in the plane and performs a\n # nearest neighbor query on points near the decision boundary.\n\n for algorithm in ALGORITHMS:\n clf = neighbors.KNeighborsClassifier(n_neighbors=1,\n algorithm=algorithm)\n clf.fit(iris.data, iris.target)\n assert_array_equal(clf.predict(iris.data), iris.target)\n\n clf.set_params(n_neighbors=9, algorithm=algorithm)\n clf.fit(iris.data, iris.target)\n assert np.mean(clf.predict(iris.data) == iris.target) > 0.95\n\n rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)\n rgs.fit(iris.data, iris.target)\n assert (np.mean(rgs.predict(iris.data).round() == iris.target) >\n 0.95)\n\n\ndef 
test_neighbors_digits():\n # Sanity check on the digits dataset\n # the 'brute' algorithm has been observed to fail if the input\n # dtype is uint8 due to overflow in distance calculations.\n\n X = digits.data.astype('uint8')\n Y = digits.target\n (n_samples, n_features) = X.shape\n train_test_boundary = int(n_samples * 0.8)\n train = np.arange(0, train_test_boundary)\n test = np.arange(train_test_boundary, n_samples)\n (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]\n\n clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')\n score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)\n score_float = clf.fit(X_train.astype(float, copy=False), Y_train).score(\n X_test.astype(float, copy=False), Y_test)\n assert score_uint8 == score_float\n\n\ndef test_kneighbors_graph():\n # Test kneighbors_graph to build the k-Nearest Neighbor graph.\n X = np.array([[0, 1], [1.01, 1.], [2, 0]])\n\n # n_neighbors = 1\n A = neighbors.kneighbors_graph(X, 1, mode='connectivity',\n include_self=True)\n assert_array_equal(A.toarray(), np.eye(A.shape[0]))\n\n A = neighbors.kneighbors_graph(X, 1, mode='distance')\n assert_array_almost_equal(\n A.toarray(),\n [[0.00, 1.01, 0.],\n [1.01, 0., 0.],\n [0.00, 1.40716026, 0.]])\n\n # n_neighbors = 2\n A = neighbors.kneighbors_graph(X, 2, mode='connectivity',\n include_self=True)\n assert_array_equal(\n A.toarray(),\n [[1., 1., 0.],\n [1., 1., 0.],\n [0., 1., 1.]])\n\n A = neighbors.kneighbors_graph(X, 2, mode='distance')\n assert_array_almost_equal(\n A.toarray(),\n [[0., 1.01, 2.23606798],\n [1.01, 0., 1.40716026],\n [2.23606798, 1.40716026, 0.]])\n\n # n_neighbors = 3\n A = neighbors.kneighbors_graph(X, 3, mode='connectivity',\n include_self=True)\n assert_array_almost_equal(\n A.toarray(),\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n\n\ndef test_kneighbors_graph_sparse(seed=36):\n # Test kneighbors_graph to build the k-Nearest Neighbor graph\n # for sparse input.\n rng = np.random.RandomState(seed)\n X = rng.randn(10, 10)\n Xcsr = csr_matrix(X)\n\n for n_neighbors in [1, 2, 3]:\n for mode in [\"connectivity\", \"distance\"]:\n assert_array_almost_equal(\n neighbors.kneighbors_graph(X,\n n_neighbors,\n mode=mode).toarray(),\n neighbors.kneighbors_graph(Xcsr,\n n_neighbors,\n mode=mode).toarray())\n\n\ndef test_radius_neighbors_graph():\n # Test radius_neighbors_graph to build the Nearest Neighbor graph.\n X = np.array([[0, 1], [1.01, 1.], [2, 0]])\n\n A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',\n include_self=True)\n assert_array_equal(\n A.toarray(),\n [[1., 1., 0.],\n [1., 1., 1.],\n [0., 1., 1.]])\n\n A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')\n assert_array_almost_equal(\n A.toarray(),\n [[0., 1.01, 0.],\n [1.01, 0., 1.40716026],\n [0., 1.40716026, 0.]])\n\n\ndef test_radius_neighbors_graph_sparse(seed=36):\n # Test radius_neighbors_graph to build the Nearest Neighbor graph\n # for sparse input.\n rng = np.random.RandomState(seed)\n X = rng.randn(10, 10)\n Xcsr = csr_matrix(X)\n\n for n_neighbors in [1, 2, 3]:\n for mode in [\"connectivity\", \"distance\"]:\n assert_array_almost_equal(\n neighbors.radius_neighbors_graph(X,\n n_neighbors,\n mode=mode).toarray(),\n neighbors.radius_neighbors_graph(Xcsr,\n n_neighbors,\n mode=mode).toarray())\n\n\ndef test_neighbors_badargs():\n # Test bad argument values: these should all raise ValueErrors\n assert_raises(ValueError,\n neighbors.NearestNeighbors,\n algorithm='blah')\n\n X = rng.random_sample((10, 2))\n Xsparse = csr_matrix(X)\n X3 = 
rng.random_sample((10, 3))\n y = np.ones(10)\n\n for cls in (neighbors.KNeighborsClassifier,\n neighbors.RadiusNeighborsClassifier,\n neighbors.KNeighborsRegressor,\n neighbors.RadiusNeighborsRegressor):\n assert_raises(ValueError,\n cls,\n weights='blah')\n assert_raises(ValueError,\n cls, p=-1)\n assert_raises(ValueError,\n cls, algorithm='blah')\n\n nbrs = cls(algorithm='ball_tree', metric='haversine')\n assert_raises(ValueError,\n nbrs.predict,\n X)\n assert_raises(ValueError,\n ignore_warnings(nbrs.fit),\n Xsparse, y)\n\n nbrs = cls(metric='haversine', algorithm='brute')\n nbrs.fit(X3, y)\n assert_raise_message(ValueError,\n \"Haversine distance only valid in 2 dimensions\",\n nbrs.predict,\n X3)\n\n nbrs = cls()\n assert_raises(ValueError,\n nbrs.fit,\n np.ones((0, 2)), np.ones(0))\n assert_raises(ValueError,\n nbrs.fit,\n X[:, :, None], y)\n nbrs.fit(X, y)\n assert_raises(ValueError,\n nbrs.predict,\n [[]])\n if (issubclass(cls, neighbors.KNeighborsClassifier) or\n issubclass(cls, neighbors.KNeighborsRegressor)):\n nbrs = cls(n_neighbors=-1)\n assert_raises(ValueError, nbrs.fit, X, y)\n\n nbrs = neighbors.NearestNeighbors().fit(X)\n\n assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')\n assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')\n\n\ndef test_neighbors_metrics(n_samples=20, n_features=3,\n n_query_pts=2, n_neighbors=5):\n # Test computing the neighbors for various metrics\n # create a symmetric matrix\n V = rng.rand(n_features, n_features)\n VI = np.dot(V, V.T)\n\n metrics = [('euclidean', {}),\n ('manhattan', {}),\n ('minkowski', dict(p=1)),\n ('minkowski', dict(p=2)),\n ('minkowski', dict(p=3)),\n ('minkowski', dict(p=np.inf)),\n ('chebyshev', {}),\n ('seuclidean', dict(V=rng.rand(n_features))),\n ('wminkowski', dict(p=3, w=rng.rand(n_features))),\n ('mahalanobis', dict(VI=VI)),\n ('haversine', {})]\n algorithms = ['brute', 'ball_tree', 'kd_tree']\n X = rng.rand(n_samples, n_features)\n\n test = rng.rand(n_query_pts, n_features)\n\n for metric, metric_params in metrics:\n results = {}\n p = metric_params.pop('p', 2)\n for algorithm in algorithms:\n # KD tree doesn't support all metrics\n if (algorithm == 'kd_tree' and\n metric not in neighbors.KDTree.valid_metrics):\n assert_raises(ValueError,\n neighbors.NearestNeighbors,\n algorithm=algorithm,\n metric=metric, metric_params=metric_params)\n continue\n neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,\n algorithm=algorithm,\n metric=metric, p=p,\n metric_params=metric_params)\n\n # Haversine distance only accepts 2D data\n feature_sl = (slice(None, 2)\n if metric == 'haversine' else slice(None))\n\n neigh.fit(X[:, feature_sl])\n results[algorithm] = neigh.kneighbors(test[:, feature_sl],\n return_distance=True)\n\n assert_array_almost_equal(results['brute'][0], results['ball_tree'][0])\n assert_array_almost_equal(results['brute'][1], results['ball_tree'][1])\n if 'kd_tree' in results:\n assert_array_almost_equal(results['brute'][0],\n results['kd_tree'][0])\n assert_array_almost_equal(results['brute'][1],\n results['kd_tree'][1])\n\n\ndef test_callable_metric():\n\n def custom_metric(x1, x2):\n return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))\n\n X = np.random.RandomState(42).rand(20, 2)\n nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',\n metric=custom_metric)\n nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',\n metric=custom_metric)\n\n nbrs1.fit(X)\n nbrs2.fit(X)\n\n dist1, ind1 = nbrs1.kneighbors(X)\n dist2, ind2 = nbrs2.kneighbors(X)\n\n 
assert_array_almost_equal(dist1, dist2)\n\n\ndef test_valid_brute_metric_for_auto_algorithm():\n X = rng.rand(12, 12)\n Xcsr = csr_matrix(X)\n\n # check that there is a metric that is valid for brute\n # but not ball_tree (so we actually test something)\n assert \"cosine\" in VALID_METRICS['brute']\n assert \"cosine\" not in VALID_METRICS['ball_tree']\n\n # Metric which don't required any additional parameter\n require_params = ['mahalanobis', 'wminkowski', 'seuclidean']\n for metric in VALID_METRICS['brute']:\n if metric != 'precomputed' and metric not in require_params:\n nn = neighbors.NearestNeighbors(n_neighbors=3,\n algorithm='auto',\n metric=metric)\n if metric != 'haversine':\n nn.fit(X)\n nn.kneighbors(X)\n else:\n nn.fit(X[:, :2])\n nn.kneighbors(X[:, :2])\n elif metric == 'precomputed':\n X_precomputed = rng.random_sample((10, 4))\n Y_precomputed = rng.random_sample((3, 4))\n DXX = metrics.pairwise_distances(X_precomputed, metric='euclidean')\n DYX = metrics.pairwise_distances(Y_precomputed, X_precomputed,\n metric='euclidean')\n nb_p = neighbors.NearestNeighbors(n_neighbors=3)\n nb_p.fit(DXX)\n nb_p.kneighbors(DYX)\n\n for metric in VALID_METRICS_SPARSE['brute']:\n if metric != 'precomputed' and metric not in require_params:\n nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',\n metric=metric).fit(Xcsr)\n nn.kneighbors(Xcsr)\n\n # Metric with parameter\n VI = np.dot(X, X.T)\n list_metrics = [('seuclidean', dict(V=rng.rand(12))),\n ('wminkowski', dict(w=rng.rand(12))),\n ('mahalanobis', dict(VI=VI))]\n for metric, params in list_metrics:\n nn = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',\n metric=metric,\n metric_params=params).fit(X)\n nn.kneighbors(X)\n\n\ndef test_metric_params_interface():\n assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,\n metric_params={'p': 3})\n\n\ndef test_predict_sparse_ball_kd_tree():\n rng = np.random.RandomState(0)\n X = rng.rand(5, 5)\n y = rng.randint(0, 2, 5)\n nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')\n nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')\n for model in [nbrs1, nbrs2]:\n model.fit(X, y)\n assert_raises(ValueError, model.predict, csr_matrix(X))\n\n\ndef test_non_euclidean_kneighbors():\n rng = np.random.RandomState(0)\n X = rng.rand(5, 5)\n\n # Find a reasonable radius.\n dist_array = pairwise_distances(X).flatten()\n np.sort(dist_array)\n radius = dist_array[15]\n\n # Test kneighbors_graph\n for metric in ['manhattan', 'chebyshev']:\n nbrs_graph = neighbors.kneighbors_graph(\n X, 3, metric=metric, mode='connectivity',\n include_self=True).toarray()\n nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)\n assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())\n\n # Test radiusneighbors_graph\n for metric in ['manhattan', 'chebyshev']:\n nbrs_graph = neighbors.radius_neighbors_graph(\n X, radius, metric=metric, mode='connectivity',\n include_self=True).toarray()\n nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)\n assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)\n\n # Raise error when wrong parameters are supplied,\n X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')\n X_nbrs.fit(X)\n assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,\n metric='euclidean')\n X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')\n X_nbrs.fit(X)\n assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,\n radius, metric='euclidean')\n\n\ndef 
check_object_arrays(nparray, list_check):\n for ind, ele in enumerate(nparray):\n assert_array_equal(ele, list_check[ind])\n\n\ndef test_k_and_radius_neighbors_train_is_not_query():\n # Test kneighbors et.al when query is not training data\n\n for algorithm in ALGORITHMS:\n\n nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)\n\n X = [[0], [1]]\n nn.fit(X)\n test_data = [[2], [1]]\n\n # Test neighbors.\n dist, ind = nn.kneighbors(test_data)\n assert_array_equal(dist, [[1], [0]])\n assert_array_equal(ind, [[1], [1]])\n dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)\n check_object_arrays(dist, [[1], [1, 0]])\n check_object_arrays(ind, [[1], [0, 1]])\n\n # Test the graph variants.\n assert_array_equal(\n nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])\n assert_array_equal(\n nn.kneighbors_graph([[2], [1]], mode='distance').A,\n np.array([[0., 1.], [0., 0.]]))\n rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)\n assert_array_equal(rng.A, [[0, 1], [1, 1]])\n\n\ndef test_k_and_radius_neighbors_X_None():\n # Test kneighbors et.al when query is None\n for algorithm in ALGORITHMS:\n\n nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)\n\n X = [[0], [1]]\n nn.fit(X)\n\n dist, ind = nn.kneighbors()\n assert_array_equal(dist, [[1], [1]])\n assert_array_equal(ind, [[1], [0]])\n dist, ind = nn.radius_neighbors(None, radius=1.5)\n check_object_arrays(dist, [[1], [1]])\n check_object_arrays(ind, [[1], [0]])\n\n # Test the graph variants.\n rng = nn.radius_neighbors_graph(None, radius=1.5)\n kng = nn.kneighbors_graph(None)\n for graph in [rng, kng]:\n assert_array_equal(graph.A, [[0, 1], [1, 0]])\n assert_array_equal(graph.data, [1, 1])\n assert_array_equal(graph.indices, [1, 0])\n\n X = [[0, 1], [0, 1], [1, 1]]\n nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)\n nn.fit(X)\n assert_array_equal(\n nn.kneighbors_graph().A,\n np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))\n\n\ndef test_k_and_radius_neighbors_duplicates():\n # Test behavior of kneighbors when duplicates are present in query\n\n for algorithm in ALGORITHMS:\n nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)\n nn.fit([[0], [1]])\n\n # Do not do anything special to duplicates.\n kng = nn.kneighbors_graph([[0], [1]], mode='distance')\n assert_array_equal(\n kng.A,\n np.array([[0., 0.], [0., 0.]]))\n assert_array_equal(kng.data, [0., 0.])\n assert_array_equal(kng.indices, [0, 1])\n\n dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)\n check_object_arrays(dist, [[0, 1], [1, 0]])\n check_object_arrays(ind, [[0, 1], [0, 1]])\n\n rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)\n assert_array_equal(rng.A, np.ones((2, 2)))\n\n rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,\n mode='distance')\n rng.sort_indices()\n assert_array_equal(rng.A, [[0, 1], [1, 0]])\n assert_array_equal(rng.indices, [0, 1, 0, 1])\n assert_array_equal(rng.data, [0, 1, 1, 0])\n\n # Mask the first duplicates when n_duplicates > n_neighbors.\n X = np.ones((3, 1))\n nn = neighbors.NearestNeighbors(n_neighbors=1)\n nn.fit(X)\n dist, ind = nn.kneighbors()\n assert_array_equal(dist, np.zeros((3, 1)))\n assert_array_equal(ind, [[1], [0], [1]])\n\n # Test that zeros are explicitly marked in kneighbors_graph.\n kng = nn.kneighbors_graph(mode='distance')\n assert_array_equal(\n kng.A, np.zeros((3, 3)))\n assert_array_equal(kng.data, np.zeros(3))\n assert_array_equal(kng.indices, [1., 0., 1.])\n assert_array_equal(\n nn.kneighbors_graph().A,\n np.array([[0., 1., 0.], 
[1., 0., 0.], [0., 1., 0.]]))\n\n\ndef test_include_self_neighbors_graph():\n # Test include_self parameter in neighbors_graph\n X = [[2, 3], [4, 5]]\n kng = neighbors.kneighbors_graph(X, 1, include_self=True).A\n kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A\n assert_array_equal(kng, [[1., 0.], [0., 1.]])\n assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])\n\n rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A\n rng_not_self = neighbors.radius_neighbors_graph(\n X, 5.0, include_self=False).A\n assert_array_equal(rng, [[1., 1.], [1., 1.]])\n assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])\n\n\[email protected]('algorithm', ALGORITHMS)\ndef test_same_knn_parallel(algorithm):\n X, y = datasets.make_classification(n_samples=30, n_features=5,\n n_redundant=0, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n clf = neighbors.KNeighborsClassifier(n_neighbors=3,\n algorithm=algorithm)\n clf.fit(X_train, y_train)\n y = clf.predict(X_test)\n dist, ind = clf.kneighbors(X_test)\n graph = clf.kneighbors_graph(X_test, mode='distance').toarray()\n\n clf.set_params(n_jobs=3)\n clf.fit(X_train, y_train)\n y_parallel = clf.predict(X_test)\n dist_parallel, ind_parallel = clf.kneighbors(X_test)\n graph_parallel = \\\n clf.kneighbors_graph(X_test, mode='distance').toarray()\n\n assert_array_equal(y, y_parallel)\n assert_array_almost_equal(dist, dist_parallel)\n assert_array_equal(ind, ind_parallel)\n assert_array_almost_equal(graph, graph_parallel)\n\n\[email protected]('algorithm', ALGORITHMS)\ndef test_same_radius_neighbors_parallel(algorithm):\n X, y = datasets.make_classification(n_samples=30, n_features=5,\n n_redundant=0, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n clf = neighbors.RadiusNeighborsClassifier(radius=10,\n algorithm=algorithm)\n clf.fit(X_train, y_train)\n y = clf.predict(X_test)\n dist, ind = clf.radius_neighbors(X_test)\n graph = clf.radius_neighbors_graph(X_test, mode='distance').toarray()\n\n clf.set_params(n_jobs=3)\n clf.fit(X_train, y_train)\n y_parallel = clf.predict(X_test)\n dist_parallel, ind_parallel = clf.radius_neighbors(X_test)\n graph_parallel = \\\n clf.radius_neighbors_graph(X_test, mode='distance').toarray()\n\n assert_array_equal(y, y_parallel)\n for i in range(len(dist)):\n assert_array_almost_equal(dist[i], dist_parallel[i])\n assert_array_equal(ind[i], ind_parallel[i])\n assert_array_almost_equal(graph, graph_parallel)\n\n\[email protected]('backend', JOBLIB_BACKENDS)\[email protected]('algorithm', ALGORITHMS)\ndef test_knn_forcing_backend(backend, algorithm):\n # Non-regression test which ensure the knn methods are properly working\n # even when forcing the global joblib backend.\n with joblib.parallel_backend(backend):\n X, y = datasets.make_classification(n_samples=30, n_features=5,\n n_redundant=0, random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n clf = neighbors.KNeighborsClassifier(n_neighbors=3,\n algorithm=algorithm,\n n_jobs=3)\n clf.fit(X_train, y_train)\n clf.predict(X_test)\n clf.kneighbors(X_test)\n clf.kneighbors_graph(X_test, mode='distance').toarray()\n\n\ndef test_dtype_convert():\n classifier = neighbors.KNeighborsClassifier(n_neighbors=1)\n CLASSES = 15\n X = np.eye(CLASSES)\n y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]\n\n result = classifier.fit(X, y).predict(X)\n assert_array_equal(result, y)\n\n\ndef test_sparse_metric_callable():\n def sparse_metric(x, y): # Metric accepting sparse matrix 
input (only)\n assert issparse(x) and issparse(y)\n return x.dot(y.T).A.item()\n\n X = csr_matrix([ # Population matrix\n [1, 1, 1, 1, 1],\n [1, 0, 1, 0, 1],\n [0, 0, 1, 0, 0]\n ])\n\n Y = csr_matrix([ # Query matrix\n [1, 1, 0, 1, 1],\n [1, 0, 0, 0, 1]\n ])\n\n nn = neighbors.NearestNeighbors(algorithm='brute', n_neighbors=2,\n metric=sparse_metric).fit(X)\n N = nn.kneighbors(Y, return_distance=False)\n\n # GS indices of nearest neighbours in `X` for `sparse_metric`\n gold_standard_nn = np.array([\n [2, 1],\n [2, 1]\n ])\n\n assert_array_equal(N, gold_standard_nn)\n\n\n# ignore conversion to boolean in pairwise_distances\n@ignore_warnings(category=DataConversionWarning)\ndef test_pairwise_boolean_distance():\n # Non-regression test for #4523\n # 'brute': uses scipy.spatial.distance through pairwise_distances\n # 'ball_tree': uses sklearn.neighbors._dist_metrics\n rng = np.random.RandomState(0)\n X = rng.uniform(size=(6, 5))\n NN = neighbors.NearestNeighbors\n\n nn1 = NN(metric=\"jaccard\", algorithm='brute').fit(X)\n nn2 = NN(metric=\"jaccard\", algorithm='ball_tree').fit(X)\n assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])\n\n\ndef test_radius_neighbors_predict_proba():\n for seed in range(5):\n X, y = datasets.make_classification(n_samples=50, n_features=5,\n n_informative=3, n_redundant=0,\n n_classes=3, random_state=seed)\n X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)\n outlier_label = int(2 - seed)\n clf = neighbors.RadiusNeighborsClassifier(radius=2,\n outlier_label=outlier_label)\n clf.fit(X_tr, y_tr)\n pred = clf.predict(X_te)\n proba = clf.predict_proba(X_te)\n proba_label = proba.argmax(axis=1)\n proba_label = np.where(proba.sum(axis=1) == 0,\n outlier_label, proba_label)\n assert_array_equal(pred, proba_label)\n\n\ndef test_pipeline_with_nearest_neighbors_transformer():\n # Test chaining KNeighborsTransformer and classifiers/regressors\n rng = np.random.RandomState(0)\n X = 2 * rng.rand(40, 5) - 1\n X2 = 2 * rng.rand(40, 5) - 1\n y = rng.rand(40, 1)\n\n n_neighbors = 12\n radius = 1.5\n # We precompute more neighbors than necessary, to have equivalence between\n # k-neighbors estimator after radius-neighbors transformer, and vice-versa.\n factor = 2\n\n k_trans = neighbors.KNeighborsTransformer(\n n_neighbors=n_neighbors, mode='distance')\n k_trans_factor = neighbors.KNeighborsTransformer(\n n_neighbors=int(n_neighbors * factor), mode='distance')\n\n r_trans = neighbors.RadiusNeighborsTransformer(\n radius=radius, mode='distance')\n r_trans_factor = neighbors.RadiusNeighborsTransformer(\n radius=int(radius * factor), mode='distance')\n\n k_reg = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors)\n r_reg = neighbors.RadiusNeighborsRegressor(radius=radius)\n\n test_list = [(k_trans, k_reg), (k_trans_factor, r_reg),\n (r_trans, r_reg), (r_trans_factor, k_reg), ]\n\n for trans, reg in test_list:\n # compare the chained version and the compact version\n reg_compact = clone(reg)\n reg_precomp = clone(reg)\n reg_precomp.set_params(metric='precomputed')\n\n reg_chain = make_pipeline(clone(trans), reg_precomp)\n\n y_pred_chain = reg_chain.fit(X, y).predict(X2)\n y_pred_compact = reg_compact.fit(X, y).predict(X2)\n assert_array_almost_equal(y_pred_chain, y_pred_compact)\n"
] | [
[
"numpy.diag",
"numpy.sqrt",
"sklearn.utils._testing.ignore_warnings",
"sklearn.metrics.pairwise._euclidean_distances_upcast",
"numpy.all",
"numpy.concatenate",
"sklearn.metrics.pairwise.pairwise_kernels",
"sklearn.utils._testing.assert_almost_equal",
"numpy.zeros_like",
"numpy.var",
"sklearn.metrics.pairwise.linear_kernel",
"sklearn.metrics.pairwise.check_paired_arrays",
"numpy.exp",
"sklearn.metrics.pairwise.paired_euclidean_distances",
"sklearn.utils._testing.assert_allclose",
"sklearn.metrics.pairwise.pairwise_distances_argmin",
"sklearn.metrics.pairwise.pairwise_distances_chunked",
"scipy.sparse.issparse",
"numpy.arange",
"scipy.sparse.dok_matrix",
"numpy.full",
"numpy.sin",
"sklearn.metrics.pairwise.nan_euclidean_distances",
"numpy.testing.assert_almost_equal",
"numpy.diag_indices_from",
"sklearn.metrics.pairwise.paired_distances",
"numpy.zeros",
"sklearn.utils._testing.assert_array_equal",
"sklearn.metrics.pairwise.chi2_kernel",
"sklearn.metrics.pairwise.haversine_distances",
"numpy.isnan",
"sklearn.metrics.pairwise.paired_manhattan_distances",
"scipy.spatial.distance.cdist",
"scipy.sparse.csr_matrix",
"sklearn.metrics.pairwise.manhattan_distances",
"sklearn.metrics.pairwise.euclidean_distances",
"sklearn.metrics.pairwise.laplacian_kernel",
"numpy.atleast_2d",
"sklearn.metrics.pairwise.cosine_distances",
"sklearn.metrics.pairwise.pairwise_distances_argmin_min",
"numpy.cov",
"sklearn.metrics.pairwise.rbf_kernel",
"numpy.random.RandomState",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.array",
"numpy.sum",
"sklearn.metrics.pairwise.additive_chi2_kernel",
"numpy.maximum",
"numpy.abs",
"sklearn.metrics.pairwise.check_pairwise_arrays",
"numpy.isfinite",
"sklearn.config_context",
"numpy.linalg.norm",
"numpy.cos",
"numpy.ones",
"sklearn.preprocessing.normalize",
"scipy.spatial.distance.pdist",
"sklearn.utils._testing.assert_array_almost_equal",
"numpy.vstack",
"sklearn.metrics.pairwise.PAIRED_DISTANCES.items"
],
[
"numpy.dot",
"sklearn.datasets.make_classification",
"sklearn.utils._testing.ignore_warnings",
"sklearn.neighbors.KNeighborsTransformer",
"sklearn.neighbors.KDTree",
"sklearn.base.clone",
"numpy.mean",
"sklearn.neighbors.RadiusNeighborsRegressor",
"sklearn.utils._testing.assert_raises_regex",
"scipy.sparse.issparse",
"numpy.arange",
"numpy.eye",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.full",
"sklearn.utils._testing.assert_warns_message",
"sklearn.neighbors._base._check_precomputed",
"sklearn.neighbors.RadiusNeighborsClassifier",
"sklearn.neighbors.NearestNeighbors",
"sklearn.utils._testing.assert_array_equal",
"sklearn.utils._testing.assert_raise_message",
"numpy.zeros",
"numpy.isnan",
"sklearn.neighbors.kneighbors_graph",
"sklearn.neighbors.RadiusNeighborsTransformer",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"scipy.sparse.csr_matrix",
"sklearn.neighbors._base._is_sorted_by_data",
"sklearn.utils.validation.check_random_state",
"numpy.errstate",
"numpy.array",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.random.RandomState",
"sklearn.utils._testing.assert_raises",
"numpy.sum",
"sklearn.metrics.pairwise_distances",
"sklearn.utils._testing.assert_warns",
"sklearn.neighbors.BallTree",
"numpy.abs",
"numpy.sort",
"numpy.ones",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.datasets.load_digits",
"sklearn.utils._testing.assert_array_almost_equal",
"sklearn.neighbors.radius_neighbors_graph",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
mrjojo11/malpaca-pub | [
"26fd3a7045288bed66d624e0f5593067ff05952d",
"26fd3a7045288bed66d624e0f5593067ff05952d",
"26fd3a7045288bed66d624e0f5593067ff05952d"
] | [
"storage/old_malpaca/mal-detection.py",
"storage/old_malpaca/malpaca_me.py",
"storage/old_scripts/malpaca_me_improved_window_copy.py"
] | [
"#!/usr/bin/python3\n\nimport sys, dpkt, datetime, glob, os, operator, subprocess, csv\nimport socket\nimport matplotlib\nfrom collections import deque\nimport copy\nfrom itertools import permutations\nfrom dtw import dtw\nfrom fastdtw import fastdtw\nfrom math import log\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nfrom scipy.spatial.distance import cdist, pdist, cosine, euclidean,cityblock\nimport numpy as np\nimport pandas as pd\nimport joblib\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import DBSCAN\nimport json\nfrom sklearn.manifold import TSNE\nfrom pandas import Series\nfrom statsmodels.graphics.tsaplots import plot_acf\nimport seaborn as sns\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nimport scipy.spatial.distance as ssd\nimport scipy\nfrom itertools import groupby\nimport itertools\nfrom sklearn.metrics.pairwise import euclidean_distances, manhattan_distances\nimport hdbscan\nimport time\n\ndef difference(str1, str2):\n return sum([str1[x]!=str2[x] for x in range(len(str1))])\n\ntotalconn = 0\n\nexpname = 'exp'\nif len(sys.argv) > 4:\n expname = sys.argv[4]\n\nthresh = 20\nif len(sys.argv) > 5:\n thresh = int(sys.argv[5])\n\n\ndef computeDTW(old_data, new_data, f, thresh):\n print(\"starting dtw dist computation\")\n \n new_dist = dict()\n print(len(old_data), len(new_data))\n for a in range(len(new_data)):\n for b in range(len(old_data)):\n i = [x[f] for x in new_data[a]][:thresh]\n j = old_data[b][:thresh]\n if len(i) == 0 or len(j) == 0: continue \n dist,_= fastdtw(i,j,dist=euclidean)\n if a not in new_dist.keys():\n new_dist[a] = dict()\n if b not in new_dist[a].keys():\n new_dist[a][b] = dist\n \n new_new_dist = dict()\n\n for a in range(len(new_data)):\n for b in range(len(new_data)):\n i = [x[f] for x in new_data[a]][:thresh]\n j = [x[f] for x in new_data[b]][:thresh]\n if len(i) == 0 or len(j) == 0: continue \n dist,_= fastdtw(i,j,dist=euclidean)\n if a not in new_new_dist.keys():\n new_new_dist[a] = dict()\n if b not in new_new_dist[a].keys():\n new_new_dist[a][b] = dist\n return (new_dist, new_new_dist)\n \ndef computeNgram(old_data, new_data, f, thresh):\n print(\"starting ngram dist computation\")\n \n \n print(len(old_data), len(new_data))\n \n old_ngrams = []\n for a in range(len(old_data)):\n profile = dict()\n dat = old_data[a][:thresh]\n\n li = zip(dat, dat[1:], dat[2:])\n for b in li:\n if b not in profile.keys():\n profile[b] = 0\n profile[b] += 1 \n old_ngrams.append(profile)\n \n new_ngrams = [] \n for a in range(len(new_data)):\n profile = dict()\n dat = [x[f] for x in new_data[a]][:thresh]\n\n li = zip(dat, dat[1:], dat[2:])\n for b in li:\n if b not in profile.keys():\n profile[b] = 0\n profile[b] += 1 \n new_ngrams.append(profile)\n \n new_dist = dict()\n for a in range(len(new_ngrams)):\n for b in range(len(old_ngrams)):\n\n i = new_ngrams[a]\n j = old_ngrams[b]\n ngram_all = list(set(i.keys()) | set(j.keys()))\n i_vec = [(i[item] if item in i.keys() else 0) for item in ngram_all]\n j_vec = [(j[item] if item in j.keys() else 0) for item in ngram_all]\n \n dist = cosine(i_vec, j_vec)\n \n if a not in new_dist.keys():\n new_dist[a] = dict()\n if b not in new_dist[a].keys():\n new_dist[a][b] = dist \n \n new_new_dist = dict()\n for a in range(len(new_ngrams)):\n for b in range(len(new_ngrams)):\n i = new_ngrams[a]\n j = new_ngrams[b]\n ngram_all = list(set(i.keys()) | set(j.keys()))\n i_vec = [(i[item] if item in i.keys() else 0) for item in ngram_all]\n j_vec = 
[(j[item] if item in j.keys() else 0) for item in ngram_all]\n \n dist = cosine(i_vec, j_vec)\n \n if a not in new_new_dist.keys():\n new_new_dist[a] = dict()\n if b not in new_new_dist[a].keys():\n new_new_dist[a][b] = dist \n return (new_dist, new_new_dist)\n\n \n\ndef compositeDist(old_data, new_data, old_dist, f, thresh, method):\n \n new_dist, new_new_dist = None, None\n if method == 'DTW': \n new_dist, new_new_dist = computeDTW(old_data, new_data, f, thresh)\n elif method== 'Ngram':\n new_dist, new_new_dist = computeNgram(old_data, new_data, f, thresh)\n \n # make a full dist matrix\n comp = []\n for i in range(len(old_data)+len(new_data)):\n c = []\n for j in range(len(old_data)+len(new_data)):\n #print(i,j, len(old_data), len(new_data))\n if i < len(old_data) and j < len(old_data):\n c.append(old_dist[i][j])\n #print('-- ', old_dist[i][j])\n elif j >= len(old_data) and i < len(old_data):\n c.append(new_dist[j-len(old_data)][i])\n #print('-- ', new_dist[j-len(old_data)][i])\n elif i >= len(old_data) and j < len(old_data):\n c.append(new_dist[i-len(old_data)][j])\n #print('-- ', new_dist[i-len(old_data)][j])\n else:\n c.append(new_new_dist[j-len(old_data)][i-len(old_data)])\n #print(c)\n comp.append(c)\n \n return comp\n\n\n\n\n\ndef readdatafile(filename):\n data = []\n for line in open(filename,'r').readlines():\n content = line[:-1].split(',')\n data.append([float(x) for x in content])\n return copy.deepcopy(data)\n\n\ndef readdistfile(filename):\n distm = []\n linecount = 0\n for line in open(filename,'r').readlines():\n distm.append([])\n ele = line.split(\" \")\n for e in ele:\n distm[linecount].append(float(e))\n linecount+=1\n \n \n return copy.deepcopy(distm)\n\n\n \n\n\ndef connlevel_sequence(metadata, mapping):\n\n inv_mapping = {v:k for k,v in mapping.items()}\n data = metadata\n timing= {}\n\n values = list(data.values())\n keys = list(data.keys())\n ipmapping = []\n\n\n addition = '-'+expname+'-'+str(thresh)\n\n past_exp = sys.argv[1].replace('model-', '').replace('.pkl', '')\n \n addition_past = '-'+past_exp\n \n # ---- Reloading storage traces ---- #\n filename = 'bytes-features'+addition_past\n dataB = readdatafile(filename)\n print( \"loaded bytes data\")\n filename = 'gaps-features'+addition_past\n dataG = readdatafile(filename)\n print( \"loaded gaps data\")\n filename = 'sport-features'+addition_past\n dataS = readdatafile(filename)\n print( \"loaded sport data\")\n filename = 'dport-features'+addition_past\n dataD = readdatafile(filename)\n print( \"loaded dport data\")\n \n # ----- Reloading storage distance matrices for tsne plot ---- #\n labels = []\n for line in open('labels'+addition_past+'.txt','r').readlines():\n labels = [int(e) for e in line.split(' ')]\n \n filename = 'bytesDist'+addition_past+'.txt'\n ndistmB = readdistfile(filename)\n print( \"loaded bytes dist\")\n filename = 'gapsDist'+addition_past+'.txt'\n ndistmG = readdistfile(filename)\n print( \"loaded gaps dist\")\n filename = 'sportDist'+addition_past+'.txt'\n ndistmS = readdistfile(filename)\n print( \"loaded sport dist\")\n filename = 'dportDist'+addition_past+'.txt'\n ndistmD = readdistfile(filename)\n print( \"loaded dport dist\")\n\n ndistm = []\n\n for a in range(len(ndistmS)):#len(data.values())): #range(10):\n ndistm.append([])\n for b in range(len(ndistmS)):\n ndistm[a].append((ndistmB[a][b]+ndistmG[a][b]+ndistmD[a][b]+ndistmS[a][b])/4.0)\n\n print(\"done reloading everything\")\n print(len(ndistm))\n print(len(ndistm[0]))\n print(len(labels))\n #print \"effective number of 
connections: \" + str(len(dist))\n\n \n \n # plot new points here\n #old_data, new_data, old_dist, f, window_size\n # old_data, new_data, window_size\n distB = compositeDist(dataB, values, ndistmB, 1 , thresh, 'DTW')\n distG = compositeDist(dataG, values, ndistmG, 0 , thresh, 'DTW')\n distS = compositeDist(dataS, values, ndistmS, 2 , thresh, 'Ngram')\n distD = compositeDist(dataD, values, ndistmD, 3 , thresh, 'Ngram')\n \n # Normalizing the ones that need it (all together)\n ndistmB = []\n mini = min(min(distB))\n maxi = max(max(distB))\n \n \n for a in range(len(distB)):\n ndistmB.append([])\n for b in range(len(distB)):\n normed = (distB[a][b] - mini) / (maxi-mini)\n ndistmB[a].append(normed)\n \n ndistmG = []\n mini = min(min(distG))\n maxi = max(max(distG))\n \n \n for a in range(len(distG)):\n ndistmG.append([])\n for b in range(len(distG)):\n normed = (distG[a][b] - mini) / (maxi-mini)\n ndistmG[a].append(normed)\n \n # Making a new composite dist matrix\n ndistm = []\n\n for a in range(len(distS)):#len(data.values())): #range(10):\n ndistm.append([])\n for b in range(len(distS)):\n ndistm[a].append((ndistmB[a][b]+ndistmG[a][b]+distD[a][b]+distS[a][b])/4.0) \n \n plot_kwds = {'alpha': 0.5, 's' : 80, 'linewidths': 0}\n RS=3072018\n projection = TSNE(random_state=RS).fit_transform(ndistm)\n plt.scatter(*projection.T)\n for i,_ in enumerate(ndistm):#mapping.keys()): #zip([x[:1] for x in mapping.keys()],clu.labels_)):\n if i >= len(dataB):\n txt = '*'#keys[i-len(dataB)]\n plt.scatter(projection.T[0][i],projection.T[1][i], color='r', alpha=0.6)\n plt.annotate(txt, (projection.T[0][i],projection.T[1][i]), color='r', alpha=0.6)\n\n plt.savefig(\"tsne-result\"+addition)\n #plt.show()\n size = 7\n sample= 7\n model = hdbscan.HDBSCAN(min_cluster_size = size, min_samples = sample, cluster_selection_method='leaf', metric='precomputed')\n clu = model.fit(np.array([np.array(x) for x in ndistm])) #joblib.load(sys.argv[1])\n\n print('reloaded clustering model')\n print('New points to be clustered', len(ndistm)-len(dataB))\n \n \n\n cols = ['royalblue', 'red', 'darksalmon', 'sienna', 'mediumpurple', 'palevioletred', 'plum', 'darkgreen', 'lightseagreen', 'mediumvioletred', 'gold', 'navy', 'sandybrown', 'darkorchid', 'olivedrab', 'rosybrown', 'maroon' ,'deepskyblue', 'silver']\n pal = sns.color_palette(cols)#\n\n extra_cols = len(set(clu.labels_)) - 18\n\n pal_extra = sns.color_palette('Paired', extra_cols)\n pal.extend(pal_extra)\n col = [pal[x] for x in clu.labels_]\n assert len(clu.labels_) == len(ndistm)\n\n\n mem_col = [sns.desaturate(x,p) for x,p in zip(col,clu.probabilities_)]\n\n plt.scatter(*projection.T, s=50, linewidth=0, c=col, alpha=0.2)\n \n maslist = dict()\n numclus = len(set(clu.labels_))\n array = [str(x) for x in range(numclus-1)]\n array.append(\"-1\")\n fig = plt.figure()\n for i,txt in enumerate(clu.labels_):#mapping.keys()): #zip([x[:1] for x in mapping.keys()],clu.labels_)):\n #if txt == -1:\n # continue\n plt.scatter(projection.T[0][i],projection.T[1][i], color=col[i], alpha=0.6)\n if i >= len(dataB):\n #print(keys[len(dataB)-i], 'assigned to cluster', txt)\n el = keys[len(dataB)-i]\n \n\n filename = el.split('.pcap')[0]\n if filename not in maslist.keys():\n maslist[filename] = [0]*numclus\n ind = array.index(str(txt))\n maslist[filename][ind] = 1\n \n #print(classname, filename, maslist)\n plt.annotate(txt, (projection.T[0][i],projection.T[1][i]), color=col[i], alpha=0.6)\n #else:\n #plt.annotate(txt, (projection.T[0][i],projection.T[1][i]), color=col[i], alpha=0.6)\n\n 
plt.savefig(\"clustering-result\"+addition)\n #plt.show()\n\n print('---------')\n print('Cluster Membership Strings')\n for name, mas in maslist.items():\n \n classname = ''\n if '-' in name:\n classname = name.split('-')[0]\n else:\n classname = name.split('.pcap')[0]\n ms = ''.join([str(x) for x in mas[:-1]])\n print(name, classname, ms)\n print('---------')\n \n\n # Making tree\n print('Producing DAG with relationships between pcaps')\n clusters = {}\n #numclus = len(set(clu.labels_))\n\n treeprep = dict()\n new_mas = set()\n for name, ms in maslist.items():\n mas = ''.join([str(x) for x in ms[:-1]])\n\n famname = ''\n if '-' in name:\n famname = name.split('-')[0]\n else:\n famname = name.split('.pcap')[0]\n \n if mas not in treeprep.keys():\n treeprep[mas] = dict()\n new_mas.add(mas)\n if famname not in treeprep[mas].keys():\n treeprep[mas][famname] = 0\n treeprep[mas][famname] += 1\n\n\n with open('mas-details'+addition_past+'.csv', 'rU') as f3:\n csv_reader = csv.reader(f3, delimiter=';')\n for i,line in enumerate(csv_reader):\n #print('reading storage file', line)\n mas = line[0]\n fam = line[1]\n count = line[2]\n if mas not in treeprep.keys():\n treeprep[mas] = dict()\n if fam not in treeprep[mas].keys():\n treeprep[mas][fam] = int(count)\n else:\n treeprep[mas][fam] += int(count)\n \n\n \n f2 = open('mas-details'+addition+'.csv', 'w')\n for k,v in treeprep.items():\n for kv,vv in v.items():\n #print(k, str(kv), (vv))\n f2.write(str(k)+';'+str(kv)+';'+str(vv)+'\\n')\n f2.close()\n\n with open('mas-details'+addition+'.csv', 'rU') as f3:\n csv_reader = csv.reader(f3, delimiter=';')\n\n graph = {}\n names ={}\n for line in csv_reader:\n graph[line[0]] = set()\n if line[0] not in names.keys():\n names[line[0]] = []\n names[line[0]].append(line[1]+\"(\"+line[2]+\")\")\n\n ulist = graph.keys()\n #print(len(ulist))\n covered = set()\n next = deque()\n\n zeros = ''.join(['0']*(numclus-1))\n\n specials = []\n\n next.append(zeros)\n while(len(next)>0):\n l1 = next.popleft()\n covered.add(l1)\n for l2 in ulist:\n if l2 not in covered and difference(l1,l2) == 1:\n graph[l1].add(l2)\n\n if l2 not in next:\n next.append(l2)\n\n #keys = graph.keys()\n val = set()\n for v in graph.values():\n val.update(v)\n\n notmain = [x for x in ulist if x not in val]\n notmain.remove(zeros)\n nums = [sum([int(y) for y in x]) for x in notmain]\n notmain = [x for _,x in sorted(zip(nums,notmain))]\n\n specials = notmain\n #print(notmain)\n #print(len(notmain))\n\n extras = set()\n\n for nm in notmain:\n comp = set()\n comp.update(val)\n comp.update(extras)\n\n mindist = 1000\n minli1, minli2 = None, None\n for l in comp:\n if nm != l:\n diff = difference(nm,l)\n if diff < mindist:\n mindist = diff\n minli = l\n\n diffbase = difference(nm,zeros)\n #print('diffs', nm, 'extra', mindist, 'with root', diffbase)\n if diffbase <= mindist:\n mindist = diffbase\n minli = zeros\n #print('replaced')\n\n\n\n num1 = sum([int(s) for s in nm])\n num2 = sum([int(s) for s in minli])\n if num1 < num2:\n graph[nm].add(minli)\n else:\n graph[minli].add(nm)\n\n\n extras.add(nm)\n\n\n #keys = graph.keys()\n val = set()\n for v in graph.values():\n val.update(v)\n f2 = open('relation-tree'+addition+'.dot', 'w')\n f2.write(\"digraph dag {\\n\")\n f2.write(\"rankdir=LR;\\n\")\n num = 0\n for idx,li in names.items():\n text = ''\n #print(idx)\n name = str(idx)+'\\n'\n\n for l in li:\n name+=l+',\\n'\n #print(str(idx) + \" [label=\\\"\"+str(num)+\"\\\"]\")\n if idx not in new_mas:\n #print(str(idx) + \" [label=\\\"\"+name+\"\\\"]\")\n 
text = str(idx) + \" [label=\\\"\"+name+\"\\\" , shape=box;]\"\n else:\n #print(str(idx) + \" [style=\\\"filled\\\" fillcolor=\\\"red\\\" label=\\\"\"+name+\"\\\"]\")\n text = str(idx) + \" [style=\\\"filled,dotted\\\" shape=box, fillcolor=\\\"salmon\\\" label=\\\"\"+name+\"\\\"]\"\n\n f2.write(text)\n f2.write('\\n')\n for k,v in graph.items():\n for vi in v:\n f2.write(str(k)+\"->\"+str(vi))\n f2.write('\\n')\n print(k+\"->\"+vi)\n f2.write(\"}\")\n f2.close()\n # Rendering DAG\n print('Rendering DAG -- needs graphviz dot')\n try:\n os.system('dot -Tpng relation-tree'+addition+'.dot -o DAG'+addition+'.png')\n print('Done')\n except:\n print('Failed')\n pass\n\n\n # temporal heatmaps start\n\n '''print(\"writing temporal heatmaps\")\n #print(\"prob: \", clu.probabilities_)\n if not os.path.exists('figs'+addition+'/'):\n os.mkdir('figs'+addition+'/')\n os.mkdir('figs'+addition+'/bytes')\n os.mkdir('figs'+addition+'/gaps')\n os.mkdir('figs'+addition+'/sport')\n os.mkdir('figs'+addition+'/dport')\n\n\n actlabels = []\n for a in range(len(values)): #range(10):\n actlabels.append(mapping[keys[a]])\n\n\n clusterinfo = {}\n seqclufile = csv_file\n lines = []\n lines = open(seqclufile).readlines()[1:]\n\n for line in lines:\n li = line.split(\",\") # clusnum, connnum, prob, srcip, dstip\n #if li[0] == '-1':\n # continue\n\n srcip = li[3]\n dstip = li[4][:-1]\n has = int(li[1])\n\n name = str('%12s->%12s' % (srcip,dstip))\n if li[0] not in clusterinfo.keys():\n clusterinfo[li[0]] = []\n clusterinfo[li[0]].append((has,name))\n print(\"rendering ... \")\n\n sns.set(font_scale=0.9)\n matplotlib.rcParams.update({'font.size':10})\n for names,sname,q in [(\"Packet sizes\",\"bytes\",1),(\"Interval\",\"gaps\",0),(\"Source Port\",\"sport\",2),(\"Dest. Port\",\"dport\",3)]:\n for clusnum,cluster in clusterinfo.items():\n items = [int(x[0]) for x in cluster]\n labels = [x[1] for x in cluster]\n\n acha = [actlabels.index(int(x[0])) for x in cluster]\n\n blah = [values[a] for a in acha]\n\n dataf = []\n\n for b in blah:\n\n dataf.append([x[q] for x in b][:window_size])\n\n df = pd.DataFrame(dataf, index=labels)\n\n g = sns.clustermap(df, xticklabels=False, col_cluster=False)#, vmin= minb, vmax=maxb)\n ind = g.dendrogram_row.reordered_ind\n fig = plt.figure(figsize=(10.0,9.0))\n plt.suptitle(\"Exp: \" + expname + \" | Cluster: \" + clusnum + \" | Feature: \"+ names)\n ax = fig.add_subplot(111)\n datanew = []\n labelsnew = []\n lol = []\n for it in ind:\n labelsnew.append(labels[it])\n #print labels[it]\n\n #print cluster[[x[1] for x in cluster].index(labels[it])][0]\n lol.append(cluster[[x[1] for x in cluster].index(labels[it])][0])\n #print len(labelsnew)\n #print len(lol)\n acha = [actlabels.index(int(x)) for x in lol]\n #print acha\n blah = [values[a] for a in acha]\n\n dataf = []\n\n for b in blah:\n dataf.append([x[q] for x in b][:20])\n df = pd.DataFrame(dataf, index=labelsnew)\n g = sns.heatmap(df, xticklabels=False)\n plt.setp(g.get_yticklabels(),rotation=0)\n plt.subplots_adjust(top=0.92,bottom=0.02,left=0.25,right=1,hspace=0.94)\n plt.savefig(\"figs\"+addition+\"/\"+sname+\"/\"+clusnum)'''\n\n\ndef inet_to_str(inet):\n \"\"\"Convert inet object to a string\n Args:\n inet (inet struct): inet network address\n Returns:\n str: Printable/readable IP address\n \"\"\"\n # First try ipv4 and then ipv6\n try:\n return socket.inet_ntop(socket.AF_INET, inet)\n except ValueError:\n return socket.inet_ntop(socket.AF_INET6, inet)\n\nsrc_set , dst_set, gap_set, proto_set, bytes_set, events_set, ip_set, dns_set, 
port_set = set(), set(), set(), set(), set(), set(), set(), set(), set()\nsrc_dict , dst_dict, proto_dict, events_dict, dns_dict, port_dict = {}, {}, {}, {}, {}, {}\nbytes, gap_list = [], []\n\n\ndef readpcap(filename):\n mal = 0\n ben = 0\n tot = 0\n counter=0\n ipcounter=0\n tcpcounter=0\n udpcounter=0\n\n data = []\n connections = {}\n packetspersecond=[]\n bytesperhost = {}\n count = 0\n prev = -1\n bytespersec = 0\n gaps = []\n incoming = []\n outgoing = []\n period = 0\n bla =0\n f = open(filename, 'rb')\n pcap = dpkt.pcap.Reader(f)\n for ts, pkt in pcap:\n #try:\n timestamp = (datetime.datetime.utcfromtimestamp(ts))\n gap = 0.0 if prev==-1 else round(float((timestamp-prev).microseconds)/float(1000),3)\n #print gap\n if prev == -1:\n period = timestamp\n\n prev = timestamp\n counter+=1\n eth= None\n bla += 1\n try:\n eth=dpkt.ethernet.Ethernet(pkt)\n except:\n continue\n\n if eth.type!=dpkt.ethernet.ETH_TYPE_IP:\n continue\n\n ip=eth.data\n\n\n tupple = (gap, ip.len, ip.p)\n\n gaps.append(tupple)\n\n\n src_ip= inet_to_str(ip.src)\n dst_ip = inet_to_str(ip.dst)\n #print(src_ip, dst_ip)\n sport = 0\n dport = 0\n try:\n if ip.p==dpkt.ip.IP_PROTO_TCP or ip.p==dpkt.ip.IP_PROTO_UDP:\n sport = ip.data.sport\n dport = ip.data.dport\n except:\n continue\n\n if (src_ip, dst_ip) not in connections.keys():\n connections[(src_ip, dst_ip)] = []\n connections[(src_ip,dst_ip)].append((gap, ip.len, ip.p, sport, dport))\n\n\n\n print(os.path.basename(filename), \" num connections: \", len(connections))\n\n values = []\n todel = []\n print('Before cleanup: Total packets: ', len(gaps), ' in ', len(connections), ' connections.' )\n for i,v in connections.items(): # clean it up\n if len(v) < thresh:\n\n todel.append(i)\n\n\n for item in todel:\n del connections[item]\n\n\n print(\"Remaining connections after clean up \", len(connections))\n\n return (gaps,connections)\n\n\ndef readfolder():\n fno = 0\n meta = {}\n mapping= {}\n files = glob.glob(sys.argv[3]+\"/*.pcap\")\n print('About to read pcap...')\n for f in files:\n key = os.path.basename(f)#[:-5].split('-')\n\n data,connections = (readpcap(f))\n if len(connections.items()) < 1:\n continue\n\n for i,v in connections.items():\n name = key+ i[0] + \"->\" + i[1]\n print (name)\n #name = meta[key[len(key)-1]]['threat']+\"|\" +key[len(key)-1][:5]+\"|\"+i[0]+\"->\"+i[1]\n mapping[name] = fno\n fno += 1\n meta[name] = v\n\n print(\"Average conn length: \", np.mean([len(x) for i,x in connections.items()]))\n print(\"Minimum conn length: \", np.min([len(x) for i,x in connections.items()]))\n print(\"Maximum conn length: \", np.max([len(x) for i,x in connections.items()]))\n print ('----------------')\n\n print('Done reading pcaps...')\n print('Collective surviving connections ', len(meta))\n\n\n connlevel_sequence(meta, mapping)\n\ndef readfile():\n startf = time.time()\n mapping= {}\n print('About to read pcap...')\n data, connections = readpcap(sys.argv[3])\n print('Done reading pcaps...')\n if len(connections.items()) < 1:\n return\n\n\n endf = time.time()\n print('file reading ', (endf-startf))\n fno = 0\n meta = {}\n nconnections = {}\n print(\"Average conn length: \", np.mean([len(x) for i,x in connections.items()]))\n print(\"Minimum conn length: \", np.min([len(x) for i,x in connections.items()]))\n print(\"Maximum conn length: \", np.max([len(x) for i,x in connections.items()]))\n #print(\"num connections survived \", len(connections))\n #print(sum([1 for i,x in connections.items() if len(x)>=50]))\n for i, v in connections.items():\n name = i[0] + 
\"->\" + i[1]\n mapping[name] = fno\n fno += 1\n meta[name] = v\n\n '''fig = plt.figure()\n plt.title(''+name)\n plt.plot([x[0] for x in v], 'r')\n plt.plot([x[0] for x in v], 'r.')\n plt.savefig('figs/'+str(mapping[name])+'.png')'''\n print('Surviving connections ', len(meta))\n startc = time.time()\n connlevel_sequence(meta, mapping)\n endc = time.time()\n print('Total time ', (endc-startc))\n\nif sys.argv[2] == 'file':\n readfile()\nelif sys.argv[2] == 'folder':\n readfolder()\nelse:\n print('incomplete command')\n",
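The script above builds its relation tree by linking cluster-membership strings that differ in exactly one cluster position; its `difference` helper is a plain Hamming distance and the traversal starts from the all-zeros root. A minimal, self-contained sketch of that step (hypothetical function names and simplified bookkeeping, not the script's exact variables):

from collections import deque

def difference(s1, s2):
    # Hamming distance between two equal-length membership strings
    return sum(c1 != c2 for c1, c2 in zip(s1, s2))

def build_dag(membership_strings, numclus):
    # Hypothetical, simplified helper: BFS from the all-zeros root,
    # adding an edge whenever two membership strings differ in exactly
    # one cluster position (mirrors the relation-tree step above).
    zeros = '0' * (numclus - 1)          # the noise-cluster bit is dropped
    graph = {m: set() for m in membership_strings}
    graph.setdefault(zeros, set())
    covered, queue = set(), deque([zeros])
    while queue:
        cur = queue.popleft()
        covered.add(cur)
        for other in graph:
            if other not in covered and difference(cur, other) == 1:
                graph[cur].add(other)
                if other not in queue:
                    queue.append(other)
    return graph

# Example: three pcaps hitting clusters {0}, {0,1} and {1} with numclus = 3
print(build_dag(['10', '11', '01'], 3))

The full script additionally attaches "orphan" strings that remain unreachable from the all-zeros root to their nearest neighbour by Hamming distance before writing the graph out as Graphviz dot.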
"#!/usr/bin/python3\n\nimport sys, dpkt, datetime, glob, os, operator, subprocess, csv\nimport socket\nfrom pathlib import Path\n\nimport matplotlib\nfrom collections import deque\nfrom itertools import permutations\nfrom dtw import dtw\nfrom fastdtw import fastdtw\nfrom math import log\n\nfrom rpy2 import robjects\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nfrom scipy.spatial.distance import cdist, pdist, cosine, euclidean, cityblock\nimport numpy as np\nimport pandas as pd\nimport joblib\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import DBSCAN\nimport json\nfrom sklearn.manifold import TSNE\nfrom pandas import Series\nfrom statsmodels.graphics.tsaplots import plot_acf\nimport seaborn as sns\nfrom scipy.cluster.hierarchy import dendrogram, linkage\nimport scipy.spatial.distance as ssd\nimport scipy\nfrom itertools import groupby\nimport itertools\nfrom sklearn.metrics.pairwise import euclidean_distances, manhattan_distances\nimport hdbscan\nimport time\nimport rpy2.robjects.packages as rpackages\n\nimport numba\n\nclass MalpacaMe():\n expname = 'exp'\n thresh = 20\n RPY2 = False\n totalconn = 0\n\n def __init__(self, path_to_folder, expname, thresh, RPY2):\n self.path_to_folder = path_to_folder\n self.expname = expname\n self.thresh = thresh\n self.RPY2 = RPY2\n\n path_to_results = str(Path.joinpath(Path(os.getcwd()).parents[1], \"results\"))\n os.mkdir(path_to_results + \"/\" + expname)\n self.path_to_store = str(Path.joinpath(Path(path_to_results), expname)) + \"/\"\n\n self.readfolder()\n\n if RPY2 == True:\n import rpy2.robjects as robjects\n import rpy2.robjects.packages as rpackages\n from rpy2.robjects.vectors import StrVector\n from rpy2.robjects.packages import importr\n from rpy2.robjects import r\n from rpy2.robjects import ListVector\n\n def difference(self, str1, str2):\n return sum([str1[x] != str2[x] for x in range(len(str1))])\n\n # @profile\n def connlevel_sequence(self, metadata, mapping):\n inv_mapping = {v: k for k, v in mapping.items()}\n data = metadata\n timing = {}\n\n values = list(data.values())\n keys = list(data.keys())\n distm = []\n labels = []\n ipmapping = []\n '''for i,v in data.items():\n fig = plt.figure(figsize=(10.0,9.0))\n ax = fig.add_subplot(111)\n ax.set_title(i)\n plt.plot([x[1] for x in v][:75], 'b')\n plt.plot([x[1] for x in v][:75], 'b.')\n cid = keys.index(i)\n plt.savefig('unzipped/malevol/data/connections/'+str(cid)+'.png')'''\n\n # save intermediate results\n\n addition = '-' + self.expname + '-' + str(self.thresh)\n\n # ----- start porting -------\n\n utils, r = None, None\n\n for n, feat in [(1, 'bytes'), (0, 'gaps'), (3, 'sport'), (4, 'dport')]:\n f = open(self.path_to_store + feat + '-features' + addition, 'w')\n for val in values:\n vi = [str(x[n]) for x in val]\n f.write(','.join(vi))\n f.write(\"\\n\")\n f.close()\n\n startb = time.time()\n\n filename = self.path_to_store + 'bytesDist' + addition + '.txt'\n\n print(\"starting bytes dist\")\n\n distm = [-1] * len(data.values())\n distm = [[-1] * len(data.values()) for i in distm]\n\n for a in range(len(data.values())): # range(10):\n\n labels.append(mapping[keys[a]])\n ipmapping.append((mapping[keys[a]], inv_mapping[mapping[keys[a]]]))\n for b in range(a + 1):\n\n i = [x[1] for x in values[a]][:self.thresh]\n j = [x[1] for x in values[b]][:self.thresh]\n if len(i) == 0 or len(j) == 0: continue\n\n if a == b:\n distm[a][b] = 0.0\n else:\n dist, _ = fastdtw(i, j, dist=euclidean)\n distm[a][b] = dist\n 
distm[b][a] = dist\n\n with open(filename, 'w') as outfile:\n for a in range(len(distm)): # len(data.values())): #range(10):\n outfile.write(' '.join([str(e) for e in distm[a]]) + \"\\n\")\n with open(self.path_to_store + 'labels' + addition + '.txt', 'w') as outfile:\n outfile.write(' '.join([str(l) for l in labels]) + '\\n')\n with open(self.path_to_store + 'mapping' + addition + '.txt', 'w') as outfile:\n outfile.write(' '.join([str(l) for l in ipmapping]) + '\\n')\n\n endb = time.time()\n print('bytes ', (endb - startb))\n ndistmB = []\n mini = min(min(distm))\n maxi = max(max(distm))\n\n for a in range(len(distm)):\n ndistmB.append([])\n for b in range(len(distm)):\n normed = (distm[a][b] - mini) / (maxi - mini)\n ndistmB[a].append(normed)\n\n startg = time.time()\n distm = []\n\n filename = self.path_to_store + 'gapsDist' + addition + '.txt'\n\n print(\"starting gaps dist\")\n distm = [-1] * len(data.values())\n distm = [[-1] * len(data.values()) for i in distm]\n\n for a in range(len(data.values())): # range(10):\n\n for b in range(a + 1):\n\n i = [x[0] for x in values[a]][:self.thresh]\n j = [x[0] for x in values[b]][:self.thresh]\n\n if len(i) == 0 or len(j) == 0: continue\n\n if a == b:\n distm[a][b] = 0.0\n else:\n dist, _ = fastdtw(i, j, dist=euclidean)\n distm[a][b] = dist\n distm[b][a] = dist\n\n with open(filename, 'w') as outfile:\n for a in range(len(distm)): # len(data.values())): #range(10):\n # print distm[a]\n outfile.write(' '.join([str(e) for e in distm[a]]) + \"\\n\")\n\n endg = time.time()\n print('gaps ', (endg - startg))\n ndistmG = []\n mini = min(min(distm))\n maxi = max(max(distm))\n\n for a in range(len(distm)): # len(data.values())): #range(10):\n ndistmG.append([])\n for b in range(len(distm)):\n normed = (distm[a][b] - mini) / (maxi - mini)\n ndistmG[a].append(normed)\n\n # source port\n ndistmS = []\n distm = []\n\n starts = time.time()\n\n filename = self.path_to_store + 'sportDist' + addition + '.txt'\n same, diff = set(), set()\n\n print(\"starting sport dist\")\n distm = [-1] * len(data.values())\n distm = [[-1] * len(data.values()) for i in distm]\n\n ngrams = []\n for a in range(len(values)):\n profile = dict()\n\n dat = [x[3] for x in values[a]][:self.thresh]\n\n li = zip(dat, dat[1:], dat[2:])\n for b in li:\n if b not in profile.keys():\n profile[b] = 0\n\n profile[b] += 1\n\n ngrams.append(profile)\n\n profiles = []\n # update for arrays\n\n assert len(ngrams) == len(values)\n for a in range(len(ngrams)):\n for b in range(a + 1):\n if a == b:\n distm[a][b] = 0.0\n else:\n i = ngrams[a]\n j = ngrams[b]\n ngram_all = list(set(i.keys()) | set(j.keys()))\n i_vec = [(i[item] if item in i.keys() else 0) for item in ngram_all]\n j_vec = [(j[item] if item in j.keys() else 0) for item in ngram_all]\n dist = cosine(i_vec, j_vec)\n distm[a][b] = dist\n distm[b][a] = dist\n\n with open(filename, 'w') as outfile:\n for a in range(len(distm)):\n outfile.write(' '.join([str(e) for e in distm[a]]) + \"\\n\")\n\n ends = time.time()\n print('sport ', (ends - starts))\n\n\n for a in range(len(distm)):\n ndistmS.append([])\n for b in range(len(distm)):\n ndistmS[a].append(distm[a][b])\n\n # dest port\n ndistmD = []\n distm = []\n\n startd = time.time()\n\n filename = self.path_to_store + 'dportDist' + addition + '.txt'\n\n print(\"starting dport dist\")\n distm = [-1] * len(data.values())\n distm = [[-1] * len(data.values()) for i in distm]\n\n ngrams = []\n for a in range(len(values)):\n\n profile = dict()\n dat = [x[4] for x in values[a]][:self.thresh]\n\n li = 
zip(dat, dat[1:], dat[2:])\n\n for b in li:\n if b not in profile.keys():\n profile[b] = 0\n profile[b] += 1\n ngrams.append(profile)\n\n assert len(ngrams) == len(values)\n for a in range(len(ngrams)):\n for b in range(a + 1):\n if a == b:\n distm[a][b] = 0.0\n else:\n i = ngrams[a]\n j = ngrams[b]\n ngram_all = list(set(i.keys()) | set(j.keys()))\n i_vec = [(i[item] if item in i.keys() else 0) for item in ngram_all]\n j_vec = [(j[item] if item in j.keys() else 0) for item in ngram_all]\n dist = round(cosine(i_vec, j_vec), 8)\n distm[a][b] = dist\n distm[b][a] = dist\n\n with open(filename, 'w') as outfile:\n for a in range(len(distm)):\n outfile.write(' '.join([str(e) for e in distm[a]]) + \"\\n\")\n\n endd = time.time()\n print('time dport ', (endd - startd))\n mini = min(min(distm))\n maxi = max(max(distm))\n\n for a in range(len(distm)):\n ndistmD.append([])\n for b in range(len(distm)):\n ndistmD[a].append(distm[a][b])\n\n ndistm = []\n\n for a in range(len(ndistmS)):\n ndistm.append([])\n for b in range(len(ndistmS)):\n ndistm[a].append((ndistmB[a][b] + ndistmG[a][b] + ndistmD[a][b] + ndistmS[a][b]) / 4.0)\n\n print(\"done distance meaurement\")\n print(len(ndistm))\n print(len(ndistm[0]))\n\n plot_kwds = {'alpha': 0.5, 's': 80, 'linewidths': 0}\n RS = 3072018\n projection = TSNE(random_state=RS).fit_transform(ndistm)\n plt.scatter(*projection.T)\n plt.savefig(self.path_to_store + \"tsne-result\" + addition)\n\n size = 7\n sample = 7\n\n model = hdbscan.HDBSCAN(min_cluster_size=size, min_samples=sample, cluster_selection_method='leaf',\n metric='precomputed')\n clu = model.fit(np.array([np.array(x) for x in ndistm])) # final for citadel and dridex\n joblib.dump(clu, self.path_to_store + 'model' + addition + '.pkl')\n\n print(\"num clusters: \" + str(len(set(clu.labels_)) - 1))\n\n avg = 0.0\n for l in list(set(clu.labels_)):\n if l != -1:\n avg += sum([(1 if x == l else 0) for x in clu.labels_])\n print(\"average size of cluster:\" + str(float(avg) / float(len(set(clu.labels_)) - 1)))\n print(\"samples in noise: \" + str(sum([(1 if x == -1 else 0) for x in clu.labels_])))\n\n cols = ['royalblue', 'red', 'darksalmon', 'sienna', 'mediumpurple', 'palevioletred', 'plum', 'darkgreen',\n 'lightseagreen', 'mediumvioletred', 'gold', 'navy', 'sandybrown', 'darkorchid', 'olivedrab', 'rosybrown',\n 'maroon', 'deepskyblue', 'silver']\n pal = sns.color_palette(cols) #\n\n extra_cols = len(set(clu.labels_)) - 18\n\n pal_extra = sns.color_palette('Paired', extra_cols)\n pal.extend(pal_extra)\n col = [pal[x] for x in clu.labels_]\n assert len(clu.labels_) == len(ndistm)\n\n mem_col = [sns.desaturate(x, p) for x, p in zip(col, clu.probabilities_)]\n\n plt.scatter(*projection.T, s=50, linewidth=0, c=col, alpha=0.2)\n\n for i, txt in enumerate(clu.labels_):\n\n realind = labels[i]\n name = inv_mapping[realind]\n '''thiscol = None\n thislab = None\n for cdx, cc in enumerate(classes):\n if cc in name:\n thiscol = col[cdx]\n thislab = cc\n break'''\n plt.scatter(projection.T[0][i], projection.T[1][i], color=col[i], alpha=0.6)\n if txt == -1:\n continue\n\n plt.annotate(txt, (projection.T[0][i], projection.T[1][i]), color=col[i], alpha=0.6)\n\n plt.savefig(self.path_to_store + \"clustering-result\" + addition)\n\n # writing csv file\n print(\"writing csv file\")\n final_clusters = {}\n final_probs = {}\n for lab in set(clu.labels_):\n occ = [i for i, x in enumerate(clu.labels_) if x == lab]\n final_probs[lab] = [x for i, x in zip(clu.labels_, clu.probabilities_) if i == lab]\n print(\"cluster: \" + 
str(lab) + \" num items: \" + str(len([labels[x] for x in occ])))\n final_clusters[lab] = [labels[x] for x in occ]\n\n csv_file = self.path_to_store + 'clusters' + addition + '.csv'\n outfile = open(csv_file, 'w')\n outfile.write(\"clusnum,connnum,probability,class,filename,srcip,dstip\\n\")\n\n for n, clus in final_clusters.items():\n\n for idx, el in enumerate([inv_mapping[x] for x in clus]):\n\n ip = el.split('->')\n if '-' in ip[0]:\n classname = el.split('-')[1]\n else:\n classname = el.split('.pcap')[0]\n\n filename = el.split('.pcap')[0]\n\n outfile.write(\n str(n) + \",\" + str(mapping[el]) + \",\" + str(final_probs[n][idx]) + \",\" + str(classname) + \",\" + str(\n filename) + \",\" + ip[0] + \",\" + ip[1] + \"\\n\")\n outfile.close()\n\n # Making tree\n print('Producing DAG with relationships between pcaps')\n clusters = {}\n numclus = len(set(clu.labels_))\n with open(csv_file, 'r') as f1:\n reader = csv.reader(f1, delimiter=',')\n for i, line in enumerate(reader): # f1.readlines()[1:]:\n if i > 0:\n if line[4] not in clusters.keys():\n clusters[line[4]] = []\n clusters[line[4]].append((line[3], line[0])) # classname, cluster#\n # print(clusters)\n f1.close()\n array = [str(x) for x in range(numclus - 1)]\n array.append(\"-1\")\n\n treeprep = dict()\n for filename, val in clusters.items():\n arr = [0] * numclus\n for fam, clus in val:\n ind = array.index(clus)\n arr[ind] = 1\n # print(filename, )\n mas = ''.join([str(x) for x in arr[:-1]])\n famname = fam\n print(filename + \"\\t\" + fam + \"\\t\" + ''.join([str(x) for x in arr[:-1]]))\n if mas not in treeprep.keys():\n treeprep[mas] = dict()\n if famname not in treeprep[mas].keys():\n treeprep[mas][famname] = set()\n treeprep[mas][famname].add(str(filename))\n\n f2 = open(self.path_to_store +'mas-details' + addition + '.csv', 'w')\n for k, v in treeprep.items():\n for kv, vv in v.items():\n f2.write(str(k) + ';' + str(kv) + ';' + str(len(vv)) + '\\n')\n f2.close()\n\n with open(self.path_to_store +'mas-details' + addition + '.csv', 'rU') as f3:\n csv_reader = csv.reader(f3, delimiter=';')\n\n graph = {}\n\n names = {}\n for line in csv_reader:\n graph[line[0]] = set()\n if line[0] not in names.keys():\n names[line[0]] = []\n names[line[0]].append(line[1] + \"(\" + line[2] + \")\")\n\n zeros = ''.join(['0'] * (numclus - 1))\n if zeros not in graph.keys():\n graph[zeros] = set()\n\n ulist = graph.keys()\n covered = set()\n next = deque()\n\n specials = []\n\n next.append(zeros)\n\n while (len(next) > 0):\n l1 = next.popleft()\n covered.add(l1)\n for l2 in ulist:\n if l2 not in covered and self.difference(l1, l2) == 1:\n graph[l1].add(l2)\n\n if l2 not in next:\n next.append(l2)\n\n val = set()\n for v in graph.values():\n val.update(v)\n\n notmain = [x for x in ulist if x not in val]\n notmain.remove(zeros)\n nums = [sum([int(y) for y in x]) for x in notmain]\n notmain = [x for _, x in sorted(zip(nums, notmain))]\n\n specials = notmain\n\n extras = set()\n\n for nm in notmain:\n comp = set()\n comp.update(val)\n comp.update(extras)\n\n mindist = 1000\n minli1, minli2 = None, None\n for l in comp:\n if nm != l:\n diff = self.difference(nm, l)\n if diff < mindist:\n mindist = diff\n minli = l\n\n diffbase = self.difference(nm, zeros)\n if diffbase <= mindist:\n mindist = diffbase\n minli = zeros\n\n num1 = sum([int(s) for s in nm])\n num2 = sum([int(s) for s in minli])\n if num1 < num2:\n graph[nm].add(minli)\n else:\n graph[minli].add(nm)\n\n extras.add(nm)\n\n val = set()\n for v in graph.values():\n val.update(v)\n f2 = 
open(self.path_to_store +'relation-tree' + addition + '.dot', 'w')\n f2.write(\"digraph dag {\\n\")\n f2.write(\"rankdir=LR;\\n\")\n num = 0\n for idx, li in names.items():\n text = ''\n name = str(idx) + '\\n'\n\n for l in li:\n name += l + ',\\n'\n if idx not in specials:\n text = str(idx) + \" [label=\\\"\" + name + \"\\\" , shape=box;]\"\n else: # treat in a special way. For now, leaving intact\n text = str(idx) + \" [shape=box label=\\\"\" + name + \"\\\"]\"\n\n f2.write(text)\n f2.write('\\n')\n for k, v in graph.items():\n for vi in v:\n f2.write(str(k) + \"->\" + str(vi))\n f2.write('\\n')\n f2.write(\"}\")\n f2.close()\n # Rendering DAG\n print('Rendering DAG -- needs graphviz dot')\n try:\n os.system('dot -Tpng relation-tree' + addition + '.dot -o DAG' + addition + '.png')\n print('Done')\n except:\n print('Failed')\n pass\n\n # temporal heatmaps start\n\n print(\"writing temporal heatmaps\")\n if not os.path.exists(self.path_to_store + 'figs' + addition + '/'):\n os.mkdir(self.path_to_store + 'figs' + addition + '/')\n os.mkdir(self.path_to_store + 'figs' + addition + '/bytes')\n os.mkdir(self.path_to_store + 'figs' + addition + '/gaps')\n os.mkdir(self.path_to_store + 'figs' + addition + '/sport')\n os.mkdir(self.path_to_store + 'figs' + addition + '/dport')\n\n actlabels = []\n for a in range(len(values)): # range(10):\n actlabels.append(mapping[keys[a]])\n\n clusterinfo = {}\n seqclufile = csv_file\n lines = []\n lines = open(seqclufile).readlines()[1:]\n\n for line in lines:\n li = line.split(\",\") # clusnum, connnum, prob, srcip, dstip\n\n srcip = li[5]\n dstip = li[6][:-1]\n has = int(li[1])\n\n name = str('%12s->%12s' % (srcip, dstip))\n if li[0] not in clusterinfo.keys():\n clusterinfo[li[0]] = []\n clusterinfo[li[0]].append((has, name))\n print(\"rendering ... \")\n\n sns.set(font_scale=0.9)\n matplotlib.rcParams.update({'font.size': 10})\n for names, sname, q in [(\"Packet sizes\", \"bytes\", 1), (\"Interval\", \"gaps\", 0), (\"Source Port\", \"sport\", 3),\n (\"Dest. 
Port\", \"dport\", 4)]:\n for clusnum, cluster in clusterinfo.items():\n items = [int(x[0]) for x in cluster]\n labels = [x[1] for x in cluster]\n\n acha = [actlabels.index(int(x[0])) for x in cluster]\n\n blah = [values[a] for a in acha]\n\n dataf = []\n\n for b in blah:\n dataf.append([x[q] for x in b][:self.thresh])\n\n df = pd.DataFrame(dataf, index=labels)\n\n g = sns.clustermap(df, xticklabels=False, col_cluster=False) # , vmin= minb, vmax=maxb)\n ind = g.dendrogram_row.reordered_ind\n fig = plt.figure(figsize=(10.0, 9.0))\n plt.suptitle(\"Exp: \" + self.expname + \" | Cluster: \" + clusnum + \" | Feature: \" + names)\n ax = fig.add_subplot(111)\n datanew = []\n labelsnew = []\n lol = []\n for it in ind:\n labelsnew.append(labels[it])\n lol.append(cluster[[x[1] for x in cluster].index(labels[it])][0])\n\n acha = [actlabels.index(int(x)) for x in lol]\n blah = [values[a] for a in acha]\n\n dataf = []\n\n for b in blah:\n dataf.append([x[q] for x in b][:20])\n df = pd.DataFrame(dataf, index=labelsnew)\n g = sns.heatmap(df, xticklabels=False)\n plt.setp(g.get_yticklabels(), rotation=0)\n plt.subplots_adjust(top=0.92, bottom=0.02, left=0.25, right=1, hspace=0.94)\n plt.savefig(self.path_to_store + \"figs\" + addition + \"/\" + sname + \"/\" + clusnum)\n\n\n def inet_to_str(self, inet):\n \"\"\"Convert inet object to a string\n Args:\n inet (inet struct): inet network address\n Returns:\n str: Printable/readable IP address\n \"\"\"\n # First try ipv4 and then ipv6\n try:\n return socket.inet_ntop(socket.AF_INET, inet)\n except ValueError:\n return socket.inet_ntop(socket.AF_INET6, inet)\n\n\n src_set, dst_set, gap_set, proto_set, bytes_set, events_set, ip_set, dns_set, port_set = set(), set(), set(), set(), set(), set(), set(), set(), set()\n src_dict, dst_dict, proto_dict, events_dict, dns_dict, port_dict = {}, {}, {}, {}, {}, {}\n bytes, gap_list = [], []\n\n\n def readpcap(self, filename):\n print(\"Reading\", os.path.basename(filename))\n mal = 0\n ben = 0\n tot = 0\n counter = 0\n ipcounter = 0\n tcpcounter = 0\n udpcounter = 0\n\n data = []\n connections = {}\n packetspersecond = []\n bytesperhost = {}\n count = 0\n previousTimestamp = {}\n bytespersec = 0\n gaps = []\n incoming = []\n outgoing = []\n period = 0\n bla = 0\n f = open(filename, 'rb')\n pcap = dpkt.pcap.Reader(f)\n for ts, pkt in pcap:\n counter += 1\n eth = None\n bla += 1\n try:\n eth = dpkt.ethernet.Ethernet(pkt)\n except:\n continue\n\n if eth.type != dpkt.ethernet.ETH_TYPE_IP:\n continue\n\n ip = eth.data\n\n src_ip = self.inet_to_str(ip.src)\n dst_ip = self.inet_to_str(ip.dst)\n\n key = (src_ip, dst_ip)\n\n timestamp = datetime.datetime.utcfromtimestamp(ts)\n\n if key in previousTimestamp:\n gap = (timestamp - previousTimestamp[key]).microseconds / 1000\n else:\n gap = 0\n\n previousTimestamp[key] = timestamp\n\n tupple = (gap, ip.len, ip.p)\n\n gaps.append(tupple)\n\n sport = 0\n dport = 0\n\n try:\n if ip.p == dpkt.ip.IP_PROTO_TCP or ip.p == dpkt.ip.IP_PROTO_UDP:\n sport = ip.data.sport\n dport = ip.data.dport\n except:\n continue\n\n if key not in connections.keys():\n connections[key] = []\n connections[key].append((gap, ip.len, ip.p, sport, dport))\n\n print(os.path.basename(filename), \" num connections: \", len(connections))\n\n values = []\n todel = []\n print('Before cleanup: Total packets: ', len(gaps), ' in ', len(connections), ' connections.')\n for i, v in connections.items(): # clean it up\n if len(v) < self.thresh:\n todel.append(i)\n\n for item in todel:\n del connections[item]\n\n 
print(\"Remaining connections after clean up \", len(connections))\n\n return (gaps, connections)\n\n\n def readfolder(self):\n fno = 0\n meta = {}\n mapping = {}\n files = glob.glob(self.path_to_folder + \"/*.pcap\")\n print('About to read pcap...')\n for f in files:\n key = os.path.basename(f) # [:-5].split('-')\n\n data, connections = (self.readpcap(f))\n if len(connections.items()) < 1:\n continue\n\n for i, v in connections.items():\n name = key + i[0] + \"->\" + i[1]\n print(name)\n # name = meta[key[len(key)-1]]['threat']+\"|\" +key[len(key)-1][:5]+\"|\"+i[0]+\"->\"+i[1]\n mapping[name] = fno\n fno += 1\n meta[name] = v\n\n print(\"Average conn length: \", np.mean([len(x) for i, x in connections.items()]))\n print(\"Minimum conn length: \", np.min([len(x) for i, x in connections.items()]))\n print(\"Maximum conn length: \", np.max([len(x) for i, x in connections.items()]))\n print('----------------')\n\n print('Done reading pcaps...')\n print('Collective surviving connections ', len(meta))\n\n self.connlevel_sequence(meta, mapping)\n\n\n def readfile(self, path_to_pcap_file):\n startf = time.time()\n mapping = {}\n print('About to read pcap...')\n data, connections = self.readpcap(path_to_pcap_file)\n print('Done reading pcaps...')\n if len(connections.items()) < 1:\n return\n\n endf = time.time()\n print('file reading ', (endf - startf))\n fno = 0\n meta = {}\n nconnections = {}\n print(\"Average conn length: \", np.mean([len(x) for i, x in connections.items()]))\n print(\"Minimum conn length: \", np.min([len(x) for i, x in connections.items()]))\n print(\"Maximum conn length: \", np.max([len(x) for i, x in connections.items()]))\n # print(\"num connections survived \", len(connections))\n # print(sum([1 for i,x in connections.items() if len(x)>=50]))\n for i, v in connections.items():\n name = i[0] + \"->\" + i[1]\n mapping[name] = fno\n fno += 1\n meta[name] = v\n\n '''fig = plt.figure()\n plt.title(''+name)\n plt.plot([x[0] for x in v], 'r')\n plt.plot([x[0] for x in v], 'r.')\n plt.savefig('figs/'+str(mapping[name])+'.png')'''\n print('Surviving connections ', len(meta))\n startc = time.time()\n self.connlevel_sequence(meta, mapping)\n endc = time.time()\n print('Total time ', (endc - startc))\n",
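Both MALPACA variants in this listing compute the source- and destination-port distances as a cosine distance between 3-gram count profiles of each connection's first packets (the class above calls scipy's `cosine`; the windowed variant that follows swaps in a numba-based helper). A minimal sketch of that per-pair computation, with hypothetical helper names and assuming plain scipy:

from collections import Counter
from scipy.spatial.distance import cosine

def ngram_profile(ports, n=3):
    # Hypothetical helper: count overlapping n-grams of a port sequence,
    # like the zip(dat, dat[1:], dat[2:]) profiles built above.
    return Counter(zip(*(ports[i:] for i in range(n))))

def port_distance(ports_a, ports_b, n=3):
    pa, pb = ngram_profile(ports_a, n), ngram_profile(ports_b, n)
    vocab = sorted(set(pa) | set(pb))        # union of observed n-grams
    va = [pa.get(g, 0) for g in vocab]
    vb = [pb.get(g, 0) for g in vocab]
    return round(cosine(va, vb), 8)          # 0.0 = identical profiles

# Example: two short destination-port sequences with disjoint 3-gram profiles
print(port_distance([80, 80, 443, 80, 443], [80, 80, 80, 80, 53]))  # -> 1.0

Because the resulting values already lie in [0, 1] for these non-negative count vectors, the port matrices are used as-is, while the DTW-based byte and gap matrices are min-max normalised; the four matrices are then averaged into the composite distance handed to HDBSCAN with metric='precomputed'.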
"#!/usr/bin/python3\nimport math\n\nimport dpkt, datetime, glob, os, csv\nimport socket\nfrom pathlib import Path\n\nimport matplotlib\nfrom PIL import Image\nfrom matplotlib.pyplot import cm\nfrom collections import deque\n\nfrom sklearn import metrics\nimport numpy as np\nimport pandas as pd\nimport joblib\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nfrom sklearn.manifold import TSNE\nimport seaborn as sns\nimport hdbscan\nimport time\n\nfrom graphviz import render\n\nfrom util.numba_cosine import cosine_similarity_numba\nfrom util.odtw import _dtw_distance\n\n\nclass MalpacaMeImprovedWindow():\n expname = 'exp'\n window_size = 20\n RPY2 = False\n totalconn = 0\n\n def __init__(self, path_to_folder, path_to_results, expname, window_size, RPY2):\n self.path_to_folder = path_to_folder\n self.expname = expname\n self.window_size = window_size\n self.RPY2 = RPY2\n\n path_to_results = path_to_results\n os.mkdir(path_to_results + \"/\" + expname)\n self.path_to_store = str(Path.joinpath(Path(path_to_results), expname)) + \"/\"\n\n self.readfolde_window()\n\n if RPY2 == True:\n pass\n\n def difference(self, str1, str2):\n return sum([str1[x] != str2[x] for x in range(len(str1))])\n\n # @profile\n def connlevel_sequence(self, metadata, mapping):\n inv_mapping = {v: k for k, v in mapping.items()}\n data = metadata\n timing = {}\n\n values = list(data.values())\n keys = list(data.keys())\n distm = []\n labels = []\n ipmapping = []\n\n # save intermediate results\n\n path_to_intermediate_results = self.path_to_store + \"/intermediate_results/\"\n os.mkdir(path_to_intermediate_results)\n\n path_to_features = path_to_intermediate_results +\"/features/\"\n os.mkdir(path_to_features)\n\n path_to_distances = path_to_intermediate_results +\"/distances/\"\n os.mkdir(path_to_distances)\n\n\n addition = '_' + self.expname + '_' + str(self.window_size)\n\n # ----- start porting -------\n\n utils, r = None, None\n\n for n, feat in [(1, 'bytes'), (0, 'gaps'), (3, 'sport'), (4, 'dport')]:\n f = open(path_to_features + feat + '-features' + addition + '.txt', 'w')\n for val in values:\n vi = [str(x[n]) for x in val]\n f.write(','.join(vi))\n f.write(\"\\n\")\n f.close()\n\n startb = time.time()\n start_time = time.time()\n\n filename = path_to_distances + 'bytesDist' + addition + '.txt'\n\n print(\"Starting bytes dist\")\n\n distm = [-1] * len(data.values())\n distm = [[-1] * len(data.values()) for i in distm]\n\n for a in range(len(data.values())): # range(10):\n\n labels.append(mapping[keys[a]])\n ipmapping.append((mapping[keys[a]], inv_mapping[mapping[keys[a]]]))\n for b in range(a + 1):\n\n i = [x[1] for x in values[a]][:self.window_size]\n j = [x[1] for x in values[b]][:self.window_size]\n if len(i) == 0 or len(j) == 0: continue\n\n if a == b:\n distm[a][b] = 0.0\n else:\n first_array = np.array(i)\n second_array = np.array(j)\n\n dist = _dtw_distance(first_array, second_array)\n distm[a][b] = dist\n distm[b][a] = dist\n\n with open(filename, 'w') as outfile:\n for a in range(len(distm)): # len(data.values())): #range(10):\n outfile.write(' '.join([str(e) for e in distm[a]]) + \"\\n\")\n outfile.close()\n with open(path_to_intermediate_results + 'labels' + addition + '.txt', 'w') as outfile:\n outfile.write(' '.join([str(l) for l in labels]) + '\\n')\n outfile.close()\n with open(path_to_intermediate_results + 'mapping' + addition + '.txt', 'w') as outfile:\n outfile.write(' '.join([str(l) for l in ipmapping]) + '\\n')\n outfile.close()\n\n endb = time.time()\n 
print('Time bytes: ' + str(round((endb - startb), 3)))\n ndistmB = []\n mini = min(min(distm))\n maxi = max(max(distm))\n\n for a in range(len(distm)):\n ndistmB.append([])\n for b in range(len(distm)):\n normed = (distm[a][b] - mini) / (maxi - mini)\n ndistmB[a].append(normed)\n\n startg = time.time()\n distm = []\n\n filename = path_to_distances + 'gapsDist' + addition + '.txt'\n\n print(\"Starting gaps dist\")\n distm = [-1] * len(data.values())\n distm = [[-1] * len(data.values()) for i in distm]\n\n for a in range(len(data.values())): # range(10):\n\n for b in range(a + 1):\n\n i = [x[0] for x in values[a]][:self.window_size]\n j = [x[0] for x in values[b]][:self.window_size]\n\n if len(i) == 0 or len(j) == 0: continue\n\n if a == b:\n distm[a][b] = 0.0\n else:\n first_array = np.array(i)\n second_array = np.array(j)\n\n dist = _dtw_distance(first_array, second_array)\n distm[a][b] = dist\n distm[b][a] = dist\n\n with open(filename, 'w') as outfile:\n for a in range(len(distm)): # len(data.values())): #range(10):\n # print distm[a]\n outfile.write(' '.join([str(e) for e in distm[a]]) + \"\\n\")\n\n endg = time.time()\n print('Time gaps: ' + str(round((endg - startg), 3)))\n ndistmG = []\n mini = min(min(distm))\n maxi = max(max(distm))\n\n for a in range(len(distm)): # len(data.values())): #range(10):\n ndistmG.append([])\n for b in range(len(distm)):\n normed = (distm[a][b] - mini) / (maxi - mini)\n ndistmG[a].append(normed)\n\n # source port\n ndistmS = []\n distm = []\n\n starts = time.time()\n\n filename = path_to_distances + 'sportDist' + addition + '.txt'\n same, diff = set(), set()\n\n print(\"Starting sport dist\")\n distm = [-1] * len(data.values())\n distm = [[-1] * len(data.values()) for i in distm]\n\n ngrams = []\n for a in range(len(values)):\n profile = dict()\n\n dat = [x[3] for x in values[a]][:self.window_size]\n\n li = zip(dat, dat[1:], dat[2:])\n for b in li:\n if b not in profile.keys():\n profile[b] = 0\n\n profile[b] += 1\n\n ngrams.append(profile)\n\n profiles = []\n # update for arrays\n\n assert len(ngrams) == len(values)\n for a in range(len(ngrams)):\n for b in range(a + 1):\n if a == b:\n distm[a][b] = 0.0\n else:\n i = ngrams[a]\n j = ngrams[b]\n ngram_all = list(set(i.keys()) | set(j.keys()))\n i_vec = [(i[item] if item in i.keys() else 0) for item in ngram_all]\n j_vec = [(j[item] if item in j.keys() else 0) for item in ngram_all]\n #dist = cosine(i_vec, j_vec)\n\n first_array = np.array(i_vec)\n second_array = np.array(j_vec)\n\n dist = round(cosine_similarity_numba(first_array, second_array), 8)\n\n distm[a][b] = dist\n distm[b][a] = dist\n\n with open(filename, 'w') as outfile:\n for a in range(len(distm)):\n outfile.write(' '.join([str(e) for e in distm[a]]) + \"\\n\")\n\n ends = time.time()\n print('Sport time: ' + str(round((ends - starts), 3)))\n\n\n for a in range(len(distm)):\n ndistmS.append([])\n for b in range(len(distm)):\n ndistmS[a].append(distm[a][b])\n\n # dest port\n ndistmD = []\n distm = []\n\n startd = time.time()\n\n filename = path_to_distances + 'dportDist' + addition + '.txt'\n\n print(\"Starting dport dist\")\n distm = [-1] * len(data.values())\n distm = [[-1] * len(data.values()) for i in distm]\n\n ngrams = []\n for a in range(len(values)):\n\n profile = dict()\n dat = [x[4] for x in values[a]][:self.window_size]\n\n li = zip(dat, dat[1:], dat[2:])\n\n for b in li:\n if b not in profile.keys():\n profile[b] = 0\n profile[b] += 1\n ngrams.append(profile)\n\n assert len(ngrams) == len(values)\n for a in range(len(ngrams)):\n 
for b in range(a + 1):\n if a == b:\n distm[a][b] = 0.0\n else:\n i = ngrams[a]\n j = ngrams[b]\n ngram_all = list(set(i.keys()) | set(j.keys()))\n i_vec = [(i[item] if item in i.keys() else 0) for item in ngram_all]\n j_vec = [(j[item] if item in j.keys() else 0) for item in ngram_all]\n #dist = round(cosine(i_vec, j_vec), 8)\n\n first_array = np.array(i_vec)\n second_array = np.array(j_vec)\n\n dist = round(cosine_similarity_numba(first_array, second_array), 8)\n\n distm[a][b] = dist\n distm[b][a] = dist\n\n with open(filename, 'w') as outfile:\n for a in range(len(distm)):\n outfile.write(' '.join([str(e) for e in distm[a]]) + \"\\n\")\n\n endd = time.time()\n print('Time dport: ' + str(round((endd - startd), 3)))\n mini = min(min(distm))\n maxi = max(max(distm))\n\n for a in range(len(distm)):\n ndistmD.append([])\n for b in range(len(distm)):\n ndistmD[a].append(distm[a][b])\n\n ndistm = []\n\n for a in range(len(ndistmS)):\n ndistm.append([])\n for b in range(len(ndistmS)):\n ndistm[a].append((ndistmB[a][b] + ndistmG[a][b] + ndistmD[a][b] + ndistmS[a][b]) / 4.0)\n\n print(\"Done with distance measurement\")\n print(\"----------------\")\n\n plot_kwds = {'alpha': 0.5, 's': 80, 'linewidths': 0}\n RS = 3072018\n projection = TSNE(random_state=RS).fit_transform(ndistm)\n plt.scatter(*projection.T)\n plt.savefig(self.path_to_store + \"tsne-result\" + addition)\n\n plt.close()\n plt.clf()\n\n #########\n # Model #\n #########\n\n path_to_model = path_to_intermediate_results +\"/model/\"\n os.mkdir(path_to_model)\n\n size = 7\n sample = 7\n\n model = hdbscan.HDBSCAN(min_cluster_size=size, min_samples=sample, cluster_selection_method='leaf',\n metric='precomputed')\n clu = model.fit(np.array([np.array(x) for x in ndistm])) # final for citadel and dridex\n\n input_array = np.array([np.array(x) for x in ndistm])\n validity_index = hdbscan.validity_index(X=input_array, labels=clu.labels_, metric='precomputed', d=4)\n\n unique_labels = np.unique(np.array(clu.labels_))\n if (len(unique_labels) >= 2):\n silhouette_score = round(metrics.silhouette_score(X=input_array, labels=np.array(clu.labels_), metric='precomputed'), 3)\n else:\n silhouette_score = \"nan\"\n\n joblib.dump(clu, path_to_model + 'model' + addition + '.pkl')\n\n print(\"Num clusters: \" + str(len(set(clu.labels_)) - 1))\n\n end_time = time.time()\n\n avg = 0.0\n for l in list(set(clu.labels_)):\n if l != -1:\n avg += sum([(1 if x == l else 0) for x in clu.labels_])\n #print(\"average size of cluster:\" + str(float(avg) / float(len(set(clu.labels_)) - 1)))\n print(\"Samples in noise: \" + str(sum([(1 if x == -1 else 0) for x in clu.labels_])))\n\n cols = ['royalblue', 'red', 'darksalmon', 'sienna', 'mediumpurple', 'palevioletred', 'plum', 'darkgreen',\n 'lightseagreen', 'mediumvioletred', 'gold', 'navy', 'sandybrown', 'darkorchid', 'olivedrab', 'rosybrown',\n 'maroon', 'deepskyblue', 'silver']\n pal = sns.color_palette(cols) #\n\n extra_cols = len(set(clu.labels_)) - 18\n\n pal_extra = sns.color_palette('Paired', extra_cols)\n pal.extend(pal_extra)\n col = [pal[x] for x in clu.labels_]\n assert len(clu.labels_) == len(ndistm)\n\n mem_col = [sns.desaturate(x, p) for x, p in zip(col, clu.probabilities_)]\n\n plt.scatter(*projection.T, s=50, linewidth=0, c=col, alpha=0.2)\n\n for i, txt in enumerate(clu.labels_):\n\n realind = labels[i]\n name = inv_mapping[realind]\n plt.scatter(projection.T[0][i], projection.T[1][i], color=col[i], alpha=0.6)\n if txt == -1:\n continue\n\n plt.annotate(txt, (projection.T[0][i], projection.T[1][i]), 
color=col[i], alpha=0.6)\n\n plt.savefig(self.path_to_store + \"clustering-result\" + addition)\n plt.close()\n plt.clf()\n\n print(\"----------------\")\n\n # writing csv file\n print(\"Writing csv file\")\n final_clusters = {}\n final_probs = {}\n for lab in set(clu.labels_):\n occ = [i for i, x in enumerate(clu.labels_) if x == lab]\n final_probs[lab] = [x for i, x in zip(clu.labels_, clu.probabilities_) if i == lab]\n print(\"cluster: \" + str(lab) + \" num items: \" + str(len([labels[x] for x in occ])))\n final_clusters[lab] = [labels[x] for x in occ]\n\n csv_file = self.path_to_store + 'summary' + addition + '.csv'\n outfile = open(csv_file, 'w')\n outfile.write(\"clusnum,connnum,probability,filename,src_ip,dst_ip,window\\n\")\n\n for n, clus in final_clusters.items():\n\n for idx, el in enumerate([inv_mapping[x] for x in clus]):\n\n ip = el.split('->')\n\n filename = ip[0]\n src_ip = ip[1]\n dst_ip = ip[2]\n window = ip[3]\n\n outfile.write(\n str(n) + \",\" + str(mapping[el]) + \",\" + str(final_probs[n][idx]) + \",\" + str(filename) + \",\" + src_ip + \",\" + dst_ip + \",\" + window + \"\\n\")\n outfile.close()\n\n other_csv_files = glob.glob(self.path_to_folder + \"/*.csv\")\n\n for index, csv_file_path in enumerate(other_csv_files):\n\n temp_df = pd.read_csv(csv_file_path)\n\n if index == 0:\n combined_df = temp_df\n else:\n combined_df = combined_df.append(temp_df)\n\n\n csv_df = pd.read_csv(csv_file)\n csv_df = csv_df.sort_values(by=['src_ip', 'dst_ip'])\n combined_df = combined_df.sort_values(by=['src_ip', 'dst_ip'])\n\n print(len(combined_df.index))\n print(len(csv_df.index))\n csv_df = csv_df.merge(right=combined_df, on=['src_ip', 'dst_ip', 'window'], how=\"left\")\n\n csv_df = csv_df.drop(columns=\"filename\")\n csv_df = csv_df.sort_values(by=\"clusnum\")\n csv_df.to_csv(csv_file, index=False)\n\n # Making tree\n print('Producing DAG with relationships between pcaps')\n clusters = {}\n numclus = len(set(clu.labels_))\n with open(csv_file, 'r') as f1:\n reader = csv.reader(f1, delimiter=',')\n for i, line in enumerate(reader): # f1.readlines()[1:]:\n if i > 0:\n if line[4] not in clusters.keys():\n clusters[line[4]] = []\n clusters[line[4]].append((line[3], line[0])) # classname, cluster#\n # print(clusters)\n f1.close()\n array = [str(x) for x in range(numclus - 1)]\n array.append(\"-1\")\n\n treeprep = dict()\n for filename, val in clusters.items():\n arr = [0] * numclus\n for fam, clus in val:\n ind = array.index(clus)\n arr[ind] = 1\n # print(filename, )\n mas = ''.join([str(x) for x in arr[:-1]])\n famname = fam\n if mas not in treeprep.keys():\n treeprep[mas] = dict()\n if famname not in treeprep[mas].keys():\n treeprep[mas][famname] = set()\n treeprep[mas][famname].add(str(filename))\n\n os.mkdir(Path.joinpath(Path(self.path_to_store), \"dag\"))\n path_to_dag_results = str(Path.joinpath(Path(self.path_to_store), \"dag\")) + \"/\"\n\n f2 = open(path_to_dag_results +'mas-details' + addition + '.csv', 'w')\n for k, v in treeprep.items():\n for kv, vv in v.items():\n f2.write(str(k) + ';' + str(kv) + ';' + str(len(vv)) + '\\n')\n f2.close()\n\n with open(path_to_dag_results +'mas-details' + addition + '.csv', 'rU') as f3:\n csv_reader = csv.reader(f3, delimiter=';')\n\n graph = {}\n\n names = {}\n for line in csv_reader:\n graph[line[0]] = set()\n if line[0] not in names.keys():\n names[line[0]] = []\n names[line[0]].append(line[1] + \"(\" + line[2] + \")\")\n\n zeros = ''.join(['0'] * (numclus - 1))\n if zeros not in graph.keys():\n graph[zeros] = set()\n\n ulist = 
graph.keys()\n covered = set()\n next = deque()\n\n specials = []\n\n next.append(zeros)\n\n while (len(next) > 0):\n l1 = next.popleft()\n covered.add(l1)\n for l2 in ulist:\n if l2 not in covered and self.difference(l1, l2) == 1:\n graph[l1].add(l2)\n\n if l2 not in next:\n next.append(l2)\n\n val = set()\n for v in graph.values():\n val.update(v)\n\n notmain = [x for x in ulist if x not in val]\n notmain.remove(zeros)\n nums = [sum([int(y) for y in x]) for x in notmain]\n notmain = [x for _, x in sorted(zip(nums, notmain))]\n\n specials = notmain\n\n extras = set()\n\n for nm in notmain:\n comp = set()\n comp.update(val)\n comp.update(extras)\n\n mindist = 1000\n minli1, minli2 = None, None\n for l in comp:\n if nm != l:\n diff = self.difference(nm, l)\n if diff < mindist:\n mindist = diff\n minli = l\n\n diffbase = self.difference(nm, zeros)\n if diffbase <= mindist:\n mindist = diffbase\n minli = zeros\n\n num1 = sum([int(s) for s in nm])\n num2 = sum([int(s) for s in minli])\n if num1 < num2:\n graph[nm].add(minli)\n else:\n graph[minli].add(nm)\n\n extras.add(nm)\n\n val = set()\n for v in graph.values():\n val.update(v)\n f2 = open(path_to_dag_results +'relation-tree' + addition + '.dot', 'w')\n f2.write(\"digraph dag {\\n\")\n f2.write(\"rankdir=LR;\\n\")\n num = 0\n for idx, li in names.items():\n text = ''\n name = str(idx) + '\\n'\n\n for l in li:\n name += l + ',\\n'\n if idx not in specials:\n text = str(idx) + \" [label=\\\"\" + name + \"\\\" , shape=box;]\"\n else: # treat in a special way. For now, leaving intact\n text = str(idx) + \" [shape=box label=\\\"\" + name + \"\\\"]\"\n f2.write(text)\n f2.write('\\n')\n for k, v in graph.items():\n for vi in v:\n f2.write(str(k) + \"->\" + str(vi))\n f2.write('\\n')\n f2.write(\"}\")\n f2.close()\n # Rendering DAG\n\n try:\n filename = path_to_dag_results +'relation-tree' + addition + '.dot'\n # src = Source(source=test)\n # new_name = self.path_to_store + \"DAG\" + addition + '.png'\n # src.render(new_name, view=True)\n\n render('dot', 'png', filename)\n except:\n print('Rendering DAG')\n # os.system('dot -Tpng relation-tree' + addition + '.dot -o DAG' + addition + '.png')\n # print('Done')\n\n\n # temporal heatmaps start\n\n print(\"Writing temporal heatmaps\")\n\n if not os.path.exists(path_to_intermediate_results + \"heatmaps\" + '/'):\n os.mkdir(path_to_intermediate_results + \"heatmaps\" + '/')\n os.mkdir(path_to_intermediate_results + \"heatmaps\" + '/bytes')\n os.mkdir(path_to_intermediate_results + \"heatmaps\" + '/gaps')\n os.mkdir(path_to_intermediate_results + \"heatmaps\" + '/sport')\n os.mkdir(path_to_intermediate_results + \"heatmaps\"+ '/dport')\n\n actlabels = []\n for a in range(len(values)): # range(10):\n actlabels.append(mapping[keys[a]])\n\n clusterinfo = {}\n seqclufile = csv_file\n lines = []\n lines = open(seqclufile).readlines()[1:]\n\n for line in lines:\n li = line.split(\",\") # clusnum, connnum, prob, srcip, dstip\n\n srcip = li[4]\n dstip = li[5][:-1]\n has = int(li[1])\n\n name = str('%12s->%12s' % (srcip, dstip))\n if li[0] not in clusterinfo.keys():\n clusterinfo[li[0]] = []\n clusterinfo[li[0]].append((has, name))\n\n sns.set(font_scale=0.9)\n matplotlib.rcParams.update({'font.size': 10})\n for names, sname, q in [(\"Packet sizes\", \"bytes\", 1), (\"Interval\", \"gaps\", 0), (\"Source Port\", \"sport\", 3),\n (\"Dest. 
Port\", \"dport\", 4)]:\n for clusnum, cluster in clusterinfo.items():\n items = [int(x[0]) for x in cluster]\n labels = [x[1] for x in cluster]\n\n acha = [actlabels.index(int(x[0])) for x in cluster]\n\n blah = [values[a] for a in acha]\n\n dataf = []\n\n for b in blah:\n dataf.append([x[q] for x in b][:self.window_size])\n\n df = pd.DataFrame(dataf, index=labels)\n\n g = sns.clustermap(df, xticklabels=False, col_cluster=False) # , vmin= minb, vmax=maxb)\n ind = g.dendrogram_row.reordered_ind\n fig = plt.figure(figsize=(10.0, 9.0))\n plt.suptitle(\"Exp: \" + self.expname + \" | Cluster: \" + clusnum + \" | Feature: \" + names)\n ax = fig.add_subplot(111)\n datanew = []\n labelsnew = []\n lol = []\n for it in ind:\n labelsnew.append(labels[it])\n lol.append(cluster[[x[1] for x in cluster].index(labels[it])][0])\n\n acha = [actlabels.index(int(x)) for x in lol]\n blah = [values[a] for a in acha]\n\n dataf = []\n\n for b in blah:\n dataf.append([x[q] for x in b][:20])\n df = pd.DataFrame(dataf, index=labelsnew)\n g = sns.heatmap(df, xticklabels=False)\n plt.setp(g.get_yticklabels(), rotation=0)\n plt.subplots_adjust(top=0.92, bottom=0.02, left=0.25, right=1, hspace=0.94)\n plt.savefig(path_to_intermediate_results + \"heatmaps\" + \"/\" + sname + \"/\" + clusnum)\n\n plt.close()\n plt.clf()\n\n\n ####################\n # Summary Creation #\n ####################\n\n print(\"Creating summary file\")\n\n summary_file = self.path_to_store + \"summary\" + addition + '.txt'\n\n summary_csv_df = pd.read_csv(csv_file)\n\n time_for_processing = end_time - start_time\n\n total_number_connections = len(summary_csv_df.index)\n total_number_packets = total_number_connections * self.window_size\n\n number_of_clusters = len(summary_csv_df[\"clusnum\"].unique())\n avg_size_of_cluster = int(summary_csv_df.groupby(\"clusnum\")[\"label\"].count().mean())\n std_size_of_cluster = round(summary_csv_df.groupby(\"clusnum\")[\"label\"].count().std(), 2)\n\n number_of_connections_in_noise_cluster = summary_csv_df[summary_csv_df[\"clusnum\"] == -1][\"clusnum\"].count()\n noise_percentage = round(number_of_connections_in_noise_cluster / total_number_connections, 3)\n\n total_number_unknown_connection = summary_csv_df[summary_csv_df[\"label\"] == \"Unknown\"][\"clusnum\"].count()\n\n if total_number_unknown_connection > 0:\n unknown_connections_in_noise_cluster = \\\n summary_csv_df[(summary_csv_df[\"label\"] == \"Unknown\") & (summary_csv_df[\"clusnum\"] == -1)][\"clusnum\"].count()\n percentage_total_unknown_in_noise_cluster = round(\n unknown_connections_in_noise_cluster / total_number_unknown_connection, 3)\n\n percentage_unknown_of_noise_cluster = round(\n summary_csv_df[summary_csv_df[\"clusnum\"] == -1][\"label\"].value_counts(normalize=True)[\"Unknown\"], 3)\n\n else:\n unknown_connections_in_noise_cluster = \"nan\"\n percentage_total_unknown_in_noise_cluster = \"nan\"\n percentage_unknown_of_noise_cluster = \"nan\"\n\n percentage_detailed_labels_in_noise_cluster = round((summary_csv_df[\n (summary_csv_df[\"detailed_label\"] != \"-\") & (\n summary_csv_df[\"clusnum\"] == -1)][\n \"clusnum\"].count()) / (\n summary_csv_df[summary_csv_df[\"detailed_label\"] != \"-\"][\n \"clusnum\"].count()), 3)\n\n per_cluster_label_count = summary_csv_df.groupby(\"clusnum\")[\"label\"].value_counts(normalize=True)\n max_label_per_cluster = per_cluster_label_count.groupby(\"clusnum\").idxmax().to_frame().reset_index()\n max_label_per_cluster[\"label\"] = max_label_per_cluster[\"label\"].apply(lambda x: x[1])\n\n 
max_percentage_per_cluster = per_cluster_label_count.groupby(\"clusnum\").max().to_frame().reset_index()\n max_percentage_per_cluster = max_percentage_per_cluster.rename(columns={\"label\": \"percentage\"})\n merged_df_1 = max_label_per_cluster.merge(right=max_percentage_per_cluster, on=\"clusnum\")\n\n connections_per_cluster = summary_csv_df.groupby(\"clusnum\")[\"label\"].count().to_frame().reset_index()\n connections_per_cluster = connections_per_cluster.rename(columns={\"label\": \"packet_count\"})\n connections_per_cluster[\"relative_packet_count\"] = connections_per_cluster[\"packet_count\"].apply(\n lambda x: x / total_number_connections)\n merged_df_2 = merged_df_1.merge(right=connections_per_cluster, on=\"clusnum\")\n\n merged_df_2[\"av_cluster_purity\"] = merged_df_2[\"percentage\"] * merged_df_2[\"relative_packet_count\"]\n average_cluster_purity = round(merged_df_2[\"av_cluster_purity\"].sum(), 3)\n\n detailed_labels_present = summary_csv_df[\"detailed_label\"].unique()\n detailed_labels_present = np.delete(detailed_labels_present, np.where(detailed_labels_present == \"-\"))\n avg_detailed_label_separation_list = []\n\n for detailed_label in detailed_labels_present:\n detailled_label_count_per_cluster = \\\n summary_csv_df[summary_csv_df[\"detailed_label\"] == detailed_label].groupby(\"clusnum\")[\n \"detailed_label\"].count().to_frame().reset_index()\n detailled_label_count_per_cluster_as_tuple = list(\n detailled_label_count_per_cluster.itertuples(index=False, name=None))\n\n max_value = 0\n total_count = 0\n for clusname, count_detailed_labels in detailled_label_count_per_cluster_as_tuple:\n if count_detailed_labels > max_value:\n max_value = count_detailed_labels\n total_count = total_count + count_detailed_labels\n separation = max_value / total_count\n avg_detailed_label_separation_list.append((separation, total_count))\n\n total_count_detailed_labels = summary_csv_df[summary_csv_df[\"detailed_label\"] != \"-\"][\"clusnum\"].count()\n avg_detailed_label_cohesion = round(\n sum(list(map((lambda x: x[0] * x[1]), avg_detailed_label_separation_list))) / total_count_detailed_labels,\n 3)\n\n avg_cluster_probability = round(summary_csv_df[\"probability\"].mean(), 3)\n\n with open(summary_file, 'w') as log_file:\n log_file.write(\"Total Time for processing: \" + str(round(time_for_processing, 2)) + \"\\n\")\n log_file.write(\"Validity index: \" + str(round(validity_index, 3)) + \"\\n\")\n log_file.write(\"Shilouette score: \" + str(silhouette_score) + \"\\n\")\n log_file.write(\"Total number of connections: \" + str(total_number_connections) + \"\\n\")\n log_file.write(\"Total number of packets: \" + str(total_number_packets) + \"\\n\")\n log_file.write(\"Number of clusters: \" + str(number_of_clusters) + \"\\n\")\n log_file.write(\"Average cluster size: \" + str(avg_size_of_cluster) + \"\\n\")\n log_file.write(\"Standard deviation cluster size: \" + str(std_size_of_cluster) + \"\\n\")\n log_file.write(\"Noise percentage: \" + str(noise_percentage) + \"\\n\")\n log_file.write(\"Percentage of all unknown connections that are in the noise cluster: \" + str(\n percentage_total_unknown_in_noise_cluster) + \"\\n\")\n log_file.write(\"Percentage of all connections in noise cluster that are unknown: \" + str(\n percentage_unknown_of_noise_cluster) + \"\\n\")\n log_file.write(\"Percentage of connections with detailed labels that are in noise cluster: \" + str(\n percentage_detailed_labels_in_noise_cluster) + \"\\n\")\n log_file.write(\"Average cluster purity: \" + 
str(average_cluster_purity) + \"\\n\")\n log_file.write(\"Average detailed label cohesion: \" + str(avg_detailed_label_cohesion) + \"\\n\")\n log_file.write(\"Average cluster probability: \" + str(avg_cluster_probability) + \"\\n\")\n log_file.close()\n\n\n ###############################\n # Performance Matrix Creation #\n ###############################\n\n print(\"Creating performance matrices\")\n\n performance_matrix_folder = path_to_intermediate_results + \"/performance_matrices\"\n os.mkdir(performance_matrix_folder)\n\n label_performance_matrix = performance_matrix_folder + \"/label_performance_matrix\" + addition + \".csv\"\n label_performance_matrix_table = performance_matrix_folder + \"/label_performance_matrix\" + addition + \".png\"\n\n detailed_label_performance_matrix = performance_matrix_folder + \"/detailed_label_performance_matrix\" + addition + \".csv\"\n detailed_label_performance_matrix_table = performance_matrix_folder + \"/detailed_label_performance_matrix\" + addition + \".png\"\n\n\n label_df = summary_csv_df.groupby(\"clusnum\")[\"label\"].value_counts().to_frame()\n label_df = label_df.rename(columns={\"label\": \"count\"})\n label_df = label_df.reset_index()\n\n labels = label_df[\"label\"].unique()\n\n for label in labels:\n lower_label = label.lower()\n label_df[lower_label] = np.where(label_df[\"label\"] == label, label_df[\"count\"], 0)\n\n label_df = label_df.drop([\"count\", \"label\"], axis=1)\n label_df = label_df.rename(columns={\"clusnum\": \"Cluster\"})\n\n columns = label_df.columns.tolist()\n labels = label_df.columns.tolist()\n labels.remove(\"Cluster\")\n clusters = label_df[\"Cluster\"].unique().tolist()\n\n data = []\n for cluster in clusters:\n cluster_column_data = []\n cluster_column_data.append(cluster)\n for label in labels:\n count = int(label_df[(label_df[\"Cluster\"] == cluster)][label].sum())\n cluster_column_data.append(count)\n data.append(cluster_column_data)\n\n improved_label_df = pd.DataFrame(data, columns=columns)\n\n detailed_label_df = summary_csv_df.groupby(\"clusnum\")[\"detailed_label\"].value_counts().to_frame()\n detailed_label_df = detailed_label_df.rename(columns={\"detailed_label\": \"count\"})\n detailed_label_df = detailed_label_df.reset_index()\n\n detailed_labels = detailed_label_df[\"detailed_label\"].unique()\n\n for detail_label in detailed_labels:\n lower_detail_label = detail_label.lower()\n detailed_label_df[lower_detail_label] = np.where(detailed_label_df[\"detailed_label\"] == detail_label,\n detailed_label_df[\"count\"], 0)\n\n detailed_label_df = detailed_label_df.drop([\"count\", \"detailed_label\"], axis=1)\n detailed_label_df = detailed_label_df.rename(columns={\"clusnum\": \"Cluster\"})\n\n columns = detailed_label_df.columns.tolist()\n labels = detailed_label_df.columns.tolist()\n labels.remove(\"Cluster\")\n clusters = detailed_label_df[\"Cluster\"].unique().tolist()\n\n data = []\n for cluster in clusters:\n cluster_column_data = []\n cluster_column_data.append(cluster)\n for label in labels:\n count = int(detailed_label_df[(detailed_label_df[\"Cluster\"] == cluster)][label].sum())\n cluster_column_data.append(count)\n data.append(cluster_column_data)\n\n improved_detail_label_df = pd.DataFrame(data, columns=columns)\n\n improved_label_df.to_csv(label_performance_matrix, index=False)\n\n fig, ax = plt.subplots()\n fig.patch.set_visible(False)\n ax.axis('off')\n ax.axis('tight')\n table = ax.table(cellText=improved_label_df.values, colLabels=improved_label_df.columns, loc='center',\n 
cellLoc='center')\n table.auto_set_column_width(col=list(range(len(improved_label_df.columns))))\n for (row, col), cell in table.get_celld().items():\n if (row == 0):\n cell.set_text_props(fontproperties=FontProperties(weight='bold'))\n fig.tight_layout()\n plt.savefig(label_performance_matrix_table)\n plt.close()\n plt.clf()\n\n improved_detail_label_df.to_csv(detailed_label_performance_matrix, index=False)\n\n reduced_column_size_name = [x[0:10] for x in improved_detail_label_df.columns.tolist()]\n\n fig, ax = plt.subplots()\n fig.patch.set_visible(False)\n ax.axis('off')\n ax.axis('tight')\n table2 = ax.table(cellText=improved_detail_label_df.values, colLabels=reduced_column_size_name, loc='center',\n cellLoc='center')\n table2.auto_set_column_width(col=list(range(len(reduced_column_size_name))))\n for (row, col), cell in table2.get_celld().items():\n if (row == 0):\n cell.set_text_props(fontproperties=FontProperties(weight='bold'))\n fig.tight_layout()\n plt.savefig(detailed_label_performance_matrix_table, dpi=1200, bbox_inches='tight')\n plt.close()\n plt.clf()\n\n\n ##################\n # Graph Creation #\n #################\n\n print(\"Creating graphs\")\n\n graphs_folder = self.path_to_store + \"/graphs_folder\"\n os.mkdir(graphs_folder)\n\n summary_csv_df = pd.read_csv(csv_file)\n\n application_name_graph = graphs_folder + \"/application_name_graph\" + addition + \".png\"\n path_to_application_name_legend_storage = graphs_folder + \"/application_name_legend\" + addition + \".png\"\n path_to_application_name_combined = graphs_folder + \"/application_name_combined\" + addition + \".png\"\n\n application_category_name_graph = graphs_folder + \"/application_category_name_graph\" + addition + \".png\"\n path_to_application_category_name_legend_storage = graphs_folder + \"/application_category_name_legend\" + addition + \".png\"\n path_to_application_category_name_combined = graphs_folder + \"/application_category_name_combined\" + addition + \".png\"\n\n label_distribution_graph = graphs_folder + \"/label_graph\" + addition + \".png\"\n path_to_label_legend_storage = graphs_folder + \"/label_legend\" + addition + \".png\"\n path_to_label_combined = graphs_folder + \"/label_combined\" + addition + \".png\"\n\n detailed_label_distribution_graph = graphs_folder + \"/detailed_label_graph\" + addition + \".png\"\n path_to_detailed_label_legend_storage = graphs_folder + \"/detailed_label_legend\" + addition + \".png\"\n path_to_detailed_label_combined = graphs_folder + \"/detailed_label_combined\" + addition + \".png\"\n\n name_distribution_graph = graphs_folder + \"/name_graph\" + addition + \".png\"\n path_to_name_legend_storage = graphs_folder + \"/name_legend\" + addition + \".png\"\n path_to_name_combined = graphs_folder + \"/name_combined\" + addition + \".png\"\n\n\n ####################\n # application name #\n ####################\n\n overall_detailed_label_df = summary_csv_df.groupby(\"clusnum\")[\"application_name\"].value_counts().to_frame()\n overall_detailed_label_df = overall_detailed_label_df.rename(columns={\"application_name\": \"count\"})\n overall_detailed_label_df = overall_detailed_label_df.reset_index()\n\n clusters = overall_detailed_label_df[\"clusnum\"].unique().tolist()\n\n if len(clusters) < 4:\n ncols = len(clusters)\n else:\n ncols = 4\n nrows = math.ceil(len(clusters) / 4)\n\n fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))\n\n list_of_names_dfs = []\n\n for cluster in clusters:\n cluster_df = 
overall_detailed_label_df[overall_detailed_label_df[\"clusnum\"] == cluster][\n [\"application_name\", \"count\"]]\n cluster_df[\"application_name\"] = np.where(cluster_df[\"count\"] <= 4, \"Other\", cluster_df.application_name)\n\n cluster_df = cluster_df.groupby(\"application_name\")[\"count\"].aggregate(sum).reset_index().sort_values(\n by=[\"count\"], ascending=False)\n\n list_of_names_dfs.append(cluster_df)\n\n detailed_label_name_df = list_of_names_dfs.pop()\n\n for name_df in list_of_names_dfs:\n detailed_label_name_df = detailed_label_name_df.append(name_df)\n\n detailed_label_name_df = detailed_label_name_df.groupby(\"application_name\")[\"count\"].aggregate(\n sum).reset_index().sort_values(by=[\"count\"])\n unique_application_category_names = detailed_label_name_df[\"application_name\"].tolist()\n\n colors = {}\n cmap = cm.tab20c(np.linspace(0, 1, len(unique_application_category_names)))\n\n for index, color in enumerate(cmap):\n application_name = unique_application_category_names.pop()\n colors[application_name] = color\n\n\n for index, cluster in enumerate(clusters):\n cluster_df = overall_detailed_label_df[overall_detailed_label_df[\"clusnum\"] == cluster][\n [\"application_name\", \"count\"]]\n\n cluster_df[\"application_name\"] = np.where(cluster_df[\"count\"] <= 4, \"Other\",\n cluster_df.application_name)\n\n cluster_df = cluster_df.groupby(\"application_name\")[\"count\"].aggregate(sum).reset_index().sort_values(\n by=[\"count\"])\n cluster_df[\"relative_count\"] = round((cluster_df[\"count\"] / cluster_df[\"count\"].sum()) * 100, 2)\n\n if len(clusters) == 1:\n patches, texts = ax.pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in cluster_df[\"application_name\"]])\n new_labels = self.clean_up_labels(texts)\n ax.clear()\n ax.pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"application_name\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax.set_title(\"Cluster \" + str(cluster))\n\n elif len(clusters) <= 4:\n patches, texts = ax[index].pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n cluster_df[\"application_name\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[index].clear()\n ax[index].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"application_name\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[index].set_title(\"Cluster \" + str(cluster))\n else:\n patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"],\n labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n cluster_df[\n \"application_name\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[math.floor(index / 4), index % 4].clear()\n ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in\n cluster_df[\"application_name\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[math.floor(index / 4), index % 4].set_title(\"Cluster \" + str(cluster))\n\n if len(clusters) % 4 != 0:\n if len(clusters) > 4:\n for missing_axis in range(4 - len(clusters) % 4, 4):\n ax[nrows - 1, missing_axis].axis('off')\n\n markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]\n\n plt.suptitle(\"Application Name Distribution per Cluster\", y=0.985, x=0.5, fontweight='bold')\n\n fig.tight_layout()\n fig.canvas.draw()\n 
fig.savefig(application_name_graph, dpi=1200)\n\n legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,\n bbox_to_anchor=(2, 0))\n separate_legend = legend.figure\n separate_legend.canvas.draw()\n bbox = legend.get_window_extent()\n bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))\n bbox = bbox.transformed(fig.dpi_scale_trans.inverted())\n fig.savefig(path_to_application_name_legend_storage, dpi=1200, bbox_inches=bbox)\n legend.remove()\n\n plt.close()\n plt.clf()\n\n graph_img = Image.open(application_name_graph)\n legend_im = Image.open(path_to_application_name_legend_storage)\n\n widths_graph = graph_img.width\n heights_graph = graph_img.height\n\n widths_legend = legend_im.width\n heights_legend = legend_im.height\n\n if heights_legend > heights_graph:\n resize_percentage = heights_graph / heights_legend\n new_width = int(resize_percentage * widths_legend)\n\n legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)\n\n total_width = widths_graph + widths_legend\n\n y_offset = int((heights_graph - heights_legend) / 2)\n\n combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))\n combined_im.paste(graph_img, (0, 0))\n combined_im.paste(legend_im, (widths_graph, y_offset))\n combined_im.save(path_to_application_name_combined)\n\n #############################\n # application category name #\n #############################\n\n overall_detailed_label_df = summary_csv_df.groupby(\"clusnum\")[\n \"application_category_name\"].value_counts().to_frame()\n overall_detailed_label_df = overall_detailed_label_df.rename(columns={\"application_category_name\": \"count\"})\n overall_detailed_label_df = overall_detailed_label_df.reset_index()\n\n clusters = overall_detailed_label_df[\"clusnum\"].unique().tolist()\n\n if len(clusters) < 4:\n ncols = len(clusters)\n else:\n ncols = 4\n nrows = math.ceil(len(clusters) / 4)\n\n fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))\n\n list_of_names_dfs = []\n\n for cluster in clusters:\n cluster_df = overall_detailed_label_df[overall_detailed_label_df[\"clusnum\"] == cluster][\n [\"application_category_name\", \"count\"]]\n\n cluster_df = cluster_df.groupby(\"application_category_name\")[\"count\"].aggregate(\n sum).reset_index().sort_values(\n by=[\"count\"], ascending=False)\n\n list_of_names_dfs.append(cluster_df)\n\n detailed_label_name_df = list_of_names_dfs.pop()\n\n for name_df in list_of_names_dfs:\n detailed_label_name_df = detailed_label_name_df.append(name_df)\n\n detailed_label_name_df = detailed_label_name_df.groupby(\"application_category_name\")[\"count\"].aggregate(\n sum).reset_index().sort_values(by=[\"count\"])\n unique_application_category_names = detailed_label_name_df[\"application_category_name\"].tolist()\n\n colors = {}\n cmap = cm.gist_rainbow(np.linspace(0, 1, len(unique_application_category_names)))\n\n for index, color in enumerate(cmap):\n application_name = unique_application_category_names.pop()\n colors[application_name] = color\n\n for index, cluster in enumerate(clusters):\n cluster_df = overall_detailed_label_df[overall_detailed_label_df[\"clusnum\"] == cluster][\n [\"application_category_name\", \"count\"]]\n\n cluster_df = cluster_df.groupby(\"application_category_name\")[\"count\"].aggregate(\n sum).reset_index().sort_values(\n by=[\"count\"])\n cluster_df[\"relative_count\"] = round((cluster_df[\"count\"] / cluster_df[\"count\"].sum()) * 100, 2)\n\n if len(clusters) == 1:\n patches, texts = 
ax.pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in cluster_df[\"application_category_name\"]])\n new_labels = self.clean_up_labels(texts)\n ax.clear()\n ax.pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"application_category_name\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax.set_title(\"Cluster \" + str(cluster))\n\n elif len(clusters) <= 4:\n patches, texts = ax[index].pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n cluster_df[\"application_category_name\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[index].clear()\n ax[index].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"application_category_name\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[index].set_title(\"Cluster \" + str(cluster))\n else:\n patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n cluster_df[\"application_category_name\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[math.floor(index / 4), index % 4].clear()\n ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"application_category_name\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[math.floor(index / 4), index % 4].set_title(\"Cluster \" + str(cluster))\n\n if len(clusters) % 4 != 0:\n if len(clusters) > 4:\n for missing_axis in range(4 - len(clusters) % 4, 4):\n ax[nrows - 1, missing_axis].axis('off')\n\n markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]\n fig.subplots_adjust(bottom=0.25)\n\n plt.suptitle(\"Application Category Name Distribution per Cluster\", y=0.985, x=0.5, fontweight='bold')\n\n fig.tight_layout()\n fig.canvas.draw()\n fig.savefig(application_category_name_graph, dpi=1200)\n\n legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,\n bbox_to_anchor=(2, 0))\n separate_legend = legend.figure\n separate_legend.canvas.draw()\n bbox = legend.get_window_extent()\n bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))\n bbox = bbox.transformed(fig.dpi_scale_trans.inverted())\n fig.savefig(path_to_application_category_name_legend_storage, dpi=1200, bbox_inches=bbox)\n legend.remove()\n\n plt.close()\n plt.clf()\n\n graph_img = Image.open(application_category_name_graph)\n legend_im = Image.open(path_to_application_category_name_legend_storage)\n\n widths_graph = graph_img.width\n heights_graph = graph_img.height\n\n widths_legend = legend_im.width\n heights_legend = legend_im.height\n\n if heights_legend > heights_graph:\n resize_percentage = heights_graph / heights_legend\n new_width = int(resize_percentage * widths_legend)\n\n legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)\n\n total_width = widths_graph + widths_legend\n\n y_offset = int((heights_graph - heights_legend) / 2)\n\n combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))\n combined_im.paste(graph_img, (0, 0))\n combined_im.paste(legend_im, (widths_graph, y_offset))\n combined_im.save(path_to_application_category_name_combined)\n\n #########\n # label #\n #########\n\n overall_detailed_label_df = summary_csv_df.groupby(\"clusnum\")[\"label\"].value_counts().to_frame()\n 
overall_detailed_label_df = overall_detailed_label_df.rename(columns={\"label\": \"count\"})\n overall_detailed_label_df = overall_detailed_label_df.reset_index()\n\n clusters = overall_detailed_label_df[\"clusnum\"].unique().tolist()\n\n if len(clusters) < 4:\n ncols = len(clusters)\n else:\n ncols = 4\n nrows = math.ceil(len(clusters) / 4)\n\n fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))\n\n colors = {}\n colors[\"Malicious\"] = \"r\"\n colors[\"Benign\"] = \"g\"\n colors[\"Unknown\"] = \"grey\"\n\n for index, cluster in enumerate(clusters):\n cluster_df = \\\n overall_detailed_label_df[overall_detailed_label_df[\"clusnum\"] == cluster][\n [\"label\", \"count\"]]\n\n cluster_df = cluster_df.groupby(\"label\")[\"count\"].aggregate(\n sum).reset_index().sort_values(\n by=[\"count\"])\n cluster_df[\"relative_count\"] = round((cluster_df[\"count\"] / cluster_df[\"count\"].sum()) * 100, 2)\n\n if (len(cluster_df.index) > 7):\n cluster_df[\"relative_count\"] = np.where(cluster_df[\"relative_count\"] <= 5, \"\",\n cluster_df[\"relative_count\"])\n\n if len(clusters) == 1:\n patches, texts = ax.pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in cluster_df[\"label\"]])\n new_labels = self.clean_up_labels(texts)\n ax.clear()\n ax.pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"label\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax.set_title(\"Cluster \" + str(cluster))\n\n elif len(clusters) <= 4:\n patches, texts = ax[index].pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n cluster_df[\"label\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[index].clear()\n ax[index].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"label\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[index].set_title(\"Cluster \" + str(cluster))\n else:\n patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"],\n labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n cluster_df[\n \"label\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[math.floor(index / 4), index % 4].clear()\n ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in\n cluster_df[\"label\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[math.floor(index / 4), index % 4].set_title(\"Cluster \" + str(cluster))\n\n if len(clusters) % 4 != 0:\n if len(clusters) > 4:\n for missing_axis in range(4 - len(clusters) % 4, 4):\n ax[nrows - 1, missing_axis].axis('off')\n\n markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]\n fig.subplots_adjust(bottom=0.25)\n\n plt.suptitle(\"Label Distribution per Cluster\", y=0.985, x=0.5, fontweight='bold')\n\n fig.tight_layout()\n fig.canvas.draw()\n fig.savefig(label_distribution_graph, dpi=1200)\n\n legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,\n bbox_to_anchor=(2, 0))\n separate_legend = legend.figure\n separate_legend.canvas.draw()\n bbox = legend.get_window_extent()\n bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))\n bbox = bbox.transformed(fig.dpi_scale_trans.inverted())\n fig.savefig(path_to_label_legend_storage, dpi=1200, bbox_inches=bbox)\n legend.remove()\n\n plt.close()\n plt.clf()\n\n graph_img = 
Image.open(label_distribution_graph)\n legend_im = Image.open(path_to_label_legend_storage)\n\n widths_graph = graph_img.width\n heights_graph = graph_img.height\n\n widths_legend = legend_im.width\n heights_legend = legend_im.height\n\n if heights_legend > heights_graph:\n resize_percentage = heights_graph / heights_legend\n new_width = int(resize_percentage * widths_legend)\n\n legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)\n\n total_width = widths_graph + widths_legend\n\n y_offset = int((heights_graph - heights_legend) / 2)\n\n combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))\n combined_im.paste(graph_img, (0, 0))\n combined_im.paste(legend_im, (widths_graph, y_offset))\n combined_im.save(path_to_label_combined)\n\n ##################\n # detailed label #\n ##################\n\n overall_detailed_label_df = summary_csv_df.groupby(\"clusnum\")[\"detailed_label\"].value_counts().to_frame()\n overall_detailed_label_df = overall_detailed_label_df.rename(columns={\"detailed_label\": \"count\"})\n overall_detailed_label_df = overall_detailed_label_df.reset_index()\n\n clusters = overall_detailed_label_df[\"clusnum\"].unique().tolist()\n\n if len(clusters) < 4:\n ncols = len(clusters)\n else:\n ncols = 4\n nrows = math.ceil(len(clusters) / 4)\n\n fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))\n list_of_names_dfs = []\n\n for cluster in clusters:\n cluster_df = overall_detailed_label_df[overall_detailed_label_df[\"clusnum\"] == cluster][\n [\"detailed_label\", \"count\"]]\n cluster_df[\"detailed_label\"] = np.where(cluster_df[\"detailed_label\"] == \"-\", \"Unknown\",\n cluster_df.detailed_label)\n\n cluster_df = cluster_df.groupby(\"detailed_label\")[\"count\"].aggregate(sum).reset_index().sort_values(\n by=[\"count\"], ascending=False)\n\n list_of_names_dfs.append(cluster_df)\n\n detailed_label_name_df = list_of_names_dfs.pop()\n\n for name_df in list_of_names_dfs:\n detailed_label_name_df = detailed_label_name_df.append(name_df)\n\n detailed_label_name_df = detailed_label_name_df.groupby(\"detailed_label\")[\"count\"].aggregate(\n sum).reset_index().sort_values(by=[\"count\"])\n unique_application_category_names = detailed_label_name_df[\"detailed_label\"].tolist()\n\n colors = {}\n cmap = cm.terrain(np.linspace(0, 1, len(unique_application_category_names)))\n\n for index, color in enumerate(cmap):\n application_name = unique_application_category_names.pop()\n colors[application_name] = color\n\n for index, cluster in enumerate(clusters):\n cluster_df = overall_detailed_label_df[overall_detailed_label_df[\"clusnum\"] == cluster][\n [\"detailed_label\", \"count\"]]\n\n cluster_df = cluster_df.groupby(\"detailed_label\")[\"count\"].aggregate(sum).reset_index().sort_values(\n by=[\"count\"])\n cluster_df[\"relative_count\"] = round((cluster_df[\"count\"] / cluster_df[\"count\"].sum()) * 100, 2)\n\n if len(clusters) == 1:\n patches, texts = ax.pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in cluster_df[\"detailed_label\"]])\n new_labels = self.clean_up_labels(texts)\n ax.clear()\n ax.pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"detailed_label\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax.set_title(\"Cluster \" + str(cluster))\n\n elif len(clusters) <= 4:\n patches, texts = ax[index].pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n 
cluster_df[\"detailed_label\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[index].clear()\n ax[index].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"detailed_label\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[index].set_title(\"Cluster \" + str(cluster))\n else:\n patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"],\n labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n cluster_df[\n \"detailed_label\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[math.floor(index / 4), index % 4].clear()\n ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in\n cluster_df[\"detailed_label\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[math.floor(index / 4), index % 4].set_title(\"Cluster \" + str(cluster))\n\n if len(clusters) % 4 != 0:\n if len(clusters) > 4:\n for missing_axis in range(4 - len(clusters) % 4, 4):\n ax[nrows - 1, missing_axis].axis('off')\n\n markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]\n fig.subplots_adjust(bottom=0.25)\n\n plt.suptitle(\"Detailed Label Distribution per Cluster\", y=0.985, x=0.5, fontweight='bold')\n\n fig.tight_layout()\n fig.canvas.draw()\n fig.savefig(detailed_label_distribution_graph, dpi=1200)\n\n legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,\n bbox_to_anchor=(2, 0))\n separate_legend = legend.figure\n separate_legend.canvas.draw()\n bbox = legend.get_window_extent()\n bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))\n bbox = bbox.transformed(fig.dpi_scale_trans.inverted())\n fig.savefig(path_to_detailed_label_legend_storage, dpi=1200, bbox_inches=bbox)\n legend.remove()\n\n plt.close()\n plt.clf()\n\n graph_img = Image.open(detailed_label_distribution_graph)\n legend_im = Image.open(path_to_detailed_label_legend_storage)\n\n widths_graph = graph_img.width\n heights_graph = graph_img.height\n\n widths_legend = legend_im.width\n heights_legend = legend_im.height\n\n if heights_legend > heights_graph:\n resize_percentage = heights_graph / heights_legend\n new_width = int(resize_percentage * widths_legend)\n\n legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)\n\n total_width = widths_graph + widths_legend\n\n y_offset = int((heights_graph - heights_legend) / 2)\n\n combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))\n combined_im.paste(graph_img, (0, 0))\n combined_im.paste(legend_im, (widths_graph, y_offset))\n combined_im.save(path_to_detailed_label_combined)\n\n ########\n # name #\n ########\n\n overall_name_df = summary_csv_df.groupby(\"clusnum\")[\"name\"].value_counts().to_frame()\n overall_name_df = overall_name_df.rename(columns={\"name\": \"count\"})\n overall_name_df = overall_name_df.reset_index()\n\n clusters = overall_name_df[\"clusnum\"].unique().tolist()\n\n if len(clusters) < 4:\n ncols = len(clusters)\n else:\n ncols = 4\n nrows = math.ceil(len(clusters) / 4)\n\n fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(7, 7))\n list_of_names_dfs = []\n\n for cluster in clusters:\n cluster_df = overall_name_df[overall_name_df[\"clusnum\"] == cluster][\n [\"name\", \"count\"]]\n\n cluster_df = cluster_df.groupby(\"name\")[\"count\"].aggregate(sum).reset_index().sort_values(\n by=[\"count\"], ascending=False)\n\n 
list_of_names_dfs.append(cluster_df)\n\n detailed_label_name_df = list_of_names_dfs.pop()\n\n for name_df in list_of_names_dfs:\n detailed_label_name_df = detailed_label_name_df.append(name_df)\n\n detailed_label_name_df = detailed_label_name_df.groupby(\"name\")[\"count\"].aggregate(\n sum).reset_index().sort_values(by=[\"count\"])\n unique_application_category_names = detailed_label_name_df[\"name\"].tolist()\n\n colors = {}\n cmap = cm.ocean(np.linspace(0, 1, len(unique_application_category_names)))\n\n for index, color in enumerate(cmap):\n application_name = unique_application_category_names.pop()\n colors[application_name] = color\n\n for index, cluster in enumerate(clusters):\n cluster_df = overall_name_df[overall_name_df[\"clusnum\"] == cluster][\n [\"name\", \"count\"]]\n\n cluster_df = cluster_df.groupby(\"name\")[\"count\"].aggregate(sum).reset_index().sort_values(\n by=[\"count\"])\n cluster_df[\"relative_count\"] = round((cluster_df[\"count\"] / cluster_df[\"count\"].sum()) * 100, 2)\n\n if len(clusters) == 1:\n patches, texts = ax.pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in cluster_df[\"name\"]])\n new_labels = self.clean_up_labels(texts)\n ax.clear()\n ax.pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"name\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax.set_title(\"Cluster \" + str(cluster))\n\n elif len(clusters) <= 4:\n patches, texts = ax[index].pie(cluster_df[\"count\"], labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n cluster_df[\"name\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[index].clear()\n ax[index].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in cluster_df[\"name\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[index].set_title(\"Cluster \" + str(cluster))\n else:\n patches, texts = ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"],\n labels=cluster_df[\"relative_count\"],\n colors=[colors[key] for key in\n cluster_df[\n \"name\"]],\n labeldistance=1.25)\n new_labels = self.clean_up_labels(texts)\n ax[math.floor(index / 4), index % 4].clear()\n ax[math.floor(index / 4), index % 4].pie(cluster_df[\"count\"], labels=new_labels,\n colors=[colors[key] for key in\n cluster_df[\"name\"]],\n labeldistance=1.15, textprops={'fontsize': 8})\n ax[math.floor(index / 4), index % 4].set_title(\"Cluster \" + str(cluster))\n\n if len(clusters) % 4 != 0:\n if len(clusters) > 4:\n for missing_axis in range(4 - len(clusters) % 4, 4):\n ax[nrows - 1, missing_axis].axis('off')\n\n markers = [plt.Line2D([0, 0], [0, 0], color=color, marker='o', linestyle='') for color in colors.values()]\n fig.subplots_adjust(bottom=0.25)\n\n plt.suptitle(\"Device / Malware Distribution per Cluster\", y=0.985, x=0.5, fontweight='bold')\n\n fig.tight_layout()\n fig.canvas.draw()\n fig.savefig(name_distribution_graph, dpi=1200)\n\n legend = plt.legend(handles=markers, labels=colors.keys(), loc=3, framealpha=1, frameon=True,\n bbox_to_anchor=(2, 0))\n separate_legend = legend.figure\n separate_legend.canvas.draw()\n bbox = legend.get_window_extent()\n bbox = bbox.from_extents(*(bbox.extents + np.array([-4, -4, 4, 4])))\n bbox = bbox.transformed(fig.dpi_scale_trans.inverted())\n fig.savefig(path_to_name_legend_storage, dpi=1200, bbox_inches=bbox)\n legend.remove()\n\n plt.close()\n plt.clf()\n\n graph_img = Image.open(name_distribution_graph)\n legend_im = 
Image.open(path_to_name_legend_storage)\n\n widths_graph = graph_img.width\n heights_graph = graph_img.height\n\n widths_legend = legend_im.width\n heights_legend = legend_im.height\n\n if heights_legend > heights_graph:\n resize_percentage = heights_graph / heights_legend\n new_width = int(resize_percentage * widths_legend)\n\n legend_im = legend_im.resize((new_width, heights_graph), Image.ANTIALIAS)\n\n total_width = widths_graph + widths_legend\n\n y_offset = int((heights_graph - heights_legend) / 2)\n\n combined_im = Image.new('RGB', (total_width, heights_graph), color=(255, 255, 255, 1))\n combined_im.paste(graph_img, (0, 0))\n combined_im.paste(legend_im, (widths_graph, y_offset))\n combined_im.save(path_to_name_combined)\n\n def clean_up_labels(self, texts):\n\n amount_skip = 0\n new_labels = []\n for text_index, text in enumerate(texts):\n if (text_index == 0):\n new_labels.append(text.get_text())\n else:\n current_xy = text.get_position()\n current_str = text.get_text()\n\n past_text = texts[text_index - 1]\n past_xy = past_text.get_position()\n past_str = new_labels[text_index - 1]\n\n distance = math.sqrt(\n pow((current_xy[0] - past_xy[0]), 2) + pow((current_xy[1] - past_xy[1]), 2))\n\n if distance < 0.3:\n if distance < 0.2:\n if amount_skip < 2:\n new_labels.append(\" \")\n amount_skip = amount_skip + 1\n else:\n new_labels.append(current_str)\n amount_skip = 0\n else:\n if past_str != \" \":\n new_labels.append(\" \")\n amount_skip = amount_skip + 1\n else:\n new_labels.append(current_str)\n amount_skip = 0\n else:\n new_labels.append(current_str)\n amount_skip = 0\n\n return new_labels\n\n def inet_to_str(self, inet):\n \"\"\"Convert inet object to a string\n Args:\n inet (inet struct): inet network address\n Returns:\n str: Printable/readable IP address\n \"\"\"\n # First try ipv4 and then ipv6\n try:\n return socket.inet_ntop(socket.AF_INET, inet)\n except ValueError:\n return socket.inet_ntop(socket.AF_INET6, inet)\n\n\n src_set, dst_set, gap_set, proto_set, bytes_set, events_set, ip_set, dns_set, port_set = set(), set(), set(), set(), set(), set(), set(), set(), set()\n src_dict, dst_dict, proto_dict, events_dict, dns_dict, port_dict = {}, {}, {}, {}, {}, {}\n bytes, gap_list = [], []\n\n\n def readpcap_window(self, filename):\n\n print(\"Window mode\")\n print(\"Reading\", os.path.basename(filename))\n mal = 0\n ben = 0\n tot = 0\n counter = 0\n ipcounter = 0\n tcpcounter = 0\n udpcounter = 0\n\n data = []\n connections = {}\n packetspersecond = []\n bytesperhost = {}\n count = 0\n previousTimestamp = {}\n bytespersec = 0\n gaps = []\n incoming = []\n outgoing = []\n period = 0\n bla = 0\n f = open(filename, 'rb')\n pcap = dpkt.pcap.Reader(f)\n for ts, pkt in pcap:\n counter += 1\n eth = None\n bla += 1\n try:\n eth = dpkt.ethernet.Ethernet(pkt)\n except:\n continue\n\n if eth.type != dpkt.ethernet.ETH_TYPE_IP:\n continue\n\n ip = eth.data\n\n src_ip = self.inet_to_str(ip.src)\n dst_ip = self.inet_to_str(ip.dst)\n\n key = (src_ip, dst_ip)\n\n timestamp = datetime.datetime.utcfromtimestamp(ts)\n\n if key in previousTimestamp:\n gap = (timestamp - previousTimestamp[key]).microseconds / 1000\n else:\n gap = 0\n\n previousTimestamp[key] = timestamp\n\n tupple = (gap, ip.len, ip.p)\n\n gaps.append(tupple)\n\n sport = 0\n dport = 0\n\n try:\n if ip.p == dpkt.ip.IP_PROTO_TCP or ip.p == dpkt.ip.IP_PROTO_UDP:\n sport = ip.data.sport\n dport = ip.data.dport\n except:\n continue\n\n if key not in connections.keys():\n connections[key] = []\n connections[key].append((gap, ip.len, 
ip.p, sport, dport))\n\n print(os.path.basename(filename), \" num connections: \", len(connections))\n\n values = []\n todel = []\n print('Before cleanup: Total packets: ', len(gaps), ' in ', len(connections), ' connections.')\n\n final_connections = {}\n\n for (src_ip, dst_ip), packets in connections.items(): # clean it up\n src_ip = src_ip\n dst_ip = dst_ip\n\n window = 0\n loop_packet_list = []\n\n for index, packet in enumerate(packets):\n loop_packet_list.append(packet)\n if len(loop_packet_list) == self.window_size:\n final_connections[(src_ip, dst_ip, str(window))] = loop_packet_list\n loop_packet_list = []\n window = window + 1\n\n\n print(\"Remaining connections after clean up \", len(connections))\n\n return (gaps, final_connections)\n\n def readfolde_window(self):\n fno = 0\n meta = {}\n mapping = {}\n files = glob.glob(self.path_to_folder + \"/*.pcap\")\n print('About to read pcap...')\n for f in files:\n key = os.path.basename(f) # [:-5].split('-')\n\n data, connections = self.readpcap_window(f)\n if len(connections.items()) < 1:\n continue\n\n for i, v in connections.items():\n name = key + \"->\" + i[0] + \"->\" + i[1] + \"->\" + i[2]\n mapping[name] = fno\n fno += 1\n meta[name] = v\n\n print(\"Average conn length: \", np.mean([len(x) for i, x in connections.items()]))\n print(\"Minimum conn length: \", np.min([len(x) for i, x in connections.items()]))\n print(\"Maximum conn length: \", np.max([len(x) for i, x in connections.items()]))\n print('----------------')\n\n print('++++++++++++++++')\n print('----------------')\n print('Done reading pcaps...')\n print('Collective surviving connections ', len(meta))\n\n self.connlevel_sequence(meta, mapping)\n\n\n def readfile(self, path_to_pcap_file):\n startf = time.time()\n mapping = {}\n print('About to read pcap...')\n data, connections = self.readpcap(path_to_pcap_file)\n print('Done reading pcaps...')\n if len(connections.items()) < 1:\n return\n\n endf = time.time()\n print('file reading ', (endf - startf))\n fno = 0\n meta = {}\n nconnections = {}\n print(\"Average conn length: \", np.mean([len(x) for i, x in connections.items()]))\n print(\"Minimum conn length: \", np.min([len(x) for i, x in connections.items()]))\n print(\"Maximum conn length: \", np.max([len(x) for i, x in connections.items()]))\n\n for i, v in connections.items():\n name = i[0] + \"->\" + i[1]\n mapping[name] = fno\n fno += 1\n meta[name] = v\n print('Surviving connections ', len(meta))\n startc = time.time()\n self.connlevel_sequence(meta, mapping)\n endc = time.time()\n print('Total time ', (endc - startc))\n"
] | [
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.annotate",
"scipy.spatial.distance.cosine",
"matplotlib.pyplot.savefig",
"sklearn.manifold.TSNE",
"numpy.array",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.annotate",
"scipy.spatial.distance.cosine",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"sklearn.manifold.TSNE",
"matplotlib.rcParams.update",
"matplotlib.pyplot.subplots_adjust",
"numpy.array",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.figure"
],
[
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.font_manager.FontProperties",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.clf",
"matplotlib.rcParams.update",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"numpy.array",
"matplotlib.pyplot.suptitle",
"numpy.where",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
vinclab/loan-customer-scoring | [
"896bccd6ac25eb4c5bcb45e5429272a1e9257430"
] | [
"model/Modules/cleaning59.py"
] | [
"#BANQUE DE FONCTIONS DE NETTOYAGE DE DATAFRAME - Vincent Salas\n\n# import des librairies dont nous aurons besoin\nimport pandas as pd\nimport numpy as np\n\n#-------------------------------------------------------------------------------------------------------------------------\n#NaN DATAFRAME\n#-------------------------------------------------------------------------------------------------------------------------\n# Définition d\"une fonction identifiant les lignes sans données d'un dataframe\ndef na_rows_list(dataframe,value):\n \"\"\"Fonction faisant la somme des éléments de ligne et retournant une liste d'indices de lignes sans données\"\"\"\n \"\"\"La valeur de vérification conditionnelle dépend d'un projet, A CHANGER!!!!!!!!\"\"\"\n na_row_list = []\n for i in dataframe.index:\n if dataframe.loc[i].value_counts().sum() == value: # Attention valeur de condition à changer\n na_row_list.append(i)\n \n return na_row_list\n\n# Définition d\"une fonction supprimant les lignes sans données d'un dataframe \ndef na_raw_drop_df(dataframe,liste):\n \"\"\"Supprime les lignes NaN\"\"\"\n \"\"\"Utilisation de la fonction na_raws au préalable pour trouver une liste de lignes à supprimer\"\"\"\n \"\"\"Renvoie un dataframe\"\"\"\n for i in liste: # Utilise na_raws()\n dataframe.drop(i,inplace=True)\n \n return dataframe\n\n#-------------------------------------------------------------------------------------------------------------------------\n#LIGNES DATAFRAME\n#-------------------------------------------------------------------------------------------------------------------------\n# Taux de remplissage moyen par ligne\n#def row_data_rate_mean(dataframe):\n# \"\"\"Calcul du taux moyen de remplissage par ligne\"\"\"\n# rows_rate = []\n# for i in dataframe.index:\n# rate = dataframe.loc[i].value_counts().sum()/dataframe.shape[1]\n# rows_rate.append(rate)\n# \n# return sum(rows_rate)/len(rows_rate)\n\n# Définition d\"une fonction identifiant les lignes d'un dataframe avec un taux de remplissage minimal \ndef min_row_data_rate_list(dataframe,value):\n \"\"\"Fonction faisant la somme des éléments de ligne et retournant une liste d'indices de lignes avec un taux de \n remplissage minimal\"\"\"\n \"\"\"La valeur de vérification conditionnelle dépend d'un projet, A CHANGER!!!!!!!!\"\"\"\n rows_list = []\n for i in dataframe.index:\n if dataframe.loc[i].value_counts().sum()/dataframe.shape[1] > value:\n rows_list.append(i)\n \n return rows_list\n\n# Définition d\"une fonction supprimant les lignes d'un dataframe avec un taux de remplissage insuffisant\ndef min_row_data_rate_df(dataframe,value):\n \"\"\"Fonction faisant la somme des éléments de ligne et retournant un dataframe avec un taux de \n remplissage minimal\"\"\"\n \"\"\"La valeur de vérification conditionnelle dépend d'un projet, A CHANGER!!!!!!!!\"\"\"\n rows_list = []\n for i in dataframe.index:\n if dataframe.loc[i].value_counts().sum()/dataframe.shape[1] < value:\n dataframe.drop(i,inplace=True)\n \n return dataframe\n\n# Définition d\"une fonction identifiant les lignes avec peu de données d'un dataframe\n#def few_data_rows_list(dataframe,value):\n# \"\"\"Fonction faisant la somme des éléments de ligne et retournant une liste d'indices de lignes avec peu de\n# données, remplissant une condition de remplissage < x données\"\"\"\n# \"\"\"La valeur de vérification conditionnelle dépend d'un projet, A CHANGER!!!!!!!!\"\"\"\n# rows_list = []\n# for i in dataframe.index:\n# if dataframe.loc[i].value_counts().sum() < value:\n# 
rows_list.append(i)\n# return rows_list\n\n# Définition d\"une fonction identifiant les lignes avec un minimum de données d'un dataframe\n#def enough_data_rows_list(dataframe,value):\n# \"\"\"Fonction faisant la somme des éléments de ligne et retournant une liste d'indices de lignes avec assez de\n# données, remplissant une condition de remplissage > x données\"\"\"\n# \"\"\"La valeur de vérification conditionnelle dépend d'un projet, A CHANGER!!!!!!!!\"\"\"\n# rows_list = []\n# for i in dataframe.index:\n# if dataframe.loc[i].value_counts().sum() > value:\n# rows_list.append(i)\n# return rows_list\n\n\n\n#------------------------------------------------------------------------------------------------------------------------\n#COLONNES DATAFRAME\n#-------------------------------------------------------------------------------------------------------------------------\n# Vérification du taux de remplissage par colonne\ndef column_data_rate(dataframe):\n \"\"\"Fonction vérifiant le taux de remplissage par colonne\"\"\"\n serie_na = dataframe.notna().sum()/len(dataframe)\n \n return serie_na\n\n# Taux de remplissage moyen par colonne:\ndef column_data_rate_mean(dataframe):\n \"\"\"Calcul du taux moyen de remplissage par colonne\"\"\"\n serie_na = dataframe.notna().sum()/len(dataframe)\n \n return serie_na[:].mean()\n\n# Liste des colonnes d'un dataframe à supprimer si non dans une liste comparative\ndef columns_not_in_list(dataframe, liste_garder):\n \"\"\"Compare le nom des colonnes d'un dataframe avec une liste. Renvoie une liste de colonnes à supprimer si non dans la \n liste voulue\"\"\"\n colonnes_supprimer = []\n for colonne in dataframe.columns:\n if colonne not in liste_garder:\n colonnes_supprimer.append(colonne)\n \n return colonnes_supprimer\n\n# Suppression de colonnes d'un dataframe à partir d'une liste\ndef columns_delete_df(dataframe, list_delete):\n \"\"\"Supprime les colonnes d'un dataframe non incluses dans une liste et retourne le dataframe\"\"\"\n dataframe.drop(list_delete,axis=1,inplace= True)\n \n return dataframe\n\n# Définition d\"une fonction supprimant les colonnes d'un dataframe avec un taux de remplissage insuffisant\ndef min_column_data_rate_df(dataframe,value):\n \"\"\"Fonction retournant un dataframe avec un taux de remplissage minimal par colonne\"\"\"\n \"\"\"La valeur de vérification conditionnelle dépend d'un projet, A CHANGER!!!!!!!!\"\"\"\n column_list = []\n for c in dataframe.columns:\n if dataframe[c].value_counts().sum()/dataframe.shape[0] < value:\n del dataframe[c]\n \n return dataframe\n\n#-------------------------------------------------------------------------------------------------------------------------\n# Valeurs aberrantes \n#-------------------------------------------------------------------------------------------------------------------------\n\n# Calcul du nombre de valeurs aberrantes d'un dataframe\n#def low_outlier_count(dataframe, columns, value):\n# \"\"\"Calcul du nombre de valeurs aberrantes d'un dataframe en-dessous d'une valeur, #impression du \n# résultat\"\"\"\n# import numpy as np\n# dic_count_before = {}\n# dic_count_after = {}\n# dic_count_variables_aberrantes = {}\n#\n# print('Nombre de valeurs aberrantes :\\n')\n# for variable in columns:\n# dic_count_before[variable] = dataframe[variable].value_counts().sum()\n# dataframe[variable] = [t if t>value else np.NaN for t in dataframe[variable]]\n# dic_count_after[variable] = dataframe[variable].value_counts().sum()\n# dic_count_variables_aberrantes[variable] = 
dic_count_before[variable] - #dic_count_after[variable]\n# \n# return dic_count_variables_aberrantes\n\n# Calcul du nombre de valeurs aberrantes d'un dataframe\n#def high_outlier_count(dataframe, columns, value):\n# \"\"\"Calcul du nombre de valeurs aberrantes d'un dataframe au-dessus d'une valeur, #impression du \n# résultat\"\"\"\n# import numpy as np\n# dic_count_before = {}\n# dic_count_after = {}\n# dic_count_variables_aberrantes = {}\n#\n# print('Nombre de valeurs aberrantes :\\n')\n# for variable in columns:\n# dic_count_before[variable] = dataframe[variable].value_counts().sum()\n# dataframe[variable] = [t if t<value else np.NaN for t in dataframe[variable]]\n# dic_count_after[variable] = dataframe[variable].value_counts().sum()\n# dic_count_variables_aberrantes[variable] = dic_count_before[variable] - #dic_count_after[variable]\n# \n# return dic_count_variables_aberrantes\n\n\n#def below_zero_filter(x):\n# if x < 0:\n# x = np.NaN\n# else:\n# None\n\n#Fonction pour le filtre de séries ou colonnes d'un dataframe\n#def below_value_filter(x,value):\n# \"\"\"Filtre les valeurs en dessous d'une valeur définie. A associer avec .apply(lambda x: ) pour modifier les colonnes \n# d'un dataframe\"\"\"\n# import numpy as np\n# if x != np.NaN and x < value:\n# return np.NaN\n# else:\n# return x\n \n#Fonction pour le filtre de séries ou colonnes d'un dataframe\n#def above_value_filter(x,value):\n# \"\"\"Filtre les valeurs au dessus d'une valeur définie. A associer avec .apply(lambda x: ) pour modifier les colonnes \n# d'un dataframe\"\"\"\n# import numpy as np\n# if x != np.NaN and x > value:\n# return np.NaN\n# else:\n# return x\n\n# Calcul du nombre de valeurs aberrantes d'un dataframe, filtre de celle-ci et impression du résultat\n#Input: une SEULE valeur au choix pour chaque colonne\ndef high_outlier_filter_df(dataframe, columns, value):\n \"\"\"Calcul du nombre de valeurs aberrantes d'un dataframe au-dessus d'une valeur, filtre de celles-ci et impression \n du résultat\"\"\"\n import numpy as np\n dataframe_filter = dataframe.copy()\n dic_count_before = {}\n dic_count_after = {}\n dic_count_variables_aberrantes = {}\n dic_percent_variables_aberrantes = {}\n\n print('Nombre de valeurs aberrantes :\\n')\n for variable in columns:\n dic_count_before[variable] = dataframe_filter[variable].value_counts().sum()\n dataframe_filter[variable] = dataframe_filter[variable].map(lambda x: x if x<value else np.NaN)\n dic_count_after[variable] = dataframe_filter[variable].value_counts().sum()\n dic_count_variables_aberrantes[variable] = dic_count_before[variable] - dic_count_after[variable]\n dic_percent_variables_aberrantes[variable] = (dic_count_before[variable] - \n dic_count_after[variable])/len(dataframe)\n \n print(dic_count_variables_aberrantes) \n print('\\n')\n print('ratio de valeurs aberrantes :\\n')\n print(dic_percent_variables_aberrantes) \n return dataframe_filter\n\n# Calcul du nombre de valeurs aberrantes d'un dataframe, filtre de celle-ci et impression du résultat\n#Input: une SEULE valeur au choix pour chaque colonne\ndef low_outlier_filter_df(dataframe, columns,value):\n \"\"\"Calcul du nombre de valeurs aberrantes d'un dataframe au-dessus d'une valeur, filtre de celles-ci et impression du \n résultat\"\"\"\n import numpy as np\n dic_count_before = {}\n dic_count_after = {}\n dic_count_variables_aberrantes = {}\n dic_percent_variables_aberrantes = {}\n\n print('Nombre de valeurs aberrantes :\\n')\n for variable in columns:\n dic_count_before[variable] = 
dataframe[variable].value_counts().sum()\n dataframe[variable] = dataframe[variable].map(lambda x: x if x>value else np.NaN)\n dic_count_after[variable] = dataframe[variable].value_counts().sum()\n dic_count_variables_aberrantes[variable] = dic_count_before[variable] - dic_count_after[variable]\n dic_percent_variables_aberrantes[variable] = (dic_count_before[variable] - \n dic_count_after[variable])/len(dataframe)\n \n print(dic_count_variables_aberrantes) \n print('\\n')\n print('ratio de valeurs aberrantes :\\n')\n print(dic_percent_variables_aberrantes) \n return dataframe\n\ndef sign_invert_filter_df(dataframe, columns):\n \"\"\"Inversion de signe de valeurs numériques si inférieur à zéro\"\"\"\n import numpy as np\n dic_count_before = {}\n dic_count_after = {}\n dic_count_variables_aberrantes = {}\n dic_percent_variables_aberrantes = {}\n\n print('Nombre de valeurs aberrantes :\\n')\n for variable in columns:\n dic_count_before[variable] = dataframe[variable].value_counts().sum()\n dataframe[variable] = dataframe[variable].map(lambda x: -x if x<0 else x)\n dic_count_after[variable] = dataframe[variable].value_counts().sum()\n dic_count_variables_aberrantes[variable] = dic_count_before[variable] - dic_count_after[variable]\n dic_percent_variables_aberrantes[variable] = (dic_count_before[variable] - \n dic_count_after[variable])/len(dataframe)\n \n print(dic_count_variables_aberrantes) \n print('\\n')\n print('ratio de valeurs aberrantes :\\n')\n print(dic_percent_variables_aberrantes) \n return dataframe\n\n\n# Calcul du nombre de valeurs aberrantes d'un dataframe, filtre de celle-ci et impression du résultat\n# Input: dictionnaire (une valeur par colonne)\ndef dic_high_outlier_filter_df(dataframe, columns, dictionary):\n \"\"\"Calcul du nombre de valeurs aberrantes d'un dataframe au-dessus d'une valeur lue dans un dictionnaire, filtre de \n celles-ci et impression du résultat\"\"\"\n import numpy as np\n dic_count_before = {}\n dic_count_after = {}\n dic_count_variables_aberrantes = {}\n dic_percent_variables_aberrantes = {}\n \n print('Nombre de valeurs aberrantes :\\n')\n for variable in columns:\n dic_count_before[variable] = dataframe[variable].value_counts().sum()\n dataframe[variable] = dataframe[variable].map(lambda x: x if x<dictionary[variable] else np.NaN)\n dic_count_after[variable] = dataframe[variable].value_counts().sum()\n dic_count_variables_aberrantes[variable] = dic_count_before[variable] - dic_count_after[variable]\n dic_percent_variables_aberrantes[variable] = (dic_count_before[variable] - \n dic_count_after[variable])/len(dataframe)\n \n print(dic_count_variables_aberrantes) \n print('\\n')\n print('ratio de valeurs aberrantes :\\n')\n print(dic_percent_variables_aberrantes) \n return dataframe\n\n#-------------------------------------------------------------------------------------------------------------------------\n# Recherche et filtre de chaînes de caractères dans dataframe\n#-------------------------------------------------------------------------------------------------------------------------\n\n# Renvoie un dataframe filtré\ndef word_column_filter_df(dataframe, column_to_filter, column_freeze, word_list):\n# La fonction .where() donne une position qu'il faut transformer en index\n# Il faut entrer le nom d'une colonne repère (exemple: code produit) pour retrouver l'index, ou construire un colonne de re-indexée.\n \"\"\"Filtre les colonnes d'un dataframe, en fonction d'une liste de mots, puis retourne le dataframe\"\"\"\n import re\n position_to_drop_lst = 
np.where(dataframe[column_to_filter].str.contains('|'.join(map(re.escape, word_list)), \n np.NaN))[0]\n indices_to_drop_lst = []\n for position in position_to_drop_lst:\n indice = (dataframe[dataframe[column_freeze] == dataframe.iloc[position].loc[column_freeze]]).index[0]\n indices_to_drop_lst.append(indice)\n\n print(\"Nombre de lignes supprimées:\")\n nbr= len(indices_to_drop_lst)\n print(nbr)\n print(\"\\n\")\n\n dataframe.drop(indices_to_drop_lst, axis=0,inplace=True)\n\n return dataframe\n\n# Renvoie une liste des indices\ndef word_column_filter_lst(dataframe, column_to_filter, column_freeze, word_list):\n# La fonction .where() donne une position qu'il faut transformer en index\n# Il faut entrer le nom d'une colonne repère (exemple: code produit) pour retrouver l'index, ou construire un colonne de re-indexée.\n \"\"\"Filtre les colonnes d'un dataframe, en fonction d'une liste de mots, puis retourne le dataframe\"\"\"\n import re\n position_to_drop_lst = np.where(dataframe[column_to_filter].str.contains('|'.join(map(re.escape, word_list)), \n np.NaN))[0]\n indices_to_drop_lst = []\n for position in position_to_drop_lst:\n indice = (dataframe[dataframe[column_freeze] == dataframe.iloc[position].loc[column_freeze]]).index[0]\n indices_to_drop_lst.append(indice)\n\n print(\"Nombre de lignes supprimées:\")\n nbr= len(indices_to_drop_lst)\n print(nbr)\n print(\"\\n\")\n\n return indices_to_drop_lst\n\n#-------------------------------------------------------------------------------------------------------------------------\n# Tirage aléatoire de produits/bâtiments etc \n#-------------------------------------------------------------------------------------------------------------------------\ndef random_item(df, item_column):\n \"\"\"Tirage aléatoire de lignes de dataframe et renvoie un nouveau dataframe\"\"\"\n \n new_df = pd.DataFrame([], columns=df.columns)\n building_lst = df[item_column].unique()\n indices_keep = []\n\n for building in building_lst:\n\n # Filtre du dataframe sur les lignes ayant ce nom de bâtiment\n building_df = df[df[item_column] == building]\n # Sélection unique\n #building_count.append(building)\n\n # Tirage aléatoite d'un indice de ligne sur ce dataframe filtré\n year_building_ind = list(np.random.choice(list(building_df.index), 1))[0]\n indices_keep.append(year_building_ind)\n\n # Création d'un dataframe de ligne à ajouter\n #raw_to_add = pd.DataFrame(df.iloc[year_building_ind]).T\n\n # Concaténation avec le nouveau dataframe\n #new_df = pd.concat([new_df, raw_to_add],axis=0)\n \n # Suppression des indices de lignes \n new_df = df.iloc[indices_keep, range(len(df.columns))]\n\n return new_df\n\n#-------------------------------------------------------------------------------------------------------------------------\n# Comparaison de listes\n#-------------------------------------------------------------------------------------------------------------------------\ndef common_elements(list1, list2):\n \"\"\"Check of common values of two lists\"\"\"\n \n print(f\"Liste 1: {len(list1)}\")\n print(f\"Liste 2: {len(list2)}\")\n \n print(\"\\n\")\n print(f\"Common elements: \")\n \n return list(set(list1) & set(list2))\n\ndef separate_elements(list1, list2):\n \"\"\"Check of different values of two lists\"\"\"\n \n print(f\"Liste 1: {len(list1)}\")\n print(f\"Liste 2: {len(list2)}\")\n \n print(\"\\n\")\n print(f\"Separate elements: \")\n \n return list(set(list1) ^ set(list2))"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
flymin/robustbench | [
"c51d44e5c9f9425d0a2146dbfd5c54d86ea11609"
] | [
"robustbench/model_zoo/cifar100.py"
] | [
"from collections import OrderedDict\n\nimport torch\n\nfrom robustbench.model_zoo.architectures.dm_wide_resnet import CIFAR100_MEAN, CIFAR100_STD, \\\n DMWideResNet, Swish\nfrom robustbench.model_zoo.architectures.resnet import PreActBlock, PreActResNet\nfrom robustbench.model_zoo.architectures.resnext import CifarResNeXt, ResNeXtBottleneck\nfrom robustbench.model_zoo.architectures.wide_resnet import WideResNet\nfrom robustbench.model_zoo.enums import ThreatModel\n\n\nclass Chen2020EfficientNet(WideResNet):\n def __init__(self, depth=34, widen_factor=10):\n super().__init__(depth=depth,\n widen_factor=widen_factor,\n sub_block1=True,\n num_classes=100)\n self.register_buffer(\n 'mu',\n torch.tensor([0.5071, 0.4867, 0.4408]).view(1, 3, 1, 1))\n self.register_buffer(\n 'sigma',\n torch.tensor([0.2675, 0.2565, 0.2761]).view(1, 3, 1, 1))\n\n def forward(self, x):\n x = (x - self.mu) / self.sigma\n return super().forward(x)\n\n\nclass Wu2020AdversarialNet(WideResNet):\n def __init__(self, depth=34, widen_factor=10):\n super().__init__(depth=depth,\n widen_factor=widen_factor,\n sub_block1=False,\n num_classes=100)\n self.register_buffer(\n 'mu',\n torch.tensor(\n [0.5070751592371323, 0.48654887331495095,\n 0.4409178433670343]).view(1, 3, 1, 1))\n self.register_buffer(\n 'sigma',\n torch.tensor(\n [0.2673342858792401, 0.2564384629170883,\n 0.27615047132568404]).view(1, 3, 1, 1))\n\n def forward(self, x):\n x = (x - self.mu) / self.sigma\n return super().forward(x)\n\n\nclass Rice2020OverfittingNet(PreActResNet):\n def __init__(self):\n super(Rice2020OverfittingNet, self).__init__(PreActBlock, [2, 2, 2, 2], num_classes=100, bn_before_fc=True, out_shortcut=True)\n self.register_buffer(\n 'mu',\n torch.tensor(\n [0.5070751592371323, 0.48654887331495095, 0.4409178433670343]).view(1, 3, 1, 1))\n self.register_buffer(\n 'sigma',\n torch.tensor(\n [0.2673342858792401, 0.2564384629170883,\n 0.27615047132568404]).view(1, 3, 1, 1))\n\n def forward(self, x):\n x = (x - self.mu) / self.sigma\n return super(Rice2020OverfittingNet, self).forward(x)\n\n\nclass Hendrycks2019UsingNet(WideResNet):\n def __init__(self, depth=28, widen_factor=10):\n super(Hendrycks2019UsingNet, self).__init__(depth=depth,\n widen_factor=widen_factor,\n num_classes=100,\n sub_block1=False)\n\n def forward(self, x):\n x = 2. 
* x - 1.\n return super(Hendrycks2019UsingNet, self).forward(x)\n\n\nclass Hendrycks2020AugMixResNeXtNet(CifarResNeXt):\n def __init__(self, depth=29, cardinality=4, base_width=32):\n super().__init__(ResNeXtBottleneck,\n depth=depth,\n num_classes=100,\n cardinality=cardinality,\n base_width=base_width)\n self.register_buffer('mu', torch.tensor([0.5] * 3).view(1, 3, 1, 1))\n self.register_buffer('sigma', torch.tensor([0.5] * 3).view(1, 3, 1, 1))\n\n def forward(self, x):\n x = (x - self.mu) / self.sigma\n return super().forward(x)\n\n\nclass Hendrycks2020AugMixWRNNet(WideResNet):\n def __init__(self, depth=40, widen_factor=2):\n super().__init__(depth=depth,\n widen_factor=widen_factor,\n sub_block1=False,\n num_classes=100)\n self.register_buffer('mu', torch.tensor([0.5] * 3).view(1, 3, 1, 1))\n self.register_buffer('sigma', torch.tensor([0.5] * 3).view(1, 3, 1, 1))\n\n def forward(self, x):\n x = (x - self.mu) / self.sigma\n return super().forward(x)\n\n\nlinf = OrderedDict([\n ('Gowal2020Uncovering', {\n 'model':\n lambda: DMWideResNet(num_classes=100,\n depth=70,\n width=16,\n activation_fn=Swish,\n mean=CIFAR100_MEAN,\n std=CIFAR100_STD),\n 'gdrive_id':\n \"16I86x2Vv_HCRKROC86G4dQKgO3Po5mT3\"\n }),\n ('Gowal2020Uncovering_extra', {\n 'model':\n lambda: DMWideResNet(num_classes=100,\n depth=70,\n width=16,\n activation_fn=Swish,\n mean=CIFAR100_MEAN,\n std=CIFAR100_STD),\n 'gdrive_id':\n \"1LQBdwO2b391mg7VKcP6I0HIOpC6O83gn\"\n }),\n ('Cui2020Learnable_34_20_LBGAT6', {\n 'model':\n lambda: WideResNet(\n depth=34, widen_factor=20, num_classes=100, sub_block1=True),\n 'gdrive_id':\n '1rN76st8q_32j6Uo8DI5XhcC2cwVhXBwK'\n }),\n ('Cui2020Learnable_34_10_LBGAT0', {\n 'model':\n lambda: WideResNet(\n depth=34, widen_factor=10, num_classes=100, sub_block1=True),\n 'gdrive_id':\n '1RnWbGxN-A-ltsfOvulr68U6i2L8ohAJi'\n }),\n ('Cui2020Learnable_34_10_LBGAT6', {\n 'model':\n lambda: WideResNet(\n depth=34, widen_factor=10, num_classes=100, sub_block1=True),\n 'gdrive_id':\n '1TfIgvW3BAkL8jL9J7AAWFSLW3SSzJ2AE'\n }),\n ('Chen2020Efficient', {\n 'model': Chen2020EfficientNet,\n 'gdrive_id': '1JEh95fvsfKireoELoVCBxOi12IPGFDUT'\n }),\n ('Wu2020Adversarial', {\n 'model': Wu2020AdversarialNet,\n 'gdrive_id': '1yWGvHmrgjtd9vOpV5zVDqZmeGhCgVYq7'\n }),\n ('Sitawarin2020Improving', {\n 'model':\n lambda: WideResNet(\n depth=34, widen_factor=10, num_classes=100, sub_block1=True),\n 'gdrive_id':\n '1hbpwans776KM1SMbOxISkDx0KR0DW8EN'\n }),\n ('Hendrycks2019Using', {\n 'model': Hendrycks2019UsingNet, \n 'gdrive_id': '1If3tppQsCe5dN8Vbo9ff0tjlKQTTrShd'\n }),\n ('Rice2020Overfitting', {\n 'model': Rice2020OverfittingNet,\n 'gdrive_id': '1XXNZn3fZBOkD1aqNL1cvcD8zZDccyAZ6'\n }),\n ('Rebuffi2021Fixing_70_16_cutmix_ddpm', {\n 'model':\n lambda: DMWideResNet(num_classes=100,\n depth=70,\n width=16,\n activation_fn=Swish,\n mean=CIFAR100_MEAN,\n std=CIFAR100_STD),\n 'gdrive_id': '1-GkVLo9QaRjCJl-by67xda1ySVhYxsLV'\n }),\n ('Rebuffi2021Fixing_28_10_cutmix_ddpm', {\n 'model':\n lambda: DMWideResNet(num_classes=100,\n depth=28,\n width=10,\n activation_fn=Swish,\n mean=CIFAR100_MEAN,\n std=CIFAR100_STD),\n 'gdrive_id': '1-P7cs82Tj6UVx7Coin3tVurVKYwXWA9p'\n }),\n])\n\ncommon_corruptions = OrderedDict([('Hendrycks2020AugMix_WRN', {\n 'model':\n Hendrycks2020AugMixWRNNet,\n 'gdrive_id':\n '1XpFFdCdU9LcDtcyNfo6_BV1RZHKKkBVE'\n}),\n ('Hendrycks2020AugMix_ResNeXt', {\n 'model':\n Hendrycks2020AugMixResNeXtNet,\n 'gdrive_id':\n '1ocnHbvDdOBLvgNr6K7vEYL08hUdkD1Rv'\n })])\n\ncifar_100_models = OrderedDict([(ThreatModel.Linf, 
linf),\n (ThreatModel.corruptions, common_corruptions)])\n"
] | [
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fadykuzman/Rodeo-App | [
"408f4ada22c8e8f52ad37bd5e47d44d9302ebefe"
] | [
"src/readpot/read_pot.py"
] | [
"from time import time\nimport os\nimport pandas as pd\nfrom datetime import datetime\nimport csv\nfrom potentiostat import Potentiostat\n\n\ndef read_pots():\n devlist = os.listdir('/dev')\n\n coms = [c for c in devlist if c.startswith('ttyACM')]\n\n pots = {}\n for c in coms:\n p = Potentiostat('/dev/{}'.format(c))\n _id = p.get_device_id()\n if p not in pots:\n pots['pot{}'.format(_id)] = p\n return pots\n\n#pots = read_pots()\n\ndef chronoamperometry(p, print_values=True, **params):\n\n test_name = params.get('test_name', 'chronoamp')\n curr_range = params.get('curr_range','100uA')\n sample_rate = params.get('sample_rate', 1)\n\n quietValue = params.get('quietValue', 0.0)\n quietTime = params.get('quietTime', 0) \n run_duration = params.get('run_duration', 3000)\n step1_volt = params.get('step1_volt', 0.05)\n step1_duration = params.get('step1_duration', 3000)\n step2_volt = params.get('step2_volt', 0.0)\n step2_duration = params.get('step2_duration', 0)\n\n step = [\n (step1_duration, step1_volt),\n (step2_duration, step2_volt)]\n \n param = {\n 'quietValue': quietValue,\n 'quietTime': quietTime,\n 'step': step\n }\n\n### Setting Parameters ###\n p.set_param(test_name, param)\n p.set_curr_range(curr_range)\n p.set_sample_rate(sample_rate)\n\n### Getting Parameters ###\n out_volt_range = p.get_volt_range()\n### Total Duration Time ###\n step_duration = step1_duration + step2_duration\n time_all = []\n volt_all = []\n current_all = []\n start_time = datetime.now()\n \n while run_duration != 0:\n\n time, volt, current = p.run_test(test_name, display='pbar')\n time_all += time\n volt_all += volt\n current_all += current\n \n run_duration -= step_duration\n\n end_time = datetime.now()\n d = {\n 'start_time': start_time,\n 'end_time': end_time,\n 'time': time_all,\n 'voltage': volt_all,\n 'current': current_all,\n 'quietValue': quietValue,\n 'quietTime': quietTime,\n 'run_duration': run_duration,\n 'step1_duration': step1_duration,\n 'step1_volt': step1_volt,\n 'step2_duration': step2_duration,\n 'step2_volt': step2_volt,\n 'sample_rate': sample_rate,\n 'curr_range': curr_range,\n 'out_volt_range': out_volt_range,\n 'test_name': test_name,\n 'potentio_id': p.get_device_id(),\n 'electrode': params.get('electrode', None)\n }\n \n df = pd.DataFrame(d)\n filename = '{}'.format(\n params.get('filename','./data_chronoamp.csv'))\n newfile = not os.path.exists(filename)\n \n if newfile:\n df.to_csv(filename)\n else:\n df.to_csv(filename, mode='a', header=False)\n\n if print_values:\n print('Time {0}, Voltage {1}, Current {2}'\n .format(time_all, volt_all, current_all))\n print('Out_Volt_range: {}'.format(out_volt_range))\n print('Out_Curr_Range: {}'.format(curr_range))\n \n\ndef cyclic_voltammetry(p, **params):\n # getting Parameters\n quietValue = params.get('quietValue', 0)\n quietTime = params.get('quietTime', 0)\n minVolt = params.get('minVolt', -0.2)\n maxVolt = params.get('maxVolt', 1)\n scanRate = params.get('scanRate', 0.1)\n numCycles = params.get('numCycles', 10)\n shift = params.get('shift', 0.0)\n curr_range = params.get('curr_range', '100uA')\n test_name = params.get('test_name', 'cyclic')\n \n amplitude = 0.5 * ((maxVolt) - (minVolt))\n offset = 0.5 * ((maxVolt) + (minVolt))\n period = int(\n 4* params.get('periodfactor', 1000) * amplitude / scanRate)\n\n param = {\n 'quietValue': quietValue,\n 'quietTime': quietTime,\n 'amplitude': amplitude,\n 'offset': offset,\n 'period': period,\n 'numCycles': numCycles,\n 'shift': shift\n }\n\n # setting parameters\n p.set_param(test_name, 
param)\n p.set_curr_range(curr_range)\n p.set_sample_rate(10)\n # running\n t, v, c = p.run_test(test_name)\n print('Time {0}, Voltage {1}, Current {2}'\n .format(t, v, c)) \n d = {\n 'time': t,\n 'voltage': v,\n 'current': c,\n 'quietValue': quietValue,\n 'quietTime': quietTime,\n 'amplitude': amplitude,\n 'offset': offset,\n 'period': period,\n 'numCycles': numCycles,\n 'shift': shift,\n 'test_name': test_name,\n 'potentio_id': p.get_device_id(),\n 'electrode': params.get('electrode', None)\n }\n df = pd.DataFrame(d)\n try:\n df.to_csv('{}'.format(params.get('filename', './data_cv.csv')),\n mode='a', header=False)\n except:\n df.to_csv('./data_cv.csv')\n\n\n#def export_data()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gdementen/numba | [
"78486e86ff9fbd343cac3dadbc63ec3bc66c75aa"
] | [
"numba/tests/test_sort.py"
] | [
"from __future__ import print_function\n\nimport copy\nimport itertools\nimport math\nimport random\n\nimport numpy as np\n\nfrom numba.compiler import compile_isolated, Flags\nfrom numba import jit, types, utils\nimport numba.unittest_support as unittest\nfrom numba import testing\nfrom .support import TestCase, MemoryLeakMixin\n\nfrom numba.targets.quicksort import make_py_quicksort, make_jit_quicksort\nfrom .timsort import make_py_timsort, make_jit_timsort, MergeRun\n\n\ndef make_temp_list(keys, n):\n return [keys[0]] * n\n\ndef make_temp_array(keys, n):\n return np.empty(n, keys.dtype)\n\n\npy_list_timsort = make_py_timsort(make_temp_list)\n\npy_array_timsort = make_py_timsort(make_temp_array)\n\njit_list_timsort = make_jit_timsort(make_temp_list)\n\njit_array_timsort = make_jit_timsort(make_temp_array)\n\npy_quicksort = make_py_quicksort()\n\njit_quicksort = make_jit_quicksort()\n\n\ndef sort_usecase(val):\n val.sort()\n\ndef sorted_usecase(val):\n return sorted(val)\n\ndef sorted_reverse_usecase(val, b):\n return sorted(val, reverse=b)\n\ndef np_sort_usecase(val):\n return np.sort(val)\n\ndef list_sort_usecase(n):\n np.random.seed(42)\n l = []\n for i in range(n):\n l.append(np.random.random())\n ll = l[:]\n ll.sort()\n return l, ll\n\ndef list_sort_reverse_usecase(n, b):\n np.random.seed(42)\n l = []\n for i in range(n):\n l.append(np.random.random())\n ll = l[:]\n ll.sort(reverse=b)\n return l, ll\n\n\nclass BaseSortingTest(object):\n\n def random_list(self, n, offset=10):\n random.seed(42)\n l = list(range(offset, offset + n))\n random.shuffle(l)\n return l\n\n def sorted_list(self, n, offset=10):\n return list(range(offset, offset + n))\n\n def revsorted_list(self, n, offset=10):\n return list(range(offset, offset + n))[::-1]\n\n def initially_sorted_list(self, n, m=None, offset=10):\n if m is None:\n m = n // 2\n l = self.sorted_list(m, offset)\n l += self.random_list(n - m, offset=l[-1] + offset)\n return l\n\n def duprandom_list(self, n, factor=None, offset=10):\n random.seed(42)\n if factor is None:\n factor = int(math.sqrt(n))\n l = (list(range(offset, offset + (n // factor) + 1)) * (factor + 1))[:n]\n assert len(l) == n\n random.shuffle(l)\n return l\n\n def dupsorted_list(self, n, factor=None, offset=10):\n if factor is None:\n factor = int(math.sqrt(n))\n l = (list(range(offset, offset + (n // factor) + 1)) * (factor + 1))[:n]\n assert len(l) == n, (len(l), n)\n l.sort()\n return l\n\n def assertSorted(self, orig, result):\n self.assertEqual(len(result), len(orig))\n # sorted() returns a list, so make sure we compare to another list\n self.assertEqual(list(result), sorted(orig))\n\n def assertSortedValues(self, orig, orig_values, result, result_values):\n self.assertEqual(len(result), len(orig))\n self.assertEqual(list(result), sorted(orig))\n zip_sorted = sorted(zip(orig, orig_values), key=lambda x: x[0])\n zip_result = list(zip(result, result_values))\n self.assertEqual(zip_sorted, zip_result)\n # Check stability\n for i in range(len(zip_result) - 1):\n (k1, v1), (k2, v2) = zip_result[i], zip_result[i + 1]\n if k1 == k2:\n # Assuming values are unique, which is enforced by the tests\n self.assertLess(orig_values.index(v1), orig_values.index(v2))\n\n def fibo(self):\n a = 1\n b = 1\n while True:\n yield a\n a, b = b, a + b\n\n def make_sample_sorted_lists(self, n):\n lists = []\n for offset in (20, 120):\n lists.append(self.sorted_list(n, offset))\n lists.append(self.dupsorted_list(n, offset))\n return lists\n\n def make_sample_lists(self, n):\n lists = []\n for offset 
in (20, 120):\n lists.append(self.sorted_list(n, offset))\n lists.append(self.dupsorted_list(n, offset))\n lists.append(self.revsorted_list(n, offset))\n lists.append(self.duprandom_list(n, offset))\n return lists\n\n\nclass BaseTimsortTest(BaseSortingTest):\n\n def merge_init(self, keys):\n f = self.timsort.merge_init\n return f(keys)\n\n def test_binarysort(self):\n n = 20\n def check(l, n, start=0):\n res = self.array_factory(l)\n f(res, res, 0, n, start)\n self.assertSorted(l, res)\n\n f = self.timsort.binarysort\n l = self.sorted_list(n)\n check(l, n)\n check(l, n, n//2)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.initially_sorted_list(n, n//2)\n check(l, n)\n check(l, n, n//2)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.random_list(n)\n check(l, n)\n l = self.duprandom_list(n)\n check(l, n)\n\n def test_binarysort_with_values(self):\n n = 20\n v = list(range(100, 100+n))\n\n def check(l, n, start=0):\n res = self.array_factory(l)\n res_v = self.array_factory(v)\n f(res, res_v, 0, n, start)\n self.assertSortedValues(l, v, res, res_v)\n\n f = self.timsort.binarysort\n l = self.sorted_list(n)\n check(l, n)\n check(l, n, n//2)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.initially_sorted_list(n, n//2)\n check(l, n)\n check(l, n, n//2)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.random_list(n)\n check(l, n)\n l = self.duprandom_list(n)\n check(l, n)\n\n def test_count_run(self):\n n = 16\n f = self.timsort.count_run\n\n def check(l, lo, hi):\n n, desc = f(self.array_factory(l), lo, hi)\n # Fully check invariants\n if desc:\n for k in range(lo, lo + n - 1):\n a, b = l[k], l[k + 1]\n self.assertGreater(a, b)\n if lo + n < hi:\n self.assertLessEqual(l[lo + n - 1], l[lo + n])\n else:\n for k in range(lo, lo + n - 1):\n a, b = l[k], l[k + 1]\n self.assertLessEqual(a, b)\n if lo + n < hi:\n self.assertGreater(l[lo + n - 1], l[lo + n], l)\n\n\n l = self.sorted_list(n, offset=100)\n check(l, 0, n)\n check(l, 1, n - 1)\n check(l, 1, 2)\n l = self.revsorted_list(n, offset=100)\n check(l, 0, n)\n check(l, 1, n - 1)\n check(l, 1, 2)\n l = self.random_list(n, offset=100)\n for i in range(len(l) - 1):\n check(l, i, n)\n l = self.duprandom_list(n, offset=100)\n for i in range(len(l) - 1):\n check(l, i, n)\n\n def test_gallop_left(self):\n n = 20\n f = self.timsort.gallop_left\n\n def check(l, key, start, stop, hint):\n k = f(key, l, start, stop, hint)\n # Fully check invariants\n self.assertGreaterEqual(k, start)\n self.assertLessEqual(k, stop)\n if k > start:\n self.assertLess(l[k - 1], key)\n if k < stop:\n self.assertGreaterEqual(l[k], key)\n\n def check_all_hints(l, key, start, stop):\n for hint in range(start, stop):\n check(l, key, start, stop, hint)\n\n def check_sorted_list(l):\n l = self.array_factory(l)\n for key in (l[5], l[15], l[0], -1000, l[-1], 1000):\n check_all_hints(l, key, 0, n)\n check_all_hints(l, key, 1, n - 1)\n check_all_hints(l, key, 8, n - 8)\n\n l = self.sorted_list(n, offset=100)\n check_sorted_list(l)\n l = self.dupsorted_list(n, offset=100)\n check_sorted_list(l)\n\n def test_gallop_right(self):\n n = 20\n f = self.timsort.gallop_right\n\n def check(l, key, start, stop, hint):\n k = f(key, l, start, stop, hint)\n # Fully check invariants\n self.assertGreaterEqual(k, start)\n self.assertLessEqual(k, stop)\n if k > start:\n self.assertLessEqual(l[k - 1], key)\n if k < stop:\n self.assertGreater(l[k], key)\n\n def check_all_hints(l, key, start, stop):\n for hint in range(start, stop):\n check(l, key, start, stop, hint)\n\n def 
check_sorted_list(l):\n l = self.array_factory(l)\n for key in (l[5], l[15], l[0], -1000, l[-1], 1000):\n check_all_hints(l, key, 0, n)\n check_all_hints(l, key, 1, n - 1)\n check_all_hints(l, key, 8, n - 8)\n\n l = self.sorted_list(n, offset=100)\n check_sorted_list(l)\n l = self.dupsorted_list(n, offset=100)\n check_sorted_list(l)\n\n def test_merge_compute_minrun(self):\n f = self.timsort.merge_compute_minrun\n\n for i in range(0, 64):\n self.assertEqual(f(i), i)\n for i in range(6, 63):\n self.assertEqual(f(2**i), 32)\n for i in self.fibo():\n if i < 64:\n continue\n if i >= 2 ** 63:\n break\n k = f(i)\n self.assertGreaterEqual(k, 32)\n self.assertLessEqual(k, 64)\n if i > 500:\n # i/k is close to, but strictly less than, an exact power of 2\n quot = i // k\n p = 2 ** utils.bit_length(quot)\n self.assertLess(quot, p)\n self.assertGreaterEqual(quot, 0.9 * p)\n\n def check_merge_lo_hi(self, func, a, b):\n na = len(a)\n nb = len(b)\n\n # Add sentinels at start and end, to check they weren't moved\n orig_keys = [42] + a + b + [-42]\n keys = self.array_factory(orig_keys)\n ms = self.merge_init(keys)\n ssa = 1\n ssb = ssa + na\n\n #new_ms = func(ms, keys, [], ssa, na, ssb, nb)\n new_ms = func(ms, keys, keys, ssa, na, ssb, nb)\n self.assertEqual(keys[0], orig_keys[0])\n self.assertEqual(keys[-1], orig_keys[-1])\n self.assertSorted(orig_keys[1:-1], keys[1:-1])\n # Check the MergeState result\n self.assertGreaterEqual(len(new_ms.keys), len(ms.keys))\n self.assertGreaterEqual(len(new_ms.values), len(ms.values))\n self.assertIs(new_ms.pending, ms.pending)\n self.assertGreaterEqual(new_ms.min_gallop, 1)\n\n def test_merge_lo_hi(self):\n f_lo = self.timsort.merge_lo\n f_hi = self.timsort.merge_hi\n\n # The larger sizes exercise galloping\n for (na, nb) in [(12, 16), (40, 40), (100, 110), (1000, 1100)]:\n for a, b in itertools.product(self.make_sample_sorted_lists(na),\n self.make_sample_sorted_lists(nb)):\n self.check_merge_lo_hi(f_lo, a, b)\n self.check_merge_lo_hi(f_hi, b, a)\n\n def check_merge_at(self, a, b):\n f = self.timsort.merge_at\n # Prepare the array to be sorted\n na = len(a)\n nb = len(b)\n # Add sentinels at start and end, to check they weren't moved\n orig_keys = [42] + a + b + [-42]\n ssa = 1\n ssb = ssa + na\n\n stack_sentinel = MergeRun(-42, -42)\n\n def run_merge_at(ms, keys, i):\n new_ms = f(ms, keys, keys, i)\n self.assertEqual(keys[0], orig_keys[0])\n self.assertEqual(keys[-1], orig_keys[-1])\n self.assertSorted(orig_keys[1:-1], keys[1:-1])\n # Check stack state\n self.assertIs(new_ms.pending, ms.pending)\n self.assertEqual(ms.pending[i], (ssa, na + nb))\n self.assertEqual(ms.pending[0], stack_sentinel)\n return new_ms\n\n # First check with i == len(stack) - 2\n keys = self.array_factory(orig_keys)\n ms = self.merge_init(keys)\n # Push sentinel on stack, to check it was't touched\n ms = self.timsort.merge_append(ms, stack_sentinel)\n i = ms.n\n ms = self.timsort.merge_append(ms, MergeRun(ssa, na))\n ms = self.timsort.merge_append(ms, MergeRun(ssb, nb))\n ms = run_merge_at(ms, keys, i)\n self.assertEqual(ms.n, i + 1)\n\n # Now check with i == len(stack) - 3\n keys = self.array_factory(orig_keys)\n ms = self.merge_init(keys)\n # Push sentinel on stack, to check it was't touched\n ms = self.timsort.merge_append(ms, stack_sentinel)\n i = ms.n\n ms = self.timsort.merge_append(ms, MergeRun(ssa, na))\n ms = self.timsort.merge_append(ms, MergeRun(ssb, nb))\n # A last run (trivial here)\n last_run = MergeRun(ssb + nb, 1)\n ms = self.timsort.merge_append(ms, last_run)\n ms = 
run_merge_at(ms, keys, i)\n self.assertEqual(ms.n, i + 2)\n self.assertEqual(ms.pending[ms.n - 1], last_run)\n\n def test_merge_at(self):\n # The larger sizes exercise galloping\n for (na, nb) in [(12, 16), (40, 40), (100, 110), (500, 510)]:\n for a, b in itertools.product(self.make_sample_sorted_lists(na),\n self.make_sample_sorted_lists(nb)):\n self.check_merge_at(a, b)\n self.check_merge_at(b, a)\n\n def test_merge_force_collapse(self):\n f = self.timsort.merge_force_collapse\n\n # Test with runs of ascending sizes, then descending sizes\n sizes_list = [(8, 10, 15, 20)]\n sizes_list.append(sizes_list[0][::-1])\n\n for sizes in sizes_list:\n for chunks in itertools.product(*(self.make_sample_sorted_lists(n)\n for n in sizes)):\n # Create runs of the given sizes\n orig_keys = sum(chunks, [])\n keys = self.array_factory(orig_keys)\n ms = self.merge_init(keys)\n pos = 0\n for c in chunks:\n ms = self.timsort.merge_append(ms, MergeRun(pos, len(c)))\n pos += len(c)\n # Sanity check\n self.assertEqual(sum(ms.pending[ms.n - 1]), len(keys))\n # Now merge the runs\n ms = f(ms, keys, keys)\n # Remaining run is the whole list\n self.assertEqual(ms.n, 1)\n self.assertEqual(ms.pending[0], MergeRun(0, len(keys)))\n # The list is now sorted\n self.assertSorted(orig_keys, keys)\n\n def test_run_timsort(self):\n f = self.timsort.run_timsort\n\n for size_factor in (1, 10):\n # Make lists to be sorted from three chunks of different kinds.\n sizes = (15, 30, 20)\n\n all_lists = [self.make_sample_lists(n * size_factor) for n in sizes]\n for chunks in itertools.product(*all_lists):\n orig_keys = sum(chunks, [])\n keys = self.array_factory(orig_keys)\n f(keys)\n # The list is now sorted\n self.assertSorted(orig_keys, keys)\n\n def test_run_timsort_with_values(self):\n # Run timsort, but also with a values array\n f = self.timsort.run_timsort_with_values\n\n for size_factor in (1, 5):\n chunk_size = 80 * size_factor\n a = self.dupsorted_list(chunk_size)\n b = self.duprandom_list(chunk_size)\n c = self.revsorted_list(chunk_size)\n orig_keys = a + b + c\n orig_values = list(range(1000, 1000 + len(orig_keys)))\n\n keys = self.array_factory(orig_keys)\n values = self.array_factory(orig_values)\n f(keys, values)\n # This checks sort stability\n self.assertSortedValues(orig_keys, orig_values, keys, values)\n\n\nclass TestTimsortPurePython(BaseTimsortTest, TestCase):\n\n timsort = py_list_timsort\n\n # Much faster than a Numpy array in pure Python\n array_factory = list\n\n\nclass TestTimsortArraysPurePython(BaseTimsortTest, TestCase):\n\n timsort = py_array_timsort\n\n def array_factory(self, lst):\n return np.array(lst, dtype=np.int32)\n\n\nclass JITTimsortMixin(object):\n\n timsort = jit_array_timsort\n\n test_merge_at = None\n test_merge_force_collapse = None\n\n def wrap_with_mergestate(self, timsort, func, _cache={}):\n \"\"\"\n Wrap *func* into another compiled function inserting a runtime-created\n mergestate as the first function argument.\n \"\"\"\n key = timsort, func\n if key in _cache:\n return _cache[key]\n\n merge_init = timsort.merge_init\n\n @timsort.compile\n def wrapper(keys, values, *args):\n ms = merge_init(keys)\n res = func(ms, keys, values, *args)\n return res\n\n _cache[key] = wrapper\n return wrapper\n\n\nclass TestTimsortArrays(JITTimsortMixin, BaseTimsortTest, TestCase):\n\n def array_factory(self, lst):\n return np.array(lst, dtype=np.int32)\n\n def check_merge_lo_hi(self, func, a, b):\n na = len(a)\n nb = len(b)\n\n func = self.wrap_with_mergestate(self.timsort, func)\n\n # Add sentinels 
at start and end, to check they weren't moved\n orig_keys = [42] + a + b + [-42]\n keys = self.array_factory(orig_keys)\n ssa = 1\n ssb = ssa + na\n\n new_ms = func(keys, keys, ssa, na, ssb, nb)\n self.assertEqual(keys[0], orig_keys[0])\n self.assertEqual(keys[-1], orig_keys[-1])\n self.assertSorted(orig_keys[1:-1], keys[1:-1])\n\n\n\nclass BaseQuicksortTest(BaseSortingTest):\n\n def test_insertion_sort(self):\n n = 20\n def check(l, n):\n res = self.array_factory([9999] + l + [-9999])\n f(res, 1, n)\n self.assertEqual(res[0], 9999)\n self.assertEqual(res[-1], -9999)\n self.assertSorted(l, res[1:-1])\n\n f = self.quicksort.insertion_sort\n l = self.sorted_list(n)\n check(l, n)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.initially_sorted_list(n, n//2)\n check(l, n)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.random_list(n)\n check(l, n)\n l = self.duprandom_list(n)\n check(l, n)\n\n def test_partition(self):\n n = 20\n def check(l, n):\n res = self.array_factory([9999] + l + [-9999])\n index = f(res, 1, n)\n self.assertEqual(res[0], 9999)\n self.assertEqual(res[-1], -9999)\n pivot = res[index]\n for i in range(1, index):\n self.assertLessEqual(res[i], pivot)\n for i in range(index + 1, n):\n self.assertGreaterEqual(res[i], pivot)\n\n f = self.quicksort.partition\n l = self.sorted_list(n)\n check(l, n)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.initially_sorted_list(n, n//2)\n check(l, n)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.random_list(n)\n check(l, n)\n l = self.duprandom_list(n)\n check(l, n)\n\n def test_partition3(self):\n # Test the unused partition3() function\n n = 20\n def check(l, n):\n res = self.array_factory([9999] + l + [-9999])\n lt, gt = f(res, 1, n)\n self.assertEqual(res[0], 9999)\n self.assertEqual(res[-1], -9999)\n pivot = res[lt]\n for i in range(1, lt):\n self.assertLessEqual(res[i], pivot)\n for i in range(lt, gt + 1):\n self.assertEqual(res[i], pivot)\n for i in range(gt + 1, n):\n self.assertGreater(res[i], pivot)\n\n f = self.quicksort.partition3\n l = self.sorted_list(n)\n check(l, n)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.initially_sorted_list(n, n//2)\n check(l, n)\n l = self.revsorted_list(n)\n check(l, n)\n l = self.random_list(n)\n check(l, n)\n l = self.duprandom_list(n)\n check(l, n)\n\n def test_run_quicksort(self):\n f = self.quicksort.run_quicksort\n\n for size_factor in (1, 5):\n # Make lists to be sorted from two chunks of different kinds.\n sizes = (15, 20)\n\n all_lists = [self.make_sample_lists(n * size_factor) for n in sizes]\n for chunks in itertools.product(*all_lists):\n orig_keys = sum(chunks, [])\n keys = self.array_factory(orig_keys)\n f(keys)\n # The list is now sorted\n self.assertSorted(orig_keys, keys)\n\n def test_run_quicksort_lt(self):\n def lt(a, b):\n return a > b\n\n f = self.make_quicksort(lt=lt).run_quicksort\n\n for size_factor in (1, 5):\n # Make lists to be sorted from two chunks of different kinds.\n sizes = (15, 20)\n\n all_lists = [self.make_sample_lists(n * size_factor) for n in sizes]\n for chunks in itertools.product(*all_lists):\n orig_keys = sum(chunks, [])\n keys = self.array_factory(orig_keys)\n f(keys)\n # The list is now rev-sorted\n self.assertSorted(orig_keys, keys[::-1])\n\n # An imperfect comparison function, as LT(a, b) does not imply not LT(b, a).\n # The sort should handle it gracefully.\n def lt_floats(a, b):\n return math.isnan(b) or a < b\n\n f = self.make_quicksort(lt=lt_floats).run_quicksort\n\n np.random.seed(42)\n for size in (5, 20, 50, 
500):\n orig = np.random.random(size=size) * 100\n orig[np.random.random(size=size) < 0.1] = float('nan')\n orig_keys = list(orig)\n keys = self.array_factory(orig_keys)\n f(keys)\n non_nans = orig[~np.isnan(orig)]\n # Non-NaNs are sorted at the front\n self.assertSorted(non_nans, keys[:len(non_nans)])\n\n\nclass TestQuicksortPurePython(BaseQuicksortTest, TestCase):\n\n quicksort = py_quicksort\n make_quicksort = staticmethod(make_py_quicksort)\n\n # Much faster than a Numpy array in pure Python\n array_factory = list\n\n\nclass TestQuicksortArrays(BaseQuicksortTest, TestCase):\n\n quicksort = jit_quicksort\n make_quicksort = staticmethod(make_jit_quicksort)\n\n def array_factory(self, lst):\n return np.array(lst, dtype=np.float64)\n\n\nclass TestNumpySort(TestCase):\n\n def setUp(self):\n np.random.seed(42)\n\n def check_sort_inplace(self, pyfunc, cfunc, val):\n expected = copy.copy(val)\n got = copy.copy(val)\n pyfunc(expected)\n cfunc(got)\n self.assertPreciseEqual(got, expected)\n\n def check_sort_copy(self, pyfunc, cfunc, val):\n orig = copy.copy(val)\n expected = pyfunc(val)\n got = cfunc(val)\n self.assertPreciseEqual(got, expected)\n # The original wasn't mutated\n self.assertPreciseEqual(val, orig)\n\n def test_array_sort_int(self):\n pyfunc = sort_usecase\n cfunc = jit(nopython=True)(pyfunc)\n\n for size in (5, 20, 50, 500):\n orig = np.random.randint(99, size=size)\n self.check_sort_inplace(pyfunc, cfunc, orig)\n\n def test_array_sort_float(self):\n pyfunc = sort_usecase\n cfunc = jit(nopython=True)(pyfunc)\n\n for size in (5, 20, 50, 500):\n orig = np.random.random(size=size) * 100\n self.check_sort_inplace(pyfunc, cfunc, orig)\n\n # Now with NaNs. Numpy sorts them at the end.\n for size in (5, 20, 50, 500):\n orig = np.random.random(size=size) * 100\n orig[np.random.random(size=size) < 0.1] = float('nan')\n self.check_sort_inplace(pyfunc, cfunc, orig)\n\n def test_np_sort_int(self):\n pyfunc = np_sort_usecase\n cfunc = jit(nopython=True)(pyfunc)\n\n for size in (5, 20, 50, 500):\n orig = np.random.randint(99, size=size)\n self.check_sort_copy(pyfunc, cfunc, orig)\n\n def test_np_sort_float(self):\n pyfunc = np_sort_usecase\n cfunc = jit(nopython=True)(pyfunc)\n\n for size in (5, 20, 50, 500):\n orig = np.random.random(size=size) * 100\n orig[np.random.random(size=size) < 0.1] = float('nan')\n self.check_sort_copy(pyfunc, cfunc, orig)\n\n\nclass TestPythonSort(TestCase):\n\n def test_list_sort(self):\n pyfunc = list_sort_usecase\n cfunc = jit(nopython=True)(pyfunc)\n\n for size in (20, 50, 500):\n orig, ret = cfunc(size)\n self.assertEqual(sorted(orig), ret)\n self.assertNotEqual(orig, ret) # sanity check\n\n def test_list_sort_reverse(self):\n pyfunc = list_sort_reverse_usecase\n cfunc = jit(nopython=True)(pyfunc)\n\n for size in (20, 50, 500):\n for b in (False, True):\n orig, ret = cfunc(size, b)\n self.assertEqual(sorted(orig, reverse=b), ret)\n self.assertNotEqual(orig, ret) # sanity check\n\n def test_sorted(self):\n pyfunc = sorted_usecase\n cfunc = jit(nopython=True)(pyfunc)\n\n for size in (20, 50, 500):\n orig = np.random.random(size=size) * 100\n expected = sorted(orig)\n got = cfunc(orig)\n self.assertPreciseEqual(got, expected)\n self.assertNotEqual(list(orig), got) # sanity check\n\n def test_sorted_reverse(self):\n pyfunc = sorted_reverse_usecase\n cfunc = jit(nopython=True)(pyfunc)\n size = 20\n\n orig = np.random.random(size=size) * 100\n for b in (False, True):\n expected = sorted(orig, reverse=b)\n got = cfunc(orig, b)\n self.assertPreciseEqual(got, 
expected)\n self.assertNotEqual(list(orig), got) # sanity check\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.random.random",
"numpy.random.seed",
"numpy.isnan",
"numpy.sort",
"numpy.array",
"numpy.empty",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Shreepadahs/computer_vision | [
"9380789c5fe47069aa58b33b59fdb15ead528e84"
] | [
"contrib/document_cleanup/light_weight_document_cleanup_ICDAR2021/infer.py"
] | [
"import tensorflow as tf\r\nfrom tensorflow.keras import datasets, layers, models\r\nfrom tensorflow.keras.models import Model, load_model\r\n\r\nfrom tensorflow.keras.models import model_from_json\r\nfrom tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D\r\n\r\nimport os\r\nfrom model import convert2gray\r\nfrom utils import GetOverlappingBlocks, CombineToImage,load_tf_img,getListOfFiles\r\nfrom tqdm import tqdm\r\nimport cv2\r\nimport numpy as np\r\n\r\n#os.environ[\"CUDA_VISIBLE_DEVICES\"]= '0'\r\n\r\n#gpu_devices = tf.config.experimental.list_physical_devices('GPU')\r\n#tf.config.experimental.set_memory_growth(gpu_devices[0], True)\r\n\r\n\r\n\r\n\r\ndef prepare_data_blocks(blocks,size):\r\n\tdata = []\r\n\tfor block in blocks:\r\n\t\tdata.append(load_tf_img(block,size))\r\n\t#blocks = []\r\n\treturn data\r\n\t\r\n\r\ndef infer(model_name,model_weight,target_dir,save_out_dir,block_size=(256,256),batch_size=1):\r\n\tjson_file = open(model_name, 'r')\r\n\tloaded_model_json = json_file.read()\r\n\tjson_file.close()\r\n\tmodel = model_from_json(loaded_model_json,custom_objects={'relu6': tf.nn.relu6, 'convert2gray': convert2gray})\r\n\r\n\r\n\tmodel.summary()\r\n\t#exit(0)\r\n\r\n\tmodel.compile(optimizer='adam', loss = 'mean_squared_error')\r\n\r\n\tmodel.load_weights(model_weight)\r\n\t\r\n\tif not os.path.exists(save_out_dir):\r\n\t\tos.makedirs(save_out_dir)\r\n\t\r\n\tM = block_size[0]\r\n\tN = block_size[1]\t\r\n\tpart = 8\r\n\tfilelists = getListOfFiles(target_dir)\r\n\tfor filename in tqdm(filelists):\r\n\t\tinitial_filename = os.path.splitext(filename)[0]\r\n\t\tin1_filename = os.path.join(target_dir,filename) \r\n\t\tin_clr = cv2.imread(in1_filename,1)\r\n\t\tin1_image = cv2.cvtColor(in_clr, cv2.COLOR_BGR2RGB)\r\n\t\tin1_img = GetOverlappingBlocks(in1_image.copy(),M,N,part)\r\n\t\tprepared_data_blocks = prepare_data_blocks(in1_img,M)\r\n\t\tin1_img = []\r\n\t\tout_img1 = model.predict(tf.convert_to_tensor(prepared_data_blocks), batch_size=batch_size)\r\n\t\tnum_img,ht,wd,ch_out = out_img1.shape\r\n\t\th,w,ch = in_clr.shape\r\n\t\tif(ch_out>1):\r\n\t\t\tc_image = cv2.cvtColor(CombineToImage(out_img1,h,w,ch_out), cv2.COLOR_RGB2BGR,part)\r\n\t\t\tout_image_name = initial_filename + '.png'\r\n\t\t\tname_fig = os.path.join(save_out_dir, out_image_name)\r\n\t\t\tcv2.imwrite(name_fig,c_image)\r\n\t\telse:\r\n\t\t\tc_image = CombineToImage(out_img1,h,w,ch_out,part)\r\n\t\t\tout_image_name = initial_filename + '.png'\r\n\t\t\tname_fig = os.path.join(save_out_dir, out_image_name)\r\n\t\t\tcv2.imwrite(name_fig,c_image)\r\n\r\ndef infer_image(model_name,model_weight,target_image,out_image_name,block_size=(256,256),batch_size=1):\r\n\tjson_file = open(model_name, 'r')\r\n\tloaded_model_json = json_file.read()\r\n\tjson_file.close()\r\n\tmodel = model_from_json(loaded_model_json,custom_objects={'relu6': tf.nn.relu6})\r\n\t#model = model_from_json(loaded_model_json,custom_objects={'HeNormal':tf.keras.initializers.he_normal(),'relu6': tf.nn.relu6, 'convert2gray': convert2gray,'Functional':tf.keras.models.Model})\r\n\r\n\r\n\tmodel.summary()\r\n\t#exit(0)\r\n\r\n\tmodel.compile(optimizer='adam', loss = 'mean_squared_error')\r\n\r\n\tmodel.load_weights(model_weight)\r\n\t\r\n\t#if not os.path.exists(save_out_dir):\r\n\t#\tos.makedirs(save_out_dir)\r\n\t\r\n\tM = block_size[0]\r\n\tN = block_size[1]\t\r\n\t#print(M,N)\r\n\tpart = 8\r\n\tin_clr = cv2.imread(target_image,1)\r\n\tin1_image 
= cv2.cvtColor(in_clr, cv2.COLOR_BGR2RGB)\r\n\tin1_img = GetOverlappingBlocks(in1_image.copy(),M,N,part)\r\n\t#print(len(in1_img))\r\n\tprepared_data_blocks = prepare_data_blocks(in1_img,M)\r\n\tin1_img = []\r\n\t#prepared_data_blocks = NewGetOverlappingBlocks(in_clr.copy(),M,N,part)\r\n\t\r\n\tout_img1 = model.predict(tf.convert_to_tensor(prepared_data_blocks), batch_size=batch_size)\r\n\t\r\n\tnum_img,ht,wd,ch_out = out_img1.shape\r\n\th,w,ch = in_clr.shape\r\n\t#print(num_img)\r\n\r\n\tif(ch_out>1):\r\n\t\tc_image = cv2.cvtColor(CombineToImage(out_img1,h,w,ch_out), cv2.COLOR_RGB2BGR,part)\r\n\t\tcv2.imwrite(out_image_name,c_image)\r\n\telse:\r\n\t\tc_image = CombineToImage(out_img1,h,w,ch_out,part)\r\n\t\tcv2.imwrite(out_image_name,c_image)\r\n"
] | [
[
"tensorflow.keras.models.model_from_json",
"tensorflow.convert_to_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
nestauk/funding_analytics_eu | [
"42fad368a90af2d0530ad83ac5c443c04fda1c73"
] | [
"eu_funding/utils/misc_utils.py"
] | [
"import numpy as np\n\ndef print_nested_structure(j, level=0):\n '''print_nested_structure\n Prints a list of all keys in a dictionary. The order and indentation shows any nested strucutre.\n \n Args:\n j (dict):\n level (int): Defaults to 0\n '''\n for k, v in j.items():\n print(' '*level, k)\n if isinstance(v, dict):\n print_nested_structure(v, level=level+1)\n elif (v is not None) & (type(v) != str):\n if isinstance(v[1], dict):\n print_nested_structure(v[0], level=level+1)\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\ndef generate_mapping(df, col_1, col_2):\n mapping = {}\n for a, b in zip(df[col_1], df[col_2]):\n if a in mapping:\n continue\n else:\n if ~pd.isnull(b):\n mapping[a] = b\n return mapping\n\nclass Groupby:\n def __init__(self, keys):\n \"\"\"\n :param keys: List of group identifiers. Both __init__ and apply will run\n much faster if keys is already sorted.\n \"\"\"\n try:\n already_sorted = np.issubdtype(keys.dtype, np.number) and (np.all(np.diff(keys) >= 0))\n except ValueError:\n already_sorted = False\n if already_sorted:\n keys = np.squeeze(keys)\n if keys.ndim > 1:\n raise ValueError('keys should be 1-dimensional')\n\n self.already_sorted = True\n new_idx = np.concatenate(([1], np.diff(keys) != 0))\n self.first_occurrences = np.where(new_idx)[0]\n self.keys_as_int = np.cumsum(new_idx) - 1\n assert isinstance(self.keys_as_int, np.ndarray)\n self.n_keys = self.keys_as_int[-1] + 1\n\n else:\n self.already_sorted = False\n _, self.first_occurrences, self.keys_as_int = \\\n np.unique(keys, return_index=True, return_inverse=True)\n self.n_keys = max(self.keys_as_int) + 1\n self.indices = self._set_indices()\n\n def _set_indices(self):\n if self.already_sorted:\n indices = [slice(i, j) for i, j in zip(self.first_occurrences[:-1],\n self.first_occurrences[1:])]\n assert isinstance(indices, list)\n indices.append(slice(self.first_occurrences[-1], len(self.keys_as_int)))\n indices = np.array(indices)\n else:\n indices = [[] for _ in range(self.n_keys)]\n for i, k in enumerate(self.keys_as_int):\n indices[k].append(i)\n indices = np.array([np.array(elt) for elt in indices])\n return indices\n\n def apply(self, function_, array, broadcast=True, shape=None, order='c'):\n \"\"\"\n Applies a function to each group, where groups are defined by self.keys_as_int (or, equivalently, as the\n argument of __init__.)\n If broadcast=True, first dimension of output will equal first dimension of \"array\", as in Pandas \"transform\".\n If broadcast=False, first dimension of output equals self.n_keys, as in Pandas \"groupby\".\n :param function_: function to be applied to each group\n :param array: np.ndarray or similar. Should have same first dimension as self.keys_as_int.\n :param broadcast: bool\n :param shape: Shape of output. Can be up to 3-dimensional.\n First dimension must be array.shape[0] (if broadcast=True)\n or self.n_keys (if broadcast=False). 
Default is for output to be one-dimensional.\n :param order: Should output be c-ordered or fortran-ordered?\n :return:\n :rtype: np.ndarray\n \"\"\"\n if broadcast:\n result = np.zeros(array.shape[0] if shape is None else shape, order=order)\n assert result.shape[0] == array.shape[0]\n\n # np.take doesn't allow slice arguments, so this has to be more verbose than when not already sorted\n if self.already_sorted:\n if array.ndim == 1:\n for k, idx in enumerate(self.indices):\n result[idx] = function_(array[idx])\n elif array.ndim == 2:\n for k, idx in enumerate(self.indices):\n result[idx] = function_(array[idx, :])\n elif array.ndim == 3:\n for k, idx in enumerate(self.indices):\n result[idx] = function_(array[idx, :, :])\n else:\n raise NotImplementedError('Can\\'t have more than 3 dims')\n else:\n for k, idx in enumerate(self.indices):\n result[idx] = function_(np.take(array, idx, 0))\n\n else:\n result = np.zeros(self.n_keys if shape is None else shape, order=order)\n assert result.shape[0] == self.n_keys\n if self.already_sorted:\n if array.ndim == 1:\n for k, idx in enumerate(self.indices):\n result[k] = function_(array[idx])\n elif array.ndim == 2:\n for k, idx in enumerate(self.indices):\n result[k] = function_(array[idx, :])\n elif array.ndim == 3:\n for k, idx in enumerate(self.indices):\n result[k] = function_(array[idx, :, :])\n else:\n raise NotImplementedError('Can\\'t have more than 3 dims')\n\n else:\n for k, idx in enumerate(self.indices):\n result[self.keys_as_int[self.first_occurrences[k]]] \\\n = function_(np.take(array, idx, 0))\n\n return result\n"
] | [
[
"numpy.take",
"numpy.unique",
"numpy.issubdtype",
"numpy.squeeze",
"numpy.cumsum",
"numpy.diff",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eartser/hyperstyle-analyze | [
"58e2d361662e73e1e047919f57ab840055783b7a",
"58e2d361662e73e1e047919f57ab840055783b7a"
] | [
"analysis/src/python/data_analysis/statistics/issues_change_statistics.py",
"analysis/test/python/evaluation/issues_statistics/test_get_raw_issues.py"
] | [
"import argparse\nimport logging\nimport sys\nfrom typing import List\n\nimport pandas as pd\n\nfrom analysis.src.python.data_analysis.model.column_name import IssuesColumns, SubmissionColumns\nfrom analysis.src.python.data_analysis.utils.df_utils import merge_dfs\nfrom analysis.src.python.data_analysis.utils.statistics_utils import get_statistics_by_group\n\n\ndef calculate_issues_change_statistics(df_issues_statistics: pd.DataFrame,\n issues_classes: List[str]):\n \"\"\" Calculate issues count diff between previous and current attempt in one submissions series. \"\"\"\n\n df_issues_statistics = df_issues_statistics.sort_values([SubmissionColumns.ATTEMPT])\n\n issues_change_statistics = {\n SubmissionColumns.ID: df_issues_statistics[SubmissionColumns.ID].values,\n }\n\n for issue_class in issues_classes:\n issues_change_statistics[issue_class] = []\n\n previous_submission_issues_statistics = None\n for _, submission_issues_statistics in df_issues_statistics.iterrows():\n for issue_class in issues_classes:\n if previous_submission_issues_statistics is None:\n diff = submission_issues_statistics[issue_class]\n else:\n diff = submission_issues_statistics[issue_class] - previous_submission_issues_statistics[issue_class]\n\n issues_change_statistics[issue_class].append(diff)\n previous_submission_issues_statistics = submission_issues_statistics\n return pd.DataFrame.from_dict(issues_change_statistics)\n\n\ndef get_submissions_issues_change_statistics(submissions_path: str,\n issues_statistics_path: str,\n issues_change_statistics_path: str,\n issues_path: str,\n chunk_size=20000):\n \"\"\" Calculate issues count diff between previous and current attempt in all submissions series. \"\"\"\n\n df_submissions = pd.read_csv(submissions_path)\n df_issues_statistics = pd.read_csv(issues_statistics_path)\n df_issues = pd.read_csv(issues_path)[IssuesColumns.CLASS].values\n\n df_submissions = merge_dfs(\n df_submissions[[SubmissionColumns.ID, SubmissionColumns.GROUP, SubmissionColumns.ATTEMPT]],\n df_issues_statistics,\n SubmissionColumns.ID,\n SubmissionColumns.ID,\n )\n\n get_statistics_by_group(df_submissions, issues_change_statistics_path, chunk_size,\n lambda submission_series: submission_series.apply(calculate_issues_change_statistics,\n issues_classes=df_issues))\n\n\nif __name__ == '__main__':\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('submissions_path', type=str,\n help='Path to .csv file with preprocessed submissions with issues')\n parser.add_argument('issues_statistics_path', type=str,\n help='Path to .csv file with submissions issues count statistics')\n parser.add_argument('issues_path', type=str, help='Path to .csv file with issues list (classes and types)')\n parser.add_argument('issues_change_statistics_path', type=str,\n help='Path to .csv file with submissions issues statistics')\n parser.add_argument('--chunk-size', '-c', default=5000, type=int,\n help='Number of groups which will be processed simultaneously')\n\n args = parser.parse_args(sys.argv[1:])\n get_submissions_issues_change_statistics(args.submissions_path,\n args.issues_statistics_path,\n args.issues_change_statistics_path,\n args.issues_path,\n args.chunk_size)\n",
"from pathlib import Path\nfrom typing import List, Optional\n\nimport pandas as pd\nimport pytest\nfrom hyperstyle.src.python.review.inspectors.inspector_type import InspectorType\nfrom hyperstyle.src.python.review.inspectors.issue import (\n BaseIssue,\n CodeIssue,\n IssueDifficulty,\n IssueType,\n LineLenIssue,\n MaintainabilityLackIssue,\n)\nfrom analysis.src.python.evaluation.common.pandas_util import equal_df, get_solutions_df_by_file_path\nfrom analysis.src.python.evaluation.issues_statistics.get_raw_issues import (\n _filter_issues, _get_output_path, inspect_solutions,\n)\nfrom analysis.test.python.evaluation.issues_statistics import (\n GET_RAW_ISSUES_TARGET_FILES_FOLDER, GET_RAW_ISSUES_TEST_FILES_FOLDER,\n)\n\nORIGINAL_DF_NAME = 'original_df'\nORIGINAL_DF_CSV = f'{ORIGINAL_DF_NAME}.csv'\nORIGINAL_DF_XLSX = f'{ORIGINAL_DF_NAME}.xlsx'\n\nORIGINAL_DF_WITH_RAW_ISSUES_CSV = f'{ORIGINAL_DF_NAME}_with_raw_issues.csv'\nORIGINAL_DF_WITH_RAW_ISSUES_XLSX = f'{ORIGINAL_DF_NAME}_with_raw_issues.xlsx'\n\nNEW_DF_NAME = 'new_df'\n\nGET_OUTPUT_PATH_TEST_DATA = [\n (Path(ORIGINAL_DF_CSV), None, Path(ORIGINAL_DF_WITH_RAW_ISSUES_CSV)),\n (Path(ORIGINAL_DF_XLSX), None, Path(ORIGINAL_DF_WITH_RAW_ISSUES_XLSX)),\n (Path(ORIGINAL_DF_CSV), Path(f'{NEW_DF_NAME}.csv'), Path(f'{NEW_DF_NAME}.csv')),\n (Path(ORIGINAL_DF_CSV), Path(f'{NEW_DF_NAME}.xlsx'), Path(f'{NEW_DF_NAME}.xlsx')),\n (Path(ORIGINAL_DF_XLSX), Path(f'{NEW_DF_NAME}.csv'), Path(f'{NEW_DF_NAME}.csv')),\n (Path(ORIGINAL_DF_XLSX), Path(f'{NEW_DF_NAME}.xlsx'), Path(f'{NEW_DF_NAME}.xlsx')),\n (Path(ORIGINAL_DF_CSV), Path(NEW_DF_NAME), Path(ORIGINAL_DF_WITH_RAW_ISSUES_CSV)),\n (Path(ORIGINAL_DF_XLSX), Path(NEW_DF_NAME), Path(ORIGINAL_DF_WITH_RAW_ISSUES_XLSX)),\n (Path(ORIGINAL_DF_CSV), Path(f'{NEW_DF_NAME}/'), Path(ORIGINAL_DF_WITH_RAW_ISSUES_CSV)),\n (Path(ORIGINAL_DF_XLSX), Path(f'{NEW_DF_NAME}/'), Path(ORIGINAL_DF_WITH_RAW_ISSUES_XLSX)),\n (Path(ORIGINAL_DF_CSV), Path(f'{NEW_DF_NAME}.unknown'), Path(ORIGINAL_DF_WITH_RAW_ISSUES_CSV)),\n (Path(ORIGINAL_DF_XLSX), Path(f'{NEW_DF_NAME}.unknown'), Path(ORIGINAL_DF_WITH_RAW_ISSUES_XLSX)),\n]\n\n\[email protected](('solutions_file_path', 'output_path', 'expected_output_path'), GET_OUTPUT_PATH_TEST_DATA)\ndef test_get_output_path(solutions_file_path: Path, output_path: Optional[Path], expected_output_path: Path):\n actual_output_path = _get_output_path(solutions_file_path, output_path)\n assert actual_output_path == expected_output_path\n\n\nISSUES_FOR_FILTERING = [\n CodeIssue(\n origin_class=\"MissingSwitchDefaultCheck\",\n type=IssueType.ERROR_PRONE,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=112,\n column_no=13,\n inspector_type=InspectorType.CHECKSTYLE,\n difficulty=IssueDifficulty.HARD,\n ),\n CodeIssue(\n origin_class=\"SwitchStmtsShouldHaveDefault\",\n type=IssueType.ERROR_PRONE,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=112,\n column_no=1,\n inspector_type=InspectorType.PMD,\n difficulty=IssueDifficulty.HARD,\n ),\n CodeIssue(\n origin_class=\"MagicNumberCheck\",\n type=IssueType.INFO,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=303,\n column_no=25,\n inspector_type=InspectorType.CHECKSTYLE,\n difficulty=IssueDifficulty.EASY,\n ),\n MaintainabilityLackIssue(\n origin_class=\"SomeMaintainabilityCheck\",\n type=IssueType.MAINTAINABILITY,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=574,\n column_no=50,\n inspector_type=InspectorType.CHECKSTYLE,\n maintainability_lack=0,\n 
difficulty=IssueDifficulty.HARD,\n ),\n LineLenIssue(\n origin_class=\"SomeLineLenCheck\",\n type=IssueType.LINE_LEN,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=139,\n column_no=24,\n inspector_type=InspectorType.CHECKSTYLE,\n line_len=10,\n difficulty=IssueDifficulty.EASY,\n ),\n]\n\nISSUES_WITHOUT_DUPLICATES = [\n CodeIssue(\n origin_class=\"MissingSwitchDefaultCheck\",\n type=IssueType.ERROR_PRONE,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=112,\n column_no=13,\n inspector_type=InspectorType.CHECKSTYLE,\n difficulty=IssueDifficulty.HARD,\n ),\n CodeIssue(\n origin_class=\"MagicNumberCheck\",\n type=IssueType.INFO,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=303,\n column_no=25,\n inspector_type=InspectorType.CHECKSTYLE,\n difficulty=IssueDifficulty.EASY,\n ),\n MaintainabilityLackIssue(\n origin_class=\"SomeMaintainabilityCheck\",\n type=IssueType.MAINTAINABILITY,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=574,\n column_no=50,\n inspector_type=InspectorType.CHECKSTYLE,\n maintainability_lack=0,\n difficulty=IssueDifficulty.HARD,\n ),\n LineLenIssue(\n origin_class=\"SomeLineLenCheck\",\n type=IssueType.LINE_LEN,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=139,\n column_no=24,\n inspector_type=InspectorType.CHECKSTYLE,\n line_len=10,\n difficulty=IssueDifficulty.EASY,\n ),\n]\n\nISSUES_WITHOUT_ZERO_MEASURE_ISSUES = [\n CodeIssue(\n origin_class=\"MissingSwitchDefaultCheck\",\n type=IssueType.ERROR_PRONE,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=112,\n column_no=13,\n inspector_type=InspectorType.CHECKSTYLE,\n difficulty=IssueDifficulty.HARD,\n ),\n CodeIssue(\n origin_class=\"SwitchStmtsShouldHaveDefault\",\n type=IssueType.ERROR_PRONE,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=112,\n column_no=1,\n inspector_type=InspectorType.PMD,\n difficulty=IssueDifficulty.HARD,\n ),\n CodeIssue(\n origin_class=\"MagicNumberCheck\",\n type=IssueType.INFO,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=303,\n column_no=25,\n inspector_type=InspectorType.CHECKSTYLE,\n difficulty=IssueDifficulty.EASY,\n ),\n LineLenIssue(\n origin_class=\"SomeLineLenCheck\",\n type=IssueType.LINE_LEN,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=139,\n column_no=24,\n inspector_type=InspectorType.CHECKSTYLE,\n line_len=10,\n difficulty=IssueDifficulty.EASY,\n ),\n]\n\nISSUES_WITHOUT_INFO_CATEGORY = [\n CodeIssue(\n origin_class=\"MissingSwitchDefaultCheck\",\n type=IssueType.ERROR_PRONE,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=112,\n column_no=13,\n inspector_type=InspectorType.CHECKSTYLE,\n difficulty=IssueDifficulty.HARD,\n ),\n CodeIssue(\n origin_class=\"SwitchStmtsShouldHaveDefault\",\n type=IssueType.ERROR_PRONE,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=112,\n column_no=1,\n inspector_type=InspectorType.PMD,\n difficulty=IssueDifficulty.HARD,\n ),\n MaintainabilityLackIssue(\n origin_class=\"SomeMaintainabilityCheck\",\n type=IssueType.MAINTAINABILITY,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=574,\n column_no=50,\n inspector_type=InspectorType.CHECKSTYLE,\n maintainability_lack=0,\n difficulty=IssueDifficulty.HARD,\n ),\n LineLenIssue(\n origin_class=\"SomeLineLenCheck\",\n type=IssueType.LINE_LEN,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=139,\n column_no=24,\n 
inspector_type=InspectorType.CHECKSTYLE,\n line_len=10,\n difficulty=IssueDifficulty.EASY,\n ),\n]\n\nFILTERED_ISSUES = [\n CodeIssue(\n origin_class=\"MissingSwitchDefaultCheck\",\n type=IssueType.ERROR_PRONE,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=112,\n column_no=13,\n inspector_type=InspectorType.CHECKSTYLE,\n difficulty=IssueDifficulty.HARD,\n ),\n LineLenIssue(\n origin_class=\"SomeLineLenCheck\",\n type=IssueType.LINE_LEN,\n description=\"Some description\",\n file_path=Path(\"\"),\n line_no=139,\n column_no=24,\n inspector_type=InspectorType.CHECKSTYLE,\n line_len=10,\n difficulty=IssueDifficulty.EASY,\n ),\n]\n\nFILTER_ISSUES_TEST_DATA = [\n (\n ISSUES_FOR_FILTERING,\n True, # allow_duplicates\n True, # allow_zero_measure_issues\n True, # allow_info_issues\n ISSUES_FOR_FILTERING,\n ),\n (\n ISSUES_FOR_FILTERING,\n False, # allow_duplicates\n True, # allow_zero_measure_issues\n True, # allow_info_issues\n ISSUES_WITHOUT_DUPLICATES,\n ),\n (\n ISSUES_FOR_FILTERING,\n True, # allow_duplicates\n False, # allow_zero_measure_issues\n True, # allow_info_issues\n ISSUES_WITHOUT_ZERO_MEASURE_ISSUES,\n ),\n (\n ISSUES_FOR_FILTERING,\n True, # allow_duplicates\n True, # allow_zero_measure_issues\n False, # allow_info_issues\n ISSUES_WITHOUT_INFO_CATEGORY,\n ),\n (\n ISSUES_FOR_FILTERING,\n False, # allow_duplicates\n False, # allow_zero_measure_issues\n False, # allow_info_issues\n FILTERED_ISSUES,\n ),\n]\n\n\[email protected](\n ('issues', 'allow_duplicates', 'allow_zero_measure_issues', 'allow_info_issues', 'expected_issues'),\n FILTER_ISSUES_TEST_DATA,\n)\ndef test_filter_issues(\n issues: List[BaseIssue],\n allow_duplicates: bool,\n allow_zero_measure_issues: bool,\n allow_info_issues: bool,\n expected_issues: List[BaseIssue],\n):\n assert _filter_issues(issues, allow_duplicates, allow_zero_measure_issues, allow_info_issues) == expected_issues\n\n\nTEST_CORRECT_OUTPUT_DATA = [\n ('test_fragment_per_language.csv', 'target_fragment_per_language.csv'),\n ('test_incorrect_language.csv', 'target_incorrect_language.csv'),\n ('test_incorrect_code.csv', 'target_incorrect_code.csv'),\n ('test_rows_with_null.csv', 'target_rows_with_null.csv'),\n]\n\n\[email protected](('test_file', 'target_file'), TEST_CORRECT_OUTPUT_DATA)\ndef test_correct_output(test_file: str, target_file: str):\n solutions_file_path = Path(GET_RAW_ISSUES_TEST_FILES_FOLDER / test_file)\n solutions = get_solutions_df_by_file_path(solutions_file_path)\n\n test_dataframe = inspect_solutions(\n solutions,\n solutions_file_path,\n allow_duplicates=False,\n allow_info_issues=False,\n allow_zero_measure_issues=False,\n to_save_path=False,\n )\n\n target_dataframe = pd.read_csv(GET_RAW_ISSUES_TARGET_FILES_FOLDER / target_file)\n\n assert equal_df(target_dataframe, test_dataframe)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame.from_dict"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
forestyaser/HousePriceCrawler | [
"f0cf74128f9b6794f9ae6ebccc0356bb6d2f78f6"
] | [
"house_sigma/crawl_listing_url.py"
] | [
"import boto3\nfrom time import time\nimport sys\n# sys.stdout = open('log.txt', 'w')\n\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n# house address\ndf_mls = pd.read_csv('/media/qindom-cpu/wd1/kai/real_master_crawler/house_sigma/mls_sample_to_20190430.csv', index_col=[0])\nmlss = list(df_mls['_id'])\nn_mls = len(mlss)\nprint('mlss total %d. est time: %0.1f h' % (n_mls, n_mls * 2 / 3600.))\nstart = time()\n\nroot_url = 'https://housesigma.com'\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\ndriver = webdriver.Chrome('/var/qindom/chromedriver', chrome_options=chrome_options)\n\n'''step 1, get mls url'''\ndriver.get(url=root_url + '/web/en')\ndriver.implicitly_wait(4)\n\ninputElement = driver.find_element_by_id('search_input')\n\nhrefs = []\nfor i, mls in enumerate(mlss):\n print('processing %s, %d/%d:' % (mls, i + 1, n_mls))\n\n inputElement.clear()\n inputElement.send_keys(mls)\n\n mls_href = ''\n try:\n element = WebDriverWait(driver, 10).until(\n EC.visibility_of_element_located((By.XPATH, '//*[@id=\"index\"]/div[1]/div[2]/div[1]/div[2]/div[2]/div/p[2]'))\n )\n\n page_source = driver.page_source\n mls_ind = page_source.find(mls)\n from_ind = page_source.rfind('href=\"', 0, mls_ind) + len('href=\"')\n to_ind = mls_ind + len(mls)\n\n mls_href = page_source[from_ind + len('/web/en/house/'):to_ind]\n except:\n print('%s href not found. is the max waiting time too short?' % mls)\n\n hrefs.append(mls_href)\n print(mls_href)\n\ndf_mls['house_sigma_url'] = hrefs\nfile_save = 'mls_sample_with_url_to_20190430.csv'\ndf_mls.to_csv(file_save)\n\n# upload to s3\n# s3 = boto3.Session(\n# aws_access_key_id='AKIA2OKWCC2CQRPZWOOJ',\n# aws_secret_access_key='R74CNLr5qZN+9f7TWBKEuDmV4RuzjRWQ6CG/+acN',\n# ).resource('s3')\n#\n# s3.Object('kai-data-source', file_save).put(Body=open(file_save, 'rb'))\n# s3.ObjectAcl('kai-data-source', file_save).put(ACL='public-read')\n\ndriver.quit()\n\nprint('time cost: %d' % (int(time() - start)))\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jessicaaustin/CoTeDe | [
"0ca2a1c71de980d91262fd36fd5d8ab8cc09f019",
"0ca2a1c71de980d91262fd36fd5d8ab8cc09f019"
] | [
"tests/qctests/test_qc_tukey53H.py",
"cotede/qctests/cars_normbias.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n\"\"\"\n\nimport numpy as np\nfrom numpy import ma\n\nfrom cotede.qctests import Tukey53H\nfrom data import DummyData\n\n\ndef test():\n profile = DummyData()\n\n profile.data['PRES'] = ma.masked_array([1.0, 100, 200, 300, 500, 5000])\n profile.data['TEMP'] = ma.masked_array([27.44, 14.55, 11.96, 11.02, 7.65, 2.12])\n profile.data['PSAL'] = ma.masked_array([35.71, 35.50, 35.13, 35.02, 34.72, 35.03])\n\n features = {\n 'tukey53H': ma.masked_array([0, 0, 0.3525000000000009,\n 0.35249999999999915, 0, 0],\n mask=[True, True, False, False, True, True]),\n 'tukey53H_norm': ma.masked_array([0, 0, 0.07388721803621254,\n 0.07388721803621218, 0, 0],\n mask = [True, True, False, False, True, True])\n }\n flags = {'tukey53H_norm': np.array([0, 0, 1, 1, 0, 0], dtype='i1')}\n\n cfg = {\n 'l': 5,\n 'threshold': 6,\n 'flag_good': 1,\n 'flag_bad': 4\n }\n\n y = Tukey53H(profile, 'TEMP', cfg)\n y.test()\n\n assert type(y.features) is dict\n for f in y.features:\n assert ma.allclose(y.features[f], features[f])\n for f in y.flags:\n assert ma.allclose(y.flags[f], flags[f])\n",
"# -*- coding: utf-8 -*-\n\n\"\"\"\n\"\"\"\n\nfrom datetime import timedelta\nimport logging\n\nimport numpy as np\nfrom numpy import ma\n\nfrom oceansdb import CARS\n\n\nmodule_logger = logging.getLogger(__name__)\n\nclass CARS_NormBias(object):\n def __init__(self, data, varname, cfg, autoflag=True):\n self.data = data\n self.varname = varname\n self.cfg = cfg\n\n # Default is to do not use standard error to estimate the bias,\n # because that is the traditional approach.\n if 'use_standard_error' not in self.cfg:\n self.cfg['use_standard_error'] = False\n\n self.set_features()\n if autoflag:\n self.test()\n\n def keys(self):\n return self.features.keys() + \\\n [\"flag_%s\" % f for f in self.flags.keys()]\n\n def set_features(self):\n\n if ('LATITUDE' in self.data.attributes.keys()) and \\\n ('LONGITUDE' in self.data.attributes.keys()):\n kwargs = {\n 'lat': self.data.attributes['LATITUDE'],\n 'lon': self.data.attributes['LONGITUDE']}\n\n if ('LATITUDE' in self.data.keys()) and \\\n ('LONGITUDE' in self.data.keys()):\n dLmax = max(\n data['LATITUDE'].max()-data['LATITUDE'].min(),\n data['LONGITUDE'].max()-data['LONGITUDE'].min())\n # Only use each measurement coordinate if it is spread.\n if dLmax >= 0.01:\n kwargs = {\n 'lat': self.data['LATITUDE'],\n 'lon': self.data['LONGITUDE'],\n 'alongtrack_axis': ['lat', 'lon']}\n\n if ('DEPTH' in self.data.keys()):\n depth = self.data['DEPTH']\n elif ('PRES' in self.data.keys()):\n depth = self.data['PRES']\n\n try:\n doy = int(self.data.attributes['date'].strftime('%j'))\n except:\n doy = int(self.data.attributes['datetime'].strftime('%j'))\n\n db = CARS()\n if self.varname[-1] == '2':\n vtype = self.varname[:-1]\n else:\n vtype = self.varname\n\n idx = ~ma.getmaskarray(depth) & np.array(depth >= 0)\n cars = db[vtype].extract(\n var=['mn', 'std_dev'],\n doy=doy,\n depth=depth[idx],\n **kwargs)\n\n if idx.all() is not True:\n for v in cars.keys():\n tmp = ma.masked_all(depth.shape, dtype=cars[v].dtype)\n tmp[idx] = cars[v]\n cars[v] = tmp\n\n self.features = {\n 'cars_mean': cars['mn'],\n 'cars_std': cars['std_dev']}\n\n self.features['cars_bias'] = self.data[self.varname] - \\\n self.features['cars_mean']\n\n # if use_standard_error = True, the comparison with the climatology\n # considers the standard error, i.e. 
the bias will be only the\n # ammount above the standard error range.\n assert not self.cfg['use_standard_error']\n if self.cfg['use_standard_error'] is True:\n standard_error = self.features['cars_std'] / \\\n self.features['cars_nsamples'] ** 0.5\n idx = np.absolute(self.features['cars_bias']) <= \\\n standard_error\n self.features['cars_bias'][idx] = 0\n idx = np.absolute(self.features['cars_bias']) > standard_error\n self.features['cars_bias'][idx] -= \\\n np.sign(self.features['cars_bias'][idx]) * \\\n standard_error[idx]\n\n self.features['cars_normbias'] = self.features['cars_bias'] / \\\n self.features['cars_std']\n\n def test(self):\n\n # 3 is the possible minimum to estimate the std, but I shold use higher.\n try:\n min_samples = self.cfg['min_samples']\n except KeyError:\n min_samples = 3\n\n self.flags = {}\n\n try:\n flag_good = self.cfg['flag_good']\n except KeyError:\n flag_good = 1\n try:\n flag_bad = self.cfg['flag_bad']\n except KeyError:\n flag_bad = 3\n\n threshold = self.cfg['threshold']\n assert (np.size(threshold) == 1) and \\\n (threshold is not None)\n\n flag = np.zeros(self.data[self.varname].shape, dtype='i1')\n\n normbias_abs = np.absolute(self.features['cars_normbias'])\n ind = np.nonzero(normbias_abs <= threshold)\n flag[ind] = flag_good\n ind = np.nonzero(normbias_abs > threshold)\n flag[ind] = flag_bad\n\n # Flag as 9 any masked input value\n flag[ma.getmaskarray(self.data[self.varname])] = 9\n\n self.flags['cars_normbias'] = flag\n"
] | [
[
"numpy.ma.allclose",
"numpy.array",
"numpy.ma.masked_array"
],
[
"numpy.absolute",
"numpy.ma.getmaskarray",
"numpy.nonzero",
"numpy.sign",
"numpy.size",
"numpy.ma.masked_all",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vribeiro1/covid19 | [
"2528ec2e67bee5ff864a513940fb0525f98740b0",
"2528ec2e67bee5ff864a513940fb0525f98740b0"
] | [
"ards_prediction.py",
"akf_prediction.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\nimport shap\n\nfrom sklearn.model_selection import train_test_split\n\nfrom .utils.plot import (\n plot_results_1x2,\n plot_results_2x2,\n plot_shap_values,\n plot_survival,\n plot_sensitivity_specificity_vs_threshold\n)\nfrom .utils.preprocess import preprocess\nfrom .utils.report import generate_experiment_report\nfrom .run_experiment import run_experiment\n\npd.options.mode.chained_assignment = None\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nDATA_FPATH = os.path.join(BASE_DIR, \"data\", \"covid19_internacao.csv\")\n\n\ndef ards_prediction(df_final):\n df_final = preprocess(df_final)\n\n # Now we expect to prepare our training pipeline\n\n features_display_names = [\n (\"idade\", \"Age (years)\"),\n (\"seg_normal\", \"Healthy lungs (%)\"),\n (\"taxa_gordura\", \"Mediastinal fat (%)\"),\n (\"sofa_score\", \"SOFA score\"),\n (\"n_comorbidades\", \"Comorbidities\"),\n ]\n\n features = [\n f[0] for f in features_display_names\n ]\n\n target = \"sdra\"\n\n # We select a small subset of features, and maybe there will be left some duplicates in the dataframe.\n # We drop those duplicates.\n df_model = df_final.drop_duplicates(subset=features + [\"record_id\", target])\n\n # Train, validation and test split is in the patient level\n df_split = df_model.groupby(\"record_id\").agg({\n \"idade\": lambda series: series.iloc[0],\n \"sexo_M\": lambda series: series.iloc[0],\n \"instituicao\": lambda series: series.iloc[0],\n target: lambda series: series.iloc[0]\n }).reset_index()\n\n target_unknown = df_split[df_split[target].isna()].record_id.nunique()\n df_split = df_split.dropna(subset=[target])\n\n train_valid_records, test_records = train_test_split(\n df_split, test_size=0.2, random_state=0, stratify=df_split[target]\n )\n\n assert len(set(train_valid_records.record_id.unique()) & set(test_records.record_id.unique())) == 0\n\n summaries, df_test = run_experiment(df_model, train_valid_records, test_records, features, target)\n X_test = df_test[features]\n\n ############################## Finished training the models ##############################\n\n save_path_2x2 = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"ards_model_2x2.tiff\")\n plot_results_2x2(summaries, save_path_2x2, fformat=\"tiff\")\n\n save_path_1x2 = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"ards_model_1x2.tiff\")\n metrics_summary = plot_results_1x2(summaries, save_path_1x2, fformat=\"tiff\")\n\n save_path_shap = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"ards_shap.tiff\")\n shap_values_plot = plot_shap_values(X_test, summaries, [f[1] for f in features_display_names], save_path_shap, fformat=\"tiff\")\n\n save_path_sens_spec = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"ards_sens_spec.tiff\")\n plot_sensitivity_specificity_vs_threshold(summaries, save_path_sens_spec, fformat=\"tiff\")\n\n save_report = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"ards_report.txt\")\n reports = generate_experiment_report(\n \"ARDS\", target, df_split, df_final, features, metrics_summary,\n train_valid_records, test_records, save_report\n )\n\n print(reports[\"stats_report\"])\n print(reports[\"missing_values\"])\n print(reports[\"metrics\"])\n\n ############################## Survival analysis ##############################\n\n save_path_survival = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"ards_survival.tiff\")\n plot_survival(df_test, features, summaries, save_path_survival, fformat=\"tiff\")\n\n\ndef 
ards_prediction_loio(df_final, institution):\n \"\"\" Leave one institution out \"\"\"\n df_final = preprocess(df_final)\n\n # The same institution might appear with different names, so we make a list with the names\n assert isinstance(institution, str) or isinstance(institution, list), \"'institution' must be either a string or a list\"\n if isinstance(institution, str):\n institution = [institution]\n\n # Now we expect to prepare our training pipeline\n\n features_display_names = [\n (\"idade\", \"Age (years)\"),\n (\"seg_normal\", \"Healthy lungs (%)\"),\n (\"taxa_gordura\", \"Mediastinal fat (%)\"),\n (\"sofa_score\", \"SOFA score\"),\n (\"n_comorbidades\", \"Comorbidities\"),\n ]\n\n features = [\n f[0] for f in features_display_names\n ]\n\n target = \"sdra\"\n\n # We select a small subset of features, and maybe there will be left some duplicates in the dataframe.\n # We drop those duplicates.\n df_model = df_final.drop_duplicates(subset=features + [\"record_id\", target])\n\n # Train, validation and test split is in the patient level\n df_split = df_model.groupby(\"record_id\").agg({\n \"idade\": lambda series: series.iloc[0],\n \"sexo_M\": lambda series: series.iloc[0],\n \"instituicao\": lambda series: series.iloc[0],\n target: lambda series: series.iloc[0]\n }).reset_index()\n\n target_unknown = df_split[df_split[target].isna()].record_id.nunique()\n df_split = df_split.dropna(subset=[target])\n\n # Leave institution out of the train/validation pipeline\n train_valid_records = df_split[~df_split.instituicao.isin(institution)]\n test_records = df_split[df_split.instituicao.isin(institution)]\n\n assert len(set(train_valid_records.record_id.unique()) & set(test_records.record_id.unique())) == 0\n\n summaries, df_test = run_experiment(df_model, train_valid_records, test_records, features, target)\n X_test = df_test[features]\n\n ############################## Finished training the models ##############################\n\n save_path_2x2 = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_ards_model_2x2.tiff\")\n plot_results_2x2(summaries, save_path_2x2, fformat=\"tiff\")\n\n save_path_1x2 = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_ards_model_1x2.tiff\")\n metrics_summary = plot_results_1x2(summaries, save_path_1x2, fformat=\"tiff\")\n\n save_path_shap = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_ards_shap.tiff\")\n shap_values_plot = plot_shap_values(X_test, summaries, [f[1] for f in features_display_names], save_path_shap, fformat=\"tiff\")\n\n save_path_sens_spec = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_ards_sens_spec.tiff\")\n plot_sensitivity_specificity_vs_threshold(summaries, save_path_sens_spec, fformat=\"tiff\")\n\n save_report = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_ards_report.txt\")\n reports = generate_experiment_report(\n \"ARDS\", target, df_split, df_final, features, metrics_summary,\n train_valid_records, test_records, save_report\n )\n\n print(reports[\"stats_report\"])\n print(reports[\"missing_values\"])\n print(reports[\"metrics\"])\n\n ############################## Survival analysis ##############################\n\n # save_path_survival = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_ards_survival.tiff\")\n # plot_survival(df_test, features, summaries, save_path_survival, fformat=\"tiff\")\n\nif __name__ == \"__main__\":\n 
df_final = pd.read_csv(DATA_FPATH)\n    ards_prediction(df_final)\n",
"import os\nimport numpy as np\nimport pandas as pd\nimport shap\n\nfrom sklearn.model_selection import train_test_split\n\nfrom .utils.plot import (\n plot_results_1x2,\n plot_results_2x2,\n plot_shap_values,\n plot_survival,\n plot_sensitivity_specificity_vs_threshold\n)\nfrom .utils.preprocess import preprocess\nfrom .utils.report import generate_experiment_report\nfrom .run_experiment import run_experiment\n\npd.options.mode.chained_assignment = None\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nDATA_FPATH = os.path.join(BASE_DIR, \"data\", \"covid19_internacao.csv\")\n\n\ndef akf_prediction(df_final):\n df_final = preprocess(df_final)\n\n # Now we expect to prepare our training pipeline\n\n features_display_names = [\n (\"idade\", \"Age (years)\"),\n (\"seg_normal\", \"Healthy lungs (%)\"),\n (\"taxa_gordura\", \"Mediastinal fat (%)\"),\n (\"sofa_score\", \"SOFA score\"),\n (\"n_comorbidades\", \"Comorbidities\"),\n ]\n\n features = [\n f[0] for f in features_display_names\n ]\n\n target = \"ira\"\n\n # We select a small subset of features, and maybe there will be left some duplicates in the dataframe.\n # We drop those duplicates.\n df_model = df_final.drop_duplicates(subset=features + [\"record_id\", target])\n\n # Train, validation and test split is in the patient level\n df_split = df_model.groupby(\"record_id\").agg({\n \"idade\": lambda series: series.iloc[0],\n \"sexo_M\": lambda series: series.iloc[0],\n \"instituicao\": lambda series: series.iloc[0],\n target: lambda series: series.iloc[0]\n }).reset_index()\n\n target_unknown = df_split[df_split[target].isna()].record_id.nunique()\n df_split = df_split.dropna(subset=[target])\n\n train_valid_records, test_records = train_test_split(\n df_split, test_size=0.2, random_state=0, stratify=df_split[target]\n )\n\n assert len(set(train_valid_records.record_id.unique()) & set(test_records.record_id.unique())) == 0\n\n summaries, df_test = run_experiment(df_model, train_valid_records, test_records, features, target)\n X_test = df_test[features]\n\n ############################## Finished training the models ##############################\n\n save_path_2x2 = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"akf_model_2x2.tiff\")\n plot_results_2x2(summaries, save_path_2x2, fformat=\"tiff\")\n\n save_path_1x2 = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"akf_model_1x2.tiff\")\n metrics_summary = plot_results_1x2(summaries, save_path_1x2, fformat=\"tiff\")\n\n save_path_shap = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"akf_shap.tiff\")\n shap_values_plot = plot_shap_values(X_test, summaries, [f[1] for f in features_display_names], save_path_shap, fformat=\"tiff\")\n\n save_path_sens_spec = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"akf_sens_spec.tiff\")\n plot_sensitivity_specificity_vs_threshold(summaries, save_path_sens_spec, fformat=\"tiff\")\n\n save_report = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"akf_report.txt\")\n reports = generate_experiment_report(\n \"Acute Kidney Failure\", target, df_split, df_final, features, metrics_summary,\n train_valid_records, test_records, save_report\n )\n\n print(reports[\"stats_report\"])\n print(reports[\"missing_values\"])\n print(reports[\"metrics\"])\n\n ############################## Survival analysis ##############################\n\n save_path_survival = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"akf_survival.tiff\")\n plot_survival(df_test, features, summaries, save_path_survival, fformat=\"tiff\")\n\n\ndef 
akf_prediction_loio(df_final, institution):\n \"\"\" Leave one institution out \"\"\"\n df_final = preprocess(df_final)\n\n # The same institution might appear with different names, so we make a list with the names\n assert isinstance(institution, str) or isinstance(institution, list), \"'institution' must be either a string or a list\"\n if isinstance(institution, str):\n institution = [institution]\n\n # Now we expect to prepare our training pipeline\n\n features_display_names = [\n (\"idade\", \"Age (years)\"),\n (\"seg_normal\", \"Healthy lungs (%)\"),\n (\"taxa_gordura\", \"Mediastinal fat (%)\"),\n (\"sofa_score\", \"SOFA score\"),\n (\"n_comorbidades\", \"Comorbidities\"),\n ]\n\n features = [\n f[0] for f in features_display_names\n ]\n\n target = \"ira\"\n\n # We select a small subset of features, and maybe there will be left some duplicates in the dataframe.\n # We drop those duplicates.\n df_model = df_final.drop_duplicates(subset=features + [\"record_id\", target])\n\n # Train, validation and test split is in the patient level\n df_split = df_model.groupby(\"record_id\").agg({\n \"idade\": lambda series: series.iloc[0],\n \"sexo_M\": lambda series: series.iloc[0],\n \"instituicao\": lambda series: series.iloc[0],\n target: lambda series: series.iloc[0]\n }).reset_index()\n\n target_unknown = df_split[df_split[target].isna()].record_id.nunique()\n df_split = df_split.dropna(subset=[target])\n\n # Leave institution out of the train/validation pipeline\n train_valid_records = df_split[~df_split.instituicao.isin(institution)]\n test_records = df_split[df_split.instituicao.isin(institution)]\n\n assert len(set(train_valid_records.record_id.unique()) & set(test_records.record_id.unique())) == 0\n\n summaries, df_test = run_experiment(df_model, train_valid_records, test_records, features, target)\n X_test = df_test[features]\n\n ############################## Finished training the models ##############################\n\n save_path_2x2 = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_akf_model_2x2.tiff\")\n plot_results_2x2(summaries, save_path_2x2, fformat=\"tiff\")\n\n save_path_1x2 = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_akf_model_1x2.tiff\")\n metrics_summary = plot_results_1x2(summaries, save_path_1x2, fformat=\"tiff\")\n\n save_path_shap = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_akf_shap.tiff\")\n shap_values_plot = plot_shap_values(X_test, summaries, [f[1] for f in features_display_names], save_path_shap, fformat=\"tiff\")\n\n save_path_sens_spec = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_akf_sens_spec.tiff\")\n plot_sensitivity_specificity_vs_threshold(summaries, save_path_sens_spec, fformat=\"tiff\")\n\n save_report = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_akf_report.txt\")\n reports = generate_experiment_report(\n \"Acute Kidney Failure\", target, df_split, df_final, features, metrics_summary,\n train_valid_records, test_records, save_report\n )\n\n print(reports[\"stats_report\"])\n print(reports[\"missing_values\"])\n print(reports[\"metrics\"])\n\n ############################## Survival analysis ##############################\n\n # save_path_survival = os.path.join(BASE_DIR, \"desfechos_intermediarios\", \"LOIO\", f\"{institution[0]}_akf_survival.tiff\")\n # plot_survival(df_test, features, summaries, save_path_survival, fformat=\"tiff\")\n\n\nif __name__ == 
\"__main__\":\n df_final = pd.read_csv(DATA_FPATH)\n akf_prediction(df_final)\n"
] | [
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split"
],
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
antalszava/piquasso | [
"7ebff83145cfab44929114437c250852dff5f9a5"
] | [
"tests/api/program/test_blackbird.py"
] | [
"#\n# Copyright 2021 Budapest Quantum Computing Group\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport piquasso as pq\n\n\ndef test_loads_blackbird_parses_operations():\n blackbird_code = \"\"\"name StateTeleportation\n version 1.0\n\n BSgate(0.7853981633974483, 0) | [1, 2]\n Rgate(0.7853981633974483) | 1\n Vgate(0.5) | 1\n \"\"\"\n\n program = pq.Program()\n\n program.loads_blackbird(blackbird_code)\n\n assert len(program.instructions) == 3\n\n assert program.instructions[0] == pq.Beamsplitter(\n theta=np.pi / 4, phi=0.0\n ).on_modes(1, 2)\n assert program.instructions[1] == pq.Phaseshifter(phi=np.pi / 4).on_modes(1)\n assert program.instructions[2] == pq.CubicPhase(gamma=0.5).on_modes(1)\n\n\ndef test_loads_blackbird_parses_operations_with_default_arguments():\n blackbird_code = \"\"\"name StateTeleportation\n version 1.0\n\n BSgate() | [1, 2]\n Rgate(0.7853981633974483) | 1\n \"\"\"\n\n program = pq.Program()\n\n program.loads_blackbird(blackbird_code)\n\n assert len(program.instructions) == 2\n\n assert program.instructions[0] == pq.Beamsplitter(\n theta=0.0, phi=np.pi / 4\n ).on_modes(1, 2)\n assert program.instructions[1] == pq.Phaseshifter(phi=np.pi / 4).on_modes(1)\n\n\ndef test_loads_blackbird_parses_operations_with_classes_registered_separately():\n blackbird_code = \"\"\"name StateTeleportation\n version 1.0\n\n BSgate(0.7853981633974483, 0) | [1, 2]\n Rgate(0.7853981633974483) | 1\n \"\"\"\n\n class Beamsplitter(pq.Instruction):\n pass\n\n program = pq.Program()\n\n program.loads_blackbird(blackbird_code)\n\n assert len(program.instructions) == 2\n\n assert program.instructions[0].__class__ is Beamsplitter\n\n # Teardown\n pq.Instruction.set_subclass(pq.Beamsplitter)\n assert pq.Instruction.get_subclass(\"Beamsplitter\") is pq.Beamsplitter\n\n\ndef test_loads_blackbird_preserves_exising_operations():\n blackbird_code = \"\"\"name StateTeleportation\n version 1.0\n\n BSgate(0.7853981633974483, 0) | [1, 2]\n Rgate(0.7853981633974483) | 1\n \"\"\"\n\n program = pq.Program()\n\n squeezing = pq.Squeezing(r=np.log(2), phi=np.pi / 2)\n\n with program:\n pq.Q(0) | squeezing\n\n program.loads_blackbird(blackbird_code)\n\n assert len(program.instructions) == 3\n\n assert program.instructions[0] == squeezing\n assert program.instructions[1] == pq.Beamsplitter(\n theta=np.pi / 4, phi=0.0\n ).on_modes(1, 2)\n assert program.instructions[2] == pq.Phaseshifter(phi=np.pi / 4).on_modes(1)\n\n\ndef test_loads_blackbird_with_execution(gaussian_state_assets):\n blackbird_code = \"\"\"name StateTeleportation\n version 1.0\n\n BSgate(0.7853981633974483, 3.141592653589793) | [1, 2]\n Rgate(0.7853981633974483) | 1\n \"\"\"\n\n program = pq.Program()\n\n simulator = pq.GaussianSimulator(d=3)\n\n squeezing = pq.Squeezing(r=np.log(2), phi=np.pi / 2)\n\n with program:\n pq.Q(1) | squeezing\n\n program.loads_blackbird(blackbird_code)\n\n state = simulator.execute(program).state\n\n expected_state = gaussian_state_assets.load()\n\n assert state == expected_state\n\n\ndef 
test_load_blackbird_from_file_with_execution(gaussian_state_assets, tmpdir):\n blackbird_code = \"\"\"name StateTeleportation\n version 1.0\n\n BSgate(0.7853981633974483, 3.141592653589793) | [1, 2]\n Rgate(0.7853981633974483) | 1\n \"\"\"\n\n blackbird_file = tmpdir.join(\"example-blackbird-code.xbb\")\n\n blackbird_file.write(blackbird_code)\n\n program = pq.Program()\n\n simulator = pq.GaussianSimulator(d=3)\n\n squeezing = pq.Squeezing(r=np.log(2), phi=np.pi / 2)\n\n with program:\n pq.Q(1) | squeezing\n\n program.load_blackbird(blackbird_file.strpath)\n\n state = simulator.execute(program).state\n\n expected_state = gaussian_state_assets.load()\n\n assert state == expected_state\n"
] | [
[
"numpy.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ValentinMouret/probability | [
"7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4",
"7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4",
"7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4",
"7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4",
"7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4",
"7ea6cc55e5b3fed04372cd188cd0764e92fd3cf4"
] | [
"tensorflow_probability/python/bijectors/tanh_test.py",
"tensorflow_probability/python/distributions/vector_student_t.py",
"tensorflow_probability/python/distributions/multivariate_student_t_test.py",
"tensorflow_probability/python/distributions/poisson.py",
"tensorflow_probability/python/sts/fitting.py",
"tensorflow_probability/python/distributions/mvn_full_covariance.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tanh Tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_probability.python import bijectors as tfb\n\nfrom tensorflow_probability.python.bijectors import bijector_test_util\ntfe = tf.contrib.eager\n\n\[email protected]_all_tests_in_graph_and_eager_modes\nclass TanhBijectorTest(tf.test.TestCase):\n \"\"\"Tests correctness of the Y = g(X) = tanh(X) transformation.\"\"\"\n\n def testBijector(self):\n self.assertEqual(\"tanh\", tfb.Tanh().name)\n x = np.linspace(-3., 3., 100).reshape([2, 5, 10]).astype(np.float32)\n y = np.tanh(x)\n ildj = -np.log1p(-np.square(np.tanh(x)))\n bijector = tfb.Tanh()\n self.assertAllClose(\n y, self.evaluate(bijector.forward(x)), atol=0., rtol=1e-2)\n self.assertAllClose(\n x, self.evaluate(bijector.inverse(y)), atol=0., rtol=1e-4)\n self.assertAllClose(\n ildj,\n self.evaluate(bijector.inverse_log_det_jacobian(\n y, event_ndims=0)), atol=0., rtol=1e-6)\n self.assertAllClose(\n -ildj,\n self.evaluate(bijector.forward_log_det_jacobian(\n x, event_ndims=0)), atol=0., rtol=1e-4)\n\n def testScalarCongruency(self):\n bijector_test_util.assert_scalar_congruency(\n tfb.Tanh(), lower_x=-9., upper_x=9., eval_func=self.evaluate,\n n=int(10e4))\n\n def testBijectiveAndFinite(self):\n x = np.linspace(-5., 5., 100).astype(np.float32)\n eps = 1e-3\n y = np.linspace(eps, 1. - eps, 100).astype(np.float32)\n bijector_test_util.assert_bijective_and_finite(\n tfb.Tanh(), x, y, eval_func=self.evaluate, event_ndims=0, atol=0.,\n rtol=1e-4)\n\n def testMatchWithAffineTransform(self):\n direct_bj = tfb.Tanh()\n indirect_bj = tfb.Chain([\n tfb.AffineScalar(shift=tf.to_double(-1.0), scale=tf.to_double(2.0)),\n tfb.Sigmoid(),\n tfb.AffineScalar(scale=tf.to_double(2.0))])\n\n x = np.linspace(-3.0, 3.0, 100)\n y = np.tanh(x)\n self.assertAllClose(self.evaluate(direct_bj.forward(x)),\n self.evaluate(indirect_bj.forward(x)))\n self.assertAllClose(self.evaluate(direct_bj.inverse(y)),\n self.evaluate(indirect_bj.inverse(y)))\n self.assertAllClose(\n self.evaluate(direct_bj.inverse_log_det_jacobian(y, event_ndims=0)),\n self.evaluate(indirect_bj.inverse_log_det_jacobian(y, event_ndims=0)))\n self.assertAllClose(\n self.evaluate(direct_bj.forward_log_det_jacobian(x, event_ndims=0)),\n self.evaluate(indirect_bj.forward_log_det_jacobian(x, event_ndims=0)))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Vector Student's t distribution classes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_probability.python import bijectors\nfrom tensorflow_probability.python.distributions import student_t\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\n\n\n# TODO(b/116482987): Expose this distribution in TFP.\nclass _VectorStudentT(transformed_distribution.TransformedDistribution):\n \"\"\"A vector version of Student's t-distribution on `R^k`.\n\n #### Mathematical details\n\n The probability density function (pdf) is,\n\n ```none\n pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + 1)) / Z\n where,\n y = inv(Sigma) (x - mu)\n Z = abs(det(Sigma)) ( sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1)) )**k\n ```\n\n where:\n * `loc = mu`; a vector in `R^k`,\n * `scale = Sigma`; a lower-triangular matrix in `R^{k x k}`,\n * `Z` denotes the normalization constant, and,\n * `Gamma` is the [gamma function](\n https://en.wikipedia.org/wiki/Gamma_function), and,\n * `||y||**2` denotes the [squared Euclidean norm](\n https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `y`.\n\n The VectorStudentT distribution is a member of the [location-scale family](\n https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be\n constructed as,\n\n ```none\n X ~ StudentT(df, loc=0, scale=1)\n Y = loc + scale * X\n ```\n\n Notice that the `scale` matrix has semantics closer to std. deviation than\n covariance (but it is not std. deviation).\n\n This distribution is an Affine transformation of iid\n [Student's t-distributions](\n https://en.wikipedia.org/wiki/Student%27s_t-distribution)\n and should not be confused with the [Multivariate Student's t-distribution](\n https://en.wikipedia.org/wiki/Multivariate_t-distribution). The\n traditional Multivariate Student's t-distribution is type of\n [elliptical distribution](\n https://en.wikipedia.org/wiki/Elliptical_distribution); it has PDF:\n\n ```none\n pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z\n where,\n y = inv(Sigma) (x - mu)\n Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))\n ```\n\n Notice that the Multivariate Student's t-distribution uses `k` where the\n Vector Student's t-distribution has a `1`. 
Conversely the Vector version has a\n broader application of the power-`k` in the normalization constant.\n\n #### Examples\n\n A single instance of a \"Vector Student's t-distribution\" is defined by a mean\n vector of length `k` and a scale matrix of shape `k x k`.\n\n Extra leading dimensions, if provided, allow for batches.\n\n ```python\n tfd = tfp.distributions\n\n # Initialize a single 3-variate vector Student's t-distribution.\n mu = [1., 2, 3]\n chol = [[1., 0, 0.],\n [1, 3, 0],\n [1, 2, 3]]\n vt = tfd.VectorStudentT(df=2, loc=mu, scale_tril=chol)\n\n # Evaluate this on an observation in R^3, returning a scalar.\n vt.prob([-1., 0, 1])\n\n # Initialize a batch of two 3-variate vector Student's t-distributions.\n mu = [[1., 2, 3],\n [11, 22, 33]]\n chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.\n vt = tfd.VectorStudentT(loc=mu, scale_tril=chol)\n\n # Evaluate this on a two observations, each in R^3, returning a length two\n # tensor.\n x = [[-1, 0, 1],\n [-11, 0, 11]]\n vt.prob(x)\n ```\n\n For more examples of how to construct the `scale` matrix, see the\n `tfp.bijectors.Affine` docstring.\n\n \"\"\"\n\n def __init__(self,\n df,\n loc=None,\n scale_identity_multiplier=None,\n scale_diag=None,\n scale_tril=None,\n scale_perturb_factor=None,\n scale_perturb_diag=None,\n validate_args=False,\n allow_nan_stats=True,\n name=\"VectorStudentT\"):\n \"\"\"Instantiates the vector Student's t-distributions on `R^k`.\n\n The `batch_shape` is the broadcast between `df.batch_shape` and\n `Affine.batch_shape` where `Affine` is constructed from `loc` and\n `scale_*` arguments.\n\n The `event_shape` is the event shape of `Affine.event_shape`.\n\n Args:\n df: Floating-point `Tensor`. The degrees of freedom of the\n distribution(s). `df` must contain only positive values. Must be\n scalar if `loc`, `scale_*` imply non-scalar batch_shape or must have the\n same `batch_shape` implied by `loc`, `scale_*`.\n loc: Floating-point `Tensor`. If this is set to `None`, no `loc` is\n applied.\n scale_identity_multiplier: floating point rank 0 `Tensor` representing a\n scaling done to the identity matrix. When `scale_identity_multiplier =\n scale_diag=scale_tril = None` then `scale += IdentityMatrix`. Otherwise\n no scaled-identity-matrix is added to `scale`.\n scale_diag: Floating-point `Tensor` representing the diagonal matrix.\n `scale_diag` has shape [N1, N2, ..., k], which represents a k x k\n diagonal matrix. When `None` no diagonal term is added to `scale`.\n scale_tril: Floating-point `Tensor` representing the diagonal matrix.\n `scale_diag` has shape [N1, N2, ..., k, k], which represents a k x k\n lower triangular matrix. When `None` no `scale_tril` term is added to\n `scale`. The upper triangular elements above the diagonal are ignored.\n scale_perturb_factor: Floating-point `Tensor` representing factor matrix\n with last two dimensions of shape `(k, r)`. When `None`, no rank-r\n update is added to `scale`.\n scale_perturb_diag: Floating-point `Tensor` representing the diagonal\n matrix. `scale_perturb_diag` has shape [N1, N2, ..., r], which\n represents an r x r Diagonal matrix. When `None` low rank updates will\n take the form `scale_perturb_factor * scale_perturb_factor.T`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. 
When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n graph_parents = [df, loc, scale_identity_multiplier, scale_diag,\n scale_tril, scale_perturb_factor, scale_perturb_diag]\n with tf.name_scope(name) as name:\n with tf.name_scope(\"init\", values=graph_parents):\n dtype = dtype_util.common_dtype(graph_parents, tf.float32)\n df = tf.convert_to_tensor(df, name=\"df\", dtype=dtype)\n # The shape of the _VectorStudentT distribution is governed by the\n # relationship between df.batch_shape and affine.batch_shape. In\n # pseudocode the basic procedure is:\n # if df.batch_shape is scalar:\n # if affine.batch_shape is not scalar:\n # # broadcast distribution.sample so\n # # it has affine.batch_shape.\n # self.batch_shape = affine.batch_shape\n # else:\n # if affine.batch_shape is scalar:\n # # let affine broadcasting do its thing.\n # self.batch_shape = df.batch_shape\n # All of the above magic is actually handled by TransformedDistribution.\n # Here we really only need to collect the affine.batch_shape and decide\n # what we're going to pass in to TransformedDistribution's\n # (override) batch_shape arg.\n affine = bijectors.Affine(\n shift=loc,\n scale_identity_multiplier=scale_identity_multiplier,\n scale_diag=scale_diag,\n scale_tril=scale_tril,\n scale_perturb_factor=scale_perturb_factor,\n scale_perturb_diag=scale_perturb_diag,\n validate_args=validate_args,\n dtype=dtype)\n distribution = student_t.StudentT(\n df=df,\n loc=tf.zeros([], dtype=affine.dtype),\n scale=tf.ones([], dtype=affine.dtype))\n batch_shape, override_event_shape = (\n distribution_util.shapes_from_loc_and_scale(\n affine.shift, affine.scale))\n override_batch_shape = distribution_util.pick_vector(\n distribution.is_scalar_batch(), batch_shape,\n tf.constant([], dtype=tf.int32))\n super(_VectorStudentT, self).__init__(\n distribution=distribution,\n bijector=affine,\n batch_shape=override_batch_shape,\n event_shape=override_event_shape,\n validate_args=validate_args,\n name=name)\n self._parameters = parameters\n\n @property\n def df(self):\n \"\"\"Degrees of freedom in these Student's t distribution(s).\"\"\"\n return self.distribution.df\n\n @property\n def loc(self):\n \"\"\"Locations of these Student's t distribution(s).\"\"\"\n return self.bijector.shift\n\n @property\n def scale(self):\n \"\"\"Dense (batch) covariance matrix, if available.\"\"\"\n return self.bijector.scale\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for the MultivariateStudentTLinearOperator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.distributions import multivariate_student_t as mvt\nfrom tensorflow_probability.python.distributions import student_t\nfrom tensorflow_probability.python.internal import test_case\nfrom tensorflow_probability.python.internal import test_util as tfp_test_util\ntfe = tf.contrib.eager\n\n\[email protected]_all_tests_in_graph_and_eager_modes\nclass MultivariateStudentTTestFloat32StaticShape(\n test_case.TestCase, parameterized.TestCase,\n tfp_test_util.VectorDistributionTestHelpers):\n dtype = tf.float32\n use_static_shape = True\n\n def _input(self, value):\n \"\"\"Helper to create inputs with varied dtypes an static shapes.\"\"\"\n value = tf.cast(value, self.dtype)\n return tf.placeholder_with_default(\n value, shape=value.shape if self.use_static_shape else None)\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n @parameterized.parameters(\n # loc df diag batch_shape\n ([0., 0.], 1., [1., 1.], []),\n (0., 1., [1., 1.], []),\n ([[[0., 0.]]], 1., [1., 1.], [1, 1]),\n ([0., 0.], [[1.]], [1., 1.], [1, 1]),\n ([0., 0.], 1., [[[1., 1.]]], [1, 1]),\n ([[[0., 0.]]], [[1.]], [[[1., 1.]]], [1, 1]),\n )\n # pylint: enable=bad-whitespace\n # pyformat: enable\n def testBroadcasting(self, loc, df, diag, batch_shape):\n # Test that broadcasting works across all 3 parameters.\n loc = self._input(loc)\n df = self._input(df)\n diag = self._input(diag)\n\n scale = tf.linalg.LinearOperatorDiag(diag, is_positive_definite=True)\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=loc, df=df, scale=scale, validate_args=True)\n\n sample = dist.sample(3)\n log_prob = dist.log_prob(sample)\n mean = dist.mean()\n mode = dist.mode()\n cov = dist.covariance()\n std = dist.stddev()\n var = dist.variance()\n entropy = dist.entropy()\n if self.use_static_shape:\n self.assertAllEqual([3] + batch_shape + [2], sample.shape)\n self.assertAllEqual([3] + batch_shape, log_prob.shape)\n self.assertAllEqual(batch_shape + [2], mean.shape)\n self.assertAllEqual(batch_shape + [2], mode.shape)\n self.assertAllEqual(batch_shape + [2, 2], cov.shape)\n self.assertAllEqual(batch_shape + [2], std.shape)\n self.assertAllEqual(batch_shape + [2], var.shape)\n self.assertAllEqual(batch_shape, entropy.shape)\n self.assertAllEqual([2], dist.event_shape)\n self.assertAllEqual(batch_shape, dist.batch_shape)\n\n sample = self.evaluate(sample)\n log_prob = self.evaluate(log_prob)\n mean = self.evaluate(mean)\n mode = self.evaluate(mode)\n cov = self.evaluate(cov)\n std = self.evaluate(std)\n var = self.evaluate(var)\n entropy = 
self.evaluate(entropy)\n self.assertAllEqual([3] + batch_shape + [2], sample.shape)\n self.assertAllEqual([3] + batch_shape, log_prob.shape)\n self.assertAllEqual(batch_shape + [2], mean.shape)\n self.assertAllEqual(batch_shape + [2], mode.shape)\n self.assertAllEqual(batch_shape + [2, 2], cov.shape)\n self.assertAllEqual(batch_shape + [2], std.shape)\n self.assertAllEqual(batch_shape + [2], var.shape)\n self.assertAllEqual(batch_shape, entropy.shape)\n self.assertAllEqual([2], self.evaluate(dist.event_shape_tensor()))\n self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))\n\n def testNonPositiveDf(self):\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n \"`df` must be positive\"):\n self.evaluate(\n mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0.]),\n df=self._input(0.),\n scale=tf.linalg.LinearOperatorDiag(\n self._input([1.]), is_positive_definite=True),\n validate_args=True).df)\n\n def testBadScaleDType(self):\n with self.assertRaisesRegexp(TypeError,\n \"`scale` must have floating-point dtype.\"):\n mvt.MultivariateStudentTLinearOperator(\n loc=[0.],\n df=1.,\n scale=tf.linalg.LinearOperatorIdentity(\n num_rows=1, dtype=tf.int32, is_positive_definite=True))\n\n def testNotPositiveDefinite(self):\n with self.assertRaisesRegexp(ValueError,\n \"`scale` must be positive definite.\"):\n mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0.]),\n df=self._input(1.),\n scale=tf.linalg.LinearOperatorDiag(self._input([1.])),\n validate_args=True)\n\n def testMeanAllDefined(self):\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0., 0.]),\n df=self._input(2.),\n scale=tf.linalg.LinearOperatorDiag(self._input([1., 1.])))\n mean = self.evaluate(dist.mean())\n self.assertAllClose([0., 0.], mean)\n\n def testMeanSomeUndefinedNaNAllowed(self):\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([[0., 0.], [1., 1.]]),\n df=self._input([1., 2.]),\n scale=tf.linalg.LinearOperatorDiag(self._input([[1., 1.], [1., 1.]])),\n allow_nan_stats=True)\n mean = self.evaluate(dist.mean())\n self.assertAllClose([[np.nan, np.nan], [1., 1.]], mean)\n\n def testMeanSomeUndefinedNaNNotAllowed(self):\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([[0., 0.], [1., 1.]]),\n df=self._input([1., 2.]),\n scale=tf.linalg.LinearOperatorDiag(self._input([[1., 1.], [1., 1.]])),\n allow_nan_stats=False)\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n \"mean not defined for components of df <= 1\"):\n self.evaluate(dist.mean())\n\n def testMode(self):\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=[0., 0.], df=2., scale=tf.linalg.LinearOperatorDiag([[1., 1.]]))\n mode = self.evaluate(dist.mode())\n self.assertAllClose([[0., 0.]], mode)\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n @parameterized.parameters(\n # diag full expected_mvn_cov\n ([2., 2.], None, [[4., 0.], [0., 4.]]),\n (None, [[2., 1.], [1., 2.]], [[5., 4.], [4., 5.]]),\n )\n # pyformat: enable\n # pylint: enable=bad-whitespace\n def testCovarianceAllDefined(self,\n diag=None,\n full=None,\n expected_mvn_cov=None):\n if diag is not None:\n scale = tf.linalg.LinearOperatorDiag(self._input(diag))\n else:\n scale = tf.linalg.LinearOperatorFullMatrix(self._input(full))\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0., 0.]), df=self._input(3.), scale=scale)\n cov = self.evaluate(dist.covariance())\n self.assertAllClose(np.array(expected_mvn_cov) * 3. / (3. 
- 2.), cov)\n\n def testCovarianceSomeUndefinedNaNAllowed(self):\n scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0., 0.]),\n df=self._input([2., 1.]),\n scale=scale,\n allow_nan_stats=True)\n cov = self.evaluate(dist.covariance())\n self.assertAllClose(np.full([2, 2], np.inf), cov[0])\n self.assertAllClose(np.full([2, 2], np.nan), cov[1])\n\n def testCovarianceSomeUndefinedNaNNotAllowed(self):\n scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0., 0.]),\n df=self._input(1.),\n scale=scale,\n allow_nan_stats=False)\n with self.assertRaisesRegexp(\n tf.errors.InvalidArgumentError,\n \"covariance not defined for components of df <= 1\"):\n self.evaluate(dist.covariance())\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n @parameterized.parameters(\n # diag full update expected_mvn_var\n ([2., 2.], None, None, [4., 4.]),\n (None, [[2., 1.], [1., 2.]], None, [5., 5.]),\n ([2., 2.], None, [[1.],[1.]], [10., 10.]),\n )\n # pylint: enable=bad-whitespace\n # pyformat: enable\n def testVarianceStdAllDefined(self,\n diag=None,\n full=None,\n update=None,\n expected_mvn_var=None):\n if diag is not None:\n scale = tf.linalg.LinearOperatorDiag(self._input(diag))\n elif full is not None:\n scale = tf.linalg.LinearOperatorFullMatrix(self._input(full))\n if update is not None:\n scale = tf.linalg.LinearOperatorLowRankUpdate(scale, self._input(update))\n\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0., 0.]), df=self._input(3.), scale=scale)\n var = self.evaluate(dist.variance())\n std = self.evaluate(dist.stddev())\n # df = 3, so we expect the variance of the MVT to exceed MVN by a factor of\n # 3 / (3 - 2) = 3.\n self.assertAllClose(np.array(expected_mvn_var) * 3., var)\n self.assertAllClose(np.sqrt(np.array(expected_mvn_var) * 3.), std)\n\n def testVarianceStdSomeUndefinedNaNAllowed(self):\n scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0., 0.]),\n df=self._input([2., 1.]),\n scale=scale,\n allow_nan_stats=True)\n var = self.evaluate(dist.variance())\n std = self.evaluate(dist.stddev())\n self.assertAllClose([np.inf, np.inf], var[0])\n self.assertAllClose([np.nan, np.nan], var[1])\n self.assertAllClose([np.inf, np.inf], std[0])\n self.assertAllClose([np.nan, np.nan], std[1])\n\n def testVarianceStdSomeUndefinedNaNNotAllowed(self):\n scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0., 0.]),\n df=self._input(1.),\n scale=scale,\n allow_nan_stats=False)\n with self.assertRaisesRegexp(\n tf.errors.InvalidArgumentError,\n \"variance not defined for components of df <= 1\"):\n self.evaluate(dist.variance())\n with self.assertRaisesRegexp(\n tf.errors.InvalidArgumentError,\n \"standard deviation not defined for components of df <= 1\"):\n self.evaluate(dist.stddev())\n\n def testEntropy(self):\n scale = tf.linalg.LinearOperatorDiag(self._input([2., 2.]))\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0., 0.]), df=self._input([2., 3.]), scale=scale)\n # From Kotz S. and Nadarajah S. (2004). Multivariate t Distributions and\n # Their Applications. Cambridge University Press. p22.\n self.assertAllClose(\n [0.5 * np.log(16.) + 3.83788, 0.5 * np.log(16.) 
+ 3.50454],\n dist.entropy())\n\n def testSamplingConsistency(self):\n # pyformat: disable\n scale = tf.linalg.LinearOperatorFullMatrix(self._input(\n [[2., -1.],\n [-1., 2.]]))\n # pyformat: enable\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([1., 2.]), df=self._input(5.), scale=scale)\n self.run_test_sample_consistent_mean_covariance(\n sess_run_fn=self.evaluate, dist=dist)\n\n def testSamplingDeterministic(self):\n # pyformat: disable\n scale = tf.linalg.LinearOperatorFullMatrix(self._input(\n [[2., -1.],\n [-1., 2.]]))\n # pyformat: enable\n tf.set_random_seed(2)\n dist1 = mvt.MultivariateStudentTLinearOperator(\n loc=[1., 2.], df=5., scale=scale)\n samples1 = self.evaluate(dist1.sample(100, seed=1))\n tf.set_random_seed(2)\n dist2 = mvt.MultivariateStudentTLinearOperator(\n loc=[1., 2.], df=5., scale=scale)\n samples2 = self.evaluate(dist2.sample(100, seed=1))\n self.assertAllClose(samples1, samples2)\n\n def testSamplingFullyReparameterized(self):\n df = self._input(2.)\n loc = self._input([1., 2.])\n diag = self._input([3., 4.])\n with tf.GradientTape() as tape:\n tape.watch(df)\n tape.watch(loc)\n tape.watch(diag)\n scale = tf.linalg.LinearOperatorDiag(diag)\n dist = mvt.MultivariateStudentTLinearOperator(loc=loc, df=df, scale=scale)\n samples = dist.sample(100)\n grad_df, grad_loc, grad_diag = tape.gradient(samples, [df, loc, diag])\n self.assertIsNotNone(grad_df)\n self.assertIsNotNone(grad_loc)\n self.assertIsNotNone(grad_diag)\n\n def testSamplingSmallDfNoNaN(self):\n scale = tf.linalg.LinearOperatorDiag(self._input([1., 1.]))\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([0., 0.]),\n df=self._input([1e-1, 1e-5, 1e-10, 1e-20]),\n scale=scale)\n samples = dist.sample(int(2e5), seed=1)\n log_probs = dist.log_prob(samples)\n samples, log_probs = self.evaluate([samples, log_probs])\n self.assertTrue(np.all(np.isfinite(samples)))\n self.assertTrue(np.all(np.isfinite(log_probs)))\n\n def testLogProb(self):\n # Test that numerically integrating over some portion of the domain yields a\n # normalization constant of close to 1.\n # pyformat: disable\n scale = tf.linalg.LinearOperatorFullMatrix(\n self._input([[1., -0.5],\n [-0.5, 1.]]))\n # pyformat: enable\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([1., 1.]), df=self._input(5.), scale=scale)\n\n spacings = tf.cast(tf.linspace(-20., 20., 100), self.dtype)\n x, y = tf.meshgrid(spacings, spacings)\n points = tf.concat([x[..., tf.newaxis], y[..., tf.newaxis]], -1)\n log_probs = dist.log_prob(points)\n normalization = tf.exp(\n tf.reduce_logsumexp(log_probs)) * (spacings[1] - spacings[0])**2\n self.assertAllClose(1., self.evaluate(normalization), atol=1e-3)\n\n mode_log_prob = dist.log_prob(dist.mode())\n self.assertTrue(np.all(self.evaluate(mode_log_prob >= log_probs)))\n\n @parameterized.parameters(1., 3., 10.)\n def testHypersphereVolume(self, radius):\n # pyformat: disable\n scale = tf.linalg.LinearOperatorFullMatrix(\n self._input([[1., -0.5],\n [-0.5, 1.]]))\n # pyformat: enable\n dist = mvt.MultivariateStudentTLinearOperator(\n loc=self._input([1., 1.]), df=self._input(4.), scale=scale)\n self.run_test_sample_consistent_log_prob(\n sess_run_fn=self.evaluate,\n dist=dist,\n radius=radius,\n num_samples=int(5e6),\n rtol=0.05)\n\n def testLogProbSameFor1D(self):\n # 1D MVT is exactly a regular Student's T distribution.\n t_dist = student_t.StudentT(\n df=self._input(5.), loc=self._input(2.), scale=self._input(3.))\n scale = 
tf.linalg.LinearOperatorDiag([self._input(3.)])\n mvt_dist = mvt.MultivariateStudentTLinearOperator(\n loc=[self._input(2.)], df=self._input(5.), scale=scale)\n\n test_points = tf.cast(tf.linspace(-10.0, 10.0, 100), self.dtype)\n\n t_log_probs = self.evaluate(t_dist.log_prob(test_points))\n mvt_log_probs = self.evaluate(\n mvt_dist.log_prob(test_points[..., tf.newaxis]))\n\n self.assertAllClose(t_log_probs, mvt_log_probs)\n\n\nclass MultivariateStudentTTestFloat64StaticShape(\n MultivariateStudentTTestFloat32StaticShape):\n dtype = tf.float64\n use_static_shape = True\n\n\nclass MultivariateStudentTTestFloat32DynamicShape(\n MultivariateStudentTTestFloat32StaticShape):\n dtype = tf.float32\n use_static_shape = False\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Poisson distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow.python.framework import tensor_shape\n\n__all__ = [\n \"Poisson\",\n]\n\n\nclass Poisson(distribution.Distribution):\n \"\"\"Poisson distribution.\n\n The Poisson distribution is parameterized by an event `rate` parameter.\n\n #### Mathematical Details\n\n The probability mass function (pmf) is,\n\n ```none\n pmf(k; lambda, k >= 0) = (lambda^k / k!) / Z\n Z = exp(lambda).\n ```\n\n where `rate = lambda` and `Z` is the normalizing constant.\n\n \"\"\"\n\n def __init__(self,\n rate=None,\n log_rate=None,\n interpolate_nondiscrete=True,\n validate_args=False,\n allow_nan_stats=True,\n name=\"Poisson\"):\n \"\"\"Initialize a batch of Poisson distributions.\n\n Args:\n rate: Floating point tensor, the rate parameter. `rate` must be positive.\n Must specify exactly one of `rate` and `log_rate`.\n log_rate: Floating point tensor, the log of the rate parameter.\n Must specify exactly one of `rate` and `log_rate`.\n interpolate_nondiscrete: Python `bool`. When `False`,\n `log_prob` returns `-inf` (and `prob` returns `0`) for non-integer\n inputs. When `True`, `log_prob` evaluates the continuous function\n `k * log_rate - lgamma(k+1) - rate`, which matches the Poisson pmf\n at integer arguments `k` (note that this function is not itself\n a normalized probability log-density).\n Default value: `True`.\n validate_args: Python `bool`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n allow_nan_stats: Python `bool`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n Default value: `True`.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n ValueError: if none or both of `rate`, `log_rate` are specified.\n TypeError: if `rate` is not a float-type.\n TypeError: if `log_rate` is not a float-type.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name, values=[rate]) as name:\n if (rate is None) == (log_rate is None):\n raise ValueError(\"Must specify exactly one of `rate` and `log_rate`.\")\n elif log_rate is None:\n rate = tf.convert_to_tensor(\n rate,\n name=\"rate\",\n dtype=dtype_util.common_dtype([rate], preferred_dtype=tf.float32))\n if not rate.dtype.is_floating:\n raise TypeError(\"rate.dtype ({}) is a not a float-type.\".format(\n rate.dtype.name))\n with tf.control_dependencies([tf.assert_positive(rate)]\n if validate_args else []):\n self._rate = tf.identity(rate, name=\"rate\")\n self._log_rate = tf.log(rate, name=\"log_rate\")\n else:\n log_rate = tf.convert_to_tensor(\n log_rate,\n name=\"log_rate\",\n dtype=dtype_util.common_dtype([log_rate], tf.float32))\n if not log_rate.dtype.is_floating:\n raise TypeError(\"log_rate.dtype ({}) is a not a float-type.\".format(\n log_rate.dtype.name))\n self._rate = tf.exp(log_rate, name=\"rate\")\n self._log_rate = tf.convert_to_tensor(log_rate, name=\"log_rate\")\n\n self._interpolate_nondiscrete = interpolate_nondiscrete\n super(Poisson, self).__init__(\n dtype=self._rate.dtype,\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[self._rate],\n name=name)\n\n @property\n def rate(self):\n \"\"\"Rate parameter.\"\"\"\n return self._rate\n\n @property\n def log_rate(self):\n \"\"\"Log rate parameter.\"\"\"\n return self._log_rate\n\n @property\n def interpolate_nondiscrete(self):\n \"\"\"Interpolate (log) probs on non-integer inputs.\"\"\"\n return self._interpolate_nondiscrete\n\n def _batch_shape_tensor(self):\n return tf.shape(self.rate)\n\n def _batch_shape(self):\n return self.rate.shape\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tensor_shape.scalar()\n\n def _log_prob(self, x):\n log_probs = self._log_unnormalized_prob(x) - self._log_normalization()\n if not self.interpolate_nondiscrete:\n # Ensure the gradient wrt `rate` is zero at non-integer points.\n neg_inf = tf.fill(tf.shape(log_probs),\n value=np.array(\n -np.inf, dtype=log_probs.dtype.as_numpy_dtype))\n log_probs = tf.where(tf.is_inf(log_probs), neg_inf, log_probs)\n return log_probs\n\n def _log_cdf(self, x):\n return tf.log(self.cdf(x))\n\n def _cdf(self, x):\n # CDF is the probability that the Poisson variable is less or equal to x.\n # For fractional x, the CDF is equal to the CDF at n = floor(x).\n # For negative x, the CDF is zero, but tf.igammac gives NaNs, so we impute\n # the values and handle this case explicitly.\n safe_x = tf.maximum(x if self.interpolate_nondiscrete else tf.floor(x), 0.)\n cdf = tf.igammac(1. 
+ safe_x, self.rate)\n return tf.where(tf.broadcast_to(x < 0., tf.shape(cdf)),\n tf.zeros_like(cdf),\n cdf)\n\n def _log_normalization(self):\n return self.rate\n\n def _log_unnormalized_prob(self, x):\n # The log-probability at negative points is always -inf.\n # Catch such x's and set the output value accordingly.\n safe_x = tf.maximum(x if self.interpolate_nondiscrete else tf.floor(x), 0.)\n y = safe_x * self.log_rate - tf.lgamma(1. + safe_x)\n is_supported = tf.broadcast_to(tf.equal(x, safe_x), tf.shape(y))\n neg_inf = tf.fill(tf.shape(y),\n value=np.array(-np.inf, dtype=y.dtype.as_numpy_dtype))\n return tf.where(is_supported, y, neg_inf)\n\n def _mean(self):\n return tf.identity(self.rate)\n\n def _variance(self):\n return tf.identity(self.rate)\n\n @distribution_util.AppendDocstring(\n \"\"\"Note: when `rate` is an integer, there are actually two modes: `rate`\n and `rate - 1`. In this case we return the larger, i.e., `rate`.\"\"\")\n def _mode(self):\n return tf.floor(self.rate)\n\n def _sample_n(self, n, seed=None):\n return tf.random_poisson(self.rate, [n], dtype=self.dtype, seed=seed)\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Methods for fitting StructuralTimeSeries models to data.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\n# Dependency imports\nimport tensorflow as tf\n\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python import mcmc\nfrom tensorflow_probability.python.sts.internal import util as sts_util\n\n\ndef sample_uniform_initial_state(parameter,\n return_constrained=True,\n init_sample_shape=(),\n seed=None):\n \"\"\"Initialize from a uniform [-2, 2] distribution in unconstrained space.\n\n Args:\n parameter: `sts.Parameter` named tuple instance.\n return_constrained: if `True`, re-applies the constraining bijector\n to return initializations in the original domain. Otherwise, returns\n initializations in the unconstrained space.\n Default value: `True`.\n init_sample_shape: `sample_shape` of the sampled initializations.\n Default value: `[]`.\n seed: Python integer to seed the random number generator.\n\n Returns:\n uniform_initializer: `Tensor` of shape `concat([init_sample_shape,\n parameter.prior.batch_shape, transformed_event_shape])`, where\n `transformed_event_shape` is `parameter.prior.event_shape`, if\n `return_constrained=True`, and otherwise it is\n `parameter.bijector.inverse_event_shape(parameteter.prior.event_shape)`.\n \"\"\"\n unconstrained_prior_sample = parameter.bijector.inverse(\n parameter.prior.sample(init_sample_shape, seed=seed))\n uniform_initializer = 4 * tf.random_uniform(\n tf.shape(unconstrained_prior_sample),\n dtype=unconstrained_prior_sample.dtype,\n seed=seed) - 2\n if return_constrained:\n return parameter.bijector.forward(uniform_initializer)\n else:\n return uniform_initializer\n\n\ndef pad_batch_dimension_for_multiple_chains(observed_time_series,\n model,\n chain_batch_shape):\n \"\"\"\"Expand the observed time series with extra batch dimension(s).\"\"\"\n\n # Running with multiple chains introduces an extra batch dimension. In\n # general we also need to pad the observed time series with a matching batch\n # dimension.\n #\n # For example, suppose our model has batch shape [3, 4] and\n # the observed time series has shape `concat([[5], [3, 4], [100])`,\n # corresponding to `sample_shape`, `batch_shape`, and `num_timesteps`\n # respectively. 
The model will produce distributions with batch shape\n # `concat([chain_batch_shape, [3, 4]])`, so we pad `observed_time_series` to\n # have matching shape `[5, 1, 3, 4, 100]`, where the added `1` dimension\n # between the sample and batch shapes will broadcast to `chain_batch_shape`.\n\n observed_time_series = sts_util.maybe_expand_trailing_dim(\n observed_time_series) # Guarantee `event_ndims=2`\n event_ndims = 2 # event_shape = [num_timesteps, observation_size=1]\n\n model_batch_ndims = (model.batch_shape.ndims\n if model.batch_shape.ndims is not None\n else tf.shape(model.batch_shape_tensor())[0])\n\n # Compute ndims from chain_batch_shape.\n chain_batch_shape = tf.convert_to_tensor(\n chain_batch_shape, name='chain_batch_shape', dtype=tf.int32)\n if not chain_batch_shape.shape.is_fully_defined():\n raise ValueError('Batch shape must have static rank. (given: {})'.format(\n chain_batch_shape))\n if chain_batch_shape.shape.ndims == 0: # expand int `k` to `[k]`.\n chain_batch_shape = chain_batch_shape[tf.newaxis]\n chain_batch_ndims = chain_batch_shape.shape[0].value\n\n for _ in range(chain_batch_ndims):\n observed_time_series = tf.expand_dims(\n observed_time_series, -(model_batch_ndims + event_ndims + 1))\n return observed_time_series\n\n\ndef _build_trainable_posterior(param, initial_loc_fn):\n \"\"\"Built a transformed-normal variational dist over a parameter's support.\"\"\"\n loc = tf.get_variable(param.name + '_loc',\n initializer=lambda: initial_loc_fn(param),\n use_resource=True)\n scale = tf.nn.softplus(\n tf.get_variable(param.name + '_scale',\n initializer=lambda: -4 * tf.ones_like(loc),\n use_resource=True))\n\n q = tfd.Normal(loc=loc, scale=scale)\n\n # Ensure the `event_shape` of the variational distribution matches the\n # parameter.\n if (param.prior.event_shape.ndims is None\n or param.prior.event_shape.ndims > 0):\n q = tfd.Independent(\n q, reinterpreted_batch_ndims=param.prior.event_shape.ndims)\n\n # Transform to constrained parameter space.\n return tfd.TransformedDistribution(q, param.bijector)\n\n\ndef build_factored_variational_loss(model,\n observed_time_series,\n init_batch_shape=(),\n seed=None,\n name=None):\n \"\"\"Build a loss function for variational inference in STS models.\n\n Variational inference searches for the distribution within some family of\n approximate posteriors that minimizes a divergence between the approximate\n posterior `q(z)` and true posterior `p(z|observed_time_series)`. By converting\n inference to optimization, it's generally much faster than sampling-based\n inference algorithms such as HMC. The tradeoff is that the approximating\n family rarely contains the true posterior, so it may miss important aspects of\n posterior structure (in particular, dependence between variables) and should\n not be blindly trusted. Results may vary; it's generally wise to compare to\n HMC to evaluate whether inference quality is sufficient for your task at hand.\n\n This method constructs a loss function for variational inference using the\n Kullback-Liebler divergence `KL[q(z) || p(z|observed_time_series)]`, with an\n approximating family given by independent Normal distributions transformed to\n the appropriate parameter space for each parameter. Minimizing this loss (the\n negative ELBO) maximizes a lower bound on the log model evidence `-log\n p(observed_time_series)`. This is equivalent to the 'mean-field' method\n implemented in [1]. and is a standard approach. 
The resulting posterior\n approximations are unimodal; they will tend to underestimate posterior\n uncertainty when the true posterior contains multiple modes (the `KL[q||p]`\n divergence encourages choosing a single mode) or dependence between variables.\n\n Args:\n model: An instance of `StructuralTimeSeries` representing a\n time-series model. This represents a joint distribution over\n time-series and their parameters with batch shape `[b1, ..., bN]`.\n observed_time_series: `float` `Tensor` of shape\n `concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where\n `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`\n dimension may (optionally) be omitted if `num_timesteps > 1`.\n init_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of initial\n states to optimize in parallel.\n Default value: `()`. (i.e., just run a single optimization).\n seed: Python integer to seed the random number generator.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e., 'build_factored_variational_loss').\n\n Returns:\n variational_loss: `float` `Tensor` of shape\n `concat([init_batch_shape, model.batch_shape])`, encoding a stochastic\n estimate of an upper bound on the negative model evidence `-log p(y)`.\n Minimizing this loss performs variational inference; the gap between the\n variational bound and the true (generally unknown) model evidence\n corresponds to the divergence `KL[q||p]` between the approximate and true\n posterior.\n variational_distributions: `collections.OrderedDict` giving\n the approximate posterior for each model parameter. The keys are\n Python `str` parameter names in order, corresponding to\n `[param.name for param in model.parameters]`. The values are\n `tfd.Distribution` instances with batch shape\n `concat([init_batch_shape, model.batch_shape])`; these will typically be\n of the form `tfd.TransformedDistribution(tfd.Normal(...),\n bijector=param.bijector)`.\n\n #### Examples\n\n Assume we've built a structural time-series model:\n\n ```python\n day_of_week = tfp.sts.Seasonal(\n num_seasons=7,\n observed_time_series=observed_time_series,\n name='day_of_week')\n local_linear_trend = tfp.sts.LocalLinearTrend(\n observed_time_series=observed_time_series,\n name='local_linear_trend')\n model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],\n observed_time_series=observed_time_series)\n ```\n\n To run variational inference, we simply construct the loss and optimize\n it:\n\n ```python\n (variational_loss,\n variational_distributions) = tfp.sts.build_factored_variational_loss(\n model=model, observed_time_series=observed_time_series)\n\n train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for step in range(200):\n _, loss_ = sess.run((train_op, variational_loss))\n\n if step % 20 == 0:\n print(\"step {} loss {}\".format(step, loss_))\n\n posterior_samples_ = sess.run({\n param_name: q.sample(50)\n for param_name, q in variational_distributions.items()})\n ```\n\n As a more complex example, we might try to avoid local optima by optimizing\n from multiple initializations in parallel, and selecting the result with the\n lowest loss:\n\n ```python\n (variational_loss,\n variational_distributions) = tfp.sts.build_factored_variational_loss(\n model=model, observed_time_series=observed_time_series,\n init_batch_shape=[10])\n\n train_op = tf.train.AdamOptimizer(0.1).minimize(variational_loss)\n with 
tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for step in range(200):\n _, loss_ = sess.run((train_op, variational_loss))\n\n if step % 20 == 0:\n print(\"step {} losses {}\".format(step, loss_))\n\n # Draw multiple samples to reduce Monte Carlo error in the optimized\n # variational bounds.\n avg_loss = np.mean(\n [sess.run(variational_loss) for _ in range(25)], axis=0)\n best_posterior_idx = np.argmin(avg_loss, axis=0).astype(np.int32)\n ```\n\n #### References\n\n [1]: Alp Kucukelbir, Dustin Tran, Rajesh Ranganath, Andrew Gelman, and\n David M. Blei. Automatic Differentiation Variational Inference. In\n _Journal of Machine Learning Research_, 2017.\n https://arxiv.org/abs/1603.00788\n\n \"\"\"\n\n with tf.name_scope(name, 'build_factored_variational_loss',\n values=[observed_time_series]) as name:\n observed_time_series = tf.convert_to_tensor(observed_time_series,\n name='observed_time_series')\n seed = tfd.SeedStream(\n seed, salt='StructuralTimeSeries_build_factored_variational_loss')\n\n variational_distributions = collections.OrderedDict()\n variational_samples = []\n for param in model.parameters:\n def initial_loc_fn(param):\n return sample_uniform_initial_state(\n param, return_constrained=True,\n init_sample_shape=init_batch_shape,\n seed=seed())\n q = _build_trainable_posterior(param, initial_loc_fn=initial_loc_fn)\n variational_distributions[param.name] = q\n variational_samples.append(q.sample(seed=seed()))\n\n # Multiple initializations (similar to HMC chains) manifest as an extra\n # param batch dimension, so we need to add corresponding batch dimension(s)\n # to `observed_time_series`.\n observed_time_series = pad_batch_dimension_for_multiple_chains(\n observed_time_series, model, chain_batch_shape=init_batch_shape)\n\n # Construct the variational bound.\n log_prob_fn = model.joint_log_prob(observed_time_series)\n expected_log_joint = log_prob_fn(*variational_samples)\n entropy = tf.reduce_sum(\n [-q.log_prob(sample)\n for (q, sample) in zip(variational_distributions.values(),\n variational_samples)],\n axis=0)\n variational_loss = -(expected_log_joint + entropy) # -ELBO\n\n return variational_loss, variational_distributions\n\n\ndef _minimize_in_graph(build_loss_fn, num_steps=200, optimizer=None):\n \"\"\"Run an optimizer within the graph to minimize a loss function.\"\"\"\n optimizer = tf.train.AdamOptimizer(0.1) if optimizer is None else optimizer\n def train_loop_body(step):\n train_op = optimizer.minimize(\n build_loss_fn if tf.executing_eagerly() else build_loss_fn())\n return tf.tuple([tf.add(step, 1)], control_inputs=[train_op])\n return tf.while_loop(cond=lambda step: step < num_steps,\n body=train_loop_body,\n loop_vars=[tf.constant(0)])\n\n\ndef fit_with_hmc(model,\n observed_time_series,\n num_results=100,\n num_warmup_steps=50,\n num_leapfrog_steps=15,\n initial_state=None,\n initial_step_size=None,\n chain_batch_shape=(),\n num_variational_steps=150,\n variational_optimizer=None,\n seed=None,\n name=None):\n \"\"\"Draw posterior samples using Hamiltonian Monte Carlo (HMC).\n\n Markov chain Monte Carlo (MCMC) methods are considered the gold standard of\n Bayesian inference; under suitable conditions and in the limit of infinitely\n many draws they generate samples from the true posterior distribution. HMC [1]\n uses gradients of the model's log-density function to propose samples,\n allowing it to exploit posterior geometry. 
However, it is computationally more\n expensive than variational inference and relatively sensitive to tuning.\n\n This method attempts to provide a sensible default approach for fitting\n StructuralTimeSeries models using HMC. It first runs variational inference as\n a fast posterior approximation, and initializes the HMC sampler from the\n variational posterior, using the posterior standard deviations to set\n per-variable step sizes (equivalently, a diagonal mass matrix). During the\n warmup phase, it adapts the step size to target an acceptance rate of 0.75,\n which is thought to be in the desirable range for optimal mixing [2].\n\n\n Args:\n model: An instance of `StructuralTimeSeries` representing a\n time-series model. This represents a joint distribution over\n time-series and their parameters with batch shape `[b1, ..., bN]`.\n observed_time_series: `float` `Tensor` of shape\n `concat([sample_shape, model.batch_shape, [num_timesteps, 1]]) where\n `sample_shape` corresponds to i.i.d. observations, and the trailing `[1]`\n dimension may (optionally) be omitted if `num_timesteps > 1`.\n num_results: Integer number of Markov chain draws.\n Default value: `100`.\n num_warmup_steps: Integer number of steps to take before starting to\n collect results. The warmup steps are also used to adapt the step size\n towards a target acceptance rate of 0.75.\n Default value: `50`.\n num_leapfrog_steps: Integer number of steps to run the leapfrog integrator\n for. Total progress per HMC step is roughly proportional to\n `step_size * num_leapfrog_steps`.\n Default value: `15`.\n initial_state: Optional Python `list` of `Tensor`s, one for each model\n parameter, representing the initial state(s) of the Markov chain(s). These\n should have shape `concat([chain_batch_shape, param.prior.batch_shape,\n param.prior.event_shape])`. If `None`, the initial state is set\n automatically using a sample from a variational posterior.\n Default value: `None`.\n initial_step_size: Python `list` of `Tensor`s, one for each model parameter,\n representing the step size for the leapfrog integrator. Must\n broadcast with the shape of `initial_state`. Larger step sizes lead to\n faster progress, but too-large step sizes make rejection exponentially\n more likely. If `None`, the step size is set automatically using the\n standard deviation of a variational posterior.\n Default value: `None`.\n chain_batch_shape: Batch shape (Python `tuple`, `list`, or `int`) of chains\n to run in parallel.\n Default value: `[]` (i.e., a single chain).\n num_variational_steps: Python `int` number of steps to run the variational\n optimization to determine the initial state and step sizes.\n Default value: `200`.\n variational_optimizer: Optional `tf.train.Optimizer` instance to use in\n the variational optimization. 
If `None`, defaults to\n `tf.train.AdamOptimizer(0.1)`.\n Default value: `None`.\n seed: Python integer to seed the random number generator.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` (i.e., 'fit_with_hmc').\n\n Returns:\n samples: Python `list` of `Tensors` representing posterior samples of model\n parameters, with shapes `[concat([[num_results], chain_batch_shape,\n param.prior.batch_shape, param.prior.event_shape]) for param in\n model.parameters]`.\n kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of\n `Tensor`s representing internal calculations made within the HMC sampler.\n\n #### Examples\n\n Assume we've built a structural time-series model:\n\n ```python\n day_of_week = tfp.sts.Seasonal(\n num_seasons=7,\n observed_time_series=observed_time_series,\n name='day_of_week')\n local_linear_trend = tfp.sts.LocalLinearTrend(\n observed_time_series=observed_time_series,\n name='local_linear_trend')\n model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],\n observed_time_series=observed_time_series)\n ```\n\n To draw posterior samples using HMC under default settings:\n\n ```python\n samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n samples_, kernel_results_ = sess.run((samples, kernel_results))\n\n print(\"acceptance rate: {}\".format(\n np.mean(kernel_results_.inner_results.is_accepted, axis=0)))\n print(\"posterior means: {}\".format(\n {param.name: np.mean(param_draws, axis=0)\n for (param, param_draws) in zip(model.parameters, samples_)}))\n ```\n\n We can also run multiple chains. This may help diagnose convergence issues\n and allows us to exploit vectorization to draw samples more quickly, although\n warmup still requires the same number of sequential steps.\n\n ```python\n from matplotlib import pylab as plt\n\n samples, kernel_results = tfp.sts.fit_with_hmc(\n model, observed_time_series, chain_batch_shape=[10])\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n samples_, kernel_results_ = sess.run((samples, kernel_results))\n\n print(\"acceptance rate: {}\".format(\n np.mean(kernel_results_.inner_results.is_accepted, axis=0)))\n\n # Plot the sampled traces for each parameter. If the chains have mixed, their\n # traces should all cover the same region of state space, frequently crossing\n # over each other.\n for (param, param_draws) in zip(model.parameters, samples_):\n if param.prior.event_shape.ndims > 0:\n print(\"Only plotting traces for scalar parameters, skipping {}\".format(\n param.name))\n continue\n plt.figure(figsize=[10, 4])\n plt.title(param.name)\n plt.plot(param_draws)\n plt.ylabel(param.name)\n plt.xlabel(\"HMC step\")\n\n # Combining the samples from multiple chains into a single dimension allows\n # us to easily pass sampled parameters to downstream forecasting methods.\n combined_samples_ = [np.reshape(param_draws,\n [-1] + list(param_draws.shape[2:]))\n for param_draws in samples_]\n ```\n\n For greater flexibility, you may prefer to implement your own sampler using\n the TensorFlow Probability primitives in `tfp.mcmc`. 
The following recipe\n constructs a basic HMC sampler, using a `TransformedTransitionKernel` to\n incorporate constraints on the parameter space.\n\n ```python\n transformed_hmc_kernel = mcmc.TransformedTransitionKernel(\n inner_kernel=mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=model.joint_log_prob(observed_time_series),\n step_size=step_size,\n num_leapfrog_steps=num_leapfrog_steps,\n step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(\n num_adaptation_steps=num_adaptation_steps),\n state_gradients_are_stopped=True,\n seed=seed),\n bijector=[param.bijector for param in model.parameters])\n\n # Initialize from a Uniform[-2, 2] distribution in unconstrained space.\n initial_state = [tfp.sts.sample_uniform_initial_state(\n param, return_constrained=True) for param in model.parameters]\n\n samples, kernel_results = tfp.mcmc.sample_chain(\n kernel=transformed_hmc_kernel,\n num_results=num_results,\n current_state=initial_state,\n num_burnin_steps=num_warmup_steps)\n ```\n\n #### References\n\n [1]: Radford Neal. MCMC Using Hamiltonian Dynamics. _Handbook of Markov Chain\n Monte Carlo_, 2011. https://arxiv.org/abs/1206.1901\n [2] M.J. Betancourt, Simon Byrne, and Mark Girolami. Optimizing The\n Integrator Step Size for Hamiltonian Monte Carlo.\n https://arxiv.org/abs/1411.6669\n\n \"\"\"\n with tf.name_scope(name, 'fit_with_hmc',\n values=[observed_time_series]) as name:\n observed_time_series = tf.convert_to_tensor(observed_time_series,\n name='observed_time_series')\n seed = tfd.SeedStream(seed, salt='StructuralTimeSeries_fit_with_hmc')\n\n # Initialize state and step sizes from a variational posterior if not\n # specified.\n if initial_step_size is None or initial_state is None:\n\n # To avoid threading variational distributions through the training\n # while loop, we build our own copy here. 
`make_template` ensures\n # that our variational distributions share the optimized parameters.\n def make_variational():\n return build_factored_variational_loss(\n model, observed_time_series,\n init_batch_shape=chain_batch_shape, seed=seed())\n make_variational = tf.make_template('make_variational', make_variational)\n _, variational_distributions = make_variational()\n minimize_op = _minimize_in_graph(\n build_loss_fn=lambda: make_variational()[0], # return just the loss.\n num_steps=num_variational_steps,\n optimizer=variational_optimizer)\n\n with tf.control_dependencies([minimize_op]):\n if initial_state is None:\n initial_state = [tf.stop_gradient(d.sample())\n for d in variational_distributions.values()]\n\n # Set step sizes using the unconstrained variational distribution.\n if initial_step_size is None:\n initial_step_size = [\n transformed_q.distribution.stddev()\n for transformed_q in variational_distributions.values()]\n\n # Multiple chains manifest as an extra param batch dimension, so we need to\n # add a corresponding batch dimension to `observed_time_series`.\n observed_time_series = pad_batch_dimension_for_multiple_chains(\n observed_time_series, model, chain_batch_shape=chain_batch_shape)\n\n # When the initial step size depends on a variational optimization, we\n # can't initialize step size variables before the optimization runs.\n # Instead we initialize with a dummy value of the appropriate\n # shape, then wrap the HMC chain with `control_dependencies` to ensure the\n # variational step sizes are assigned before HMC actually runs.\n step_size = [tf.get_variable(\n initializer=tf.zeros_like(sample_uniform_initial_state(\n param, init_sample_shape=chain_batch_shape,\n return_constrained=False)),\n name='{}_step_size'.format(param.name),\n trainable=False,\n use_resource=True)\n for (param, ss) in zip(model.parameters, initial_step_size)]\n step_size_init_op = tf.group(\n [tf.assign(ss, initial_ss)\n for (ss, initial_ss) in zip(step_size, initial_step_size)])\n\n # Run HMC to sample from the posterior on parameters.\n with tf.control_dependencies([step_size_init_op]):\n samples, kernel_results = mcmc.sample_chain(\n num_results=num_results,\n current_state=initial_state,\n num_burnin_steps=num_warmup_steps,\n kernel=mcmc.TransformedTransitionKernel(\n inner_kernel=mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=model.joint_log_prob(observed_time_series),\n step_size=step_size,\n num_leapfrog_steps=num_leapfrog_steps,\n step_size_update_fn=mcmc.make_simple_step_size_update_policy(\n num_adaptation_steps=int(num_warmup_steps * 0.8),\n decrement_multiplier=0.1,\n increment_multiplier=0.1),\n state_gradients_are_stopped=True,\n seed=seed()),\n bijector=[param.bijector for param in model.parameters]),\n parallel_iterations=1 if seed is not None else 10)\n\n return samples, kernel_results\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Multivariate Normal distribution class initialized with a full covariance.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_probability.python.distributions import mvn_tril\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow.python.ops import control_flow_ops\n\n\n__all__ = [\n \"MultivariateNormalFullCovariance\",\n]\n\n\nclass MultivariateNormalFullCovariance(mvn_tril.MultivariateNormalTriL):\n \"\"\"The multivariate normal distribution on `R^k`.\n\n The Multivariate Normal distribution is defined over `R^k` and parameterized\n by a (batch of) length-`k` `loc` vector (aka \"mu\") and a (batch of) `k x k`\n `covariance_matrix` matrices that are the covariance.\n This is different than the other multivariate normals, which are parameterized\n by a matrix more akin to the standard deviation.\n\n #### Mathematical Details\n\n The probability density function (pdf) is, with `@` as matrix multiplication,\n\n ```none\n pdf(x; loc, covariance_matrix) = exp(-0.5 y) / Z,\n y = (x - loc)^T @ inv(covariance_matrix) @ (x - loc)\n Z = (2 pi)**(0.5 k) |det(covariance_matrix)|**(0.5).\n ```\n\n where:\n\n * `loc` is a vector in `R^k`,\n * `covariance_matrix` is an `R^{k x k}` symmetric positive definite matrix,\n * `Z` denotes the normalization constant.\n\n Additional leading dimensions (if any) in `loc` and `covariance_matrix` allow\n for batch dimensions.\n\n The MultivariateNormal distribution is a member of the [location-scale\n family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be\n constructed e.g. as,\n\n ```none\n X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.\n scale = Cholesky(covariance_matrix)\n Y = scale @ X + loc\n ```\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n\n # Initialize a single 3-variate Gaussian.\n mu = [1., 2, 3]\n cov = [[ 0.36, 0.12, 0.06],\n [ 0.12, 0.29, -0.13],\n [ 0.06, -0.13, 0.26]]\n mvn = tfd.MultivariateNormalFullCovariance(\n loc=mu,\n covariance_matrix=cov)\n\n mvn.mean().eval()\n # ==> [1., 2, 3]\n\n # Covariance agrees with covariance_matrix.\n mvn.covariance().eval()\n # ==> [[ 0.36, 0.12, 0.06],\n # [ 0.12, 0.29, -0.13],\n # [ 0.06, -0.13, 0.26]]\n\n # Compute the pdf of an observation in `R^3` ; return a scalar.\n mvn.prob([-1., 0, 1]).eval() # shape: []\n\n # Initialize a 2-batch of 3-variate Gaussians.\n mu = [[1., 2, 3],\n [11, 22, 33]] # shape: [2, 3]\n covariance_matrix = ... 
# shape: [2, 3, 3], symmetric, positive definite.\n mvn = tfd.MultivariateNormalFullCovariance(\n loc=mu,\n covariance=covariance_matrix)\n\n # Compute the pdf of two `R^3` observations; return a length-2 vector.\n x = [[-0.9, 0, 0.1],\n [-10, 0, 9]] # shape: [2, 3]\n mvn.prob(x).eval() # shape: [2]\n\n ```\n\n \"\"\"\n\n def __init__(self,\n loc=None,\n covariance_matrix=None,\n validate_args=False,\n allow_nan_stats=True,\n name=\"MultivariateNormalFullCovariance\"):\n \"\"\"Construct Multivariate Normal distribution on `R^k`.\n\n The `batch_shape` is the broadcast shape between `loc` and\n `covariance_matrix` arguments.\n\n The `event_shape` is given by last dimension of the matrix implied by\n `covariance_matrix`. The last dimension of `loc` (if provided) must\n broadcast with this.\n\n A non-batch `covariance_matrix` matrix is a `k x k` symmetric positive\n definite matrix. In other words it is (real) symmetric with all eigenvalues\n strictly positive.\n\n Additional leading dimensions (if any) will index batches.\n\n Args:\n loc: Floating-point `Tensor`. If this is set to `None`, `loc` is\n implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where\n `b >= 0` and `k` is the event size.\n covariance_matrix: Floating-point, symmetric positive definite `Tensor` of\n same `dtype` as `loc`. The strict upper triangle of `covariance_matrix`\n is ignored, so if `covariance_matrix` is not symmetric no error will be\n raised (unless `validate_args is True`). `covariance_matrix` has shape\n `[B1, ..., Bb, k, k]` where `b >= 0` and `k` is the event size.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n ValueError: if neither `loc` nor `covariance_matrix` are specified.\n \"\"\"\n parameters = dict(locals())\n\n # Convert the covariance_matrix up to a scale_tril and call MVNTriL.\n with tf.name_scope(name) as name:\n with tf.name_scope(\"init\", values=[loc, covariance_matrix]):\n dtype = dtype_util.common_dtype([loc, covariance_matrix], tf.float32)\n loc = loc if loc is None else tf.convert_to_tensor(\n loc, name=\"loc\", dtype=dtype)\n if covariance_matrix is None:\n scale_tril = None\n else:\n covariance_matrix = tf.convert_to_tensor(\n covariance_matrix, name=\"covariance_matrix\", dtype=dtype)\n if validate_args:\n covariance_matrix = control_flow_ops.with_dependencies([\n tf.assert_near(\n covariance_matrix,\n tf.matrix_transpose(covariance_matrix),\n message=\"Matrix was not symmetric\")\n ], covariance_matrix)\n # No need to validate that covariance_matrix is non-singular.\n # LinearOperatorLowerTriangular has an assert_non_singular method that\n # is called by the Bijector.\n # However, cholesky() ignores the upper triangular part, so we do need\n # to separately assert symmetric.\n scale_tril = tf.cholesky(covariance_matrix)\n super(MultivariateNormalFullCovariance, self).__init__(\n loc=loc,\n scale_tril=scale_tril,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=name)\n self._parameters = parameters\n"
] | [
[
"tensorflow.to_double",
"numpy.tanh",
"tensorflow.test.main",
"numpy.linspace"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.constant",
"tensorflow.zeros",
"tensorflow.ones",
"tensorflow.name_scope"
],
[
"tensorflow.linalg.LinearOperatorIdentity",
"numpy.log",
"tensorflow.concat",
"numpy.isfinite",
"tensorflow.placeholder_with_default",
"tensorflow.cast",
"tensorflow.test.main",
"numpy.full",
"tensorflow.meshgrid",
"tensorflow.set_random_seed",
"tensorflow.linspace",
"numpy.array",
"tensorflow.reduce_logsumexp",
"tensorflow.linalg.LinearOperatorDiag",
"tensorflow.GradientTape"
],
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.convert_to_tensor",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.random_poisson",
"tensorflow.floor",
"tensorflow.identity",
"tensorflow.equal",
"tensorflow.exp",
"tensorflow.is_inf",
"tensorflow.assert_positive",
"tensorflow.zeros_like",
"tensorflow.lgamma",
"tensorflow.name_scope",
"tensorflow.where",
"tensorflow.log",
"numpy.array",
"tensorflow.igammac"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.executing_eagerly",
"tensorflow.constant",
"tensorflow.control_dependencies",
"tensorflow.shape",
"tensorflow.assign",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.make_template",
"tensorflow.train.AdamOptimizer"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.matrix_transpose",
"tensorflow.name_scope",
"tensorflow.cholesky"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
kcui/elpips | [
"87a844451aec278042f33ccd98a1d230c0773b50"
] | [
"elpips/elpips.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport itertools \nimport numbers\n\nfrom . import networks\nfrom . import pnetlin\nfrom . util import switch_case_cond, switch_case_where, for_each, as_tuple\n\n\n### Configuring E-LPIPS.\n\t\nclass Config:\n\tdef __init__(self):\n\t\tself.metric = 'vgg_ensemble'\n\t\t\n\t\tself.enable_dropout = True\n\t\tself.dropout_keep_prob = 0.99\n\t\t\n\t\tself.enable_offset = True\n\t\tself.offset_max = 7\n\t\t\n\t\tself.enable_flip = True\n\t\tself.enable_swap = True\n\t\tself.enable_color_permutation = True\n\t\t\n\t\tself.enable_color_multiplication = True\n\t\tself.color_multiplication_mode = 'color' # 'brightness'\n\t\t\n\t\tself.enable_scale = True\n\t\tself.set_scale_levels(8)\n\t\t\n\t\t# Enables additional random transformation from Eilertsen.\n\t\tself.enable_perturbations = True\n\n\t\tself.set_translation_levels(2, 2) # +-2 pixels in each axis direction\n\t\tself.set_rotation_levels(1) # +- 1 degrees in each axis direction\n\t\tself.set_zoom_levels(0.03) # 1 +- 0.03 zoom\n\t\tself.set_shear_levels(1, 1)\n\n\t\t# Enables cropping instead of padding. Faster but may randomly skip edges of the input.\n\t\tself.fast_and_approximate = False\n\t\t\n\t\tself.batch_size = 1 \n\t\tself.average_over = 1 # How many runs to average over.\n\t\n\t\tself.dtype = tf.float32\n\t\t\n\tdef set_scale_levels(self, num_scales):\n\t\t# Crop_size / num_scales should be at least 64.\n\t\tself.num_scales = num_scales\n\t\tself.scale_probabilities = [1.0 / float(i)**2 for i in range(1, self.num_scales + 1)]\n\t\t\n\tdef set_scale_levels_by_image_size(self, image_h, image_w):\n\t\t'''Sets the number of scale levels based on the image size.'''\n\t\timage_size = min(image_h, image_w)\n\t\tself.set_scale_levels(max(1, image_size // 64))\n\t\n\tdef set_translation_levels(self, translation_x, translation_y):\n\t\tself.translation_level_x = translation_x\n\t\tself.translation_level_y = translation_y\n\t\n\tdef set_rotation_levels(self, rotation):\n\t\tself.rotation_level = rotation\n\n\tdef set_zoom_levels(self, zoom):\n\t\tself.zoom_level = zoom\n\n\tdef set_shear_levels(self, shear_x, shear_y):\n\t\tself.shear_level_x = shear_x\n\t\tself.shear_level_y = shear_y\n\n\tdef validate(self):\n\t\tassert self.metric in ('vgg_ensemble', 'vgg', 'squeeze', 'squeeze_ensemble_maxpool')\n\t\tassert self.color_multiplication_mode in ('color', 'brightness')\n\t\tassert self.num_scales == len(self.scale_probabilities)\n\t\t\n\n### Ensemble sampling and application to images.\n\ndef sample_ensemble(config):\n\t'''Samples a random transformation according to the config.\n\t Uses Latin Hypercube Sampling when batch size is greater than 1.'''\n\t\n\tN = config.batch_size\n\n\t# Offset randomization.\n\toffset_xy = tf.random_uniform([N, 2], minval=0, maxval=config.offset_max + 1, dtype=tf.int32)\n\t\t\t\n\t# Sample scale level.\n\tcumulative_sum = np.cumsum(config.scale_probabilities)\n\tu = cumulative_sum[-1] * tf.random_uniform([])\n\t\t\n\tscale_level = switch_case_cond(\n\t\t[(tf.less(u, x), (lambda j=i: tf.constant(j+1))) for i, x in enumerate(cumulative_sum[:-1])],\n\t\tlambda: tf.constant(len(cumulative_sum))\n\t)\n\tscale_level = tf.clip_by_value(scale_level, 1, config.num_scales)\t\t\n\t\n\t# Scale randomization.\n\tscale_offset_xy = tf.random_uniform([2], minval=0, maxval=scale_level, dtype=tf.int32)\n\t\n\t# Sample flips.\n\tflips = tf.range((N + 3)//4*4, dtype=tf.int32)\n\tflips = tf.floormod(flips, 4)\n\tflips = tf.random_shuffle(flips)\n\tflips = flips[:N]\n\t\t\n\t# Sample 
transposing.\n\tswap_xy = tf.random_uniform([], minval=0, maxval=2, dtype=tf.int32)\n\n\t# Color multiplication.\n\tdef sample_colors():\n\t\tcolor = tf.random_uniform([N], minval=0.0, maxval=1.0, dtype=config.dtype)\n\t\tcolor += tf.cast(tf.range(N), config.dtype)\n\t\tcolor /= tf.cast(N, config.dtype)\n\t\treturn tf.random_shuffle(color)\n\tcolors_r = tf.reshape(sample_colors(), [-1, 1, 1, 1])\n\tcolors_g = tf.reshape(sample_colors(), [-1, 1, 1, 1])\n\tcolors_b = tf.reshape(sample_colors(), [-1, 1, 1, 1])\n\t\n\tif config.color_multiplication_mode == 'color':\n\t\tcolor_factors = tf.concat([colors_r, colors_g, colors_b], axis=3)\n\telif config.color_multiplication_mode == 'brightness':\n\t\tcolor_factors = tf.concat([colors_r, colors_r, colors_r], axis=3)\n\telse:\n\t\traise Exception('Unknown color multiplication mode.')\n\t\n\tcolor_factors = 0.2 + 0.8 * color_factors\n\t\n\t# Sample permutations.\n\tpermutations = np.asarray(list(itertools.permutations(range(3))), dtype=np.int32)\n\trepeat_count = (N + len(permutations) - 1) // len(permutations)\n\tpermutations = tf.tile(tf.convert_to_tensor(permutations), tf.constant([repeat_count, 1]))\n\tpermutations = tf.reshape(tf.random_shuffle(permutations)[:N, :], [-1])\n\t\t\t\n\tbase_indices = 3 * tf.reshape(tf.tile(tf.reshape(tf.range(N), [-1, 1]), [1, 3]), [-1]) # [0, 0, 0, 3, 3, 3, 6, 6, 6, ...]\n\tpermutations += base_indices\n\t\t\t\t\t\t\n\treturn (offset_xy, flips, swap_xy, color_factors, permutations, scale_offset_xy, scale_level)\n\t\n\t\ndef apply_ensemble(config, sampled_ensemble_params, X):\n\t'''Applies the sampled random transformation to image X.'''\n\toffset_xy, flips, swap_xy, color_factors, permutations, scale_offset_xy, scale_level = sampled_ensemble_params\n\t\n\tshape = tf.shape(X)\n\tN, H, W, C = shape[0], shape[1], shape[2], shape[3]\n\n\t# Resize image.\n\tif config.enable_scale:\t\t\n\t\tdef downscale_nx_impl(image, scale):\n\t\t\tshape = tf.shape(image)\n\t\t\tN, H, W, C = shape[0], shape[1], shape[2], shape[3]\n\t\t\n\t\t\timage = tf.reshape(image, tf.stack([N, H//scale, scale, W//scale, scale, C]))\n\t\t\timage = tf.reduce_mean(image, axis=[2, 4])\n\t\t\treturn image\n\t\t\t\n\t\tdef downscale_1x():\n\t\t\treturn X\n\t\t\n\t\tdef downscale_nx():\n\t\t\tnonlocal X\n\n\t\t\tif config.fast_and_approximate:\n\t\t\t\t# Crop to a multiple of scale_level.\n\t\t\t\tcrop_left = scale_offset_xy[1]\n\t\t\t\tfull_width = (W - scale_level + 1) // scale_level * scale_level\n\t\t\t\tcrop_right = crop_left + full_width\n\t\t\t\n\t\t\t\tcrop_bottom = scale_offset_xy[0]\n\t\t\t\tfull_height = (H - scale_level + 1) // scale_level * scale_level\n\t\t\t\tcrop_top = crop_bottom + full_height\n\t\t\t\t\n\t\t\t\tX = X[:, crop_bottom:crop_top, crop_left:crop_right, :]\n\t\t\telse:\n\t\t\t\t# Pad to a multiple of scale_level.\n\t\t\t\tpad_left = scale_offset_xy[1]\n\t\t\t\tfull_width = (scale_level - 1 + W + scale_level - 1) // scale_level * scale_level\n\t\t\t\tpad_right = full_width - W - pad_left\n\t\t\t\n\t\t\t\tpad_bottom = scale_offset_xy[0]\n\t\t\t\tfull_height = (scale_level - 1 + H + scale_level - 1) // scale_level * scale_level\n\t\t\t\tpad_top = full_height - H - pad_bottom\n\t\t\t\t\n\t\t\t\tX = tf.pad(X, [(0, 0), (pad_bottom, pad_top), (pad_left, pad_right), (0, 0)], 'reflect')\n\t\t\treturn downscale_nx_impl(X, scale_level)\n\t\t\n\t\tX = tf.cond(tf.equal(scale_level, 1), downscale_1x, downscale_nx)\n\t\n\tif config.enable_perturbations:\n\t\ttx = tf.random_uniform(shape=[config.batch_size,1], 
minval=-config.translation_level_x, maxval=config.translation_level_x, dtype=tf.float32)\n\t\tty = tf.random_uniform(shape=[config.batch_size,1], minval=-config.translation_level_y, maxval=config.translation_level_y, dtype=tf.float32)\n\t\tr = tf.random_uniform(shape=[config.batch_size,1], minval=np.deg2rad(-config.rotation_level), maxval=np.deg2rad(config.rotation_level), dtype=tf.float32)\n\t\tz = tf.random_uniform(shape=[config.batch_size,1], minval=1.0-config.zoom_level, maxval=1.0+config.zoom_level, dtype=tf.float32)\n\t\thx = tf.random_uniform(shape=[config.batch_size,1], minval=np.deg2rad(-config.shear_level_x), maxval=np.deg2rad(config.shear_level_x), dtype=tf.float32)\n\t\thy = tf.random_uniform(shape=[config.batch_size,1], minval=np.deg2rad(-config.shear_level_y), maxval=np.deg2rad(config.shear_level_y), dtype=tf.float32)\n\t\ta = hx - r\n\t\tb = tf.cos(hx)\n\t\tc = hy + r\n\t\td = tf.cos(hy)\n\t\tm1 = tf.divide(z*tf.cos(a), b)\n\t\tm2 = tf.divide(z*tf.sin(a), b)\n\t\tm3 = tf.divide(W*b-W*z*tf.cos(a)+2*tx*z*tf.cos(a)-H*z*tf.sin(a)+2*ty*z*tf.sin(a), 2*b)\n\t\tm4 = tf.divide(z*tf.sin(c), d)\n\t\tm5 = tf.divide(z*tf.cos(c), d)\n\t\tm6 = tf.divide(H*d-H*z*tf.cos(c)+2*ty*z*tf.cos(c)-W*z*tf.sin(c)+2*tx*z*tf.sin(c), 2*d)\n\t\tm7 = tf.zeros([config.batch_size,2], 'float32')\n\t\ttransf = tf.concat([m1, m2, m3, m4, m5, m6, m7], 1)\n\t\ty_aug = tf.contrib.image.transform(y_aug, transf, interpolation='BILINEAR')\n\n\t# Pad image.\n\tif config.enable_offset:\n\t\tL = []\n\n\t\tshape = tf.shape(X)\n\t\tN, H, W, C = shape[0], shape[1], shape[2], shape[3]\n\n\t\tfor i in range(config.batch_size):\n\t\t\tif config.fast_and_approximate:\n\t\t\t\t# Crop.\n\t\t\t\tcrop_bottom = offset_xy[i, 0]\n\t\t\t\tcrop_left = offset_xy[i, 1]\n\t\t\t\tcrop_top = H - config.offset_max + crop_bottom\n\t\t\t\tcrop_right = W - config.offset_max + crop_left\n\t\t\t\n\t\t\t\tL.append(X[i, crop_bottom:crop_top, crop_left:crop_right, :])\n\t\t\telse:\n\t\t\t\t# Pad.\n\t\t\t\tpad_bottom = config.offset_max - offset_xy[i, 0]\n\t\t\t\tpad_left = config.offset_max - offset_xy[i, 1]\n\t\t\t\tpad_top = offset_xy[i, 0]\n\t\t\t\tpad_right = offset_xy[i, 1]\n\t\t\t\n\t\t\t\tL.append(tf.pad(X[i,:,:,:], tf.convert_to_tensor([(pad_bottom, pad_top), (pad_left, pad_right), (0, 0)], dtype=np.int32), 'reflect'))\n\t\tX = tf.stack(L, axis=0)\n\t\t\n\t# Apply flips.\t\t\n\tif config.enable_flip:\n\t\tdef flipX(X):\n\t\t\treturn X[:, :, ::-1, :]\n\t\tdef flipY(X):\n\t\t\treturn X[:, ::-1, :, :]\n\t\tdef flipXandY(X):\n\t\t\treturn X[:, ::-1, ::-1, :]\n\t\tX = switch_case_where(\n\t\t\t[(tf.equal(flips, 0), flipX(X)),\n\t\t\t(tf.equal(flips, 1), flipY(X)),\n\t\t\t(tf.equal(flips, 2), flipXandY(X))],\n\t\t\tX\n\t\t)\n\t\n\t# Apply transpose.\n\tif config.enable_swap:\n\t\tdef swapXY(X):\n\t\t\treturn tf.transpose(X, perm=tf.constant((0, 2, 1, 3)))\n\t\tX = tf.cond(tf.equal(swap_xy, 1), lambda: swapXY(X), lambda: X)\n\t\t\t\t\n\t# Apply color permutations.\n\tif config.enable_color_permutation:\n\t\tdef permuteColor(X, perms):\n\t\t\tshape = tf.shape(X)\n\t\t\tN, H, W, C = shape[0], shape[1], shape[2], shape[3]\n\n\t\t\tX = tf.transpose(X, [0, 3, 1, 2]) # NHWC -> NCHW\n\t\t\tX = tf.reshape(X, [N * C, H, W]) # (NC)HW\n\t\t\tX = tf.gather(X, perms) # Permute rows (colors)\n\t\t\tX = tf.reshape(X, [N, C, H, W]) # NCHW\n\t\t\tX = tf.transpose(X, [0, 2, 3, 1]) # NCHW -> NHWC\n\t\t\treturn X\n\n\t\tX = permuteColor(X, permutations)\n\t\n\tif config.enable_color_multiplication:\n\t\tX = X * tf.reshape(color_factors, [config.batch_size, 1, 1, 
3])\n\n\treturn X\n\t\n\t\n### E-LPIPS implementation.\n\t\nclass Metric:\n\tdef __init__(self, config,\n\t back_prop=True,\n\t trainable=False, use_lpips_dropout=False,\n\t custom_lpips_weights=None, custom_net_weights=None,\n\t\t\t\t custom_sample_ensemble=None):\n\t\t'''Perceptual image distance metric.\n\t\t\n\t\t PARAMS:\n\t\t config: Metric configuration. One of: elpips.elpips_vgg(), elpips.elpips_squeeze_maxpool(), elpips.lpips_vgg(), elpips.lpips_squeeze(). \n\t\t\t back_prop: Whether to store data for back_prop.\n\t\t\t \n\t\t\t trainable: Whether to make weights trainable. Options: 'lpips', 'net', 'both'.\n\t\t\t use_lpips_dropout: Whether to use dropout for activation differences. Potentially useful for training LPIPS weights.\n\t\t\t custom_lpips_weights: Custom NumPy array of LPIPS weights to use.\n\t\t\t custom_net_weights: Custom NumPy array of internal network weights to use. (For VGG, SqueezeNet, etc.)\n\t\t\t custom_sample_ensemble: Replace the input transformation sampling with something else. May be useful for e.g. variance reduction or deterministic input transformations.\n\t\t'''\n\t\tassert trainable in ('lpips', 'net', 'both', False)\n\t\t\n\t\tif trainable and back_prop != True:\n\t\t\traise Exception('Enable back_prop for training.')\n\t\t\n\t\tconfig.validate()\n\t\tself.config = config\n\t\t\n\t\tif config.metric in ('vgg', 'squeeze', 'vgg_ensemble', 'squeeze_ensemble_maxpool'):\n\t\t\tself.network = pnetlin.PNetLin(\n\t\t\t\tpnet_type=config.metric,\n\t\t\t\tuse_lpips_dropout=use_lpips_dropout,\n\t\t\t\tuse_net_dropout=self.config.enable_dropout,\n\t\t\t\tnet_dropout_keep_prob=self.config.dropout_keep_prob,\n\t\t\t\ttrainable=trainable,\n\t\t\t\tcustom_lpips_weights=custom_lpips_weights,\n\t\t\t\tcustom_net_weights=custom_net_weights,\n\t\t\t\tdtype=config.dtype\n\t\t\t)\n\t\telse:\n\t\t\traise Exception('Unknown metric type \\'{}\\''.format(config.metric))\n\t\t\t\n\t\tself.back_prop = back_prop\n\t\tself.sample_ensemble = custom_sample_ensemble if custom_sample_ensemble else sample_ensemble\n\t\t\n\tdef forward(self, image, reference):\n\t\t'''Evaluates distances between images in 'image' and 'reference' (data in NHWC order).\n\t\t Returns an N-element distance vector.\n\t\t \n\t\t If 'image' is a tuple, evaluates all the images in the tuple with the same input transformations\n\t\t and dropout as 'reference'. A different set of input transformations for each would result in\n\t\t unnecessary uncertainty in determining which of the images is closest to the reference. 
The\n\t\t returned value is a tuple of N-element distance vectors.'''\n\t\t \n\t\tif isinstance(image, list):\n\t\t\traise Exception('Parameter \\'image\\' must be a tensor or a tuple of tensors.')\n\t\t\n\t\timage_in = as_tuple(image)\n\t\t\n\t\tdef cond(i, loss_sum):\n\t\t\treturn tf.less(i, tf.cast(self.config.average_over, tf.int32))\n\t\t\n\t\tdef body(i, loss_sum):\n\t\t\tensemble = self.sample_ensemble(self.config)\n\t\t\t\n\t\t\tensemble_X = for_each(image_in, lambda X: apply_ensemble(self.config, ensemble, X))\n\t\t\tensemble_X = for_each(ensemble_X, lambda X: 2.0 * X - 1.0)\n\n\t\t\tensemble_R = apply_ensemble(self.config, ensemble, reference)\t\t\t\n\t\t\tensemble_R = 2.0 * ensemble_R - 1.0\n\t\t\t\n\t\t\tloss = self.network.forward(ensemble_X, ensemble_R)\n\t\t\tloss_sum += tf.stack(loss, axis=0)\n\t\t\t\n\t\t\tloss_sum.set_shape([len(image_in), self.config.batch_size])\n\t\t\t\n\t\t\treturn i+1, loss_sum\n\n\t\tif isinstance(self.config.average_over, numbers.Number) and self.config.average_over == 1:\n\t\t\t# Skip tf.while for trivial single iterations.\n\t\t\t_, loss_sum = body(0, tf.zeros([len(image_in), self.config.batch_size], dtype=self.config.dtype))\n\t\telse:\n\t\t\t# Run multiple times for any other average_over count.\n\t\t\t_, loss_sum = tf.while_loop(cond, body, (0, tf.zeros([len(image_in), self.config.batch_size], dtype=self.config.dtype)), back_prop=self.back_prop)\n\t\t\tloss_sum /= tf.cast(self.config.average_over, self.config.dtype)\n\n\t\t\n\t\tif isinstance(image, tuple):\n\t\t\treturn tuple((loss_sum[i, :] for i in range(len(image))))\n\t\telse:\n\t\t\treturn tf.reshape(loss_sum, [self.config.batch_size])\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.equal",
"numpy.cumsum",
"tensorflow.pad",
"tensorflow.random_shuffle",
"tensorflow.gather",
"tensorflow.contrib.image.transform",
"tensorflow.floormod",
"tensorflow.shape",
"tensorflow.less",
"numpy.deg2rad",
"tensorflow.clip_by_value",
"tensorflow.sin",
"tensorflow.constant",
"tensorflow.cos",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.transpose",
"tensorflow.reshape",
"tensorflow.random_uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
amitdev81296/tensorflow | [
"b8bbded3c633c56b876a5600c25fdf9224c0f1ad"
] | [
"Lib/site-packages/tensorflow/python/ops/gen_array_ops.py"
] | [
"\"\"\"Python wrappers around TensorFlow ops.\r\n\r\nThis file is MACHINE GENERATED! Do not edit.\r\n\"\"\"\r\n\r\nimport collections as _collections\r\nimport six as _six\r\n\r\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\r\nfrom tensorflow.python.eager import context as _context\r\nfrom tensorflow.python.eager import core as _core\r\nfrom tensorflow.python.eager import execute as _execute\r\nfrom tensorflow.python.framework import dtypes as _dtypes\r\nfrom tensorflow.python.framework import errors as _errors\r\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\r\n\r\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\r\n# Needed to trigger the call to _set_call_cpp_shape_fn.\r\nfrom tensorflow.python.framework import common_shapes as _common_shapes\r\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\r\nfrom tensorflow.python.framework import ops as _ops\r\nfrom tensorflow.python.framework import op_def_library as _op_def_library\r\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\r\nfrom tensorflow.python.util.tf_export import tf_export\r\n\r\n\r\ndef batch_matrix_band_part(input, num_lower, num_upper, name=None):\r\n r\"\"\"TODO: add doc.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n num_lower: A `Tensor` of type `int64`.\r\n num_upper: A `Tensor` of type `int64`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BatchMatrixBandPart\", input=input, num_lower=num_lower,\r\n num_upper=num_upper, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"BatchMatrixBandPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"BatchMatrixBandPart\", name, _ctx._post_execution_callbacks, input,\r\n num_lower, num_upper)\r\n return _result\r\n except _core._FallbackException:\r\n return batch_matrix_band_part_eager_fallback(\r\n input, num_lower, num_upper, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef batch_matrix_band_part_eager_fallback(input, num_lower, num_upper, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function batch_matrix_band_part\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n num_lower = _ops.convert_to_tensor(num_lower, _dtypes.int64)\r\n num_upper = _ops.convert_to_tensor(num_upper, _dtypes.int64)\r\n _inputs_flat = [input, num_lower, num_upper]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"BatchMatrixBandPart\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BatchMatrixBandPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef batch_matrix_diag(diagonal, name=None):\r\n r\"\"\"TODO: add doc.\r\n\r\n Args:\r\n diagonal: A `Tensor`.\r\n name: A name for the 
operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `diagonal`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BatchMatrixDiag\", diagonal=diagonal, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"BatchMatrixDiag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"BatchMatrixDiag\", name, _ctx._post_execution_callbacks, diagonal)\r\n return _result\r\n except _core._FallbackException:\r\n return batch_matrix_diag_eager_fallback(\r\n diagonal, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef batch_matrix_diag_eager_fallback(diagonal, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function batch_matrix_diag\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], _ctx)\r\n _inputs_flat = [diagonal]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"BatchMatrixDiag\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BatchMatrixDiag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef batch_matrix_diag_part(input, name=None):\r\n r\"\"\"TODO: add doc.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BatchMatrixDiagPart\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"BatchMatrixDiagPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"BatchMatrixDiagPart\", name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return batch_matrix_diag_part_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef batch_matrix_diag_part_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function batch_matrix_diag_part\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"BatchMatrixDiagPart\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BatchMatrixDiagPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef batch_matrix_set_diag(input, diagonal, name=None):\r\n r\"\"\"TODO: add doc.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n diagonal: A `Tensor`. Must have the same type as `input`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BatchMatrixSetDiag\", input=input, diagonal=diagonal, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"BatchMatrixSetDiag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"BatchMatrixSetDiag\", name, _ctx._post_execution_callbacks, input,\r\n diagonal)\r\n return _result\r\n except _core._FallbackException:\r\n return batch_matrix_set_diag_eager_fallback(\r\n input, diagonal, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef batch_matrix_set_diag_eager_fallback(input, diagonal, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function batch_matrix_set_diag\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([input, diagonal], _ctx)\r\n (input, diagonal) = _inputs_T\r\n _inputs_flat = [input, diagonal]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"BatchMatrixSetDiag\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BatchMatrixSetDiag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef batch_to_space(input, crops, block_size, name=None):\r\n r\"\"\"BatchToSpace for 4-D tensors of type T.\r\n\r\n This is a legacy version of the more general BatchToSpaceND.\r\r\n \r\r\n Rearranges (permutes) data from batch into blocks of spatial data, followed by\r\r\n cropping. This is the reverse transformation of SpaceToBatch. More specifically,\r\r\n this op outputs a copy of the input tensor where values from the `batch`\r\r\n dimension are moved in spatial blocks to the `height` and `width` dimensions,\r\r\n followed by cropping along the `height` and `width` dimensions.\r\n\r\n Args:\r\n input: A `Tensor`. 4-D tensor with shape\r\r\n `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\r\r\n depth]`. Note that the batch size of the input tensor must be divisible by\r\r\n `block_size * block_size`.\r\n crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\r\r\n how many elements to crop from the intermediate result across the spatial\r\r\n dimensions as follows:\r\r\n \r\r\n crops = [[crop_top, crop_bottom], [crop_left, crop_right]]\r\n block_size: An `int` that is `>= 2`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n block_size = _execute.make_int(block_size, \"block_size\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BatchToSpace\", input=input, crops=crops, block_size=block_size,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"block_size\",\r\n _op.get_attr(\"block_size\"), \"Tidx\", _op.get_attr(\"Tidx\"))\r\n _execute.record_gradient(\r\n \"BatchToSpace\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"BatchToSpace\",\r\n name, _ctx._post_execution_callbacks, input, crops, \"block_size\",\r\n block_size)\r\n return _result\r\n except _core._FallbackException:\r\n return batch_to_space_eager_fallback(\r\n input, crops, block_size=block_size, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef batch_to_space_eager_fallback(input, crops, block_size, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function batch_to_space\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n block_size = _execute.make_int(block_size, \"block_size\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tidx, (crops,) = _execute.args_to_matching_eager([crops], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, crops]\r\n _attrs = (\"T\", _attr_T, \"block_size\", block_size, \"Tidx\", _attr_Tidx)\r\n _result = _execute.execute(b\"BatchToSpace\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BatchToSpace\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('batch_to_space_nd', 'manip.batch_to_space_nd')\r\n@deprecated_endpoints('manip.batch_to_space_nd')\r\ndef batch_to_space_nd(input, block_shape, crops, name=None):\r\n r\"\"\"BatchToSpace for N-D tensors of type T.\r\n\r\n This operation reshapes the \"batch\" dimension 0 into `M + 1` dimensions of shape\r\r\n `block_shape + [batch]`, interleaves these blocks back into the grid defined by\r\r\n the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as\r\r\n the input. The spatial dimensions of this intermediate result are then\r\r\n optionally cropped according to `crops` to produce the output. This is the\r\r\n reverse of SpaceToBatch. See below for a precise description.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\r\r\n where spatial_shape has M dimensions.\r\n block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 1-D with shape `[M]`, all values must be >= 1.\r\n crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 2-D with shape `[M, 2]`, all values must be >= 0.\r\r\n `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input\r\r\n dimension `i + 1`, which corresponds to spatial dimension `i`. 
It is\r\r\n required that\r\r\n `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\r\r\n \r\r\n This operation is equivalent to the following steps:\r\r\n \r\r\n 1. Reshape `input` to `reshaped` of shape:\r\r\n [block_shape[0], ..., block_shape[M-1],\r\r\n batch / prod(block_shape),\r\r\n input_shape[1], ..., input_shape[N-1]]\r\r\n \r\r\n 2. Permute dimensions of `reshaped` to produce `permuted` of shape\r\r\n [batch / prod(block_shape),\r\r\n \r\r\n input_shape[1], block_shape[0],\r\r\n ...,\r\r\n input_shape[M], block_shape[M-1],\r\r\n \r\r\n input_shape[M+1], ..., input_shape[N-1]]\r\r\n \r\r\n 3. Reshape `permuted` to produce `reshaped_permuted` of shape\r\r\n [batch / prod(block_shape),\r\r\n \r\r\n input_shape[1] * block_shape[0],\r\r\n ...,\r\r\n input_shape[M] * block_shape[M-1],\r\r\n \r\r\n input_shape[M+1],\r\r\n ...,\r\r\n input_shape[N-1]]\r\r\n \r\r\n 4. Crop the start and end of dimensions `[1, ..., M]` of\r\r\n `reshaped_permuted` according to `crops` to produce the output of shape:\r\r\n [batch / prod(block_shape),\r\r\n \r\r\n input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\r\r\n ...,\r\r\n input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\r\r\n \r\r\n input_shape[M+1], ..., input_shape[N-1]]\r\r\n \r\r\n Some examples:\r\r\n \r\r\n (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and\r\r\n `crops = [[0, 0], [0, 0]]`:\r\r\n \r\r\n ```\r\r\n [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[1, 2, 2, 1]` and value:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2]], [[3], [4]]]]\r\r\n ```\r\r\n \r\r\n (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and\r\r\n `crops = [[0, 0], [0, 0]]`:\r\r\n \r\r\n ```\r\r\n [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[1, 2, 2, 3]` and value:\r\r\n \r\r\n ```\r\r\n x = [[[[1, 2, 3], [4, 5, 6]],\r\r\n [[7, 8, 9], [10, 11, 12]]]]\r\r\n ```\r\r\n \r\r\n (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and\r\r\n `crops = [[0, 0], [0, 0]]`:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [3]], [[9], [11]]],\r\r\n [[[2], [4]], [[10], [12]]],\r\r\n [[[5], [7]], [[13], [15]]],\r\r\n [[[6], [8]], [[14], [16]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[1, 4, 4, 1]` and value:\r\r\n \r\r\n ```\r\r\n x = [[[1], [2], [3], [4]],\r\r\n [[5], [6], [7], [8]],\r\r\n [[9], [10], [11], [12]],\r\r\n [[13], [14], [15], [16]]]\r\r\n ```\r\r\n \r\r\n (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and\r\r\n `crops = [[0, 0], [2, 0]]`:\r\r\n \r\r\n ```\r\r\n x = [[[[0], [1], [3]]], [[[0], [9], [11]]],\r\r\n [[[0], [2], [4]]], [[[0], [10], [12]]],\r\r\n [[[0], [5], [7]]], [[[0], [13], [15]]],\r\r\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[2, 2, 4, 1]` and value:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2], [3], [4]],\r\r\n [[5], [6], [7], [8]]],\r\r\n [[[9], [10], [11], [12]],\r\r\n [[13], [14], [15], [16]]]]\r\r\n ```\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BatchToSpaceND\", input=input, block_shape=block_shape, crops=crops,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tblock_shape\",\r\n _op.get_attr(\"Tblock_shape\"), \"Tcrops\", _op.get_attr(\"Tcrops\"))\r\n _execute.record_gradient(\r\n \"BatchToSpaceND\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"BatchToSpaceND\", name, _ctx._post_execution_callbacks, input,\r\n block_shape, crops)\r\n return _result\r\n except _core._FallbackException:\r\n return batch_to_space_nd_eager_fallback(\r\n input, block_shape, crops, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef batch_to_space_nd_eager_fallback(input, block_shape, crops, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function batch_to_space_nd\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tblock_shape, (block_shape,) = _execute.args_to_matching_eager([block_shape], _ctx, _dtypes.int32)\r\n _attr_Tcrops, (crops,) = _execute.args_to_matching_eager([crops], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, block_shape, crops]\r\n _attrs = (\"T\", _attr_T, \"Tblock_shape\", _attr_Tblock_shape, \"Tcrops\",\r\n _attr_Tcrops)\r\n _result = _execute.execute(b\"BatchToSpaceND\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BatchToSpaceND\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('bitcast')\r\ndef bitcast(input, type, name=None):\r\n r\"\"\"Bitcasts a tensor from one type to another without copying data.\r\n\r\n Given a tensor `input`, this operation returns a tensor that has the same buffer\r\r\n data as `input` with datatype `type`.\r\r\n \r\r\n If the input datatype `T` is larger than the output datatype `type` then the\r\r\n shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].\r\r\n \r\r\n If `T` is smaller than `type`, the operator requires that the rightmost\r\r\n dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from\r\r\n [..., sizeof(`type`)/sizeof(`T`)] to [...].\r\r\n \r\r\n *NOTE*: Bitcast is implemented as a low-level cast, so machines with different\r\r\n endian orderings will give different results.\r\n\r\n Args:\r\n input: A `Tensor`. 
Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.\r\n type: A `tf.DType` from: `tf.bfloat16, tf.half, tf.float32, tf.float64, tf.int64, tf.int32, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16, tf.complex64, tf.complex128, tf.qint8, tf.quint8, tf.qint16, tf.quint16, tf.qint32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `type`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n type = _execute.make_type(type, \"type\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Bitcast\", input=input, type=type, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"type\", _op.get_attr(\"type\"))\r\n _execute.record_gradient(\r\n \"Bitcast\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Bitcast\",\r\n name, _ctx._post_execution_callbacks, input, \"type\", type)\r\n return _result\r\n except _core._FallbackException:\r\n return bitcast_eager_fallback(\r\n input, type=type, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef bitcast_eager_fallback(input, type, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function bitcast\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n type = _execute.make_type(type, \"type\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"type\", type)\r\n _result = _execute.execute(b\"Bitcast\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Bitcast\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef broadcast_args(s0, s1, name=None):\r\n r\"\"\"Return the shape of s0 op s1 with broadcast.\r\n\r\n Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the\r\r\n broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.\r\n\r\n Args:\r\n s0: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n s1: A `Tensor`. Must have the same type as `s0`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `s0`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BroadcastArgs\", s0=s0, s1=s1, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"BroadcastArgs\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"BroadcastArgs\", name, _ctx._post_execution_callbacks, s0, s1)\r\n return _result\r\n except _core._FallbackException:\r\n return broadcast_args_eager_fallback(\r\n s0, s1, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef broadcast_args_eager_fallback(s0, s1, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function broadcast_args\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([s0, s1], _ctx, _dtypes.int32)\r\n (s0, s1) = _inputs_T\r\n _inputs_flat = [s0, s1]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"BroadcastArgs\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BroadcastArgs\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n_broadcast_gradient_args_outputs = [\"r0\", \"r1\"]\r\n_BroadcastGradientArgsOutput = _collections.namedtuple(\r\n \"BroadcastGradientArgs\", _broadcast_gradient_args_outputs)\r\n\r\n\r\ndef broadcast_gradient_args(s0, s1, name=None):\r\n r\"\"\"Return the reduction indices for computing gradients of s0 op s1 with broadcast.\r\n\r\n This is typically used by gradient computations for a broadcasting operation.\r\n\r\n Args:\r\n s0: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n s1: A `Tensor`. Must have the same type as `s0`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (r0, r1).\r\n\r\n r0: A `Tensor`. Has the same type as `s0`.\r\n r1: A `Tensor`. 
Has the same type as `s0`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BroadcastGradientArgs\", s0=s0, s1=s1, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"BroadcastGradientArgs\", _inputs_flat, _attrs, _result, name)\r\n _result = _BroadcastGradientArgsOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"BroadcastGradientArgs\", name, _ctx._post_execution_callbacks, s0, s1)\r\n _result = _BroadcastGradientArgsOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return broadcast_gradient_args_eager_fallback(\r\n s0, s1, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef broadcast_gradient_args_eager_fallback(s0, s1, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function broadcast_gradient_args\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([s0, s1], _ctx, _dtypes.int32)\r\n (s0, s1) = _inputs_T\r\n _inputs_flat = [s0, s1]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"BroadcastGradientArgs\", 2, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BroadcastGradientArgs\", _inputs_flat, _attrs, _result, name)\r\n _result = _BroadcastGradientArgsOutput._make(_result)\r\n return _result\r\n\r\n\r\n@tf_export('broadcast_to')\r\ndef broadcast_to(input, shape, name=None):\r\n r\"\"\"Broadcast an array for a compatible shape.\r\n\r\n Broadcasting is the process of making arrays to have compatible shapes\r\r\n for arithmetic operations. Two shapes are compatible if for each\r\r\n dimension pair they are either equal or one of them is one. When trying\r\r\n to broadcast a Tensor to a shape, it starts with the trailing dimensions,\r\r\n and works its way forward.\r\r\n \r\r\n For example,\r\r\n ```\r\r\n >>> x = tf.constant([1, 2, 3])\r\r\n >>> y = tf.broadcast_to(x, [3, 3])\r\r\n >>> sess.run(y)\r\r\n array([[1, 2, 3],\r\r\n [1, 2, 3],\r\r\n [1, 2, 3]], dtype=int32)\r\r\n ```\r\r\n In the above example, the input Tensor with the shape of `[1, 3]`\r\r\n is broadcasted to output Tensor with shape of `[3, 3]`.\r\n\r\n Args:\r\n input: A `Tensor`. A Tensor to broadcast.\r\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n An 1-D `int` Tensor. The shape of the desired output.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"BroadcastTo\", input=input, shape=shape, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tidx\", _op.get_attr(\"Tidx\"))\r\n _execute.record_gradient(\r\n \"BroadcastTo\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"BroadcastTo\",\r\n name, _ctx._post_execution_callbacks, input, shape)\r\n return _result\r\n except _core._FallbackException:\r\n return broadcast_to_eager_fallback(\r\n input, shape, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef broadcast_to_eager_fallback(input, shape, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function broadcast_to\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tidx, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, shape]\r\n _attrs = (\"T\", _attr_T, \"Tidx\", _attr_Tidx)\r\n _result = _execute.execute(b\"BroadcastTo\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"BroadcastTo\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('debugging.check_numerics', 'check_numerics')\r\n@deprecated_endpoints('check_numerics')\r\ndef check_numerics(tensor, message, name=None):\r\n r\"\"\"Checks a tensor for NaN and Inf values.\r\n\r\n When run, reports an `InvalidArgument` error if `tensor` has any values\r\r\n that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.\r\n\r\n Args:\r\n tensor: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.\r\n message: A `string`. Prefix of the error message.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `tensor`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n message = _execute.make_str(message, \"message\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"CheckNumerics\", tensor=tensor, message=message, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"message\", _op.get_attr(\"message\"))\r\n _execute.record_gradient(\r\n \"CheckNumerics\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"CheckNumerics\", name, _ctx._post_execution_callbacks, tensor,\r\n \"message\", message)\r\n return _result\r\n except _core._FallbackException:\r\n return check_numerics_eager_fallback(\r\n tensor, message=message, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef check_numerics_eager_fallback(tensor, message, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function check_numerics\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n message = _execute.make_str(message, \"message\")\r\n _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)\r\n _inputs_flat = [tensor]\r\n _attrs = (\"T\", _attr_T, \"message\", message)\r\n _result = _execute.execute(b\"CheckNumerics\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"CheckNumerics\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef concat(concat_dim, values, name=None):\r\n r\"\"\"Concatenates tensors along one dimension.\r\n\r\n Args:\r\n concat_dim: A `Tensor` of type `int32`.\r\n 0-D. The dimension along which to concatenate. Must be in the\r\r\n range [0, rank(values)).\r\n values: A list of at least 2 `Tensor` objects with the same type.\r\n The `N` Tensors to concatenate. Their ranks and types must match,\r\r\n and their sizes must match in all dimensions except `concat_dim`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `values`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n \"'concat' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Concat\", concat_dim=concat_dim, values=values, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"N\", _op.get_attr(\"N\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"Concat\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Concat\", name,\r\n _ctx._post_execution_callbacks, concat_dim, values)\r\n return _result\r\n except _core._FallbackException:\r\n return concat_eager_fallback(\r\n concat_dim, values, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef concat_eager_fallback(concat_dim, values, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function concat\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n \"'concat' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)\r\n concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)\r\n _inputs_flat = [concat_dim] + list(values)\r\n _attrs = (\"N\", _attr_N, \"T\", _attr_T)\r\n _result = _execute.execute(b\"Concat\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Concat\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef concat_offset(concat_dim, shape, name=None):\r\n r\"\"\"Computes offsets of concat inputs within its output.\r\n\r\n For example:\r\r\n \r\r\n ```\r\r\n # 'x' is [2, 2, 7]\r\r\n # 'y' is [2, 3, 7]\r\r\n # 'z' is [2, 5, 7]\r\r\n concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]\r\r\n ```\r\r\n \r\r\n This is typically used by gradient computations for a concat operation.\r\n\r\n Args:\r\n concat_dim: A `Tensor` of type `int32`.\r\n The dimension along which to concatenate.\r\n shape: A list of at least 2 `Tensor` objects with type `int32`.\r\n The `N` int32 vectors representing shape of tensors being concatenated.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A list with the same length as `shape` of `Tensor` objects with type `int32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if not isinstance(shape, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'shape' argument to \"\r\n \"'concat_offset' Op, not %r.\" % shape)\r\n _attr_N = len(shape)\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ConcatOffset\", concat_dim=concat_dim, shape=shape, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"N\", _op.get_attr(\"N\"))\r\n _execute.record_gradient(\r\n \"ConcatOffset\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = 
_pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"ConcatOffset\",\r\n name, _ctx._post_execution_callbacks, concat_dim, shape)\r\n return _result\r\n except _core._FallbackException:\r\n return concat_offset_eager_fallback(\r\n concat_dim, shape, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef concat_offset_eager_fallback(concat_dim, shape, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function concat_offset\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not isinstance(shape, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'shape' argument to \"\r\n \"'concat_offset' Op, not %r.\" % shape)\r\n _attr_N = len(shape)\r\n concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)\r\n shape = _ops.convert_n_to_tensor(shape, _dtypes.int32)\r\n _inputs_flat = [concat_dim] + list(shape)\r\n _attrs = (\"N\", _attr_N)\r\n _result = _execute.execute(b\"ConcatOffset\", _attr_N, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ConcatOffset\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n\r\ndef concat_v2(values, axis, name=None):\r\n r\"\"\"Concatenates tensors along one dimension.\r\n\r\n Args:\r\n values: A list of at least 2 `Tensor` objects with the same type.\r\n List of `N` Tensors to concatenate. Their ranks and types must match,\r\r\n and their sizes must match in all dimensions except `concat_dim`.\r\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 0-D. The dimension along which to concatenate. Must be in the\r\r\n range [-rank(values), rank(values)).\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `values`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n \"'concat_v2' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ConcatV2\", values=values, axis=axis, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"N\", _op.get_attr(\"N\"), \"T\", _op.get_attr(\"T\"), \"Tidx\",\r\n _op.get_attr(\"Tidx\"))\r\n _execute.record_gradient(\r\n \"ConcatV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"ConcatV2\",\r\n name, _ctx._post_execution_callbacks, values, axis)\r\n return _result\r\n except _core._FallbackException:\r\n return concat_v2_eager_fallback(\r\n values, axis, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef concat_v2_eager_fallback(values, axis, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function concat_v2\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n \"'concat_v2' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)\r\n _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)\r\n _inputs_flat = list(values) + [axis]\r\n _attrs = (\"N\", _attr_N, \"T\", _attr_T, \"Tidx\", _attr_Tidx)\r\n _result = _execute.execute(b\"ConcatV2\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ConcatV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef conjugate_transpose(x, perm, name=None):\r\n r\"\"\"Shuffle dimensions of x according to a permutation and conjugate the result.\r\n\r\n The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:\r\r\n `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`\r\r\n `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`\r\n\r\n Args:\r\n x: A `Tensor`.\r\n perm: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `x`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ConjugateTranspose\", x=x, perm=perm, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tperm\", _op.get_attr(\"Tperm\"))\r\n _execute.record_gradient(\r\n \"ConjugateTranspose\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"ConjugateTranspose\", name, _ctx._post_execution_callbacks, x, perm)\r\n return _result\r\n except _core._FallbackException:\r\n return conjugate_transpose_eager_fallback(\r\n x, perm, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef conjugate_transpose_eager_fallback(x, perm, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function conjugate_transpose\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n _attr_Tperm, (perm,) = _execute.args_to_matching_eager([perm], _ctx, _dtypes.int32)\r\n _inputs_flat = [x, perm]\r\n _attrs = (\"T\", _attr_T, \"Tperm\", _attr_Tperm)\r\n _result = _execute.execute(b\"ConjugateTranspose\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ConjugateTranspose\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef const(value, dtype, name=None):\r\n r\"\"\"Returns a constant tensor.\r\n\r\n Args:\r\n value: A `tf.TensorProto`. 
Attr `value` is the tensor to return.\r\n dtype: A `tf.DType`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `dtype`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n value = _execute.make_tensor(value, \"value\")\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Const\", value=value, dtype=dtype, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"value\", _op.get_attr(\"value\"), \"dtype\", _op.get_attr(\"dtype\"))\r\n _execute.record_gradient(\r\n \"Const\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Const\", name,\r\n _ctx._post_execution_callbacks, \"value\", value, \"dtype\", dtype)\r\n return _result\r\n except _core._FallbackException:\r\n return const_eager_fallback(\r\n value=value, dtype=dtype, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef const_eager_fallback(value, dtype, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function const\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n value = _execute.make_tensor(value, \"value\")\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n _inputs_flat = []\r\n _attrs = (\"value\", value, \"dtype\", dtype)\r\n _result = _execute.execute(b\"Const\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Const\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef debug_gradient_identity(input, name=None):\r\n r\"\"\"Identity op for gradient debugging.\r\n\r\n This op is hidden from public in Python. It is used by TensorFlow Debugger to\r\r\n register gradient tensors for gradient debugging.\r\r\n This op operates on non-reference-type tensors.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"DebugGradientIdentity\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"DebugGradientIdentity\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"DebugGradientIdentity\", name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return debug_gradient_identity_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef debug_gradient_identity_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function debug_gradient_identity\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"DebugGradientIdentity\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"DebugGradientIdentity\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef debug_gradient_ref_identity(input, name=None):\r\n r\"\"\"Identity op for gradient debugging.\r\n\r\n This op is hidden from public in Python. It is used by TensorFlow Debugger to\r\r\n register gradient tensors for gradient debugging.\r\r\n This op operates on reference-type tensors.\r\n\r\n Args:\r\n input: A mutable `Tensor`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A mutable `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"DebugGradientRefIdentity\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"DebugGradientRefIdentity\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n raise RuntimeError(\"debug_gradient_ref_identity op does not support eager execution. Arg 'output' is a ref.\")\r\n\r\n\r\n raise RuntimeError(\"debug_gradient_ref_identity op does not support eager execution. Arg 'output' is a ref.\")\r\n\r\ndef deep_copy(x, name=None):\r\n r\"\"\"Makes a copy of `x`.\r\n\r\n Args:\r\n x: A `Tensor`. The source tensor of type `T`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `x`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"DeepCopy\", x=x, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"DeepCopy\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"DeepCopy\",\r\n name, _ctx._post_execution_callbacks, x)\r\n return _result\r\n except _core._FallbackException:\r\n return deep_copy_eager_fallback(\r\n x, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef deep_copy_eager_fallback(x, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function deep_copy\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n _inputs_flat = [x]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"DeepCopy\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"DeepCopy\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef depth_to_space(input, block_size, data_format=\"NHWC\", name=None):\r\n r\"\"\"DepthToSpace for tensors of type T.\r\n\r\n Rearranges data from depth into blocks of spatial data.\r\r\n This is the reverse transformation of SpaceToDepth. More specifically,\r\r\n this op outputs a copy of the input tensor where values from the `depth`\r\r\n dimension are moved in spatial blocks to the `height` and `width` dimensions.\r\r\n The attr `block_size` indicates the input block size and how the data is moved.\r\r\n \r\r\n * Chunks of data of size `block_size * block_size` from depth are rearranged\r\r\n into non-overlapping blocks of size `block_size x block_size`\r\r\n * The width the output tensor is `input_depth * block_size`, whereas the\r\r\n height is `input_height * block_size`.\r\r\n * The Y, X coordinates within each block of the output image are determined\r\r\n by the high order component of the input channel index.\r\r\n * The depth of the input tensor must be divisible by\r\r\n `block_size * block_size`.\r\r\n \r\r\n The `data_format` attr specifies the layout of the input and output tensors\r\r\n with the following options:\r\r\n \"NHWC\": `[ batch, height, width, channels ]`\r\r\n \"NCHW\": `[ batch, channels, height, width ]`\r\r\n \"NCHW_VECT_C\":\r\r\n `qint8 [ batch, channels / 4, height, width, 4 ]`\r\r\n \r\r\n It is useful to consider the operation as transforming a 6-D Tensor.\r\r\n e.g. for data_format = NHWC,\r\r\n Each element in the input tensor can be specified via 6 coordinates,\r\r\n ordered by decreasing memory layout significance as:\r\r\n n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates\r\r\n within the input image, bX, bY means coordinates\r\r\n within the output block, oC means output channels).\r\r\n The output would be the input transposed to the following layout:\r\r\n n,iY,bY,iX,bX,oC\r\r\n \r\r\n This operation is useful for resizing the activations between convolutions\r\r\n (but keeping all data), e.g. 
instead of pooling. It is also useful for training\r\r\n purely convolutional models.\r\r\n \r\r\n For example, given an input of shape `[1, 1, 1, 4]`, data_format = \"NHWC\" and\r\r\n block_size = 2:\r\r\n \r\r\n ```\r\r\n x = [[[[1, 2, 3, 4]]]]\r\r\n \r\r\n ```\r\r\n \r\r\n This operation will output a tensor of shape `[1, 2, 2, 1]`:\r\r\n \r\r\n ```\r\r\n [[[[1], [2]],\r\r\n [[3], [4]]]]\r\r\n ```\r\r\n \r\r\n Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,\r\r\n the corresponding output will have 2x2 elements and will have a depth of\r\r\n 1 channel (1 = `4 / (block_size * block_size)`).\r\r\n The output element shape is `[2, 2, 1]`.\r\r\n \r\r\n For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.\r\r\n \r\r\n ```\r\r\n x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]\r\r\n ```\r\r\n \r\r\n This operation, for block size of 2, will return the following tensor of shape\r\r\n `[1, 2, 2, 3]`\r\r\n \r\r\n ```\r\r\n [[[[1, 2, 3], [4, 5, 6]],\r\r\n [[7, 8, 9], [10, 11, 12]]]]\r\r\n \r\r\n ```\r\r\n \r\r\n Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:\r\r\n \r\r\n ```\r\r\n x = [[[[1, 2, 3, 4],\r\r\n [5, 6, 7, 8]],\r\r\n [[9, 10, 11, 12],\r\r\n [13, 14, 15, 16]]]]\r\r\n ```\r\r\n \r\r\n the operator will return the following tensor of shape `[1 4 4 1]`:\r\r\n \r\r\n ```\r\r\n x = [[[ [1], [2], [5], [6]],\r\r\n [ [3], [4], [7], [8]],\r\r\n [ [9], [10], [13], [14]],\r\r\n [ [11], [12], [15], [16]]]]\r\r\n \r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`.\r\n block_size: An `int` that is `>= 2`.\r\n The size of the spatial block, same as in Space2Depth.\r\n data_format: An optional `string` from: `\"NHWC\", \"NCHW\", \"NCHW_VECT_C\"`. Defaults to `\"NHWC\"`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n block_size = _execute.make_int(block_size, \"block_size\")\r\n if data_format is None:\r\n data_format = \"NHWC\"\r\n data_format = _execute.make_str(data_format, \"data_format\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"DepthToSpace\", input=input, block_size=block_size,\r\n data_format=data_format, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"block_size\",\r\n _op.get_attr(\"block_size\"), \"data_format\",\r\n _op.get_attr(\"data_format\"))\r\n _execute.record_gradient(\r\n \"DepthToSpace\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"DepthToSpace\",\r\n name, _ctx._post_execution_callbacks, input, \"block_size\", block_size,\r\n \"data_format\", data_format)\r\n return _result\r\n except _core._FallbackException:\r\n return depth_to_space_eager_fallback(\r\n input, block_size=block_size, data_format=data_format, name=name,\r\n ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef depth_to_space_eager_fallback(input, block_size, data_format=\"NHWC\", name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function depth_to_space\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n block_size = _execute.make_int(block_size, \"block_size\")\r\n if data_format is None:\r\n data_format = \"NHWC\"\r\n data_format = _execute.make_str(data_format, \"data_format\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"block_size\", block_size, \"data_format\",\r\n data_format)\r\n _result = _execute.execute(b\"DepthToSpace\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"DepthToSpace\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('quantization.dequantize', 'dequantize')\r\n@deprecated_endpoints('dequantize')\r\ndef dequantize(input, min_range, max_range, mode=\"MIN_COMBINED\", name=None):\r\n r\"\"\"Dequantize the 'input' tensor into a float Tensor.\r\n\r\n [min_range, max_range] are scalar floats that specify the range for\r\r\n the 'input' data. The 'mode' attribute controls exactly which calculations are\r\r\n used to convert the float values to their quantized equivalents.\r\r\n \r\r\n In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:\r\r\n \r\r\n ```\r\r\n if T == qint8, in[i] += (range(T) + 1)/ 2.0\r\r\n out[i] = min_range + (in[i]* (max_range - min_range) / range(T))\r\r\n ```\r\r\n here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`\r\r\n \r\r\n *MIN_COMBINED Mode Example*\r\r\n \r\r\n If the input comes from a QuantizedRelu6, the output type is\r\r\n quint8 (range of 0-255) but the possible range of QuantizedRelu6 is\r\r\n 0-6. 
The min_range and max_range values are therefore 0.0 and 6.0.\r\r\n Dequantize on quint8 will take each value, cast to float, and multiply\r\r\n by 6 / 255.\r\r\n Note that if quantizedtype is qint8, the operation will additionally add\r\r\n each value by 128 prior to casting.\r\r\n \r\r\n If the mode is 'MIN_FIRST', then this approach is used:\r\r\n \r\r\n ```c++\r\r\n num_discrete_values = 1 << (# of bits in T)\r\r\n range_adjust = num_discrete_values / (num_discrete_values - 1)\r\r\n range = (range_max - range_min) * range_adjust\r\r\n range_scale = range / num_discrete_values\r\r\n const double offset_input = static_cast<double>(input) - lowest_quantized;\r\r\n result = range_min + ((input - numeric_limits<T>::min()) * range_scale)\r\r\n ```\r\r\n \r\r\n *SCALED mode Example*\r\r\n \r\r\n `SCALED` mode matches the quantization approach used in\r\r\n `QuantizeAndDequantize{V2|V3}`.\r\r\n \r\r\n If the mode is `SCALED`, we do not use the full range of the output type,\r\r\n choosing to elide the lowest possible value for symmetry (e.g., output range is\r\r\n -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to\r\r\n 0.\r\r\n \r\r\n We first find the range of values in our tensor. The\r\r\n range we use is always centered on 0, so we find m such that\r\r\n ```c++\r\r\n m = max(abs(input_min), abs(input_max))\r\r\n ```\r\r\n \r\r\n Our input tensor range is then `[-m, m]`.\r\r\n \r\r\n Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.\r\r\n If T is signed, this is\r\r\n ```\r\r\n num_bits = sizeof(T) * 8\r\r\n [min_fixed, max_fixed] =\r\r\n [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]\r\r\n ```\r\r\n \r\r\n Otherwise, if T is unsigned, the fixed-point range is\r\r\n ```\r\r\n [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]\r\r\n ```\r\r\n \r\r\n From this we compute our scaling factor, s:\r\r\n ```c++\r\r\n s = (2 * m) / (max_fixed - min_fixed)\r\r\n ```\r\r\n \r\r\n Now we can dequantize the elements of our tensor:\r\r\n ```c++\r\r\n result = input * s\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.\r\n min_range: A `Tensor` of type `float32`.\r\n The minimum scalar value possibly produced for the input.\r\n max_range: A `Tensor` of type `float32`.\r\n The maximum scalar value possibly produced for the input.\r\n mode: An optional `string` from: `\"MIN_COMBINED\", \"MIN_FIRST\", \"SCALED\"`. 
Defaults to `\"MIN_COMBINED\"`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if mode is None:\r\n mode = \"MIN_COMBINED\"\r\n mode = _execute.make_str(mode, \"mode\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Dequantize\", input=input, min_range=min_range, max_range=max_range,\r\n mode=mode, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"mode\", _op.get_attr(\"mode\"))\r\n _execute.record_gradient(\r\n \"Dequantize\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Dequantize\",\r\n name, _ctx._post_execution_callbacks, input, min_range, max_range,\r\n \"mode\", mode)\r\n return _result\r\n except _core._FallbackException:\r\n return dequantize_eager_fallback(\r\n input, min_range, max_range, mode=mode, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef dequantize_eager_fallback(input, min_range, max_range, mode=\"MIN_COMBINED\", name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function dequantize\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if mode is None:\r\n mode = \"MIN_COMBINED\"\r\n mode = _execute.make_str(mode, \"mode\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n min_range = _ops.convert_to_tensor(min_range, _dtypes.float32)\r\n max_range = _ops.convert_to_tensor(max_range, _dtypes.float32)\r\n _inputs_flat = [input, min_range, max_range]\r\n _attrs = (\"T\", _attr_T, \"mode\", mode)\r\n _result = _execute.execute(b\"Dequantize\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Dequantize\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('linalg.tensor_diag', 'diag')\r\n@deprecated_endpoints('diag')\r\ndef diag(diagonal, name=None):\r\n r\"\"\"Returns a diagonal tensor with a given diagonal values.\r\n\r\n Given a `diagonal`, this operation returns a tensor with the `diagonal` and\r\r\n everything else padded with zeros. The diagonal is computed as follows:\r\r\n \r\r\n Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of\r\r\n rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:\r\r\n \r\r\n `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 'diagonal' is [1, 2, 3, 4]\r\r\n tf.diag(diagonal) ==> [[1, 0, 0, 0]\r\r\n [0, 2, 0, 0]\r\r\n [0, 0, 3, 0]\r\r\n [0, 0, 0, 4]]\r\r\n ```\r\n\r\n Args:\r\n diagonal: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\r\n Rank k tensor where k is at most 1.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `diagonal`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Diag\", diagonal=diagonal, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"Diag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Diag\", name,\r\n _ctx._post_execution_callbacks, diagonal)\r\n return _result\r\n except _core._FallbackException:\r\n return diag_eager_fallback(\r\n diagonal, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef diag_eager_fallback(diagonal, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function diag\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], _ctx)\r\n _inputs_flat = [diagonal]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"Diag\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Diag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('linalg.tensor_diag_part', 'diag_part')\r\n@deprecated_endpoints('diag_part')\r\ndef diag_part(input, name=None):\r\n r\"\"\"Returns the diagonal part of the tensor.\r\n\r\n This operation returns a tensor with the `diagonal` part\r\r\n of the `input`. The `diagonal` part is computed as follows:\r\r\n \r\r\n Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a\r\r\n tensor of rank `k` with dimensions `[D1,..., Dk]` where:\r\r\n \r\r\n `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 'input' is [[1, 0, 0, 0]\r\r\n [0, 2, 0, 0]\r\r\n [0, 0, 3, 0]\r\r\n [0, 0, 0, 4]]\r\r\n \r\r\n tf.diag_part(input) ==> [1, 2, 3, 4]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.\r\n Rank k tensor where k is even and not zero.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"DiagPart\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"DiagPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"DiagPart\",\r\n name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return diag_part_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef diag_part_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function diag_part\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"DiagPart\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"DiagPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef edit_distance(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize=True, name=None):\r\n r\"\"\"Computes the (possibly normalized) Levenshtein Edit Distance.\r\n\r\n The inputs are variable-length sequences provided by SparseTensors\r\r\n (hypothesis_indices, hypothesis_values, hypothesis_shape)\r\r\n and\r\r\n (truth_indices, truth_values, truth_shape).\r\r\n \r\r\n The inputs are:\r\n\r\n Args:\r\n hypothesis_indices: A `Tensor` of type `int64`.\r\n The indices of the hypothesis list SparseTensor.\r\r\n This is an N x R int64 matrix.\r\n hypothesis_values: A `Tensor`.\r\n The values of the hypothesis list SparseTensor.\r\r\n This is an N-length vector.\r\n hypothesis_shape: A `Tensor` of type `int64`.\r\n The shape of the hypothesis list SparseTensor.\r\r\n This is an R-length vector.\r\n truth_indices: A `Tensor` of type `int64`.\r\n The indices of the truth list SparseTensor.\r\r\n This is an M x R int64 matrix.\r\n truth_values: A `Tensor`. Must have the same type as `hypothesis_values`.\r\n The values of the truth list SparseTensor.\r\r\n This is an M-length vector.\r\n truth_shape: A `Tensor` of type `int64`. truth indices, vector.\r\n normalize: An optional `bool`. 
Defaults to `True`.\r\n boolean (if true, edit distances are normalized by length of truth).\r\r\n \r\r\n The output is:\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if normalize is None:\r\n normalize = True\r\n normalize = _execute.make_bool(normalize, \"normalize\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"EditDistance\", hypothesis_indices=hypothesis_indices,\r\n hypothesis_values=hypothesis_values,\r\n hypothesis_shape=hypothesis_shape, truth_indices=truth_indices,\r\n truth_values=truth_values, truth_shape=truth_shape,\r\n normalize=normalize, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"normalize\", _op.get_attr(\"normalize\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"EditDistance\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"EditDistance\",\r\n name, _ctx._post_execution_callbacks, hypothesis_indices,\r\n hypothesis_values, hypothesis_shape, truth_indices, truth_values,\r\n truth_shape, \"normalize\", normalize)\r\n return _result\r\n except _core._FallbackException:\r\n return edit_distance_eager_fallback(\r\n hypothesis_indices, hypothesis_values, hypothesis_shape,\r\n truth_indices, truth_values, truth_shape, normalize=normalize,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef edit_distance_eager_fallback(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize=True, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function edit_distance\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if normalize is None:\r\n normalize = True\r\n normalize = _execute.make_bool(normalize, \"normalize\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([hypothesis_values, truth_values], _ctx)\r\n (hypothesis_values, truth_values) = _inputs_T\r\n hypothesis_indices = _ops.convert_to_tensor(hypothesis_indices, _dtypes.int64)\r\n hypothesis_shape = _ops.convert_to_tensor(hypothesis_shape, _dtypes.int64)\r\n truth_indices = _ops.convert_to_tensor(truth_indices, _dtypes.int64)\r\n truth_shape = _ops.convert_to_tensor(truth_shape, _dtypes.int64)\r\n _inputs_flat = [hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape]\r\n _attrs = (\"normalize\", normalize, \"T\", _attr_T)\r\n _result = _execute.execute(b\"EditDistance\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"EditDistance\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef empty(shape, dtype, init=False, name=None):\r\n r\"\"\"Creates a tensor with the given shape.\r\r\n\r\r\nThis operation creates a tensor of `shape` and `dtype`.\r\r\n\r\n Args:\r\n shape: A `Tensor` of type `int32`.\r\n 1-D. Represents the shape of the output tensor.\r\n dtype: A `tf.DType`.\r\n init: An optional `bool`. 
Defaults to `False`.\r\n If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initializethe tensor's content.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `dtype`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n if init is None:\r\n init = False\r\n init = _execute.make_bool(init, \"init\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Empty\", shape=shape, dtype=dtype, init=init, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"dtype\", _op.get_attr(\"dtype\"), \"init\", _op.get_attr(\"init\"))\r\n _execute.record_gradient(\r\n \"Empty\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Empty\", name,\r\n _ctx._post_execution_callbacks, shape, \"dtype\", dtype, \"init\", init)\r\n return _result\r\n except _core._FallbackException:\r\n return empty_eager_fallback(\r\n shape, dtype=dtype, init=init, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef empty_eager_fallback(shape, dtype, init=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function empty\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n if init is None:\r\n init = False\r\n init = _execute.make_bool(init, \"init\")\r\n shape = _ops.convert_to_tensor(shape, _dtypes.int32)\r\n _inputs_flat = [shape]\r\n _attrs = (\"dtype\", dtype, \"init\", init)\r\n _result = _execute.execute(b\"Empty\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Empty\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef ensure_shape(input, shape, name=None):\r\n r\"\"\"Ensures that the tensor's shape matches the expected shape.\r\n\r\n Raises an error if the input tensor's shape does not match the specified shape.\r\r\n Returns the input tensor otherwise.\r\n\r\n Args:\r\n input: A `Tensor`. A tensor, whose shape is to be validated.\r\n shape: A `tf.TensorShape` or list of `ints`.\r\n The expected (possibly partially specified) shape of the input tensor.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"EnsureShape\", input=input, shape=shape, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"shape\", _op.get_attr(\"shape\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"EnsureShape\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"EnsureShape\",\r\n name, _ctx._post_execution_callbacks, input, \"shape\", shape)\r\n return _result\r\n except _core._FallbackException:\r\n return ensure_shape_eager_fallback(\r\n input, shape=shape, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef ensure_shape_eager_fallback(input, shape, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function ensure_shape\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"shape\", shape, \"T\", _attr_T)\r\n _result = _execute.execute(b\"EnsureShape\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"EnsureShape\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef expand_dims(input, axis, name=None):\r\n r\"\"\"Inserts a dimension of 1 into a tensor's shape.\r\n\r\n Given a tensor `input`, this operation inserts a dimension of 1 at the\r\r\n dimension index `axis` of `input`'s shape. The dimension index `axis` starts at\r\r\n zero; if you specify a negative number for `axis` it is counted backward from\r\r\n the end.\r\r\n \r\r\n This operation is useful if you want to add a batch dimension to a single\r\r\n element. For example, if you have a single image of shape `[height, width,\r\r\n channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,\r\r\n which will make the shape `[1, height, width, channels]`.\r\r\n \r\r\n Other examples:\r\r\n \r\r\n ```\r\r\n # 't' is a tensor of shape [2]\r\r\n shape(expand_dims(t, 0)) ==> [1, 2]\r\r\n shape(expand_dims(t, 1)) ==> [2, 1]\r\r\n shape(expand_dims(t, -1)) ==> [2, 1]\r\r\n \r\r\n # 't2' is a tensor of shape [2, 3, 5]\r\r\n shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]\r\r\n shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]\r\r\n shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]\r\r\n ```\r\r\n \r\r\n This operation requires that:\r\r\n \r\r\n `-1-input.dims() <= dim <= input.dims()`\r\r\n \r\r\n This operation is related to `squeeze()`, which removes dimensions of\r\r\n size 1.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 0-D (scalar). Specifies the dimension index at which to\r\r\n expand the shape of `input`. Must be in the range\r\r\n `[-rank(input) - 1, rank(input)]`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
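# --- Illustrative usage sketch (not part of the generated wrapper file) ---
# The EnsureShape op above is exported as tf.ensure_shape in recent TF 1.x
# releases (an assumption about the installed version). It passes the input
# through unchanged but raises at run time if the actual shape is incompatible
# with the declared one.
import tensorflow as tf

features = tf.placeholder(tf.float32, shape=None)   # shape unknown when the graph is built
checked = tf.ensure_shape(features, [None, 3])      # fails at run time unless shape is [?, 3]
with tf.Session() as sess:
    print(sess.run(checked, feed_dict={features: [[1.0, 2.0, 3.0]]}))  # passes: shape (1, 3)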
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ExpandDims\", input=input, dim=axis, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tdim\", _op.get_attr(\"Tdim\"))\r\n _execute.record_gradient(\r\n \"ExpandDims\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"ExpandDims\",\r\n name, _ctx._post_execution_callbacks, input, axis)\r\n return _result\r\n except _core._FallbackException:\r\n return expand_dims_eager_fallback(\r\n input, axis, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef expand_dims_eager_fallback(input, axis, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function expand_dims\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tdim, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, axis]\r\n _attrs = (\"T\", _attr_T, \"Tdim\", _attr_Tdim)\r\n _result = _execute.execute(b\"ExpandDims\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ExpandDims\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('image.extract_image_patches', 'extract_image_patches')\r\n@deprecated_endpoints('extract_image_patches')\r\ndef extract_image_patches(images, ksizes, strides, rates, padding, name=None):\r\n r\"\"\"Extract `patches` from `images` and put them in the \"depth\" output dimension.\r\n\r\n Args:\r\n images: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\r\n 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.\r\n ksizes: A list of `ints` that has length `>= 4`.\r\n The size of the sliding window for each dimension of `images`.\r\n strides: A list of `ints` that has length `>= 4`.\r\n 1-D of length 4. How far the centers of two consecutive patches are in\r\r\n the images. Must be: `[1, stride_rows, stride_cols, 1]`.\r\n rates: A list of `ints` that has length `>= 4`.\r\n 1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the\r\r\n input stride, specifying how far two consecutive patch samples are in the\r\r\n input. Equivalent to extracting patches with\r\r\n `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by\r\r\n subsampling them spatially by a factor of `rates`. This is equivalent to\r\r\n `rate` in dilated (a.k.a. Atrous) convolutions.\r\n padding: A `string` from: `\"SAME\", \"VALID\"`.\r\n The type of padding algorithm to use.\r\r\n \r\r\n We specify the size-related attributes as:\r\r\n \r\r\n ```python\r\r\n ksizes = [1, ksize_rows, ksize_cols, 1]\r\r\n strides = [1, strides_rows, strides_cols, 1]\r\r\n rates = [1, rates_rows, rates_cols, 1]\r\r\n ```\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `images`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if not isinstance(ksizes, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'ksizes' argument to \"\r\n \"'extract_image_patches' Op, not %r.\" % ksizes)\r\n ksizes = [_execute.make_int(_i, \"ksizes\") for _i in ksizes]\r\n if not isinstance(strides, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'strides' argument to \"\r\n \"'extract_image_patches' Op, not %r.\" % strides)\r\n strides = [_execute.make_int(_i, \"strides\") for _i in strides]\r\n if not isinstance(rates, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'rates' argument to \"\r\n \"'extract_image_patches' Op, not %r.\" % rates)\r\n rates = [_execute.make_int(_i, \"rates\") for _i in rates]\r\n padding = _execute.make_str(padding, \"padding\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ExtractImagePatches\", images=images, ksizes=ksizes, strides=strides,\r\n rates=rates, padding=padding, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"ksizes\", _op.get_attr(\"ksizes\"), \"strides\",\r\n _op.get_attr(\"strides\"), \"rates\", _op.get_attr(\"rates\"), \"T\",\r\n _op.get_attr(\"T\"), \"padding\", _op.get_attr(\"padding\"))\r\n _execute.record_gradient(\r\n \"ExtractImagePatches\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"ExtractImagePatches\", name, _ctx._post_execution_callbacks, images,\r\n \"ksizes\", ksizes, \"strides\", strides, \"rates\", rates, \"padding\",\r\n padding)\r\n return _result\r\n except _core._FallbackException:\r\n return extract_image_patches_eager_fallback(\r\n images, ksizes=ksizes, strides=strides, rates=rates,\r\n padding=padding, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef extract_image_patches_eager_fallback(images, ksizes, strides, rates, padding, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function extract_image_patches\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not isinstance(ksizes, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'ksizes' argument to \"\r\n \"'extract_image_patches' Op, not %r.\" % ksizes)\r\n ksizes = [_execute.make_int(_i, \"ksizes\") for _i in ksizes]\r\n if not isinstance(strides, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'strides' argument to \"\r\n \"'extract_image_patches' Op, not %r.\" % strides)\r\n strides = [_execute.make_int(_i, \"strides\") for _i in strides]\r\n if not isinstance(rates, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'rates' argument to \"\r\n \"'extract_image_patches' Op, not %r.\" % rates)\r\n rates = [_execute.make_int(_i, \"rates\") for _i in rates]\r\n padding = _execute.make_str(padding, \"padding\")\r\n _attr_T, (images,) = _execute.args_to_matching_eager([images], _ctx)\r\n _inputs_flat = [images]\r\n _attrs = (\"ksizes\", ksizes, \"strides\", strides, \"rates\", rates, \"T\",\r\n _attr_T, \"padding\", padding)\r\n _result = _execute.execute(b\"ExtractImagePatches\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, 
name=name)\r\n _execute.record_gradient(\r\n \"ExtractImagePatches\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('extract_volume_patches')\r\ndef extract_volume_patches(input, ksizes, strides, padding, name=None):\r\n r\"\"\"Extract `patches` from `input` and put them in the \"depth\" output\r\r\ndimension. 3D extension of `extract_image_patches`.\r\r\n\r\n Args:\r\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\r\n 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.\r\n ksizes: A list of `ints` that has length `>= 5`.\r\n The size of the sliding window for each dimension of `input`.\r\n strides: A list of `ints` that has length `>= 5`.\r\n 1-D of length 5. How far the centers of two consecutive patches are in\r\r\n `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.\r\n padding: A `string` from: `\"SAME\", \"VALID\"`.\r\n The type of padding algorithm to use.\r\r\n \r\r\n We specify the size-related attributes as:\r\r\n \r\r\n ```python\r\r\n ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]\r\r\n strides = [1, stride_planes, strides_rows, strides_cols, 1]\r\r\n ```\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if not isinstance(ksizes, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'ksizes' argument to \"\r\n \"'extract_volume_patches' Op, not %r.\" % ksizes)\r\n ksizes = [_execute.make_int(_i, \"ksizes\") for _i in ksizes]\r\n if not isinstance(strides, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'strides' argument to \"\r\n \"'extract_volume_patches' Op, not %r.\" % strides)\r\n strides = [_execute.make_int(_i, \"strides\") for _i in strides]\r\n padding = _execute.make_str(padding, \"padding\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ExtractVolumePatches\", input=input, ksizes=ksizes, strides=strides,\r\n padding=padding, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"ksizes\", _op.get_attr(\"ksizes\"), \"strides\",\r\n _op.get_attr(\"strides\"), \"T\", _op.get_attr(\"T\"), \"padding\",\r\n _op.get_attr(\"padding\"))\r\n _execute.record_gradient(\r\n \"ExtractVolumePatches\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"ExtractVolumePatches\", name, _ctx._post_execution_callbacks, input,\r\n \"ksizes\", ksizes, \"strides\", strides, \"padding\", padding)\r\n return _result\r\n except _core._FallbackException:\r\n return extract_volume_patches_eager_fallback(\r\n input, ksizes=ksizes, strides=strides, padding=padding, name=name,\r\n ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef extract_volume_patches_eager_fallback(input, ksizes, strides, padding, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function extract_volume_patches\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not 
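# --- Illustrative usage sketch (not part of the generated wrapper file) ---
# The ExtractImagePatches op above is available as tf.extract_image_patches.
# This hedged toy example slices a 4x4 single-channel image into non-overlapping
# 2x2 patches; each patch is flattened into the depth dimension as described in
# the docstring.
import tensorflow as tf

images = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
patches = tf.extract_image_patches(images,
                                   ksizes=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   rates=[1, 1, 1, 1],
                                   padding="VALID")
with tf.Session() as sess:
    print(sess.run(patches).shape)  # (1, 2, 2, 4): 2x2 patch grid, 2*2*1 values per patch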
isinstance(ksizes, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'ksizes' argument to \"\r\n \"'extract_volume_patches' Op, not %r.\" % ksizes)\r\n ksizes = [_execute.make_int(_i, \"ksizes\") for _i in ksizes]\r\n if not isinstance(strides, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'strides' argument to \"\r\n \"'extract_volume_patches' Op, not %r.\" % strides)\r\n strides = [_execute.make_int(_i, \"strides\") for _i in strides]\r\n padding = _execute.make_str(padding, \"padding\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"ksizes\", ksizes, \"strides\", strides, \"T\", _attr_T, \"padding\",\r\n padding)\r\n _result = _execute.execute(b\"ExtractVolumePatches\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ExtractVolumePatches\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('quantization.fake_quant_with_min_max_args', 'fake_quant_with_min_max_args')\r\n@deprecated_endpoints('fake_quant_with_min_max_args')\r\ndef fake_quant_with_min_max_args(inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None):\r\n r\"\"\"Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.\r\n\r\n Attributes `[min; max]` define the clamping range for the `inputs` data.\r\r\n `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`\r\r\n when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and\r\r\n then de-quantized and output as floats in `[min; max]` interval.\r\r\n `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.\r\r\n \r\r\n Quantization is called fake since the output is still in floating point.\r\n\r\n Args:\r\n inputs: A `Tensor` of type `float32`.\r\n min: An optional `float`. Defaults to `-6`.\r\n max: An optional `float`. Defaults to `6`.\r\n num_bits: An optional `int`. Defaults to `8`.\r\n narrow_range: An optional `bool`. 
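# --- Illustrative usage sketch (not part of the generated wrapper file) ---
# The ExtractVolumePatches op above is the 3-D analogue of extract_image_patches
# and is exported as tf.extract_volume_patches; this assumes a TF build new
# enough to include that symbol. A 4x4x4 volume with a 2x2x2 window and stride 2
# yields a 2x2x2 grid of patches, each flattened to depth 8.
import tensorflow as tf

volume = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1])
patches = tf.extract_volume_patches(volume,
                                    ksizes=[1, 2, 2, 2, 1],
                                    strides=[1, 2, 2, 2, 1],
                                    padding="VALID")
# patches has shape [1, 2, 2, 2, 8]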
Defaults to `False`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if min is None:\r\n min = -6\r\n min = _execute.make_float(min, \"min\")\r\n if max is None:\r\n max = 6\r\n max = _execute.make_float(max, \"max\")\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"FakeQuantWithMinMaxArgs\", inputs=inputs, min=min, max=max,\r\n num_bits=num_bits, narrow_range=narrow_range, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"min\", _op.get_attr(\"min\"), \"max\", _op.get_attr(\"max\"),\r\n \"num_bits\", _op.get_attr(\"num_bits\"), \"narrow_range\",\r\n _op.get_attr(\"narrow_range\"))\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxArgs\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"FakeQuantWithMinMaxArgs\", name, _ctx._post_execution_callbacks,\r\n inputs, \"min\", min, \"max\", max, \"num_bits\", num_bits, \"narrow_range\",\r\n narrow_range)\r\n return _result\r\n except _core._FallbackException:\r\n return fake_quant_with_min_max_args_eager_fallback(\r\n inputs, min=min, max=max, num_bits=num_bits,\r\n narrow_range=narrow_range, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef fake_quant_with_min_max_args_eager_fallback(inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function fake_quant_with_min_max_args\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if min is None:\r\n min = -6\r\n min = _execute.make_float(min, \"min\")\r\n if max is None:\r\n max = 6\r\n max = _execute.make_float(max, \"max\")\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)\r\n _inputs_flat = [inputs]\r\n _attrs = (\"min\", min, \"max\", max, \"num_bits\", num_bits, \"narrow_range\",\r\n narrow_range)\r\n _result = _execute.execute(b\"FakeQuantWithMinMaxArgs\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxArgs\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('quantization.fake_quant_with_min_max_args_gradient', 'fake_quant_with_min_max_args_gradient')\r\n@deprecated_endpoints('fake_quant_with_min_max_args_gradient')\r\ndef fake_quant_with_min_max_args_gradient(gradients, inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None):\r\n r\"\"\"Compute gradients for a FakeQuantWithMinMaxArgs operation.\r\n\r\n Args:\r\n gradients: A `Tensor` of type `float32`.\r\n Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.\r\n inputs: A 
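# --- Illustrative usage sketch (not part of the generated wrapper file) ---
# The FakeQuantWithMinMaxArgs op above is exported as
# tf.quantization.fake_quant_with_min_max_args (per the tf_export decorator).
# Values are clamped to [min, max] and snapped to one of 2**num_bits levels, but
# the output stays float32, which is why the quantization is called "fake".
import tensorflow as tf

x = tf.constant([-8.0, -1.0, 0.0, 2.5, 10.0])
q = tf.quantization.fake_quant_with_min_max_args(x, min=-6.0, max=6.0, num_bits=8)
with tf.Session() as sess:
    print(sess.run(q))  # roughly [-6., -1., 0., 2.5, 6.], up to the quantization step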
`Tensor` of type `float32`.\r\n Values passed as inputs to the FakeQuantWithMinMaxArgs operation.\r\n min: An optional `float`. Defaults to `-6`.\r\n max: An optional `float`. Defaults to `6`.\r\n num_bits: An optional `int`. Defaults to `8`.\r\n narrow_range: An optional `bool`. Defaults to `False`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if min is None:\r\n min = -6\r\n min = _execute.make_float(min, \"min\")\r\n if max is None:\r\n max = 6\r\n max = _execute.make_float(max, \"max\")\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"FakeQuantWithMinMaxArgsGradient\", gradients=gradients, inputs=inputs,\r\n min=min, max=max, num_bits=num_bits, narrow_range=narrow_range,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"min\", _op.get_attr(\"min\"), \"max\", _op.get_attr(\"max\"),\r\n \"num_bits\", _op.get_attr(\"num_bits\"), \"narrow_range\",\r\n _op.get_attr(\"narrow_range\"))\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxArgsGradient\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"FakeQuantWithMinMaxArgsGradient\", name,\r\n _ctx._post_execution_callbacks, gradients, inputs, \"min\", min, \"max\",\r\n max, \"num_bits\", num_bits, \"narrow_range\", narrow_range)\r\n return _result\r\n except _core._FallbackException:\r\n return fake_quant_with_min_max_args_gradient_eager_fallback(\r\n gradients, inputs, min=min, max=max, num_bits=num_bits,\r\n narrow_range=narrow_range, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef fake_quant_with_min_max_args_gradient_eager_fallback(gradients, inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function fake_quant_with_min_max_args_gradient\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if min is None:\r\n min = -6\r\n min = _execute.make_float(min, \"min\")\r\n if max is None:\r\n max = 6\r\n max = _execute.make_float(max, \"max\")\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)\r\n inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)\r\n _inputs_flat = [gradients, inputs]\r\n _attrs = (\"min\", min, \"max\", max, \"num_bits\", num_bits, \"narrow_range\",\r\n narrow_range)\r\n _result = _execute.execute(b\"FakeQuantWithMinMaxArgsGradient\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxArgsGradient\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return 
_result\r\n\r\n\r\n@tf_export('quantization.fake_quant_with_min_max_vars', 'fake_quant_with_min_max_vars')\r\n@deprecated_endpoints('fake_quant_with_min_max_vars')\r\ndef fake_quant_with_min_max_vars(inputs, min, max, num_bits=8, narrow_range=False, name=None):\r\n r\"\"\"Fake-quantize the 'inputs' tensor of type float via global float scalars `min`\r\n\r\n and `max` to 'outputs' tensor of same shape as `inputs`.\r\r\n \r\r\n `[min; max]` define the clamping range for the `inputs` data.\r\r\n `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`\r\r\n when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and\r\r\n then de-quantized and output as floats in `[min; max]` interval.\r\r\n `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.\r\r\n \r\r\n This operation has a gradient and thus allows for training `min` and `max`\r\r\n values.\r\n\r\n Args:\r\n inputs: A `Tensor` of type `float32`.\r\n min: A `Tensor` of type `float32`.\r\n max: A `Tensor` of type `float32`.\r\n num_bits: An optional `int`. Defaults to `8`.\r\n narrow_range: An optional `bool`. Defaults to `False`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"FakeQuantWithMinMaxVars\", inputs=inputs, min=min, max=max,\r\n num_bits=num_bits, narrow_range=narrow_range, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num_bits\", _op.get_attr(\"num_bits\"), \"narrow_range\",\r\n _op.get_attr(\"narrow_range\"))\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxVars\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"FakeQuantWithMinMaxVars\", name, _ctx._post_execution_callbacks,\r\n inputs, min, max, \"num_bits\", num_bits, \"narrow_range\", narrow_range)\r\n return _result\r\n except _core._FallbackException:\r\n return fake_quant_with_min_max_vars_eager_fallback(\r\n inputs, min, max, num_bits=num_bits, narrow_range=narrow_range,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef fake_quant_with_min_max_vars_eager_fallback(inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function fake_quant_with_min_max_vars\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)\r\n min = _ops.convert_to_tensor(min, _dtypes.float32)\r\n max = _ops.convert_to_tensor(max, _dtypes.float32)\r\n _inputs_flat = [inputs, min, max]\r\n _attrs = (\"num_bits\", num_bits, 
\"narrow_range\", narrow_range)\r\n _result = _execute.execute(b\"FakeQuantWithMinMaxVars\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxVars\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n_fake_quant_with_min_max_vars_gradient_outputs = [\"backprops_wrt_input\",\r\n \"backprop_wrt_min\",\r\n \"backprop_wrt_max\"]\r\n_FakeQuantWithMinMaxVarsGradientOutput = _collections.namedtuple(\r\n \"FakeQuantWithMinMaxVarsGradient\",\r\n _fake_quant_with_min_max_vars_gradient_outputs)\r\n\r\n\r\n@tf_export('quantization.fake_quant_with_min_max_vars_gradient', 'fake_quant_with_min_max_vars_gradient')\r\n@deprecated_endpoints('fake_quant_with_min_max_vars_gradient')\r\ndef fake_quant_with_min_max_vars_gradient(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None):\r\n r\"\"\"Compute gradients for a FakeQuantWithMinMaxVars operation.\r\n\r\n Args:\r\n gradients: A `Tensor` of type `float32`.\r\n Backpropagated gradients above the FakeQuantWithMinMaxVars operation.\r\n inputs: A `Tensor` of type `float32`.\r\n Values passed as inputs to the FakeQuantWithMinMaxVars operation.\r\r\n min, max: Quantization interval, scalar floats.\r\n min: A `Tensor` of type `float32`.\r\n max: A `Tensor` of type `float32`.\r\n num_bits: An optional `int`. Defaults to `8`.\r\n The bitwidth of the quantization; between 2 and 8, inclusive.\r\n narrow_range: An optional `bool`. Defaults to `False`.\r\n Whether to quantize into 2^num_bits - 1 distinct values.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).\r\n\r\n backprops_wrt_input: A `Tensor` of type `float32`.\r\n backprop_wrt_min: A `Tensor` of type `float32`.\r\n backprop_wrt_max: A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"FakeQuantWithMinMaxVarsGradient\", gradients=gradients, inputs=inputs,\r\n min=min, max=max, num_bits=num_bits, narrow_range=narrow_range,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num_bits\", _op.get_attr(\"num_bits\"), \"narrow_range\",\r\n _op.get_attr(\"narrow_range\"))\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxVarsGradient\", _inputs_flat, _attrs, _result, name)\r\n _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"FakeQuantWithMinMaxVarsGradient\", name,\r\n _ctx._post_execution_callbacks, gradients, inputs, min, max,\r\n \"num_bits\", num_bits, \"narrow_range\", narrow_range)\r\n _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return fake_quant_with_min_max_vars_gradient_eager_fallback(\r\n gradients, inputs, min, max, num_bits=num_bits,\r\n narrow_range=narrow_range, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n 
_six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef fake_quant_with_min_max_vars_gradient_eager_fallback(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function fake_quant_with_min_max_vars_gradient\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)\r\n inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)\r\n min = _ops.convert_to_tensor(min, _dtypes.float32)\r\n max = _ops.convert_to_tensor(max, _dtypes.float32)\r\n _inputs_flat = [gradients, inputs, min, max]\r\n _attrs = (\"num_bits\", num_bits, \"narrow_range\", narrow_range)\r\n _result = _execute.execute(b\"FakeQuantWithMinMaxVarsGradient\", 3,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxVarsGradient\", _inputs_flat, _attrs, _result, name)\r\n _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result)\r\n return _result\r\n\r\n\r\n@tf_export('quantization.fake_quant_with_min_max_vars_per_channel', 'fake_quant_with_min_max_vars_per_channel')\r\n@deprecated_endpoints('fake_quant_with_min_max_vars_per_channel')\r\ndef fake_quant_with_min_max_vars_per_channel(inputs, min, max, num_bits=8, narrow_range=False, name=None):\r\n r\"\"\"Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,\r\n\r\n `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`\r\r\n to 'outputs' tensor of same shape as `inputs`.\r\r\n \r\r\n `[min; max]` define the clamping range for the `inputs` data.\r\r\n `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`\r\r\n when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and\r\r\n then de-quantized and output as floats in `[min; max]` interval.\r\r\n `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.\r\r\n \r\r\n This operation has a gradient and thus allows for training `min` and `max`\r\r\n values.\r\n\r\n Args:\r\n inputs: A `Tensor` of type `float32`.\r\n min: A `Tensor` of type `float32`.\r\n max: A `Tensor` of type `float32`.\r\n num_bits: An optional `int`. Defaults to `8`.\r\n narrow_range: An optional `bool`. 
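# --- Illustrative usage sketch (not part of the generated wrapper file) ---
# The FakeQuantWithMinMaxVars op defined above differs from the *_args variant in
# that min and max are tensors (typically variables), and the gradient op defined
# alongside it lets them be trained. A hedged sketch of how the public
# tf.quantization.fake_quant_with_min_max_vars wrapper might be used:
import tensorflow as tf

x = tf.constant([-8.0, -1.0, 0.0, 2.5, 10.0])
min_var = tf.Variable(-6.0)
max_var = tf.Variable(6.0)
q = tf.quantization.fake_quant_with_min_max_vars(x, min_var, max_var, num_bits=8)
# tf.gradients(q, [min_var, max_var]) is well defined, so the clamping range is learnable.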
Defaults to `False`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"FakeQuantWithMinMaxVarsPerChannel\", inputs=inputs, min=min, max=max,\r\n num_bits=num_bits, narrow_range=narrow_range, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num_bits\", _op.get_attr(\"num_bits\"), \"narrow_range\",\r\n _op.get_attr(\"narrow_range\"))\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxVarsPerChannel\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"FakeQuantWithMinMaxVarsPerChannel\", name,\r\n _ctx._post_execution_callbacks, inputs, min, max, \"num_bits\",\r\n num_bits, \"narrow_range\", narrow_range)\r\n return _result\r\n except _core._FallbackException:\r\n return fake_quant_with_min_max_vars_per_channel_eager_fallback(\r\n inputs, min, max, num_bits=num_bits, narrow_range=narrow_range,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef fake_quant_with_min_max_vars_per_channel_eager_fallback(inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function fake_quant_with_min_max_vars_per_channel\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)\r\n min = _ops.convert_to_tensor(min, _dtypes.float32)\r\n max = _ops.convert_to_tensor(max, _dtypes.float32)\r\n _inputs_flat = [inputs, min, max]\r\n _attrs = (\"num_bits\", num_bits, \"narrow_range\", narrow_range)\r\n _result = _execute.execute(b\"FakeQuantWithMinMaxVarsPerChannel\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxVarsPerChannel\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n_fake_quant_with_min_max_vars_per_channel_gradient_outputs = [\"backprops_wrt_input\",\r\n \"backprop_wrt_min\",\r\n \"backprop_wrt_max\"]\r\n_FakeQuantWithMinMaxVarsPerChannelGradientOutput = _collections.namedtuple(\r\n \"FakeQuantWithMinMaxVarsPerChannelGradient\",\r\n _fake_quant_with_min_max_vars_per_channel_gradient_outputs)\r\n\r\n\r\n@tf_export('quantization.fake_quant_with_min_max_vars_per_channel_gradient', 'fake_quant_with_min_max_vars_per_channel_gradient')\r\n@deprecated_endpoints('fake_quant_with_min_max_vars_per_channel_gradient')\r\ndef fake_quant_with_min_max_vars_per_channel_gradient(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None):\r\n r\"\"\"Compute gradients for a 
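# --- Illustrative usage sketch (not part of the generated wrapper file) ---
# The per-channel variant above takes min and max as vectors of shape [d], one
# clamping range per channel of the last dimension. A hedged toy example using
# the public tf.quantization.fake_quant_with_min_max_vars_per_channel wrapper:
import tensorflow as tf

x = tf.random_normal([4, 3])                       # last dimension d = 3
min_per_channel = tf.Variable([-6.0, -1.0, -3.0])  # one lower bound per channel
max_per_channel = tf.Variable([6.0, 1.0, 3.0])     # one upper bound per channel
q = tf.quantization.fake_quant_with_min_max_vars_per_channel(
    x, min_per_channel, max_per_channel, num_bits=8)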
FakeQuantWithMinMaxVarsPerChannel operation.\r\n\r\n Args:\r\n gradients: A `Tensor` of type `float32`.\r\n Backpropagated gradients above the FakeQuantWithMinMaxVars operation,\r\r\n shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.\r\n inputs: A `Tensor` of type `float32`.\r\n Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape\r\r\n same as `gradients`.\r\r\n min, max: Quantization interval, floats of shape `[d]`.\r\n min: A `Tensor` of type `float32`.\r\n max: A `Tensor` of type `float32`.\r\n num_bits: An optional `int`. Defaults to `8`.\r\n The bitwidth of the quantization; between 2 and 16, inclusive.\r\n narrow_range: An optional `bool`. Defaults to `False`.\r\n Whether to quantize into 2^num_bits - 1 distinct values.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).\r\n\r\n backprops_wrt_input: A `Tensor` of type `float32`.\r\n backprop_wrt_min: A `Tensor` of type `float32`.\r\n backprop_wrt_max: A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"FakeQuantWithMinMaxVarsPerChannelGradient\", gradients=gradients,\r\n inputs=inputs, min=min, max=max, num_bits=num_bits,\r\n narrow_range=narrow_range, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num_bits\", _op.get_attr(\"num_bits\"), \"narrow_range\",\r\n _op.get_attr(\"narrow_range\"))\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxVarsPerChannelGradient\", _inputs_flat, _attrs, _result, name)\r\n _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"FakeQuantWithMinMaxVarsPerChannelGradient\", name,\r\n _ctx._post_execution_callbacks, gradients, inputs, min, max,\r\n \"num_bits\", num_bits, \"narrow_range\", narrow_range)\r\n _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(\r\n gradients, inputs, min, max, num_bits=num_bits,\r\n narrow_range=narrow_range, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function fake_quant_with_min_max_vars_per_channel_gradient\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if narrow_range is None:\r\n narrow_range = False\r\n narrow_range = _execute.make_bool(narrow_range, \"narrow_range\")\r\n gradients = _ops.convert_to_tensor(gradients, _dtypes.float32)\r\n inputs = _ops.convert_to_tensor(inputs, _dtypes.float32)\r\n 
min = _ops.convert_to_tensor(min, _dtypes.float32)\r\n max = _ops.convert_to_tensor(max, _dtypes.float32)\r\n _inputs_flat = [gradients, inputs, min, max]\r\n _attrs = (\"num_bits\", num_bits, \"narrow_range\", narrow_range)\r\n _result = _execute.execute(b\"FakeQuantWithMinMaxVarsPerChannelGradient\", 3,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"FakeQuantWithMinMaxVarsPerChannelGradient\", _inputs_flat, _attrs, _result, name)\r\n _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result)\r\n return _result\r\n\r\n\r\n@tf_export('fill')\r\ndef fill(dims, value, name=None):\r\n r\"\"\"Creates a tensor filled with a scalar value.\r\n\r\n This operation creates a tensor of shape `dims` and fills it with `value`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # Output tensor has shape [2, 3].\r\r\n fill([2, 3], 9) ==> [[9, 9, 9]\r\r\n [9, 9, 9]]\r\r\n ```\r\r\n \r\r\n `tf.fill` differs from `tf.constant` in a few ways:\r\r\n \r\r\n * `tf.fill` only supports scalar contents, whereas `tf.constant` supports\r\r\n Tensor values.\r\r\n * `tf.fill` creates an Op in the computation graph that constructs the actual\r\r\n Tensor value at runtime. This is in contrast to `tf.constant` which embeds\r\r\n the entire Tensor into the graph with a `Const` node.\r\r\n * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes\r\r\n based on other runtime Tensors, unlike `tf.constant`.\r\n\r\n Args:\r\n dims: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 1-D. Represents the shape of the output tensor.\r\n value: A `Tensor`. 0-D (scalar). Value to fill the returned tensor.\r\r\n \r\r\n @compatibility(numpy)\r\r\n Equivalent to np.full\r\r\n @end_compatibility\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `value`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Fill\", dims=dims, value=value, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"index_type\",\r\n _op.get_attr(\"index_type\"))\r\n _execute.record_gradient(\r\n \"Fill\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Fill\", name,\r\n _ctx._post_execution_callbacks, dims, value)\r\n return _result\r\n except _core._FallbackException:\r\n return fill_eager_fallback(\r\n dims, value, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef fill_eager_fallback(dims, value, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function fill\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)\r\n _attr_index_type, (dims,) = _execute.args_to_matching_eager([dims], _ctx, _dtypes.int32)\r\n _inputs_flat = [dims, value]\r\n _attrs = (\"T\", _attr_T, \"index_type\", _attr_index_type)\r\n _result = _execute.execute(b\"Fill\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Fill\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef gather(params, indices, validate_indices=True, name=None):\r\n r\"\"\"Gather slices from `params` according to `indices`.\r\n\r\n `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\r\r\n Produces an output tensor with shape `indices.shape + params.shape[1:]` where:\r\r\n \r\r\n ```python\r\r\n # Scalar indices\r\r\n output[:, ..., :] = params[indices, :, ... :]\r\r\n \r\r\n # Vector indices\r\r\n output[i, :, ..., :] = params[indices[i], :, ... :]\r\r\n \r\r\n # Higher rank indices\r\r\n output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]\r\r\n ```\r\r\n \r\r\n If `indices` is a permutation and `len(indices) == params.shape[0]` then\r\r\n this operation will permute `params` accordingly.\r\r\n \r\r\n `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in\r\r\n `indices` are always validated to be within range. If assigned to GPU,\r\r\n out-of-bound indices result in safe but unspecified behavior, which may include\r\r\n raising an error.\r\r\n \r\r\n <div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\r\r\n <img style=\"width:100%\" src=\"https://www.tensorflow.org/images/Gather.png\" alt>\r\r\n </div>\r\n\r\n Args:\r\n params: A `Tensor`.\r\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n validate_indices: An optional `bool`. Defaults to `True`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `params`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if validate_indices is None:\r\n validate_indices = True\r\n validate_indices = _execute.make_bool(validate_indices, \"validate_indices\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Gather\", params=params, indices=indices,\r\n validate_indices=validate_indices, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"validate_indices\", _op.get_attr(\"validate_indices\"), \"Tparams\",\r\n _op.get_attr(\"Tparams\"), \"Tindices\", _op.get_attr(\"Tindices\"))\r\n _execute.record_gradient(\r\n \"Gather\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Gather\", name,\r\n _ctx._post_execution_callbacks, params, indices, \"validate_indices\",\r\n validate_indices)\r\n return _result\r\n except _core._FallbackException:\r\n return gather_eager_fallback(\r\n params, indices, validate_indices=validate_indices, name=name,\r\n ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef gather_eager_fallback(params, indices, validate_indices=True, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function gather\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if validate_indices is None:\r\n validate_indices = True\r\n validate_indices = _execute.make_bool(validate_indices, \"validate_indices\")\r\n _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], _ctx)\r\n _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)\r\n _inputs_flat = [params, indices]\r\n _attrs = (\"validate_indices\", validate_indices, \"Tparams\", _attr_Tparams,\r\n \"Tindices\", _attr_Tindices)\r\n _result = _execute.execute(b\"Gather\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Gather\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('gather_nd', 'manip.gather_nd')\r\n@deprecated_endpoints('manip.gather_nd')\r\ndef gather_nd(params, indices, name=None):\r\n r\"\"\"Gather slices from `params` into a Tensor with shape specified by `indices`.\r\n\r\n `indices` is an K-dimensional integer tensor, best thought of as a\r\r\n (K-1)-dimensional tensor of indices into `params`, where each element defines a\r\r\n slice of `params`:\r\r\n \r\r\n output[\\\\(i_0, ..., i_{K-2}\\\\)] = params[indices[\\\\(i_0, ..., i_{K-2}\\\\)]]\r\r\n \r\r\n Whereas in `tf.gather` `indices` defines slices into the first\r\r\n dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the\r\r\n first `N` dimensions of `params`, where `N = indices.shape[-1]`.\r\r\n \r\r\n The last dimension of `indices` can be at most the rank of\r\r\n `params`:\r\r\n \r\r\n indices.shape[-1] <= params.rank\r\r\n \r\r\n The last dimension of `indices` corresponds to elements\r\r\n (if `indices.shape[-1] == params.rank`) or slices\r\r\n (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`\r\r\n of `params`. 
The output tensor has shape\r\r\n \r\r\n indices.shape[:-1] + params.shape[indices.shape[-1]:]\r\r\n \r\r\n Note that on CPU, if an out of bound index is found, an error is returned.\r\r\n On GPU, if an out of bound index is found, a 0 is stored in the\r\r\n corresponding output value.\r\r\n \r\r\n Some examples below.\r\r\n \r\r\n Simple indexing into a matrix:\r\r\n \r\r\n ```python\r\r\n indices = [[0, 0], [1, 1]]\r\r\n params = [['a', 'b'], ['c', 'd']]\r\r\n output = ['a', 'd']\r\r\n ```\r\r\n \r\r\n Slice indexing into a matrix:\r\r\n \r\r\n ```python\r\r\n indices = [[1], [0]]\r\r\n params = [['a', 'b'], ['c', 'd']]\r\r\n output = [['c', 'd'], ['a', 'b']]\r\r\n ```\r\r\n \r\r\n Indexing into a 3-tensor:\r\r\n \r\r\n ```python\r\r\n indices = [[1]]\r\r\n params = [[['a0', 'b0'], ['c0', 'd0']],\r\r\n [['a1', 'b1'], ['c1', 'd1']]]\r\r\n output = [[['a1', 'b1'], ['c1', 'd1']]]\r\r\n \r\r\n \r\r\n indices = [[0, 1], [1, 0]]\r\r\n params = [[['a0', 'b0'], ['c0', 'd0']],\r\r\n [['a1', 'b1'], ['c1', 'd1']]]\r\r\n output = [['c0', 'd0'], ['a1', 'b1']]\r\r\n \r\r\n \r\r\n indices = [[0, 0, 1], [1, 0, 1]]\r\r\n params = [[['a0', 'b0'], ['c0', 'd0']],\r\r\n [['a1', 'b1'], ['c1', 'd1']]]\r\r\n output = ['b0', 'b1']\r\r\n ```\r\r\n \r\r\n Batched indexing into a matrix:\r\r\n \r\r\n ```python\r\r\n indices = [[[0, 0]], [[0, 1]]]\r\r\n params = [['a', 'b'], ['c', 'd']]\r\r\n output = [['a'], ['b']]\r\r\n ```\r\r\n \r\r\n Batched slice indexing into a matrix:\r\r\n \r\r\n ```python\r\r\n indices = [[[1]], [[0]]]\r\r\n params = [['a', 'b'], ['c', 'd']]\r\r\n output = [[['c', 'd']], [['a', 'b']]]\r\r\n ```\r\r\n \r\r\n Batched indexing into a 3-tensor:\r\r\n \r\r\n ```python\r\r\n indices = [[[1]], [[0]]]\r\r\n params = [[['a0', 'b0'], ['c0', 'd0']],\r\r\n [['a1', 'b1'], ['c1', 'd1']]]\r\r\n output = [[[['a1', 'b1'], ['c1', 'd1']]],\r\r\n [[['a0', 'b0'], ['c0', 'd0']]]]\r\r\n \r\r\n indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]\r\r\n params = [[['a0', 'b0'], ['c0', 'd0']],\r\r\n [['a1', 'b1'], ['c1', 'd1']]]\r\r\n output = [[['c0', 'd0'], ['a1', 'b1']],\r\r\n [['a0', 'b0'], ['c1', 'd1']]]\r\r\n \r\r\n \r\r\n indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]\r\r\n params = [[['a0', 'b0'], ['c0', 'd0']],\r\r\n [['a1', 'b1'], ['c1', 'd1']]]\r\r\n output = [['b0', 'b1'], ['d0', 'c1']]\r\r\n ```\r\r\n \r\r\n See also `tf.gather` and `tf.batch_gather`.\r\n\r\n Args:\r\n params: A `Tensor`. The tensor from which to gather values.\r\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n Index tensor.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `params`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"GatherNd\", params=params, indices=indices, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"Tparams\", _op.get_attr(\"Tparams\"), \"Tindices\",\r\n _op.get_attr(\"Tindices\"))\r\n _execute.record_gradient(\r\n \"GatherNd\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"GatherNd\",\r\n name, _ctx._post_execution_callbacks, params, indices)\r\n return _result\r\n except _core._FallbackException:\r\n return gather_nd_eager_fallback(\r\n params, indices, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef gather_nd_eager_fallback(params, indices, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function gather_nd\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], _ctx)\r\n _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)\r\n _inputs_flat = [params, indices]\r\n _attrs = (\"Tparams\", _attr_Tparams, \"Tindices\", _attr_Tindices)\r\n _result = _execute.execute(b\"GatherNd\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"GatherNd\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef gather_v2(params, indices, axis, name=None):\r\n r\"\"\"Gather slices from `params` axis `axis` according to `indices`.\r\n\r\n `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\r\r\n Produces an output tensor with shape `params.shape[:axis] + indices.shape +\r\r\n params.shape[axis + 1:]` where:\r\r\n \r\r\n ```python\r\r\n # Scalar indices (output is rank(params) - 1).\r\r\n output[a_0, ..., a_n, b_0, ..., b_n] =\r\r\n params[a_0, ..., a_n, indices, b_0, ..., b_n]\r\r\n \r\r\n # Vector indices (output is rank(params)).\r\r\n output[a_0, ..., a_n, i, b_0, ..., b_n] =\r\r\n params[a_0, ..., a_n, indices[i], b_0, ..., b_n]\r\r\n \r\r\n # Higher rank indices (output is rank(params) + rank(indices) - 1).\r\r\n output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =\r\r\n params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]\r\r\n ```\r\r\n \r\r\n <div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\r\r\n <img style=\"width:100%\" src=\"https://www.tensorflow.org/images/Gather.png\" alt>\r\r\n </div>\r\r\n \r\r\n Note that on CPU, if an out of bound index is found, an error is returned.\r\r\n On GPU, if an out of bound index is found, a 0 is stored in the\r\r\n corresponding output value.\r\r\n \r\r\n See also `tf.batch_gather` and `tf.gather_nd`.\r\n\r\n Args:\r\n params: A `Tensor`.\r\n The tensor from which to gather values. Must be at least rank\r\r\n `axis + 1`.\r\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n Index tensor. Must be in range `[0, params.shape[axis])`.\r\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n The axis in `params` to gather `indices` from. 
Defaults to the first\r\r\n dimension. Supports negative indexes.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `params`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"GatherV2\", params=params, indices=indices, axis=axis, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"Tparams\", _op.get_attr(\"Tparams\"), \"Tindices\",\r\n _op.get_attr(\"Tindices\"), \"Taxis\", _op.get_attr(\"Taxis\"))\r\n _execute.record_gradient(\r\n \"GatherV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"GatherV2\",\r\n name, _ctx._post_execution_callbacks, params, indices, axis)\r\n return _result\r\n except _core._FallbackException:\r\n return gather_v2_eager_fallback(\r\n params, indices, axis, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef gather_v2_eager_fallback(params, indices, axis, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function gather_v2\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], _ctx)\r\n _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)\r\n _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], _ctx)\r\n _inputs_flat = [params, indices, axis]\r\n _attrs = (\"Tparams\", _attr_Tparams, \"Tindices\", _attr_Tindices, \"Taxis\",\r\n _attr_Taxis)\r\n _result = _execute.execute(b\"GatherV2\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"GatherV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('guarantee_const')\r\ndef guarantee_const(input, name=None):\r\n r\"\"\"Gives a guarantee to the TF runtime that the input tensor is a constant.\r\n\r\n The runtime is then free to make optimizations based on this.\r\r\n \r\r\n Only accepts value typed tensors as inputs and rejects resource variable handles\r\r\n as input.\r\r\n \r\r\n Returns the input tensor without modification.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"GuaranteeConst\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"GuaranteeConst\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"GuaranteeConst\", name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return guarantee_const_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef guarantee_const_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function guarantee_const\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"GuaranteeConst\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"GuaranteeConst\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef identity(input, name=None):\r\n r\"\"\"Return a tensor with the same shape and contents as the input tensor or value.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Identity\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"Identity\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Identity\",\r\n name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return identity_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef identity_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function identity\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"Identity\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Identity\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('identity_n')\r\ndef identity_n(input, name=None):\r\n r\"\"\"Returns a list of tensors with the same shapes and contents as the input\r\n\r\n tensors.\r\r\n \r\r\n This op can be used to override the gradient for complicated functions. For\r\r\n example, suppose y = f(x) and we wish to apply a custom function g for backprop\r\r\n such that dx = g(dy). In Python,\r\r\n \r\r\n ```python\r\r\n with tf.get_default_graph().gradient_override_map(\r\r\n {'IdentityN': 'OverrideGradientWithG'}):\r\r\n y, _ = identity_n([f(x), x])\r\r\n \r\r\n @tf.RegisterGradient('OverrideGradientWithG')\r\r\n def ApplyG(op, dy, _):\r\r\n return [None, g(dy)] # Do not backprop to f(x).\r\r\n ```\r\n\r\n Args:\r\n input: A list of `Tensor` objects.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A list of `Tensor` objects. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"IdentityN\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"IdentityN\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"IdentityN\",\r\n name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return identity_n_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef identity_n_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function identity_n\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, _ctx)\r\n _inputs_flat = list(input)\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"IdentityN\", len(input), inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"IdentityN\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n\r\ndef immutable_const(dtype, shape, memory_region_name, name=None):\r\n r\"\"\"Returns immutable tensor from memory region.\r\n\r\n The current implementation memmaps the tensor from a file.\r\n\r\n Args:\r\n dtype: A `tf.DType`. 
Type of the returned tensor.\r\n shape: A `tf.TensorShape` or list of `ints`.\r\n Shape of the returned tensor.\r\n memory_region_name: A `string`.\r\n Name of readonly memory region used by the tensor, see\r\r\n NewReadOnlyMemoryRegionFromFile in tensorflow::Env.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `dtype`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n shape = _execute.make_shape(shape, \"shape\")\r\n memory_region_name = _execute.make_str(memory_region_name, \"memory_region_name\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ImmutableConst\", dtype=dtype, shape=shape,\r\n memory_region_name=memory_region_name, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"dtype\", _op.get_attr(\"dtype\"), \"shape\", _op.get_attr(\"shape\"),\r\n \"memory_region_name\", _op.get_attr(\"memory_region_name\"))\r\n _execute.record_gradient(\r\n \"ImmutableConst\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"ImmutableConst\", name, _ctx._post_execution_callbacks, \"dtype\",\r\n dtype, \"shape\", shape, \"memory_region_name\", memory_region_name)\r\n return _result\r\n except _core._FallbackException:\r\n return immutable_const_eager_fallback(\r\n dtype=dtype, shape=shape, memory_region_name=memory_region_name,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef immutable_const_eager_fallback(dtype, shape, memory_region_name, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function immutable_const\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n shape = _execute.make_shape(shape, \"shape\")\r\n memory_region_name = _execute.make_str(memory_region_name, \"memory_region_name\")\r\n _inputs_flat = []\r\n _attrs = (\"dtype\", dtype, \"shape\", shape, \"memory_region_name\",\r\n memory_region_name)\r\n _result = _execute.execute(b\"ImmutableConst\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ImmutableConst\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef inplace_add(x, i, v, name=None):\r\n r\"\"\" Adds v into specified rows of x.\r\r\n\r\r\n Computes y = x; y[i, :] += v; return y.\r\r\n\r\n Args:\r\n x: A `Tensor`. A `Tensor` of type T.\r\n i: A `Tensor` of type `int32`.\r\n A vector. Indices into the left-most dimension of `x`.\r\n v: A `Tensor`. Must have the same type as `x`.\r\n A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `x`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"InplaceAdd\", x=x, i=i, v=v, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"InplaceAdd\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"InplaceAdd\",\r\n name, _ctx._post_execution_callbacks, x, i, v)\r\n return _result\r\n except _core._FallbackException:\r\n return inplace_add_eager_fallback(\r\n x, i, v, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef inplace_add_eager_fallback(x, i, v, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function inplace_add\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([x, v], _ctx)\r\n (x, v) = _inputs_T\r\n i = _ops.convert_to_tensor(i, _dtypes.int32)\r\n _inputs_flat = [x, i, v]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"InplaceAdd\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"InplaceAdd\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef inplace_sub(x, i, v, name=None):\r\n r\"\"\" Subtracts `v` into specified rows of `x`.\r\r\n\r\r\n Computes y = x; y[i, :] -= v; return y.\r\r\n\r\n Args:\r\n x: A `Tensor`. A `Tensor` of type T.\r\n i: A `Tensor` of type `int32`.\r\n A vector. Indices into the left-most dimension of `x`.\r\n v: A `Tensor`. Must have the same type as `x`.\r\n A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `x`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"InplaceSub\", x=x, i=i, v=v, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"InplaceSub\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"InplaceSub\",\r\n name, _ctx._post_execution_callbacks, x, i, v)\r\n return _result\r\n except _core._FallbackException:\r\n return inplace_sub_eager_fallback(\r\n x, i, v, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef inplace_sub_eager_fallback(x, i, v, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function inplace_sub\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([x, v], _ctx)\r\n (x, v) = _inputs_T\r\n i = _ops.convert_to_tensor(i, _dtypes.int32)\r\n _inputs_flat = [x, i, v]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"InplaceSub\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"InplaceSub\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef inplace_update(x, i, v, name=None):\r\n r\"\"\" Updates specified rows with values in `v`.\r\r\n\r\r\n Computes `x[i, :] = v; return x`.\r\r\n\r\n Args:\r\n x: A `Tensor`. A tensor of type `T`.\r\n i: A `Tensor` of type `int32`.\r\n A vector. Indices into the left-most dimension of `x`.\r\n v: A `Tensor`. Must have the same type as `x`.\r\n A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `x`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"InplaceUpdate\", x=x, i=i, v=v, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"InplaceUpdate\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"InplaceUpdate\", name, _ctx._post_execution_callbacks, x, i, v)\r\n return _result\r\n except _core._FallbackException:\r\n return inplace_update_eager_fallback(\r\n x, i, v, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef inplace_update_eager_fallback(x, i, v, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function inplace_update\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([x, v], _ctx)\r\n (x, v) = _inputs_T\r\n i = _ops.convert_to_tensor(i, _dtypes.int32)\r\n _inputs_flat = [x, i, v]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"InplaceUpdate\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"InplaceUpdate\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('math.invert_permutation', 'invert_permutation')\r\n@deprecated_endpoints('invert_permutation')\r\ndef invert_permutation(x, name=None):\r\n r\"\"\"Computes the inverse permutation of a tensor.\r\n\r\n This operation computes the inverse of an index permutation. It takes a 1-D\r\r\n integer tensor `x`, which represents the indices of a zero-based array, and\r\r\n swaps each value with its index position. In other words, for an output tensor\r\r\n `y` and an input tensor `x`, this operation computes the following:\r\r\n \r\r\n `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`\r\r\n \r\r\n The values must include 0. There can be no duplicate values or negative values.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # tensor `x` is [3, 4, 0, 2, 1]\r\r\n invert_permutation(x) ==> [2, 4, 3, 0, 1]\r\r\n ```\r\n\r\n Args:\r\n x: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `x`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"InvertPermutation\", x=x, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"InvertPermutation\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"InvertPermutation\", name, _ctx._post_execution_callbacks, x)\r\n return _result\r\n except _core._FallbackException:\r\n return invert_permutation_eager_fallback(\r\n x, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef invert_permutation_eager_fallback(x, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function invert_permutation\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx, _dtypes.int32)\r\n _inputs_flat = [x]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"InvertPermutation\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"InvertPermutation\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n_list_diff_outputs = [\"out\", \"idx\"]\r\n_ListDiffOutput = _collections.namedtuple(\r\n \"ListDiff\", _list_diff_outputs)\r\n\r\n\r\ndef list_diff(x, y, out_idx=_dtypes.int32, name=None):\r\n r\"\"\"Computes the difference between two lists of numbers or strings.\r\n\r\n Given a list `x` and a list `y`, this operation returns a list `out` that\r\r\n represents all values that are in `x` but not in `y`. The returned list `out`\r\r\n is sorted in the same order that the numbers appear in `x` (duplicates are\r\r\n preserved). This operation also returns a list `idx` that represents the\r\r\n position of each `out` element in `x`. In other words:\r\r\n \r\r\n `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`\r\r\n \r\r\n For example, given this input:\r\r\n \r\r\n ```\r\r\n x = [1, 2, 3, 4, 5, 6]\r\r\n y = [1, 3, 5]\r\r\n ```\r\r\n \r\r\n This operation would return:\r\r\n \r\r\n ```\r\r\n out ==> [2, 4, 6]\r\r\n idx ==> [1, 3, 5]\r\r\n ```\r\n\r\n Args:\r\n x: A `Tensor`. 1-D. Values to keep.\r\n y: A `Tensor`. Must have the same type as `x`. 1-D. Values to remove.\r\n out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (out, idx).\r\n\r\n out: A `Tensor`. 
Has the same type as `x`.\r\n idx: A `Tensor` of type `out_idx`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ListDiff\", x=x, y=y, out_idx=out_idx, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"out_idx\", _op.get_attr(\"out_idx\"))\r\n _execute.record_gradient(\r\n \"ListDiff\", _inputs_flat, _attrs, _result, name)\r\n _result = _ListDiffOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"ListDiff\",\r\n name, _ctx._post_execution_callbacks, x, y, \"out_idx\", out_idx)\r\n _result = _ListDiffOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return list_diff_eager_fallback(\r\n x, y, out_idx=out_idx, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef list_diff_eager_fallback(x, y, out_idx=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function list_diff\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)\r\n (x, y) = _inputs_T\r\n _inputs_flat = [x, y]\r\n _attrs = (\"T\", _attr_T, \"out_idx\", out_idx)\r\n _result = _execute.execute(b\"ListDiff\", 2, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ListDiff\", _inputs_flat, _attrs, _result, name)\r\n _result = _ListDiffOutput._make(_result)\r\n return _result\r\n\r\n\r\ndef lower_bound(sorted_inputs, values, out_type=_dtypes.int32, name=None):\r\n r\"\"\"Applies lower_bound(sorted_search_values, values) along each row.\r\n\r\n Each set of rows with the same index in (sorted_inputs, values) is treated\r\r\n independently. The resulting row is the equivalent of calling\r\r\n `np.searchsorted(sorted_inputs, values, side='left')`.\r\r\n \r\r\n The result is not a global index to the entire \r\r\n `Tensor`, but rather just the index in the last dimension.\r\r\n \r\r\n A 2-D example:\r\r\n sorted_sequence = [[0, 3, 9, 9, 10],\r\r\n [1, 2, 3, 4, 5]]\r\r\n values = [[2, 4, 9],\r\r\n [0, 2, 6]]\r\r\n \r\r\n result = LowerBound(sorted_sequence, values)\r\r\n \r\r\n result == [[1, 2, 2],\r\r\n [0, 1, 5]]\r\n\r\n Args:\r\n sorted_inputs: A `Tensor`. 2-D Tensor where each row is ordered.\r\n values: A `Tensor`. Must have the same type as `sorted_inputs`.\r\n 2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains\r\r\n the values that will be searched for in `sorted_search_values`.\r\n out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. 
Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `out_type`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"LowerBound\", sorted_inputs=sorted_inputs, values=values,\r\n out_type=out_type, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"out_type\", _op.get_attr(\"out_type\"))\r\n _execute.record_gradient(\r\n \"LowerBound\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"LowerBound\",\r\n name, _ctx._post_execution_callbacks, sorted_inputs, values,\r\n \"out_type\", out_type)\r\n return _result\r\n except _core._FallbackException:\r\n return lower_bound_eager_fallback(\r\n sorted_inputs, values, out_type=out_type, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef lower_bound_eager_fallback(sorted_inputs, values, out_type=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function lower_bound\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([sorted_inputs, values], _ctx)\r\n (sorted_inputs, values) = _inputs_T\r\n _inputs_flat = [sorted_inputs, values]\r\n _attrs = (\"T\", _attr_T, \"out_type\", out_type)\r\n _result = _execute.execute(b\"LowerBound\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"LowerBound\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('linalg.band_part', 'matrix_band_part')\r\n@deprecated_endpoints('matrix_band_part')\r\ndef matrix_band_part(input, num_lower, num_upper, name=None):\r\n r\"\"\"Copy a tensor setting everything outside a central band in each innermost matrix\r\n\r\n to zero.\r\r\n \r\r\n The `band` part is computed as follows:\r\r\n Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a\r\r\n tensor with the same shape where\r\r\n \r\r\n `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.\r\r\n \r\r\n The indicator function\r\r\n \r\r\n `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) &&\r\r\n (num_upper < 0 || (n-m) <= num_upper)`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # if 'input' is [[ 0, 1, 2, 3]\r\r\n [-1, 0, 1, 2]\r\r\n [-2, -1, 0, 1]\r\r\n [-3, -2, -1, 0]],\r\r\n \r\r\n tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]\r\r\n [-1, 0, 1, 2]\r\r\n [ 0, -1, 0, 1]\r\r\n [ 0, 0, -1, 0]],\r\r\n \r\r\n tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]\r\r\n [-1, 0, 1, 0]\r\r\n [-2, -1, 0, 1]\r\r\n [ 0, -2, -1, 0]]\r\r\n ```\r\r\n \r\r\n Useful special cases:\r\r\n \r\r\n ```\r\r\n tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.\r\r\n tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.\r\r\n tf.matrix_band_part(input, 0, 0) 
==> Diagonal.\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`. Rank `k` tensor.\r\n num_lower: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 0-D tensor. Number of subdiagonals to keep. If negative, keep entire\r\r\n lower triangle.\r\n num_upper: A `Tensor`. Must have the same type as `num_lower`.\r\n 0-D tensor. Number of superdiagonals to keep. If negative, keep\r\r\n entire upper triangle.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"MatrixBandPart\", input=input, num_lower=num_lower,\r\n num_upper=num_upper, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tindex\", _op.get_attr(\"Tindex\"))\r\n _execute.record_gradient(\r\n \"MatrixBandPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"MatrixBandPart\", name, _ctx._post_execution_callbacks, input,\r\n num_lower, num_upper)\r\n return _result\r\n except _core._FallbackException:\r\n return matrix_band_part_eager_fallback(\r\n input, num_lower, num_upper, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef matrix_band_part_eager_fallback(input, num_lower, num_upper, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function matrix_band_part\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tindex, _inputs_Tindex = _execute.args_to_matching_eager([num_lower, num_upper], _ctx, _dtypes.int64)\r\n (num_lower, num_upper) = _inputs_Tindex\r\n _inputs_flat = [input, num_lower, num_upper]\r\n _attrs = (\"T\", _attr_T, \"Tindex\", _attr_Tindex)\r\n _result = _execute.execute(b\"MatrixBandPart\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"MatrixBandPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('linalg.diag', 'matrix_diag')\r\n@deprecated_endpoints('matrix_diag')\r\ndef matrix_diag(diagonal, name=None):\r\n r\"\"\"Returns a batched diagonal tensor with a given batched diagonal values.\r\n\r\n Given a `diagonal`, this operation returns a tensor with the `diagonal` and\r\r\n everything else padded with zeros. The diagonal is computed as follows:\r\r\n \r\r\n Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a\r\r\n tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:\r\r\n \r\r\n `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]\r\r\n \r\r\n and diagonal.shape = (2, 4)\r\r\n \r\r\n tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]\r\r\n [0, 2, 0, 0]\r\r\n [0, 0, 3, 0]\r\r\n [0, 0, 0, 4]],\r\r\n [[5, 0, 0, 0]\r\r\n [0, 6, 0, 0]\r\r\n [0, 0, 7, 0]\r\r\n [0, 0, 0, 8]]]\r\r\n \r\r\n which has shape (2, 4, 4)\r\r\n ```\r\n\r\n Args:\r\n diagonal: A `Tensor`. 
Rank `k`, where `k >= 1`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `diagonal`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"MatrixDiag\", diagonal=diagonal, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"MatrixDiag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"MatrixDiag\",\r\n name, _ctx._post_execution_callbacks, diagonal)\r\n return _result\r\n except _core._FallbackException:\r\n return matrix_diag_eager_fallback(\r\n diagonal, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef matrix_diag_eager_fallback(diagonal, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function matrix_diag\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], _ctx)\r\n _inputs_flat = [diagonal]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"MatrixDiag\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"MatrixDiag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('linalg.diag_part', 'matrix_diag_part')\r\n@deprecated_endpoints('matrix_diag_part')\r\ndef matrix_diag_part(input, name=None):\r\n r\"\"\"Returns the batched diagonal part of a batched tensor.\r\n\r\n This operation returns a tensor with the `diagonal` part\r\r\n of the batched `input`. The `diagonal` part is computed as follows:\r\r\n \r\r\n Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a\r\r\n tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:\r\r\n \r\r\n `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.\r\r\n \r\r\n The input must be at least a matrix.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 'input' is [[[1, 0, 0, 0]\r\r\n [0, 2, 0, 0]\r\r\n [0, 0, 3, 0]\r\r\n [0, 0, 0, 4]],\r\r\n [[5, 0, 0, 0]\r\r\n [0, 6, 0, 0]\r\r\n [0, 0, 7, 0]\r\r\n [0, 0, 0, 8]]]\r\r\n \r\r\n and input.shape = (2, 4, 4)\r\r\n \r\r\n tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]\r\r\n \r\r\n which has shape (2, 4)\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`. Rank `k` tensor where `k >= 2`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"MatrixDiagPart\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"MatrixDiagPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"MatrixDiagPart\", name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return matrix_diag_part_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef matrix_diag_part_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function matrix_diag_part\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"MatrixDiagPart\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"MatrixDiagPart\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('linalg.set_diag', 'matrix_set_diag')\r\n@deprecated_endpoints('matrix_set_diag')\r\ndef matrix_set_diag(input, diagonal, name=None):\r\n r\"\"\"Returns a batched matrix tensor with new batched diagonal values.\r\n\r\n Given `input` and `diagonal`, this operation returns a tensor with the\r\r\n same shape and values as `input`, except for the main diagonal of the\r\r\n innermost matrices. These will be overwritten by the values in `diagonal`.\r\r\n \r\r\n The output is computed as follows:\r\r\n \r\r\n Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has\r\r\n `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a\r\r\n tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:\r\r\n \r\r\n * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.\r\r\n * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.\r\n\r\n Args:\r\n input: A `Tensor`. Rank `k+1`, where `k >= 1`.\r\n diagonal: A `Tensor`. Must have the same type as `input`.\r\n Rank `k`, where `k >= 1`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"MatrixSetDiag\", input=input, diagonal=diagonal, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"MatrixSetDiag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"MatrixSetDiag\", name, _ctx._post_execution_callbacks, input,\r\n diagonal)\r\n return _result\r\n except _core._FallbackException:\r\n return matrix_set_diag_eager_fallback(\r\n input, diagonal, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef matrix_set_diag_eager_fallback(input, diagonal, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function matrix_set_diag\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([input, diagonal], _ctx)\r\n (input, diagonal) = _inputs_T\r\n _inputs_flat = [input, diagonal]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"MatrixSetDiag\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"MatrixSetDiag\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef mirror_pad(input, paddings, mode, name=None):\r\n r\"\"\"Pads a tensor with mirrored values.\r\n\r\n This operation pads a `input` with mirrored values according to the `paddings`\r\r\n you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is\r\r\n the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates\r\r\n how many values to add before the contents of `input` in that dimension, and\r\r\n `paddings[D, 1]` indicates how many values to add after the contents of `input`\r\r\n in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater\r\r\n than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true\r\r\n (if false, respectively).\r\r\n \r\r\n The padded size of each dimension D of the output is:\r\r\n \r\r\n `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 't' is [[1, 2, 3], [4, 5, 6]].\r\r\n # 'paddings' is [[1, 1]], [2, 2]].\r\r\n # 'mode' is SYMMETRIC.\r\r\n # rank of 't' is 2.\r\r\n pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]\r\r\n [2, 1, 1, 2, 3, 3, 2]\r\r\n [5, 4, 4, 5, 6, 6, 5]\r\r\n [5, 4, 4, 5, 6, 6, 5]]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`. The input tensor to be padded.\r\n paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n A two-column matrix specifying the padding sizes. The number of\r\r\n rows must be the same as the rank of `input`.\r\n mode: A `string` from: `\"REFLECT\", \"SYMMETRIC\"`.\r\n Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions\r\r\n do not include the borders, while in symmetric mode the padded regions\r\r\n do include the borders. 
For example, if `input` is `[1, 2, 3]` and `paddings`\r\r\n is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and\r\r\n it is `[1, 2, 3, 3, 2]` in symmetric mode.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n mode = _execute.make_str(mode, \"mode\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"MirrorPad\", input=input, paddings=paddings, mode=mode, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tpaddings\", _op.get_attr(\"Tpaddings\"),\r\n \"mode\", _op.get_attr(\"mode\"))\r\n _execute.record_gradient(\r\n \"MirrorPad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"MirrorPad\",\r\n name, _ctx._post_execution_callbacks, input, paddings, \"mode\", mode)\r\n return _result\r\n except _core._FallbackException:\r\n return mirror_pad_eager_fallback(\r\n input, paddings, mode=mode, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef mirror_pad_eager_fallback(input, paddings, mode, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function mirror_pad\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n mode = _execute.make_str(mode, \"mode\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, paddings]\r\n _attrs = (\"T\", _attr_T, \"Tpaddings\", _attr_Tpaddings, \"mode\", mode)\r\n _result = _execute.execute(b\"MirrorPad\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"MirrorPad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef mirror_pad_grad(input, paddings, mode, name=None):\r\n r\"\"\"Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.\r\n\r\n This operation folds the padded areas of `input` by `MirrorPad` according to the\r\r\n `paddings` you specify. `paddings` must be the same as `paddings` argument\r\r\n given to the corresponding `MirrorPad` op.\r\r\n \r\r\n The folded size of each dimension D of the output is:\r\r\n \r\r\n `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].\r\r\n # 'paddings' is [[0, 1]], [0, 1]].\r\r\n # 'mode' is SYMMETRIC.\r\r\n # rank of 't' is 2.\r\r\n pad(t, paddings) ==> [[ 1, 5]\r\r\n [11, 28]]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`. The input tensor to be folded.\r\n paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n A two-column matrix specifying the padding sizes. The number of\r\r\n rows must be the same as the rank of `input`.\r\n mode: A `string` from: `\"REFLECT\", \"SYMMETRIC\"`.\r\n The mode used in the `MirrorPad` op.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n mode = _execute.make_str(mode, \"mode\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"MirrorPadGrad\", input=input, paddings=paddings, mode=mode, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tpaddings\", _op.get_attr(\"Tpaddings\"),\r\n \"mode\", _op.get_attr(\"mode\"))\r\n _execute.record_gradient(\r\n \"MirrorPadGrad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"MirrorPadGrad\", name, _ctx._post_execution_callbacks, input,\r\n paddings, \"mode\", mode)\r\n return _result\r\n except _core._FallbackException:\r\n return mirror_pad_grad_eager_fallback(\r\n input, paddings, mode=mode, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef mirror_pad_grad_eager_fallback(input, paddings, mode, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function mirror_pad_grad\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n mode = _execute.make_str(mode, \"mode\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, paddings]\r\n _attrs = (\"T\", _attr_T, \"Tpaddings\", _attr_Tpaddings, \"mode\", mode)\r\n _result = _execute.execute(b\"MirrorPadGrad\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"MirrorPadGrad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef one_hot(indices, depth, on_value, off_value, axis=-1, name=None):\r\n r\"\"\"Returns a one-hot tensor.\r\n\r\n The locations represented by indices in `indices` take value `on_value`,\r\r\n while all other locations take value `off_value`.\r\r\n \r\r\n If the input `indices` is rank `N`, the output will have rank `N+1`,\r\r\n The new axis is created at dimension `axis` (default: the new axis is\r\r\n appended at the end).\r\r\n \r\r\n If `indices` is a scalar the output shape will be a vector of length `depth`.\r\r\n \r\r\n If `indices` is a vector of length `features`, the output shape will be:\r\r\n ```\r\r\n features x depth if axis == -1\r\r\n depth x features if axis == 0\r\r\n ```\r\r\n \r\r\n If `indices` is a matrix (batch) with shape `[batch, features]`,\r\r\n the output shape will be:\r\r\n ```\r\r\n batch x features x depth if axis == -1\r\r\n batch x depth x features if axis == 1\r\r\n depth x batch x features if axis == 0\r\r\n ```\r\r\n \r\r\n \r\r\n Examples\r\r\n =========\r\r\n \r\r\n Suppose that\r\r\n \r\r\n ```\r\r\n indices = [0, 2, -1, 1]\r\r\n depth = 3\r\r\n on_value = 5.0\r\r\n off_value = 0.0\r\r\n axis = -1\r\r\n ```\r\r\n \r\r\n Then output is `[4 x 3]`:\r\r\n \r\r\n ```output =\r\r\n [5.0 0.0 0.0] // one_hot(0)\r\r\n [0.0 0.0 5.0] // one_hot(2)\r\r\n [0.0 0.0 0.0] // one_hot(-1)\r\r\n [0.0 5.0 0.0] // one_hot(1)\r\r\n ```\r\r\n \r\r\n Suppose that\r\r\n \r\r\n ```\r\r\n indices = [0, 2, -1, 1]\r\r\n depth = 3\r\r\n on_value = 0.0\r\r\n 
off_value = 3.0\r\r\n axis = 0\r\r\n ```\r\r\n \r\r\n Then output is `[3 x 4]`:\r\r\n \r\r\n ```output =\r\r\n [0.0 3.0 3.0 3.0]\r\r\n [3.0 3.0 3.0 0.0]\r\r\n [3.0 3.0 3.0 3.0]\r\r\n [3.0 0.0 3.0 3.0]\r\r\n // ^ one_hot(0)\r\r\n // ^ one_hot(2)\r\r\n // ^ one_hot(-1)\r\r\n // ^ one_hot(1)\r\r\n ```\r\r\n Suppose that\r\r\n \r\r\n ```\r\r\n indices = [[0, 2], [1, -1]]\r\r\n depth = 3\r\r\n on_value = 1.0\r\r\n off_value = 0.0\r\r\n axis = -1\r\r\n ```\r\r\n \r\r\n Then output is `[2 x 2 x 3]`:\r\r\n \r\r\n ```output =\r\r\n [\r\r\n [1.0, 0.0, 0.0] // one_hot(0)\r\r\n [0.0, 0.0, 1.0] // one_hot(2)\r\r\n ][\r\r\n [0.0, 1.0, 0.0] // one_hot(1)\r\r\n [0.0, 0.0, 0.0] // one_hot(-1)\r\r\n ]```\r\n\r\n Args:\r\n indices: A `Tensor`. Must be one of the following types: `uint8`, `int32`, `int64`.\r\n A tensor of indices.\r\n depth: A `Tensor` of type `int32`.\r\n A scalar defining the depth of the one hot dimension.\r\n on_value: A `Tensor`.\r\n A scalar defining the value to fill in output when `indices[j] = i`.\r\n off_value: A `Tensor`. Must have the same type as `on_value`.\r\n A scalar defining the value to fill in output when `indices[j] != i`.\r\n axis: An optional `int`. Defaults to `-1`.\r\n The axis to fill (default: -1, a new inner-most axis).\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `on_value`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if axis is None:\r\n axis = -1\r\n axis = _execute.make_int(axis, \"axis\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"OneHot\", indices=indices, depth=depth, on_value=on_value,\r\n off_value=off_value, axis=axis, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"axis\", _op.get_attr(\"axis\"), \"T\", _op.get_attr(\"T\"), \"TI\",\r\n _op.get_attr(\"TI\"))\r\n _execute.record_gradient(\r\n \"OneHot\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"OneHot\", name,\r\n _ctx._post_execution_callbacks, indices, depth, on_value, off_value,\r\n \"axis\", axis)\r\n return _result\r\n except _core._FallbackException:\r\n return one_hot_eager_fallback(\r\n indices, depth, on_value, off_value, axis=axis, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef one_hot_eager_fallback(indices, depth, on_value, off_value, axis=-1, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function one_hot\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if axis is None:\r\n axis = -1\r\n axis = _execute.make_int(axis, \"axis\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([on_value, off_value], _ctx)\r\n (on_value, off_value) = _inputs_T\r\n _attr_TI, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int64)\r\n depth = _ops.convert_to_tensor(depth, _dtypes.int32)\r\n _inputs_flat = [indices, depth, on_value, off_value]\r\n _attrs = (\"axis\", axis, \"T\", _attr_T, \"TI\", _attr_TI)\r\n _result = _execute.execute(b\"OneHot\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"OneHot\", _inputs_flat, _attrs, _result, 
name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef ones_like(x, name=None):\r\n r\"\"\"Returns a tensor of ones with the same shape and type as x.\r\n\r\n Args:\r\n x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128`, `bool`.\r\n a tensor of type T.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `x`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"OnesLike\", x=x, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"OnesLike\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"OnesLike\",\r\n name, _ctx._post_execution_callbacks, x)\r\n return _result\r\n except _core._FallbackException:\r\n return ones_like_eager_fallback(\r\n x, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef ones_like_eager_fallback(x, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function ones_like\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n _inputs_flat = [x]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"OnesLike\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"OnesLike\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef pack(values, axis=0, name=None):\r\n r\"\"\"Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.\r\n\r\n Packs the `N` tensors in `values` into a tensor with rank one higher than each\r\r\n tensor in `values`, by packing them along the `axis` dimension.\r\r\n Given a list of tensors of shape `(A, B, C)`;\r\r\n \r\r\n if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.\r\r\n if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.\r\r\n Etc.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 'x' is [1, 4]\r\r\n # 'y' is [2, 5]\r\r\n # 'z' is [3, 6]\r\r\n pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.\r\r\n pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]\r\r\n ```\r\r\n \r\r\n This is the opposite of `unpack`.\r\n\r\n Args:\r\n values: A list of at least 1 `Tensor` objects with the same type.\r\n Must be of same shape and type.\r\n axis: An optional `int`. Defaults to `0`.\r\n Dimension along which to pack. Negative values wrap around, so the\r\r\n valid range is `[-(R+1), R+1)`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `values`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n \"'pack' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n if axis is None:\r\n axis = 0\r\n axis = _execute.make_int(axis, \"axis\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Pack\", values=values, axis=axis, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"N\", _op.get_attr(\"N\"), \"T\", _op.get_attr(\"T\"), \"axis\",\r\n _op.get_attr(\"axis\"))\r\n _execute.record_gradient(\r\n \"Pack\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Pack\", name,\r\n _ctx._post_execution_callbacks, values, \"axis\", axis)\r\n return _result\r\n except _core._FallbackException:\r\n return pack_eager_fallback(\r\n values, axis=axis, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef pack_eager_fallback(values, axis=0, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function pack\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n \"'pack' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n if axis is None:\r\n axis = 0\r\n axis = _execute.make_int(axis, \"axis\")\r\n _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)\r\n _inputs_flat = list(values)\r\n _attrs = (\"N\", _attr_N, \"T\", _attr_T, \"axis\", axis)\r\n _result = _execute.execute(b\"Pack\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Pack\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef pad(input, paddings, name=None):\r\n r\"\"\"Pads a tensor with zeros.\r\n\r\n This operation pads a `input` with zeros according to the `paddings` you\r\r\n specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the\r\r\n rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates\r\r\n how many zeros to add before the contents of `input` in that dimension, and\r\r\n `paddings[D, 1]` indicates how many zeros to add after the contents of `input`\r\r\n in that dimension.\r\r\n \r\r\n The padded size of each dimension D of the output is:\r\r\n \r\r\n `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 't' is [[1, 1], [2, 2]]\r\r\n # 'paddings' is [[1, 1], [2, 2]]\r\r\n # rank of 't' is 2\r\r\n pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]\r\r\n [0, 0, 1, 1, 0, 0]\r\r\n [0, 0, 2, 2, 0, 0]\r\r\n [0, 0, 0, 0, 0, 0]]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`.\r\n paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Pad\", input=input, paddings=paddings, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tpaddings\", _op.get_attr(\"Tpaddings\"))\r\n _execute.record_gradient(\r\n \"Pad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Pad\", name,\r\n _ctx._post_execution_callbacks, input, paddings)\r\n return _result\r\n except _core._FallbackException:\r\n return pad_eager_fallback(\r\n input, paddings, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef pad_eager_fallback(input, paddings, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function pad\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, paddings]\r\n _attrs = (\"T\", _attr_T, \"Tpaddings\", _attr_Tpaddings)\r\n _result = _execute.execute(b\"Pad\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Pad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef pad_v2(input, paddings, constant_values, name=None):\r\n r\"\"\"Pads a tensor.\r\n\r\n This operation pads `input` according to the `paddings` and `constant_values`\r\r\n you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is\r\r\n the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates\r\r\n how many padding values to add before the contents of `input` in that dimension,\r\r\n and `paddings[D, 1]` indicates how many padding values to add after the contents\r\r\n of `input` in that dimension. `constant_values` is a scalar tensor of the same\r\r\n type as `input` that indicates the value to use for padding `input`.\r\r\n \r\r\n The padded size of each dimension D of the output is:\r\r\n \r\r\n `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 't' is [[1, 1], [2, 2]]\r\r\n # 'paddings' is [[1, 1], [2, 2]]\r\r\n # 'constant_values' is 0\r\r\n # rank of 't' is 2\r\r\n pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]\r\r\n [0, 0, 1, 1, 0, 0]\r\r\n [0, 0, 2, 2, 0, 0]\r\r\n [0, 0, 0, 0, 0, 0]]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`.\r\n paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n constant_values: A `Tensor`. Must have the same type as `input`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"PadV2\", input=input, paddings=paddings,\r\n constant_values=constant_values, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tpaddings\", _op.get_attr(\"Tpaddings\"))\r\n _execute.record_gradient(\r\n \"PadV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"PadV2\", name,\r\n _ctx._post_execution_callbacks, input, paddings, constant_values)\r\n return _result\r\n except _core._FallbackException:\r\n return pad_v2_eager_fallback(\r\n input, paddings, constant_values, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef pad_v2_eager_fallback(input, paddings, constant_values, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function pad_v2\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([input, constant_values], _ctx)\r\n (input, constant_values) = _inputs_T\r\n _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, paddings, constant_values]\r\n _attrs = (\"T\", _attr_T, \"Tpaddings\", _attr_Tpaddings)\r\n _result = _execute.execute(b\"PadV2\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"PadV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef parallel_concat(values, shape, name=None):\r\n r\"\"\"Concatenates a list of `N` tensors along the first dimension.\r\n\r\n The input tensors are all required to have size 1 in the first dimension.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 'x' is [[1, 4]]\r\r\n # 'y' is [[2, 5]]\r\r\n # 'z' is [[3, 6]]\r\r\n parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.\r\r\n ```\r\r\n \r\r\n The difference between concat and parallel_concat is that concat requires all\r\r\n of the inputs be computed before the operation will begin but doesn't require\r\r\n that the input shapes be known during graph construction. Parallel concat\r\r\n will copy pieces of the input into the output as they become available, in\r\r\n some situations this can provide a performance benefit.\r\n\r\n Args:\r\n values: A list of at least 1 `Tensor` objects with the same type.\r\n Tensors to be concatenated. All must have size 1 in the first dimension\r\r\n and same shape.\r\n shape: A `tf.TensorShape` or list of `ints`.\r\n the final shape of the result; should be equal to the shapes of any input\r\r\n but with the number of input values in the first dimension.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `values`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n \"'parallel_concat' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ParallelConcat\", values=values, shape=shape, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"N\", _op.get_attr(\"N\"), \"T\", _op.get_attr(\"T\"), \"shape\",\r\n _op.get_attr(\"shape\"))\r\n _execute.record_gradient(\r\n \"ParallelConcat\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"ParallelConcat\", name, _ctx._post_execution_callbacks, values,\r\n \"shape\", shape)\r\n return _result\r\n except _core._FallbackException:\r\n return parallel_concat_eager_fallback(\r\n values, shape=shape, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef parallel_concat_eager_fallback(values, shape, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function parallel_concat\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n \"'parallel_concat' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)\r\n _inputs_flat = list(values)\r\n _attrs = (\"N\", _attr_N, \"T\", _attr_T, \"shape\", shape)\r\n _result = _execute.execute(b\"ParallelConcat\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ParallelConcat\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef placeholder(dtype, shape=None, name=None):\r\n r\"\"\"A placeholder op for a value that will be fed into the computation.\r\n\r\n N.B. This operation will fail with an error if it is executed. It is\r\r\n intended as a way to represent a value that will always be fed, and to\r\r\n provide attrs that enable the fed value to be checked at runtime.\r\n\r\n Args:\r\n dtype: A `tf.DType`. The type of elements in the tensor.\r\n shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`.\r\n (Optional) The shape of the tensor. 
If the shape has 0 dimensions, the\r\r\n shape is unconstrained.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `dtype`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n if shape is None:\r\n shape = None\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Placeholder\", dtype=dtype, shape=shape, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"dtype\", _op.get_attr(\"dtype\"), \"shape\", _op.get_attr(\"shape\"))\r\n _execute.record_gradient(\r\n \"Placeholder\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Placeholder\",\r\n name, _ctx._post_execution_callbacks, \"dtype\", dtype, \"shape\", shape)\r\n return _result\r\n except _core._FallbackException:\r\n return placeholder_eager_fallback(\r\n dtype=dtype, shape=shape, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef placeholder_eager_fallback(dtype, shape=None, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function placeholder\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n if shape is None:\r\n shape = None\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _inputs_flat = []\r\n _attrs = (\"dtype\", dtype, \"shape\", shape)\r\n _result = _execute.execute(b\"Placeholder\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Placeholder\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef placeholder_v2(dtype, shape, name=None):\r\n r\"\"\"A placeholder op for a value that will be fed into the computation.\r\n\r\n N.B. This operation will fail with an error if it is executed. It is\r\r\n intended as a way to represent a value that will always be fed, and to\r\r\n provide attrs that enable the fed value to be checked at runtime.\r\n\r\n Args:\r\n dtype: A `tf.DType`. The type of elements in the tensor.\r\n shape: A `tf.TensorShape` or list of `ints`.\r\n The shape of the tensor. The shape can be any partially-specified\r\r\n shape. 
To be unconstrained, pass in a shape with unknown rank.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `dtype`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"PlaceholderV2\", dtype=dtype, shape=shape, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"dtype\", _op.get_attr(\"dtype\"), \"shape\", _op.get_attr(\"shape\"))\r\n _execute.record_gradient(\r\n \"PlaceholderV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"PlaceholderV2\", name, _ctx._post_execution_callbacks, \"dtype\", dtype,\r\n \"shape\", shape)\r\n return _result\r\n except _core._FallbackException:\r\n return placeholder_v2_eager_fallback(\r\n dtype=dtype, shape=shape, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef placeholder_v2_eager_fallback(dtype, shape, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function placeholder_v2\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n dtype = _execute.make_type(dtype, \"dtype\")\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _inputs_flat = []\r\n _attrs = (\"dtype\", dtype, \"shape\", shape)\r\n _result = _execute.execute(b\"PlaceholderV2\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"PlaceholderV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('placeholder_with_default')\r\ndef placeholder_with_default(input, shape, name=None):\r\n r\"\"\"A placeholder op that passes through `input` when its output is not fed.\r\n\r\n Args:\r\n input: A `Tensor`. The default value to produce when `output` is not fed.\r\n shape: A `tf.TensorShape` or list of `ints`.\r\n The (possibly partial) shape of the tensor.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"PlaceholderWithDefault\", input=input, shape=shape, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"dtype\", _op.get_attr(\"dtype\"), \"shape\", _op.get_attr(\"shape\"))\r\n _execute.record_gradient(\r\n \"PlaceholderWithDefault\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"PlaceholderWithDefault\", name, _ctx._post_execution_callbacks, input,\r\n \"shape\", shape)\r\n return _result\r\n except _core._FallbackException:\r\n return placeholder_with_default_eager_fallback(\r\n input, shape=shape, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef placeholder_with_default_eager_fallback(input, shape, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function placeholder_with_default\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n shape = _execute.make_shape(shape, \"shape\")\r\n _attr_dtype, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"dtype\", _attr_dtype, \"shape\", shape)\r\n _result = _execute.execute(b\"PlaceholderWithDefault\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"PlaceholderWithDefault\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef prevent_gradient(input, message=\"\", name=None):\r\n r\"\"\"An identity op that triggers an error if a gradient is requested.\r\n\r\n When executed in a graph, this op outputs its input tensor as-is.\r\r\n \r\r\n When building ops to compute gradients, the TensorFlow gradient system\r\r\n will return an error when trying to lookup the gradient of this op,\r\r\n because no gradient must ever be registered for this function. This\r\r\n op exists to prevent subtle bugs from silently returning unimplemented\r\r\n gradients in some corner cases.\r\n\r\n Args:\r\n input: A `Tensor`. any tensor.\r\n message: An optional `string`. Defaults to `\"\"`.\r\n Will be printed in the error when anyone tries to differentiate\r\r\n this operation.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if message is None:\r\n message = \"\"\r\n message = _execute.make_str(message, \"message\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"PreventGradient\", input=input, message=message, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"message\", _op.get_attr(\"message\"))\r\n _execute.record_gradient(\r\n \"PreventGradient\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"PreventGradient\", name, _ctx._post_execution_callbacks, input,\r\n \"message\", message)\r\n return _result\r\n except _core._FallbackException:\r\n return prevent_gradient_eager_fallback(\r\n input, message=message, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef prevent_gradient_eager_fallback(input, message=\"\", name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function prevent_gradient\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if message is None:\r\n message = \"\"\r\n message = _execute.make_str(message, \"message\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"message\", message)\r\n _result = _execute.execute(b\"PreventGradient\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"PreventGradient\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef quantize_and_dequantize(input, signed_input=True, num_bits=8, range_given=False, input_min=0, input_max=0, name=None):\r\n r\"\"\"Use QuantizeAndDequantizeV2 instead.\r\n\r\n Args:\r\n input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.\r\n signed_input: An optional `bool`. Defaults to `True`.\r\n num_bits: An optional `int`. Defaults to `8`.\r\n range_given: An optional `bool`. Defaults to `False`.\r\n input_min: An optional `float`. Defaults to `0`.\r\n input_max: An optional `float`. Defaults to `0`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if signed_input is None:\r\n signed_input = True\r\n signed_input = _execute.make_bool(signed_input, \"signed_input\")\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if range_given is None:\r\n range_given = False\r\n range_given = _execute.make_bool(range_given, \"range_given\")\r\n if input_min is None:\r\n input_min = 0\r\n input_min = _execute.make_float(input_min, \"input_min\")\r\n if input_max is None:\r\n input_max = 0\r\n input_max = _execute.make_float(input_max, \"input_max\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"QuantizeAndDequantize\", input=input, signed_input=signed_input,\r\n num_bits=num_bits, range_given=range_given, input_min=input_min,\r\n input_max=input_max, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"signed_input\", _op.get_attr(\"signed_input\"), \"num_bits\",\r\n _op.get_attr(\"num_bits\"), \"range_given\",\r\n _op.get_attr(\"range_given\"), \"input_min\",\r\n _op.get_attr(\"input_min\"), \"input_max\",\r\n _op.get_attr(\"input_max\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"QuantizeAndDequantize\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"QuantizeAndDequantize\", name, _ctx._post_execution_callbacks, input,\r\n \"signed_input\", signed_input, \"num_bits\", num_bits, \"range_given\",\r\n range_given, \"input_min\", input_min, \"input_max\", input_max)\r\n return _result\r\n except _core._FallbackException:\r\n return quantize_and_dequantize_eager_fallback(\r\n input, signed_input=signed_input, num_bits=num_bits,\r\n range_given=range_given, input_min=input_min, input_max=input_max,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef quantize_and_dequantize_eager_fallback(input, signed_input=True, num_bits=8, range_given=False, input_min=0, input_max=0, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function quantize_and_dequantize\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if signed_input is None:\r\n signed_input = True\r\n signed_input = _execute.make_bool(signed_input, \"signed_input\")\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if range_given is None:\r\n range_given = False\r\n range_given = _execute.make_bool(range_given, \"range_given\")\r\n if input_min is None:\r\n input_min = 0\r\n input_min = _execute.make_float(input_min, \"input_min\")\r\n if input_max is None:\r\n input_max = 0\r\n input_max = _execute.make_float(input_max, \"input_max\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"signed_input\", signed_input, \"num_bits\", num_bits, \"range_given\",\r\n range_given, \"input_min\", input_min, \"input_max\", input_max, \"T\", _attr_T)\r\n _result = _execute.execute(b\"QuantizeAndDequantize\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n 
\"QuantizeAndDequantize\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef quantize_and_dequantize_v2(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, name=None):\r\n r\"\"\"Quantizes then dequantizes a tensor.\r\n\r\n This op simulates the precision loss from the quantized forward pass by:\r\r\n \r\r\n 1. Quantizing the tensor to fixed point numbers, which should match the target\r\r\n quantization method when it is used in inference.\r\r\n 2. Dequantizing it back to floating point numbers for the following ops, most\r\r\n likely matmul.\r\r\n \r\r\n There are different ways to quantize. This version uses only scaling, so 0.0\r\r\n maps to 0.\r\r\n \r\r\n From the specified 'num_bits' in the quantized output type, it determines\r\r\n minimum and maximum representable quantized values.\r\r\n \r\r\n e.g.\r\r\n \r\r\n * [-128, 127] for signed, num_bits = 8, or\r\r\n * [0, 255] for unsigned, num_bits = 8.\r\r\n \r\r\n If range_given == False, the initial input_min, input_max will be determined\r\r\n automatically as the minimum and maximum values in the input tensor, otherwise\r\r\n the specified values of input_min, input_max are used.\r\r\n \r\r\n Note: If the input_min, input_max are specified, they do not need to equal the\r\r\n actual minimum and maximum values in the tensor. e.g. in some cases it may be\r\r\n beneficial to specify these values such that the low probability extremes of the\r\r\n input distribution are clipped.\r\r\n \r\r\n This op determines the maximum scale_factor that would map the initial\r\r\n [input_min, input_max] range to a range that lies within the representable\r\r\n quantized range.\r\r\n \r\r\n It determines the scale from one of input_min and input_max, then updates the\r\r\n other one to maximize the respresentable range.\r\r\n \r\r\n e.g.\r\r\n \r\r\n * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,\r\r\n 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it\r\r\n would update input_max to be 127 / 12.8 = 9.921875\r\r\n * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,\r\r\n 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it\r\r\n would update input_min to be 128.0 / 12.7 = -10.07874\r\r\n * if the output is unsigned, input_min is forced to be 0, and only the\r\r\n specified input_max is used.\r\r\n \r\r\n After determining the scale_factor and updating the input range, it applies the\r\r\n following to each value in the 'input' tensor.\r\r\n \r\r\n output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.\r\n\r\n Args:\r\n input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.\r\n Tensor to quantize and then dequantize.\r\n input_min: A `Tensor`. Must have the same type as `input`.\r\n If `range_given == True`, this specifies the minimum input value that needs to\r\r\n be represented, otherwise it is determined from the min value of the `input`\r\r\n tensor.\r\n input_max: A `Tensor`. Must have the same type as `input`.\r\n If `range_given == True`, this specifies the maximum input value that needs to\r\r\n be represented, otherwise it is determined from the max value of the `input`\r\r\n tensor.\r\n signed_input: An optional `bool`. Defaults to `True`.\r\n Whether the quantization is signed or unsigned. (actually this parameter should\r\r\n have been called <b>`signed_output`</b>)\r\n num_bits: An optional `int`. 
Defaults to `8`.\r\n The bitwidth of the quantization.\r\n range_given: An optional `bool`. Defaults to `False`.\r\n Whether the range is given or should be determined from the `input` tensor.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if signed_input is None:\r\n signed_input = True\r\n signed_input = _execute.make_bool(signed_input, \"signed_input\")\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if range_given is None:\r\n range_given = False\r\n range_given = _execute.make_bool(range_given, \"range_given\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"QuantizeAndDequantizeV2\", input=input, input_min=input_min,\r\n input_max=input_max, signed_input=signed_input, num_bits=num_bits,\r\n range_given=range_given, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"signed_input\", _op.get_attr(\"signed_input\"), \"num_bits\",\r\n _op.get_attr(\"num_bits\"), \"range_given\",\r\n _op.get_attr(\"range_given\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"QuantizeAndDequantizeV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"QuantizeAndDequantizeV2\", name, _ctx._post_execution_callbacks,\r\n input, input_min, input_max, \"signed_input\", signed_input, \"num_bits\",\r\n num_bits, \"range_given\", range_given)\r\n return _result\r\n except _core._FallbackException:\r\n return quantize_and_dequantize_v2_eager_fallback(\r\n input, input_min, input_max, signed_input=signed_input,\r\n num_bits=num_bits, range_given=range_given, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef quantize_and_dequantize_v2_eager_fallback(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function quantize_and_dequantize_v2\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if signed_input is None:\r\n signed_input = True\r\n signed_input = _execute.make_bool(signed_input, \"signed_input\")\r\n if num_bits is None:\r\n num_bits = 8\r\n num_bits = _execute.make_int(num_bits, \"num_bits\")\r\n if range_given is None:\r\n range_given = False\r\n range_given = _execute.make_bool(range_given, \"range_given\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_min, input_max], _ctx)\r\n (input, input_min, input_max) = _inputs_T\r\n _inputs_flat = [input, input_min, input_max]\r\n _attrs = (\"signed_input\", signed_input, \"num_bits\", num_bits, \"range_given\",\r\n range_given, \"T\", _attr_T)\r\n _result = _execute.execute(b\"QuantizeAndDequantizeV2\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"QuantizeAndDequantizeV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef quantize_and_dequantize_v3(input, input_min, input_max, num_bits, signed_input=True, range_given=True, name=None):\r\n r\"\"\"Quantizes then 
dequantizes a tensor.\r\n\r\n This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a\r\r\n tensor, so its value can change during training.\r\n\r\n Args:\r\n input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.\r\n input_min: A `Tensor`. Must have the same type as `input`.\r\n input_max: A `Tensor`. Must have the same type as `input`.\r\n num_bits: A `Tensor` of type `int32`.\r\n signed_input: An optional `bool`. Defaults to `True`.\r\n range_given: An optional `bool`. Defaults to `True`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if signed_input is None:\r\n signed_input = True\r\n signed_input = _execute.make_bool(signed_input, \"signed_input\")\r\n if range_given is None:\r\n range_given = True\r\n range_given = _execute.make_bool(range_given, \"range_given\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"QuantizeAndDequantizeV3\", input=input, input_min=input_min,\r\n input_max=input_max, num_bits=num_bits, signed_input=signed_input,\r\n range_given=range_given, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"signed_input\", _op.get_attr(\"signed_input\"), \"range_given\",\r\n _op.get_attr(\"range_given\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"QuantizeAndDequantizeV3\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"QuantizeAndDequantizeV3\", name, _ctx._post_execution_callbacks,\r\n input, input_min, input_max, num_bits, \"signed_input\", signed_input,\r\n \"range_given\", range_given)\r\n return _result\r\n except _core._FallbackException:\r\n return quantize_and_dequantize_v3_eager_fallback(\r\n input, input_min, input_max, num_bits, signed_input=signed_input,\r\n range_given=range_given, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef quantize_and_dequantize_v3_eager_fallback(input, input_min, input_max, num_bits, signed_input=True, range_given=True, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function quantize_and_dequantize_v3\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if signed_input is None:\r\n signed_input = True\r\n signed_input = _execute.make_bool(signed_input, \"signed_input\")\r\n if range_given is None:\r\n range_given = True\r\n range_given = _execute.make_bool(range_given, \"range_given\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_min, input_max], _ctx)\r\n (input, input_min, input_max) = _inputs_T\r\n num_bits = _ops.convert_to_tensor(num_bits, _dtypes.int32)\r\n _inputs_flat = [input, input_min, input_max, num_bits]\r\n _attrs = (\"signed_input\", signed_input, \"range_given\", range_given, \"T\",\r\n _attr_T)\r\n _result = _execute.execute(b\"QuantizeAndDequantizeV3\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"QuantizeAndDequantizeV3\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return 
_result\r\n\r\n\r\n_quantize_v2_outputs = [\"output\", \"output_min\", \"output_max\"]\r\n_QuantizeV2Output = _collections.namedtuple(\r\n \"QuantizeV2\", _quantize_v2_outputs)\r\n\r\n\r\ndef quantize_v2(input, min_range, max_range, T, mode=\"MIN_COMBINED\", round_mode=\"HALF_AWAY_FROM_ZERO\", name=None):\r\n r\"\"\"Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.\r\n\r\n [min_range, max_range] are scalar floats that specify the range for\r\r\n the 'input' data. The 'mode' attribute controls exactly which calculations are\r\r\n used to convert the float values to their quantized equivalents. The\r\r\n 'round_mode' attribute controls which rounding tie-breaking algorithm is used\r\r\n when rounding float values to their quantized equivalents.\r\r\n \r\r\n In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:\r\r\n \r\r\n ```\r\r\n out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)\r\r\n if T == qint8, out[i] -= (range(T) + 1) / 2.0\r\r\n ```\r\r\n \r\r\n here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`\r\r\n \r\r\n *MIN_COMBINED Mode Example*\r\r\n \r\r\n Assume the input is type float and has a possible range of [0.0, 6.0] and the\r\r\n output type is quint8 ([0, 255]). The min_range and max_range values should be\r\r\n specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each\r\r\n value of the input by 255/6 and cast to quint8.\r\r\n \r\r\n If the output type was qint8 ([-128, 127]), the operation will additionally\r\r\n subtract each value by 128 prior to casting, so that the range of values aligns\r\r\n with the range of qint8.\r\r\n \r\r\n If the mode is 'MIN_FIRST', then this approach is used:\r\r\n \r\r\n ```\r\r\n num_discrete_values = 1 << (# of bits in T)\r\r\n range_adjust = num_discrete_values / (num_discrete_values - 1)\r\r\n range = (range_max - range_min) * range_adjust\r\r\n range_scale = num_discrete_values / range\r\r\n quantized = round(input * range_scale) - round(range_min * range_scale) +\r\r\n numeric_limits<T>::min()\r\r\n quantized = max(quantized, numeric_limits<T>::min())\r\r\n quantized = min(quantized, numeric_limits<T>::max())\r\r\n ```\r\r\n \r\r\n The biggest difference between this and MIN_COMBINED is that the minimum range\r\r\n is rounded first, before it's subtracted from the rounded value. With\r\r\n MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing\r\r\n and dequantizing will introduce a larger and larger error.\r\r\n \r\r\n *SCALED mode Example*\r\r\n \r\r\n `SCALED` mode matches the quantization approach used in\r\r\n `QuantizeAndDequantize{V2|V3}`.\r\r\n \r\r\n If the mode is `SCALED`, we do not use the full range of the output type,\r\r\n choosing to elide the lowest possible value for symmetry (e.g., output range is\r\r\n -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to\r\r\n 0.\r\r\n \r\r\n We first find the range of values in our tensor. 
The\r\r\n range we use is always centered on 0, so we find m such that\r\r\n \r\r\n ```c++\r\r\n m = max(abs(input_min), abs(input_max))\r\r\n ```\r\r\n \r\r\n Our input tensor range is then `[-m, m]`.\r\r\n \r\r\n Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.\r\r\n If T is signed, this is\r\r\n \r\r\n ```\r\r\n num_bits = sizeof(T) * 8\r\r\n [min_fixed, max_fixed] =\r\r\n [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]\r\r\n ```\r\r\n \r\r\n Otherwise, if T is unsigned, the fixed-point range is\r\r\n \r\r\n ```\r\r\n [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]\r\r\n ```\r\r\n \r\r\n From this we compute our scaling factor, s:\r\r\n \r\r\n ```c++\r\r\n s = (max_fixed - min_fixed) / (2 * m)\r\r\n ```\r\r\n \r\r\n Now we can quantize the elements of our tensor:\r\r\n \r\r\n ```c++\r\r\n result = round(input * s)\r\r\n ```\r\r\n \r\r\n One thing to watch out for is that the operator may choose to adjust the\r\r\n requested minimum and maximum values slightly during the quantization process,\r\r\n so you should always use the output ports as the range for further calculations.\r\r\n For example, if the requested minimum and maximum values are close to equal,\r\r\n they will be separated by a small epsilon value to prevent ill-formed quantized\r\r\n buffers from being created. Otherwise, you can end up with buffers where all the\r\r\n quantized values map to the same float value, which causes problems for\r\r\n operations that have to perform further calculations on them.\r\n\r\n Args:\r\n input: A `Tensor` of type `float32`.\r\n min_range: A `Tensor` of type `float32`.\r\n The minimum scalar value possibly produced for the input.\r\n max_range: A `Tensor` of type `float32`.\r\n The maximum scalar value possibly produced for the input.\r\n T: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.\r\n mode: An optional `string` from: `\"MIN_COMBINED\", \"MIN_FIRST\", \"SCALED\"`. Defaults to `\"MIN_COMBINED\"`.\r\n round_mode: An optional `string` from: `\"HALF_AWAY_FROM_ZERO\", \"HALF_TO_EVEN\"`. 
Defaults to `\"HALF_AWAY_FROM_ZERO\"`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (output, output_min, output_max).\r\n\r\n output: A `Tensor` of type `T`.\r\n output_min: A `Tensor` of type `float32`.\r\n output_max: A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n T = _execute.make_type(T, \"T\")\r\n if mode is None:\r\n mode = \"MIN_COMBINED\"\r\n mode = _execute.make_str(mode, \"mode\")\r\n if round_mode is None:\r\n round_mode = \"HALF_AWAY_FROM_ZERO\"\r\n round_mode = _execute.make_str(round_mode, \"round_mode\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"QuantizeV2\", input=input, min_range=min_range, max_range=max_range,\r\n T=T, mode=mode, round_mode=round_mode, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"mode\", _op.get_attr(\"mode\"),\r\n \"round_mode\", _op.get_attr(\"round_mode\"))\r\n _execute.record_gradient(\r\n \"QuantizeV2\", _inputs_flat, _attrs, _result, name)\r\n _result = _QuantizeV2Output._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"QuantizeV2\",\r\n name, _ctx._post_execution_callbacks, input, min_range, max_range,\r\n \"T\", T, \"mode\", mode, \"round_mode\", round_mode)\r\n _result = _QuantizeV2Output._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return quantize_v2_eager_fallback(\r\n input, min_range, max_range, T=T, mode=mode, round_mode=round_mode,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef quantize_v2_eager_fallback(input, min_range, max_range, T, mode=\"MIN_COMBINED\", round_mode=\"HALF_AWAY_FROM_ZERO\", name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function quantize_v2\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n T = _execute.make_type(T, \"T\")\r\n if mode is None:\r\n mode = \"MIN_COMBINED\"\r\n mode = _execute.make_str(mode, \"mode\")\r\n if round_mode is None:\r\n round_mode = \"HALF_AWAY_FROM_ZERO\"\r\n round_mode = _execute.make_str(round_mode, \"round_mode\")\r\n input = _ops.convert_to_tensor(input, _dtypes.float32)\r\n min_range = _ops.convert_to_tensor(min_range, _dtypes.float32)\r\n max_range = _ops.convert_to_tensor(max_range, _dtypes.float32)\r\n _inputs_flat = [input, min_range, max_range]\r\n _attrs = (\"T\", T, \"mode\", mode, \"round_mode\", round_mode)\r\n _result = _execute.execute(b\"QuantizeV2\", 3, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"QuantizeV2\", _inputs_flat, _attrs, _result, name)\r\n _result = _QuantizeV2Output._make(_result)\r\n return _result\r\n\r\n\r\n_quantized_concat_outputs = [\"output\", \"output_min\", \"output_max\"]\r\n_QuantizedConcatOutput = _collections.namedtuple(\r\n \"QuantizedConcat\", _quantized_concat_outputs)\r\n\r\n\r\n@tf_export('quantization.quantized_concat', 'quantized_concat')\r\n@deprecated_endpoints('quantized_concat')\r\ndef quantized_concat(concat_dim, values, input_mins, input_maxes, name=None):\r\n r\"\"\"Concatenates quantized tensors along one dimension.\r\n\r\n Args:\r\n concat_dim: A `Tensor` of 
type `int32`.\r\n 0-D. The dimension along which to concatenate. Must be in the\r\r\n range [0, rank(values)).\r\n values: A list of at least 2 `Tensor` objects with the same type.\r\n The `N` Tensors to concatenate. Their ranks and types must match,\r\r\n and their sizes must match in all dimensions except `concat_dim`.\r\n input_mins: A list with the same length as `values` of `Tensor` objects with type `float32`.\r\n The minimum scalar values for each of the input tensors.\r\n input_maxes: A list with the same length as `values` of `Tensor` objects with type `float32`.\r\n The maximum scalar values for each of the input tensors.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (output, output_min, output_max).\r\n\r\n output: A `Tensor`. Has the same type as `values`.\r\n output_min: A `Tensor` of type `float32`.\r\n output_max: A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n \"'quantized_concat' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n if not isinstance(input_mins, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'input_mins' argument to \"\r\n \"'quantized_concat' Op, not %r.\" % input_mins)\r\n if len(input_mins) != _attr_N:\r\n raise ValueError(\r\n \"List argument 'input_mins' to 'quantized_concat' Op with length %d \"\r\n \"must match length %d of argument 'values'.\" %\r\n (len(input_mins), _attr_N))\r\n if not isinstance(input_maxes, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'input_maxes' argument to \"\r\n \"'quantized_concat' Op, not %r.\" % input_maxes)\r\n if len(input_maxes) != _attr_N:\r\n raise ValueError(\r\n \"List argument 'input_maxes' to 'quantized_concat' Op with length %d \"\r\n \"must match length %d of argument 'values'.\" %\r\n (len(input_maxes), _attr_N))\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"QuantizedConcat\", concat_dim=concat_dim, values=values,\r\n input_mins=input_mins, input_maxes=input_maxes, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"N\", _op.get_attr(\"N\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"QuantizedConcat\", _inputs_flat, _attrs, _result, name)\r\n _result = _QuantizedConcatOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"QuantizedConcat\", name, _ctx._post_execution_callbacks, concat_dim,\r\n values, input_mins, input_maxes)\r\n _result = _QuantizedConcatOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return quantized_concat_eager_fallback(\r\n concat_dim, values, input_mins, input_maxes, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef quantized_concat_eager_fallback(concat_dim, values, input_mins, input_maxes, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function quantized_concat\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not isinstance(values, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'values' argument to \"\r\n 
\"'quantized_concat' Op, not %r.\" % values)\r\n _attr_N = len(values)\r\n if not isinstance(input_mins, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'input_mins' argument to \"\r\n \"'quantized_concat' Op, not %r.\" % input_mins)\r\n if len(input_mins) != _attr_N:\r\n raise ValueError(\r\n \"List argument 'input_mins' to 'quantized_concat' Op with length %d \"\r\n \"must match length %d of argument 'values'.\" %\r\n (len(input_mins), _attr_N))\r\n if not isinstance(input_maxes, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'input_maxes' argument to \"\r\n \"'quantized_concat' Op, not %r.\" % input_maxes)\r\n if len(input_maxes) != _attr_N:\r\n raise ValueError(\r\n \"List argument 'input_maxes' to 'quantized_concat' Op with length %d \"\r\n \"must match length %d of argument 'values'.\" %\r\n (len(input_maxes), _attr_N))\r\n _attr_T, values = _execute.args_to_matching_eager(list(values), _ctx)\r\n concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)\r\n input_mins = _ops.convert_n_to_tensor(input_mins, _dtypes.float32)\r\n input_maxes = _ops.convert_n_to_tensor(input_maxes, _dtypes.float32)\r\n _inputs_flat = [concat_dim] + list(values) + list(input_mins) + list(input_maxes)\r\n _attrs = (\"N\", _attr_N, \"T\", _attr_T)\r\n _result = _execute.execute(b\"QuantizedConcat\", 3, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"QuantizedConcat\", _inputs_flat, _attrs, _result, name)\r\n _result = _QuantizedConcatOutput._make(_result)\r\n return _result\r\n\r\n\r\n_quantized_instance_norm_outputs = [\"y\", \"y_min\", \"y_max\"]\r\n_QuantizedInstanceNormOutput = _collections.namedtuple(\r\n \"QuantizedInstanceNorm\", _quantized_instance_norm_outputs)\r\n\r\n\r\ndef quantized_instance_norm(x, x_min, x_max, output_range_given=False, given_y_min=0, given_y_max=0, variance_epsilon=1e-05, min_separation=0.001, name=None):\r\n r\"\"\"Quantized Instance normalization.\r\n\r\n Args:\r\n x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.\r\n A 4D input Tensor.\r\n x_min: A `Tensor` of type `float32`.\r\n The value represented by the lowest quantized input.\r\n x_max: A `Tensor` of type `float32`.\r\n The value represented by the highest quantized input.\r\n output_range_given: An optional `bool`. Defaults to `False`.\r\n If True, `given_y_min` and `given_y_min`\r\r\n and `given_y_max` are used as the output range. Otherwise,\r\r\n the implementation computes the output range.\r\n given_y_min: An optional `float`. Defaults to `0`.\r\n Output in `y_min` if `output_range_given` is True.\r\n given_y_max: An optional `float`. Defaults to `0`.\r\n Output in `y_max` if `output_range_given` is True.\r\n variance_epsilon: An optional `float`. Defaults to `1e-05`.\r\n A small float number to avoid dividing by 0.\r\n min_separation: An optional `float`. Defaults to `0.001`.\r\n Minimum value of `y_max - y_min`\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (y, y_min, y_max).\r\n\r\n y: A `Tensor`. 
Has the same type as `x`.\r\n y_min: A `Tensor` of type `float32`.\r\n y_max: A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if output_range_given is None:\r\n output_range_given = False\r\n output_range_given = _execute.make_bool(output_range_given, \"output_range_given\")\r\n if given_y_min is None:\r\n given_y_min = 0\r\n given_y_min = _execute.make_float(given_y_min, \"given_y_min\")\r\n if given_y_max is None:\r\n given_y_max = 0\r\n given_y_max = _execute.make_float(given_y_max, \"given_y_max\")\r\n if variance_epsilon is None:\r\n variance_epsilon = 1e-05\r\n variance_epsilon = _execute.make_float(variance_epsilon, \"variance_epsilon\")\r\n if min_separation is None:\r\n min_separation = 0.001\r\n min_separation = _execute.make_float(min_separation, \"min_separation\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"QuantizedInstanceNorm\", x=x, x_min=x_min, x_max=x_max,\r\n output_range_given=output_range_given, given_y_min=given_y_min,\r\n given_y_max=given_y_max, variance_epsilon=variance_epsilon,\r\n min_separation=min_separation, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"output_range_given\",\r\n _op.get_attr(\"output_range_given\"), \"given_y_min\",\r\n _op.get_attr(\"given_y_min\"), \"given_y_max\",\r\n _op.get_attr(\"given_y_max\"), \"variance_epsilon\",\r\n _op.get_attr(\"variance_epsilon\"), \"min_separation\",\r\n _op.get_attr(\"min_separation\"))\r\n _execute.record_gradient(\r\n \"QuantizedInstanceNorm\", _inputs_flat, _attrs, _result, name)\r\n _result = _QuantizedInstanceNormOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"QuantizedInstanceNorm\", name, _ctx._post_execution_callbacks, x,\r\n x_min, x_max, \"output_range_given\", output_range_given, \"given_y_min\",\r\n given_y_min, \"given_y_max\", given_y_max, \"variance_epsilon\",\r\n variance_epsilon, \"min_separation\", min_separation)\r\n _result = _QuantizedInstanceNormOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return quantized_instance_norm_eager_fallback(\r\n x, x_min, x_max, output_range_given=output_range_given,\r\n given_y_min=given_y_min, given_y_max=given_y_max,\r\n variance_epsilon=variance_epsilon, min_separation=min_separation,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef quantized_instance_norm_eager_fallback(x, x_min, x_max, output_range_given=False, given_y_min=0, given_y_max=0, variance_epsilon=1e-05, min_separation=0.001, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function quantized_instance_norm\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if output_range_given is None:\r\n output_range_given = False\r\n output_range_given = _execute.make_bool(output_range_given, \"output_range_given\")\r\n if given_y_min is None:\r\n given_y_min = 0\r\n given_y_min = _execute.make_float(given_y_min, \"given_y_min\")\r\n if given_y_max is None:\r\n given_y_max = 0\r\n given_y_max = _execute.make_float(given_y_max, \"given_y_max\")\r\n if variance_epsilon is None:\r\n variance_epsilon = 1e-05\r\n 
variance_epsilon = _execute.make_float(variance_epsilon, \"variance_epsilon\")\r\n if min_separation is None:\r\n min_separation = 0.001\r\n min_separation = _execute.make_float(min_separation, \"min_separation\")\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n x_min = _ops.convert_to_tensor(x_min, _dtypes.float32)\r\n x_max = _ops.convert_to_tensor(x_max, _dtypes.float32)\r\n _inputs_flat = [x, x_min, x_max]\r\n _attrs = (\"T\", _attr_T, \"output_range_given\", output_range_given,\r\n \"given_y_min\", given_y_min, \"given_y_max\", given_y_max, \"variance_epsilon\",\r\n variance_epsilon, \"min_separation\", min_separation)\r\n _result = _execute.execute(b\"QuantizedInstanceNorm\", 3, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"QuantizedInstanceNorm\", _inputs_flat, _attrs, _result, name)\r\n _result = _QuantizedInstanceNormOutput._make(_result)\r\n return _result\r\n\r\n\r\n_quantized_reshape_outputs = [\"output\", \"output_min\", \"output_max\"]\r\n_QuantizedReshapeOutput = _collections.namedtuple(\r\n \"QuantizedReshape\", _quantized_reshape_outputs)\r\n\r\n\r\ndef quantized_reshape(tensor, shape, input_min, input_max, name=None):\r\n r\"\"\"Reshapes a quantized tensor as per the Reshape op.\r\n\r\n ```\r\n\r\n Args:\r\n tensor: A `Tensor`.\r\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n Defines the shape of the output tensor.\r\n input_min: A `Tensor` of type `float32`. The minimum value of the input.\r\n input_max: A `Tensor` of type `float32`. The maximum value of the input.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (output, output_min, output_max).\r\n\r\n output: A `Tensor`. Has the same type as `tensor`.\r\n output_min: A `Tensor` of type `float32`.\r\n output_max: A `Tensor` of type `float32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"QuantizedReshape\", tensor=tensor, shape=shape, input_min=input_min,\r\n input_max=input_max, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tshape\", _op.get_attr(\"Tshape\"))\r\n _execute.record_gradient(\r\n \"QuantizedReshape\", _inputs_flat, _attrs, _result, name)\r\n _result = _QuantizedReshapeOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"QuantizedReshape\", name, _ctx._post_execution_callbacks, tensor,\r\n shape, input_min, input_max)\r\n _result = _QuantizedReshapeOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return quantized_reshape_eager_fallback(\r\n tensor, shape, input_min, input_max, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef quantized_reshape_eager_fallback(tensor, shape, input_min, input_max, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function quantized_reshape\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)\r\n _attr_Tshape, (shape,) = _execute.args_to_matching_eager([shape], _ctx, 
_dtypes.int32)\r\n input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)\r\n input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)\r\n _inputs_flat = [tensor, shape, input_min, input_max]\r\n _attrs = (\"T\", _attr_T, \"Tshape\", _attr_Tshape)\r\n _result = _execute.execute(b\"QuantizedReshape\", 3, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"QuantizedReshape\", _inputs_flat, _attrs, _result, name)\r\n _result = _QuantizedReshapeOutput._make(_result)\r\n return _result\r\n\r\n\r\ndef rank(input, name=None):\r\n r\"\"\"Returns the rank of a tensor.\r\n\r\n This operation returns an integer representing the rank of `input`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\r\r\n # shape of tensor 't' is [2, 2, 3]\r\r\n rank(t) ==> 3\r\r\n ```\r\r\n \r\r\n **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank\r\r\n of a tensor is the number of indices required to uniquely select each element\r\r\n of the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"\r\n\r\n Args:\r\n input: A `Tensor`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `int32`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Rank\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"Rank\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Rank\", name,\r\n _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return rank_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef rank_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function rank\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"Rank\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Rank\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef ref_identity(input, name=None):\r\n r\"\"\"Return the same ref tensor as the input ref tensor.\r\n\r\n Args:\r\n input: A mutable `Tensor`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A mutable `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"RefIdentity\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"RefIdentity\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n raise RuntimeError(\"ref_identity op does not support eager execution. Arg 'output' is a ref.\")\r\n\r\n\r\n raise RuntimeError(\"ref_identity op does not support eager execution. Arg 'output' is a ref.\")\r\n\r\n@tf_export('reshape', 'manip.reshape')\r\n@deprecated_endpoints('manip.reshape')\r\ndef reshape(tensor, shape, name=None):\r\n r\"\"\"Reshapes a tensor.\r\n\r\n Given `tensor`, this operation returns a tensor that has the same values\r\r\n as `tensor` with shape `shape`.\r\r\n \r\r\n If one component of `shape` is the special value -1, the size of that dimension\r\r\n is computed so that the total size remains constant. In particular, a `shape`\r\r\n of `[-1]` flattens into 1-D. At most one component of `shape` can be -1.\r\r\n \r\r\n If `shape` is 1-D or higher, then the operation returns a tensor with shape\r\r\n `shape` filled with the values of `tensor`. In this case, the number of elements\r\r\n implied by `shape` must be the same as the number of elements in `tensor`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\r\n # tensor 't' has shape [9]\r\r\n reshape(t, [3, 3]) ==> [[1, 2, 3],\r\r\n [4, 5, 6],\r\r\n [7, 8, 9]]\r\r\n \r\r\n # tensor 't' is [[[1, 1], [2, 2]],\r\r\n # [[3, 3], [4, 4]]]\r\r\n # tensor 't' has shape [2, 2, 2]\r\r\n reshape(t, [2, 4]) ==> [[1, 1, 2, 2],\r\r\n [3, 3, 4, 4]]\r\r\n \r\r\n # tensor 't' is [[[1, 1, 1],\r\r\n # [2, 2, 2]],\r\r\n # [[3, 3, 3],\r\r\n # [4, 4, 4]],\r\r\n # [[5, 5, 5],\r\r\n # [6, 6, 6]]]\r\r\n # tensor 't' has shape [3, 2, 3]\r\r\n # pass '[-1]' to flatten 't'\r\r\n reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]\r\r\n \r\r\n # -1 can also be used to infer the shape\r\r\n \r\r\n # -1 is inferred to be 9:\r\r\n reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],\r\r\n [4, 4, 4, 5, 5, 5, 6, 6, 6]]\r\r\n # -1 is inferred to be 2:\r\r\n reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],\r\r\n [4, 4, 4, 5, 5, 5, 6, 6, 6]]\r\r\n # -1 is inferred to be 3:\r\r\n reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],\r\r\n [2, 2, 2],\r\r\n [3, 3, 3]],\r\r\n [[4, 4, 4],\r\r\n [5, 5, 5],\r\r\n [6, 6, 6]]]\r\r\n \r\r\n # tensor 't' is [7]\r\r\n # shape `[]` reshapes to a scalar\r\r\n reshape(t, []) ==> 7\r\r\n ```\r\n\r\n Args:\r\n tensor: A `Tensor`.\r\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n Defines the shape of the output tensor.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `tensor`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Reshape\", tensor=tensor, shape=shape, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tshape\", _op.get_attr(\"Tshape\"))\r\n _execute.record_gradient(\r\n \"Reshape\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Reshape\",\r\n name, _ctx._post_execution_callbacks, tensor, shape)\r\n return _result\r\n except _core._FallbackException:\r\n return reshape_eager_fallback(\r\n tensor, shape, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef reshape_eager_fallback(tensor, shape, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function reshape\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)\r\n _attr_Tshape, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)\r\n _inputs_flat = [tensor, shape]\r\n _attrs = (\"T\", _attr_T, \"Tshape\", _attr_Tshape)\r\n _result = _execute.execute(b\"Reshape\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Reshape\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef resource_strided_slice_assign(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):\r\n r\"\"\"Assign `value` to the sliced l-value reference of `ref`.\r\n\r\n The values of `value` are assigned to the positions in the variable\r\r\n `ref` that are selected by the slice parameters. The slice parameters\r\r\n `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`.\r\r\n \r\r\n NOTE this op currently does not support broadcasting and so `value`'s\r\r\n shape must be exactly the shape produced by the slice of `ref`.\r\n\r\n Args:\r\n ref: A `Tensor` of type `resource`.\r\n begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n end: A `Tensor`. Must have the same type as `begin`.\r\n strides: A `Tensor`. Must have the same type as `begin`.\r\n value: A `Tensor`.\r\n begin_mask: An optional `int`. Defaults to `0`.\r\n end_mask: An optional `int`. Defaults to `0`.\r\n ellipsis_mask: An optional `int`. Defaults to `0`.\r\n new_axis_mask: An optional `int`. Defaults to `0`.\r\n shrink_axis_mask: An optional `int`. 
Defaults to `0`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n The created Operation.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if begin_mask is None:\r\n begin_mask = 0\r\n begin_mask = _execute.make_int(begin_mask, \"begin_mask\")\r\n if end_mask is None:\r\n end_mask = 0\r\n end_mask = _execute.make_int(end_mask, \"end_mask\")\r\n if ellipsis_mask is None:\r\n ellipsis_mask = 0\r\n ellipsis_mask = _execute.make_int(ellipsis_mask, \"ellipsis_mask\")\r\n if new_axis_mask is None:\r\n new_axis_mask = 0\r\n new_axis_mask = _execute.make_int(new_axis_mask, \"new_axis_mask\")\r\n if shrink_axis_mask is None:\r\n shrink_axis_mask = 0\r\n shrink_axis_mask = _execute.make_int(shrink_axis_mask, \"shrink_axis_mask\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ResourceStridedSliceAssign\", ref=ref, begin=begin, end=end,\r\n strides=strides, value=value, begin_mask=begin_mask,\r\n end_mask=end_mask, ellipsis_mask=ellipsis_mask,\r\n new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,\r\n name=name)\r\n return _op\r\n _result = None\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"ResourceStridedSliceAssign\", name, _ctx._post_execution_callbacks,\r\n ref, begin, end, strides, value, \"begin_mask\", begin_mask, \"end_mask\",\r\n end_mask, \"ellipsis_mask\", ellipsis_mask, \"new_axis_mask\",\r\n new_axis_mask, \"shrink_axis_mask\", shrink_axis_mask)\r\n return _result\r\n except _core._FallbackException:\r\n return resource_strided_slice_assign_eager_fallback(\r\n ref, begin, end, strides, value, begin_mask=begin_mask,\r\n end_mask=end_mask, ellipsis_mask=ellipsis_mask,\r\n new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef resource_strided_slice_assign_eager_fallback(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function resource_strided_slice_assign\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if begin_mask is None:\r\n begin_mask = 0\r\n begin_mask = _execute.make_int(begin_mask, \"begin_mask\")\r\n if end_mask is None:\r\n end_mask = 0\r\n end_mask = _execute.make_int(end_mask, \"end_mask\")\r\n if ellipsis_mask is None:\r\n ellipsis_mask = 0\r\n ellipsis_mask = _execute.make_int(ellipsis_mask, \"ellipsis_mask\")\r\n if new_axis_mask is None:\r\n new_axis_mask = 0\r\n new_axis_mask = _execute.make_int(new_axis_mask, \"new_axis_mask\")\r\n if shrink_axis_mask is None:\r\n shrink_axis_mask = 0\r\n shrink_axis_mask = _execute.make_int(shrink_axis_mask, \"shrink_axis_mask\")\r\n _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)\r\n _attr_Index, _inputs_Index = _execute.args_to_matching_eager([begin, end, strides], _ctx)\r\n (begin, end, strides) = _inputs_Index\r\n ref = _ops.convert_to_tensor(ref, _dtypes.resource)\r\n _inputs_flat = [ref, begin, end, strides, value]\r\n _attrs = (\"T\", _attr_T, \"Index\", _attr_Index, \"begin_mask\", begin_mask,\r\n \"end_mask\", end_mask, \"ellipsis_mask\", ellipsis_mask, 
\"new_axis_mask\",\r\n new_axis_mask, \"shrink_axis_mask\", shrink_axis_mask)\r\n _result = _execute.execute(b\"ResourceStridedSliceAssign\", 0,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _result = None\r\n return _result\r\n\r\n\r\ndef reverse(tensor, dims, name=None):\r\n r\"\"\"Reverses specific dimensions of a tensor.\r\n\r\n Given a `tensor`, and a `bool` tensor `dims` representing the dimensions\r\r\n of `tensor`, this operation reverses each dimension i of `tensor` where\r\r\n `dims[i]` is `True`.\r\r\n \r\r\n `tensor` can have up to 8 dimensions. The number of dimensions\r\r\n of `tensor` must equal the number of elements in `dims`. In other words:\r\r\n \r\r\n `rank(tensor) = size(dims)`\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # tensor 't' is [[[[ 0, 1, 2, 3],\r\r\n # [ 4, 5, 6, 7],\r\r\n # [ 8, 9, 10, 11]],\r\r\n # [[12, 13, 14, 15],\r\r\n # [16, 17, 18, 19],\r\r\n # [20, 21, 22, 23]]]]\r\r\n # tensor 't' shape is [1, 2, 3, 4]\r\r\n \r\r\n # 'dims' is [False, False, False, True]\r\r\n reverse(t, dims) ==> [[[[ 3, 2, 1, 0],\r\r\n [ 7, 6, 5, 4],\r\r\n [ 11, 10, 9, 8]],\r\r\n [[15, 14, 13, 12],\r\r\n [19, 18, 17, 16],\r\r\n [23, 22, 21, 20]]]]\r\r\n \r\r\n # 'dims' is [False, True, False, False]\r\r\n reverse(t, dims) ==> [[[[12, 13, 14, 15],\r\r\n [16, 17, 18, 19],\r\r\n [20, 21, 22, 23]\r\r\n [[ 0, 1, 2, 3],\r\r\n [ 4, 5, 6, 7],\r\r\n [ 8, 9, 10, 11]]]]\r\r\n \r\r\n # 'dims' is [False, False, True, False]\r\r\n reverse(t, dims) ==> [[[[8, 9, 10, 11],\r\r\n [4, 5, 6, 7],\r\r\n [0, 1, 2, 3]]\r\r\n [[20, 21, 22, 23],\r\r\n [16, 17, 18, 19],\r\r\n [12, 13, 14, 15]]]]\r\r\n ```\r\n\r\n Args:\r\n tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `bool`, `half`, `float32`, `float64`, `complex64`, `complex128`, `string`.\r\n Up to 8-D.\r\n dims: A `Tensor` of type `bool`. 1-D. The dimensions to reverse.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `tensor`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Reverse\", tensor=tensor, dims=dims, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"Reverse\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Reverse\",\r\n name, _ctx._post_execution_callbacks, tensor, dims)\r\n return _result\r\n except _core._FallbackException:\r\n return reverse_eager_fallback(\r\n tensor, dims, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef reverse_eager_fallback(tensor, dims, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function reverse\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)\r\n dims = _ops.convert_to_tensor(dims, _dtypes.bool)\r\n _inputs_flat = [tensor, dims]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"Reverse\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Reverse\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef reverse_sequence(input, seq_lengths, seq_dim, batch_dim=0, name=None):\r\n r\"\"\"Reverses variable length slices.\r\n\r\n This op first slices `input` along the dimension `batch_dim`, and for each\r\r\n slice `i`, reverses the first `seq_lengths[i]` elements along\r\r\n the dimension `seq_dim`.\r\r\n \r\r\n The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,\r\r\n and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.\r\r\n \r\r\n The output slice `i` along dimension `batch_dim` is then given by input\r\r\n slice `i`, with the first `seq_lengths[i]` slices along dimension\r\r\n `seq_dim` reversed.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # Given this:\r\r\n batch_dim = 0\r\r\n seq_dim = 1\r\r\n input.dims = (4, 8, ...)\r\r\n seq_lengths = [7, 2, 3, 5]\r\r\n \r\r\n # then slices of input are reversed on seq_dim, but only up to seq_lengths:\r\r\n output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]\r\r\n output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]\r\r\n output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]\r\r\n output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]\r\r\n \r\r\n # while entries past seq_lens are copied through:\r\r\n output[0, 7:, :, ...] = input[0, 7:, :, ...]\r\r\n output[1, 2:, :, ...] = input[1, 2:, :, ...]\r\r\n output[2, 3:, :, ...] = input[2, 3:, :, ...]\r\r\n output[3, 2:, :, ...] = input[3, 2:, :, ...]\r\r\n ```\r\r\n \r\r\n In contrast, if:\r\r\n \r\r\n ```\r\r\n # Given this:\r\r\n batch_dim = 2\r\r\n seq_dim = 0\r\r\n input.dims = (8, ?, 4, ...)\r\r\n seq_lengths = [7, 2, 3, 5]\r\r\n \r\r\n # then slices of input are reversed on seq_dim, but only up to seq_lengths:\r\r\n output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]\r\r\n output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]\r\r\n output[0:3, :, 2, :, ...] 
= input[3:0:-1, :, 2, :, ...]\r\r\n output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]\r\r\n \r\r\n # while entries past seq_lens are copied through:\r\r\n output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]\r\r\n output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]\r\r\n output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]\r\r\n output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`. The input to reverse.\r\n seq_lengths: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 1-D with length `input.dims(batch_dim)` and\r\r\n `max(seq_lengths) <= input.dims(seq_dim)`\r\n seq_dim: An `int`. The dimension which is partially reversed.\r\n batch_dim: An optional `int`. Defaults to `0`.\r\n The dimension along which reversal is performed.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n seq_dim = _execute.make_int(seq_dim, \"seq_dim\")\r\n if batch_dim is None:\r\n batch_dim = 0\r\n batch_dim = _execute.make_int(batch_dim, \"batch_dim\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ReverseSequence\", input=input, seq_lengths=seq_lengths,\r\n seq_dim=seq_dim, batch_dim=batch_dim, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"seq_dim\", _op.get_attr(\"seq_dim\"), \"batch_dim\",\r\n _op.get_attr(\"batch_dim\"), \"T\", _op.get_attr(\"T\"), \"Tlen\",\r\n _op.get_attr(\"Tlen\"))\r\n _execute.record_gradient(\r\n \"ReverseSequence\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"ReverseSequence\", name, _ctx._post_execution_callbacks, input,\r\n seq_lengths, \"seq_dim\", seq_dim, \"batch_dim\", batch_dim)\r\n return _result\r\n except _core._FallbackException:\r\n return reverse_sequence_eager_fallback(\r\n input, seq_lengths, seq_dim=seq_dim, batch_dim=batch_dim, name=name,\r\n ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef reverse_sequence_eager_fallback(input, seq_lengths, seq_dim, batch_dim=0, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function reverse_sequence\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n seq_dim = _execute.make_int(seq_dim, \"seq_dim\")\r\n if batch_dim is None:\r\n batch_dim = 0\r\n batch_dim = _execute.make_int(batch_dim, \"batch_dim\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tlen, (seq_lengths,) = _execute.args_to_matching_eager([seq_lengths], _ctx, _dtypes.int64)\r\n _inputs_flat = [input, seq_lengths]\r\n _attrs = (\"seq_dim\", seq_dim, \"batch_dim\", batch_dim, \"T\", _attr_T, \"Tlen\",\r\n _attr_Tlen)\r\n _result = _execute.execute(b\"ReverseSequence\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ReverseSequence\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('reverse', 'manip.reverse', 'reverse_v2')\r\n@deprecated_endpoints('manip.reverse', 'reverse_v2')\r\ndef reverse_v2(tensor, axis, name=None):\r\n r\"\"\"Reverses specific 
dimensions of a tensor.\r\n\r\n NOTE `tf.reverse` has now changed behavior in preparation for 1.0.\r\r\n `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.\r\r\n \r\r\n Given a `tensor`, and a `int32` tensor `axis` representing the set of\r\r\n dimensions of `tensor` to reverse. This operation reverses each dimension\r\r\n `i` for which there exists `j` s.t. `axis[j] == i`.\r\r\n \r\r\n `tensor` can have up to 8 dimensions. The number of dimensions specified\r\r\n in `axis` may be 0 or more entries. If an index is specified more than\r\r\n once, a InvalidArgument error is raised.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # tensor 't' is [[[[ 0, 1, 2, 3],\r\r\n # [ 4, 5, 6, 7],\r\r\n # [ 8, 9, 10, 11]],\r\r\n # [[12, 13, 14, 15],\r\r\n # [16, 17, 18, 19],\r\r\n # [20, 21, 22, 23]]]]\r\r\n # tensor 't' shape is [1, 2, 3, 4]\r\r\n \r\r\n # 'dims' is [3] or 'dims' is [-1]\r\r\n reverse(t, dims) ==> [[[[ 3, 2, 1, 0],\r\r\n [ 7, 6, 5, 4],\r\r\n [ 11, 10, 9, 8]],\r\r\n [[15, 14, 13, 12],\r\r\n [19, 18, 17, 16],\r\r\n [23, 22, 21, 20]]]]\r\r\n \r\r\n # 'dims' is '[1]' (or 'dims' is '[-3]')\r\r\n reverse(t, dims) ==> [[[[12, 13, 14, 15],\r\r\n [16, 17, 18, 19],\r\r\n [20, 21, 22, 23]\r\r\n [[ 0, 1, 2, 3],\r\r\n [ 4, 5, 6, 7],\r\r\n [ 8, 9, 10, 11]]]]\r\r\n \r\r\n # 'dims' is '[2]' (or 'dims' is '[-2]')\r\r\n reverse(t, dims) ==> [[[[8, 9, 10, 11],\r\r\n [4, 5, 6, 7],\r\r\n [0, 1, 2, 3]]\r\r\n [[20, 21, 22, 23],\r\r\n [16, 17, 18, 19],\r\r\n [12, 13, 14, 15]]]]\r\r\n ```\r\n\r\n Args:\r\n tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `bool`, `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`, `string`.\r\n Up to 8-D.\r\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 1-D. The indices of the dimensions to reverse. Must be in the range\r\r\n `[-rank(tensor), rank(tensor))`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `tensor`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ReverseV2\", tensor=tensor, axis=axis, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"Tidx\", _op.get_attr(\"Tidx\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"ReverseV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"ReverseV2\",\r\n name, _ctx._post_execution_callbacks, tensor, axis)\r\n return _result\r\n except _core._FallbackException:\r\n return reverse_v2_eager_fallback(\r\n tensor, axis, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef reverse_v2_eager_fallback(tensor, axis, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function reverse_v2\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)\r\n _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], _ctx)\r\n _inputs_flat = [tensor, axis]\r\n _attrs = (\"Tidx\", _attr_Tidx, \"T\", _attr_T)\r\n _result = _execute.execute(b\"ReverseV2\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ReverseV2\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('scatter_nd', 'manip.scatter_nd')\r\n@deprecated_endpoints('manip.scatter_nd')\r\ndef scatter_nd(indices, updates, shape, name=None):\r\n r\"\"\"Scatter `updates` into a new tensor according to `indices`.\r\n\r\n Creates a new tensor by applying sparse `updates` to individual values or\r\r\n slices within a tensor (initially zero for numeric, empty for string) of\r\r\n the given `shape` according to indices. This operator is the inverse of the\r\r\n `tf.gather_nd` operator which extracts values or slices from a given tensor.\r\r\n \r\r\n If `indices` contains duplicates, then their updates are accumulated (summed).\r\r\n \r\r\n **WARNING**: The order in which updates are applied is nondeterministic, so the\r\r\n output will be nondeterministic if `indices` contains duplicates -- because\r\r\n of some numerical approximation issues, numbers summed in different order\r\r\n may yield different results.\r\r\n \r\r\n `indices` is an integer tensor containing indices into a new tensor of shape\r\r\n `shape`. The last dimension of `indices` can be at most the rank of `shape`:\r\r\n \r\r\n indices.shape[-1] <= shape.rank\r\r\n \r\r\n The last dimension of `indices` corresponds to indices into elements\r\r\n (if `indices.shape[-1] = shape.rank`) or slices\r\r\n (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\r\r\n `shape`. `updates` is a tensor with shape\r\r\n \r\r\n indices.shape[:-1] + shape[indices.shape[-1]:]\r\r\n \r\r\n The simplest form of scatter is to insert individual elements in a tensor by\r\r\n index. 
For example, say we want to insert 4 scattered elements in a rank-1\r\r\n tensor with 8 elements.\r\r\n \r\r\n <div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\r\r\n <img style=\"width:100%\" src=\"https://www.tensorflow.org/images/ScatterNd1.png\" alt>\r\r\n </div>\r\r\n \r\r\n In Python, this scatter operation would look like this:\r\r\n \r\r\n ```python\r\r\n indices = tf.constant([[4], [3], [1], [7]])\r\r\n updates = tf.constant([9, 10, 11, 12])\r\r\n shape = tf.constant([8])\r\r\n scatter = tf.scatter_nd(indices, updates, shape)\r\r\n with tf.Session() as sess:\r\r\n print(sess.run(scatter))\r\r\n ```\r\r\n \r\r\n The resulting tensor would look like this:\r\r\n \r\r\n [0, 11, 0, 10, 9, 0, 0, 12]\r\r\n \r\r\n We can also, insert entire slices of a higher rank tensor all at once. For\r\r\n example, if we wanted to insert two slices in the first dimension of a\r\r\n rank-3 tensor with two matrices of new values.\r\r\n \r\r\n <div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\r\r\n <img style=\"width:100%\" src=\"https://www.tensorflow.org/images/ScatterNd2.png\" alt>\r\r\n </div>\r\r\n \r\r\n In Python, this scatter operation would look like this:\r\r\n \r\r\n ```python\r\r\n indices = tf.constant([[0], [2]])\r\r\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\r\r\n [7, 7, 7, 7], [8, 8, 8, 8]],\r\r\n [[5, 5, 5, 5], [6, 6, 6, 6],\r\r\n [7, 7, 7, 7], [8, 8, 8, 8]]])\r\r\n shape = tf.constant([4, 4, 4])\r\r\n scatter = tf.scatter_nd(indices, updates, shape)\r\r\n with tf.Session() as sess:\r\r\n print(sess.run(scatter))\r\r\n ```\r\r\n \r\r\n The resulting tensor would look like this:\r\r\n \r\r\n [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\r\r\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\r\r\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\r\r\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]\r\r\n \r\r\n Note that on CPU, if an out of bound index is found, an error is returned.\r\r\n On GPU, if an out of bound index is found, the index is ignored.\r\n\r\n Args:\r\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n Index tensor.\r\n updates: A `Tensor`. Updates to scatter into output.\r\n shape: A `Tensor`. Must have the same type as `indices`.\r\n 1-D. The shape of the resulting tensor.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `updates`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ScatterNd\", indices=indices, updates=updates, shape=shape, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tindices\", _op.get_attr(\"Tindices\"))\r\n _execute.record_gradient(\r\n \"ScatterNd\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"ScatterNd\",\r\n name, _ctx._post_execution_callbacks, indices, updates, shape)\r\n return _result\r\n except _core._FallbackException:\r\n return scatter_nd_eager_fallback(\r\n indices, updates, shape, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef scatter_nd_eager_fallback(indices, updates, shape, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function scatter_nd\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (updates,) = _execute.args_to_matching_eager([updates], _ctx)\r\n _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([indices, shape], _ctx)\r\n (indices, shape) = _inputs_Tindices\r\n _inputs_flat = [indices, updates, shape]\r\n _attrs = (\"T\", _attr_T, \"Tindices\", _attr_Tindices)\r\n _result = _execute.execute(b\"ScatterNd\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ScatterNd\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef scatter_nd_non_aliasing_add(input, indices, updates, name=None):\r\n r\"\"\"Applies sparse addition to `input` using individual values or slices\r\n\r\n from `updates` according to indices `indices`. The updates are non-aliasing:\r\r\n `input` is only modified in-place if no other operations will use it.\r\r\n Otherwise, a copy of `input` is made. This operation has a gradient with\r\r\n respect to both `input` and `updates`.\r\r\n \r\r\n `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\r\r\n \r\r\n `indices` must be integer tensor, containing indices into `input`.\r\r\n It must be shape \\\\([d_0, ..., d_{Q-2}, K]\\\\) where `0 < K <= P`.\r\r\n \r\r\n The innermost dimension of `indices` (with length `K`) corresponds to\r\r\n indices into elements (if `K = P`) or `(P-K)`-dimensional slices\r\r\n (if `K < P`) along the `K`th dimension of `input`.\r\r\n \r\r\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\r\r\n \r\r\n $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$\r\r\n \r\r\n For example, say we want to add 4 scattered elements to a rank-1 tensor to 8\r\r\n elements. 
In Python, that addition would look like this:\r\r\n \r\r\n input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])\r\r\n indices = tf.constant([[4], [3], [1], [7]])\r\r\n updates = tf.constant([9, 10, 11, 12])\r\r\n output = tf.scatter_nd_non_aliasing_add(input, indices, updates)\r\r\n with tf.Session() as sess:\r\r\n print(sess.run(output))\r\r\n \r\r\n The resulting value `output` would look like this:\r\r\n \r\r\n [1, 13, 3, 14, 14, 6, 7, 20]\r\r\n \r\r\n See `tf.scatter_nd` for more details about how to make updates to slices.\r\n\r\n Args:\r\n input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`.\r\n A Tensor.\r\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n A Tensor. Must be one of the following types: `int32`, `int64`.\r\r\n A tensor of indices into `input`.\r\n updates: A `Tensor`. Must have the same type as `input`.\r\n A Tensor. Must have the same type as ref. A tensor of updated values\r\r\n to add to `input`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ScatterNdNonAliasingAdd\", input=input, indices=indices,\r\n updates=updates, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tindices\", _op.get_attr(\"Tindices\"))\r\n _execute.record_gradient(\r\n \"ScatterNdNonAliasingAdd\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"ScatterNdNonAliasingAdd\", name, _ctx._post_execution_callbacks,\r\n input, indices, updates)\r\n return _result\r\n except _core._FallbackException:\r\n return scatter_nd_non_aliasing_add_eager_fallback(\r\n input, indices, updates, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef scatter_nd_non_aliasing_add_eager_fallback(input, indices, updates, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function scatter_nd_non_aliasing_add\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([input, updates], _ctx)\r\n (input, updates) = _inputs_T\r\n _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], _ctx)\r\n _inputs_flat = [input, indices, updates]\r\n _attrs = (\"T\", _attr_T, \"Tindices\", _attr_Tindices)\r\n _result = _execute.execute(b\"ScatterNdNonAliasingAdd\", 1,\r\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\r\n name=name)\r\n _execute.record_gradient(\r\n \"ScatterNdNonAliasingAdd\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef shape(input, out_type=_dtypes.int32, name=None):\r\n r\"\"\"Returns the shape of a tensor.\r\n\r\n This operation returns a 1-D integer tensor representing the shape of `input`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\r\r\n shape(t) 
==> [2, 2, 3]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`.\r\n out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `out_type`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Shape\", input=input, out_type=out_type, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"out_type\", _op.get_attr(\"out_type\"))\r\n _execute.record_gradient(\r\n \"Shape\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Shape\", name,\r\n _ctx._post_execution_callbacks, input, \"out_type\", out_type)\r\n return _result\r\n except _core._FallbackException:\r\n return shape_eager_fallback(\r\n input, out_type=out_type, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef shape_eager_fallback(input, out_type=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function shape\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"out_type\", out_type)\r\n _result = _execute.execute(b\"Shape\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Shape\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef shape_n(input, out_type=_dtypes.int32, name=None):\r\n r\"\"\"Returns shape of tensors.\r\n\r\n This operation returns N 1-D integer tensors representing shape of `input[i]s`.\r\n\r\n Args:\r\n input: A list of at least 1 `Tensor` objects with the same type.\r\n out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. 
Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A list with the same length as `input` of `Tensor` objects with type `out_type`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if not isinstance(input, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'input' argument to \"\r\n \"'shape_n' Op, not %r.\" % input)\r\n _attr_N = len(input)\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ShapeN\", input=input, out_type=out_type, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"N\", _op.get_attr(\"N\"), \"T\", _op.get_attr(\"T\"), \"out_type\",\r\n _op.get_attr(\"out_type\"))\r\n _execute.record_gradient(\r\n \"ShapeN\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"ShapeN\", name,\r\n _ctx._post_execution_callbacks, input, \"out_type\", out_type)\r\n return _result\r\n except _core._FallbackException:\r\n return shape_n_eager_fallback(\r\n input, out_type=out_type, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef shape_n_eager_fallback(input, out_type=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function shape_n\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if not isinstance(input, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'input' argument to \"\r\n \"'shape_n' Op, not %r.\" % input)\r\n _attr_N = len(input)\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _attr_T, input = _execute.args_to_matching_eager(list(input), _ctx)\r\n _inputs_flat = list(input)\r\n _attrs = (\"N\", _attr_N, \"T\", _attr_T, \"out_type\", out_type)\r\n _result = _execute.execute(b\"ShapeN\", _attr_N, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ShapeN\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n\r\ndef size(input, out_type=_dtypes.int32, name=None):\r\n r\"\"\"Returns the size of a tensor.\r\n\r\n This operation returns an integer representing the number of elements in\r\r\n `input`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]\r\r\n size(t) ==> 12\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`.\r\n out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. 
Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `out_type`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Size\", input=input, out_type=out_type, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"out_type\", _op.get_attr(\"out_type\"))\r\n _execute.record_gradient(\r\n \"Size\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Size\", name,\r\n _ctx._post_execution_callbacks, input, \"out_type\", out_type)\r\n return _result\r\n except _core._FallbackException:\r\n return size_eager_fallback(\r\n input, out_type=out_type, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef size_eager_fallback(input, out_type=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function size\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"out_type\", out_type)\r\n _result = _execute.execute(b\"Size\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Size\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef _slice(input, begin, size, name=None):\r\n r\"\"\"Return a slice from 'input'.\r\n\r\n The output tensor is a tensor with dimensions described by 'size'\r\r\n whose values are extracted from 'input' starting at the offsets in\r\r\n 'begin'.\r\r\n \r\r\n *Requirements*:\r\r\n 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)\r\n\r\n Args:\r\n input: A `Tensor`.\r\n begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n begin[i] specifies the offset into the 'i'th dimension of\r\r\n 'input' to slice from.\r\n size: A `Tensor`. Must have the same type as `begin`.\r\n size[i] specifies the number of elements of the 'i'th dimension\r\r\n of 'input' to slice. If size[i] is -1, all remaining elements in dimension\r\r\n i are included in the slice (i.e. this is equivalent to setting\r\r\n size[i] = input.dim_size(i) - begin[i]).\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Slice\", input=input, begin=begin, size=size, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Index\", _op.get_attr(\"Index\"))\r\n _execute.record_gradient(\r\n \"Slice\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Slice\", name,\r\n _ctx._post_execution_callbacks, input, begin, size)\r\n return _result\r\n except _core._FallbackException:\r\n return _slice_eager_fallback(\r\n input, begin, size, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef _slice_eager_fallback(input, begin, size, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function _slice\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Index, _inputs_Index = _execute.args_to_matching_eager([begin, size], _ctx)\r\n (begin, size) = _inputs_Index\r\n _inputs_flat = [input, begin, size]\r\n _attrs = (\"T\", _attr_T, \"Index\", _attr_Index)\r\n _result = _execute.execute(b\"Slice\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Slice\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef snapshot(input, name=None):\r\n r\"\"\"Returns a copy of the input tensor.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Snapshot\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"Snapshot\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Snapshot\",\r\n name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return snapshot_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef snapshot_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function snapshot\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"Snapshot\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Snapshot\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef space_to_batch(input, paddings, block_size, name=None):\r\n r\"\"\"SpaceToBatch for 4-D tensors of type T.\r\n\r\n This is a legacy version of the more general SpaceToBatchND.\r\r\n \r\r\n Zero-pads and then rearranges (permutes) blocks of spatial data into batch.\r\r\n More specifically, this op outputs a copy of the input tensor where values from\r\r\n the `height` and `width` dimensions are moved to the `batch` dimension. After\r\r\n the zero-padding, both `height` and `width` of the input must be divisible by the\r\r\n block size.\r\n\r\n Args:\r\n input: A `Tensor`. 4-D with shape `[batch, height, width, depth]`.\r\n paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\r\r\n the padding of the input with zeros across the spatial dimensions as follows:\r\r\n \r\r\n paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]\r\r\n \r\r\n The effective spatial dimensions of the zero-padded input tensor will be:\r\r\n \r\r\n height_pad = pad_top + height + pad_bottom\r\r\n width_pad = pad_left + width + pad_right\r\r\n \r\r\n The attr `block_size` must be greater than one. 
It indicates the block size.\r\r\n \r\r\n * Non-overlapping blocks of size `block_size x block size` in the height and\r\r\n width dimensions are rearranged into the batch dimension at each location.\r\r\n * The batch of the output tensor is `batch * block_size * block_size`.\r\r\n * Both height_pad and width_pad must be divisible by block_size.\r\r\n \r\r\n The shape of the output will be:\r\r\n \r\r\n [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\r\r\n depth]\r\r\n \r\r\n Some examples:\r\r\n \r\r\n (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2]], [[3], [4]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[4, 1, 1, 1]` and value:\r\r\n \r\r\n ```\r\r\n [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\r\r\n ```\r\r\n \r\r\n (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:\r\r\n \r\r\n ```\r\r\n x = [[[[1, 2, 3], [4, 5, 6]],\r\r\n [[7, 8, 9], [10, 11, 12]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[4, 1, 1, 3]` and value:\r\r\n \r\r\n ```\r\r\n [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\r\r\n ```\r\r\n \r\r\n (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2], [3], [4]],\r\r\n [[5], [6], [7], [8]],\r\r\n [[9], [10], [11], [12]],\r\r\n [[13], [14], [15], [16]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[4, 2, 2, 1]` and value:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [3]], [[9], [11]]],\r\r\n [[[2], [4]], [[10], [12]]],\r\r\n [[[5], [7]], [[13], [15]]],\r\r\n [[[6], [8]], [[14], [16]]]]\r\r\n ```\r\r\n \r\r\n (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2], [3], [4]],\r\r\n [[5], [6], [7], [8]]],\r\r\n [[[9], [10], [11], [12]],\r\r\n [[13], [14], [15], [16]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[8, 1, 2, 1]` and value:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\r\r\n [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\r\r\n ```\r\r\n \r\r\n Among others, this operation is useful for reducing atrous convolution into\r\r\n regular convolution.\r\n block_size: An `int` that is `>= 2`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n block_size = _execute.make_int(block_size, \"block_size\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"SpaceToBatch\", input=input, paddings=paddings, block_size=block_size,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tpaddings\", _op.get_attr(\"Tpaddings\"),\r\n \"block_size\", _op.get_attr(\"block_size\"))\r\n _execute.record_gradient(\r\n \"SpaceToBatch\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"SpaceToBatch\",\r\n name, _ctx._post_execution_callbacks, input, paddings, \"block_size\",\r\n block_size)\r\n return _result\r\n except _core._FallbackException:\r\n return space_to_batch_eager_fallback(\r\n input, paddings, block_size=block_size, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef space_to_batch_eager_fallback(input, paddings, block_size, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function space_to_batch\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n block_size = _execute.make_int(block_size, \"block_size\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, paddings]\r\n _attrs = (\"T\", _attr_T, \"Tpaddings\", _attr_Tpaddings, \"block_size\",\r\n block_size)\r\n _result = _execute.execute(b\"SpaceToBatch\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"SpaceToBatch\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('space_to_batch_nd', 'manip.space_to_batch_nd')\r\n@deprecated_endpoints('manip.space_to_batch_nd')\r\ndef space_to_batch_nd(input, block_shape, paddings, name=None):\r\n r\"\"\"SpaceToBatch for N-D tensors of type T.\r\n\r\n This operation divides \"spatial\" dimensions `[1, ..., M]` of the input into a\r\r\n grid of blocks of shape `block_shape`, and interleaves these blocks with the\r\r\n \"batch\" dimension (0) such that in the output, the spatial dimensions\r\r\n `[1, ..., M]` correspond to the position within the grid, and the batch\r\r\n dimension combines both the position within a spatial block and the original\r\r\n batch position. Prior to division into blocks, the spatial dimensions of the\r\r\n input are optionally zero padded according to `paddings`. See below for a\r\r\n precise description.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\r\r\n where spatial_shape has `M` dimensions.\r\n block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 1-D with shape `[M]`, all values must be >= 1.\r\n paddings: A `Tensor`. 
Must be one of the following types: `int32`, `int64`.\r\n 2-D with shape `[M, 2]`, all values must be >= 0.\r\r\n `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\r\r\n `i + 1`, which corresponds to spatial dimension `i`. It is required that\r\r\n `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\r\r\n \r\r\n This operation is equivalent to the following steps:\r\r\n \r\r\n 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\r\r\n input according to `paddings` to produce `padded` of shape `padded_shape`.\r\r\n \r\r\n 2. Reshape `padded` to `reshaped_padded` of shape:\r\r\n \r\r\n [batch] +\r\r\n [padded_shape[1] / block_shape[0],\r\r\n block_shape[0],\r\r\n ...,\r\r\n padded_shape[M] / block_shape[M-1],\r\r\n block_shape[M-1]] +\r\r\n remaining_shape\r\r\n \r\r\n 3. Permute dimensions of `reshaped_padded` to produce\r\r\n `permuted_reshaped_padded` of shape:\r\r\n \r\r\n block_shape +\r\r\n [batch] +\r\r\n [padded_shape[1] / block_shape[0],\r\r\n ...,\r\r\n padded_shape[M] / block_shape[M-1]] +\r\r\n remaining_shape\r\r\n \r\r\n 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\r\r\n dimension, producing an output tensor of shape:\r\r\n \r\r\n [batch * prod(block_shape)] +\r\r\n [padded_shape[1] / block_shape[0],\r\r\n ...,\r\r\n padded_shape[M] / block_shape[M-1]] +\r\r\n remaining_shape\r\r\n \r\r\n Some examples:\r\r\n \r\r\n (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\r\r\n `paddings = [[0, 0], [0, 0]]`:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2]], [[3], [4]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[4, 1, 1, 1]` and value:\r\r\n \r\r\n ```\r\r\n [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\r\r\n ```\r\r\n \r\r\n (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\r\r\n `paddings = [[0, 0], [0, 0]]`:\r\r\n \r\r\n ```\r\r\n x = [[[[1, 2, 3], [4, 5, 6]],\r\r\n [[7, 8, 9], [10, 11, 12]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[4, 1, 1, 3]` and value:\r\r\n \r\r\n ```\r\r\n [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]\r\r\n ```\r\r\n \r\r\n (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\r\r\n `paddings = [[0, 0], [0, 0]]`:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2], [3], [4]],\r\r\n [[5], [6], [7], [8]],\r\r\n [[9], [10], [11], [12]],\r\r\n [[13], [14], [15], [16]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[4, 2, 2, 1]` and value:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [3]], [[9], [11]]],\r\r\n [[[2], [4]], [[10], [12]]],\r\r\n [[[5], [7]], [[13], [15]]],\r\r\n [[[6], [8]], [[14], [16]]]]\r\r\n ```\r\r\n \r\r\n (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\r\r\n paddings = `[[0, 0], [2, 0]]`:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2], [3], [4]],\r\r\n [[5], [6], [7], [8]]],\r\r\n [[[9], [10], [11], [12]],\r\r\n [[13], [14], [15], [16]]]]\r\r\n ```\r\r\n \r\r\n The output tensor has shape `[8, 1, 3, 1]` and value:\r\r\n \r\r\n ```\r\r\n x = [[[[0], [1], [3]]], [[[0], [9], [11]]],\r\r\n [[[0], [2], [4]]], [[[0], [10], [12]]],\r\r\n [[[0], [5], [7]]], [[[0], [13], [15]]],\r\r\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\r\r\n ```\r\r\n \r\r\n Among others, this operation is useful for reducing atrous convolution into\r\r\n regular convolution.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"SpaceToBatchND\", input=input, block_shape=block_shape,\r\n paddings=paddings, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tblock_shape\",\r\n _op.get_attr(\"Tblock_shape\"), \"Tpaddings\",\r\n _op.get_attr(\"Tpaddings\"))\r\n _execute.record_gradient(\r\n \"SpaceToBatchND\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"SpaceToBatchND\", name, _ctx._post_execution_callbacks, input,\r\n block_shape, paddings)\r\n return _result\r\n except _core._FallbackException:\r\n return space_to_batch_nd_eager_fallback(\r\n input, block_shape, paddings, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef space_to_batch_nd_eager_fallback(input, block_shape, paddings, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function space_to_batch_nd\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tblock_shape, (block_shape,) = _execute.args_to_matching_eager([block_shape], _ctx, _dtypes.int32)\r\n _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, block_shape, paddings]\r\n _attrs = (\"T\", _attr_T, \"Tblock_shape\", _attr_Tblock_shape, \"Tpaddings\",\r\n _attr_Tpaddings)\r\n _result = _execute.execute(b\"SpaceToBatchND\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"SpaceToBatchND\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef space_to_depth(input, block_size, data_format=\"NHWC\", name=None):\r\n r\"\"\"SpaceToDepth for tensors of type T.\r\n\r\n Rearranges blocks of spatial data, into depth. More specifically,\r\r\n this op outputs a copy of the input tensor where values from the `height`\r\r\n and `width` dimensions are moved to the `depth` dimension.\r\r\n The attr `block_size` indicates the input block size.\r\r\n \r\r\n * Non-overlapping blocks of size `block_size x block size` are rearranged\r\r\n into depth at each location.\r\r\n * The depth of the output tensor is `block_size * block_size * input_depth`.\r\r\n * The Y, X coordinates within each block of the input become the high order\r\r\n component of the output channel index.\r\r\n * The input tensor's height and width must be divisible by block_size.\r\r\n \r\r\n The `data_format` attr specifies the layout of the input and output tensors\r\r\n with the following options:\r\r\n \"NHWC\": `[ batch, height, width, channels ]`\r\r\n \"NCHW\": `[ batch, channels, height, width ]`\r\r\n \"NCHW_VECT_C\":\r\r\n `qint8 [ batch, channels / 4, height, width, 4 ]`\r\r\n \r\r\n It is useful to consider the operation as transforming a 6-D Tensor.\r\r\n e.g. 
for data_format = NHWC,\r\r\n Each element in the input tensor can be specified via 6 coordinates,\r\r\n ordered by decreasing memory layout significance as:\r\r\n n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates\r\r\n within the output image, bX, bY means coordinates\r\r\n within the input block, iC means input channels).\r\r\n The output would be a transpose to the following layout:\r\r\n n,oY,oX,bY,bX,iC\r\r\n \r\r\n This operation is useful for resizing the activations between convolutions\r\r\n (but keeping all data), e.g. instead of pooling. It is also useful for training\r\r\n purely convolutional models.\r\r\n \r\r\n For example, given an input of shape `[1, 2, 2, 1]`, data_format = \"NHWC\" and\r\r\n block_size = 2:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2]],\r\r\n [[3], [4]]]]\r\r\n ```\r\r\n \r\r\n This operation will output a tensor of shape `[1, 1, 1, 4]`:\r\r\n \r\r\n ```\r\r\n [[[[1, 2, 3, 4]]]]\r\r\n ```\r\r\n \r\r\n Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,\r\r\n the corresponding output will have a single element (i.e. width and height are\r\r\n both 1) and will have a depth of 4 channels (1 * block_size * block_size).\r\r\n The output element shape is `[1, 1, 4]`.\r\r\n \r\r\n For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.\r\r\n \r\r\n ```\r\r\n x = [[[[1, 2, 3], [4, 5, 6]],\r\r\n [[7, 8, 9], [10, 11, 12]]]]\r\r\n ```\r\r\n \r\r\n This operation, for block_size of 2, will return the following tensor of shape\r\r\n `[1, 1, 1, 12]`\r\r\n \r\r\n ```\r\r\n [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]\r\r\n ```\r\r\n \r\r\n Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:\r\r\n \r\r\n ```\r\r\n x = [[[[1], [2], [5], [6]],\r\r\n [[3], [4], [7], [8]],\r\r\n [[9], [10], [13], [14]],\r\r\n [[11], [12], [15], [16]]]]\r\r\n ```\r\r\n \r\r\n the operator will return the following tensor of shape `[1 2 2 4]`:\r\r\n \r\r\n ```\r\r\n x = [[[[1, 2, 3, 4],\r\r\n [5, 6, 7, 8]],\r\r\n [[9, 10, 11, 12],\r\r\n [13, 14, 15, 16]]]]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`.\r\n block_size: An `int` that is `>= 2`. The size of the spatial block.\r\n data_format: An optional `string` from: `\"NHWC\", \"NCHW\", \"NCHW_VECT_C\"`. Defaults to `\"NHWC\"`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n block_size = _execute.make_int(block_size, \"block_size\")\r\n if data_format is None:\r\n data_format = \"NHWC\"\r\n data_format = _execute.make_str(data_format, \"data_format\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"SpaceToDepth\", input=input, block_size=block_size,\r\n data_format=data_format, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"block_size\",\r\n _op.get_attr(\"block_size\"), \"data_format\",\r\n _op.get_attr(\"data_format\"))\r\n _execute.record_gradient(\r\n \"SpaceToDepth\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"SpaceToDepth\",\r\n name, _ctx._post_execution_callbacks, input, \"block_size\", block_size,\r\n \"data_format\", data_format)\r\n return _result\r\n except _core._FallbackException:\r\n return space_to_depth_eager_fallback(\r\n input, block_size=block_size, data_format=data_format, name=name,\r\n ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef space_to_depth_eager_fallback(input, block_size, data_format=\"NHWC\", name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function space_to_depth\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n block_size = _execute.make_int(block_size, \"block_size\")\r\n if data_format is None:\r\n data_format = \"NHWC\"\r\n data_format = _execute.make_str(data_format, \"data_format\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"block_size\", block_size, \"data_format\",\r\n data_format)\r\n _result = _execute.execute(b\"SpaceToDepth\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"SpaceToDepth\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef split(axis, value, num_split, name=None):\r\n r\"\"\"Splits a tensor into `num_split` tensors along one dimension.\r\n\r\n Args:\r\n axis: A `Tensor` of type `int32`.\r\n 0-D. The dimension along which to split. Must be in the range\r\r\n `[-rank(value), rank(value))`.\r\n value: A `Tensor`. The tensor to split.\r\n num_split: An `int` that is `>= 1`.\r\n The number of ways to split. 
Must evenly divide\r\r\n `value.shape[split_dim]`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A list of `num_split` `Tensor` objects with the same type as `value`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n num_split = _execute.make_int(num_split, \"num_split\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Split\", split_dim=axis, value=value, num_split=num_split, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num_split\", _op.get_attr(\"num_split\"), \"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"Split\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Split\", name,\r\n _ctx._post_execution_callbacks, axis, value, \"num_split\", num_split)\r\n return _result\r\n except _core._FallbackException:\r\n return split_eager_fallback(\r\n axis, value, num_split=num_split, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef split_eager_fallback(axis, value, num_split, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function split\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n num_split = _execute.make_int(num_split, \"num_split\")\r\n _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)\r\n axis = _ops.convert_to_tensor(axis, _dtypes.int32)\r\n _inputs_flat = [axis, value]\r\n _attrs = (\"num_split\", num_split, \"T\", _attr_T)\r\n _result = _execute.execute(b\"Split\", num_split, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Split\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n\r\ndef split_v(value, size_splits, axis, num_split, name=None):\r\n r\"\"\"Splits a tensor into `num_split` tensors along one dimension.\r\n\r\n Args:\r\n value: A `Tensor`. The tensor to split.\r\n size_splits: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n list containing the sizes of each output tensor along the split\r\r\n dimension. Must sum to the dimension of value along split_dim.\r\r\n Can contain one -1 indicating that dimension is to be inferred.\r\n axis: A `Tensor` of type `int32`.\r\n 0-D. The dimension along which to split. 
Must be in the range\r\r\n `[-rank(value), rank(value))`.\r\n num_split: An `int` that is `>= 1`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A list of `num_split` `Tensor` objects with the same type as `value`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n num_split = _execute.make_int(num_split, \"num_split\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"SplitV\", value=value, size_splits=size_splits, split_dim=axis,\r\n num_split=num_split, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num_split\", _op.get_attr(\"num_split\"), \"T\", _op.get_attr(\"T\"),\r\n \"Tlen\", _op.get_attr(\"Tlen\"))\r\n _execute.record_gradient(\r\n \"SplitV\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"SplitV\", name,\r\n _ctx._post_execution_callbacks, value, size_splits, axis, \"num_split\",\r\n num_split)\r\n return _result\r\n except _core._FallbackException:\r\n return split_v_eager_fallback(\r\n value, size_splits, axis, num_split=num_split, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef split_v_eager_fallback(value, size_splits, axis, num_split, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function split_v\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n num_split = _execute.make_int(num_split, \"num_split\")\r\n _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)\r\n _attr_Tlen, (size_splits,) = _execute.args_to_matching_eager([size_splits], _ctx, _dtypes.int64)\r\n axis = _ops.convert_to_tensor(axis, _dtypes.int32)\r\n _inputs_flat = [value, size_splits, axis]\r\n _attrs = (\"num_split\", num_split, \"T\", _attr_T, \"Tlen\", _attr_Tlen)\r\n _result = _execute.execute(b\"SplitV\", num_split, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"SplitV\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n\r\ndef squeeze(input, axis=[], name=None):\r\n r\"\"\"Removes dimensions of size 1 from the shape of a tensor.\r\n\r\n Given a tensor `input`, this operation returns a tensor of the same type with\r\r\n all dimensions of size 1 removed. If you don't want to remove all size 1\r\r\n dimensions, you can remove specific size 1 dimensions by specifying\r\r\n `axis`.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\r\r\n shape(squeeze(t)) ==> [2, 3]\r\r\n ```\r\r\n \r\r\n Or, to remove specific size 1 dimensions:\r\r\n \r\r\n ```\r\r\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\r\r\n shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]\r\r\n ```\r\n\r\n Args:\r\n input: A `Tensor`. The `input` to squeeze.\r\n axis: An optional list of `ints`. Defaults to `[]`.\r\n If specified, only squeezes the dimensions listed. The dimension\r\r\n index starts at 0. It is an error to squeeze a dimension that is not 1. Must\r\r\n be in the range `[-rank(input), rank(input))`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if axis is None:\r\n axis = []\r\n if not isinstance(axis, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'axis' argument to \"\r\n \"'squeeze' Op, not %r.\" % axis)\r\n axis = [_execute.make_int(_i, \"axis\") for _i in axis]\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Squeeze\", input=input, squeeze_dims=axis, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"squeeze_dims\",\r\n _op.get_attr(\"squeeze_dims\"))\r\n _execute.record_gradient(\r\n \"Squeeze\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Squeeze\",\r\n name, _ctx._post_execution_callbacks, input, \"squeeze_dims\", axis)\r\n return _result\r\n except _core._FallbackException:\r\n return squeeze_eager_fallback(\r\n input, axis=axis, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef squeeze_eager_fallback(input, axis=[], name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function squeeze\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if axis is None:\r\n axis = []\r\n if not isinstance(axis, (list, tuple)):\r\n raise TypeError(\r\n \"Expected list for 'axis' argument to \"\r\n \"'squeeze' Op, not %r.\" % axis)\r\n axis = [_execute.make_int(_i, \"axis\") for _i in axis]\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T, \"squeeze_dims\", axis)\r\n _result = _execute.execute(b\"Squeeze\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Squeeze\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('stop_gradient')\r\ndef stop_gradient(input, name=None):\r\n r\"\"\"Stops gradient computation.\r\n\r\n When executed in a graph, this op outputs its input tensor as-is.\r\r\n \r\r\n When building ops to compute gradients, this op prevents the contribution of\r\r\n its inputs to be taken into account. Normally, the gradient generator adds ops\r\r\n to a graph to compute the derivatives of a specified 'loss' by recursively\r\r\n finding out inputs that contributed to its computation. If you insert this op\r\r\n in the graph it inputs are masked from the gradient generator. They are not\r\r\n taken into account for computing gradients.\r\r\n \r\r\n This is useful any time you want to compute a value with TensorFlow but need\r\r\n to pretend that the value was a constant. 
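  A minimal usage sketch (assuming TensorFlow 1.x graph mode; the variable names here are illustrative only): only the path that bypasses `stop_gradient` contributes to the computed gradient.

  ```
  import tensorflow as tf

  x = tf.constant(3.0)
  y = x * x                       # gradient flows through this term
  y_frozen = tf.stop_gradient(y)  # treated as a constant by the gradient generator
  loss = y + y_frozen
  grad, = tf.gradients(loss, [x]) # only d(y)/dx = 2*x contributes
  with tf.Session() as sess:
      print(sess.run(grad))       # 6.0
  ```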
Some examples include:\r\r\n \r\r\n * The *EM* algorithm where the *M-step* should not involve backpropagation\r\r\n through the output of the *E-step*.\r\r\n * Contrastive divergence training of Boltzmann machines where, when\r\r\n differentiating the energy function, the training must not backpropagate\r\r\n through the graph that generated the samples from the model.\r\r\n * Adversarial training, where no backprop should happen through the adversarial\r\r\n example generation process.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StopGradient\", input=input, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"StopGradient\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"StopGradient\",\r\n name, _ctx._post_execution_callbacks, input)\r\n return _result\r\n except _core._FallbackException:\r\n return stop_gradient_eager_fallback(\r\n input, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef stop_gradient_eager_fallback(input, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function stop_gradient\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _inputs_flat = [input]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"StopGradient\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"StopGradient\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef strided_slice(input, begin, end, strides, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):\r\n r\"\"\"Return a strided slice from `input`.\r\n\r\n Note, most python users will want to use the Python `Tensor.__getitem__`\r\r\n or `Variable.__getitem__` rather than this op directly.\r\r\n \r\r\n The goal of this op is to produce a new tensor with a subset of\r\r\n the elements from the `n` dimensional `input` tensor. The subset is chosen using\r\r\n a sequence of `m` sparse range specifications encoded into the arguments\r\r\n of this function. Note, in some cases\r\r\n `m` could be equal to `n`, but this need not be the case. Each\r\r\n range specification entry can be one of the following:\r\r\n \r\r\n - An ellipsis (...). Ellipses are used to imply zero or more\r\r\n dimensions of full-dimension selection and are produced using\r\r\n `ellipsis_mask`. For example, `foo[...]` is the identity slice.\r\r\n \r\r\n - A new axis. This is used to insert a new shape=1 dimension and is\r\r\n produced using `new_axis_mask`. For example, `foo[:, ...]` where\r\r\n `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.\r\r\n \r\r\n \r\r\n - A range `begin:end:stride`. This is used to specify how much to choose from\r\r\n a given dimension. `stride` can be any integer but 0. 
`begin` is an integer\r\r\n which represents the index of the first value to select while `end` represents\r\r\n the index of the last value to select. The number of values selected in each\r\r\n dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.\r\r\n `begin` and `end` can be negative where `-1` is the last element, `-2` is\r\r\n the second to last. `begin_mask` controls whether to replace the explicitly\r\r\n given `begin` with an implicit effective value of `0` if `stride > 0` and\r\r\n `-1` if `stride < 0`. `end_mask` is analogous but produces the number\r\r\n required to create the largest open interval. For example, given a shape\r\r\n `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do\r\r\n not assume this is equivalent to `foo[0:-1]` which has an effective `begin`\r\r\n and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the\r\r\n first dimension of a tensor while dropping the last two (in the original\r\r\n order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.\r\r\n \r\r\n - A single index. This is used to keep only elements that have a given\r\r\n index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a\r\r\n shape `(6,)` tensor. This is encoded in `begin` and `end` and\r\r\n `shrink_axis_mask`.\r\r\n \r\r\n Each conceptual range specification is encoded in the op's argument. This\r\r\n encoding is best understand by considering a non-trivial example. In\r\r\n particular,\r\r\n `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as\r\r\n \r\r\n ```\r\r\n begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)\r\r\n end = [2, 4, x, x, -3, x]\r\r\n strides = [1, 1, x, x, -1, 1]\r\r\n begin_mask = 1<<4 | 1 << 5 = 48\r\r\n end_mask = 1<<5 = 32\r\r\n ellipsis_mask = 1<<3 = 8\r\r\n new_axis_mask = 1<<2 4\r\r\n shrink_axis_mask = 1<<0\r\r\n ```\r\r\n \r\r\n In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of\r\r\n the slice becomes (2, 1, 5, 5, 2, 5).\r\r\n Let us walk step by step through each argument specification.\r\r\n \r\r\n 1. The first argument in the example slice is turned into `begin = 1` and\r\r\n `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we\r\r\n also set the appropriate bit in `shrink_axis_mask`.\r\r\n \r\r\n 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have\r\r\n zero bits contributed.\r\r\n \r\r\n 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1\r\r\n dimension in the final shape. Dummy values are contributed to begin,\r\r\n end and stride, while the new_axis_mask bit is set.\r\r\n \r\r\n 4. `...` grab the full ranges from as many dimensions as needed to\r\r\n fully specify a slice for every dimension of the input shape.\r\r\n \r\r\n 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated\r\r\n with a dimension that has shape `s` is converted to a positive index\r\r\n `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion\r\r\n is done internally so begin, end and strides receive x, -3, and -1.\r\r\n The appropriate begin_mask bit is set to indicate the start range is the\r\r\n full range (ignoring the x).\r\r\n \r\r\n 6. `:` indicates that the entire contents of the corresponding dimension\r\r\n is selected. This is equivalent to `::` or `0::1`. begin, end, and strides\r\r\n receive 0, 0, and 1, respectively. 
The appropriate bits in `begin_mask` and\r\r\n `end_mask` are also set.\r\r\n \r\r\n *Requirements*:\r\r\n `0 != strides[i] for i in [0, m)`\r\r\n `ellipsis_mask must be a power of two (only one ellipsis)`\r\n\r\n Args:\r\n input: A `Tensor`.\r\n begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n `begin[k]` specifies the offset into the `k`th range specification.\r\r\n The exact dimension this corresponds to will be determined by context.\r\r\n Out-of-bounds values will be silently clamped. If the `k`th bit of\r\r\n `begin_mask` then `begin[k]` is ignored and the full range of the\r\r\n appropriate dimension is used instead. Negative values causes indexing\r\r\n to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.\r\n end: A `Tensor`. Must have the same type as `begin`.\r\n `end[i]` is like `begin` with the exception that `end_mask` is\r\r\n used to determine full ranges.\r\n strides: A `Tensor`. Must have the same type as `begin`.\r\n `strides[i]` specifies the increment in the `i`th specification\r\r\n after extracting a given element. Negative indices will reverse\r\r\n the original order. Out or range values are\r\r\n clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`\r\n begin_mask: An optional `int`. Defaults to `0`.\r\n a bitmask where a bit i being 1 means to ignore the begin\r\r\n value and instead use the largest interval possible. At runtime\r\r\n begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or\r\r\n `[-1, n-1]` if `stride[i] < 0`\r\n end_mask: An optional `int`. Defaults to `0`. analogous to `begin_mask`\r\n ellipsis_mask: An optional `int`. Defaults to `0`.\r\n a bitmask where bit `i` being 1 means the `i`th\r\r\n position is actually an ellipsis. One bit at most can be 1.\r\r\n If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`\r\r\n is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis\r\r\n implicitly creates as many range specifications as necessary to fully\r\r\n specify the sliced range for every dimension. For example for a 4-dimensional\r\r\n tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.\r\n new_axis_mask: An optional `int`. Defaults to `0`.\r\n a bitmask where bit `i` being 1 means the `i`th\r\r\n specification creates a new shape 1 dimension. For example\r\r\n `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.\r\n shrink_axis_mask: An optional `int`. Defaults to `0`.\r\n a bitmask where bit `i` implies that the `i`th\r\r\n specification should shrink the dimensionality. begin and end\r\r\n must imply a slice of size 1 in the dimension. For example in\r\r\n python one might do `foo[:, 3, :]` which would result in\r\r\n `shrink_axis_mask` being 2.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if begin_mask is None:\r\n begin_mask = 0\r\n begin_mask = _execute.make_int(begin_mask, \"begin_mask\")\r\n if end_mask is None:\r\n end_mask = 0\r\n end_mask = _execute.make_int(end_mask, \"end_mask\")\r\n if ellipsis_mask is None:\r\n ellipsis_mask = 0\r\n ellipsis_mask = _execute.make_int(ellipsis_mask, \"ellipsis_mask\")\r\n if new_axis_mask is None:\r\n new_axis_mask = 0\r\n new_axis_mask = _execute.make_int(new_axis_mask, \"new_axis_mask\")\r\n if shrink_axis_mask is None:\r\n shrink_axis_mask = 0\r\n shrink_axis_mask = _execute.make_int(shrink_axis_mask, \"shrink_axis_mask\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StridedSlice\", input=input, begin=begin, end=end, strides=strides,\r\n begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask,\r\n new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,\r\n name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Index\", _op.get_attr(\"Index\"),\r\n \"begin_mask\", _op.get_attr(\"begin_mask\"), \"end_mask\",\r\n _op.get_attr(\"end_mask\"), \"ellipsis_mask\",\r\n _op.get_attr(\"ellipsis_mask\"), \"new_axis_mask\",\r\n _op.get_attr(\"new_axis_mask\"), \"shrink_axis_mask\",\r\n _op.get_attr(\"shrink_axis_mask\"))\r\n _execute.record_gradient(\r\n \"StridedSlice\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"StridedSlice\",\r\n name, _ctx._post_execution_callbacks, input, begin, end, strides,\r\n \"begin_mask\", begin_mask, \"end_mask\", end_mask, \"ellipsis_mask\",\r\n ellipsis_mask, \"new_axis_mask\", new_axis_mask, \"shrink_axis_mask\",\r\n shrink_axis_mask)\r\n return _result\r\n except _core._FallbackException:\r\n return strided_slice_eager_fallback(\r\n input, begin, end, strides, begin_mask=begin_mask,\r\n end_mask=end_mask, ellipsis_mask=ellipsis_mask,\r\n new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef strided_slice_eager_fallback(input, begin, end, strides, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function strided_slice\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if begin_mask is None:\r\n begin_mask = 0\r\n begin_mask = _execute.make_int(begin_mask, \"begin_mask\")\r\n if end_mask is None:\r\n end_mask = 0\r\n end_mask = _execute.make_int(end_mask, \"end_mask\")\r\n if ellipsis_mask is None:\r\n ellipsis_mask = 0\r\n ellipsis_mask = _execute.make_int(ellipsis_mask, \"ellipsis_mask\")\r\n if new_axis_mask is None:\r\n new_axis_mask = 0\r\n new_axis_mask = _execute.make_int(new_axis_mask, \"new_axis_mask\")\r\n if shrink_axis_mask is None:\r\n shrink_axis_mask = 0\r\n shrink_axis_mask = _execute.make_int(shrink_axis_mask, \"shrink_axis_mask\")\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Index, _inputs_Index = _execute.args_to_matching_eager([begin, end, strides], _ctx)\r\n (begin, 
end, strides) = _inputs_Index\r\n _inputs_flat = [input, begin, end, strides]\r\n _attrs = (\"T\", _attr_T, \"Index\", _attr_Index, \"begin_mask\", begin_mask,\r\n \"end_mask\", end_mask, \"ellipsis_mask\", ellipsis_mask, \"new_axis_mask\",\r\n new_axis_mask, \"shrink_axis_mask\", shrink_axis_mask)\r\n _result = _execute.execute(b\"StridedSlice\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"StridedSlice\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef strided_slice_assign(ref, begin, end, strides, value, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):\r\n r\"\"\"Assign `value` to the sliced l-value reference of `ref`.\r\n\r\n The values of `value` are assigned to the positions in the variable\r\r\n `ref` that are selected by the slice parameters. The slice parameters\r\r\n `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`.\r\r\n \r\r\n NOTE this op currently does not support broadcasting and so `value`'s\r\r\n shape must be exactly the shape produced by the slice of `ref`.\r\n\r\n Args:\r\n ref: A mutable `Tensor`.\r\n begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n end: A `Tensor`. Must have the same type as `begin`.\r\n strides: A `Tensor`. Must have the same type as `begin`.\r\n value: A `Tensor`. Must have the same type as `ref`.\r\n begin_mask: An optional `int`. Defaults to `0`.\r\n end_mask: An optional `int`. Defaults to `0`.\r\n ellipsis_mask: An optional `int`. Defaults to `0`.\r\n new_axis_mask: An optional `int`. Defaults to `0`.\r\n shrink_axis_mask: An optional `int`. Defaults to `0`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A mutable `Tensor`. Has the same type as `ref`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if begin_mask is None:\r\n begin_mask = 0\r\n begin_mask = _execute.make_int(begin_mask, \"begin_mask\")\r\n if end_mask is None:\r\n end_mask = 0\r\n end_mask = _execute.make_int(end_mask, \"end_mask\")\r\n if ellipsis_mask is None:\r\n ellipsis_mask = 0\r\n ellipsis_mask = _execute.make_int(ellipsis_mask, \"ellipsis_mask\")\r\n if new_axis_mask is None:\r\n new_axis_mask = 0\r\n new_axis_mask = _execute.make_int(new_axis_mask, \"new_axis_mask\")\r\n if shrink_axis_mask is None:\r\n shrink_axis_mask = 0\r\n shrink_axis_mask = _execute.make_int(shrink_axis_mask, \"shrink_axis_mask\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StridedSliceAssign\", ref=ref, begin=begin, end=end, strides=strides,\r\n value=value, begin_mask=begin_mask, end_mask=end_mask,\r\n ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask,\r\n shrink_axis_mask=shrink_axis_mask, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Index\", _op.get_attr(\"Index\"),\r\n \"begin_mask\", _op.get_attr(\"begin_mask\"), \"end_mask\",\r\n _op.get_attr(\"end_mask\"), \"ellipsis_mask\",\r\n _op.get_attr(\"ellipsis_mask\"), \"new_axis_mask\",\r\n _op.get_attr(\"new_axis_mask\"), \"shrink_axis_mask\",\r\n _op.get_attr(\"shrink_axis_mask\"))\r\n _execute.record_gradient(\r\n \"StridedSliceAssign\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n raise RuntimeError(\"strided_slice_assign op does not support eager execution. 
Arg 'output_ref' is a ref.\")\r\n\r\n\r\n raise RuntimeError(\"strided_slice_assign op does not support eager execution. Arg 'output_ref' is a ref.\")\r\n\r\ndef strided_slice_grad(shape, begin, end, strides, dy, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None):\r\n r\"\"\"Returns the gradient of `StridedSlice`.\r\n\r\n Since `StridedSlice` cuts out pieces of its `input` which is size\r\r\n `shape`, its gradient will have the same shape (which is passed here\r\r\n as `shape`). The gradient will be zero in any element that the slice\r\r\n does not select.\r\r\n \r\r\n Arguments are the same as StridedSliceGrad with the exception that\r\r\n `dy` is the input gradient to be propagated and `shape` is the\r\r\n shape of `StridedSlice`'s `input`.\r\n\r\n Args:\r\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n begin: A `Tensor`. Must have the same type as `shape`.\r\n end: A `Tensor`. Must have the same type as `shape`.\r\n strides: A `Tensor`. Must have the same type as `shape`.\r\n dy: A `Tensor`.\r\n begin_mask: An optional `int`. Defaults to `0`.\r\n end_mask: An optional `int`. Defaults to `0`.\r\n ellipsis_mask: An optional `int`. Defaults to `0`.\r\n new_axis_mask: An optional `int`. Defaults to `0`.\r\n shrink_axis_mask: An optional `int`. Defaults to `0`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `dy`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if begin_mask is None:\r\n begin_mask = 0\r\n begin_mask = _execute.make_int(begin_mask, \"begin_mask\")\r\n if end_mask is None:\r\n end_mask = 0\r\n end_mask = _execute.make_int(end_mask, \"end_mask\")\r\n if ellipsis_mask is None:\r\n ellipsis_mask = 0\r\n ellipsis_mask = _execute.make_int(ellipsis_mask, \"ellipsis_mask\")\r\n if new_axis_mask is None:\r\n new_axis_mask = 0\r\n new_axis_mask = _execute.make_int(new_axis_mask, \"new_axis_mask\")\r\n if shrink_axis_mask is None:\r\n shrink_axis_mask = 0\r\n shrink_axis_mask = _execute.make_int(shrink_axis_mask, \"shrink_axis_mask\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"StridedSliceGrad\", shape=shape, begin=begin, end=end,\r\n strides=strides, dy=dy, begin_mask=begin_mask, end_mask=end_mask,\r\n ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask,\r\n shrink_axis_mask=shrink_axis_mask, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Index\", _op.get_attr(\"Index\"),\r\n \"begin_mask\", _op.get_attr(\"begin_mask\"), \"end_mask\",\r\n _op.get_attr(\"end_mask\"), \"ellipsis_mask\",\r\n _op.get_attr(\"ellipsis_mask\"), \"new_axis_mask\",\r\n _op.get_attr(\"new_axis_mask\"), \"shrink_axis_mask\",\r\n _op.get_attr(\"shrink_axis_mask\"))\r\n _execute.record_gradient(\r\n \"StridedSliceGrad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"StridedSliceGrad\", name, _ctx._post_execution_callbacks, shape,\r\n begin, end, strides, dy, \"begin_mask\", begin_mask, \"end_mask\",\r\n end_mask, \"ellipsis_mask\", ellipsis_mask, \"new_axis_mask\",\r\n new_axis_mask, \"shrink_axis_mask\", shrink_axis_mask)\r\n return _result\r\n except _core._FallbackException:\r\n return strided_slice_grad_eager_fallback(\r\n shape, begin, end, strides, dy, begin_mask=begin_mask,\r\n 
end_mask=end_mask, ellipsis_mask=ellipsis_mask,\r\n new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask,\r\n name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef strided_slice_grad_eager_fallback(shape, begin, end, strides, dy, begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function strided_slice_grad\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if begin_mask is None:\r\n begin_mask = 0\r\n begin_mask = _execute.make_int(begin_mask, \"begin_mask\")\r\n if end_mask is None:\r\n end_mask = 0\r\n end_mask = _execute.make_int(end_mask, \"end_mask\")\r\n if ellipsis_mask is None:\r\n ellipsis_mask = 0\r\n ellipsis_mask = _execute.make_int(ellipsis_mask, \"ellipsis_mask\")\r\n if new_axis_mask is None:\r\n new_axis_mask = 0\r\n new_axis_mask = _execute.make_int(new_axis_mask, \"new_axis_mask\")\r\n if shrink_axis_mask is None:\r\n shrink_axis_mask = 0\r\n shrink_axis_mask = _execute.make_int(shrink_axis_mask, \"shrink_axis_mask\")\r\n _attr_T, (dy,) = _execute.args_to_matching_eager([dy], _ctx)\r\n _attr_Index, _inputs_Index = _execute.args_to_matching_eager([shape, begin, end, strides], _ctx)\r\n (shape, begin, end, strides) = _inputs_Index\r\n _inputs_flat = [shape, begin, end, strides, dy]\r\n _attrs = (\"T\", _attr_T, \"Index\", _attr_Index, \"begin_mask\", begin_mask,\r\n \"end_mask\", end_mask, \"ellipsis_mask\", ellipsis_mask, \"new_axis_mask\",\r\n new_axis_mask, \"shrink_axis_mask\", shrink_axis_mask)\r\n _result = _execute.execute(b\"StridedSliceGrad\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"StridedSliceGrad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n@tf_export('tile', 'manip.tile')\r\n@deprecated_endpoints('manip.tile')\r\ndef tile(input, multiples, name=None):\r\n r\"\"\"Constructs a tensor by tiling a given tensor.\r\n\r\n This operation creates a new tensor by replicating `input` `multiples` times.\r\r\n The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,\r\r\n and the values of `input` are replicated `multiples[i]` times along the 'i'th\r\r\n dimension. For example, tiling `[a b c d]` by `[2]` produces\r\r\n `[a b c d a b c d]`.\r\n\r\n Args:\r\n input: A `Tensor`. 1-D or higher.\r\n multiples: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n 1-D. Length must be the same as the number of dimensions in `input`\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Tile\", input=input, multiples=multiples, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tmultiples\",\r\n _op.get_attr(\"Tmultiples\"))\r\n _execute.record_gradient(\r\n \"Tile\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Tile\", name,\r\n _ctx._post_execution_callbacks, input, multiples)\r\n return _result\r\n except _core._FallbackException:\r\n return tile_eager_fallback(\r\n input, multiples, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef tile_eager_fallback(input, multiples, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function tile\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n _attr_Tmultiples, (multiples,) = _execute.args_to_matching_eager([multiples], _ctx, _dtypes.int32)\r\n _inputs_flat = [input, multiples]\r\n _attrs = (\"T\", _attr_T, \"Tmultiples\", _attr_Tmultiples)\r\n _result = _execute.execute(b\"Tile\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Tile\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef tile_grad(input, multiples, name=None):\r\n r\"\"\"Returns the gradient of `Tile`.\r\n\r\n Since `Tile` takes an input and repeats the input `multiples` times\r\r\n along each dimension, `TileGrad` takes in `multiples` and aggregates\r\r\n each repeated tile of `input` into `output`.\r\n\r\n Args:\r\n input: A `Tensor`.\r\n multiples: A `Tensor` of type `int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `input`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"TileGrad\", input=input, multiples=multiples, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"TileGrad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"TileGrad\",\r\n name, _ctx._post_execution_callbacks, input, multiples)\r\n return _result\r\n except _core._FallbackException:\r\n return tile_grad_eager_fallback(\r\n input, multiples, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef tile_grad_eager_fallback(input, multiples, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function tile_grad\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)\r\n multiples = _ops.convert_to_tensor(multiples, _dtypes.int32)\r\n _inputs_flat = [input, multiples]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"TileGrad\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"TileGrad\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef transpose(x, perm, name=None):\r\n r\"\"\"Shuffle dimensions of x according to a permutation.\r\n\r\n The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:\r\r\n `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`\r\n\r\n Args:\r\n x: A `Tensor`.\r\n perm: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. 
Has the same type as `x`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Transpose\", x=x, perm=perm, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tperm\", _op.get_attr(\"Tperm\"))\r\n _execute.record_gradient(\r\n \"Transpose\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Transpose\",\r\n name, _ctx._post_execution_callbacks, x, perm)\r\n return _result\r\n except _core._FallbackException:\r\n return transpose_eager_fallback(\r\n x, perm, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef transpose_eager_fallback(x, perm, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function transpose\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n _attr_Tperm, (perm,) = _execute.args_to_matching_eager([perm], _ctx, _dtypes.int32)\r\n _inputs_flat = [x, perm]\r\n _attrs = (\"T\", _attr_T, \"Tperm\", _attr_Tperm)\r\n _result = _execute.execute(b\"Transpose\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Transpose\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\n_unique_outputs = [\"y\", \"idx\"]\r\n_UniqueOutput = _collections.namedtuple(\r\n \"Unique\", _unique_outputs)\r\n\r\n\r\ndef unique(x, out_idx=_dtypes.int32, name=None):\r\n r\"\"\"Finds unique elements in a 1-D tensor.\r\n\r\n This operation returns a tensor `y` containing all of the unique elements of `x`\r\r\n sorted in the same order that they occur in `x`. This operation also returns a\r\r\n tensor `idx` the same size as `x` that contains the index of each value of `x`\r\r\n in the unique output `y`. In other words:\r\r\n \r\r\n `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\r\r\n y, idx = unique(x)\r\r\n y ==> [1, 2, 4, 7, 8]\r\r\n idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\r\r\n ```\r\n\r\n Args:\r\n x: A `Tensor`. 1-D.\r\n out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (y, idx).\r\n\r\n y: A `Tensor`. 
Has the same type as `x`.\r\n idx: A `Tensor` of type `out_idx`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Unique\", x=x, out_idx=out_idx, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"out_idx\", _op.get_attr(\"out_idx\"))\r\n _execute.record_gradient(\r\n \"Unique\", _inputs_flat, _attrs, _result, name)\r\n _result = _UniqueOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Unique\", name,\r\n _ctx._post_execution_callbacks, x, \"out_idx\", out_idx)\r\n _result = _UniqueOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return unique_eager_fallback(\r\n x, out_idx=out_idx, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef unique_eager_fallback(x, out_idx=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unique\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n _inputs_flat = [x]\r\n _attrs = (\"T\", _attr_T, \"out_idx\", out_idx)\r\n _result = _execute.execute(b\"Unique\", 2, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Unique\", _inputs_flat, _attrs, _result, name)\r\n _result = _UniqueOutput._make(_result)\r\n return _result\r\n\r\n\r\n_unique_v2_outputs = [\"y\", \"idx\"]\r\n_UniqueV2Output = _collections.namedtuple(\r\n \"UniqueV2\", _unique_v2_outputs)\r\n\r\n\r\ndef unique_v2(x, axis, out_idx=_dtypes.int32, name=None):\r\n r\"\"\"Finds unique elements along an axis of a tensor.\r\n\r\n This operation either returns a tensor `y` containing unique elements\r\r\n along the `axis` of a tensor. The returned unique elements is sorted\r\r\n in the same order as they occur along `axis` in `x`.\r\r\n This operation also returns a tensor `idx` that is the same size as\r\r\n the number of the elements in `x` along the `axis` dimension. It\r\r\n contains the index in the unique output `y`.\r\r\n In other words, for an `1-D` tensor `x` with `axis = None:\r\r\n \r\r\n `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\r\r\n y, idx = unique(x)\r\r\n y ==> [1, 2, 4, 7, 8]\r\r\n idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\r\r\n ```\r\r\n \r\r\n For an `2-D` tensor `x` with `axis = 0`:\r\r\n \r\r\n ```\r\r\n # tensor 'x' is [[1, 0, 0],\r\r\n # [1, 0, 0],\r\r\n # [2, 0, 0]]\r\r\n y, idx = unique(x, axis=0)\r\r\n y ==> [[1, 0, 0],\r\r\n [2, 0, 0]]\r\r\n idx ==> [0, 0, 1]\r\r\n ```\r\r\n \r\r\n For an `2-D` tensor `x` with `axis = 1`:\r\r\n \r\r\n ```\r\r\n # tensor 'x' is [[1, 0, 0],\r\r\n # [1, 0, 0],\r\r\n # [2, 0, 0]]\r\r\n y, idx = unique(x, axis=1)\r\r\n y ==> [[1, 0],\r\r\n [1, 0],\r\r\n [2, 0]]\r\r\n idx ==> [0, 1, 1]\r\r\n ```\r\n\r\n Args:\r\n x: A `Tensor`. 
A `Tensor`.\r\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n A `Tensor` of type `int32` (default: None). The axis of the Tensor to\r\r\n find the unique elements.\r\n out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (y, idx).\r\n\r\n y: A `Tensor`. Has the same type as `x`.\r\n idx: A `Tensor` of type `out_idx`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UniqueV2\", x=x, axis=axis, out_idx=out_idx, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Taxis\", _op.get_attr(\"Taxis\"),\r\n \"out_idx\", _op.get_attr(\"out_idx\"))\r\n _execute.record_gradient(\r\n \"UniqueV2\", _inputs_flat, _attrs, _result, name)\r\n _result = _UniqueV2Output._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"UniqueV2\",\r\n name, _ctx._post_execution_callbacks, x, axis, \"out_idx\", out_idx)\r\n _result = _UniqueV2Output._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return unique_v2_eager_fallback(\r\n x, axis, out_idx=out_idx, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef unique_v2_eager_fallback(x, axis, out_idx=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unique_v2\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int64)\r\n _inputs_flat = [x, axis]\r\n _attrs = (\"T\", _attr_T, \"Taxis\", _attr_Taxis, \"out_idx\", out_idx)\r\n _result = _execute.execute(b\"UniqueV2\", 2, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UniqueV2\", _inputs_flat, _attrs, _result, name)\r\n _result = _UniqueV2Output._make(_result)\r\n return _result\r\n\r\n\r\n_unique_with_counts_outputs = [\"y\", \"idx\", \"count\"]\r\n_UniqueWithCountsOutput = _collections.namedtuple(\r\n \"UniqueWithCounts\", _unique_with_counts_outputs)\r\n\r\n\r\ndef unique_with_counts(x, out_idx=_dtypes.int32, name=None):\r\n r\"\"\"Finds unique elements in a 1-D tensor.\r\n\r\n This operation returns a tensor `y` containing all of the unique elements of `x`\r\r\n sorted in the same order that they occur in `x`. This operation also returns a\r\r\n tensor `idx` the same size as `x` that contains the index of each value of `x`\r\r\n in the unique output `y`. Finally, it returns a third tensor `count` that\r\r\n contains the count of each element of `y` in `x`. 
In other words:\r\r\n \r\r\n `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\r\r\n y, idx, count = unique_with_counts(x)\r\r\n y ==> [1, 2, 4, 7, 8]\r\r\n idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\r\r\n count ==> [2, 1, 3, 1, 2]\r\r\n ```\r\n\r\n Args:\r\n x: A `Tensor`. 1-D.\r\n out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (y, idx, count).\r\n\r\n y: A `Tensor`. Has the same type as `x`.\r\n idx: A `Tensor` of type `out_idx`.\r\n count: A `Tensor` of type `out_idx`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UniqueWithCounts\", x=x, out_idx=out_idx, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"out_idx\", _op.get_attr(\"out_idx\"))\r\n _execute.record_gradient(\r\n \"UniqueWithCounts\", _inputs_flat, _attrs, _result, name)\r\n _result = _UniqueWithCountsOutput._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"UniqueWithCounts\", name, _ctx._post_execution_callbacks, x,\r\n \"out_idx\", out_idx)\r\n _result = _UniqueWithCountsOutput._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return unique_with_counts_eager_fallback(\r\n x, out_idx=out_idx, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef unique_with_counts_eager_fallback(x, out_idx=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unique_with_counts\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n _inputs_flat = [x]\r\n _attrs = (\"T\", _attr_T, \"out_idx\", out_idx)\r\n _result = _execute.execute(b\"UniqueWithCounts\", 3, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UniqueWithCounts\", _inputs_flat, _attrs, _result, name)\r\n _result = _UniqueWithCountsOutput._make(_result)\r\n return _result\r\n\r\n\r\n_unique_with_counts_v2_outputs = [\"y\", \"idx\", \"count\"]\r\n_UniqueWithCountsV2Output = _collections.namedtuple(\r\n \"UniqueWithCountsV2\", _unique_with_counts_v2_outputs)\r\n\r\n\r\ndef unique_with_counts_v2(x, axis, out_idx=_dtypes.int32, name=None):\r\n r\"\"\"Finds unique elements along an axis of a tensor.\r\n\r\n This operation either returns a tensor `y` containing unique elements\r\r\n along the `axis` of a tensor. The returned unique elements is sorted\r\r\n in the same order as they occur along `axis` in `x`.\r\r\n This operation also returns a tensor `idx` and a tensor `count`\r\r\n that are the same size as the number of the elements in `x` along the\r\r\n `axis` dimension. 
The `idx` contains the index in the unique output `y`\r\r\n and the `count` contains the count in the unique output `y`.\r\r\n In other words, for an `1-D` tensor `x` with `axis = None:\r\r\n \r\r\n `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\r\r\n y, idx, count = unique_with_counts(x)\r\r\n y ==> [1, 2, 4, 7, 8]\r\r\n idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\r\r\n count ==> [2, 1, 3, 1, 2]\r\r\n ```\r\r\n \r\r\n For an `2-D` tensor `x` with `axis = 0`:\r\r\n \r\r\n ```\r\r\n # tensor 'x' is [[1, 0, 0],\r\r\n # [1, 0, 0],\r\r\n # [2, 0, 0]]\r\r\n y, idx, count = unique_with_counts(x, axis=0)\r\r\n y ==> [[1, 0, 0],\r\r\n [2, 0, 0]]\r\r\n idx ==> [0, 0, 1]\r\r\n count ==> [2, 1]\r\r\n ```\r\r\n \r\r\n For an `2-D` tensor `x` with `axis = 1`:\r\r\n \r\r\n ```\r\r\n # tensor 'x' is [[1, 0, 0],\r\r\n # [1, 0, 0],\r\r\n # [2, 0, 0]]\r\r\n y, idx, count = unique_with_counts(x, axis=1)\r\r\n y ==> [[1, 0],\r\r\n [1, 0],\r\r\n [2, 0]]\r\r\n idx ==> [0, 1, 1]\r\r\n count ==> [1, 2]\r\r\n ```\r\n\r\n Args:\r\n x: A `Tensor`. A `Tensor`.\r\n axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n A `Tensor` of type `int32` (default: None). The axis of the Tensor to\r\r\n find the unique elements.\r\n out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A tuple of `Tensor` objects (y, idx, count).\r\n\r\n y: A `Tensor`. Has the same type as `x`.\r\n idx: A `Tensor` of type `out_idx`.\r\n count: A `Tensor` of type `out_idx`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UniqueWithCountsV2\", x=x, axis=axis, out_idx=out_idx, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Taxis\", _op.get_attr(\"Taxis\"),\r\n \"out_idx\", _op.get_attr(\"out_idx\"))\r\n _execute.record_gradient(\r\n \"UniqueWithCountsV2\", _inputs_flat, _attrs, _result, name)\r\n _result = _UniqueWithCountsV2Output._make(_result)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name,\r\n \"UniqueWithCountsV2\", name, _ctx._post_execution_callbacks, x, axis,\r\n \"out_idx\", out_idx)\r\n _result = _UniqueWithCountsV2Output._make(_result)\r\n return _result\r\n except _core._FallbackException:\r\n return unique_with_counts_v2_eager_fallback(\r\n x, axis, out_idx=out_idx, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef unique_with_counts_v2_eager_fallback(x, axis, out_idx=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unique_with_counts_v2\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if out_idx is None:\r\n out_idx = _dtypes.int32\r\n out_idx = _execute.make_type(out_idx, \"out_idx\")\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int64)\r\n _inputs_flat = [x, axis]\r\n 
_attrs = (\"T\", _attr_T, \"Taxis\", _attr_Taxis, \"out_idx\", out_idx)\r\n _result = _execute.execute(b\"UniqueWithCountsV2\", 3, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UniqueWithCountsV2\", _inputs_flat, _attrs, _result, name)\r\n _result = _UniqueWithCountsV2Output._make(_result)\r\n return _result\r\n\r\n\r\ndef unpack(value, num, axis=0, name=None):\r\n r\"\"\"Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.\r\n\r\n Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.\r\r\n For example, given a tensor of shape `(A, B, C, D)`;\r\r\n \r\r\n If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`\r\r\n and each tensor in `output` will have shape `(B, C, D)`. (Note that the\r\r\n dimension unpacked along is gone, unlike `split`).\r\r\n \r\r\n If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`\r\r\n and each tensor in `output` will have shape `(A, C, D)`.\r\r\n Etc.\r\r\n \r\r\n This is the opposite of `pack`.\r\n\r\n Args:\r\n value: A `Tensor`.\r\n 1-D or higher, with `axis` dimension size equal to `num`.\r\n num: An `int` that is `>= 0`.\r\n axis: An optional `int`. Defaults to `0`.\r\n Dimension along which to unpack. Negative values wrap around, so the\r\r\n valid range is `[-R, R)`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A list of `num` `Tensor` objects with the same type as `value`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n num = _execute.make_int(num, \"num\")\r\n if axis is None:\r\n axis = 0\r\n axis = _execute.make_int(axis, \"axis\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Unpack\", value=value, num=num, axis=axis, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"num\", _op.get_attr(\"num\"), \"T\", _op.get_attr(\"T\"), \"axis\",\r\n _op.get_attr(\"axis\"))\r\n _execute.record_gradient(\r\n \"Unpack\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Unpack\", name,\r\n _ctx._post_execution_callbacks, value, \"num\", num, \"axis\", axis)\r\n return _result\r\n except _core._FallbackException:\r\n return unpack_eager_fallback(\r\n value, num=num, axis=axis, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef unpack_eager_fallback(value, num, axis=0, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unpack\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n num = _execute.make_int(num, \"num\")\r\n if axis is None:\r\n axis = 0\r\n axis = _execute.make_int(axis, \"axis\")\r\n _attr_T, (value,) = _execute.args_to_matching_eager([value], _ctx)\r\n _inputs_flat = [value]\r\n _attrs = (\"num\", num, \"T\", _attr_T, \"axis\", axis)\r\n _result = _execute.execute(b\"Unpack\", num, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Unpack\", _inputs_flat, _attrs, _result, name)\r\n return _result\r\n\r\n\r\n@tf_export('unravel_index')\r\ndef unravel_index(indices, dims, name=None):\r\n r\"\"\"Converts a flat index or array of flat 
indices into a tuple of\r\n\r\n coordinate arrays.\r\r\n \r\r\n @compatibility(numpy)\r\r\n Equivalent to np.unravel_index\r\r\n @end_compatibility\r\n\r\n Args:\r\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\r\n An 0-D or 1-D `int` Tensor whose elements are indices into the\r\r\n flattened version of an array of dimensions dims.\r\n dims: A `Tensor`. Must have the same type as `indices`.\r\n An 1-D `int` Tensor. The shape of the array to use for unraveling\r\r\n indices.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `indices`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UnravelIndex\", indices=indices, dims=dims, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"Tidx\", _op.get_attr(\"Tidx\"))\r\n _execute.record_gradient(\r\n \"UnravelIndex\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"UnravelIndex\",\r\n name, _ctx._post_execution_callbacks, indices, dims)\r\n return _result\r\n except _core._FallbackException:\r\n return unravel_index_eager_fallback(\r\n indices, dims, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef unravel_index_eager_fallback(indices, dims, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function unravel_index\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_Tidx, _inputs_Tidx = _execute.args_to_matching_eager([indices, dims], _ctx, _dtypes.int32)\r\n (indices, dims) = _inputs_Tidx\r\n _inputs_flat = [indices, dims]\r\n _attrs = (\"Tidx\", _attr_Tidx)\r\n _result = _execute.execute(b\"UnravelIndex\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UnravelIndex\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef upper_bound(sorted_inputs, values, out_type=_dtypes.int32, name=None):\r\n r\"\"\"Applies upper_bound(sorted_search_values, values) along each row.\r\n\r\n Each set of rows with the same index in (sorted_inputs, values) is treated\r\r\n independently. The resulting row is the equivalent of calling\r\r\n `np.searchsorted(sorted_inputs, values, side='right')`.\r\r\n \r\r\n The result is not a global index to the entire \r\r\n `Tensor`, but rather just the index in the last dimension.\r\r\n \r\r\n A 2-D example:\r\r\n sorted_sequence = [[0, 3, 9, 9, 10],\r\r\n [1, 2, 3, 4, 5]]\r\r\n values = [[2, 4, 9],\r\r\n [0, 2, 6]]\r\r\n \r\r\n result = UpperBound(sorted_sequence, values)\r\r\n \r\r\n result == [[1, 2, 4],\r\r\n [0, 2, 5]]\r\n\r\n Args:\r\n sorted_inputs: A `Tensor`. 2-D Tensor where each row is ordered.\r\n values: A `Tensor`. Must have the same type as `sorted_inputs`.\r\n 2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains\r\r\n the values that will be searched for in `sorted_search_values`.\r\n out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. 
Defaults to `tf.int32`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `out_type`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"UpperBound\", sorted_inputs=sorted_inputs, values=values,\r\n out_type=out_type, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"), \"out_type\", _op.get_attr(\"out_type\"))\r\n _execute.record_gradient(\r\n \"UpperBound\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"UpperBound\",\r\n name, _ctx._post_execution_callbacks, sorted_inputs, values,\r\n \"out_type\", out_type)\r\n return _result\r\n except _core._FallbackException:\r\n return upper_bound_eager_fallback(\r\n sorted_inputs, values, out_type=out_type, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef upper_bound_eager_fallback(sorted_inputs, values, out_type=_dtypes.int32, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function upper_bound\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n if out_type is None:\r\n out_type = _dtypes.int32\r\n out_type = _execute.make_type(out_type, \"out_type\")\r\n _attr_T, _inputs_T = _execute.args_to_matching_eager([sorted_inputs, values], _ctx)\r\n (sorted_inputs, values) = _inputs_T\r\n _inputs_flat = [sorted_inputs, values]\r\n _attrs = (\"T\", _attr_T, \"out_type\", out_type)\r\n _result = _execute.execute(b\"UpperBound\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"UpperBound\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef where(condition, name=None):\r\n r\"\"\"Returns locations of nonzero / true values in a tensor.\r\n\r\n This operation returns the coordinates of true elements in `condition`. The\r\r\n coordinates are returned in a 2-D tensor where the first dimension (rows)\r\r\n represents the number of true elements, and the second dimension (columns)\r\r\n represents the coordinates of the true elements. Keep in mind, the shape of\r\r\n the output tensor can vary depending on how many true values there are in\r\r\n `condition`. 
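As a minimal sketch, assuming a 1-D boolean `condition`, there is one output row per true element:\r\r\n \r\r\n ```\r\r\n # 'condition' tensor is [True, False, True]\r\r\n where(condition) ==> [[0],\r\r\n [2]]\r\r\n ```\r\r\n \r\r\n 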
Indices are output in row-major order.\r\r\n \r\r\n For example:\r\r\n \r\r\n ```\r\r\n # 'input' tensor is [[True, False]\r\r\n # [True, False]]\r\r\n # 'input' has two true values, so output has two coordinates.\r\r\n # 'input' has rank of 2, so coordinates have two indices.\r\r\n where(input) ==> [[0, 0],\r\r\n [1, 0]]\r\r\n \r\r\n # `condition` tensor is [[[True, False]\r\r\n # [True, False]]\r\r\n # [[False, True]\r\r\n # [False, True]]\r\r\n # [[False, False]\r\r\n # [False, True]]]\r\r\n # 'input' has 5 true values, so output has 5 coordinates.\r\r\n # 'input' has rank of 3, so coordinates have three indices.\r\r\n where(input) ==> [[0, 0, 0],\r\r\n [0, 1, 0],\r\r\n [1, 0, 1],\r\r\n [1, 1, 1],\r\r\n [2, 1, 1]]\r\r\n \r\r\n # `condition` tensor is [[[1.5, 0.0]\r\r\n # [-0.5, 0.0]]\r\r\n # [[0.0, 0.25]\r\r\n # [0.0, 0.75]]\r\r\n # [[0.0, 0.0]\r\r\n # [0.0, 0.01]]]\r\r\n # 'input' has 5 nonzero values, so output has 5 coordinates.\r\r\n # 'input' has rank of 3, so coordinates have three indices.\r\r\n where(input) ==> [[0, 0, 0],\r\r\n [0, 1, 0],\r\r\n [1, 0, 1],\r\r\n [1, 1, 1],\r\r\n [2, 1, 1]]\r\r\n \r\r\n # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]\r\r\n # [0.0 + 0.5j, 0.0 + 0.0j]]\r\r\n # [[0.0 + 0.0j, 0.25 + 1.5j]\r\r\n # [0.0 + 0.0j, 0.75 + 0.0j]]\r\r\n # [[0.0 + 0.0j, 0.0 + 0.0j]\r\r\n # [0.0 + 0.0j, 0.01 + 0.0j]]]\r\r\n # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.\r\r\n # 'input' has rank of 3, so coordinates have three indices.\r\r\n where(input) ==> [[0, 0, 0],\r\r\n [0, 1, 0],\r\r\n [1, 0, 1],\r\r\n [1, 1, 1],\r\r\n [2, 1, 1]]\r\r\n ```\r\n\r\n Args:\r\n condition: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor` of type `int64`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"Where\", input=condition, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"Where\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"Where\", name,\r\n _ctx._post_execution_callbacks, condition)\r\n return _result\r\n except _core._FallbackException:\r\n return where_eager_fallback(\r\n condition, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef where_eager_fallback(condition, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function where\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (condition,) = _execute.args_to_matching_eager([condition], _ctx, _dtypes.bool)\r\n _inputs_flat = [condition]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"Where\", 1, inputs=_inputs_flat, attrs=_attrs,\r\n ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"Where\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n\r\ndef zeros_like(x, 
name=None):\r\n r\"\"\"Returns a tensor of zeros with the same shape and type as x.\r\n\r\n Args:\r\n x: A `Tensor`. a tensor of type T.\r\n name: A name for the operation (optional).\r\n\r\n Returns:\r\n A `Tensor`. Has the same type as `x`.\r\n \"\"\"\r\n _ctx = _context._context\r\n if _ctx is None or not _ctx._eager_context.is_eager:\r\n _, _, _op = _op_def_lib._apply_op_helper(\r\n \"ZerosLike\", x=x, name=name)\r\n _result = _op.outputs[:]\r\n _inputs_flat = _op.inputs\r\n _attrs = (\"T\", _op.get_attr(\"T\"))\r\n _execute.record_gradient(\r\n \"ZerosLike\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\n else:\r\n try:\r\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\r\n _ctx._context_handle, _ctx._eager_context.device_name, \"ZerosLike\",\r\n name, _ctx._post_execution_callbacks, x)\r\n return _result\r\n except _core._FallbackException:\r\n return zeros_like_eager_fallback(\r\n x, name=name, ctx=_ctx)\r\n except _core._NotOkStatusException as e:\r\n if name is not None:\r\n message = e.message + \" name: \" + name\r\n else:\r\n message = e.message\r\n _six.raise_from(_core._status_to_exception(e.code, message), None)\r\n\r\n\r\ndef zeros_like_eager_fallback(x, name=None, ctx=None):\r\n r\"\"\"This is the slowpath function for Eager mode.\r\n This is for function zeros_like\r\n \"\"\"\r\n _ctx = ctx if ctx else _context.context()\r\n _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)\r\n _inputs_flat = [x]\r\n _attrs = (\"T\", _attr_T)\r\n _result = _execute.execute(b\"ZerosLike\", 1, inputs=_inputs_flat,\r\n attrs=_attrs, ctx=_ctx, name=name)\r\n _execute.record_gradient(\r\n \"ZerosLike\", _inputs_flat, _attrs, _result, name)\r\n _result, = _result\r\n return _result\r\n\r\ndef _InitOpDefLibrary(op_list_proto_bytes):\r\n op_list = _op_def_pb2.OpList()\r\n op_list.ParseFromString(op_list_proto_bytes)\r\n _op_def_registry.register_op_list(op_list)\r\n op_def_lib = _op_def_library.OpDefLibrary()\r\n op_def_lib.add_op_list(op_list)\r\n return op_def_lib\r\n# op {\r\n# name: \"BatchMatrixBandPart\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"num_lower\"\r\n# type: DT_INT64\r\n# }\r\n# input_arg {\r\n# name: \"num_upper\"\r\n# type: DT_INT64\r\n# }\r\n# output_arg {\r\n# name: \"band\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# deprecation {\r\n# version: 14\r\n# explanation: \"Use MatrixBandPart\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"BatchMatrixDiag\"\r\n# input_arg {\r\n# name: \"diagonal\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# deprecation {\r\n# version: 14\r\n# explanation: \"Use MatrixDiag\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"BatchMatrixDiagPart\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"diagonal\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# deprecation {\r\n# version: 14\r\n# explanation: \"Use MatrixDiagPart\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"BatchMatrixSetDiag\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"diagonal\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# deprecation {\r\n# version: 14\r\n# explanation: \"Use MatrixSetDiag\"\r\n# 
}\r\n# }\r\n# op {\r\n# name: \"BatchToSpace\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"crops\"\r\n# type_attr: \"Tidx\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"block_size\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 2\r\n# }\r\n# attr {\r\n# name: \"Tidx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"BatchToSpaceND\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"block_shape\"\r\n# type_attr: \"Tblock_shape\"\r\n# }\r\n# input_arg {\r\n# name: \"crops\"\r\n# type_attr: \"Tcrops\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tblock_shape\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"Tcrops\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Bitcast\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"type\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_INT64\r\n# type: DT_INT32\r\n# type: DT_UINT8\r\n# type: DT_UINT16\r\n# type: DT_UINT32\r\n# type: DT_UINT64\r\n# type: DT_INT8\r\n# type: DT_INT16\r\n# type: DT_COMPLEX64\r\n# type: DT_COMPLEX128\r\n# type: DT_QINT8\r\n# type: DT_QUINT8\r\n# type: DT_QINT16\r\n# type: DT_QUINT16\r\n# type: DT_QINT32\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"type\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_INT64\r\n# type: DT_INT32\r\n# type: DT_UINT8\r\n# type: DT_UINT16\r\n# type: DT_UINT32\r\n# type: DT_UINT64\r\n# type: DT_INT8\r\n# type: DT_INT16\r\n# type: DT_COMPLEX64\r\n# type: DT_COMPLEX128\r\n# type: DT_QINT8\r\n# type: DT_QUINT8\r\n# type: DT_QINT16\r\n# type: DT_QUINT16\r\n# type: DT_QINT32\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"BroadcastArgs\"\r\n# input_arg {\r\n# name: \"s0\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"s1\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"r0\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"BroadcastGradientArgs\"\r\n# input_arg {\r\n# name: \"s0\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"s1\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"r0\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"r1\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: 
\"BroadcastTo\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"shape\"\r\n# type_attr: \"Tidx\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tidx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"CheckNumerics\"\r\n# input_arg {\r\n# name: \"tensor\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"message\"\r\n# type: \"string\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"Concat\"\r\n# input_arg {\r\n# name: \"concat_dim\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg {\r\n# name: \"values\"\r\n# type_attr: \"T\"\r\n# number_attr: \"N\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"N\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 2\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"ConcatOffset\"\r\n# input_arg {\r\n# name: \"concat_dim\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg {\r\n# name: \"shape\"\r\n# type: DT_INT32\r\n# number_attr: \"N\"\r\n# }\r\n# output_arg {\r\n# name: \"offset\"\r\n# type: DT_INT32\r\n# number_attr: \"N\"\r\n# }\r\n# attr {\r\n# name: \"N\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 2\r\n# }\r\n# }\r\n# op {\r\n# name: \"ConcatV2\"\r\n# input_arg {\r\n# name: \"values\"\r\n# type_attr: \"T\"\r\n# number_attr: \"N\"\r\n# }\r\n# input_arg {\r\n# name: \"axis\"\r\n# type_attr: \"Tidx\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"N\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 2\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tidx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ConjugateTranspose\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"perm\"\r\n# type_attr: \"Tperm\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tperm\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Const\"\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"dtype\"\r\n# }\r\n# attr {\r\n# name: \"value\"\r\n# type: \"tensor\"\r\n# }\r\n# attr {\r\n# name: \"dtype\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"DebugGradientIdentity\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# allows_uninitialized_input: true\r\n# }\r\n# op {\r\n# name: \"DebugGradientRefIdentity\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# is_ref: true\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: 
\"T\"\r\n# is_ref: true\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# allows_uninitialized_input: true\r\n# }\r\n# op {\r\n# name: \"DeepCopy\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# is_stateful: true\r\n# }\r\n# op {\r\n# name: \"DepthToSpace\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"block_size\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 2\r\n# }\r\n# attr {\r\n# name: \"data_format\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"NHWC\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"NHWC\"\r\n# s: \"NCHW\"\r\n# s: \"NCHW_VECT_C\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Dequantize\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"min_range\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"max_range\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_QINT8\r\n# type: DT_QUINT8\r\n# type: DT_QINT32\r\n# type: DT_QINT16\r\n# type: DT_QUINT16\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"mode\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"MIN_COMBINED\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"MIN_COMBINED\"\r\n# s: \"MIN_FIRST\"\r\n# s: \"SCALED\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Diag\"\r\n# input_arg {\r\n# name: \"diagonal\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# type: DT_COMPLEX64\r\n# type: DT_COMPLEX128\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"DiagPart\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"diagonal\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# type: DT_COMPLEX64\r\n# type: DT_COMPLEX128\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"EditDistance\"\r\n# input_arg {\r\n# name: \"hypothesis_indices\"\r\n# type: DT_INT64\r\n# }\r\n# input_arg {\r\n# name: \"hypothesis_values\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"hypothesis_shape\"\r\n# type: DT_INT64\r\n# }\r\n# input_arg {\r\n# name: \"truth_indices\"\r\n# type: DT_INT64\r\n# }\r\n# input_arg {\r\n# name: \"truth_values\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"truth_shape\"\r\n# type: DT_INT64\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"normalize\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: true\r\n# }\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"Empty\"\r\n# input_arg {\r\n# name: \"shape\"\r\n# type: DT_INT32\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"dtype\"\r\n# }\r\n# attr {\r\n# name: \"dtype\"\r\n# type: \"type\"\r\n# }\r\n# 
attr {\r\n# name: \"init\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# is_stateful: true\r\n# }\r\n# op {\r\n# name: \"EnsureShape\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"shape\"\r\n# type: \"shape\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"ExpandDims\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"dim\"\r\n# type_attr: \"Tdim\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tdim\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ExtractImagePatches\"\r\n# input_arg {\r\n# name: \"images\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"patches\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"ksizes\"\r\n# type: \"list(int)\"\r\n# has_minimum: true\r\n# minimum: 4\r\n# }\r\n# attr {\r\n# name: \"strides\"\r\n# type: \"list(int)\"\r\n# has_minimum: true\r\n# minimum: 4\r\n# }\r\n# attr {\r\n# name: \"rates\"\r\n# type: \"list(int)\"\r\n# has_minimum: true\r\n# minimum: 4\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_INT32\r\n# type: DT_UINT8\r\n# type: DT_INT16\r\n# type: DT_INT8\r\n# type: DT_INT64\r\n# type: DT_BFLOAT16\r\n# type: DT_UINT16\r\n# type: DT_HALF\r\n# type: DT_UINT32\r\n# type: DT_UINT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"padding\"\r\n# type: \"string\"\r\n# allowed_values {\r\n# list {\r\n# s: \"SAME\"\r\n# s: \"VALID\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ExtractVolumePatches\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"patches\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"ksizes\"\r\n# type: \"list(int)\"\r\n# has_minimum: true\r\n# minimum: 5\r\n# }\r\n# attr {\r\n# name: \"strides\"\r\n# type: \"list(int)\"\r\n# has_minimum: true\r\n# minimum: 5\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_INT32\r\n# type: DT_UINT8\r\n# type: DT_INT16\r\n# type: DT_INT8\r\n# type: DT_INT64\r\n# type: DT_BFLOAT16\r\n# type: DT_UINT16\r\n# type: DT_HALF\r\n# type: DT_UINT32\r\n# type: DT_UINT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"padding\"\r\n# type: \"string\"\r\n# allowed_values {\r\n# list {\r\n# s: \"SAME\"\r\n# s: \"VALID\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"FakeQuantWithMinMaxArgs\"\r\n# input_arg {\r\n# name: \"inputs\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"outputs\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"min\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: -6\r\n# }\r\n# }\r\n# attr {\r\n# name: \"max\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 6\r\n# }\r\n# }\r\n# attr {\r\n# name: \"num_bits\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 8\r\n# }\r\n# }\r\n# attr {\r\n# name: \"narrow_range\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"FakeQuantWithMinMaxArgsGradient\"\r\n# input_arg {\r\n# name: \"gradients\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg 
{\r\n# name: \"inputs\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"backprops\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"min\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: -6\r\n# }\r\n# }\r\n# attr {\r\n# name: \"max\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 6\r\n# }\r\n# }\r\n# attr {\r\n# name: \"num_bits\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 8\r\n# }\r\n# }\r\n# attr {\r\n# name: \"narrow_range\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"FakeQuantWithMinMaxVars\"\r\n# input_arg {\r\n# name: \"inputs\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"min\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"max\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"outputs\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"num_bits\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 8\r\n# }\r\n# }\r\n# attr {\r\n# name: \"narrow_range\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"FakeQuantWithMinMaxVarsGradient\"\r\n# input_arg {\r\n# name: \"gradients\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"inputs\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"min\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"max\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"backprops_wrt_input\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"backprop_wrt_min\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"backprop_wrt_max\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"num_bits\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 8\r\n# }\r\n# }\r\n# attr {\r\n# name: \"narrow_range\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"FakeQuantWithMinMaxVarsPerChannel\"\r\n# input_arg {\r\n# name: \"inputs\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"min\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"max\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"outputs\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"num_bits\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 8\r\n# }\r\n# }\r\n# attr {\r\n# name: \"narrow_range\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"FakeQuantWithMinMaxVarsPerChannelGradient\"\r\n# input_arg {\r\n# name: \"gradients\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"inputs\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"min\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"max\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"backprops_wrt_input\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"backprop_wrt_min\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"backprop_wrt_max\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"num_bits\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 8\r\n# }\r\n# }\r\n# attr {\r\n# name: \"narrow_range\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Fill\"\r\n# input_arg {\r\n# name: \"dims\"\r\n# type_attr: \"index_type\"\r\n# }\r\n# input_arg {\r\n# name: \"value\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"index_type\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# 
allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Gather\"\r\n# input_arg {\r\n# name: \"params\"\r\n# type_attr: \"Tparams\"\r\n# }\r\n# input_arg {\r\n# name: \"indices\"\r\n# type_attr: \"Tindices\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"Tparams\"\r\n# }\r\n# attr {\r\n# name: \"validate_indices\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: true\r\n# }\r\n# }\r\n# attr {\r\n# name: \"Tparams\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tindices\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"GatherNd\"\r\n# input_arg {\r\n# name: \"params\"\r\n# type_attr: \"Tparams\"\r\n# }\r\n# input_arg {\r\n# name: \"indices\"\r\n# type_attr: \"Tindices\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"Tparams\"\r\n# }\r\n# attr {\r\n# name: \"Tparams\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tindices\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"GatherV2\"\r\n# input_arg {\r\n# name: \"params\"\r\n# type_attr: \"Tparams\"\r\n# }\r\n# input_arg {\r\n# name: \"indices\"\r\n# type_attr: \"Tindices\"\r\n# }\r\n# input_arg {\r\n# name: \"axis\"\r\n# type_attr: \"Taxis\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"Tparams\"\r\n# }\r\n# attr {\r\n# name: \"Tparams\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tindices\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"Taxis\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"GuaranteeConst\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# is_stateful: true\r\n# }\r\n# op {\r\n# name: \"Identity\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"IdentityN\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_list_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_list_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"list(type)\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# }\r\n# op {\r\n# name: \"ImmutableConst\"\r\n# output_arg {\r\n# name: \"tensor\"\r\n# type_attr: \"dtype\"\r\n# }\r\n# attr {\r\n# name: \"dtype\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"shape\"\r\n# type: \"shape\"\r\n# }\r\n# attr {\r\n# name: \"memory_region_name\"\r\n# type: \"string\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"InplaceAdd\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"i\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg {\r\n# name: \"v\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"InplaceSub\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"i\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg {\r\n# name: \"v\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg 
{\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"InplaceUpdate\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"i\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg {\r\n# name: \"v\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"InvertPermutation\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ListDiff\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"out\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"idx\"\r\n# type_attr: \"out_idx\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"out_idx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"LowerBound\"\r\n# input_arg {\r\n# name: \"sorted_inputs\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"values\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"out_type\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"out_type\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"MatrixBandPart\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"num_lower\"\r\n# type_attr: \"Tindex\"\r\n# }\r\n# input_arg {\r\n# name: \"num_upper\"\r\n# type_attr: \"Tindex\"\r\n# }\r\n# output_arg {\r\n# name: \"band\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tindex\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT64\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"MatrixDiag\"\r\n# input_arg {\r\n# name: \"diagonal\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"MatrixDiagPart\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"diagonal\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"MatrixSetDiag\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"diagonal\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"MirrorPad\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"paddings\"\r\n# type_attr: \"Tpaddings\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: 
\"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tpaddings\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"mode\"\r\n# type: \"string\"\r\n# allowed_values {\r\n# list {\r\n# s: \"REFLECT\"\r\n# s: \"SYMMETRIC\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"MirrorPadGrad\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"paddings\"\r\n# type_attr: \"Tpaddings\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tpaddings\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"mode\"\r\n# type: \"string\"\r\n# allowed_values {\r\n# list {\r\n# s: \"REFLECT\"\r\n# s: \"SYMMETRIC\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"OneHot\"\r\n# input_arg {\r\n# name: \"indices\"\r\n# type_attr: \"TI\"\r\n# }\r\n# input_arg {\r\n# name: \"depth\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg {\r\n# name: \"on_value\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"off_value\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"axis\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: -1\r\n# }\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"TI\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT64\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_UINT8\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"OnesLike\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_INT8\r\n# type: DT_UINT8\r\n# type: DT_INT16\r\n# type: DT_UINT16\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# type: DT_COMPLEX64\r\n# type: DT_COMPLEX128\r\n# type: DT_BOOL\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Pack\"\r\n# input_arg {\r\n# name: \"values\"\r\n# type_attr: \"T\"\r\n# number_attr: \"N\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"N\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"axis\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Pad\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"paddings\"\r\n# type_attr: \"Tpaddings\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tpaddings\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"PadV2\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"paddings\"\r\n# type_attr: \"Tpaddings\"\r\n# }\r\n# input_arg {\r\n# name: \"constant_values\"\r\n# type_attr: \"T\"\r\n# 
}\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tpaddings\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ParallelConcat\"\r\n# input_arg {\r\n# name: \"values\"\r\n# type_attr: \"T\"\r\n# number_attr: \"N\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"N\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"shape\"\r\n# type: \"shape\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"Placeholder\"\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"dtype\"\r\n# }\r\n# attr {\r\n# name: \"dtype\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"shape\"\r\n# type: \"shape\"\r\n# default_value {\r\n# shape {\r\n# unknown_rank: true\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"PlaceholderV2\"\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"dtype\"\r\n# }\r\n# attr {\r\n# name: \"dtype\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"shape\"\r\n# type: \"shape\"\r\n# }\r\n# deprecation {\r\n# version: 23\r\n# explanation: \"Placeholder now behaves the same as PlaceholderV2.\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"PlaceholderWithDefault\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"dtype\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"dtype\"\r\n# }\r\n# attr {\r\n# name: \"dtype\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"shape\"\r\n# type: \"shape\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"PreventGradient\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"message\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"\"\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"QuantizeAndDequantize\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"signed_input\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: true\r\n# }\r\n# }\r\n# attr {\r\n# name: \"num_bits\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 8\r\n# }\r\n# }\r\n# attr {\r\n# name: \"range_given\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# attr {\r\n# name: \"input_min\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"input_max\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# }\r\n# }\r\n# }\r\n# deprecation {\r\n# version: 22\r\n# explanation: \"Replaced by QuantizeAndDequantizeV2\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"QuantizeAndDequantizeV2\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"input_min\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"input_max\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"signed_input\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: true\r\n# }\r\n# }\r\n# attr {\r\n# name: 
\"num_bits\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 8\r\n# }\r\n# }\r\n# attr {\r\n# name: \"range_given\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"QuantizeAndDequantizeV3\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"input_min\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"input_max\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"num_bits\"\r\n# type: DT_INT32\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"signed_input\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: true\r\n# }\r\n# }\r\n# attr {\r\n# name: \"range_given\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: true\r\n# }\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"QuantizeV2\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"min_range\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"max_range\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output_min\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"output_max\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_QINT8\r\n# type: DT_QUINT8\r\n# type: DT_QINT32\r\n# type: DT_QINT16\r\n# type: DT_QUINT16\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"mode\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"MIN_COMBINED\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"MIN_COMBINED\"\r\n# s: \"MIN_FIRST\"\r\n# s: \"SCALED\"\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"round_mode\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"HALF_AWAY_FROM_ZERO\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"HALF_AWAY_FROM_ZERO\"\r\n# s: \"HALF_TO_EVEN\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"QuantizedConcat\"\r\n# input_arg {\r\n# name: \"concat_dim\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg {\r\n# name: \"values\"\r\n# type_attr: \"T\"\r\n# number_attr: \"N\"\r\n# }\r\n# input_arg {\r\n# name: \"input_mins\"\r\n# type: DT_FLOAT\r\n# number_attr: \"N\"\r\n# }\r\n# input_arg {\r\n# name: \"input_maxes\"\r\n# type: DT_FLOAT\r\n# number_attr: \"N\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output_min\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"output_max\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"N\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 2\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"QuantizedInstanceNorm\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"x_min\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"x_max\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"y_min\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"y_max\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: 
\"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_QINT8\r\n# type: DT_QUINT8\r\n# type: DT_QINT32\r\n# type: DT_QINT16\r\n# type: DT_QUINT16\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"output_range_given\"\r\n# type: \"bool\"\r\n# default_value {\r\n# b: false\r\n# }\r\n# }\r\n# attr {\r\n# name: \"given_y_min\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"given_y_max\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"variance_epsilon\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 1e-05\r\n# }\r\n# }\r\n# attr {\r\n# name: \"min_separation\"\r\n# type: \"float\"\r\n# default_value {\r\n# f: 0.001\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"QuantizedReshape\"\r\n# input_arg {\r\n# name: \"tensor\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"shape\"\r\n# type_attr: \"Tshape\"\r\n# }\r\n# input_arg {\r\n# name: \"input_min\"\r\n# type: DT_FLOAT\r\n# }\r\n# input_arg {\r\n# name: \"input_max\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output_min\"\r\n# type: DT_FLOAT\r\n# }\r\n# output_arg {\r\n# name: \"output_max\"\r\n# type: DT_FLOAT\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tshape\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Rank\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type: DT_INT32\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"RefIdentity\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# is_ref: true\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# is_ref: true\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# allows_uninitialized_input: true\r\n# }\r\n# op {\r\n# name: \"Reshape\"\r\n# input_arg {\r\n# name: \"tensor\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"shape\"\r\n# type_attr: \"Tshape\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tshape\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ResourceStridedSliceAssign\"\r\n# input_arg {\r\n# name: \"ref\"\r\n# type: DT_RESOURCE\r\n# }\r\n# input_arg {\r\n# name: \"begin\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"end\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"strides\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"value\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Index\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"begin_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"end_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"ellipsis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"new_axis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 
0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"shrink_axis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# is_stateful: true\r\n# }\r\n# op {\r\n# name: \"Reverse\"\r\n# input_arg {\r\n# name: \"tensor\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"dims\"\r\n# type: DT_BOOL\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_UINT8\r\n# type: DT_INT8\r\n# type: DT_UINT16\r\n# type: DT_INT16\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# type: DT_BOOL\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_COMPLEX64\r\n# type: DT_COMPLEX128\r\n# type: DT_STRING\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ReverseSequence\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"seq_lengths\"\r\n# type_attr: \"Tlen\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"seq_dim\"\r\n# type: \"int\"\r\n# }\r\n# attr {\r\n# name: \"batch_dim\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tlen\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT64\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ReverseV2\"\r\n# input_arg {\r\n# name: \"tensor\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"axis\"\r\n# type_attr: \"Tidx\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"Tidx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_UINT8\r\n# type: DT_INT8\r\n# type: DT_UINT16\r\n# type: DT_INT16\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# type: DT_BOOL\r\n# type: DT_BFLOAT16\r\n# type: DT_HALF\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_COMPLEX64\r\n# type: DT_COMPLEX128\r\n# type: DT_STRING\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ScatterNd\"\r\n# input_arg {\r\n# name: \"indices\"\r\n# type_attr: \"Tindices\"\r\n# }\r\n# input_arg {\r\n# name: \"updates\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"shape\"\r\n# type_attr: \"Tindices\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tindices\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ScatterNdNonAliasingAdd\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"indices\"\r\n# type_attr: \"Tindices\"\r\n# }\r\n# input_arg {\r\n# name: \"updates\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_INT32\r\n# type: DT_UINT8\r\n# type: DT_INT16\r\n# type: DT_INT8\r\n# type: DT_COMPLEX64\r\n# type: DT_INT64\r\n# type: DT_QINT8\r\n# type: DT_QUINT8\r\n# type: DT_QINT32\r\n# type: DT_BFLOAT16\r\n# type: DT_UINT16\r\n# type: DT_COMPLEX128\r\n# type: DT_HALF\r\n# type: 
DT_UINT32\r\n# type: DT_UINT64\r\n# type: DT_BOOL\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"Tindices\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Shape\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"out_type\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"out_type\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ShapeN\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# number_attr: \"N\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"out_type\"\r\n# number_attr: \"N\"\r\n# }\r\n# attr {\r\n# name: \"N\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"out_type\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Size\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"out_type\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"out_type\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Slice\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"begin\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"size\"\r\n# type_attr: \"Index\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Index\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Snapshot\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"SpaceToBatch\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"paddings\"\r\n# type_attr: \"Tpaddings\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tpaddings\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"block_size\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 2\r\n# }\r\n# }\r\n# op {\r\n# name: \"SpaceToBatchND\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"block_shape\"\r\n# type_attr: \"Tblock_shape\"\r\n# }\r\n# input_arg {\r\n# name: \"paddings\"\r\n# type_attr: \"Tpaddings\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tblock_shape\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# 
}\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"Tpaddings\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"SpaceToDepth\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"block_size\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 2\r\n# }\r\n# attr {\r\n# name: \"data_format\"\r\n# type: \"string\"\r\n# default_value {\r\n# s: \"NHWC\"\r\n# }\r\n# allowed_values {\r\n# list {\r\n# s: \"NHWC\"\r\n# s: \"NCHW\"\r\n# s: \"NCHW_VECT_C\"\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Split\"\r\n# input_arg {\r\n# name: \"split_dim\"\r\n# type: DT_INT32\r\n# }\r\n# input_arg {\r\n# name: \"value\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# number_attr: \"num_split\"\r\n# }\r\n# attr {\r\n# name: \"num_split\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"SplitV\"\r\n# input_arg {\r\n# name: \"value\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"size_splits\"\r\n# type_attr: \"Tlen\"\r\n# }\r\n# input_arg {\r\n# name: \"split_dim\"\r\n# type: DT_INT32\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# number_attr: \"num_split\"\r\n# }\r\n# attr {\r\n# name: \"num_split\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# minimum: 1\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tlen\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT64\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Squeeze\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"squeeze_dims\"\r\n# type: \"list(int)\"\r\n# default_value {\r\n# list {\r\n# }\r\n# }\r\n# has_minimum: true\r\n# }\r\n# }\r\n# op {\r\n# name: \"StopGradient\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n# op {\r\n# name: \"StridedSlice\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"begin\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"end\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"strides\"\r\n# type_attr: \"Index\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Index\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"begin_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"end_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"ellipsis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: 
\"new_axis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"shrink_axis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"StridedSliceAssign\"\r\n# input_arg {\r\n# name: \"ref\"\r\n# type_attr: \"T\"\r\n# is_ref: true\r\n# }\r\n# input_arg {\r\n# name: \"begin\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"end\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"strides\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"value\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output_ref\"\r\n# type_attr: \"T\"\r\n# is_ref: true\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Index\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"begin_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"end_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"ellipsis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"new_axis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"shrink_axis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"StridedSliceGrad\"\r\n# input_arg {\r\n# name: \"shape\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"begin\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"end\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"strides\"\r\n# type_attr: \"Index\"\r\n# }\r\n# input_arg {\r\n# name: \"dy\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Index\"\r\n# type: \"type\"\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"begin_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"end_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"ellipsis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"new_axis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# attr {\r\n# name: \"shrink_axis_mask\"\r\n# type: \"int\"\r\n# default_value {\r\n# i: 0\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Tile\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"multiples\"\r\n# type_attr: \"Tmultiples\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tmultiples\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"TileGrad\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"multiples\"\r\n# type: DT_INT32\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# deprecation {\r\n# version: 3\r\n# explanation: \"TileGrad has been replaced with reduce_sum\"\r\n# }\r\n# }\r\n# op {\r\n# 
name: \"Transpose\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"perm\"\r\n# type_attr: \"Tperm\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Tperm\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Unique\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"idx\"\r\n# type_attr: \"out_idx\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"out_idx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"UniqueV2\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"axis\"\r\n# type_attr: \"Taxis\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"idx\"\r\n# type_attr: \"out_idx\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Taxis\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT64\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"out_idx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"UniqueWithCounts\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"idx\"\r\n# type_attr: \"out_idx\"\r\n# }\r\n# output_arg {\r\n# name: \"count\"\r\n# type_attr: \"out_idx\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"out_idx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"UniqueWithCountsV2\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"axis\"\r\n# type_attr: \"Taxis\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"idx\"\r\n# type_attr: \"out_idx\"\r\n# }\r\n# output_arg {\r\n# name: \"count\"\r\n# type_attr: \"out_idx\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"Taxis\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT64\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# attr {\r\n# name: \"out_idx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Unpack\"\r\n# input_arg {\r\n# name: \"value\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"T\"\r\n# number_attr: \"num\"\r\n# }\r\n# attr {\r\n# name: \"num\"\r\n# type: \"int\"\r\n# has_minimum: true\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"axis\"\r\n# type: \"int\"\r\n# default_value {\r\n# 
i: 0\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"UnravelIndex\"\r\n# input_arg {\r\n# name: \"indices\"\r\n# type_attr: \"Tidx\"\r\n# }\r\n# input_arg {\r\n# name: \"dims\"\r\n# type_attr: \"Tidx\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"Tidx\"\r\n# }\r\n# attr {\r\n# name: \"Tidx\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"UpperBound\"\r\n# input_arg {\r\n# name: \"sorted_inputs\"\r\n# type_attr: \"T\"\r\n# }\r\n# input_arg {\r\n# name: \"values\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"output\"\r\n# type_attr: \"out_type\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# attr {\r\n# name: \"out_type\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_INT32\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_INT32\r\n# type: DT_INT64\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"Where\"\r\n# input_arg {\r\n# name: \"input\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"index\"\r\n# type: DT_INT64\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# default_value {\r\n# type: DT_BOOL\r\n# }\r\n# allowed_values {\r\n# list {\r\n# type: DT_FLOAT\r\n# type: DT_DOUBLE\r\n# type: DT_INT32\r\n# type: DT_UINT8\r\n# type: DT_INT16\r\n# type: DT_INT8\r\n# type: DT_COMPLEX64\r\n# type: DT_INT64\r\n# type: DT_QINT8\r\n# type: DT_QUINT8\r\n# type: DT_QINT32\r\n# type: DT_BFLOAT16\r\n# type: DT_UINT16\r\n# type: DT_COMPLEX128\r\n# type: DT_HALF\r\n# type: DT_UINT32\r\n# type: DT_UINT64\r\n# type: DT_BOOL\r\n# }\r\n# }\r\n# }\r\n# }\r\n# op {\r\n# name: \"ZerosLike\"\r\n# input_arg {\r\n# name: \"x\"\r\n# type_attr: \"T\"\r\n# }\r\n# output_arg {\r\n# name: \"y\"\r\n# type_attr: \"T\"\r\n# }\r\n# attr {\r\n# name: \"T\"\r\n# type: \"type\"\r\n# }\r\n# }\r\n_op_def_lib = _InitOpDefLibrary(b\"\\nm\\n\\023BatchMatrixBandPart\\022\\n\\n\\005input\\\"\\001T\\022\\r\\n\\tnum_lower\\030\\t\\022\\r\\n\\tnum_upper\\030\\t\\032\\t\\n\\004band\\\"\\001T\\\"\\t\\n\\001T\\022\\004typeB\\026\\010\\016\\022\\022Use MatrixBandPart\\nL\\n\\017BatchMatrixDiag\\022\\r\\n\\010diagonal\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004typeB\\022\\010\\016\\022\\016Use MatrixDiag\\nS\\n\\023BatchMatrixDiagPart\\022\\n\\n\\005input\\\"\\001T\\032\\r\\n\\010diagonal\\\"\\001T\\\"\\t\\n\\001T\\022\\004typeB\\026\\010\\016\\022\\022Use MatrixDiagPart\\n^\\n\\022BatchMatrixSetDiag\\022\\n\\n\\005input\\\"\\001T\\022\\r\\n\\010diagonal\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004typeB\\025\\010\\016\\022\\021Use MatrixSetDiag\\nr\\n\\014BatchToSpace\\022\\n\\n\\005input\\\"\\001T\\022\\r\\n\\005crops\\\"\\004Tidx\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\025\\n\\nblock_size\\022\\003int(\\0010\\002\\\"\\030\\n\\004Tidx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n\\240\\001\\n\\016BatchToSpaceND\\022\\n\\n\\005input\\\"\\001T\\022\\033\\n\\013block_shape\\\"\\014Tblock_shape\\022\\017\\n\\005crops\\\"\\006Tcrops\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\" 
\\n\\014Tblock_shape\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\\"\\032\\n\\006Tcrops\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\np\\n\\007Bitcast\\022\\n\\n\\005input\\\"\\001T\\032\\016\\n\\006output\\\"\\004type\\\"\\\"\\n\\001T\\022\\004type:\\027\\n\\0252\\023\\016\\023\\001\\002\\t\\003\\004\\021\\026\\027\\006\\005\\010\\022\\013\\014\\017\\020\\r\\\"%\\n\\004type\\022\\004type:\\027\\n\\0252\\023\\016\\023\\001\\002\\t\\003\\004\\021\\026\\027\\006\\005\\010\\022\\013\\014\\017\\020\\r\\nA\\n\\rBroadcastArgs\\022\\007\\n\\002s0\\\"\\001T\\022\\007\\n\\002s1\\\"\\001T\\032\\007\\n\\002r0\\\"\\001T\\\"\\025\\n\\001T\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nR\\n\\025BroadcastGradientArgs\\022\\007\\n\\002s0\\\"\\001T\\022\\007\\n\\002s1\\\"\\001T\\032\\007\\n\\002r0\\\"\\001T\\032\\007\\n\\002r1\\\"\\001T\\\"\\025\\n\\001T\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nZ\\n\\013BroadcastTo\\022\\n\\n\\005input\\\"\\001T\\022\\r\\n\\005shape\\\"\\004Tidx\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\030\\n\\004Tidx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nQ\\n\\rCheckNumerics\\022\\013\\n\\006tensor\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\023\\n\\001T\\022\\004type:\\010\\n\\0062\\004\\016\\023\\001\\002\\\"\\021\\n\\007message\\022\\006string\\nN\\n\\006Concat\\022\\016\\n\\nconcat_dim\\030\\003\\022\\016\\n\\006values\\\"\\001T*\\001N\\032\\013\\n\\006output\\\"\\001T\\\"\\014\\n\\001N\\022\\003int(\\0010\\002\\\"\\t\\n\\001T\\022\\004type\\nI\\n\\014ConcatOffset\\022\\016\\n\\nconcat_dim\\030\\003\\022\\014\\n\\005shape\\030\\003*\\001N\\032\\r\\n\\006offset\\030\\003*\\001N\\\"\\014\\n\\001N\\022\\003int(\\0010\\002\\nh\\n\\010ConcatV2\\022\\016\\n\\006values\\\"\\001T*\\001N\\022\\014\\n\\004axis\\\"\\004Tidx\\032\\013\\n\\006output\\\"\\001T\\\"\\014\\n\\001N\\022\\003int(\\0010\\002\\\"\\t\\n\\001T\\022\\004type\\\"\\030\\n\\004Tidx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nY\\n\\022ConjugateTranspose\\022\\006\\n\\001x\\\"\\001T\\022\\r\\n\\004perm\\\"\\005Tperm\\032\\006\\n\\001y\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\031\\n\\005Tperm\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n8\\n\\005Const\\032\\017\\n\\006output\\\"\\005dtype\\\"\\017\\n\\005value\\022\\006tensor\\\"\\r\\n\\005dtype\\022\\004type\\n>\\n\\025DebugGradientIdentity\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\230\\001\\001\\nG\\n\\030DebugGradientRefIdentity\\022\\r\\n\\005input\\\"\\001T\\200\\001\\001\\032\\016\\n\\006output\\\"\\001T\\200\\001\\001\\\"\\t\\n\\001T\\022\\004type\\230\\001\\001\\n(\\n\\010DeepCopy\\022\\006\\n\\001x\\\"\\001T\\032\\006\\n\\001y\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\210\\001\\001\\n\\205\\001\\n\\014DepthToSpace\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\025\\n\\nblock_size\\022\\003int(\\0010\\002\\\":\\n\\013data_format\\022\\006string\\032\\006\\022\\004NHWC:\\033\\n\\031\\022\\004NHWC\\022\\004NCHW\\022\\013NCHW_VECT_C\\n\\235\\001\\n\\nDequantize\\022\\n\\n\\005input\\\"\\001T\\022\\r\\n\\tmin_range\\030\\001\\022\\r\\n\\tmax_range\\030\\001\\032\\n\\n\\006output\\030\\001\\\"\\024\\n\\001T\\022\\004type:\\t\\n\\0072\\005\\013\\014\\r\\017\\020\\\"C\\n\\004mode\\022\\006string\\032\\016\\022\\014MIN_COMBINED:#\\n!\\022\\014MIN_COMBINED\\022\\tMIN_FIRST\\022\\006SCALED\\n;\\n\\004Diag\\022\\r\\n\\010diagonal\\\"
\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\027\\n\\001T\\022\\004type:\\014\\n\\n2\\010\\016\\023\\001\\002\\003\\t\\010\\022\\n>\\n\\010DiagPart\\022\\n\\n\\005input\\\"\\001T\\032\\r\\n\\010diagonal\\\"\\001T\\\"\\027\\n\\001T\\022\\004type:\\014\\n\\n2\\010\\016\\023\\001\\002\\003\\t\\010\\022\\n\\271\\001\\n\\014EditDistance\\022\\026\\n\\022hypothesis_indices\\030\\t\\022\\026\\n\\021hypothesis_values\\\"\\001T\\022\\024\\n\\020hypothesis_shape\\030\\t\\022\\021\\n\\rtruth_indices\\030\\t\\022\\021\\n\\014truth_values\\\"\\001T\\022\\017\\n\\013truth_shape\\030\\t\\032\\n\\n\\006output\\030\\001\\\"\\025\\n\\tnormalize\\022\\004bool\\032\\002(\\001\\\"\\t\\n\\001T\\022\\004type\\nG\\n\\005Empty\\022\\t\\n\\005shape\\030\\003\\032\\017\\n\\006output\\\"\\005dtype\\\"\\r\\n\\005dtype\\022\\004type\\\"\\020\\n\\004init\\022\\004bool\\032\\002(\\000\\210\\001\\001\\nA\\n\\013EnsureShape\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\016\\n\\005shape\\022\\005shape\\\"\\t\\n\\001T\\022\\004type\\nW\\n\\nExpandDims\\022\\n\\n\\005input\\\"\\001T\\022\\013\\n\\003dim\\\"\\004Tdim\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\030\\n\\004Tdim\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n\\274\\001\\n\\023ExtractImagePatches\\022\\013\\n\\006images\\\"\\001T\\032\\014\\n\\007patches\\\"\\001T\\\"\\027\\n\\006ksizes\\022\\tlist(int)(\\0010\\004\\\"\\030\\n\\007strides\\022\\tlist(int)(\\0010\\004\\\"\\026\\n\\005rates\\022\\tlist(int)(\\0010\\004\\\"\\033\\n\\001T\\022\\004type:\\020\\n\\0162\\014\\001\\002\\003\\004\\005\\006\\t\\016\\021\\023\\026\\027\\\"\\\"\\n\\007padding\\022\\006string:\\017\\n\\r\\022\\004SAME\\022\\005VALID\\n\\244\\001\\n\\024ExtractVolumePatches\\022\\n\\n\\005input\\\"\\001T\\032\\014\\n\\007patches\\\"\\001T\\\"\\027\\n\\006ksizes\\022\\tlist(int)(\\0010\\005\\\"\\030\\n\\007strides\\022\\tlist(int)(\\0010\\005\\\"\\033\\n\\001T\\022\\004type:\\020\\n\\0162\\014\\001\\002\\003\\004\\005\\006\\t\\016\\021\\023\\026\\027\\\"\\\"\\n\\007padding\\022\\006string:\\017\\n\\r\\022\\004SAME\\022\\005VALID\\n\\213\\001\\n\\027FakeQuantWithMinMaxArgs\\022\\n\\n\\006inputs\\030\\001\\032\\013\\n\\007outputs\\030\\001\\\"\\023\\n\\003min\\022\\005float\\032\\005%\\000\\000\\300\\300\\\"\\023\\n\\003max\\022\\005float\\032\\005%\\000\\000\\300@\\\"\\023\\n\\010num_bits\\022\\003int\\032\\002\\030\\010\\\"\\030\\n\\014narrow_range\\022\\004bool\\032\\002(\\000\\n\\244\\001\\n\\037FakeQuantWithMinMaxArgsGradient\\022\\r\\n\\tgradients\\030\\001\\022\\n\\n\\006inputs\\030\\001\\032\\r\\n\\tbackprops\\030\\001\\\"\\023\\n\\003min\\022\\005float\\032\\005%\\000\\000\\300\\300\\\"\\023\\n\\003max\\022\\005float\\032\\005%\\000\\000\\300@\\\"\\023\\n\\010num_bits\\022\\003int\\032\\002\\030\\010\\\"\\030\\n\\014narrow_range\\022\\004bool\\032\\002(\\000\\ns\\n\\027FakeQuantWithMinMaxVars\\022\\n\\n\\006inputs\\030\\001\\022\\007\\n\\003min\\030\\001\\022\\007\\n\\003max\\030\\001\\032\\013\\n\\007outputs\\030\\001\\\"\\023\\n\\010num_bits\\022\\003int\\032\\002\\030\\010\\\"\\030\\n\\014narrow_range\\022\\004bool\\032\\002(\\000\\n\\302\\001\\n\\037FakeQuantWithMinMaxVarsGradient\\022\\r\\n\\tgradients\\030\\001\\022\\n\\n\\006inputs\\030\\001\\022\\007\\n\\003min\\030\\001\\022\\007\\n\\003max\\030\\001\\032\\027\\n\\023backprops_wrt_input\\030\\001\\032\\024\\n\\020backprop_wrt_min\\030\\001\\032\\024\\n\\020backprop_wrt_max\\030\\001\\\"\\023\\n\\010num_bits\\022\\003int\\032\\002\\030\\010\\\"\\030\\n\\014narrow_range\
\022\\004bool\\032\\002(\\000\\n}\\n!FakeQuantWithMinMaxVarsPerChannel\\022\\n\\n\\006inputs\\030\\001\\022\\007\\n\\003min\\030\\001\\022\\007\\n\\003max\\030\\001\\032\\013\\n\\007outputs\\030\\001\\\"\\023\\n\\010num_bits\\022\\003int\\032\\002\\030\\010\\\"\\030\\n\\014narrow_range\\022\\004bool\\032\\002(\\000\\n\\314\\001\\n)FakeQuantWithMinMaxVarsPerChannelGradient\\022\\r\\n\\tgradients\\030\\001\\022\\n\\n\\006inputs\\030\\001\\022\\007\\n\\003min\\030\\001\\022\\007\\n\\003max\\030\\001\\032\\027\\n\\023backprops_wrt_input\\030\\001\\032\\024\\n\\020backprop_wrt_min\\030\\001\\032\\024\\n\\020backprop_wrt_max\\030\\001\\\"\\023\\n\\010num_bits\\022\\003int\\032\\002\\030\\010\\\"\\030\\n\\014narrow_range\\022\\004bool\\032\\002(\\000\\n^\\n\\004Fill\\022\\022\\n\\004dims\\\"\\nindex_type\\022\\n\\n\\005value\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\036\\n\\nindex_type\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n\\214\\001\\n\\006Gather\\022\\021\\n\\006params\\\"\\007Tparams\\022\\023\\n\\007indices\\\"\\010Tindices\\032\\021\\n\\006output\\\"\\007Tparams\\\"\\034\\n\\020validate_indices\\022\\004bool\\032\\002(\\001\\\"\\017\\n\\007Tparams\\022\\004type\\\"\\030\\n\\010Tindices\\022\\004type:\\006\\n\\0042\\002\\003\\t\\np\\n\\010GatherNd\\022\\021\\n\\006params\\\"\\007Tparams\\022\\023\\n\\007indices\\\"\\010Tindices\\032\\021\\n\\006output\\\"\\007Tparams\\\"\\017\\n\\007Tparams\\022\\004type\\\"\\030\\n\\010Tindices\\022\\004type:\\006\\n\\0042\\002\\003\\t\\n\\226\\001\\n\\010GatherV2\\022\\021\\n\\006params\\\"\\007Tparams\\022\\023\\n\\007indices\\\"\\010Tindices\\022\\r\\n\\004axis\\\"\\005Taxis\\032\\021\\n\\006output\\\"\\007Tparams\\\"\\017\\n\\007Tparams\\022\\004type\\\"\\030\\n\\010Tindices\\022\\004type:\\006\\n\\0042\\002\\003\\t\\\"\\025\\n\\005Taxis\\022\\004type:\\006\\n\\0042\\002\\003\\t\\n7\\n\\016GuaranteeConst\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\210\\001\\001\\n.\\n\\010Identity\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\n9\\n\\tIdentityN\\022\\n\\n\\005input2\\001T\\032\\013\\n\\006output2\\001T\\\"\\023\\n\\001T\\022\\nlist(type)(\\0010\\001\\n^\\n\\016ImmutableConst\\032\\017\\n\\006tensor\\\"\\005dtype\\\"\\r\\n\\005dtype\\022\\004type\\\"\\016\\n\\005shape\\022\\005shape\\\"\\034\\n\\022memory_region_name\\022\\006string\\n6\\n\\nInplaceAdd\\022\\006\\n\\001x\\\"\\001T\\022\\005\\n\\001i\\030\\003\\022\\006\\n\\001v\\\"\\001T\\032\\006\\n\\001y\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\n6\\n\\nInplaceSub\\022\\006\\n\\001x\\\"\\001T\\022\\005\\n\\001i\\030\\003\\022\\006\\n\\001v\\\"\\001T\\032\\006\\n\\001y\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\n9\\n\\rInplaceUpdate\\022\\006\\n\\001x\\\"\\001T\\022\\005\\n\\001i\\030\\003\\022\\006\\n\\001v\\\"\\001T\\032\\006\\n\\001y\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\n:\\n\\021InvertPermutation\\022\\006\\n\\001x\\\"\\001T\\032\\006\\n\\001y\\\"\\001T\\\"\\025\\n\\001T\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n\\\\\\n\\010ListDiff\\022\\006\\n\\001x\\\"\\001T\\022\\006\\n\\001y\\\"\\001T\\032\\010\\n\\003out\\\"\\001T\\032\\016\\n\\003idx\\\"\\007out_idx\\\"\\t\\n\\001T\\022\\004type\\\"\\033\\n\\007out_idx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nj\\n\\nLowerBound\\022\\022\\n\\rsorted_inputs\\\"\\001T\\022\\013\\n\\006values\\\"\\001T\\032\\022\\n\\006output\\\"\\010out_type\\\"\\t\\n\\001T\\022\\004typ
e\\\"\\034\\n\\010out_type\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nx\\n\\016MatrixBandPart\\022\\n\\n\\005input\\\"\\001T\\022\\023\\n\\tnum_lower\\\"\\006Tindex\\022\\023\\n\\tnum_upper\\\"\\006Tindex\\032\\t\\n\\004band\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\032\\n\\006Tindex\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\n3\\n\\nMatrixDiag\\022\\r\\n\\010diagonal\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\n6\\n\\016MatrixDiagPart\\022\\n\\n\\005input\\\"\\001T\\032\\r\\n\\010diagonal\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\nB\\n\\rMatrixSetDiag\\022\\n\\n\\005input\\\"\\001T\\022\\r\\n\\010diagonal\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\n\\215\\001\\n\\tMirrorPad\\022\\n\\n\\005input\\\"\\001T\\022\\025\\n\\010paddings\\\"\\tTpaddings\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\035\\n\\tTpaddings\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\\"&\\n\\004mode\\022\\006string:\\026\\n\\024\\022\\007REFLECT\\022\\tSYMMETRIC\\n\\221\\001\\n\\rMirrorPadGrad\\022\\n\\n\\005input\\\"\\001T\\022\\025\\n\\010paddings\\\"\\tTpaddings\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\035\\n\\tTpaddings\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\\"&\\n\\004mode\\022\\006string:\\026\\n\\024\\022\\007REFLECT\\022\\tSYMMETRIC\\n\\214\\001\\n\\006OneHot\\022\\r\\n\\007indices\\\"\\002TI\\022\\t\\n\\005depth\\030\\003\\022\\r\\n\\010on_value\\\"\\001T\\022\\016\\n\\toff_value\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\030\\n\\004axis\\022\\003int\\032\\013\\030\\377\\377\\377\\377\\377\\377\\377\\377\\377\\001\\\"\\t\\n\\001T\\022\\004type\\\"\\027\\n\\002TI\\022\\004type\\032\\0020\\t:\\007\\n\\0052\\003\\004\\003\\t\\n8\\n\\010OnesLike\\022\\006\\n\\001x\\\"\\001T\\032\\006\\n\\001y\\\"\\001T\\\"\\034\\n\\001T\\022\\004type:\\021\\n\\0172\\r\\016\\023\\001\\002\\006\\004\\005\\021\\003\\t\\010\\022\\n\\nM\\n\\004Pack\\022\\016\\n\\006values\\\"\\001T*\\001N\\032\\013\\n\\006output\\\"\\001T\\\"\\014\\n\\001N\\022\\003int(\\0010\\001\\\"\\t\\n\\001T\\022\\004type\\\"\\017\\n\\004axis\\022\\003int\\032\\002\\030\\000\\n_\\n\\003Pad\\022\\n\\n\\005input\\\"\\001T\\022\\025\\n\\010paddings\\\"\\tTpaddings\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\035\\n\\tTpaddings\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nw\\n\\005PadV2\\022\\n\\n\\005input\\\"\\001T\\022\\025\\n\\010paddings\\\"\\tTpaddings\\022\\024\\n\\017constant_values\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\035\\n\\tTpaddings\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nV\\n\\016ParallelConcat\\022\\016\\n\\006values\\\"\\001T*\\001N\\032\\013\\n\\006output\\\"\\001T\\\"\\014\\n\\001N\\022\\003int(\\0010\\001\\\"\\t\\n\\001T\\022\\004type\\\"\\016\\n\\005shape\\022\\005shape\\nC\\n\\013Placeholder\\032\\017\\n\\006output\\\"\\005dtype\\\"\\r\\n\\005dtype\\022\\004type\\\"\\024\\n\\005shape\\022\\005shape\\032\\004:\\002\\030\\001\\nw\\n\\rPlaceholderV2\\032\\017\\n\\006output\\\"\\005dtype\\\"\\r\\n\\005dtype\\022\\004type\\\"\\016\\n\\005shape\\022\\005shapeB6\\010\\027\\0222Placeholder now behaves the same as 
PlaceholderV2.\\nX\\n\\026PlaceholderWithDefault\\022\\016\\n\\005input\\\"\\005dtype\\032\\017\\n\\006output\\\"\\005dtype\\\"\\r\\n\\005dtype\\022\\004type\\\"\\016\\n\\005shape\\022\\005shape\\nL\\n\\017PreventGradient\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\025\\n\\007message\\022\\006string\\032\\002\\022\\000\\n\\354\\001\\n\\025QuantizeAndDequantize\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\030\\n\\014signed_input\\022\\004bool\\032\\002(\\001\\\"\\023\\n\\010num_bits\\022\\003int\\032\\002\\030\\010\\\"\\027\\n\\013range_given\\022\\004bool\\032\\002(\\000\\\"\\031\\n\\tinput_min\\022\\005float\\032\\005%\\000\\000\\000\\000\\\"\\031\\n\\tinput_max\\022\\005float\\032\\005%\\000\\000\\000\\000\\\"\\023\\n\\001T\\022\\004type:\\010\\n\\0062\\004\\016\\023\\001\\002B\\'\\010\\026\\022#Replaced by QuantizeAndDequantizeV2\\n\\257\\001\\n\\027QuantizeAndDequantizeV2\\022\\n\\n\\005input\\\"\\001T\\022\\016\\n\\tinput_min\\\"\\001T\\022\\016\\n\\tinput_max\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\030\\n\\014signed_input\\022\\004bool\\032\\002(\\001\\\"\\023\\n\\010num_bits\\022\\003int\\032\\002\\030\\010\\\"\\027\\n\\013range_given\\022\\004bool\\032\\002(\\000\\\"\\023\\n\\001T\\022\\004type:\\010\\n\\0062\\004\\016\\023\\001\\002\\n\\250\\001\\n\\027QuantizeAndDequantizeV3\\022\\n\\n\\005input\\\"\\001T\\022\\016\\n\\tinput_min\\\"\\001T\\022\\016\\n\\tinput_max\\\"\\001T\\022\\014\\n\\010num_bits\\030\\003\\032\\013\\n\\006output\\\"\\001T\\\"\\030\\n\\014signed_input\\022\\004bool\\032\\002(\\001\\\"\\027\\n\\013range_given\\022\\004bool\\032\\002(\\001\\\"\\023\\n\\001T\\022\\004type:\\010\\n\\0062\\004\\016\\023\\001\\002\\n\\221\\002\\n\\nQuantizeV2\\022\\t\\n\\005input\\030\\001\\022\\r\\n\\tmin_range\\030\\001\\022\\r\\n\\tmax_range\\030\\001\\032\\013\\n\\006output\\\"\\001T\\032\\016\\n\\noutput_min\\030\\001\\032\\016\\n\\noutput_max\\030\\001\\\"\\024\\n\\001T\\022\\004type:\\t\\n\\0072\\005\\013\\014\\r\\017\\020\\\"C\\n\\004mode\\022\\006string\\032\\016\\022\\014MIN_COMBINED:#\\n!\\022\\014MIN_COMBINED\\022\\tMIN_FIRST\\022\\006SCALED\\\"R\\n\\nround_mode\\022\\006string\\032\\025\\022\\023HALF_AWAY_FROM_ZERO:%\\n#\\022\\023HALF_AWAY_FROM_ZERO\\022\\014HALF_TO_EVEN\\n\\236\\001\\n\\017QuantizedConcat\\022\\016\\n\\nconcat_dim\\030\\003\\022\\016\\n\\006values\\\"\\001T*\\001N\\022\\021\\n\\ninput_mins\\030\\001*\\001N\\022\\022\\n\\013input_maxes\\030\\001*\\001N\\032\\013\\n\\006output\\\"\\001T\\032\\016\\n\\noutput_min\\030\\001\\032\\016\\n\\noutput_max\\030\\001\\\"\\014\\n\\001N\\022\\003int(\\0010\\002\\\"\\t\\n\\001T\\022\\004type\\n\\205\\002\\n\\025QuantizedInstanceNorm\\022\\006\\n\\001x\\\"\\001T\\022\\t\\n\\005x_min\\030\\001\\022\\t\\n\\005x_max\\030\\001\\032\\006\\n\\001y\\\"\\001T\\032\\t\\n\\005y_min\\030\\001\\032\\t\\n\\005y_max\\030\\001\\\"\\024\\n\\001T\\022\\004type:\\t\\n\\0072\\005\\013\\014\\r\\017\\020\\\"\\036\\n\\022output_range_given\\022\\004bool\\032\\002(\\000\\\"\\033\\n\\013given_y_min\\022\\005float\\032\\005%\\000\\000\\000\\000\\\"\\033\\n\\013given_y_max\\022\\005float\\032\\005%\\000\\000\\000\\000\\\" 
\\n\\020variance_epsilon\\022\\005float\\032\\005%\\254\\305\\'7\\\"\\036\\n\\016min_separation\\022\\005float\\032\\005%o\\022\\203:\\n\\242\\001\\n\\020QuantizedReshape\\022\\013\\n\\006tensor\\\"\\001T\\022\\017\\n\\005shape\\\"\\006Tshape\\022\\r\\n\\tinput_min\\030\\001\\022\\r\\n\\tinput_max\\030\\001\\032\\013\\n\\006output\\\"\\001T\\032\\016\\n\\noutput_min\\030\\001\\032\\016\\n\\noutput_max\\030\\001\\\"\\t\\n\\001T\\022\\004type\\\"\\032\\n\\006Tshape\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n)\\n\\004Rank\\022\\n\\n\\005input\\\"\\001T\\032\\n\\n\\006output\\030\\003\\\"\\t\\n\\001T\\022\\004type\\n:\\n\\013RefIdentity\\022\\r\\n\\005input\\\"\\001T\\200\\001\\001\\032\\016\\n\\006output\\\"\\001T\\200\\001\\001\\\"\\t\\n\\001T\\022\\004type\\230\\001\\001\\n[\\n\\007Reshape\\022\\013\\n\\006tensor\\\"\\001T\\022\\017\\n\\005shape\\\"\\006Tshape\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\032\\n\\006Tshape\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n\\203\\002\\n\\032ResourceStridedSliceAssign\\022\\007\\n\\003ref\\030\\024\\022\\016\\n\\005begin\\\"\\005Index\\022\\014\\n\\003end\\\"\\005Index\\022\\020\\n\\007strides\\\"\\005Index\\022\\n\\n\\005value\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\025\\n\\005Index\\022\\004type:\\006\\n\\0042\\002\\003\\t\\\"\\025\\n\\nbegin_mask\\022\\003int\\032\\002\\030\\000\\\"\\023\\n\\010end_mask\\022\\003int\\032\\002\\030\\000\\\"\\030\\n\\rellipsis_mask\\022\\003int\\032\\002\\030\\000\\\"\\030\\n\\rnew_axis_mask\\022\\003int\\032\\002\\030\\000\\\"\\033\\n\\020shrink_axis_mask\\022\\003int\\032\\002\\030\\000\\210\\001\\001\\nK\\n\\007Reverse\\022\\013\\n\\006tensor\\\"\\001T\\022\\010\\n\\004dims\\030\\n\\032\\013\\n\\006output\\\"\\001T\\\"\\034\\n\\001T\\022\\004type:\\021\\n\\0172\\r\\004\\006\\021\\005\\003\\t\\n\\023\\001\\002\\010\\022\\007\\n\\212\\001\\n\\017ReverseSequence\\022\\n\\n\\005input\\\"\\001T\\022\\023\\n\\013seq_lengths\\\"\\004Tlen\\032\\013\\n\\006output\\\"\\001T\\\"\\016\\n\\007seq_dim\\022\\003int\\\"\\024\\n\\tbatch_dim\\022\\003int\\032\\002\\030\\000\\\"\\t\\n\\001T\\022\\004type\\\"\\030\\n\\004Tlen\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\nl\\n\\tReverseV2\\022\\013\\n\\006tensor\\\"\\001T\\022\\014\\n\\004axis\\\"\\004Tidx\\032\\013\\n\\006output\\\"\\001T\\\"\\030\\n\\004Tidx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\\"\\035\\n\\001T\\022\\004type:\\022\\n\\0202\\016\\004\\006\\021\\005\\003\\t\\n\\016\\023\\001\\002\\010\\022\\007\\ns\\n\\tScatterNd\\022\\023\\n\\007indices\\\"\\010Tindices\\022\\014\\n\\007updates\\\"\\001T\\022\\021\\n\\005shape\\\"\\010Tindices\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\030\\n\\010Tindices\\022\\004type:\\006\\n\\0042\\002\\003\\t\\n\\222\\001\\n\\027ScatterNdNonAliasingAdd\\022\\n\\n\\005input\\\"\\001T\\022\\023\\n\\007indices\\\"\\010Tindices\\022\\014\\n\\007updates\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"!\\n\\001T\\022\\004type:\\026\\n\\0242\\022\\001\\002\\003\\004\\005\\006\\010\\t\\013\\014\\r\\016\\021\\022\\023\\026\\027\\n\\\"\\030\\n\\010Tindices\\022\\004type:\\006\\n\\0042\\002\\003\\t\\nP\\n\\005Shape\\022\\n\\n\\005input\\\"\\001T\\032\\022\\n\\006output\\\"\\010out_type\\\"\\t\\n\\001T\\022\\004type\\\"\\034\\n\\010out_type\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\ne\\n\\006ShapeN\\022\\r\\n\\005input\\\"\\001T*\\001N\\032\\025\\n\\006output\\\"\\010out_type*\\001N\\\"\\014\\n\\001N\\022\\003int(\\0010\\
001\\\"\\t\\n\\001T\\022\\004type\\\"\\034\\n\\010out_type\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nO\\n\\004Size\\022\\n\\n\\005input\\\"\\001T\\032\\022\\n\\006output\\\"\\010out_type\\\"\\t\\n\\001T\\022\\004type\\\"\\034\\n\\010out_type\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\na\\n\\005Slice\\022\\n\\n\\005input\\\"\\001T\\022\\016\\n\\005begin\\\"\\005Index\\022\\r\\n\\004size\\\"\\005Index\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\025\\n\\005Index\\022\\004type:\\006\\n\\0042\\002\\003\\t\\n.\\n\\010Snapshot\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\n\\177\\n\\014SpaceToBatch\\022\\n\\n\\005input\\\"\\001T\\022\\025\\n\\010paddings\\\"\\tTpaddings\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\035\\n\\tTpaddings\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\\"\\025\\n\\nblock_size\\022\\003int(\\0010\\002\\n\\251\\001\\n\\016SpaceToBatchND\\022\\n\\n\\005input\\\"\\001T\\022\\033\\n\\013block_shape\\\"\\014Tblock_shape\\022\\025\\n\\010paddings\\\"\\tTpaddings\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\" \\n\\014Tblock_shape\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\\"\\035\\n\\tTpaddings\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n\\205\\001\\n\\014SpaceToDepth\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\025\\n\\nblock_size\\022\\003int(\\0010\\002\\\":\\n\\013data_format\\022\\006string\\032\\006\\022\\004NHWC:\\033\\n\\031\\022\\004NHWC\\022\\004NCHW\\022\\013NCHW_VECT_C\\n[\\n\\005Split\\022\\r\\n\\tsplit_dim\\030\\003\\022\\n\\n\\005value\\\"\\001T\\032\\026\\n\\006output\\\"\\001T*\\tnum_split\\\"\\024\\n\\tnum_split\\022\\003int(\\0010\\001\\\"\\t\\n\\001T\\022\\004type\\n\\213\\001\\n\\006SplitV\\022\\n\\n\\005value\\\"\\001T\\022\\023\\n\\013size_splits\\\"\\004Tlen\\022\\r\\n\\tsplit_dim\\030\\003\\032\\026\\n\\006output\\\"\\001T*\\tnum_split\\\"\\024\\n\\tnum_split\\022\\003int(\\0010\\001\\\"\\t\\n\\001T\\022\\004type\\\"\\030\\n\\004Tlen\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\nN\\n\\007Squeeze\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\037\\n\\014squeeze_dims\\022\\tlist(int)\\032\\002\\n\\000(\\001\\n2\\n\\014StopGradient\\022\\n\\n\\005input\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\n\\366\\001\\n\\014StridedSlice\\022\\n\\n\\005input\\\"\\001T\\022\\016\\n\\005begin\\\"\\005Index\\022\\014\\n\\003end\\\"\\005Index\\022\\020\\n\\007strides\\\"\\005Index\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\025\\n\\005Index\\022\\004type:\\006\\n\\0042\\002\\003\\t\\\"\\025\\n\\nbegin_mask\\022\\003int\\032\\002\\030\\000\\\"\\023\\n\\010end_mask\\022\\003int\\032\\002\\030\\000\\\"\\030\\n\\rellipsis_mask\\022\\003int\\032\\002\\030\\000\\\"\\030\\n\\rnew_axis_mask\\022\\003int\\032\\002\\030\\000\\\"\\033\\n\\020shrink_axis_mask\\022\\003int\\032\\002\\030\\000\\n\\220\\002\\n\\022StridedSliceAssign\\022\\013\\n\\003ref\\\"\\001T\\200\\001\\001\\022\\016\\n\\005begin\\\"\\005Index\\022\\014\\n\\003end\\\"\\005Index\\022\\020\\n\\007strides\\\"\\005Index\\022\\n\\n\\005value\\\"\\001T\\032\\022\\n\\noutput_ref\\\"\\001T\\200\\001\\001\\\"\\t\\n\\001T\\022\\004type\\\"\\025\\n\\005Index\\022\\004type:\\006\\n\\0042\\002\\003\\t\\\"\\025\\n\\nbegin_mask\\022\\003int\\032\\002\\030\\000\\\"
\\023\\n\\010end_mask\\022\\003int\\032\\002\\030\\000\\\"\\030\\n\\rellipsis_mask\\022\\003int\\032\\002\\030\\000\\\"\\030\\n\\rnew_axis_mask\\022\\003int\\032\\002\\030\\000\\\"\\033\\n\\020shrink_axis_mask\\022\\003int\\032\\002\\030\\000\\n\\207\\002\\n\\020StridedSliceGrad\\022\\016\\n\\005shape\\\"\\005Index\\022\\016\\n\\005begin\\\"\\005Index\\022\\014\\n\\003end\\\"\\005Index\\022\\020\\n\\007strides\\\"\\005Index\\022\\007\\n\\002dy\\\"\\001T\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\025\\n\\005Index\\022\\004type:\\006\\n\\0042\\002\\003\\t\\\"\\025\\n\\nbegin_mask\\022\\003int\\032\\002\\030\\000\\\"\\023\\n\\010end_mask\\022\\003int\\032\\002\\030\\000\\\"\\030\\n\\rellipsis_mask\\022\\003int\\032\\002\\030\\000\\\"\\030\\n\\rnew_axis_mask\\022\\003int\\032\\002\\030\\000\\\"\\033\\n\\020shrink_axis_mask\\022\\003int\\032\\002\\030\\000\\nc\\n\\004Tile\\022\\n\\n\\005input\\\"\\001T\\022\\027\\n\\tmultiples\\\"\\nTmultiples\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\036\\n\\nTmultiples\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nm\\n\\010TileGrad\\022\\n\\n\\005input\\\"\\001T\\022\\r\\n\\tmultiples\\030\\003\\032\\013\\n\\006output\\\"\\001T\\\"\\t\\n\\001T\\022\\004typeB.\\010\\003\\022*TileGrad has been replaced with reduce_sum\\nP\\n\\tTranspose\\022\\006\\n\\001x\\\"\\001T\\022\\r\\n\\004perm\\\"\\005Tperm\\032\\006\\n\\001y\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\\\"\\031\\n\\005Tperm\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nP\\n\\006Unique\\022\\006\\n\\001x\\\"\\001T\\032\\006\\n\\001y\\\"\\001T\\032\\016\\n\\003idx\\\"\\007out_idx\\\"\\t\\n\\001T\\022\\004type\\\"\\033\\n\\007out_idx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n|\\n\\010UniqueV2\\022\\006\\n\\001x\\\"\\001T\\022\\r\\n\\004axis\\\"\\005Taxis\\032\\006\\n\\001y\\\"\\001T\\032\\016\\n\\003idx\\\"\\007out_idx\\\"\\t\\n\\001T\\022\\004type\\\"\\031\\n\\005Taxis\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\\"\\033\\n\\007out_idx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nl\\n\\020UniqueWithCounts\\022\\006\\n\\001x\\\"\\001T\\032\\006\\n\\001y\\\"\\001T\\032\\016\\n\\003idx\\\"\\007out_idx\\032\\020\\n\\005count\\\"\\007out_idx\\\"\\t\\n\\001T\\022\\004type\\\"\\033\\n\\007out_idx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\n\\230\\001\\n\\022UniqueWithCountsV2\\022\\006\\n\\001x\\\"\\001T\\022\\r\\n\\004axis\\\"\\005Taxis\\032\\006\\n\\001y\\\"\\001T\\032\\016\\n\\003idx\\\"\\007out_idx\\032\\020\\n\\005count\\\"\\007out_idx\\\"\\t\\n\\001T\\022\\004type\\\"\\031\\n\\005Taxis\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\\"\\033\\n\\007out_idx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nP\\n\\006Unpack\\022\\n\\n\\005value\\\"\\001T\\032\\020\\n\\006output\\\"\\001T*\\003num\\\"\\014\\n\\003num\\022\\003int(\\001\\\"\\t\\n\\001T\\022\\004type\\\"\\017\\n\\004axis\\022\\003int\\032\\002\\030\\000\\nW\\n\\014UnravelIndex\\022\\017\\n\\007indices\\\"\\004Tidx\\022\\014\\n\\004dims\\\"\\004Tidx\\032\\016\\n\\006output\\\"\\004Tidx\\\"\\030\\n\\004Tidx\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nj\\n\\nUpperBound\\022\\022\\n\\rsorted_inputs\\\"\\001T\\022\\013\\n\\006values\\\"\\001T\\032\\022\\n\\006output\\\"\\010out_type\\\"\\t\\n\\001T\\022\\004type\\\"\\034\\n\\010out_type\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\nE\\n\\005Where\\022\\n\\n\\005input\\\"\\001T\\032\\t\\n\\005index\\030\\t\\\"%\\n\\001T
\\022\\004type\\032\\0020\\n:\\026\\n\\0242\\022\\001\\002\\003\\004\\005\\006\\010\\t\\013\\014\\r\\016\\021\\022\\023\\026\\027\\n\\n&\\n\\tZerosLike\\022\\006\\n\\001x\\\"\\001T\\032\\006\\n\\001y\\\"\\001T\\\"\\t\\n\\001T\\022\\004type\")\r\n"
] | [
[
"tensorflow.core.framework.op_def_pb2.OpList",
"tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute",
"tensorflow.python.eager.execute.make_str",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.eager.execute.make_shape",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.execute.execute",
"tensorflow.python.eager.execute.make_type",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.eager.execute.record_gradient",
"tensorflow.python.eager.core._status_to_exception",
"tensorflow.python.framework.op_def_registry.register_op_list",
"tensorflow.python.eager.execute.args_to_matching_eager",
"tensorflow.python.eager.execute.make_int",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.op_def_library.OpDefLibrary",
"tensorflow.python.eager.execute.make_tensor",
"tensorflow.python.eager.execute.convert_to_mixed_eager_tensors",
"tensorflow.python.eager.execute.make_bool",
"tensorflow.python.framework.ops.convert_n_to_tensor",
"tensorflow.python.eager.execute.make_float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7"
]
}
] |
bzhai/Ubi-SleepNet | [
"27837827dec608d06659421d073872fb1f68453e"
] | [
"data_loader/raw_data_loader.py"
] | [
"from utilities.utils import build_windowed_data\nfrom utilities.utils import load_h5_df_train_test_dataset, get_data, cast_sleep_stages\nfrom sleep_stage_config import *\nfrom sklearn.model_selection import train_test_split\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport pandas as pd\nimport numpy as np\n\n\nclass WindowedFrameAppleRAWDataLoader(torch.utils.data.Dataset):\n def __init__(self, acc_data, hrv_data, target, idx, transform=None):\n self.acc_data = torch.from_numpy(acc_data).float()\n self.acc_data = self.acc_data.permute(0, 2, 1)\n self.hrv_data = torch.from_numpy(hrv_data).float()\n self.hrv_data = self.hrv_data.permute(0, 2, 1) # set it to batch_num, channel, time_dim\n self.idx = torch.from_numpy(idx)\n self.target = torch.from_numpy(target).long()\n self.transform = transform\n\n def __getitem__(self, index):\n hrv_x = self.hrv_data[index]\n acc_x = self.acc_data[index]\n y = self.target[index]\n i = self.idx[index]\n return acc_x, hrv_x, y, i\n\n def __len__(self):\n return len(self.target)\n\n\nclass WindowedFrameMESARAWDataLoader(torch.utils.data.Dataset):\n def __init__(self, data, target, idx, transform=None):\n self.data = torch.from_numpy(data).float()\n self.data = self.data.permute(0, 2, 1) # set it to batch_num, channel, time_dim\n self.idx = torch.from_numpy(idx)\n self.target = torch.from_numpy(target).long()\n self.transform = transform\n\n def __getitem__(self, index):\n x = self.data[index]\n y = self.target[index]\n i = self.idx[index]\n if self.transform:\n x = self.transform(x)\n return x, y, i\n\n def __len__(self):\n return len(self.data)\n\n\ndef get_raw_dataloader_by_id(pid, cfg: Config, shuffle, batch_size, data_set, seq_len, apple_acc_hz=1):\n import h5py as h5py\n if data_set == \"apple_raw\":\n pid_raw_acc_path = os.path.join(cfg.APPLE_CROPPED_RAW_PATH, f\"{str(pid)}_cleaned_resampled_\"\n f\"{str(apple_acc_hz)}_hz.out\")\n raw_acc = pd.read_csv(pid_raw_acc_path, delimiter=' ', header=None).values\n raw_acc = raw_acc[:raw_acc.shape[0]-30, 1:]\n outputs = []\n for i in np.arange(3):\n sig = raw_acc[:, i].reshape(-1, 30) # e.g. 200 x 30\n out = build_windowed_data(sig=sig, sampling_rate=1, epoch_len=30, win_len=seq_len+1)\n assert out.shape == (sig.shape[0], 30*(seq_len+1))\n outputs.append(np.expand_dims(out, -1))\n raw_acc_x = np.concatenate(outputs, axis=-1)\n cache_path = cfg.APPLE_LOOCV_ALL_WINDOWED % seq_len\n with h5py.File(cache_path, 'r') as data:\n df_data = data[\"df_values\"][:]\n x = data[\"x\"][:]\n y = data[\"y\"][:]\n columns = data[\"columns\"][:].astype(str).tolist()\n data.close()\n df = pd.DataFrame(df_data, columns=columns)\n pid_idx = df[df.pid == pid]['window_idx'].values.astype(int)\n x_hrv = x[pid_idx, :, :][:, :, 1:] # remove the activity counts only keep the hrv features\n y_stage = y[pid_idx]\n data_ds = WindowedFrameAppleRAWDataLoader(raw_acc_x, x_hrv, y_stage, pid_idx)\n data_loader = DataLoader(\n data_ds,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=0,\n pin_memory=torch.cuda.is_available()\n )\n return data_loader\n\n\ndef get_raw_test_df(pid, cfg: Config, dataset, num_classes, seq_len):\n import h5py as h5py\n if dataset == \"apple_raw\":\n with h5py.File(cfg.APPLE_LOOCV_ALL_WINDOWED % seq_len, 'r') as data:\n df_value = data[\"df_values\"][:]\n df_columns = data['columns'][:].astype(str).tolist()\n data.close()\n df_test = pd.DataFrame(df_value, columns=df_columns)\n df_test = df_test[df_test['pid'] == pid].copy(deep=True)\n return df_test\n\n"
] | [
[
"pandas.read_csv",
"numpy.expand_dims",
"numpy.arange",
"torch.from_numpy",
"pandas.DataFrame",
"numpy.concatenate",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
thangvubk/SphereRPN | [
"9b154256774437bb23d81e22990d350555d39b81"
] | [
"model/pointgroup/backbone.py"
] | [
"import torch\nimport torch.nn as nn\nimport spconv\nfrom spconv.modules import SparseModule\nfrom collections import OrderedDict\n\n\nclass ResidualBlock(SparseModule):\n def __init__(self, in_channels, out_channels, norm_fn, indice_key=None):\n super().__init__()\n\n if in_channels == out_channels:\n self.i_branch = spconv.SparseSequential(\n nn.Identity()\n )\n else:\n self.i_branch = spconv.SparseSequential(\n spconv.SubMConv3d(in_channels, out_channels, kernel_size=1, bias=False)\n )\n\n self.conv_branch = spconv.SparseSequential(\n norm_fn(in_channels),\n nn.ReLU(),\n spconv.SubMConv3d(in_channels, out_channels, kernel_size=3, padding=1, bias=False, indice_key=indice_key),\n norm_fn(out_channels),\n nn.ReLU(),\n spconv.SubMConv3d(out_channels, out_channels, kernel_size=3, padding=1, bias=False, indice_key=indice_key)\n )\n\n def forward(self, input):\n identity = spconv.SparseConvTensor(input.features, input.indices, input.spatial_shape, input.batch_size)\n\n output = self.conv_branch(input)\n output.features += self.i_branch(identity).features\n\n return output\n\n\nclass VGGBlock(SparseModule):\n def __init__(self, in_channels, out_channels, norm_fn, indice_key=None):\n super().__init__()\n\n self.conv_layers = spconv.SparseSequential(\n norm_fn(in_channels),\n nn.ReLU(),\n spconv.SubMConv3d(in_channels, out_channels, kernel_size=3, padding=1, bias=False, indice_key=indice_key)\n )\n\n def forward(self, input):\n return self.conv_layers(input)\n\n\nclass UBlock(nn.Module):\n def __init__(self, nPlanes, norm_fn, block_reps, block, indice_key_id=1):\n\n super().__init__()\n\n self.nPlanes = nPlanes\n\n blocks = {'block{}'.format(i): block(nPlanes[0], nPlanes[0], norm_fn, indice_key='subm{}'.format(indice_key_id)) for i in range(block_reps)}\n blocks = OrderedDict(blocks)\n self.blocks = spconv.SparseSequential(blocks)\n\n if len(nPlanes) > 1:\n self.conv = spconv.SparseSequential(\n norm_fn(nPlanes[0]),\n nn.ReLU(),\n spconv.SparseConv3d(nPlanes[0], nPlanes[1], kernel_size=2, stride=2, bias=False, indice_key='spconv{}'.format(indice_key_id))\n )\n\n self.u = UBlock(nPlanes[1:], norm_fn, block_reps, block, indice_key_id=indice_key_id+1)\n\n self.deconv = spconv.SparseSequential(\n norm_fn(nPlanes[1]),\n nn.ReLU(),\n spconv.SparseInverseConv3d(nPlanes[1], nPlanes[0], kernel_size=2, bias=False, indice_key='spconv{}'.format(indice_key_id))\n )\n\n blocks_tail = {}\n for i in range(block_reps):\n blocks_tail['block{}'.format(i)] = block(nPlanes[0] * (2 - i), nPlanes[0], norm_fn, indice_key='subm{}'.format(indice_key_id))\n blocks_tail = OrderedDict(blocks_tail)\n self.blocks_tail = spconv.SparseSequential(blocks_tail)\n\n def forward(self, input):\n output = self.blocks(input)\n identity = spconv.SparseConvTensor(output.features, output.indices, output.spatial_shape, output.batch_size)\n\n if len(self.nPlanes) > 1:\n output_decoder = self.conv(output)\n output_decoder = self.u(output_decoder)\n output_decoder = self.deconv(output_decoder)\n\n output.features = torch.cat((identity.features, output_decoder.features), dim=1)\n\n output = self.blocks_tail(output)\n\n return output\n\n\n\n"
] | [
[
"torch.nn.ReLU",
"torch.nn.Identity",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ofgulban/segmentator | [
"343a410bf7d34cad267b1a95b3b66a8020dfdc5e"
] | [
"segmentator/segmentator_main.py"
] | [
"#!/usr/bin/env python\n\"\"\"Processing input and plotting.\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\nimport segmentator.config as cfg\nimport matplotlib\nmatplotlib.use(cfg.matplotlib_backend)\nprint(\"Matplotlib backend: {}\".format(matplotlib.rcParams['backend']))\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom matplotlib.widgets import Slider, Button, LassoSelector\nfrom matplotlib import path\nfrom nibabel import load\nfrom segmentator.utils import map_ima_to_2D_hist, prep_2D_hist\nfrom segmentator.utils import truncate_range, scale_range, check_data\nfrom segmentator.utils import set_gradient_magnitude\nfrom segmentator.utils import export_gradient_magnitude_image\nfrom segmentator.gui_utils import sector_mask, responsiveObj\nfrom segmentator.config_gui import palette, axcolor, hovcolor\n\n#\n\"\"\"Data Processing\"\"\"\nnii = load(cfg.filename)\norig, dims = check_data(nii.get_data(), cfg.force_original_precision)\n# Save min and max truncation thresholds to be used in axis labels\nif np.isnan(cfg.valmin) or np.isnan(cfg.valmax):\n orig, pMin, pMax = truncate_range(orig, percMin=cfg.perc_min,\n percMax=cfg.perc_max)\nelse: # TODO: integrate this into truncate range function\n orig[orig < cfg.valmin] = cfg.valmin\n orig[orig > cfg.valmax] = cfg.valmax\n pMin, pMax = cfg.valmin, cfg.valmax\n\n# Continue with scaling the original truncated image and recomputing gradient\norig = scale_range(orig, scale_factor=cfg.scale, delta=0.0001)\ngra = set_gradient_magnitude(orig, cfg.gramag)\nif cfg.export_gramag:\n export_gradient_magnitude_image(gra, nii.get_filename(), cfg.gramag,\n nii.affine)\n# Reshape for voxel-wise operations\nima = np.copy(orig.flatten())\ngra = gra.flatten()\n\n#\n\"\"\"Plots\"\"\"\nprint(\"Preparing GUI...\")\n# Plot 2D histogram\nfig = plt.figure(facecolor='0.775')\nax = fig.add_subplot(121)\n\ncounts, volHistH, d_min, d_max, nr_bins, bin_edges \\\n = prep_2D_hist(ima, gra, discard_zeros=cfg.discard_zeros)\n\n# Set x-y axis range to the same (x-axis range)\nax.set_xlim(d_min, d_max)\nax.set_ylim(d_min, d_max)\nax.set_xlabel(\"Intensity f(x)\")\nax.set_ylabel(\"Gradient Magnitude f'(x)\")\nax.set_title(\"2D Histogram\")\n\n# Plot colorbar for 2D hist\nvolHistH.set_norm(LogNorm(vmax=np.power(10, cfg.cbar_init)))\nfig.colorbar(volHistH, fraction=0.046, pad=0.04) # magical scaling\n\n# Plot 3D ima by default\nax2 = fig.add_subplot(122)\nsliceNr = int(0.5*dims[2])\nimaSlcH = ax2.imshow(orig[:, :, sliceNr], cmap=plt.cm.gray, vmin=ima.min(),\n vmax=ima.max(), interpolation='none',\n extent=[0, dims[1], dims[0], 0], zorder=0)\n\nimaSlcMsk = np.ones(dims[0:2])\nimaSlcMskH = ax2.imshow(imaSlcMsk, cmap=palette, vmin=0.1,\n interpolation='none', alpha=0.5,\n extent=[0, dims[1], dims[0], 0], zorder=1)\n\n# Adjust subplots on figure\nbottom = 0.30\nfig.subplots_adjust(bottom=bottom)\nfig.canvas.set_window_title(nii.get_filename())\nplt.axis('off')\n\n#\n\"\"\"Initialisation\"\"\"\n# Create first instance of sector mask\nsectorObj = sector_mask((nr_bins, nr_bins), cfg.init_centre, cfg.init_radius,\n cfg.init_theta)\n\n# Draw sector mask for the first time\nvolHistMaskH, volHistMask = sectorObj.draw(ax, cmap=palette, alpha=0.2,\n vmin=0.1, interpolation='nearest',\n origin='lower', zorder=1,\n extent=[0, nr_bins, 0, nr_bins])\n\n# Initiate a flexible figure object, pass to it useful properties\nidxLasso = np.zeros(nr_bins*nr_bins, dtype=bool)\nlassoSwitchCount = 0\nlassoErase = 1 # 1 for drawing, 0 for erasing\nflexFig 
= responsiveObj(figure=ax.figure, axes=ax.axes, axes2=ax2.axes,\n segmType='main', orig=orig, nii=nii,\n sectorObj=sectorObj,\n nrBins=nr_bins,\n sliceNr=sliceNr,\n imaSlcH=imaSlcH,\n imaSlcMsk=imaSlcMsk, imaSlcMskH=imaSlcMskH,\n volHistMask=volHistMask, volHistMaskH=volHistMaskH,\n contains=volHistMaskH.contains,\n counts=counts,\n idxLasso=idxLasso,\n lassoSwitchCount=lassoSwitchCount,\n lassoErase=lassoErase)\n\n# Make the figure responsive to clicks\nflexFig.connect()\nima2volHistMap = map_ima_to_2D_hist(xinput=ima, yinput=gra, bins_arr=bin_edges)\nflexFig.invHistVolume = np.reshape(ima2volHistMap, dims)\nima, gra = None, None\n\n#\n\"\"\"Sliders and Buttons\"\"\"\n# Colorbar slider\naxHistC = plt.axes([0.15, bottom-0.20, 0.25, 0.025], facecolor=axcolor)\nflexFig.sHistC = Slider(axHistC, 'Colorbar', 1, cfg.cbar_max,\n valinit=cfg.cbar_init, valfmt='%0.1f')\n\n# Image browser slider\naxSliceNr = plt.axes([0.6, bottom-0.15, 0.25, 0.025], facecolor=axcolor)\nflexFig.sSliceNr = Slider(axSliceNr, 'Slice', 0, 0.999, valinit=0.5,\n valfmt='%0.2f')\n\n# Theta sliders\naThetaMin = plt.axes([0.15, bottom-0.10, 0.25, 0.025], facecolor=axcolor)\nflexFig.sThetaMin = Slider(aThetaMin, 'ThetaMin', 0, 359.9,\n valinit=cfg.init_theta[0], valfmt='%0.1f')\naThetaMax = plt.axes([0.15, bottom-0.15, 0.25, 0.025], facecolor=axcolor)\nflexFig.sThetaMax = Slider(aThetaMax, 'ThetaMax', 0, 359.9,\n valinit=cfg.init_theta[1]-0.1, valfmt='%0.1f')\n\n# Cycle button\ncycleax = plt.axes([0.55, bottom-0.2475, 0.075, 0.0375])\nflexFig.bCycle = Button(cycleax, 'Cycle',\n color=axcolor, hovercolor=hovcolor)\n\n# Rotate button\nrotateax = plt.axes([0.55, bottom-0.285, 0.075, 0.0375])\nflexFig.bRotate = Button(rotateax, 'Rotate',\n color=axcolor, hovercolor=hovcolor)\n\n# Reset button\nresetax = plt.axes([0.65, bottom-0.285, 0.075, 0.075])\nflexFig.bReset = Button(resetax, 'Reset', color=axcolor, hovercolor=hovcolor)\n\n# Export nii button\nexportax = plt.axes([0.75, bottom-0.285, 0.075, 0.075])\nflexFig.bExport = Button(exportax, 'Export\\nNifti',\n color=axcolor, hovercolor=hovcolor)\n\n# Export nyp button\nexportax = plt.axes([0.85, bottom-0.285, 0.075, 0.075])\nflexFig.bExportNyp = Button(exportax, 'Export\\nHist',\n color=axcolor, hovercolor=hovcolor)\n\n#\n\"\"\"Updates\"\"\"\nflexFig.sHistC.on_changed(flexFig.updateColorBar)\nflexFig.sSliceNr.on_changed(flexFig.updateImaBrowser)\nflexFig.sThetaMin.on_changed(flexFig.updateThetaMin)\nflexFig.sThetaMax.on_changed(flexFig.updateThetaMax)\nflexFig.bCycle.on_clicked(flexFig.cycleView)\nflexFig.bRotate.on_clicked(flexFig.changeRotation)\nflexFig.bExport.on_clicked(flexFig.exportNifti)\nflexFig.bExportNyp.on_clicked(flexFig.exportNyp)\nflexFig.bReset.on_clicked(flexFig.resetGlobal)\n\n\n# TODO: Temporary solution for displaying original x-y axis labels\ndef update_axis_labels(event):\n \"\"\"Swap histogram bin indices with original values.\"\"\"\n xlabels = [item.get_text() for item in ax.get_xticklabels()]\n orig_range_labels = np.linspace(pMin, pMax, len(xlabels))\n\n # Adjust displayed decimals based on data range\n data_range = pMax - pMin\n if data_range > 200: # arbitrary value\n xlabels = [('%i' % i) for i in orig_range_labels]\n elif data_range > 20:\n xlabels = [('%.1f' % i) for i in orig_range_labels]\n elif data_range > 2:\n xlabels = [('%.2f' % i) for i in orig_range_labels]\n else:\n xlabels = [('%.3f' % i) for i in orig_range_labels]\n\n ax.set_xticklabels(xlabels)\n ax.set_yticklabels(xlabels) # limits of y axis assumed to be the same as 
x\n\n\nfig.canvas.mpl_connect('resize_event', update_axis_labels)\n\n#\n\"\"\"Lasso selection\"\"\"\n# Lasso button\nlassoax = plt.axes([0.15, bottom-0.285, 0.075, 0.075])\nbLasso = Button(lassoax, 'Lasso\\nOff', color=axcolor, hovercolor=hovcolor)\n\n# Lasso draw/erase\nlassoEraseAx = plt.axes([0.25, bottom-0.285, 0.075, 0.075])\nbLassoErase = Button(lassoEraseAx, 'Erase\\nOff', color=axcolor,\n hovercolor=hovcolor)\nbLassoErase.ax.patch.set_visible(False)\nbLassoErase.label.set_visible(False)\nbLassoErase.ax.axis('off')\n\n\ndef lassoSwitch(event):\n \"\"\"Enable disable lasso tool.\"\"\"\n global lasso\n lasso = []\n flexFig.lassoSwitchCount = (flexFig.lassoSwitchCount+1) % 2\n if flexFig.lassoSwitchCount == 1: # enable lasso\n flexFig.disconnect() # disable drag function of sector mask\n lasso = LassoSelector(ax, onselect)\n bLasso.label.set_text(\"Lasso\\nOn\")\n # Make erase button appear on in lasso mode\n bLassoErase.ax.patch.set_visible(True)\n bLassoErase.label.set_visible(True)\n bLassoErase.ax.axis('on')\n\n else: # disable lasso\n flexFig.connect() # enable drag function of sector mask\n bLasso.label.set_text(\"Lasso\\nOff\")\n # Make erase button disappear\n bLassoErase.ax.patch.set_visible(False)\n bLassoErase.label.set_visible(False)\n bLassoErase.ax.axis('off')\n\n# Pixel coordinates\npix = np.arange(nr_bins)\nxv, yv = np.meshgrid(pix, pix)\npix = np.vstack((xv.flatten(), yv.flatten())).T\n\n\ndef onselect(verts):\n \"\"\"Lasso related.\"\"\"\n global pix\n p = path.Path(verts)\n newLasIdx = p.contains_points(pix, radius=1.5) # New lasso indices\n flexFig.idxLasso[newLasIdx] = flexFig.lassoErase # Update lasso indices\n flexFig.remapMsks() # Update volume histogram mask\n flexFig.updatePanels(update_slice=False, update_rotation=True,\n update_extent=True)\n\n\ndef lassoEraseSwitch(event):\n \"\"\"Enable disable lasso erase function.\"\"\"\n flexFig.lassoErase = (flexFig.lassoErase + 1) % 2\n if flexFig.lassoErase is 1:\n bLassoErase.label.set_text(\"Erase\\nOff\")\n elif flexFig.lassoErase is 0:\n bLassoErase.label.set_text(\"Erase\\nOn\")\n\n\nbLasso.on_clicked(lassoSwitch) # lasso on/off\nbLassoErase.on_clicked(lassoEraseSwitch) # lasso erase on/off\nflexFig.remapMsks()\nflexFig.updatePanels(update_slice=True, update_rotation=False,\n update_extent=False)\n\nprint(\"GUI is ready.\")\nplt.show()\n"
] | [
[
"numpy.power",
"numpy.reshape",
"matplotlib.use",
"matplotlib.widgets.Button",
"numpy.arange",
"numpy.isnan",
"matplotlib.path.Path",
"numpy.ones",
"matplotlib.pyplot.axes",
"matplotlib.widgets.Slider",
"matplotlib.pyplot.axis",
"numpy.meshgrid",
"numpy.zeros",
"matplotlib.widgets.LassoSelector",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lilianatang/data-modelling-with-postgresql | [
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431",
"4b5d057d23c346cc36695dc0548f11908aeb5431"
] | [
"mypython/Lib/site-packages/pandas/tests/indexes/categorical/test_formats.py",
"mypython/Lib/site-packages/pandas/tests/indexes/multi/test_formats.py",
"mypython/Lib/site-packages/pandas/tests/frame/indexing/test_delitem.py",
"mypython/Lib/site-packages/pandas/core/dtypes/base.py",
"mypython/Lib/site-packages/pandas/tests/series/accessors/test_dt_accessor.py",
"mypython/Lib/site-packages/pandas/tests/dtypes/cast/test_infer_dtype.py",
"mypython/Lib/site-packages/pandas/core/reshape/melt.py",
"mypython/Lib/site-packages/pandas/tests/indexes/timedeltas/test_scalar_compat.py",
"mypython/Lib/site-packages/pandas/tests/indexes/categorical/test_fillna.py",
"mypython/Lib/site-packages/pandas/tests/indexes/categorical/test_indexing.py",
"mypython/Lib/site-packages/pandas/tests/tools/test_to_datetime.py",
"mypython/Lib/site-packages/pandas/tests/series/indexing/test_setitem.py",
"mypython/Lib/site-packages/pandas/io/excel/_xlsxwriter.py",
"mypython/Lib/site-packages/pandas/tests/window/moments/test_moments_rolling_functions.py",
"mypython/Lib/site-packages/pandas/tseries/holiday.py",
"mypython/Lib/site-packages/pandas/tests/indexes/multi/test_join.py",
"mypython/Lib/site-packages/pandas/tests/tseries/offsets/test_offsets.py"
] | [
"\"\"\"\r\nTests for CategoricalIndex.__repr__ and related methods.\r\n\"\"\"\r\nimport pandas._config.config as cf\r\n\r\nfrom pandas import CategoricalIndex\r\n\r\n\r\nclass TestCategoricalIndexRepr:\r\n def test_string_categorical_index_repr(self):\r\n # short\r\n idx = CategoricalIndex([\"a\", \"bb\", \"ccc\"])\r\n expected = \"\"\"CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')\"\"\" # noqa\r\n assert repr(idx) == expected\r\n\r\n # multiple lines\r\n idx = CategoricalIndex([\"a\", \"bb\", \"ccc\"] * 10)\r\n expected = \"\"\"CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\r\n 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',\r\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\r\n categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')\"\"\"\r\n\r\n assert repr(idx) == expected\r\n\r\n # truncated\r\n idx = CategoricalIndex([\"a\", \"bb\", \"ccc\"] * 100)\r\n expected = \"\"\"CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\r\n ...\r\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\r\n categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)\"\"\" # noqa\r\n\r\n assert repr(idx) == expected\r\n\r\n # larger categories\r\n idx = CategoricalIndex(list(\"abcdefghijklmmo\"))\r\n expected = \"\"\"CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\r\n 'm', 'm', 'o'],\r\n categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')\"\"\" # noqa\r\n\r\n assert repr(idx) == expected\r\n\r\n # short\r\n idx = CategoricalIndex([\"あ\", \"いい\", \"ううう\"])\r\n expected = \"\"\"CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\" # noqa\r\n assert repr(idx) == expected\r\n\r\n # multiple lines\r\n idx = CategoricalIndex([\"あ\", \"いい\", \"ううう\"] * 10)\r\n expected = \"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',\r\n 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\r\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\r\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\"\r\n\r\n assert repr(idx) == expected\r\n\r\n # truncated\r\n idx = CategoricalIndex([\"あ\", \"いい\", \"ううう\"] * 100)\r\n expected = \"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',\r\n ...\r\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\r\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)\"\"\" # noqa\r\n\r\n assert repr(idx) == expected\r\n\r\n # larger categories\r\n idx = CategoricalIndex(list(\"あいうえおかきくけこさしすせそ\"))\r\n expected = \"\"\"CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',\r\n 'す', 'せ', 'そ'],\r\n categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')\"\"\" # noqa\r\n\r\n assert repr(idx) == expected\r\n\r\n # Emable Unicode option -----------------------------------------\r\n with cf.option_context(\"display.unicode.east_asian_width\", True):\r\n\r\n # short\r\n idx = CategoricalIndex([\"あ\", \"いい\", \"ううう\"])\r\n expected = \"\"\"CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\" # noqa\r\n assert repr(idx) == expected\r\n\r\n # multiple lines\r\n idx = CategoricalIndex([\"あ\", \"いい\", \"ううう\"] * 10)\r\n expected = \"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 
'ううう', 'あ', 'いい',\r\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\r\n 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\r\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\r\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')\"\"\"\r\n\r\n assert repr(idx) == expected\r\n\r\n # truncated\r\n idx = CategoricalIndex([\"あ\", \"いい\", \"ううう\"] * 100)\r\n expected = \"\"\"CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\r\n 'ううう', 'あ',\r\n ...\r\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\r\n 'あ', 'いい', 'ううう'],\r\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)\"\"\" # noqa\r\n\r\n assert repr(idx) == expected\r\n\r\n # larger categories\r\n idx = CategoricalIndex(list(\"あいうえおかきくけこさしすせそ\"))\r\n expected = \"\"\"CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',\r\n 'さ', 'し', 'す', 'せ', 'そ'],\r\n categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')\"\"\" # noqa\r\n\r\n assert repr(idx) == expected\r\n",
"import warnings\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import Index, MultiIndex\r\nimport pandas._testing as tm\r\n\r\n\r\ndef test_format(idx):\r\n idx.format()\r\n idx[:0].format()\r\n\r\n\r\ndef test_format_integer_names():\r\n index = MultiIndex(\r\n levels=[[0, 1], [0, 1]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]\r\n )\r\n index.format(names=True)\r\n\r\n\r\ndef test_format_sparse_config(idx):\r\n warn_filters = warnings.filters\r\n warnings.filterwarnings(\"ignore\", category=FutureWarning, module=\".*format\")\r\n # GH1538\r\n pd.set_option(\"display.multi_sparse\", False)\r\n\r\n result = idx.format()\r\n assert result[1] == \"foo two\"\r\n\r\n tm.reset_display_options()\r\n\r\n warnings.filters = warn_filters\r\n\r\n\r\ndef test_format_sparse_display():\r\n index = MultiIndex(\r\n levels=[[0, 1], [0, 1], [0, 1], [0]],\r\n codes=[\r\n [0, 0, 0, 1, 1, 1],\r\n [0, 0, 1, 0, 0, 1],\r\n [0, 1, 0, 0, 1, 0],\r\n [0, 0, 0, 0, 0, 0],\r\n ],\r\n )\r\n\r\n result = index.format()\r\n assert result[3] == \"1 0 0 0\"\r\n\r\n\r\ndef test_repr_with_unicode_data():\r\n with pd.option_context(\"display.encoding\", \"UTF-8\"):\r\n d = {\"a\": [\"\\u05d0\", 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\r\n index = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\r\n assert \"\\\\\" not in repr(index) # we don't want unicode-escaped\r\n\r\n\r\ndef test_repr_roundtrip_raises():\r\n mi = MultiIndex.from_product([list(\"ab\"), range(3)], names=[\"first\", \"second\"])\r\n msg = \"Must pass both levels and codes\"\r\n with pytest.raises(TypeError, match=msg):\r\n eval(repr(mi))\r\n\r\n\r\ndef test_unicode_string_with_unicode():\r\n d = {\"a\": [\"\\u05d0\", 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8, 9]}\r\n idx = pd.DataFrame(d).set_index([\"a\", \"b\"]).index\r\n str(idx)\r\n\r\n\r\ndef test_repr_max_seq_item_setting(idx):\r\n # GH10182\r\n idx = idx.repeat(50)\r\n with pd.option_context(\"display.max_seq_items\", None):\r\n repr(idx)\r\n assert \"...\" not in str(idx)\r\n\r\n\r\nclass TestRepr:\r\n def test_unicode_repr_issues(self):\r\n levels = [Index([\"a/\\u03c3\", \"b/\\u03c3\", \"c/\\u03c3\"]), Index([0, 1])]\r\n codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]\r\n index = MultiIndex(levels=levels, codes=codes)\r\n\r\n repr(index.levels)\r\n\r\n # FIXME: dont leave commented-out\r\n # NumPy bug\r\n # repr(index.get_level_values(1))\r\n\r\n def test_repr(self, idx):\r\n result = idx[:1].__repr__()\r\n expected = \"\"\"\\\r\nMultiIndex([('foo', 'one')],\r\n names=['first', 'second'])\"\"\"\r\n assert result == expected\r\n\r\n result = idx.__repr__()\r\n expected = \"\"\"\\\r\nMultiIndex([('foo', 'one'),\r\n ('foo', 'two'),\r\n ('bar', 'one'),\r\n ('baz', 'two'),\r\n ('qux', 'one'),\r\n ('qux', 'two')],\r\n names=['first', 'second'])\"\"\"\r\n assert result == expected\r\n\r\n with pd.option_context(\"display.max_seq_items\", 5):\r\n result = idx.__repr__()\r\n expected = \"\"\"\\\r\nMultiIndex([('foo', 'one'),\r\n ('foo', 'two'),\r\n ...\r\n ('qux', 'one'),\r\n ('qux', 'two')],\r\n names=['first', 'second'], length=6)\"\"\"\r\n assert result == expected\r\n\r\n def test_rjust(self, narrow_multi_index):\r\n mi = narrow_multi_index\r\n result = mi[:1].__repr__()\r\n expected = \"\"\"\\\r\nMultiIndex([('a', 9, '2000-01-01 00:00:00')],\r\n names=['a', 'b', 'dti'])\"\"\"\r\n assert result == expected\r\n\r\n result = mi[::500].__repr__()\r\n expected = \"\"\"\\\r\nMultiIndex([( 'a', 9, '2000-01-01 00:00:00'),\r\n ( 'a', 9, '2000-01-01 
00:08:20'),\r\n ('abc', 10, '2000-01-01 00:16:40'),\r\n ('abc', 10, '2000-01-01 00:25:00')],\r\n names=['a', 'b', 'dti'])\"\"\"\r\n assert result == expected\r\n\r\n result = mi.__repr__()\r\n expected = \"\"\"\\\r\nMultiIndex([( 'a', 9, '2000-01-01 00:00:00'),\r\n ( 'a', 9, '2000-01-01 00:00:01'),\r\n ( 'a', 9, '2000-01-01 00:00:02'),\r\n ( 'a', 9, '2000-01-01 00:00:03'),\r\n ( 'a', 9, '2000-01-01 00:00:04'),\r\n ( 'a', 9, '2000-01-01 00:00:05'),\r\n ( 'a', 9, '2000-01-01 00:00:06'),\r\n ( 'a', 9, '2000-01-01 00:00:07'),\r\n ( 'a', 9, '2000-01-01 00:00:08'),\r\n ( 'a', 9, '2000-01-01 00:00:09'),\r\n ...\r\n ('abc', 10, '2000-01-01 00:33:10'),\r\n ('abc', 10, '2000-01-01 00:33:11'),\r\n ('abc', 10, '2000-01-01 00:33:12'),\r\n ('abc', 10, '2000-01-01 00:33:13'),\r\n ('abc', 10, '2000-01-01 00:33:14'),\r\n ('abc', 10, '2000-01-01 00:33:15'),\r\n ('abc', 10, '2000-01-01 00:33:16'),\r\n ('abc', 10, '2000-01-01 00:33:17'),\r\n ('abc', 10, '2000-01-01 00:33:18'),\r\n ('abc', 10, '2000-01-01 00:33:19')],\r\n names=['a', 'b', 'dti'], length=2000)\"\"\"\r\n assert result == expected\r\n\r\n def test_tuple_width(self, wide_multi_index):\r\n mi = wide_multi_index\r\n result = mi[:1].__repr__()\r\n expected = \"\"\"MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],\r\n names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])\"\"\"\r\n assert result == expected\r\n\r\n result = mi[:10].__repr__()\r\n expected = \"\"\"\\\r\nMultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),\r\n ('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),\r\n ('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),\r\n ('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),\r\n ('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),\r\n ('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),\r\n ('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),\r\n ('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),\r\n ('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),\r\n ('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],\r\n names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])\"\"\"\r\n assert result == expected\r\n\r\n result = mi.__repr__()\r\n expected = \"\"\"\\\r\nMultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),\r\n ( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),\r\n ( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),\r\n ( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),\r\n ( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),\r\n ( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),\r\n ( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),\r\n ( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),\r\n ( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),\r\n ( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...),\r\n ...\r\n ('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...),\r\n ('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...),\r\n ('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...),\r\n ('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...),\r\n ('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...),\r\n ('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...),\r\n ('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...),\r\n ('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),\r\n ('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),\r\n ('abc', 10, 
'2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],\r\n names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)\"\"\"\r\n assert result == expected\r\n",
"import re\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas import DataFrame, MultiIndex\r\n\r\n\r\nclass TestDataFrameDelItem:\r\n def test_delitem(self, float_frame):\r\n del float_frame[\"A\"]\r\n assert \"A\" not in float_frame\r\n\r\n def test_delitem_multiindex(self):\r\n midx = MultiIndex.from_product([[\"A\", \"B\"], [1, 2]])\r\n df = DataFrame(np.random.randn(4, 4), columns=midx)\r\n assert len(df.columns) == 4\r\n assert (\"A\",) in df.columns\r\n assert \"A\" in df.columns\r\n\r\n result = df[\"A\"]\r\n assert isinstance(result, DataFrame)\r\n del df[\"A\"]\r\n\r\n assert len(df.columns) == 2\r\n\r\n # A still in the levels, BUT get a KeyError if trying\r\n # to delete\r\n assert (\"A\",) not in df.columns\r\n with pytest.raises(KeyError, match=re.escape(\"('A',)\")):\r\n del df[(\"A\",)]\r\n\r\n # behavior of dropped/deleted MultiIndex levels changed from\r\n # GH 2770 to GH 19027: MultiIndex no longer '.__contains__'\r\n # levels which are dropped/deleted\r\n assert \"A\" not in df.columns\r\n with pytest.raises(KeyError, match=re.escape(\"('A',)\")):\r\n del df[\"A\"]\r\n\r\n def test_delitem_corner(self, float_frame):\r\n f = float_frame.copy()\r\n del f[\"D\"]\r\n assert len(f.columns) == 3\r\n with pytest.raises(KeyError, match=r\"^'D'$\"):\r\n del f[\"D\"]\r\n del f[\"B\"]\r\n assert len(f.columns) == 2\r\n\r\n def test_delitem_col_still_multiindex(self):\r\n arrays = [[\"a\", \"b\", \"c\", \"top\"], [\"\", \"\", \"\", \"OD\"], [\"\", \"\", \"\", \"wx\"]]\r\n\r\n tuples = sorted(zip(*arrays))\r\n index = MultiIndex.from_tuples(tuples)\r\n\r\n df = DataFrame(np.random.randn(3, 4), columns=index)\r\n del df[(\"a\", \"\", \"\")]\r\n assert isinstance(df.columns, MultiIndex)\r\n",
"\"\"\"\r\nExtend pandas with custom array types.\r\n\"\"\"\r\n\r\nfrom typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union\r\n\r\nimport numpy as np\r\n\r\nfrom pandas._typing import DtypeObj\r\nfrom pandas.errors import AbstractMethodError\r\n\r\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries\r\n\r\nif TYPE_CHECKING:\r\n from pandas.core.arrays import ExtensionArray\r\n\r\n\r\nclass ExtensionDtype:\r\n \"\"\"\r\n A custom data type, to be paired with an ExtensionArray.\r\n\r\n See Also\r\n --------\r\n extensions.register_extension_dtype: Register an ExtensionType\r\n with pandas as class decorator.\r\n extensions.ExtensionArray: Abstract base class for custom 1-D array types.\r\n\r\n Notes\r\n -----\r\n The interface includes the following abstract methods that must\r\n be implemented by subclasses:\r\n\r\n * type\r\n * name\r\n\r\n The following attributes and methods influence the behavior of the dtype in\r\n pandas operations\r\n\r\n * _is_numeric\r\n * _is_boolean\r\n * _get_common_dtype\r\n\r\n Optionally one can override construct_array_type for construction\r\n with the name of this dtype via the Registry. See\r\n :meth:`extensions.register_extension_dtype`.\r\n\r\n * construct_array_type\r\n\r\n The `na_value` class attribute can be used to set the default NA value\r\n for this type. :attr:`numpy.nan` is used by default.\r\n\r\n ExtensionDtypes are required to be hashable. The base class provides\r\n a default implementation, which relies on the ``_metadata`` class\r\n attribute. ``_metadata`` should be a tuple containing the strings\r\n that define your data type. For example, with ``PeriodDtype`` that's\r\n the ``freq`` attribute.\r\n\r\n **If you have a parametrized dtype you should set the ``_metadata``\r\n class property**.\r\n\r\n Ideally, the attributes in ``_metadata`` will match the\r\n parameters to your ``ExtensionDtype.__init__`` (if any). If any of\r\n the attributes in ``_metadata`` don't implement the standard\r\n ``__eq__`` or ``__hash__``, the default implementations here will not\r\n work.\r\n\r\n .. versionchanged:: 0.24.0\r\n\r\n Added ``_metadata``, ``__hash__``, and changed the default definition\r\n of ``__eq__``.\r\n\r\n For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method\r\n can be implemented: this method receives a pyarrow Array or ChunkedArray\r\n as only argument and is expected to return the appropriate pandas\r\n ExtensionArray for this dtype and the passed values::\r\n\r\n class ExtensionDtype:\r\n\r\n def __from_arrow__(\r\n self, array: Union[pyarrow.Array, pyarrow.ChunkedArray]\r\n ) -> ExtensionArray:\r\n ...\r\n\r\n This class does not inherit from 'abc.ABCMeta' for performance reasons.\r\n Methods and properties required by the interface raise\r\n ``pandas.errors.AbstractMethodError`` and no ``register`` method is\r\n provided for registering virtual subclasses.\r\n \"\"\"\r\n\r\n _metadata: Tuple[str, ...] 
= ()\r\n\r\n def __str__(self) -> str:\r\n return self.name\r\n\r\n def __eq__(self, other: Any) -> bool:\r\n \"\"\"\r\n Check whether 'other' is equal to self.\r\n\r\n By default, 'other' is considered equal if either\r\n\r\n * it's a string matching 'self.name'.\r\n * it's an instance of this type and all of the attributes\r\n in ``self._metadata`` are equal between `self` and `other`.\r\n\r\n Parameters\r\n ----------\r\n other : Any\r\n\r\n Returns\r\n -------\r\n bool\r\n \"\"\"\r\n if isinstance(other, str):\r\n try:\r\n other = self.construct_from_string(other)\r\n except TypeError:\r\n return False\r\n if isinstance(other, type(self)):\r\n return all(\r\n getattr(self, attr) == getattr(other, attr) for attr in self._metadata\r\n )\r\n return False\r\n\r\n def __hash__(self) -> int:\r\n return hash(tuple(getattr(self, attr) for attr in self._metadata))\r\n\r\n def __ne__(self, other: Any) -> bool:\r\n return not self.__eq__(other)\r\n\r\n @property\r\n def na_value(self) -> object:\r\n \"\"\"\r\n Default NA value to use for this type.\r\n\r\n This is used in e.g. ExtensionArray.take. This should be the\r\n user-facing \"boxed\" version of the NA value, not the physical NA value\r\n for storage. e.g. for JSONArray, this is an empty dictionary.\r\n \"\"\"\r\n return np.nan\r\n\r\n @property\r\n def type(self) -> Type:\r\n \"\"\"\r\n The scalar type for the array, e.g. ``int``\r\n\r\n It's expected ``ExtensionArray[item]`` returns an instance\r\n of ``ExtensionDtype.type`` for scalar ``item``, assuming\r\n that value is valid (not NA). NA values do not need to be\r\n instances of `type`.\r\n \"\"\"\r\n raise AbstractMethodError(self)\r\n\r\n @property\r\n def kind(self) -> str:\r\n \"\"\"\r\n A character code (one of 'biufcmMOSUV'), default 'O'\r\n\r\n This should match the NumPy dtype used when the array is\r\n converted to an ndarray, which is probably 'O' for object if\r\n the extension type cannot be represented as a built-in NumPy\r\n type.\r\n\r\n See Also\r\n --------\r\n numpy.dtype.kind\r\n \"\"\"\r\n return \"O\"\r\n\r\n @property\r\n def name(self) -> str:\r\n \"\"\"\r\n A string identifying the data type.\r\n\r\n Will be used for display in, e.g. ``Series.dtype``\r\n \"\"\"\r\n raise AbstractMethodError(self)\r\n\r\n @property\r\n def names(self) -> Optional[List[str]]:\r\n \"\"\"\r\n Ordered list of field names, or None if there are no fields.\r\n\r\n This is for compatibility with NumPy arrays, and may be removed in the\r\n future.\r\n \"\"\"\r\n return None\r\n\r\n @classmethod\r\n def construct_array_type(cls) -> Type[\"ExtensionArray\"]:\r\n \"\"\"\r\n Return the array type associated with this dtype.\r\n\r\n Returns\r\n -------\r\n type\r\n \"\"\"\r\n raise NotImplementedError\r\n\r\n @classmethod\r\n def construct_from_string(cls, string: str):\r\n r\"\"\"\r\n Construct this type from a string.\r\n\r\n This is useful mainly for data types that accept parameters.\r\n For example, a period dtype accepts a frequency parameter that\r\n can be set as ``period[H]`` (where H means hourly frequency).\r\n\r\n By default, in the abstract class, just the name of the type is\r\n expected. 
But subclasses can overwrite this method to accept\r\n parameters.\r\n\r\n Parameters\r\n ----------\r\n string : str\r\n The name of the type, for example ``category``.\r\n\r\n Returns\r\n -------\r\n ExtensionDtype\r\n Instance of the dtype.\r\n\r\n Raises\r\n ------\r\n TypeError\r\n If a class cannot be constructed from this 'string'.\r\n\r\n Examples\r\n --------\r\n For extension dtypes with arguments the following may be an\r\n adequate implementation.\r\n\r\n >>> @classmethod\r\n ... def construct_from_string(cls, string):\r\n ... pattern = re.compile(r\"^my_type\\[(?P<arg_name>.+)\\]$\")\r\n ... match = pattern.match(string)\r\n ... if match:\r\n ... return cls(**match.groupdict())\r\n ... else:\r\n ... raise TypeError(\r\n ... f\"Cannot construct a '{cls.__name__}' from '{string}'\"\r\n ... )\r\n \"\"\"\r\n if not isinstance(string, str):\r\n raise TypeError(\r\n f\"'construct_from_string' expects a string, got {type(string)}\"\r\n )\r\n # error: Non-overlapping equality check (left operand type: \"str\", right\r\n # operand type: \"Callable[[ExtensionDtype], str]\") [comparison-overlap]\r\n assert isinstance(cls.name, str), (cls, type(cls.name))\r\n if string != cls.name:\r\n raise TypeError(f\"Cannot construct a '{cls.__name__}' from '{string}'\")\r\n return cls()\r\n\r\n @classmethod\r\n def is_dtype(cls, dtype: object) -> bool:\r\n \"\"\"\r\n Check if we match 'dtype'.\r\n\r\n Parameters\r\n ----------\r\n dtype : object\r\n The object to check.\r\n\r\n Returns\r\n -------\r\n bool\r\n\r\n Notes\r\n -----\r\n The default implementation is True if\r\n\r\n 1. ``cls.construct_from_string(dtype)`` is an instance\r\n of ``cls``.\r\n 2. ``dtype`` is an object and is an instance of ``cls``\r\n 3. ``dtype`` has a ``dtype`` attribute, and any of the above\r\n conditions is true for ``dtype.dtype``.\r\n \"\"\"\r\n dtype = getattr(dtype, \"dtype\", dtype)\r\n\r\n if isinstance(dtype, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)):\r\n # https://github.com/pandas-dev/pandas/issues/22960\r\n # avoid passing data to `construct_from_string`. This could\r\n # cause a FutureWarning from numpy about failing elementwise\r\n # comparison from, e.g., comparing DataFrame == 'category'.\r\n return False\r\n elif dtype is None:\r\n return False\r\n elif isinstance(dtype, cls):\r\n return True\r\n if isinstance(dtype, str):\r\n try:\r\n return cls.construct_from_string(dtype) is not None\r\n except TypeError:\r\n return False\r\n return False\r\n\r\n @property\r\n def _is_numeric(self) -> bool:\r\n \"\"\"\r\n Whether columns with this dtype should be considered numeric.\r\n\r\n By default ExtensionDtypes are assumed to be non-numeric.\r\n They'll be excluded from operations that exclude non-numeric\r\n columns, like (groupby) reductions, plotting, etc.\r\n \"\"\"\r\n return False\r\n\r\n @property\r\n def _is_boolean(self) -> bool:\r\n \"\"\"\r\n Whether this dtype should be considered boolean.\r\n\r\n By default, ExtensionDtypes are assumed to be non-numeric.\r\n Setting this to True will affect the behavior of several places,\r\n e.g.\r\n\r\n * is_bool\r\n * boolean indexing\r\n\r\n Returns\r\n -------\r\n bool\r\n \"\"\"\r\n return False\r\n\r\n def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:\r\n \"\"\"\r\n Return the common dtype, if one exists.\r\n\r\n Used in `find_common_type` implementation. 
This is for example used\r\n to determine the resulting dtype in a concat operation.\r\n\r\n If no common dtype exists, return None (which gives the other dtypes\r\n the chance to determine a common dtype). If all dtypes in the list\r\n return None, then the common dtype will be \"object\" dtype (this means\r\n it is never needed to return \"object\" dtype from this method itself).\r\n\r\n Parameters\r\n ----------\r\n dtypes : list of dtypes\r\n The dtypes for which to determine a common dtype. This is a list\r\n of np.dtype or ExtensionDtype instances.\r\n\r\n Returns\r\n -------\r\n Common dtype (np.dtype or ExtensionDtype) or None\r\n \"\"\"\r\n if len(set(dtypes)) == 1:\r\n # only itself\r\n return self\r\n else:\r\n return None\r\n\r\n\r\ndef register_extension_dtype(cls: Type[ExtensionDtype]) -> Type[ExtensionDtype]:\r\n \"\"\"\r\n Register an ExtensionType with pandas as class decorator.\r\n\r\n .. versionadded:: 0.24.0\r\n\r\n This enables operations like ``.astype(name)`` for the name\r\n of the ExtensionDtype.\r\n\r\n Returns\r\n -------\r\n callable\r\n A class decorator.\r\n\r\n Examples\r\n --------\r\n >>> from pandas.api.extensions import register_extension_dtype\r\n >>> from pandas.api.extensions import ExtensionDtype\r\n >>> @register_extension_dtype\r\n ... class MyExtensionDtype(ExtensionDtype):\r\n ... name = \"myextension\"\r\n \"\"\"\r\n registry.register(cls)\r\n return cls\r\n\r\n\r\nclass Registry:\r\n \"\"\"\r\n Registry for dtype inference.\r\n\r\n The registry allows one to map a string repr of a extension\r\n dtype to an extension dtype. The string alias can be used in several\r\n places, including\r\n\r\n * Series and Index constructors\r\n * :meth:`pandas.array`\r\n * :meth:`pandas.Series.astype`\r\n\r\n Multiple extension types can be registered.\r\n These are tried in order.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.dtypes: List[Type[ExtensionDtype]] = []\r\n\r\n def register(self, dtype: Type[ExtensionDtype]) -> None:\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dtype : ExtensionDtype class\r\n \"\"\"\r\n if not issubclass(dtype, ExtensionDtype):\r\n raise ValueError(\"can only register pandas extension dtypes\")\r\n\r\n self.dtypes.append(dtype)\r\n\r\n def find(\r\n self, dtype: Union[Type[ExtensionDtype], str]\r\n ) -> Optional[Type[ExtensionDtype]]:\r\n \"\"\"\r\n Parameters\r\n ----------\r\n dtype : Type[ExtensionDtype] or str\r\n\r\n Returns\r\n -------\r\n return the first matching dtype, otherwise return None\r\n \"\"\"\r\n if not isinstance(dtype, str):\r\n dtype_type = dtype\r\n if not isinstance(dtype, type):\r\n dtype_type = type(dtype)\r\n if issubclass(dtype_type, ExtensionDtype):\r\n return dtype\r\n\r\n return None\r\n\r\n for dtype_type in self.dtypes:\r\n try:\r\n return dtype_type.construct_from_string(dtype)\r\n except TypeError:\r\n pass\r\n\r\n return None\r\n\r\n\r\nregistry = Registry()\r\n",
"import calendar\r\nfrom datetime import date, datetime, time\r\nimport locale\r\nimport unicodedata\r\n\r\nimport numpy as np\r\nimport pytest\r\nimport pytz\r\n\r\nfrom pandas._libs.tslibs.timezones import maybe_get_tz\r\n\r\nfrom pandas.core.dtypes.common import is_integer_dtype, is_list_like\r\n\r\nimport pandas as pd\r\nfrom pandas import (\r\n DataFrame,\r\n DatetimeIndex,\r\n Index,\r\n Period,\r\n PeriodIndex,\r\n Series,\r\n TimedeltaIndex,\r\n date_range,\r\n period_range,\r\n timedelta_range,\r\n)\r\nimport pandas._testing as tm\r\nfrom pandas.core.arrays import PeriodArray\r\nimport pandas.core.common as com\r\n\r\n\r\nclass TestSeriesDatetimeValues:\r\n def test_dt_namespace_accessor(self):\r\n\r\n # GH 7207, 11128\r\n # test .dt namespace accessor\r\n\r\n ok_for_period = PeriodArray._datetimelike_ops\r\n ok_for_period_methods = [\"strftime\", \"to_timestamp\", \"asfreq\"]\r\n ok_for_dt = DatetimeIndex._datetimelike_ops\r\n ok_for_dt_methods = [\r\n \"to_period\",\r\n \"to_pydatetime\",\r\n \"tz_localize\",\r\n \"tz_convert\",\r\n \"normalize\",\r\n \"strftime\",\r\n \"round\",\r\n \"floor\",\r\n \"ceil\",\r\n \"day_name\",\r\n \"month_name\",\r\n \"isocalendar\",\r\n ]\r\n ok_for_td = TimedeltaIndex._datetimelike_ops\r\n ok_for_td_methods = [\r\n \"components\",\r\n \"to_pytimedelta\",\r\n \"total_seconds\",\r\n \"round\",\r\n \"floor\",\r\n \"ceil\",\r\n ]\r\n\r\n def get_expected(s, name):\r\n result = getattr(Index(s._values), prop)\r\n if isinstance(result, np.ndarray):\r\n if is_integer_dtype(result):\r\n result = result.astype(\"int64\")\r\n elif not is_list_like(result) or isinstance(result, pd.DataFrame):\r\n return result\r\n return Series(result, index=s.index, name=s.name)\r\n\r\n def compare(s, name):\r\n a = getattr(s.dt, prop)\r\n b = get_expected(s, prop)\r\n if not (is_list_like(a) and is_list_like(b)):\r\n assert a == b\r\n elif isinstance(a, pd.DataFrame):\r\n tm.assert_frame_equal(a, b)\r\n else:\r\n tm.assert_series_equal(a, b)\r\n\r\n # datetimeindex\r\n cases = [\r\n Series(date_range(\"20130101\", periods=5), name=\"xxx\"),\r\n Series(date_range(\"20130101\", periods=5, freq=\"s\"), name=\"xxx\"),\r\n Series(date_range(\"20130101 00:00:00\", periods=5, freq=\"ms\"), name=\"xxx\"),\r\n ]\r\n for s in cases:\r\n for prop in ok_for_dt:\r\n # we test freq below\r\n # we ignore week and weekofyear because they are deprecated\r\n if prop not in [\"freq\", \"week\", \"weekofyear\"]:\r\n compare(s, prop)\r\n\r\n for prop in ok_for_dt_methods:\r\n getattr(s.dt, prop)\r\n\r\n result = s.dt.to_pydatetime()\r\n assert isinstance(result, np.ndarray)\r\n assert result.dtype == object\r\n\r\n result = s.dt.tz_localize(\"US/Eastern\")\r\n exp_values = DatetimeIndex(s.values).tz_localize(\"US/Eastern\")\r\n expected = Series(exp_values, index=s.index, name=\"xxx\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n tz_result = result.dt.tz\r\n assert str(tz_result) == \"US/Eastern\"\r\n freq_result = s.dt.freq\r\n assert freq_result == DatetimeIndex(s.values, freq=\"infer\").freq\r\n\r\n # let's localize, then convert\r\n result = s.dt.tz_localize(\"UTC\").dt.tz_convert(\"US/Eastern\")\r\n exp_values = (\r\n DatetimeIndex(s.values).tz_localize(\"UTC\").tz_convert(\"US/Eastern\")\r\n )\r\n expected = Series(exp_values, index=s.index, name=\"xxx\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n # datetimeindex with tz\r\n s = Series(date_range(\"20130101\", periods=5, tz=\"US/Eastern\"), name=\"xxx\")\r\n for prop in ok_for_dt:\r\n\r\n # we test freq below\r\n # 
we ignore week and weekofyear because they are deprecated\r\n if prop not in [\"freq\", \"week\", \"weekofyear\"]:\r\n compare(s, prop)\r\n\r\n for prop in ok_for_dt_methods:\r\n getattr(s.dt, prop)\r\n\r\n result = s.dt.to_pydatetime()\r\n assert isinstance(result, np.ndarray)\r\n assert result.dtype == object\r\n\r\n result = s.dt.tz_convert(\"CET\")\r\n expected = Series(s._values.tz_convert(\"CET\"), index=s.index, name=\"xxx\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n tz_result = result.dt.tz\r\n assert str(tz_result) == \"CET\"\r\n freq_result = s.dt.freq\r\n assert freq_result == DatetimeIndex(s.values, freq=\"infer\").freq\r\n\r\n # timedelta index\r\n cases = [\r\n Series(\r\n timedelta_range(\"1 day\", periods=5), index=list(\"abcde\"), name=\"xxx\"\r\n ),\r\n Series(timedelta_range(\"1 day 01:23:45\", periods=5, freq=\"s\"), name=\"xxx\"),\r\n Series(\r\n timedelta_range(\"2 days 01:23:45.012345\", periods=5, freq=\"ms\"),\r\n name=\"xxx\",\r\n ),\r\n ]\r\n for s in cases:\r\n for prop in ok_for_td:\r\n # we test freq below\r\n if prop != \"freq\":\r\n compare(s, prop)\r\n\r\n for prop in ok_for_td_methods:\r\n getattr(s.dt, prop)\r\n\r\n result = s.dt.components\r\n assert isinstance(result, DataFrame)\r\n tm.assert_index_equal(result.index, s.index)\r\n\r\n result = s.dt.to_pytimedelta()\r\n assert isinstance(result, np.ndarray)\r\n assert result.dtype == object\r\n\r\n result = s.dt.total_seconds()\r\n assert isinstance(result, pd.Series)\r\n assert result.dtype == \"float64\"\r\n\r\n freq_result = s.dt.freq\r\n assert freq_result == TimedeltaIndex(s.values, freq=\"infer\").freq\r\n\r\n # both\r\n index = date_range(\"20130101\", periods=3, freq=\"D\")\r\n s = Series(date_range(\"20140204\", periods=3, freq=\"s\"), index=index, name=\"xxx\")\r\n exp = Series(\r\n np.array([2014, 2014, 2014], dtype=\"int64\"), index=index, name=\"xxx\"\r\n )\r\n tm.assert_series_equal(s.dt.year, exp)\r\n\r\n exp = Series(np.array([2, 2, 2], dtype=\"int64\"), index=index, name=\"xxx\")\r\n tm.assert_series_equal(s.dt.month, exp)\r\n\r\n exp = Series(np.array([0, 1, 2], dtype=\"int64\"), index=index, name=\"xxx\")\r\n tm.assert_series_equal(s.dt.second, exp)\r\n\r\n exp = Series([s[0]] * 3, index=index, name=\"xxx\")\r\n tm.assert_series_equal(s.dt.normalize(), exp)\r\n\r\n # periodindex\r\n cases = [Series(period_range(\"20130101\", periods=5, freq=\"D\"), name=\"xxx\")]\r\n for s in cases:\r\n for prop in ok_for_period:\r\n # we test freq below\r\n if prop != \"freq\":\r\n compare(s, prop)\r\n\r\n for prop in ok_for_period_methods:\r\n getattr(s.dt, prop)\r\n\r\n freq_result = s.dt.freq\r\n assert freq_result == PeriodIndex(s.values).freq\r\n\r\n # test limited display api\r\n def get_dir(s):\r\n results = [r for r in s.dt.__dir__() if not r.startswith(\"_\")]\r\n return sorted(set(results))\r\n\r\n s = Series(date_range(\"20130101\", periods=5, freq=\"D\"), name=\"xxx\")\r\n results = get_dir(s)\r\n tm.assert_almost_equal(results, sorted(set(ok_for_dt + ok_for_dt_methods)))\r\n\r\n s = Series(\r\n period_range(\"20130101\", periods=5, freq=\"D\", name=\"xxx\").astype(object)\r\n )\r\n results = get_dir(s)\r\n tm.assert_almost_equal(\r\n results, sorted(set(ok_for_period + ok_for_period_methods))\r\n )\r\n\r\n # 11295\r\n # ambiguous time error on the conversions\r\n s = Series(pd.date_range(\"2015-01-01\", \"2016-01-01\", freq=\"T\"), name=\"xxx\")\r\n s = s.dt.tz_localize(\"UTC\").dt.tz_convert(\"America/Chicago\")\r\n results = get_dir(s)\r\n tm.assert_almost_equal(results, 
sorted(set(ok_for_dt + ok_for_dt_methods)))\r\n exp_values = pd.date_range(\r\n \"2015-01-01\", \"2016-01-01\", freq=\"T\", tz=\"UTC\"\r\n ).tz_convert(\"America/Chicago\")\r\n # freq not preserved by tz_localize above\r\n exp_values = exp_values._with_freq(None)\r\n expected = Series(exp_values, name=\"xxx\")\r\n tm.assert_series_equal(s, expected)\r\n\r\n # no setting allowed\r\n s = Series(date_range(\"20130101\", periods=5, freq=\"D\"), name=\"xxx\")\r\n with pytest.raises(ValueError, match=\"modifications\"):\r\n s.dt.hour = 5\r\n\r\n # trying to set a copy\r\n msg = \"modifications to a property of a datetimelike.+not supported\"\r\n with pd.option_context(\"chained_assignment\", \"raise\"):\r\n with pytest.raises(com.SettingWithCopyError, match=msg):\r\n s.dt.hour[0] = 5\r\n\r\n @pytest.mark.parametrize(\r\n \"method, dates\",\r\n [\r\n [\"round\", [\"2012-01-02\", \"2012-01-02\", \"2012-01-01\"]],\r\n [\"floor\", [\"2012-01-01\", \"2012-01-01\", \"2012-01-01\"]],\r\n [\"ceil\", [\"2012-01-02\", \"2012-01-02\", \"2012-01-02\"]],\r\n ],\r\n )\r\n def test_dt_round(self, method, dates):\r\n # round\r\n s = Series(\r\n pd.to_datetime(\r\n [\"2012-01-01 13:00:00\", \"2012-01-01 12:01:00\", \"2012-01-01 08:00:00\"]\r\n ),\r\n name=\"xxx\",\r\n )\r\n result = getattr(s.dt, method)(\"D\")\r\n expected = Series(pd.to_datetime(dates), name=\"xxx\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_dt_round_tz(self):\r\n s = Series(\r\n pd.to_datetime(\r\n [\"2012-01-01 13:00:00\", \"2012-01-01 12:01:00\", \"2012-01-01 08:00:00\"]\r\n ),\r\n name=\"xxx\",\r\n )\r\n result = s.dt.tz_localize(\"UTC\").dt.tz_convert(\"US/Eastern\").dt.round(\"D\")\r\n\r\n exp_values = pd.to_datetime(\r\n [\"2012-01-01\", \"2012-01-01\", \"2012-01-01\"]\r\n ).tz_localize(\"US/Eastern\")\r\n expected = Series(exp_values, name=\"xxx\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"method\", [\"ceil\", \"round\", \"floor\"])\r\n def test_dt_round_tz_ambiguous(self, method):\r\n # GH 18946 round near \"fall back\" DST\r\n df1 = pd.DataFrame(\r\n [\r\n pd.to_datetime(\"2017-10-29 02:00:00+02:00\", utc=True),\r\n pd.to_datetime(\"2017-10-29 02:00:00+01:00\", utc=True),\r\n pd.to_datetime(\"2017-10-29 03:00:00+01:00\", utc=True),\r\n ],\r\n columns=[\"date\"],\r\n )\r\n df1[\"date\"] = df1[\"date\"].dt.tz_convert(\"Europe/Madrid\")\r\n # infer\r\n result = getattr(df1.date.dt, method)(\"H\", ambiguous=\"infer\")\r\n expected = df1[\"date\"]\r\n tm.assert_series_equal(result, expected)\r\n\r\n # bool-array\r\n result = getattr(df1.date.dt, method)(\"H\", ambiguous=[True, False, False])\r\n tm.assert_series_equal(result, expected)\r\n\r\n # NaT\r\n result = getattr(df1.date.dt, method)(\"H\", ambiguous=\"NaT\")\r\n expected = df1[\"date\"].copy()\r\n expected.iloc[0:2] = pd.NaT\r\n tm.assert_series_equal(result, expected)\r\n\r\n # raise\r\n with tm.external_error_raised(pytz.AmbiguousTimeError):\r\n getattr(df1.date.dt, method)(\"H\", ambiguous=\"raise\")\r\n\r\n @pytest.mark.parametrize(\r\n \"method, ts_str, freq\",\r\n [\r\n [\"ceil\", \"2018-03-11 01:59:00-0600\", \"5min\"],\r\n [\"round\", \"2018-03-11 01:59:00-0600\", \"5min\"],\r\n [\"floor\", \"2018-03-11 03:01:00-0500\", \"2H\"],\r\n ],\r\n )\r\n def test_dt_round_tz_nonexistent(self, method, ts_str, freq):\r\n # GH 23324 round near \"spring forward\" DST\r\n s = Series([pd.Timestamp(ts_str, tz=\"America/Chicago\")])\r\n result = getattr(s.dt, method)(freq, nonexistent=\"shift_forward\")\r\n expected = 
Series([pd.Timestamp(\"2018-03-11 03:00:00\", tz=\"America/Chicago\")])\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = getattr(s.dt, method)(freq, nonexistent=\"NaT\")\r\n expected = Series([pd.NaT]).dt.tz_localize(result.dt.tz)\r\n tm.assert_series_equal(result, expected)\r\n\r\n with pytest.raises(pytz.NonExistentTimeError, match=\"2018-03-11 02:00:00\"):\r\n getattr(s.dt, method)(freq, nonexistent=\"raise\")\r\n\r\n def test_dt_namespace_accessor_categorical(self):\r\n # GH 19468\r\n dti = DatetimeIndex([\"20171111\", \"20181212\"]).repeat(2)\r\n s = Series(pd.Categorical(dti), name=\"foo\")\r\n result = s.dt.year\r\n expected = Series([2017, 2017, 2018, 2018], name=\"foo\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_dt_tz_localize_categorical(self, tz_aware_fixture):\r\n # GH 27952\r\n tz = tz_aware_fixture\r\n datetimes = Series(\r\n [\"2019-01-01\", \"2019-01-01\", \"2019-01-02\"], dtype=\"datetime64[ns]\"\r\n )\r\n categorical = datetimes.astype(\"category\")\r\n result = categorical.dt.tz_localize(tz)\r\n expected = datetimes.dt.tz_localize(tz)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_dt_tz_convert_categorical(self, tz_aware_fixture):\r\n # GH 27952\r\n tz = tz_aware_fixture\r\n datetimes = Series(\r\n [\"2019-01-01\", \"2019-01-01\", \"2019-01-02\"], dtype=\"datetime64[ns, MET]\"\r\n )\r\n categorical = datetimes.astype(\"category\")\r\n result = categorical.dt.tz_convert(tz)\r\n expected = datetimes.dt.tz_convert(tz)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"accessor\", [\"year\", \"month\", \"day\"])\r\n def test_dt_other_accessors_categorical(self, accessor):\r\n # GH 27952\r\n datetimes = Series(\r\n [\"2018-01-01\", \"2018-01-01\", \"2019-01-02\"], dtype=\"datetime64[ns]\"\r\n )\r\n categorical = datetimes.astype(\"category\")\r\n result = getattr(categorical.dt, accessor)\r\n expected = getattr(datetimes.dt, accessor)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_dt_accessor_no_new_attributes(self):\r\n # https://github.com/pandas-dev/pandas/issues/10673\r\n s = Series(date_range(\"20130101\", periods=5, freq=\"D\"))\r\n with pytest.raises(AttributeError, match=\"You cannot add any new attribute\"):\r\n s.dt.xlabel = \"a\"\r\n\r\n @pytest.mark.parametrize(\r\n \"time_locale\", [None] if tm.get_locales() is None else [None] + tm.get_locales()\r\n )\r\n def test_dt_accessor_datetime_name_accessors(self, time_locale):\r\n # Test Monday -> Sunday and January -> December, in that sequence\r\n if time_locale is None:\r\n # If the time_locale is None, day-name and month_name should\r\n # return the english attributes\r\n expected_days = [\r\n \"Monday\",\r\n \"Tuesday\",\r\n \"Wednesday\",\r\n \"Thursday\",\r\n \"Friday\",\r\n \"Saturday\",\r\n \"Sunday\",\r\n ]\r\n expected_months = [\r\n \"January\",\r\n \"February\",\r\n \"March\",\r\n \"April\",\r\n \"May\",\r\n \"June\",\r\n \"July\",\r\n \"August\",\r\n \"September\",\r\n \"October\",\r\n \"November\",\r\n \"December\",\r\n ]\r\n else:\r\n with tm.set_locale(time_locale, locale.LC_TIME):\r\n expected_days = calendar.day_name[:]\r\n expected_months = calendar.month_name[1:]\r\n\r\n s = Series(date_range(freq=\"D\", start=datetime(1998, 1, 1), periods=365))\r\n english_days = [\r\n \"Monday\",\r\n \"Tuesday\",\r\n \"Wednesday\",\r\n \"Thursday\",\r\n \"Friday\",\r\n \"Saturday\",\r\n \"Sunday\",\r\n ]\r\n for day, name, eng_name in zip(range(4, 11), expected_days, english_days):\r\n name = name.capitalize()\r\n assert 
s.dt.day_name(locale=time_locale)[day] == name\r\n s = s.append(Series([pd.NaT]))\r\n assert np.isnan(s.dt.day_name(locale=time_locale).iloc[-1])\r\n\r\n s = Series(date_range(freq=\"M\", start=\"2012\", end=\"2013\"))\r\n result = s.dt.month_name(locale=time_locale)\r\n expected = Series([month.capitalize() for month in expected_months])\r\n\r\n # work around https://github.com/pandas-dev/pandas/issues/22342\r\n result = result.str.normalize(\"NFD\")\r\n expected = expected.str.normalize(\"NFD\")\r\n\r\n tm.assert_series_equal(result, expected)\r\n\r\n for s_date, expected in zip(s, expected_months):\r\n result = s_date.month_name(locale=time_locale)\r\n expected = expected.capitalize()\r\n\r\n result = unicodedata.normalize(\"NFD\", result)\r\n expected = unicodedata.normalize(\"NFD\", expected)\r\n\r\n assert result == expected\r\n\r\n s = s.append(Series([pd.NaT]))\r\n assert np.isnan(s.dt.month_name(locale=time_locale).iloc[-1])\r\n\r\n def test_strftime(self):\r\n # GH 10086\r\n s = Series(date_range(\"20130101\", periods=5))\r\n result = s.dt.strftime(\"%Y/%m/%d\")\r\n expected = Series(\r\n [\"2013/01/01\", \"2013/01/02\", \"2013/01/03\", \"2013/01/04\", \"2013/01/05\"]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series(date_range(\"2015-02-03 11:22:33.4567\", periods=5))\r\n result = s.dt.strftime(\"%Y/%m/%d %H-%M-%S\")\r\n expected = Series(\r\n [\r\n \"2015/02/03 11-22-33\",\r\n \"2015/02/04 11-22-33\",\r\n \"2015/02/05 11-22-33\",\r\n \"2015/02/06 11-22-33\",\r\n \"2015/02/07 11-22-33\",\r\n ]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series(period_range(\"20130101\", periods=5))\r\n result = s.dt.strftime(\"%Y/%m/%d\")\r\n expected = Series(\r\n [\"2013/01/01\", \"2013/01/02\", \"2013/01/03\", \"2013/01/04\", \"2013/01/05\"]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series(period_range(\"2015-02-03 11:22:33.4567\", periods=5, freq=\"s\"))\r\n result = s.dt.strftime(\"%Y/%m/%d %H-%M-%S\")\r\n expected = Series(\r\n [\r\n \"2015/02/03 11-22-33\",\r\n \"2015/02/03 11-22-34\",\r\n \"2015/02/03 11-22-35\",\r\n \"2015/02/03 11-22-36\",\r\n \"2015/02/03 11-22-37\",\r\n ]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series(date_range(\"20130101\", periods=5))\r\n s.iloc[0] = pd.NaT\r\n result = s.dt.strftime(\"%Y/%m/%d\")\r\n expected = Series(\r\n [np.nan, \"2013/01/02\", \"2013/01/03\", \"2013/01/04\", \"2013/01/05\"]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n datetime_index = date_range(\"20150301\", periods=5)\r\n result = datetime_index.strftime(\"%Y/%m/%d\")\r\n\r\n expected = Index(\r\n [\"2015/03/01\", \"2015/03/02\", \"2015/03/03\", \"2015/03/04\", \"2015/03/05\"],\r\n dtype=np.object_,\r\n )\r\n # dtype may be S10 or U10 depending on python version\r\n tm.assert_index_equal(result, expected)\r\n\r\n period_index = period_range(\"20150301\", periods=5)\r\n result = period_index.strftime(\"%Y/%m/%d\")\r\n expected = Index(\r\n [\"2015/03/01\", \"2015/03/02\", \"2015/03/03\", \"2015/03/04\", \"2015/03/05\"],\r\n dtype=\"=U10\",\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)])\r\n result = s.dt.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n expected = Series([\"2013-01-01 02:32:59\", \"2013-01-02 14:32:01\"])\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series(period_range(\"20130101\", periods=4, freq=\"H\"))\r\n result = s.dt.strftime(\"%Y/%m/%d %H:%M:%S\")\r\n expected = Series(\r\n [\r\n 
\"2013/01/01 00:00:00\",\r\n \"2013/01/01 01:00:00\",\r\n \"2013/01/01 02:00:00\",\r\n \"2013/01/01 03:00:00\",\r\n ]\r\n )\r\n\r\n s = Series(period_range(\"20130101\", periods=4, freq=\"L\"))\r\n result = s.dt.strftime(\"%Y/%m/%d %H:%M:%S.%l\")\r\n expected = Series(\r\n [\r\n \"2013/01/01 00:00:00.000\",\r\n \"2013/01/01 00:00:00.001\",\r\n \"2013/01/01 00:00:00.002\",\r\n \"2013/01/01 00:00:00.003\",\r\n ]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"data\",\r\n [\r\n DatetimeIndex([\"2019-01-01\", pd.NaT]),\r\n PeriodIndex([\"2019-01-01\", pd.NaT], dtype=\"period[D]\"),\r\n ],\r\n )\r\n def test_strftime_nat(self, data):\r\n # GH 29578\r\n s = Series(data)\r\n result = s.dt.strftime(\"%Y-%m-%d\")\r\n expected = Series([\"2019-01-01\", np.nan])\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_valid_dt_with_missing_values(self):\r\n\r\n from datetime import date, time\r\n\r\n # GH 8689\r\n s = Series(date_range(\"20130101\", periods=5, freq=\"D\"))\r\n s.iloc[2] = pd.NaT\r\n\r\n for attr in [\"microsecond\", \"nanosecond\", \"second\", \"minute\", \"hour\", \"day\"]:\r\n expected = getattr(s.dt, attr).copy()\r\n expected.iloc[2] = np.nan\r\n result = getattr(s.dt, attr)\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = s.dt.date\r\n expected = Series(\r\n [\r\n date(2013, 1, 1),\r\n date(2013, 1, 2),\r\n np.nan,\r\n date(2013, 1, 4),\r\n date(2013, 1, 5),\r\n ],\r\n dtype=\"object\",\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = s.dt.time\r\n expected = Series([time(0), time(0), np.nan, time(0), time(0)], dtype=\"object\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_dt_accessor_api(self):\r\n # GH 9322\r\n from pandas.core.indexes.accessors import (\r\n CombinedDatetimelikeProperties,\r\n DatetimeProperties,\r\n )\r\n\r\n assert Series.dt is CombinedDatetimelikeProperties\r\n\r\n s = Series(date_range(\"2000-01-01\", periods=3))\r\n assert isinstance(s.dt, DatetimeProperties)\r\n\r\n @pytest.mark.parametrize(\r\n \"ser\", [Series(np.arange(5)), Series(list(\"abcde\")), Series(np.random.randn(5))]\r\n )\r\n def test_dt_accessor_invalid(self, ser):\r\n # GH#9322 check that series with incorrect dtypes don't have attr\r\n with pytest.raises(AttributeError, match=\"only use .dt accessor\"):\r\n ser.dt\r\n assert not hasattr(ser, \"dt\")\r\n\r\n def test_dt_accessor_updates_on_inplace(self):\r\n s = Series(pd.date_range(\"2018-01-01\", periods=10))\r\n s[2] = None\r\n return_value = s.fillna(pd.Timestamp(\"2018-01-01\"), inplace=True)\r\n assert return_value is None\r\n result = s.dt.date\r\n assert result[0] == result[2]\r\n\r\n def test_date_tz(self):\r\n # GH11757\r\n rng = DatetimeIndex(\r\n [\"2014-04-04 23:56\", \"2014-07-18 21:24\", \"2015-11-22 22:14\"],\r\n tz=\"US/Eastern\",\r\n )\r\n s = Series(rng)\r\n expected = Series([date(2014, 4, 4), date(2014, 7, 18), date(2015, 11, 22)])\r\n tm.assert_series_equal(s.dt.date, expected)\r\n tm.assert_series_equal(s.apply(lambda x: x.date()), expected)\r\n\r\n def test_dt_timetz_accessor(self, tz_naive_fixture):\r\n # GH21358\r\n tz = maybe_get_tz(tz_naive_fixture)\r\n\r\n dtindex = DatetimeIndex(\r\n [\"2014-04-04 23:56\", \"2014-07-18 21:24\", \"2015-11-22 22:14\"], tz=tz\r\n )\r\n s = Series(dtindex)\r\n expected = Series(\r\n [time(23, 56, tzinfo=tz), time(21, 24, tzinfo=tz), time(22, 14, tzinfo=tz)]\r\n )\r\n result = s.dt.timetz\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"input_series, 
expected_output\",\r\n [\r\n [[\"2020-01-01\"], [[2020, 1, 3]]],\r\n [[pd.NaT], [[np.NaN, np.NaN, np.NaN]]],\r\n [[\"2019-12-31\", \"2019-12-29\"], [[2020, 1, 2], [2019, 52, 7]]],\r\n [[\"2010-01-01\", pd.NaT], [[2009, 53, 5], [np.NaN, np.NaN, np.NaN]]],\r\n # see GH#36032\r\n [[\"2016-01-08\", \"2016-01-04\"], [[2016, 1, 5], [2016, 1, 1]]],\r\n [[\"2016-01-07\", \"2016-01-01\"], [[2016, 1, 4], [2015, 53, 5]]],\r\n ],\r\n )\r\n def test_isocalendar(self, input_series, expected_output):\r\n result = pd.to_datetime(Series(input_series)).dt.isocalendar()\r\n expected_frame = pd.DataFrame(\r\n expected_output, columns=[\"year\", \"week\", \"day\"], dtype=\"UInt32\"\r\n )\r\n tm.assert_frame_equal(result, expected_frame)\r\n\r\n\r\nclass TestSeriesPeriodValuesDtAccessor:\r\n @pytest.mark.parametrize(\r\n \"input_vals\",\r\n [\r\n [Period(\"2016-01\", freq=\"M\"), Period(\"2016-02\", freq=\"M\")],\r\n [Period(\"2016-01-01\", freq=\"D\"), Period(\"2016-01-02\", freq=\"D\")],\r\n [\r\n Period(\"2016-01-01 00:00:00\", freq=\"H\"),\r\n Period(\"2016-01-01 01:00:00\", freq=\"H\"),\r\n ],\r\n [\r\n Period(\"2016-01-01 00:00:00\", freq=\"M\"),\r\n Period(\"2016-01-01 00:01:00\", freq=\"M\"),\r\n ],\r\n [\r\n Period(\"2016-01-01 00:00:00\", freq=\"S\"),\r\n Period(\"2016-01-01 00:00:01\", freq=\"S\"),\r\n ],\r\n ],\r\n )\r\n def test_end_time_timevalues(self, input_vals):\r\n # GH#17157\r\n # Check that the time part of the Period is adjusted by end_time\r\n # when using the dt accessor on a Series\r\n input_vals = PeriodArray._from_sequence(np.asarray(input_vals))\r\n\r\n s = Series(input_vals)\r\n result = s.dt.end_time\r\n expected = s.apply(lambda x: x.end_time)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"input_vals\", [(\"2001\"), (\"NaT\")])\r\n def test_to_period(self, input_vals):\r\n # GH#21205\r\n expected = Series([input_vals], dtype=\"Period[D]\")\r\n result = Series([input_vals], dtype=\"datetime64[ns]\").dt.to_period(\"D\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_week_and_weekofyear_are_deprecated():\r\n # GH#33595 Deprecate week and weekofyear\r\n series = pd.to_datetime(Series([\"2020-01-01\"]))\r\n with tm.assert_produces_warning(FutureWarning):\r\n series.dt.week\r\n with tm.assert_produces_warning(FutureWarning):\r\n series.dt.weekofyear\r\n\r\n\r\ndef test_normalize_pre_epoch_dates():\r\n # GH: 36294\r\n s = pd.to_datetime(Series([\"1969-01-01 09:00:00\", \"2016-01-01 09:00:00\"]))\r\n result = s.dt.normalize()\r\n expected = pd.to_datetime(Series([\"1969-01-01\", \"2016-01-01\"]))\r\n tm.assert_series_equal(result, expected)\r\n",
"from datetime import date, datetime, timedelta\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas.core.dtypes.cast import infer_dtype_from_array, infer_dtype_from_scalar\r\nfrom pandas.core.dtypes.common import is_dtype_equal\r\n\r\nfrom pandas import (\r\n Categorical,\r\n Interval,\r\n Period,\r\n Series,\r\n Timedelta,\r\n Timestamp,\r\n date_range,\r\n)\r\n\r\n\r\[email protected](params=[True, False])\r\ndef pandas_dtype(request):\r\n return request.param\r\n\r\n\r\ndef test_infer_dtype_from_int_scalar(any_int_dtype):\r\n # Test that infer_dtype_from_scalar is\r\n # returning correct dtype for int and float.\r\n data = np.dtype(any_int_dtype).type(12)\r\n dtype, val = infer_dtype_from_scalar(data)\r\n assert dtype == type(data)\r\n\r\n\r\ndef test_infer_dtype_from_float_scalar(float_dtype):\r\n float_dtype = np.dtype(float_dtype).type\r\n data = float_dtype(12)\r\n\r\n dtype, val = infer_dtype_from_scalar(data)\r\n assert dtype == float_dtype\r\n\r\n\r\[email protected](\r\n \"data,exp_dtype\", [(12, np.int64), (np.float_(12), np.float64)]\r\n)\r\ndef test_infer_dtype_from_python_scalar(data, exp_dtype):\r\n dtype, val = infer_dtype_from_scalar(data)\r\n assert dtype == exp_dtype\r\n\r\n\r\[email protected](\"bool_val\", [True, False])\r\ndef test_infer_dtype_from_boolean(bool_val):\r\n dtype, val = infer_dtype_from_scalar(bool_val)\r\n assert dtype == np.bool_\r\n\r\n\r\ndef test_infer_dtype_from_complex(complex_dtype):\r\n data = np.dtype(complex_dtype).type(1)\r\n dtype, val = infer_dtype_from_scalar(data)\r\n assert dtype == np.complex_\r\n\r\n\r\[email protected](\r\n \"data\", [np.datetime64(1, \"ns\"), Timestamp(1), datetime(2000, 1, 1, 0, 0)]\r\n)\r\ndef test_infer_dtype_from_datetime(data):\r\n dtype, val = infer_dtype_from_scalar(data)\r\n assert dtype == \"M8[ns]\"\r\n\r\n\r\[email protected](\"data\", [np.timedelta64(1, \"ns\"), Timedelta(1), timedelta(1)])\r\ndef test_infer_dtype_from_timedelta(data):\r\n dtype, val = infer_dtype_from_scalar(data)\r\n assert dtype == \"m8[ns]\"\r\n\r\n\r\[email protected](\"freq\", [\"M\", \"D\"])\r\ndef test_infer_dtype_from_period(freq, pandas_dtype):\r\n p = Period(\"2011-01-01\", freq=freq)\r\n dtype, val = infer_dtype_from_scalar(p, pandas_dtype=pandas_dtype)\r\n\r\n if pandas_dtype:\r\n exp_dtype = f\"period[{freq}]\"\r\n else:\r\n exp_dtype = np.object_\r\n\r\n assert dtype == exp_dtype\r\n assert val == p\r\n\r\n\r\[email protected](\r\n \"data\", [date(2000, 1, 1), \"foo\", Timestamp(1, tz=\"US/Eastern\")]\r\n)\r\ndef test_infer_dtype_misc(data):\r\n dtype, val = infer_dtype_from_scalar(data)\r\n assert dtype == np.object_\r\n\r\n\r\[email protected](\"tz\", [\"UTC\", \"US/Eastern\", \"Asia/Tokyo\"])\r\ndef test_infer_from_scalar_tz(tz, pandas_dtype):\r\n dt = Timestamp(1, tz=tz)\r\n dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=pandas_dtype)\r\n\r\n if pandas_dtype:\r\n exp_dtype = f\"datetime64[ns, {tz}]\"\r\n exp_val = dt.value\r\n else:\r\n exp_dtype = np.object_\r\n exp_val = dt\r\n\r\n assert dtype == exp_dtype\r\n assert val == exp_val\r\n\r\n\r\[email protected](\r\n \"left, right, subtype\",\r\n [\r\n (0, 1, \"int64\"),\r\n (0.0, 1.0, \"float64\"),\r\n (Timestamp(0), Timestamp(1), \"datetime64[ns]\"),\r\n (Timestamp(0, tz=\"UTC\"), Timestamp(1, tz=\"UTC\"), \"datetime64[ns, UTC]\"),\r\n (Timedelta(0), Timedelta(1), \"timedelta64[ns]\"),\r\n ],\r\n)\r\ndef test_infer_from_interval(left, right, subtype, closed, pandas_dtype):\r\n # GH 30337\r\n interval = Interval(left, right, closed)\r\n result_dtype, 
result_value = infer_dtype_from_scalar(interval, pandas_dtype)\r\n expected_dtype = f\"interval[{subtype}]\" if pandas_dtype else np.object_\r\n assert result_dtype == expected_dtype\r\n assert result_value == interval\r\n\r\n\r\ndef test_infer_dtype_from_scalar_errors():\r\n msg = \"invalid ndarray passed to infer_dtype_from_scalar\"\r\n\r\n with pytest.raises(ValueError, match=msg):\r\n infer_dtype_from_scalar(np.array([1]))\r\n\r\n\r\[email protected](\r\n \"arr, expected, pandas_dtype\",\r\n [\r\n (\"foo\", np.object_, False),\r\n (b\"foo\", np.object_, False),\r\n (1, np.int_, False),\r\n (1.5, np.float_, False),\r\n ([1], np.int_, False),\r\n (np.array([1], dtype=np.int64), np.int64, False),\r\n ([np.nan, 1, \"\"], np.object_, False),\r\n (np.array([[1.0, 2.0]]), np.float_, False),\r\n (Categorical(list(\"aabc\")), np.object_, False),\r\n (Categorical([1, 2, 3]), np.int64, False),\r\n (Categorical(list(\"aabc\")), \"category\", True),\r\n (Categorical([1, 2, 3]), \"category\", True),\r\n (Timestamp(\"20160101\"), np.object_, False),\r\n (np.datetime64(\"2016-01-01\"), np.dtype(\"=M8[D]\"), False),\r\n (date_range(\"20160101\", periods=3), np.dtype(\"=M8[ns]\"), False),\r\n (\r\n date_range(\"20160101\", periods=3, tz=\"US/Eastern\"),\r\n \"datetime64[ns, US/Eastern]\",\r\n True,\r\n ),\r\n (Series([1.0, 2, 3]), np.float64, False),\r\n (Series(list(\"abc\")), np.object_, False),\r\n (\r\n Series(date_range(\"20160101\", periods=3, tz=\"US/Eastern\")),\r\n \"datetime64[ns, US/Eastern]\",\r\n True,\r\n ),\r\n ],\r\n)\r\ndef test_infer_dtype_from_array(arr, expected, pandas_dtype):\r\n dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype)\r\n assert is_dtype_equal(dtype, expected)\r\n",
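The row above tests the private dtype-inference helpers in pandas.core.dtypes.cast. A minimal sketch of how they are called follows; these are internal APIs, and the signatures (in particular the pandas_dtype flag) match the pandas version these tests target and may differ in later releases.

import numpy as np
import pandas as pd
# Private helpers; import path and keyword arguments are version-dependent.
from pandas.core.dtypes.cast import infer_dtype_from_array, infer_dtype_from_scalar

dtype, val = infer_dtype_from_scalar(pd.Timestamp("2016-01-01"))
print(dtype)   # M8[ns]: naive timestamps map to datetime64[ns]

dtype, val = infer_dtype_from_scalar(pd.Period("2011-01", freq="M"), pandas_dtype=True)
print(dtype)   # period[M]: extension dtypes are only returned when pandas_dtype=True

dtype, _ = infer_dtype_from_array(np.array([1, 2, 3], dtype=np.int64))
print(dtype)   # int64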
"import re\r\nfrom typing import TYPE_CHECKING, List, cast\r\nimport warnings\r\n\r\nimport numpy as np\r\n\r\nfrom pandas.util._decorators import Appender, deprecate_kwarg\r\n\r\nfrom pandas.core.dtypes.common import is_extension_array_dtype, is_list_like\r\nfrom pandas.core.dtypes.concat import concat_compat\r\nfrom pandas.core.dtypes.missing import notna\r\n\r\nfrom pandas.core.arrays import Categorical\r\nimport pandas.core.common as com\r\nfrom pandas.core.indexes.api import Index, MultiIndex\r\nfrom pandas.core.reshape.concat import concat\r\nfrom pandas.core.reshape.util import tile_compat\r\nfrom pandas.core.shared_docs import _shared_docs\r\nfrom pandas.core.tools.numeric import to_numeric\r\n\r\nif TYPE_CHECKING:\r\n from pandas import DataFrame, Series\r\n\r\n\r\n@Appender(_shared_docs[\"melt\"] % {\"caller\": \"pd.melt(df, \", \"other\": \"DataFrame.melt\"})\r\ndef melt(\r\n frame: \"DataFrame\",\r\n id_vars=None,\r\n value_vars=None,\r\n var_name=None,\r\n value_name=\"value\",\r\n col_level=None,\r\n ignore_index: bool = True,\r\n) -> \"DataFrame\":\r\n # If multiindex, gather names of columns on all level for checking presence\r\n # of `id_vars` and `value_vars`\r\n if isinstance(frame.columns, MultiIndex):\r\n cols = [x for c in frame.columns for x in c]\r\n else:\r\n cols = list(frame.columns)\r\n\r\n if value_name in frame.columns:\r\n warnings.warn(\r\n \"This dataframe has a column name that matches the 'value_name' column \"\r\n \"name of the resulting Dataframe. \"\r\n \"In the future this will raise an error, please set the 'value_name' \"\r\n \"parameter of DataFrame.melt to a unique name.\",\r\n FutureWarning,\r\n stacklevel=3,\r\n )\r\n\r\n if id_vars is not None:\r\n if not is_list_like(id_vars):\r\n id_vars = [id_vars]\r\n elif isinstance(frame.columns, MultiIndex) and not isinstance(id_vars, list):\r\n raise ValueError(\r\n \"id_vars must be a list of tuples when columns are a MultiIndex\"\r\n )\r\n else:\r\n # Check that `id_vars` are in frame\r\n id_vars = list(id_vars)\r\n missing = Index(com.flatten(id_vars)).difference(cols)\r\n if not missing.empty:\r\n raise KeyError(\r\n \"The following 'id_vars' are not present \"\r\n f\"in the DataFrame: {list(missing)}\"\r\n )\r\n else:\r\n id_vars = []\r\n\r\n if value_vars is not None:\r\n if not is_list_like(value_vars):\r\n value_vars = [value_vars]\r\n elif isinstance(frame.columns, MultiIndex) and not isinstance(value_vars, list):\r\n raise ValueError(\r\n \"value_vars must be a list of tuples when columns are a MultiIndex\"\r\n )\r\n else:\r\n value_vars = list(value_vars)\r\n # Check that `value_vars` are in frame\r\n missing = Index(com.flatten(value_vars)).difference(cols)\r\n if not missing.empty:\r\n raise KeyError(\r\n \"The following 'value_vars' are not present in \"\r\n f\"the DataFrame: {list(missing)}\"\r\n )\r\n if col_level is not None:\r\n idx = frame.columns.get_level_values(col_level).get_indexer(\r\n id_vars + value_vars\r\n )\r\n else:\r\n idx = frame.columns.get_indexer(id_vars + value_vars)\r\n frame = frame.iloc[:, idx]\r\n else:\r\n frame = frame.copy()\r\n\r\n if col_level is not None: # allow list or other?\r\n # frame is a copy\r\n frame.columns = frame.columns.get_level_values(col_level)\r\n\r\n if var_name is None:\r\n if isinstance(frame.columns, MultiIndex):\r\n if len(frame.columns.names) == len(set(frame.columns.names)):\r\n var_name = frame.columns.names\r\n else:\r\n var_name = [f\"variable_{i}\" for i in range(len(frame.columns.names))]\r\n else:\r\n var_name = [\r\n 
frame.columns.name if frame.columns.name is not None else \"variable\"\r\n ]\r\n if isinstance(var_name, str):\r\n var_name = [var_name]\r\n\r\n N, K = frame.shape\r\n K -= len(id_vars)\r\n\r\n mdata = {}\r\n for col in id_vars:\r\n id_data = frame.pop(col)\r\n if is_extension_array_dtype(id_data):\r\n id_data = cast(\"Series\", concat([id_data] * K, ignore_index=True))\r\n else:\r\n id_data = np.tile(id_data._values, K)\r\n mdata[col] = id_data\r\n\r\n mcolumns = id_vars + var_name + [value_name]\r\n\r\n mdata[value_name] = frame._values.ravel(\"F\")\r\n for i, col in enumerate(var_name):\r\n # asanyarray will keep the columns as an Index\r\n mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N)\r\n\r\n result = frame._constructor(mdata, columns=mcolumns)\r\n\r\n if not ignore_index:\r\n result.index = tile_compat(frame.index, K)\r\n\r\n return result\r\n\r\n\r\n@deprecate_kwarg(old_arg_name=\"label\", new_arg_name=None)\r\ndef lreshape(data: \"DataFrame\", groups, dropna: bool = True, label=None) -> \"DataFrame\":\r\n \"\"\"\r\n Reshape wide-format data to long. Generalized inverse of DataFrame.pivot.\r\n\r\n Accepts a dictionary, ``groups``, in which each key is a new column name\r\n and each value is a list of old column names that will be \"melted\" under\r\n the new column name as part of the reshape.\r\n\r\n Parameters\r\n ----------\r\n data : DataFrame\r\n The wide-format DataFrame.\r\n groups : dict\r\n {new_name : list_of_columns}.\r\n dropna : bool, default True\r\n Do not include columns whose entries are all NaN.\r\n label : None\r\n Not used.\r\n\r\n .. deprecated:: 1.0.0\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n Reshaped DataFrame.\r\n\r\n See Also\r\n --------\r\n melt : Unpivot a DataFrame from wide to long format, optionally leaving\r\n identifiers set.\r\n pivot : Create a spreadsheet-style pivot table as a DataFrame.\r\n DataFrame.pivot : Pivot without aggregation that can handle\r\n non-numeric data.\r\n DataFrame.pivot_table : Generalization of pivot that can handle\r\n duplicate values for one index/column pair.\r\n DataFrame.unstack : Pivot based on the index values instead of a\r\n column.\r\n wide_to_long : Wide panel to long format. Less flexible but more\r\n user-friendly than melt.\r\n\r\n Examples\r\n --------\r\n >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],\r\n ... 'team': ['Red Sox', 'Yankees'],\r\n ... 
'year1': [2007, 2007], 'year2': [2008, 2008]})\r\n >>> data\r\n hr1 hr2 team year1 year2\r\n 0 514 545 Red Sox 2007 2008\r\n 1 573 526 Yankees 2007 2008\r\n\r\n >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})\r\n team year hr\r\n 0 Red Sox 2007 514\r\n 1 Yankees 2007 573\r\n 2 Red Sox 2008 545\r\n 3 Yankees 2008 526\r\n \"\"\"\r\n if isinstance(groups, dict):\r\n keys = list(groups.keys())\r\n values = list(groups.values())\r\n else:\r\n keys, values = zip(*groups)\r\n\r\n all_cols = list(set.union(*[set(x) for x in values]))\r\n id_cols = list(data.columns.difference(all_cols))\r\n\r\n K = len(values[0])\r\n\r\n for seq in values:\r\n if len(seq) != K:\r\n raise ValueError(\"All column lists must be same length\")\r\n\r\n mdata = {}\r\n pivot_cols = []\r\n\r\n for target, names in zip(keys, values):\r\n to_concat = [data[col]._values for col in names]\r\n\r\n mdata[target] = concat_compat(to_concat)\r\n pivot_cols.append(target)\r\n\r\n for col in id_cols:\r\n mdata[col] = np.tile(data[col]._values, K)\r\n\r\n if dropna:\r\n mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)\r\n for c in pivot_cols:\r\n mask &= notna(mdata[c])\r\n if not mask.all():\r\n mdata = {k: v[mask] for k, v in mdata.items()}\r\n\r\n return data._constructor(mdata, columns=id_cols + pivot_cols)\r\n\r\n\r\ndef wide_to_long(\r\n df: \"DataFrame\", stubnames, i, j, sep: str = \"\", suffix: str = r\"\\d+\"\r\n) -> \"DataFrame\":\r\n r\"\"\"\r\n Wide panel to long format. Less flexible but more user-friendly than melt.\r\n\r\n With stubnames ['A', 'B'], this function expects to find one or more\r\n group of columns with format\r\n A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...\r\n You specify what you want to call this suffix in the resulting long format\r\n with `j` (for example `j='year'`)\r\n\r\n Each row of these wide variables are assumed to be uniquely identified by\r\n `i` (can be a single column name or a list of column names)\r\n\r\n All remaining variables in the data frame are left intact.\r\n\r\n Parameters\r\n ----------\r\n df : DataFrame\r\n The wide-format DataFrame.\r\n stubnames : str or list-like\r\n The stub name(s). The wide format variables are assumed to\r\n start with the stub names.\r\n i : str or list-like\r\n Column(s) to use as id variable(s).\r\n j : str\r\n The name of the sub-observation variable. What you wish to name your\r\n suffix in the long format.\r\n sep : str, default \"\"\r\n A character indicating the separation of the variable names\r\n in the wide format, to be stripped from the names in the long format.\r\n For example, if your column names are A-suffix1, A-suffix2, you\r\n can strip the hyphen by specifying `sep='-'`.\r\n suffix : str, default '\\\\d+'\r\n A regular expression capturing the wanted suffixes. '\\\\d+' captures\r\n numeric suffixes. Suffixes with no numbers could be specified with the\r\n negated character class '\\\\D+'. You can also further disambiguate\r\n suffixes, for example, if your wide variables are of the form A-one,\r\n B-two,.., and you have an unrelated column A-rating, you can ignore the\r\n last one by specifying `suffix='(!?one|two)'`. 
When all suffixes are\r\n numeric, they are cast to int64/float64.\r\n\r\n Returns\r\n -------\r\n DataFrame\r\n A DataFrame that contains each stub name as a variable, with new index\r\n (i, j).\r\n\r\n See Also\r\n --------\r\n melt : Unpivot a DataFrame from wide to long format, optionally leaving\r\n identifiers set.\r\n pivot : Create a spreadsheet-style pivot table as a DataFrame.\r\n DataFrame.pivot : Pivot without aggregation that can handle\r\n non-numeric data.\r\n DataFrame.pivot_table : Generalization of pivot that can handle\r\n duplicate values for one index/column pair.\r\n DataFrame.unstack : Pivot based on the index values instead of a\r\n column.\r\n\r\n Notes\r\n -----\r\n All extra variables are left untouched. This simply uses\r\n `pandas.melt` under the hood, but is hard-coded to \"do the right thing\"\r\n in a typical case.\r\n\r\n Examples\r\n --------\r\n >>> np.random.seed(123)\r\n >>> df = pd.DataFrame({\"A1970\" : {0 : \"a\", 1 : \"b\", 2 : \"c\"},\r\n ... \"A1980\" : {0 : \"d\", 1 : \"e\", 2 : \"f\"},\r\n ... \"B1970\" : {0 : 2.5, 1 : 1.2, 2 : .7},\r\n ... \"B1980\" : {0 : 3.2, 1 : 1.3, 2 : .1},\r\n ... \"X\" : dict(zip(range(3), np.random.randn(3)))\r\n ... })\r\n >>> df[\"id\"] = df.index\r\n >>> df\r\n A1970 A1980 B1970 B1980 X id\r\n 0 a d 2.5 3.2 -1.085631 0\r\n 1 b e 1.2 1.3 0.997345 1\r\n 2 c f 0.7 0.1 0.282978 2\r\n >>> pd.wide_to_long(df, [\"A\", \"B\"], i=\"id\", j=\"year\")\r\n ... # doctest: +NORMALIZE_WHITESPACE\r\n X A B\r\n id year\r\n 0 1970 -1.085631 a 2.5\r\n 1 1970 0.997345 b 1.2\r\n 2 1970 0.282978 c 0.7\r\n 0 1980 -1.085631 d 3.2\r\n 1 1980 0.997345 e 1.3\r\n 2 1980 0.282978 f 0.1\r\n\r\n With multiple id columns\r\n\r\n >>> df = pd.DataFrame({\r\n ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],\r\n ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],\r\n ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],\r\n ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]\r\n ... })\r\n >>> df\r\n famid birth ht1 ht2\r\n 0 1 1 2.8 3.4\r\n 1 1 2 2.9 3.8\r\n 2 1 3 2.2 2.9\r\n 3 2 1 2.0 3.2\r\n 4 2 2 1.8 2.8\r\n 5 2 3 1.9 2.4\r\n 6 3 1 2.2 3.3\r\n 7 3 2 2.3 3.4\r\n 8 3 3 2.1 2.9\r\n >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')\r\n >>> l\r\n ... # doctest: +NORMALIZE_WHITESPACE\r\n ht\r\n famid birth age\r\n 1 1 1 2.8\r\n 2 3.4\r\n 2 1 2.9\r\n 2 3.8\r\n 3 1 2.2\r\n 2 2.9\r\n 2 1 1 2.0\r\n 2 3.2\r\n 2 1 1.8\r\n 2 2.8\r\n 3 1 1.9\r\n 2 2.4\r\n 3 1 1 2.2\r\n 2 3.3\r\n 2 1 2.3\r\n 2 3.4\r\n 3 1 2.1\r\n 2 2.9\r\n\r\n Going from long back to wide just takes some creative use of `unstack`\r\n\r\n >>> w = l.unstack()\r\n >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)\r\n >>> w.reset_index()\r\n famid birth ht1 ht2\r\n 0 1 1 2.8 3.4\r\n 1 1 2 2.9 3.8\r\n 2 1 3 2.2 2.9\r\n 3 2 1 2.0 3.2\r\n 4 2 2 1.8 2.8\r\n 5 2 3 1.9 2.4\r\n 6 3 1 2.2 3.3\r\n 7 3 2 2.3 3.4\r\n 8 3 3 2.1 2.9\r\n\r\n Less wieldy column names are also handled\r\n\r\n >>> np.random.seed(0)\r\n >>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3),\r\n ... 'A(weekly)-2011': np.random.rand(3),\r\n ... 'B(weekly)-2010': np.random.rand(3),\r\n ... 'B(weekly)-2011': np.random.rand(3),\r\n ... 
'X' : np.random.randint(3, size=3)})\r\n >>> df['id'] = df.index\r\n >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\r\n A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id\r\n 0 0.548814 0.544883 0.437587 0.383442 0 0\r\n 1 0.715189 0.423655 0.891773 0.791725 1 1\r\n 2 0.602763 0.645894 0.963663 0.528895 1 2\r\n\r\n >>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id',\r\n ... j='year', sep='-')\r\n ... # doctest: +NORMALIZE_WHITESPACE\r\n X A(weekly) B(weekly)\r\n id year\r\n 0 2010 0 0.548814 0.437587\r\n 1 2010 1 0.715189 0.891773\r\n 2 2010 1 0.602763 0.963663\r\n 0 2011 0 0.544883 0.383442\r\n 1 2011 1 0.423655 0.791725\r\n 2 2011 1 0.645894 0.528895\r\n\r\n If we have many columns, we could also use a regex to find our\r\n stubnames and pass that list on to wide_to_long\r\n\r\n >>> stubnames = sorted(\r\n ... set([match[0] for match in df.columns.str.findall(\r\n ... r'[A-B]\\(.*\\)').values if match != []])\r\n ... )\r\n >>> list(stubnames)\r\n ['A(weekly)', 'B(weekly)']\r\n\r\n All of the above examples have integers as suffixes. It is possible to\r\n have non-integers as suffixes.\r\n\r\n >>> df = pd.DataFrame({\r\n ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],\r\n ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],\r\n ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],\r\n ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]\r\n ... })\r\n >>> df\r\n famid birth ht_one ht_two\r\n 0 1 1 2.8 3.4\r\n 1 1 2 2.9 3.8\r\n 2 1 3 2.2 2.9\r\n 3 2 1 2.0 3.2\r\n 4 2 2 1.8 2.8\r\n 5 2 3 1.9 2.4\r\n 6 3 1 2.2 3.3\r\n 7 3 2 2.3 3.4\r\n 8 3 3 2.1 2.9\r\n\r\n >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',\r\n ... sep='_', suffix=r'\\w+')\r\n >>> l\r\n ... # doctest: +NORMALIZE_WHITESPACE\r\n ht\r\n famid birth age\r\n 1 1 one 2.8\r\n two 3.4\r\n 2 one 2.9\r\n two 3.8\r\n 3 one 2.2\r\n two 2.9\r\n 2 1 one 2.0\r\n two 3.2\r\n 2 one 1.8\r\n two 2.8\r\n 3 one 1.9\r\n two 2.4\r\n 3 1 one 2.2\r\n two 3.3\r\n 2 one 2.3\r\n two 3.4\r\n 3 one 2.1\r\n two 2.9\r\n \"\"\"\r\n\r\n def get_var_names(df, stub: str, sep: str, suffix: str) -> List[str]:\r\n regex = fr\"^{re.escape(stub)}{re.escape(sep)}{suffix}$\"\r\n pattern = re.compile(regex)\r\n return [col for col in df.columns if pattern.match(col)]\r\n\r\n def melt_stub(df, stub: str, i, j, value_vars, sep: str):\r\n newdf = melt(\r\n df,\r\n id_vars=i,\r\n value_vars=value_vars,\r\n value_name=stub.rstrip(sep),\r\n var_name=j,\r\n )\r\n newdf[j] = Categorical(newdf[j])\r\n newdf[j] = newdf[j].str.replace(re.escape(stub + sep), \"\", regex=True)\r\n\r\n # GH17627 Cast numerics suffixes to int/float\r\n newdf[j] = to_numeric(newdf[j], errors=\"ignore\")\r\n\r\n return newdf.set_index(i + [j])\r\n\r\n if not is_list_like(stubnames):\r\n stubnames = [stubnames]\r\n else:\r\n stubnames = list(stubnames)\r\n\r\n if any(col in stubnames for col in df.columns):\r\n raise ValueError(\"stubname can't be identical to a column name\")\r\n\r\n if not is_list_like(i):\r\n i = [i]\r\n else:\r\n i = list(i)\r\n\r\n if df[i].duplicated().any():\r\n raise ValueError(\"the id variables need to uniquely identify each row\")\r\n\r\n value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames]\r\n\r\n value_vars_flattened = [e for sublist in value_vars for e in sublist]\r\n id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened))\r\n\r\n _melted = [melt_stub(df, s, i, j, v, sep) for s, v in zip(stubnames, value_vars)]\r\n melted = _melted[0].join(_melted[1:], how=\"outer\")\r\n\r\n if len(i) == 
1:\r\n new = df[id_vars].set_index(i).join(melted)\r\n return new\r\n\r\n new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j])\r\n\r\n return new\r\n",
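The row above is pandas/core/reshape/melt.py, defining melt, lreshape and wide_to_long. A minimal usage sketch of the public entry point follows; the toy frame is illustrative only.

import pandas as pd

# Unpivot two measurement columns into long format, keeping "id" as the identifier.
df = pd.DataFrame({"id": [1, 2], "height": [1.7, 1.8], "weight": [60, 72]})
long_df = pd.melt(df, id_vars="id", value_vars=["height", "weight"],
                  var_name="measure", value_name="reading")
print(long_df)   # one row per (id, measure) pair with a single "reading" column

lreshape and wide_to_long generalise the same unpivot to stub-named wide columns, as their docstrings in the row above show.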
"\"\"\"\r\nTests for TimedeltaIndex methods behaving like their Timedelta counterparts\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG\r\n\r\nfrom pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestVectorizedTimedelta:\r\n def test_tdi_total_seconds(self):\r\n # GH#10939\r\n # test index\r\n rng = timedelta_range(\"1 days, 10:11:12.100123456\", periods=2, freq=\"s\")\r\n expt = [\r\n 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9,\r\n 1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456.0 / 1e9,\r\n ]\r\n tm.assert_almost_equal(rng.total_seconds(), Index(expt))\r\n\r\n # test Series\r\n ser = Series(rng)\r\n s_expt = Series(expt, index=[0, 1])\r\n tm.assert_series_equal(ser.dt.total_seconds(), s_expt)\r\n\r\n # with nat\r\n ser[1] = np.nan\r\n s_expt = Series(\r\n [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9, np.nan],\r\n index=[0, 1],\r\n )\r\n tm.assert_series_equal(ser.dt.total_seconds(), s_expt)\r\n\r\n # with both nat\r\n ser = Series([np.nan, np.nan], dtype=\"timedelta64[ns]\")\r\n tm.assert_series_equal(\r\n ser.dt.total_seconds(), Series([np.nan, np.nan], index=[0, 1])\r\n )\r\n\r\n def test_tdi_round(self):\r\n td = timedelta_range(start=\"16801 days\", periods=5, freq=\"30Min\")\r\n elt = td[1]\r\n\r\n expected_rng = TimedeltaIndex(\r\n [\r\n Timedelta(\"16801 days 00:00:00\"),\r\n Timedelta(\"16801 days 00:00:00\"),\r\n Timedelta(\"16801 days 01:00:00\"),\r\n Timedelta(\"16801 days 02:00:00\"),\r\n Timedelta(\"16801 days 02:00:00\"),\r\n ]\r\n )\r\n expected_elt = expected_rng[1]\r\n\r\n tm.assert_index_equal(td.round(freq=\"H\"), expected_rng)\r\n assert elt.round(freq=\"H\") == expected_elt\r\n\r\n msg = INVALID_FREQ_ERR_MSG\r\n with pytest.raises(ValueError, match=msg):\r\n td.round(freq=\"foo\")\r\n with pytest.raises(ValueError, match=msg):\r\n elt.round(freq=\"foo\")\r\n\r\n msg = \"<MonthEnd> is a non-fixed frequency\"\r\n with pytest.raises(ValueError, match=msg):\r\n td.round(freq=\"M\")\r\n with pytest.raises(ValueError, match=msg):\r\n elt.round(freq=\"M\")\r\n\r\n @pytest.mark.parametrize(\r\n \"freq,msg\",\r\n [\r\n (\"Y\", \"<YearEnd: month=12> is a non-fixed frequency\"),\r\n (\"M\", \"<MonthEnd> is a non-fixed frequency\"),\r\n (\"foobar\", \"Invalid frequency: foobar\"),\r\n ],\r\n )\r\n def test_tdi_round_invalid(self, freq, msg):\r\n t1 = timedelta_range(\"1 days\", periods=3, freq=\"1 min 2 s 3 us\")\r\n\r\n with pytest.raises(ValueError, match=msg):\r\n t1.round(freq)\r\n with pytest.raises(ValueError, match=msg):\r\n # Same test for TimedeltaArray\r\n t1._data.round(freq)\r\n\r\n # TODO: de-duplicate with test_tdi_round\r\n def test_round(self):\r\n t1 = timedelta_range(\"1 days\", periods=3, freq=\"1 min 2 s 3 us\")\r\n t2 = -1 * t1\r\n t1a = timedelta_range(\"1 days\", periods=3, freq=\"1 min 2 s\")\r\n t1c = TimedeltaIndex([1, 1, 1], unit=\"D\")\r\n\r\n # note that negative times round DOWN! 
so don't give whole numbers\r\n for (freq, s1, s2) in [\r\n (\"N\", t1, t2),\r\n (\"U\", t1, t2),\r\n (\r\n \"L\",\r\n t1a,\r\n TimedeltaIndex(\r\n [\"-1 days +00:00:00\", \"-2 days +23:58:58\", \"-2 days +23:57:56\"]\r\n ),\r\n ),\r\n (\r\n \"S\",\r\n t1a,\r\n TimedeltaIndex(\r\n [\"-1 days +00:00:00\", \"-2 days +23:58:58\", \"-2 days +23:57:56\"]\r\n ),\r\n ),\r\n (\"12T\", t1c, TimedeltaIndex([\"-1 days\", \"-1 days\", \"-1 days\"])),\r\n (\"H\", t1c, TimedeltaIndex([\"-1 days\", \"-1 days\", \"-1 days\"])),\r\n (\"d\", t1c, TimedeltaIndex([-1, -1, -1], unit=\"D\")),\r\n ]:\r\n\r\n r1 = t1.round(freq)\r\n tm.assert_index_equal(r1, s1)\r\n r2 = t2.round(freq)\r\n tm.assert_index_equal(r2, s2)\r\n\r\n def test_components(self):\r\n rng = timedelta_range(\"1 days, 10:11:12\", periods=2, freq=\"s\")\r\n rng.components\r\n\r\n # with nat\r\n s = Series(rng)\r\n s[1] = np.nan\r\n\r\n result = s.dt.components\r\n assert not result.iloc[0].isna().all()\r\n assert result.iloc[1].isna().all()\r\n",
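The row above tests the vectorised Timedelta operations (total_seconds, round, components). A minimal sketch of those calls, using the public pandas API with illustrative values:

import pandas as pd

tdi = pd.timedelta_range("1 days 10:11:12", periods=2, freq="s")
print(tdi.total_seconds())             # float seconds per element
print(tdi.round("H"))                  # rounding needs a fixed frequency; "M"/"Y" raise ValueError
print(pd.Series(tdi).dt.components)    # DataFrame of days/hours/.../nanoseconds per row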
"import numpy as np\r\nimport pytest\r\n\r\nfrom pandas import CategoricalIndex\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestFillNA:\r\n def test_fillna_categorical(self):\r\n # GH#11343\r\n idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name=\"x\")\r\n # fill by value in categories\r\n exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name=\"x\")\r\n tm.assert_index_equal(idx.fillna(1.0), exp)\r\n\r\n # fill by value not in categories raises ValueError\r\n msg = \"Cannot setitem on a Categorical with a new category\"\r\n with pytest.raises(ValueError, match=msg):\r\n idx.fillna(2.0)\r\n\r\n def test_fillna_copies_with_no_nas(self):\r\n # Nothing to fill, should still get a copy\r\n ci = CategoricalIndex([0, 1, 1])\r\n cat = ci._data\r\n result = ci.fillna(0)\r\n assert result._values._ndarray is not cat._ndarray\r\n assert result._values._ndarray.base is None\r\n\r\n # Same check directly on the Categorical object\r\n result = cat.fillna(0)\r\n assert result._ndarray is not cat._ndarray\r\n assert result._ndarray.base is None\r\n\r\n def test_fillna_validates_with_no_nas(self):\r\n # We validate the fill value even if fillna is a no-op\r\n ci = CategoricalIndex([2, 3, 3])\r\n cat = ci._data\r\n\r\n msg = \"Cannot setitem on a Categorical with a new category\"\r\n with pytest.raises(ValueError, match=msg):\r\n ci.fillna(False)\r\n\r\n # Same check directly on the Categorical\r\n with pytest.raises(ValueError, match=msg):\r\n cat.fillna(False)\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import CategoricalIndex, Index, IntervalIndex, Timestamp\r\nimport pandas._testing as tm\r\n\r\n\r\nclass TestTake:\r\n def test_take_fill_value(self):\r\n # GH 12631\r\n\r\n # numeric category\r\n idx = CategoricalIndex([1, 2, 3], name=\"xxx\")\r\n result = idx.take(np.array([1, 0, -1]))\r\n expected = CategoricalIndex([2, 1, 3], name=\"xxx\")\r\n tm.assert_index_equal(result, expected)\r\n tm.assert_categorical_equal(result.values, expected.values)\r\n\r\n # fill_value\r\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\r\n expected = CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name=\"xxx\")\r\n tm.assert_index_equal(result, expected)\r\n tm.assert_categorical_equal(result.values, expected.values)\r\n\r\n # allow_fill=False\r\n result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\r\n expected = CategoricalIndex([2, 1, 3], name=\"xxx\")\r\n tm.assert_index_equal(result, expected)\r\n tm.assert_categorical_equal(result.values, expected.values)\r\n\r\n # object category\r\n idx = CategoricalIndex(\r\n list(\"CBA\"), categories=list(\"ABC\"), ordered=True, name=\"xxx\"\r\n )\r\n result = idx.take(np.array([1, 0, -1]))\r\n expected = CategoricalIndex(\r\n list(\"BCA\"), categories=list(\"ABC\"), ordered=True, name=\"xxx\"\r\n )\r\n tm.assert_index_equal(result, expected)\r\n tm.assert_categorical_equal(result.values, expected.values)\r\n\r\n # fill_value\r\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\r\n expected = CategoricalIndex(\r\n [\"B\", \"C\", np.nan], categories=list(\"ABC\"), ordered=True, name=\"xxx\"\r\n )\r\n tm.assert_index_equal(result, expected)\r\n tm.assert_categorical_equal(result.values, expected.values)\r\n\r\n # allow_fill=False\r\n result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\r\n expected = CategoricalIndex(\r\n list(\"BCA\"), categories=list(\"ABC\"), ordered=True, name=\"xxx\"\r\n )\r\n tm.assert_index_equal(result, expected)\r\n tm.assert_categorical_equal(result.values, expected.values)\r\n\r\n msg = (\r\n \"When allow_fill=True and fill_value is not None, \"\r\n \"all indices must be >= -1\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n idx.take(np.array([1, 0, -2]), fill_value=True)\r\n with pytest.raises(ValueError, match=msg):\r\n idx.take(np.array([1, 0, -5]), fill_value=True)\r\n\r\n msg = \"index -5 is out of bounds for (axis 0 with )?size 3\"\r\n with pytest.raises(IndexError, match=msg):\r\n idx.take(np.array([1, -5]))\r\n\r\n def test_take_fill_value_datetime(self):\r\n\r\n # datetime category\r\n idx = pd.DatetimeIndex([\"2011-01-01\", \"2011-02-01\", \"2011-03-01\"], name=\"xxx\")\r\n idx = CategoricalIndex(idx)\r\n result = idx.take(np.array([1, 0, -1]))\r\n expected = pd.DatetimeIndex(\r\n [\"2011-02-01\", \"2011-01-01\", \"2011-03-01\"], name=\"xxx\"\r\n )\r\n expected = CategoricalIndex(expected)\r\n tm.assert_index_equal(result, expected)\r\n\r\n # fill_value\r\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\r\n expected = pd.DatetimeIndex([\"2011-02-01\", \"2011-01-01\", \"NaT\"], name=\"xxx\")\r\n exp_cats = pd.DatetimeIndex([\"2011-01-01\", \"2011-02-01\", \"2011-03-01\"])\r\n expected = CategoricalIndex(expected, categories=exp_cats)\r\n tm.assert_index_equal(result, expected)\r\n\r\n # allow_fill=False\r\n result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\r\n expected = pd.DatetimeIndex(\r\n [\"2011-02-01\", \"2011-01-01\", 
\"2011-03-01\"], name=\"xxx\"\r\n )\r\n expected = CategoricalIndex(expected)\r\n tm.assert_index_equal(result, expected)\r\n\r\n msg = (\r\n \"When allow_fill=True and fill_value is not None, \"\r\n \"all indices must be >= -1\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n idx.take(np.array([1, 0, -2]), fill_value=True)\r\n with pytest.raises(ValueError, match=msg):\r\n idx.take(np.array([1, 0, -5]), fill_value=True)\r\n\r\n msg = \"index -5 is out of bounds for (axis 0 with )?size 3\"\r\n with pytest.raises(IndexError, match=msg):\r\n idx.take(np.array([1, -5]))\r\n\r\n def test_take_invalid_kwargs(self):\r\n idx = CategoricalIndex([1, 2, 3], name=\"foo\")\r\n indices = [1, 0, -1]\r\n\r\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\r\n with pytest.raises(TypeError, match=msg):\r\n idx.take(indices, foo=2)\r\n\r\n msg = \"the 'out' parameter is not supported\"\r\n with pytest.raises(ValueError, match=msg):\r\n idx.take(indices, out=indices)\r\n\r\n msg = \"the 'mode' parameter is not supported\"\r\n with pytest.raises(ValueError, match=msg):\r\n idx.take(indices, mode=\"clip\")\r\n\r\n\r\nclass TestGetLoc:\r\n def test_get_loc(self):\r\n # GH 12531\r\n cidx1 = CategoricalIndex(list(\"abcde\"), categories=list(\"edabc\"))\r\n idx1 = Index(list(\"abcde\"))\r\n assert cidx1.get_loc(\"a\") == idx1.get_loc(\"a\")\r\n assert cidx1.get_loc(\"e\") == idx1.get_loc(\"e\")\r\n\r\n for i in [cidx1, idx1]:\r\n with pytest.raises(KeyError, match=\"'NOT-EXIST'\"):\r\n i.get_loc(\"NOT-EXIST\")\r\n\r\n # non-unique\r\n cidx2 = CategoricalIndex(list(\"aacded\"), categories=list(\"edabc\"))\r\n idx2 = Index(list(\"aacded\"))\r\n\r\n # results in bool array\r\n res = cidx2.get_loc(\"d\")\r\n tm.assert_numpy_array_equal(res, idx2.get_loc(\"d\"))\r\n tm.assert_numpy_array_equal(\r\n res, np.array([False, False, False, True, False, True])\r\n )\r\n # unique element results in scalar\r\n res = cidx2.get_loc(\"e\")\r\n assert res == idx2.get_loc(\"e\")\r\n assert res == 4\r\n\r\n for i in [cidx2, idx2]:\r\n with pytest.raises(KeyError, match=\"'NOT-EXIST'\"):\r\n i.get_loc(\"NOT-EXIST\")\r\n\r\n # non-unique, sliceable\r\n cidx3 = CategoricalIndex(list(\"aabbb\"), categories=list(\"abc\"))\r\n idx3 = Index(list(\"aabbb\"))\r\n\r\n # results in slice\r\n res = cidx3.get_loc(\"a\")\r\n assert res == idx3.get_loc(\"a\")\r\n assert res == slice(0, 2, None)\r\n\r\n res = cidx3.get_loc(\"b\")\r\n assert res == idx3.get_loc(\"b\")\r\n assert res == slice(2, 5, None)\r\n\r\n for i in [cidx3, idx3]:\r\n with pytest.raises(KeyError, match=\"'c'\"):\r\n i.get_loc(\"c\")\r\n\r\n def test_get_loc_unique(self):\r\n cidx = CategoricalIndex(list(\"abc\"))\r\n result = cidx.get_loc(\"b\")\r\n assert result == 1\r\n\r\n def test_get_loc_monotonic_nonunique(self):\r\n cidx = CategoricalIndex(list(\"abbc\"))\r\n result = cidx.get_loc(\"b\")\r\n expected = slice(1, 3, None)\r\n assert result == expected\r\n\r\n def test_get_loc_nonmonotonic_nonunique(self):\r\n cidx = CategoricalIndex(list(\"abcb\"))\r\n result = cidx.get_loc(\"b\")\r\n expected = np.array([False, True, False, True], dtype=bool)\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n\r\nclass TestGetIndexer:\r\n def test_get_indexer_base(self):\r\n # Determined by cat ordering.\r\n idx = CategoricalIndex(list(\"cab\"), categories=list(\"cab\"))\r\n expected = np.arange(len(idx), dtype=np.intp)\r\n\r\n actual = idx.get_indexer(idx)\r\n tm.assert_numpy_array_equal(expected, actual)\r\n\r\n with pytest.raises(ValueError, match=\"Invalid 
fill method\"):\r\n idx.get_indexer(idx, method=\"invalid\")\r\n\r\n def test_get_indexer_non_unique(self):\r\n np.random.seed(123456789)\r\n\r\n ci = CategoricalIndex(list(\"aabbca\"), categories=list(\"cab\"), ordered=False)\r\n oidx = Index(np.array(ci))\r\n\r\n for n in [1, 2, 5, len(ci)]:\r\n finder = oidx[np.random.randint(0, len(ci), size=n)]\r\n expected = oidx.get_indexer_non_unique(finder)[0]\r\n\r\n actual = ci.get_indexer(finder)\r\n tm.assert_numpy_array_equal(expected, actual)\r\n\r\n # see gh-17323\r\n #\r\n # Even when indexer is equal to the\r\n # members in the index, we should\r\n # respect duplicates instead of taking\r\n # the fast-track path.\r\n for finder in [list(\"aabbca\"), list(\"aababca\")]:\r\n expected = oidx.get_indexer_non_unique(finder)[0]\r\n\r\n actual = ci.get_indexer(finder)\r\n tm.assert_numpy_array_equal(expected, actual)\r\n\r\n def test_get_indexer(self):\r\n\r\n idx1 = CategoricalIndex(list(\"aabcde\"), categories=list(\"edabc\"))\r\n idx2 = CategoricalIndex(list(\"abf\"))\r\n\r\n for indexer in [idx2, list(\"abf\"), Index(list(\"abf\"))]:\r\n r1 = idx1.get_indexer(idx2)\r\n tm.assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))\r\n\r\n msg = \"method pad not yet implemented for CategoricalIndex\"\r\n with pytest.raises(NotImplementedError, match=msg):\r\n idx2.get_indexer(idx1, method=\"pad\")\r\n msg = \"method backfill not yet implemented for CategoricalIndex\"\r\n with pytest.raises(NotImplementedError, match=msg):\r\n idx2.get_indexer(idx1, method=\"backfill\")\r\n\r\n msg = \"method nearest not yet implemented for CategoricalIndex\"\r\n with pytest.raises(NotImplementedError, match=msg):\r\n idx2.get_indexer(idx1, method=\"nearest\")\r\n\r\n def test_get_indexer_array(self):\r\n arr = np.array(\r\n [Timestamp(\"1999-12-31 00:00:00\"), Timestamp(\"2000-12-31 00:00:00\")],\r\n dtype=object,\r\n )\r\n cats = [Timestamp(\"1999-12-31 00:00:00\"), Timestamp(\"2000-12-31 00:00:00\")]\r\n ci = CategoricalIndex(cats, categories=cats, ordered=False, dtype=\"category\")\r\n result = ci.get_indexer(arr)\r\n expected = np.array([0, 1], dtype=\"intp\")\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n def test_get_indexer_same_categories_same_order(self):\r\n ci = CategoricalIndex([\"a\", \"b\"], categories=[\"a\", \"b\"])\r\n\r\n result = ci.get_indexer(CategoricalIndex([\"b\", \"b\"], categories=[\"a\", \"b\"]))\r\n expected = np.array([1, 1], dtype=\"intp\")\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n def test_get_indexer_same_categories_different_order(self):\r\n # https://github.com/pandas-dev/pandas/issues/19551\r\n ci = CategoricalIndex([\"a\", \"b\"], categories=[\"a\", \"b\"])\r\n\r\n result = ci.get_indexer(CategoricalIndex([\"b\", \"b\"], categories=[\"b\", \"a\"]))\r\n expected = np.array([1, 1], dtype=\"intp\")\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n\r\nclass TestWhere:\r\n @pytest.mark.parametrize(\"klass\", [list, tuple, np.array, pd.Series])\r\n def test_where(self, klass):\r\n i = CategoricalIndex(list(\"aabbca\"), categories=list(\"cab\"), ordered=False)\r\n cond = [True] * len(i)\r\n expected = i\r\n result = i.where(klass(cond))\r\n tm.assert_index_equal(result, expected)\r\n\r\n cond = [False] + [True] * (len(i) - 1)\r\n expected = CategoricalIndex([np.nan] + i[1:].tolist(), categories=i.categories)\r\n result = i.where(klass(cond))\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_where_non_categories(self):\r\n ci = CategoricalIndex([\"a\", \"b\", \"c\", \"d\"])\r\n mask 
= np.array([True, False, True, False])\r\n\r\n msg = \"Cannot setitem on a Categorical with a new category\"\r\n with pytest.raises(ValueError, match=msg):\r\n ci.where(mask, 2)\r\n\r\n with pytest.raises(ValueError, match=msg):\r\n # Test the Categorical method directly\r\n ci._data.where(mask, 2)\r\n\r\n\r\nclass TestContains:\r\n def test_contains(self):\r\n\r\n ci = CategoricalIndex(list(\"aabbca\"), categories=list(\"cabdef\"), ordered=False)\r\n\r\n assert \"a\" in ci\r\n assert \"z\" not in ci\r\n assert \"e\" not in ci\r\n assert np.nan not in ci\r\n\r\n # assert codes NOT in index\r\n assert 0 not in ci\r\n assert 1 not in ci\r\n\r\n def test_contains_nan(self):\r\n ci = CategoricalIndex(list(\"aabbca\") + [np.nan], categories=list(\"cabdef\"))\r\n assert np.nan in ci\r\n\r\n @pytest.mark.parametrize(\"unwrap\", [True, False])\r\n def test_contains_na_dtype(self, unwrap):\r\n dti = pd.date_range(\"2016-01-01\", periods=100).insert(0, pd.NaT)\r\n pi = dti.to_period(\"D\")\r\n tdi = dti - dti[-1]\r\n ci = CategoricalIndex(dti)\r\n\r\n obj = ci\r\n if unwrap:\r\n obj = ci._data\r\n\r\n assert np.nan in obj\r\n assert None in obj\r\n assert pd.NaT in obj\r\n assert np.datetime64(\"NaT\") in obj\r\n assert np.timedelta64(\"NaT\") not in obj\r\n\r\n obj2 = CategoricalIndex(tdi)\r\n if unwrap:\r\n obj2 = obj2._data\r\n\r\n assert np.nan in obj2\r\n assert None in obj2\r\n assert pd.NaT in obj2\r\n assert np.datetime64(\"NaT\") not in obj2\r\n assert np.timedelta64(\"NaT\") in obj2\r\n\r\n obj3 = CategoricalIndex(pi)\r\n if unwrap:\r\n obj3 = obj3._data\r\n\r\n assert np.nan in obj3\r\n assert None in obj3\r\n assert pd.NaT in obj3\r\n assert np.datetime64(\"NaT\") not in obj3\r\n assert np.timedelta64(\"NaT\") not in obj3\r\n\r\n @pytest.mark.parametrize(\r\n \"item, expected\",\r\n [\r\n (pd.Interval(0, 1), True),\r\n (1.5, True),\r\n (pd.Interval(0.5, 1.5), False),\r\n (\"a\", False),\r\n (Timestamp(1), False),\r\n (pd.Timedelta(1), False),\r\n ],\r\n ids=str,\r\n )\r\n def test_contains_interval(self, item, expected):\r\n # GH 23705\r\n ci = CategoricalIndex(IntervalIndex.from_breaks(range(3)))\r\n result = item in ci\r\n assert result is expected\r\n\r\n def test_contains_list(self):\r\n # GH#21729\r\n idx = CategoricalIndex([1, 2, 3])\r\n\r\n assert \"a\" not in idx\r\n\r\n with pytest.raises(TypeError, match=\"unhashable type\"):\r\n [\"a\"] in idx\r\n\r\n with pytest.raises(TypeError, match=\"unhashable type\"):\r\n [\"a\", \"b\"] in idx\r\n",
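The row above exercises CategoricalIndex indexing (take with fill values, get_loc, get_indexer, where, __contains__). A minimal sketch of the core calls, with an illustrative index:

import numpy as np
import pandas as pd

ci = pd.CategoricalIndex(["c", "a", "b"], categories=["a", "b", "c"])
print(ci.take(np.array([1, 0, -1]), fill_value=np.nan))   # -1 is filled with NaN when allow_fill is on
print(ci.get_loc("b"))                                    # integer position for a unique label
print("a" in ci, "z" in ci)                               # membership checks values, not codes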
"\"\"\" test to_datetime \"\"\"\r\n\r\nimport calendar\r\nfrom collections import deque\r\nfrom datetime import datetime, timedelta\r\nimport locale\r\n\r\nfrom dateutil.parser import parse\r\nfrom dateutil.tz.tz import tzoffset\r\nimport numpy as np\r\nimport pytest\r\nimport pytz\r\n\r\nfrom pandas._libs import tslib\r\nfrom pandas._libs.tslibs import iNaT, parsing\r\nfrom pandas.errors import OutOfBoundsDatetime\r\nimport pandas.util._test_decorators as td\r\n\r\nfrom pandas.core.dtypes.common import is_datetime64_ns_dtype\r\n\r\nimport pandas as pd\r\nfrom pandas import (\r\n DataFrame,\r\n DatetimeIndex,\r\n Index,\r\n NaT,\r\n Series,\r\n Timestamp,\r\n date_range,\r\n isna,\r\n to_datetime,\r\n)\r\nimport pandas._testing as tm\r\nfrom pandas.core.arrays import DatetimeArray\r\nfrom pandas.core.tools import datetimes as tools\r\n\r\n\r\nclass TestTimeConversionFormats:\r\n @pytest.mark.parametrize(\"readonly\", [True, False])\r\n def test_to_datetime_readonly(self, readonly):\r\n # GH#34857\r\n arr = np.array([], dtype=object)\r\n if readonly:\r\n arr.setflags(write=False)\r\n result = to_datetime(arr)\r\n expected = to_datetime([])\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_format(self, cache):\r\n values = [\"1/1/2000\", \"1/2/2000\", \"1/3/2000\"]\r\n\r\n results1 = [Timestamp(\"20000101\"), Timestamp(\"20000201\"), Timestamp(\"20000301\")]\r\n results2 = [Timestamp(\"20000101\"), Timestamp(\"20000102\"), Timestamp(\"20000103\")]\r\n for vals, expecteds in [\r\n (values, (Index(results1), Index(results2))),\r\n (Series(values), (Series(results1), Series(results2))),\r\n (values[0], (results1[0], results2[0])),\r\n (values[1], (results1[1], results2[1])),\r\n (values[2], (results1[2], results2[2])),\r\n ]:\r\n\r\n for i, fmt in enumerate([\"%d/%m/%Y\", \"%m/%d/%Y\"]):\r\n result = to_datetime(vals, format=fmt, cache=cache)\r\n expected = expecteds[i]\r\n\r\n if isinstance(expected, Series):\r\n tm.assert_series_equal(result, Series(expected))\r\n elif isinstance(expected, Timestamp):\r\n assert result == expected\r\n else:\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_format_YYYYMMDD(self, cache):\r\n s = Series([19801222, 19801222] + [19810105] * 5)\r\n expected = Series([Timestamp(x) for x in s.apply(str)])\r\n\r\n result = to_datetime(s, format=\"%Y%m%d\", cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = to_datetime(s.apply(str), format=\"%Y%m%d\", cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # with NaT\r\n expected = Series(\r\n [Timestamp(\"19801222\"), Timestamp(\"19801222\")] + [Timestamp(\"19810105\")] * 5\r\n )\r\n expected[2] = np.nan\r\n s[2] = np.nan\r\n\r\n result = to_datetime(s, format=\"%Y%m%d\", cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # string with NaT\r\n s = s.apply(str)\r\n s[2] = \"nat\"\r\n result = to_datetime(s, format=\"%Y%m%d\", cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # coercion\r\n # GH 7930\r\n s = Series([20121231, 20141231, 99991231])\r\n result = pd.to_datetime(s, format=\"%Y%m%d\", errors=\"ignore\", cache=cache)\r\n expected = Series(\r\n [datetime(2012, 12, 31), datetime(2014, 12, 31), datetime(9999, 12, 31)],\r\n dtype=object,\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = pd.to_datetime(s, format=\"%Y%m%d\", errors=\"coerce\", cache=cache)\r\n expected = 
Series([\"20121231\", \"20141231\", \"NaT\"], dtype=\"M8[ns]\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"input_s\",\r\n [\r\n # Null values with Strings\r\n [\"19801222\", \"20010112\", None],\r\n [\"19801222\", \"20010112\", np.nan],\r\n [\"19801222\", \"20010112\", pd.NaT],\r\n [\"19801222\", \"20010112\", \"NaT\"],\r\n # Null values with Integers\r\n [19801222, 20010112, None],\r\n [19801222, 20010112, np.nan],\r\n [19801222, 20010112, pd.NaT],\r\n [19801222, 20010112, \"NaT\"],\r\n ],\r\n )\r\n def test_to_datetime_format_YYYYMMDD_with_none(self, input_s):\r\n # GH 30011\r\n # format='%Y%m%d'\r\n # with None\r\n expected = Series([Timestamp(\"19801222\"), Timestamp(\"20010112\"), pd.NaT])\r\n result = Series(pd.to_datetime(input_s, format=\"%Y%m%d\"))\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"input_s, expected\",\r\n [\r\n # NaN before strings with invalid date values\r\n [\r\n Series([\"19801222\", np.nan, \"20010012\", \"10019999\"]),\r\n Series([Timestamp(\"19801222\"), np.nan, np.nan, np.nan]),\r\n ],\r\n # NaN after strings with invalid date values\r\n [\r\n Series([\"19801222\", \"20010012\", \"10019999\", np.nan]),\r\n Series([Timestamp(\"19801222\"), np.nan, np.nan, np.nan]),\r\n ],\r\n # NaN before integers with invalid date values\r\n [\r\n Series([20190813, np.nan, 20010012, 20019999]),\r\n Series([Timestamp(\"20190813\"), np.nan, np.nan, np.nan]),\r\n ],\r\n # NaN after integers with invalid date values\r\n [\r\n Series([20190813, 20010012, np.nan, 20019999]),\r\n Series([Timestamp(\"20190813\"), np.nan, np.nan, np.nan]),\r\n ],\r\n ],\r\n )\r\n def test_to_datetime_format_YYYYMMDD_overflow(self, input_s, expected):\r\n # GH 25512\r\n # format='%Y%m%d', errors='coerce'\r\n result = pd.to_datetime(input_s, format=\"%Y%m%d\", errors=\"coerce\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_format_integer(self, cache):\r\n # GH 10178\r\n s = Series([2000, 2001, 2002])\r\n expected = Series([Timestamp(x) for x in s.apply(str)])\r\n\r\n result = to_datetime(s, format=\"%Y\", cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series([200001, 200105, 200206])\r\n expected = Series([Timestamp(x[:4] + \"-\" + x[4:]) for x in s.apply(str)])\r\n\r\n result = to_datetime(s, format=\"%Y%m\", cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"int_date, expected\",\r\n [\r\n # valid date, length == 8\r\n [20121030, datetime(2012, 10, 30)],\r\n # short valid date, length == 6\r\n [199934, datetime(1999, 3, 4)],\r\n # long integer date partially parsed to datetime(2012,1,1), length > 8\r\n [2012010101, 2012010101],\r\n # invalid date partially parsed to datetime(2012,9,9), length == 8\r\n [20129930, 20129930],\r\n # short integer date partially parsed to datetime(2012,9,9), length < 8\r\n [2012993, 2012993],\r\n # short invalid date, length == 4\r\n [2121, 2121],\r\n ],\r\n )\r\n def test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected):\r\n # GH 26583\r\n result = to_datetime(int_date, format=\"%Y%m%d\", errors=\"ignore\")\r\n assert result == expected\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_format_microsecond(self, cache):\r\n\r\n # these are locale dependent\r\n lang, _ = locale.getlocale()\r\n month_abbr = calendar.month_abbr[4]\r\n val = f\"01-{month_abbr}-2011 00:00:01.978\"\r\n\r\n format = 
\"%d-%b-%Y %H:%M:%S.%f\"\r\n result = to_datetime(val, format=format, cache=cache)\r\n exp = datetime.strptime(val, format)\r\n assert result == exp\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_format_time(self, cache):\r\n data = [\r\n [\"01/10/2010 15:20\", \"%m/%d/%Y %H:%M\", Timestamp(\"2010-01-10 15:20\")],\r\n [\"01/10/2010 05:43\", \"%m/%d/%Y %I:%M\", Timestamp(\"2010-01-10 05:43\")],\r\n [\r\n \"01/10/2010 13:56:01\",\r\n \"%m/%d/%Y %H:%M:%S\",\r\n Timestamp(\"2010-01-10 13:56:01\"),\r\n ] # ,\r\n # ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',\r\n # Timestamp('2010-01-10 20:14')],\r\n # ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',\r\n # Timestamp('2010-01-10 07:40')],\r\n # ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',\r\n # Timestamp('2010-01-10 09:12:56')]\r\n ]\r\n for s, format, dt in data:\r\n assert to_datetime(s, format=format, cache=cache) == dt\r\n\r\n @td.skip_if_has_locale\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_with_non_exact(self, cache):\r\n # GH 10834\r\n # 8904\r\n # exact kw\r\n s = Series(\r\n [\"19MAY11\", \"foobar19MAY11\", \"19MAY11:00:00:00\", \"19MAY11 00:00:00Z\"]\r\n )\r\n result = to_datetime(s, format=\"%d%b%y\", exact=False, cache=cache)\r\n expected = to_datetime(\r\n s.str.extract(r\"(\\d+\\w+\\d+)\", expand=False), format=\"%d%b%y\", cache=cache\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_parse_nanoseconds_with_formula(self, cache):\r\n\r\n # GH8989\r\n # truncating the nanoseconds when a format was provided\r\n for v in [\r\n \"2012-01-01 09:00:00.000000001\",\r\n \"2012-01-01 09:00:00.000001\",\r\n \"2012-01-01 09:00:00.001\",\r\n \"2012-01-01 09:00:00.001000\",\r\n \"2012-01-01 09:00:00.001000000\",\r\n ]:\r\n expected = pd.to_datetime(v, cache=cache)\r\n result = pd.to_datetime(v, format=\"%Y-%m-%d %H:%M:%S.%f\", cache=cache)\r\n assert result == expected\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_format_weeks(self, cache):\r\n data = [\r\n [\"2009324\", \"%Y%W%w\", Timestamp(\"2009-08-13\")],\r\n [\"2013020\", \"%Y%U%w\", Timestamp(\"2013-01-13\")],\r\n ]\r\n for s, format, dt in data:\r\n assert to_datetime(s, format=format, cache=cache) == dt\r\n\r\n @pytest.mark.parametrize(\r\n \"fmt,dates,expected_dates\",\r\n [\r\n [\r\n \"%Y-%m-%d %H:%M:%S %Z\",\r\n [\"2010-01-01 12:00:00 UTC\"] * 2,\r\n [Timestamp(\"2010-01-01 12:00:00\", tz=\"UTC\")] * 2,\r\n ],\r\n [\r\n \"%Y-%m-%d %H:%M:%S %Z\",\r\n [\r\n \"2010-01-01 12:00:00 UTC\",\r\n \"2010-01-01 12:00:00 GMT\",\r\n \"2010-01-01 12:00:00 US/Pacific\",\r\n ],\r\n [\r\n Timestamp(\"2010-01-01 12:00:00\", tz=\"UTC\"),\r\n Timestamp(\"2010-01-01 12:00:00\", tz=\"GMT\"),\r\n Timestamp(\"2010-01-01 12:00:00\", tz=\"US/Pacific\"),\r\n ],\r\n ],\r\n [\r\n \"%Y-%m-%d %H:%M:%S%z\",\r\n [\"2010-01-01 12:00:00+0100\"] * 2,\r\n [Timestamp(\"2010-01-01 12:00:00\", tzinfo=pytz.FixedOffset(60))] * 2,\r\n ],\r\n [\r\n \"%Y-%m-%d %H:%M:%S %z\",\r\n [\"2010-01-01 12:00:00 +0100\"] * 2,\r\n [Timestamp(\"2010-01-01 12:00:00\", tzinfo=pytz.FixedOffset(60))] * 2,\r\n ],\r\n [\r\n \"%Y-%m-%d %H:%M:%S %z\",\r\n [\"2010-01-01 12:00:00 +0100\", \"2010-01-01 12:00:00 -0100\"],\r\n [\r\n Timestamp(\"2010-01-01 12:00:00\", tzinfo=pytz.FixedOffset(60)),\r\n Timestamp(\"2010-01-01 12:00:00\", tzinfo=pytz.FixedOffset(-60)),\r\n ],\r\n ],\r\n [\r\n \"%Y-%m-%d %H:%M:%S %z\",\r\n [\"2010-01-01 12:00:00 Z\", \"2010-01-01 12:00:00 
Z\"],\r\n [\r\n Timestamp(\r\n \"2010-01-01 12:00:00\", tzinfo=pytz.FixedOffset(0)\r\n ), # pytz coerces to UTC\r\n Timestamp(\"2010-01-01 12:00:00\", tzinfo=pytz.FixedOffset(0)),\r\n ],\r\n ],\r\n ],\r\n )\r\n def test_to_datetime_parse_tzname_or_tzoffset(self, fmt, dates, expected_dates):\r\n # GH 13486\r\n result = pd.to_datetime(dates, format=fmt)\r\n expected = Index(expected_dates)\r\n tm.assert_equal(result, expected)\r\n\r\n def test_to_datetime_parse_tzname_or_tzoffset_different_tz_to_utc(self):\r\n # GH 32792\r\n dates = [\r\n \"2010-01-01 12:00:00 +0100\",\r\n \"2010-01-01 12:00:00 -0100\",\r\n \"2010-01-01 12:00:00 +0300\",\r\n \"2010-01-01 12:00:00 +0400\",\r\n ]\r\n expected_dates = [\r\n \"2010-01-01 11:00:00+00:00\",\r\n \"2010-01-01 13:00:00+00:00\",\r\n \"2010-01-01 09:00:00+00:00\",\r\n \"2010-01-01 08:00:00+00:00\",\r\n ]\r\n fmt = \"%Y-%m-%d %H:%M:%S %z\"\r\n\r\n result = pd.to_datetime(dates, format=fmt, utc=True)\r\n expected = DatetimeIndex(expected_dates)\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"offset\", [\"+0\", \"-1foo\", \"UTCbar\", \":10\", \"+01:000:01\", \"\"]\r\n )\r\n def test_to_datetime_parse_timezone_malformed(self, offset):\r\n fmt = \"%Y-%m-%d %H:%M:%S %z\"\r\n date = \"2010-01-01 12:00:00 \" + offset\r\n\r\n msg = \"does not match format|unconverted data remains\"\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime([date], format=fmt)\r\n\r\n def test_to_datetime_parse_timezone_keeps_name(self):\r\n # GH 21697\r\n fmt = \"%Y-%m-%d %H:%M:%S %z\"\r\n arg = Index([\"2010-01-01 12:00:00 Z\"], name=\"foo\")\r\n result = pd.to_datetime(arg, format=fmt)\r\n expected = DatetimeIndex([\"2010-01-01 12:00:00\"], tz=\"UTC\", name=\"foo\")\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\nclass TestToDatetime:\r\n @pytest.mark.parametrize(\r\n \"s, _format, dt\",\r\n [\r\n [\"2015-1-1\", \"%G-%V-%u\", datetime(2014, 12, 29, 0, 0)],\r\n [\"2015-1-4\", \"%G-%V-%u\", datetime(2015, 1, 1, 0, 0)],\r\n [\"2015-1-7\", \"%G-%V-%u\", datetime(2015, 1, 4, 0, 0)],\r\n ],\r\n )\r\n def test_to_datetime_iso_week_year_format(self, s, _format, dt):\r\n # See GH#16607\r\n assert to_datetime(s, format=_format) == dt\r\n\r\n @pytest.mark.parametrize(\r\n \"msg, s, _format\",\r\n [\r\n [\r\n \"ISO week directive '%V' must be used with the ISO year directive \"\r\n \"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.\",\r\n \"1999 50\",\r\n \"%Y %V\",\r\n ],\r\n [\r\n \"ISO year directive '%G' must be used with the ISO week directive \"\r\n \"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.\",\r\n \"1999 51\",\r\n \"%G %V\",\r\n ],\r\n [\r\n \"ISO year directive '%G' must be used with the ISO week directive \"\r\n \"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.\",\r\n \"1999 Monday\",\r\n \"%G %A\",\r\n ],\r\n [\r\n \"ISO year directive '%G' must be used with the ISO week directive \"\r\n \"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.\",\r\n \"1999 Mon\",\r\n \"%G %a\",\r\n ],\r\n [\r\n \"ISO year directive '%G' must be used with the ISO week directive \"\r\n \"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.\",\r\n \"1999 6\",\r\n \"%G %w\",\r\n ],\r\n [\r\n \"ISO year directive '%G' must be used with the ISO week directive \"\r\n \"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.\",\r\n \"1999 6\",\r\n \"%G %u\",\r\n ],\r\n [\r\n \"ISO year directive '%G' must be used with the ISO week directive \"\r\n \"'%V' and a weekday directive '%A', '%a', '%w', or '%u'.\",\r\n 
\"2051\",\r\n \"%G\",\r\n ],\r\n [\r\n \"Day of the year directive '%j' is not compatible with ISO year \"\r\n \"directive '%G'. Use '%Y' instead.\",\r\n \"1999 51 6 256\",\r\n \"%G %V %u %j\",\r\n ],\r\n [\r\n \"ISO week directive '%V' is incompatible with the year directive \"\r\n \"'%Y'. Use the ISO year '%G' instead.\",\r\n \"1999 51 Sunday\",\r\n \"%Y %V %A\",\r\n ],\r\n [\r\n \"ISO week directive '%V' is incompatible with the year directive \"\r\n \"'%Y'. Use the ISO year '%G' instead.\",\r\n \"1999 51 Sun\",\r\n \"%Y %V %a\",\r\n ],\r\n [\r\n \"ISO week directive '%V' is incompatible with the year directive \"\r\n \"'%Y'. Use the ISO year '%G' instead.\",\r\n \"1999 51 1\",\r\n \"%Y %V %w\",\r\n ],\r\n [\r\n \"ISO week directive '%V' is incompatible with the year directive \"\r\n \"'%Y'. Use the ISO year '%G' instead.\",\r\n \"1999 51 1\",\r\n \"%Y %V %u\",\r\n ],\r\n [\r\n \"ISO week directive '%V' must be used with the ISO year directive \"\r\n \"'%G' and a weekday directive '%A', '%a', '%w', or '%u'.\",\r\n \"20\",\r\n \"%V\",\r\n ],\r\n ],\r\n )\r\n def test_error_iso_week_year(self, msg, s, _format):\r\n # See GH#16607\r\n # This test checks for errors thrown when giving the wrong format\r\n # However, as discussed on PR#25541, overriding the locale\r\n # causes a different error to be thrown due to the format being\r\n # locale specific, but the test data is in english.\r\n # Therefore, the tests only run when locale is not overwritten,\r\n # as a sort of solution to this problem.\r\n if locale.getlocale() != (\"zh_CN\", \"UTF-8\") and locale.getlocale() != (\r\n \"it_IT\",\r\n \"UTF-8\",\r\n ):\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(s, format=_format)\r\n\r\n @pytest.mark.parametrize(\"tz\", [None, \"US/Central\"])\r\n def test_to_datetime_dtarr(self, tz):\r\n # DatetimeArray\r\n dti = date_range(\"1965-04-03\", periods=19, freq=\"2W\", tz=tz)\r\n arr = DatetimeArray(dti)\r\n\r\n result = to_datetime(arr)\r\n assert result is arr\r\n\r\n result = to_datetime(arr)\r\n assert result is arr\r\n\r\n def test_to_datetime_pydatetime(self):\r\n actual = pd.to_datetime(datetime(2008, 1, 15))\r\n assert actual == datetime(2008, 1, 15)\r\n\r\n def test_to_datetime_YYYYMMDD(self):\r\n actual = pd.to_datetime(\"20080115\")\r\n assert actual == datetime(2008, 1, 15)\r\n\r\n def test_to_datetime_unparseable_ignore(self):\r\n # unparseable\r\n s = \"Month 1, 1999\"\r\n assert pd.to_datetime(s, errors=\"ignore\") == s\r\n\r\n @td.skip_if_windows # `tm.set_timezone` does not work in windows\r\n def test_to_datetime_now(self):\r\n # See GH#18666\r\n with tm.set_timezone(\"US/Eastern\"):\r\n npnow = np.datetime64(\"now\").astype(\"datetime64[ns]\")\r\n pdnow = pd.to_datetime(\"now\")\r\n pdnow2 = pd.to_datetime([\"now\"])[0]\r\n\r\n # These should all be equal with infinite perf; this gives\r\n # a generous margin of 10 seconds\r\n assert abs(pdnow.value - npnow.astype(np.int64)) < 1e10\r\n assert abs(pdnow2.value - npnow.astype(np.int64)) < 1e10\r\n\r\n assert pdnow.tzinfo is None\r\n assert pdnow2.tzinfo is None\r\n\r\n @td.skip_if_windows # `tm.set_timezone` does not work in windows\r\n def test_to_datetime_today(self):\r\n # See GH#18666\r\n # Test with one timezone far ahead of UTC and another far behind, so\r\n # one of these will _almost_ always be in a different day from UTC.\r\n # Unfortunately this test between 12 and 1 AM Samoa time\r\n # this both of these timezones _and_ UTC will all be in the same day,\r\n # so this test will not detect the regression 
introduced in #18666.\r\n with tm.set_timezone(\"Pacific/Auckland\"): # 12-13 hours ahead of UTC\r\n nptoday = np.datetime64(\"today\").astype(\"datetime64[ns]\").astype(np.int64)\r\n pdtoday = pd.to_datetime(\"today\")\r\n pdtoday2 = pd.to_datetime([\"today\"])[0]\r\n\r\n tstoday = Timestamp(\"today\")\r\n tstoday2 = Timestamp.today()\r\n\r\n # These should all be equal with infinite perf; this gives\r\n # a generous margin of 10 seconds\r\n assert abs(pdtoday.normalize().value - nptoday) < 1e10\r\n assert abs(pdtoday2.normalize().value - nptoday) < 1e10\r\n assert abs(pdtoday.value - tstoday.value) < 1e10\r\n assert abs(pdtoday.value - tstoday2.value) < 1e10\r\n\r\n assert pdtoday.tzinfo is None\r\n assert pdtoday2.tzinfo is None\r\n\r\n with tm.set_timezone(\"US/Samoa\"): # 11 hours behind UTC\r\n nptoday = np.datetime64(\"today\").astype(\"datetime64[ns]\").astype(np.int64)\r\n pdtoday = pd.to_datetime(\"today\")\r\n pdtoday2 = pd.to_datetime([\"today\"])[0]\r\n\r\n # These should all be equal with infinite perf; this gives\r\n # a generous margin of 10 seconds\r\n assert abs(pdtoday.normalize().value - nptoday) < 1e10\r\n assert abs(pdtoday2.normalize().value - nptoday) < 1e10\r\n\r\n assert pdtoday.tzinfo is None\r\n assert pdtoday2.tzinfo is None\r\n\r\n def test_to_datetime_today_now_unicode_bytes(self):\r\n to_datetime([\"now\"])\r\n to_datetime([\"today\"])\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_dt64s(self, cache):\r\n in_bound_dts = [np.datetime64(\"2000-01-01\"), np.datetime64(\"2000-01-02\")]\r\n\r\n for dt in in_bound_dts:\r\n assert pd.to_datetime(dt, cache=cache) == Timestamp(dt)\r\n\r\n @pytest.mark.parametrize(\r\n \"dt\", [np.datetime64(\"1000-01-01\"), np.datetime64(\"5000-01-02\")]\r\n )\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):\r\n msg = f\"Out of bounds nanosecond timestamp: {dt}\"\r\n with pytest.raises(OutOfBoundsDatetime, match=msg):\r\n pd.to_datetime(dt, errors=\"raise\")\r\n with pytest.raises(OutOfBoundsDatetime, match=msg):\r\n Timestamp(dt)\r\n assert pd.to_datetime(dt, errors=\"coerce\", cache=cache) is NaT\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n @pytest.mark.parametrize(\"unit\", [\"s\", \"D\"])\r\n def test_to_datetime_array_of_dt64s(self, cache, unit):\r\n # https://github.com/pandas-dev/pandas/issues/31491\r\n # Need at least 50 to ensure cache is used.\r\n dts = [\r\n np.datetime64(\"2000-01-01\", unit),\r\n np.datetime64(\"2000-01-02\", unit),\r\n ] * 30\r\n # Assuming all datetimes are in bounds, to_datetime() returns\r\n # an array that is equal to Timestamp() parsing\r\n tm.assert_index_equal(\r\n pd.to_datetime(dts, cache=cache),\r\n DatetimeIndex([Timestamp(x).asm8 for x in dts]),\r\n )\r\n\r\n # A list of datetimes where the last one is out of bounds\r\n dts_with_oob = dts + [np.datetime64(\"9999-01-01\")]\r\n\r\n msg = \"Out of bounds nanosecond timestamp: 9999-01-01 00:00:00\"\r\n with pytest.raises(OutOfBoundsDatetime, match=msg):\r\n pd.to_datetime(dts_with_oob, errors=\"raise\")\r\n\r\n tm.assert_index_equal(\r\n pd.to_datetime(dts_with_oob, errors=\"coerce\", cache=cache),\r\n DatetimeIndex(\r\n [Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30\r\n + [pd.NaT],\r\n ),\r\n )\r\n\r\n # With errors='ignore', out of bounds datetime64s\r\n # are converted to their .item(), which depending on the version of\r\n # numpy is either a python datetime.datetime or datetime.date\r\n 
tm.assert_index_equal(\r\n pd.to_datetime(dts_with_oob, errors=\"ignore\", cache=cache),\r\n Index([dt.item() for dt in dts_with_oob]),\r\n )\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_tz(self, cache):\r\n\r\n # xref 8260\r\n # uniform returns a DatetimeIndex\r\n arr = [\r\n Timestamp(\"2013-01-01 13:00:00-0800\", tz=\"US/Pacific\"),\r\n Timestamp(\"2013-01-02 14:00:00-0800\", tz=\"US/Pacific\"),\r\n ]\r\n result = pd.to_datetime(arr, cache=cache)\r\n expected = DatetimeIndex(\r\n [\"2013-01-01 13:00:00\", \"2013-01-02 14:00:00\"], tz=\"US/Pacific\"\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n # mixed tzs will raise\r\n arr = [\r\n Timestamp(\"2013-01-01 13:00:00\", tz=\"US/Pacific\"),\r\n Timestamp(\"2013-01-02 14:00:00\", tz=\"US/Eastern\"),\r\n ]\r\n msg = (\r\n \"Tz-aware datetime.datetime cannot be \"\r\n \"converted to datetime64 unless utc=True\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(arr, cache=cache)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_different_offsets(self, cache):\r\n # inspired by asv timeseries.ToDatetimeNONISO8601 benchmark\r\n # see GH-26097 for more\r\n ts_string_1 = \"March 1, 2018 12:00:00+0400\"\r\n ts_string_2 = \"March 1, 2018 12:00:00+0500\"\r\n arr = [ts_string_1] * 5 + [ts_string_2] * 5\r\n expected = Index([parse(x) for x in arr])\r\n result = pd.to_datetime(arr, cache=cache)\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_tz_pytz(self, cache):\r\n # see gh-8260\r\n us_eastern = pytz.timezone(\"US/Eastern\")\r\n arr = np.array(\r\n [\r\n us_eastern.localize(\r\n datetime(year=2000, month=1, day=1, hour=3, minute=0)\r\n ),\r\n us_eastern.localize(\r\n datetime(year=2000, month=6, day=1, hour=3, minute=0)\r\n ),\r\n ],\r\n dtype=object,\r\n )\r\n result = pd.to_datetime(arr, utc=True, cache=cache)\r\n expected = DatetimeIndex(\r\n [\"2000-01-01 08:00:00+00:00\", \"2000-06-01 07:00:00+00:00\"],\r\n dtype=\"datetime64[ns, UTC]\",\r\n freq=None,\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n @pytest.mark.parametrize(\r\n \"init_constructor, end_constructor, test_method\",\r\n [\r\n (Index, DatetimeIndex, tm.assert_index_equal),\r\n (list, DatetimeIndex, tm.assert_index_equal),\r\n (np.array, DatetimeIndex, tm.assert_index_equal),\r\n (Series, Series, tm.assert_series_equal),\r\n ],\r\n )\r\n def test_to_datetime_utc_true(\r\n self, cache, init_constructor, end_constructor, test_method\r\n ):\r\n # See gh-11934 & gh-6415\r\n data = [\"20100102 121314\", \"20100102 121315\"]\r\n expected_data = [\r\n Timestamp(\"2010-01-02 12:13:14\", tz=\"utc\"),\r\n Timestamp(\"2010-01-02 12:13:15\", tz=\"utc\"),\r\n ]\r\n\r\n result = pd.to_datetime(\r\n init_constructor(data), format=\"%Y%m%d %H%M%S\", utc=True, cache=cache\r\n )\r\n expected = end_constructor(expected_data)\r\n test_method(result, expected)\r\n\r\n # Test scalar case as well\r\n for scalar, expected in zip(data, expected_data):\r\n result = pd.to_datetime(\r\n scalar, format=\"%Y%m%d %H%M%S\", utc=True, cache=cache\r\n )\r\n assert result == expected\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_utc_true_with_series_single_value(self, cache):\r\n # GH 15760 UTC=True with Series\r\n ts = 1.5e18\r\n result = pd.to_datetime(Series([ts]), utc=True, cache=cache)\r\n expected = Series([Timestamp(ts, 
tz=\"utc\")])\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_utc_true_with_series_tzaware_string(self, cache):\r\n ts = \"2013-01-01 00:00:00-01:00\"\r\n expected_ts = \"2013-01-01 01:00:00\"\r\n data = Series([ts] * 3)\r\n result = pd.to_datetime(data, utc=True, cache=cache)\r\n expected = Series([Timestamp(expected_ts, tz=\"utc\")] * 3)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n @pytest.mark.parametrize(\r\n \"date, dtype\",\r\n [\r\n (\"2013-01-01 01:00:00\", \"datetime64[ns]\"),\r\n (\"2013-01-01 01:00:00\", \"datetime64[ns, UTC]\"),\r\n ],\r\n )\r\n def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype):\r\n expected = Series([Timestamp(\"2013-01-01 01:00:00\", tz=\"UTC\")])\r\n result = pd.to_datetime(Series([date], dtype=dtype), utc=True, cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n @td.skip_if_no(\"psycopg2\")\r\n def test_to_datetime_tz_psycopg2(self, cache):\r\n\r\n # xref 8260\r\n import psycopg2\r\n\r\n # misc cases\r\n tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)\r\n tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)\r\n arr = np.array(\r\n [\r\n datetime(2000, 1, 1, 3, 0, tzinfo=tz1),\r\n datetime(2000, 6, 1, 3, 0, tzinfo=tz2),\r\n ],\r\n dtype=object,\r\n )\r\n\r\n result = pd.to_datetime(arr, errors=\"coerce\", utc=True, cache=cache)\r\n expected = DatetimeIndex(\r\n [\"2000-01-01 08:00:00+00:00\", \"2000-06-01 07:00:00+00:00\"],\r\n dtype=\"datetime64[ns, UTC]\",\r\n freq=None,\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n # dtype coercion\r\n i = DatetimeIndex(\r\n [\"2000-01-01 08:00:00\"],\r\n tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None),\r\n )\r\n assert is_datetime64_ns_dtype(i)\r\n\r\n # tz coercion\r\n result = pd.to_datetime(i, errors=\"coerce\", cache=cache)\r\n tm.assert_index_equal(result, i)\r\n\r\n result = pd.to_datetime(i, errors=\"coerce\", utc=True, cache=cache)\r\n expected = DatetimeIndex([\"2000-01-01 13:00:00\"], dtype=\"datetime64[ns, UTC]\")\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_datetime_bool(self, cache):\r\n # GH13176\r\n msg = r\"dtype bool cannot be converted to datetime64\\[ns\\]\"\r\n with pytest.raises(TypeError, match=msg):\r\n to_datetime(False)\r\n assert to_datetime(False, errors=\"coerce\", cache=cache) is NaT\r\n assert to_datetime(False, errors=\"ignore\", cache=cache) is False\r\n with pytest.raises(TypeError, match=msg):\r\n to_datetime(True)\r\n assert to_datetime(True, errors=\"coerce\", cache=cache) is NaT\r\n assert to_datetime(True, errors=\"ignore\", cache=cache) is True\r\n msg = f\"{type(cache)} is not convertible to datetime\"\r\n with pytest.raises(TypeError, match=msg):\r\n to_datetime([False, datetime.today()], cache=cache)\r\n with pytest.raises(TypeError, match=msg):\r\n to_datetime([\"20130101\", True], cache=cache)\r\n tm.assert_index_equal(\r\n to_datetime([0, False, NaT, 0.0], errors=\"coerce\", cache=cache),\r\n DatetimeIndex(\r\n [to_datetime(0, cache=cache), NaT, NaT, to_datetime(0, cache=cache)]\r\n ),\r\n )\r\n\r\n def test_datetime_invalid_datatype(self):\r\n # GH13176\r\n msg = \"is not convertible to datetime\"\r\n with pytest.raises(TypeError, match=msg):\r\n pd.to_datetime(bool)\r\n with pytest.raises(TypeError, match=msg):\r\n 
pd.to_datetime(pd.to_datetime)\r\n\r\n @pytest.mark.parametrize(\"value\", [\"a\", \"00:01:99\"])\r\n @pytest.mark.parametrize(\"infer\", [True, False])\r\n @pytest.mark.parametrize(\"format\", [None, \"H%:M%:S%\"])\r\n def test_datetime_invalid_scalar(self, value, format, infer):\r\n # GH24763\r\n res = pd.to_datetime(\r\n value, errors=\"ignore\", format=format, infer_datetime_format=infer\r\n )\r\n assert res == value\r\n\r\n res = pd.to_datetime(\r\n value, errors=\"coerce\", format=format, infer_datetime_format=infer\r\n )\r\n assert res is pd.NaT\r\n\r\n msg = (\r\n \"is a bad directive in format|\"\r\n \"second must be in 0..59|\"\r\n \"Given date string not likely a datetime\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(\r\n value, errors=\"raise\", format=format, infer_datetime_format=infer\r\n )\r\n\r\n @pytest.mark.parametrize(\"value\", [\"3000/12/11 00:00:00\"])\r\n @pytest.mark.parametrize(\"infer\", [True, False])\r\n @pytest.mark.parametrize(\"format\", [None, \"H%:M%:S%\"])\r\n def test_datetime_outofbounds_scalar(self, value, format, infer):\r\n # GH24763\r\n res = pd.to_datetime(\r\n value, errors=\"ignore\", format=format, infer_datetime_format=infer\r\n )\r\n assert res == value\r\n\r\n res = pd.to_datetime(\r\n value, errors=\"coerce\", format=format, infer_datetime_format=infer\r\n )\r\n assert res is pd.NaT\r\n\r\n if format is not None:\r\n msg = \"is a bad directive in format|Out of bounds nanosecond timestamp\"\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(\r\n value, errors=\"raise\", format=format, infer_datetime_format=infer\r\n )\r\n else:\r\n msg = \"Out of bounds nanosecond timestamp\"\r\n with pytest.raises(OutOfBoundsDatetime, match=msg):\r\n pd.to_datetime(\r\n value, errors=\"raise\", format=format, infer_datetime_format=infer\r\n )\r\n\r\n @pytest.mark.parametrize(\"values\", [[\"a\"], [\"00:01:99\"], [\"a\", \"b\", \"99:00:00\"]])\r\n @pytest.mark.parametrize(\"infer\", [True, False])\r\n @pytest.mark.parametrize(\"format\", [None, \"H%:M%:S%\"])\r\n def test_datetime_invalid_index(self, values, format, infer):\r\n # GH24763\r\n res = pd.to_datetime(\r\n values, errors=\"ignore\", format=format, infer_datetime_format=infer\r\n )\r\n tm.assert_index_equal(res, Index(values))\r\n\r\n res = pd.to_datetime(\r\n values, errors=\"coerce\", format=format, infer_datetime_format=infer\r\n )\r\n tm.assert_index_equal(res, DatetimeIndex([pd.NaT] * len(values)))\r\n\r\n msg = (\r\n \"is a bad directive in format|\"\r\n \"Given date string not likely a datetime|\"\r\n \"second must be in 0..59\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(\r\n values, errors=\"raise\", format=format, infer_datetime_format=infer\r\n )\r\n\r\n @pytest.mark.parametrize(\"utc\", [True, None])\r\n @pytest.mark.parametrize(\"format\", [\"%Y%m%d %H:%M:%S\", None])\r\n @pytest.mark.parametrize(\"constructor\", [list, tuple, np.array, Index, deque])\r\n def test_to_datetime_cache(self, utc, format, constructor):\r\n date = \"20130101 00:00:00\"\r\n test_dates = [date] * 10 ** 5\r\n data = constructor(test_dates)\r\n\r\n result = pd.to_datetime(data, utc=utc, format=format, cache=True)\r\n expected = pd.to_datetime(data, utc=utc, format=format, cache=False)\r\n\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"listlike\",\r\n [\r\n (deque([Timestamp(\"2010-06-02 09:30:00\")] * 51)),\r\n ([Timestamp(\"2010-06-02 09:30:00\")] * 51),\r\n (tuple([Timestamp(\"2010-06-02 09:30:00\")] * 
51)),\r\n ],\r\n )\r\n def test_no_slicing_errors_in_should_cache(self, listlike):\r\n # GH 29403\r\n assert tools.should_cache(listlike) is True\r\n\r\n def test_to_datetime_from_deque(self):\r\n # GH 29403\r\n result = pd.to_datetime(deque([Timestamp(\"2010-06-02 09:30:00\")] * 51))\r\n expected = pd.to_datetime([Timestamp(\"2010-06-02 09:30:00\")] * 51)\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"utc\", [True, None])\r\n @pytest.mark.parametrize(\"format\", [\"%Y%m%d %H:%M:%S\", None])\r\n def test_to_datetime_cache_series(self, utc, format):\r\n date = \"20130101 00:00:00\"\r\n test_dates = [date] * 10 ** 5\r\n data = Series(test_dates)\r\n result = pd.to_datetime(data, utc=utc, format=format, cache=True)\r\n expected = pd.to_datetime(data, utc=utc, format=format, cache=False)\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_to_datetime_cache_scalar(self):\r\n date = \"20130101 00:00:00\"\r\n result = pd.to_datetime(date, cache=True)\r\n expected = Timestamp(\"20130101 00:00:00\")\r\n assert result == expected\r\n\r\n @pytest.mark.parametrize(\r\n \"date, format\",\r\n [\r\n (\"2017-20\", \"%Y-%W\"),\r\n (\"20 Sunday\", \"%W %A\"),\r\n (\"20 Sun\", \"%W %a\"),\r\n (\"2017-21\", \"%Y-%U\"),\r\n (\"20 Sunday\", \"%U %A\"),\r\n (\"20 Sun\", \"%U %a\"),\r\n ],\r\n )\r\n def test_week_without_day_and_calendar_year(self, date, format):\r\n # GH16774\r\n\r\n msg = \"Cannot use '%W' or '%U' without day and year\"\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(date, format=format)\r\n\r\n def test_to_datetime_coerce(self):\r\n # GH 26122\r\n ts_strings = [\r\n \"March 1, 2018 12:00:00+0400\",\r\n \"March 1, 2018 12:00:00+0500\",\r\n \"20100240\",\r\n ]\r\n result = to_datetime(ts_strings, errors=\"coerce\")\r\n expected = Index(\r\n [\r\n datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 14400)),\r\n datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 18000)),\r\n NaT,\r\n ]\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_to_datetime_coerce_malformed(self):\r\n # GH 28299\r\n ts_strings = [\"200622-12-31\", \"111111-24-11\"]\r\n result = to_datetime(ts_strings, errors=\"coerce\")\r\n expected = Index([NaT, NaT])\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_iso_8601_strings_with_same_offset(self):\r\n # GH 17697, 11736\r\n ts_str = \"2015-11-18 15:30:00+05:30\"\r\n result = to_datetime(ts_str)\r\n expected = Timestamp(ts_str)\r\n assert result == expected\r\n\r\n expected = DatetimeIndex([Timestamp(ts_str)] * 2)\r\n result = to_datetime([ts_str] * 2)\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = DatetimeIndex([ts_str] * 2)\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_iso_8601_strings_with_different_offsets(self):\r\n # GH 17697, 11736\r\n ts_strings = [\"2015-11-18 15:30:00+05:30\", \"2015-11-18 16:30:00+06:30\", NaT]\r\n result = to_datetime(ts_strings)\r\n expected = np.array(\r\n [\r\n datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)),\r\n datetime(2015, 11, 18, 16, 30, tzinfo=tzoffset(None, 23400)),\r\n NaT,\r\n ],\r\n dtype=object,\r\n )\r\n # GH 21864\r\n expected = Index(expected)\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = to_datetime(ts_strings, utc=True)\r\n expected = DatetimeIndex(\r\n [Timestamp(2015, 11, 18, 10), Timestamp(2015, 11, 18, 10), NaT], tz=\"UTC\"\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_iso8601_strings_mixed_offsets_with_naive(self):\r\n # GH 24992\r\n result = pd.to_datetime(\r\n [\r\n 
\"2018-11-28T00:00:00\",\r\n \"2018-11-28T00:00:00+12:00\",\r\n \"2018-11-28T00:00:00\",\r\n \"2018-11-28T00:00:00+06:00\",\r\n \"2018-11-28T00:00:00\",\r\n ],\r\n utc=True,\r\n )\r\n expected = pd.to_datetime(\r\n [\r\n \"2018-11-28T00:00:00\",\r\n \"2018-11-27T12:00:00\",\r\n \"2018-11-28T00:00:00\",\r\n \"2018-11-27T18:00:00\",\r\n \"2018-11-28T00:00:00\",\r\n ],\r\n utc=True,\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n items = [\"2018-11-28T00:00:00+12:00\", \"2018-11-28T00:00:00\"]\r\n result = pd.to_datetime(items, utc=True)\r\n expected = pd.to_datetime(list(reversed(items)), utc=True)[::-1]\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_mixed_offsets_with_native_datetime_raises(self):\r\n # GH 25978\r\n s = Series(\r\n [\r\n \"nan\",\r\n Timestamp(\"1990-01-01\"),\r\n \"2015-03-14T16:15:14.123-08:00\",\r\n \"2019-03-04T21:56:32.620-07:00\",\r\n None,\r\n ]\r\n )\r\n with pytest.raises(ValueError, match=\"Tz-aware datetime.datetime\"):\r\n pd.to_datetime(s)\r\n\r\n def test_non_iso_strings_with_tz_offset(self):\r\n result = to_datetime([\"March 1, 2018 12:00:00+0400\"] * 2)\r\n expected = DatetimeIndex(\r\n [datetime(2018, 3, 1, 12, tzinfo=pytz.FixedOffset(240))] * 2\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"ts, expected\",\r\n [\r\n (Timestamp(\"2018-01-01\"), Timestamp(\"2018-01-01\", tz=\"UTC\")),\r\n (\r\n Timestamp(\"2018-01-01\", tz=\"US/Pacific\"),\r\n Timestamp(\"2018-01-01 08:00\", tz=\"UTC\"),\r\n ),\r\n ],\r\n )\r\n def test_timestamp_utc_true(self, ts, expected):\r\n # GH 24415\r\n result = to_datetime(ts, utc=True)\r\n assert result == expected\r\n\r\n @pytest.mark.parametrize(\"dt_str\", [\"00010101\", \"13000101\", \"30000101\", \"99990101\"])\r\n def test_to_datetime_with_format_out_of_bounds(self, dt_str):\r\n # GH 9107\r\n msg = \"Out of bounds nanosecond timestamp\"\r\n with pytest.raises(OutOfBoundsDatetime, match=msg):\r\n pd.to_datetime(dt_str, format=\"%Y%m%d\")\r\n\r\n def test_to_datetime_utc(self):\r\n arr = np.array([parse(\"2012-06-13T01:39:00Z\")], dtype=object)\r\n\r\n result = to_datetime(arr, utc=True)\r\n assert result.tz is pytz.utc\r\n\r\n def test_to_datetime_fixed_offset(self):\r\n from pandas.tests.indexes.datetimes.test_timezones import fixed_off\r\n\r\n dates = [\r\n datetime(2000, 1, 1, tzinfo=fixed_off),\r\n datetime(2000, 1, 2, tzinfo=fixed_off),\r\n datetime(2000, 1, 3, tzinfo=fixed_off),\r\n ]\r\n result = to_datetime(dates)\r\n assert result.tz == fixed_off\r\n\r\n\r\nclass TestToDatetimeUnit:\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_unit(self, cache):\r\n # GH 11758\r\n # test proper behavior with errors\r\n msg = \"cannot specify both format and unit\"\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime([1], unit=\"D\", format=\"%Y%m%d\", cache=cache)\r\n\r\n values = [11111111, 1, 1.0, iNaT, NaT, np.nan, \"NaT\", \"\"]\r\n result = to_datetime(values, unit=\"D\", errors=\"ignore\", cache=cache)\r\n expected = Index(\r\n [\r\n 11111111,\r\n Timestamp(\"1970-01-02\"),\r\n Timestamp(\"1970-01-02\"),\r\n NaT,\r\n NaT,\r\n NaT,\r\n NaT,\r\n NaT,\r\n ],\r\n dtype=object,\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = to_datetime(values, unit=\"D\", errors=\"coerce\", cache=cache)\r\n expected = DatetimeIndex(\r\n [\"NaT\", \"1970-01-02\", \"1970-01-02\", \"NaT\", \"NaT\", \"NaT\", \"NaT\", \"NaT\"]\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n msg = \"cannot convert input 11111111 with the unit 
'D'\"\r\n with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):\r\n to_datetime(values, unit=\"D\", errors=\"raise\", cache=cache)\r\n\r\n values = [1420043460000, iNaT, NaT, np.nan, \"NaT\"]\r\n\r\n result = to_datetime(values, errors=\"ignore\", unit=\"s\", cache=cache)\r\n expected = Index([1420043460000, NaT, NaT, NaT, NaT], dtype=object)\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = to_datetime(values, errors=\"coerce\", unit=\"s\", cache=cache)\r\n expected = DatetimeIndex([\"NaT\", \"NaT\", \"NaT\", \"NaT\", \"NaT\"])\r\n tm.assert_index_equal(result, expected)\r\n\r\n msg = \"cannot convert input 1420043460000 with the unit 's'\"\r\n with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):\r\n to_datetime(values, errors=\"raise\", unit=\"s\", cache=cache)\r\n\r\n # if we have a string, then we raise a ValueError\r\n # and NOT an OutOfBoundsDatetime\r\n for val in [\"foo\", Timestamp(\"20130101\")]:\r\n try:\r\n to_datetime(val, errors=\"raise\", unit=\"s\", cache=cache)\r\n except tslib.OutOfBoundsDatetime as err:\r\n raise AssertionError(\"incorrect exception raised\") from err\r\n except ValueError:\r\n pass\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_unit_consistency(self, cache):\r\n\r\n # consistency of conversions\r\n expected = Timestamp(\"1970-05-09 14:25:11\")\r\n result = pd.to_datetime(11111111, unit=\"s\", errors=\"raise\", cache=cache)\r\n assert result == expected\r\n assert isinstance(result, Timestamp)\r\n\r\n result = pd.to_datetime(11111111, unit=\"s\", errors=\"coerce\", cache=cache)\r\n assert result == expected\r\n assert isinstance(result, Timestamp)\r\n\r\n result = pd.to_datetime(11111111, unit=\"s\", errors=\"ignore\", cache=cache)\r\n assert result == expected\r\n assert isinstance(result, Timestamp)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_unit_with_numeric(self, cache):\r\n\r\n # GH 13180\r\n # coercions from floats/ints are ok\r\n expected = DatetimeIndex([\"2015-06-19 05:33:20\", \"2015-05-27 22:33:20\"])\r\n arr1 = [1.434692e18, 1.432766e18]\r\n arr2 = np.array(arr1).astype(\"int64\")\r\n for errors in [\"ignore\", \"raise\", \"coerce\"]:\r\n result = pd.to_datetime(arr1, errors=errors, cache=cache)\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = pd.to_datetime(arr2, errors=errors, cache=cache)\r\n tm.assert_index_equal(result, expected)\r\n\r\n # but we want to make sure that we are coercing\r\n # if we have ints/strings\r\n expected = DatetimeIndex([\"NaT\", \"2015-06-19 05:33:20\", \"2015-05-27 22:33:20\"])\r\n arr = [\"foo\", 1.434692e18, 1.432766e18]\r\n result = pd.to_datetime(arr, errors=\"coerce\", cache=cache)\r\n tm.assert_index_equal(result, expected)\r\n\r\n expected = DatetimeIndex(\r\n [\"2015-06-19 05:33:20\", \"2015-05-27 22:33:20\", \"NaT\", \"NaT\"]\r\n )\r\n arr = [1.434692e18, 1.432766e18, \"foo\", \"NaT\"]\r\n result = pd.to_datetime(arr, errors=\"coerce\", cache=cache)\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_unit_mixed(self, cache):\r\n\r\n # mixed integers/datetimes\r\n expected = DatetimeIndex([\"2013-01-01\", \"NaT\", \"NaT\"])\r\n arr = [Timestamp(\"20130101\"), 1.434692e18, 1.432766e18]\r\n result = pd.to_datetime(arr, errors=\"coerce\", cache=cache)\r\n tm.assert_index_equal(result, expected)\r\n\r\n msg = \"mixed datetimes and integers in passed array\"\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(arr, errors=\"raise\", 
cache=cache)\r\n\r\n expected = DatetimeIndex([\"NaT\", \"NaT\", \"2013-01-01\"])\r\n arr = [1.434692e18, 1.432766e18, Timestamp(\"20130101\")]\r\n result = pd.to_datetime(arr, errors=\"coerce\", cache=cache)\r\n tm.assert_index_equal(result, expected)\r\n\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(arr, errors=\"raise\", cache=cache)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_unit_rounding(self, cache):\r\n # GH 14156 & GH 20445: argument will incur floating point errors\r\n # but no premature rounding\r\n result = pd.to_datetime(1434743731.8770001, unit=\"s\", cache=cache)\r\n expected = Timestamp(\"2015-06-19 19:55:31.877000192\")\r\n assert result == expected\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_unit_ignore_keeps_name(self, cache):\r\n # GH 21697\r\n expected = Index([15e9] * 2, name=\"name\")\r\n result = pd.to_datetime(expected, errors=\"ignore\", unit=\"s\", cache=cache)\r\n tm.assert_index_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_dataframe(self, cache):\r\n\r\n df = DataFrame(\r\n {\r\n \"year\": [2015, 2016],\r\n \"month\": [2, 3],\r\n \"day\": [4, 5],\r\n \"hour\": [6, 7],\r\n \"minute\": [58, 59],\r\n \"second\": [10, 11],\r\n \"ms\": [1, 1],\r\n \"us\": [2, 2],\r\n \"ns\": [3, 3],\r\n }\r\n )\r\n\r\n result = to_datetime(\r\n {\"year\": df[\"year\"], \"month\": df[\"month\"], \"day\": df[\"day\"]}, cache=cache\r\n )\r\n expected = Series(\r\n [Timestamp(\"20150204 00:00:00\"), Timestamp(\"20160305 00:0:00\")]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n # dict-like\r\n result = to_datetime(df[[\"year\", \"month\", \"day\"]].to_dict(), cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # dict but with constructable\r\n df2 = df[[\"year\", \"month\", \"day\"]].to_dict()\r\n df2[\"month\"] = 2\r\n result = to_datetime(df2, cache=cache)\r\n expected2 = Series(\r\n [Timestamp(\"20150204 00:00:00\"), Timestamp(\"20160205 00:0:00\")]\r\n )\r\n tm.assert_series_equal(result, expected2)\r\n\r\n # unit mappings\r\n units = [\r\n {\r\n \"year\": \"years\",\r\n \"month\": \"months\",\r\n \"day\": \"days\",\r\n \"hour\": \"hours\",\r\n \"minute\": \"minutes\",\r\n \"second\": \"seconds\",\r\n },\r\n {\r\n \"year\": \"year\",\r\n \"month\": \"month\",\r\n \"day\": \"day\",\r\n \"hour\": \"hour\",\r\n \"minute\": \"minute\",\r\n \"second\": \"second\",\r\n },\r\n ]\r\n\r\n for d in units:\r\n result = to_datetime(df[list(d.keys())].rename(columns=d), cache=cache)\r\n expected = Series(\r\n [Timestamp(\"20150204 06:58:10\"), Timestamp(\"20160305 07:59:11\")]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n d = {\r\n \"year\": \"year\",\r\n \"month\": \"month\",\r\n \"day\": \"day\",\r\n \"hour\": \"hour\",\r\n \"minute\": \"minute\",\r\n \"second\": \"second\",\r\n \"ms\": \"ms\",\r\n \"us\": \"us\",\r\n \"ns\": \"ns\",\r\n }\r\n\r\n result = to_datetime(df.rename(columns=d), cache=cache)\r\n expected = Series(\r\n [\r\n Timestamp(\"20150204 06:58:10.001002003\"),\r\n Timestamp(\"20160305 07:59:11.001002003\"),\r\n ]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n # coerce back to int\r\n result = to_datetime(df.astype(str), cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n # passing coerce\r\n df2 = DataFrame({\"year\": [2015, 2016], \"month\": [2, 20], \"day\": [4, 5]})\r\n\r\n msg = (\r\n \"cannot assemble the datetimes: time data .+ does not \"\r\n r\"match format '%Y%m%d' 
\\(match\\)\"\r\n )\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(df2, cache=cache)\r\n result = to_datetime(df2, errors=\"coerce\", cache=cache)\r\n expected = Series([Timestamp(\"20150204 00:00:00\"), NaT])\r\n tm.assert_series_equal(result, expected)\r\n\r\n # extra columns\r\n msg = r\"extra keys have been passed to the datetime assemblage: \\[foo\\]\"\r\n with pytest.raises(ValueError, match=msg):\r\n df2 = df.copy()\r\n df2[\"foo\"] = 1\r\n to_datetime(df2, cache=cache)\r\n\r\n # not enough\r\n msg = (\r\n r\"to assemble mappings requires at least that \\[year, month, \"\r\n r\"day\\] be specified: \\[.+\\] is missing\"\r\n )\r\n for c in [\r\n [\"year\"],\r\n [\"year\", \"month\"],\r\n [\"year\", \"month\", \"second\"],\r\n [\"month\", \"day\"],\r\n [\"year\", \"day\", \"second\"],\r\n ]:\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(df[c], cache=cache)\r\n\r\n # duplicates\r\n msg = \"cannot assemble with duplicate keys\"\r\n df2 = DataFrame({\"year\": [2015, 2016], \"month\": [2, 20], \"day\": [4, 5]})\r\n df2.columns = [\"year\", \"year\", \"day\"]\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(df2, cache=cache)\r\n\r\n df2 = DataFrame(\r\n {\"year\": [2015, 2016], \"month\": [2, 20], \"day\": [4, 5], \"hour\": [4, 5]}\r\n )\r\n df2.columns = [\"year\", \"month\", \"day\", \"day\"]\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(df2, cache=cache)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_dataframe_dtypes(self, cache):\r\n # #13451\r\n df = DataFrame({\"year\": [2015, 2016], \"month\": [2, 3], \"day\": [4, 5]})\r\n\r\n # int16\r\n result = to_datetime(df.astype(\"int16\"), cache=cache)\r\n expected = Series(\r\n [Timestamp(\"20150204 00:00:00\"), Timestamp(\"20160305 00:00:00\")]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n # mixed dtypes\r\n df[\"month\"] = df[\"month\"].astype(\"int8\")\r\n df[\"day\"] = df[\"day\"].astype(\"int8\")\r\n result = to_datetime(df, cache=cache)\r\n expected = Series(\r\n [Timestamp(\"20150204 00:00:00\"), Timestamp(\"20160305 00:00:00\")]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n # float\r\n df = DataFrame({\"year\": [2000, 2001], \"month\": [1.5, 1], \"day\": [1, 1]})\r\n msg = \"cannot assemble the datetimes: unconverted data remains: 1\"\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(df, cache=cache)\r\n\r\n def test_dataframe_utc_true(self):\r\n # GH 23760\r\n df = DataFrame({\"year\": [2015, 2016], \"month\": [2, 3], \"day\": [4, 5]})\r\n result = pd.to_datetime(df, utc=True)\r\n expected = Series(\r\n np.array([\"2015-02-04\", \"2016-03-05\"], dtype=\"datetime64[ns]\")\r\n ).dt.tz_localize(\"UTC\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_to_datetime_errors_ignore_utc_true(self):\r\n # GH 23758\r\n result = pd.to_datetime([1], unit=\"s\", utc=True, errors=\"ignore\")\r\n expected = DatetimeIndex([\"1970-01-01 00:00:01\"], tz=\"UTC\")\r\n tm.assert_index_equal(result, expected)\r\n\r\n # TODO: this is moved from tests.series.test_timeseries, may be redundant\r\n def test_to_datetime_unit(self):\r\n\r\n epoch = 1370745748\r\n s = Series([epoch + t for t in range(20)])\r\n result = to_datetime(s, unit=\"s\")\r\n expected = Series(\r\n [Timestamp(\"2013-06-09 02:42:28\") + timedelta(seconds=t) for t in range(20)]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series([epoch + t for t in range(20)]).astype(float)\r\n result = to_datetime(s, unit=\"s\")\r\n expected = 
Series(\r\n [Timestamp(\"2013-06-09 02:42:28\") + timedelta(seconds=t) for t in range(20)]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series([epoch + t for t in range(20)] + [iNaT])\r\n result = to_datetime(s, unit=\"s\")\r\n expected = Series(\r\n [Timestamp(\"2013-06-09 02:42:28\") + timedelta(seconds=t) for t in range(20)]\r\n + [NaT]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)\r\n result = to_datetime(s, unit=\"s\")\r\n expected = Series(\r\n [Timestamp(\"2013-06-09 02:42:28\") + timedelta(seconds=t) for t in range(20)]\r\n + [NaT]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n # GH13834\r\n s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float)\r\n result = to_datetime(s, unit=\"s\")\r\n expected = Series(\r\n [\r\n Timestamp(\"2013-06-09 02:42:28\") + timedelta(seconds=t)\r\n for t in np.arange(0, 2, 0.25)\r\n ]\r\n + [NaT]\r\n )\r\n # GH20455 argument will incur floating point errors but no premature rounding\r\n result = result.round(\"ms\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n s = pd.concat(\r\n [Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])],\r\n ignore_index=True,\r\n )\r\n result = to_datetime(s, unit=\"s\")\r\n expected = Series(\r\n [Timestamp(\"2013-06-09 02:42:28\") + timedelta(seconds=t) for t in range(20)]\r\n + [NaT]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = to_datetime([1, 2, \"NaT\", pd.NaT, np.nan], unit=\"D\")\r\n expected = DatetimeIndex(\r\n [Timestamp(\"1970-01-02\"), Timestamp(\"1970-01-03\")] + [\"NaT\"] * 3\r\n )\r\n tm.assert_index_equal(result, expected)\r\n\r\n msg = \"non convertible value foo with the unit 'D'\"\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime([1, 2, \"foo\"], unit=\"D\")\r\n msg = \"cannot convert input 111111111 with the unit 'D'\"\r\n with pytest.raises(OutOfBoundsDatetime, match=msg):\r\n to_datetime([1, 2, 111111111], unit=\"D\")\r\n\r\n # coerce we can process\r\n expected = DatetimeIndex(\r\n [Timestamp(\"1970-01-02\"), Timestamp(\"1970-01-03\")] + [\"NaT\"] * 1\r\n )\r\n result = to_datetime([1, 2, \"foo\"], unit=\"D\", errors=\"coerce\")\r\n tm.assert_index_equal(result, expected)\r\n\r\n result = to_datetime([1, 2, 111111111], unit=\"D\", errors=\"coerce\")\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\nclass TestToDatetimeMisc:\r\n def test_to_datetime_barely_out_of_bounds(self):\r\n # GH#19529\r\n # GH#19382 close enough to bounds that dropping nanos would result\r\n # in an in-bounds datetime\r\n arr = np.array([\"2262-04-11 23:47:16.854775808\"], dtype=object)\r\n\r\n msg = \"Out of bounds nanosecond timestamp\"\r\n with pytest.raises(OutOfBoundsDatetime, match=msg):\r\n to_datetime(arr)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_iso8601(self, cache):\r\n result = to_datetime([\"2012-01-01 00:00:00\"], cache=cache)\r\n exp = Timestamp(\"2012-01-01 00:00:00\")\r\n assert result[0] == exp\r\n\r\n result = to_datetime([\"20121001\"], cache=cache) # bad iso 8601\r\n exp = Timestamp(\"2012-10-01\")\r\n assert result[0] == exp\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_default(self, cache):\r\n rs = to_datetime(\"2001\", cache=cache)\r\n xp = datetime(2001, 1, 1)\r\n assert rs == xp\r\n\r\n # dayfirst is essentially broken\r\n\r\n # to_datetime('01-13-2012', dayfirst=True)\r\n # pytest.raises(ValueError, 
to_datetime('01-13-2012',\r\n # dayfirst=True))\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_on_datetime64_series(self, cache):\r\n # #2699\r\n s = Series(date_range(\"1/1/2000\", periods=10))\r\n\r\n result = to_datetime(s, cache=cache)\r\n assert result[0] == s[0]\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_with_space_in_series(self, cache):\r\n # GH 6428\r\n s = Series([\"10/18/2006\", \"10/18/2008\", \" \"])\r\n msg = r\"(\\(')?String does not contain a date(:', ' '\\))?\"\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(s, errors=\"raise\", cache=cache)\r\n result_coerce = to_datetime(s, errors=\"coerce\", cache=cache)\r\n expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT])\r\n tm.assert_series_equal(result_coerce, expected_coerce)\r\n result_ignore = to_datetime(s, errors=\"ignore\", cache=cache)\r\n tm.assert_series_equal(result_ignore, s)\r\n\r\n @td.skip_if_has_locale\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_with_apply(self, cache):\r\n # this is only locale tested with US/None locales\r\n # GH 5195\r\n # with a format and coerce a single item to_datetime fails\r\n td = Series([\"May 04\", \"Jun 02\", \"Dec 11\"], index=[1, 2, 3])\r\n expected = pd.to_datetime(td, format=\"%b %y\", cache=cache)\r\n result = td.apply(pd.to_datetime, format=\"%b %y\", cache=cache)\r\n tm.assert_series_equal(result, expected)\r\n\r\n td = Series([\"May 04\", \"Jun 02\", \"\"], index=[1, 2, 3])\r\n msg = r\"time data '' does not match format '%b %y' \\(match\\)\"\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(td, format=\"%b %y\", errors=\"raise\", cache=cache)\r\n with pytest.raises(ValueError, match=msg):\r\n td.apply(pd.to_datetime, format=\"%b %y\", errors=\"raise\", cache=cache)\r\n expected = pd.to_datetime(td, format=\"%b %y\", errors=\"coerce\", cache=cache)\r\n\r\n result = td.apply(\r\n lambda x: pd.to_datetime(x, format=\"%b %y\", errors=\"coerce\", cache=cache)\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_types(self, cache):\r\n\r\n # empty string\r\n result = to_datetime(\"\", cache=cache)\r\n assert result is NaT\r\n\r\n result = to_datetime([\"\", \"\"], cache=cache)\r\n assert isna(result).all()\r\n\r\n # ints\r\n result = Timestamp(0)\r\n expected = to_datetime(0, cache=cache)\r\n assert result == expected\r\n\r\n # GH 3888 (strings)\r\n expected = to_datetime([\"2012\"], cache=cache)[0]\r\n result = to_datetime(\"2012\", cache=cache)\r\n assert result == expected\r\n\r\n # array = ['2012','20120101','20120101 12:01:01']\r\n array = [\"20120101\", \"20120101 12:01:01\"]\r\n expected = list(to_datetime(array, cache=cache))\r\n result = [Timestamp(date_str) for date_str in array]\r\n tm.assert_almost_equal(result, expected)\r\n\r\n # currently fails ###\r\n # result = Timestamp('2012')\r\n # expected = to_datetime('2012')\r\n # assert result == expected\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_unprocessable_input(self, cache):\r\n # GH 4928\r\n # GH 21864\r\n result = to_datetime([1, \"1\"], errors=\"ignore\", cache=cache)\r\n\r\n expected = Index(np.array([1, \"1\"], dtype=\"O\"))\r\n tm.assert_equal(result, expected)\r\n msg = \"invalid string coercion to datetime\"\r\n with pytest.raises(TypeError, match=msg):\r\n to_datetime([1, \"1\"], errors=\"raise\", cache=cache)\r\n\r\n 
def test_to_datetime_other_datetime64_units(self):\r\n # 5/25/2012\r\n scalar = np.int64(1337904000000000).view(\"M8[us]\")\r\n as_obj = scalar.astype(\"O\")\r\n\r\n index = DatetimeIndex([scalar])\r\n assert index[0] == scalar.astype(\"O\")\r\n\r\n value = Timestamp(scalar)\r\n assert value == as_obj\r\n\r\n def test_to_datetime_list_of_integers(self):\r\n rng = date_range(\"1/1/2000\", periods=20)\r\n rng = DatetimeIndex(rng.values)\r\n\r\n ints = list(rng.asi8)\r\n\r\n result = DatetimeIndex(ints)\r\n\r\n tm.assert_index_equal(rng, result)\r\n\r\n def test_to_datetime_overflow(self):\r\n # gh-17637\r\n # we are overflowing Timedelta range here\r\n\r\n msg = (\r\n \"(Python int too large to convert to C long)|\"\r\n \"(long too big to convert)|\"\r\n \"(int too big to convert)\"\r\n )\r\n with pytest.raises(OverflowError, match=msg):\r\n date_range(start=\"1/1/1700\", freq=\"B\", periods=100000)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_string_na_nat_conversion(self, cache):\r\n # GH #999, #858\r\n\r\n strings = np.array(\r\n [\"1/1/2000\", \"1/2/2000\", np.nan, \"1/4/2000, 12:34:56\"], dtype=object\r\n )\r\n\r\n expected = np.empty(4, dtype=\"M8[ns]\")\r\n for i, val in enumerate(strings):\r\n if isna(val):\r\n expected[i] = iNaT\r\n else:\r\n expected[i] = parse(val)\r\n\r\n result = tslib.array_to_datetime(strings)[0]\r\n tm.assert_almost_equal(result, expected)\r\n\r\n result2 = to_datetime(strings, cache=cache)\r\n assert isinstance(result2, DatetimeIndex)\r\n tm.assert_numpy_array_equal(result, result2.values)\r\n\r\n malformed = np.array([\"1/100/2000\", np.nan], dtype=object)\r\n\r\n # GH 10636, default is now 'raise'\r\n msg = r\"Unknown string format:|day is out of range for month\"\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(malformed, errors=\"raise\", cache=cache)\r\n\r\n result = to_datetime(malformed, errors=\"ignore\", cache=cache)\r\n # GH 21864\r\n expected = Index(malformed)\r\n tm.assert_index_equal(result, expected)\r\n\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(malformed, errors=\"raise\", cache=cache)\r\n\r\n idx = [\"a\", \"b\", \"c\", \"d\", \"e\"]\r\n series = Series(\r\n [\"1/1/2000\", np.nan, \"1/3/2000\", np.nan, \"1/5/2000\"], index=idx, name=\"foo\"\r\n )\r\n dseries = Series(\r\n [\r\n to_datetime(\"1/1/2000\", cache=cache),\r\n np.nan,\r\n to_datetime(\"1/3/2000\", cache=cache),\r\n np.nan,\r\n to_datetime(\"1/5/2000\", cache=cache),\r\n ],\r\n index=idx,\r\n name=\"foo\",\r\n )\r\n\r\n result = to_datetime(series, cache=cache)\r\n dresult = to_datetime(dseries, cache=cache)\r\n\r\n expected = Series(np.empty(5, dtype=\"M8[ns]\"), index=idx)\r\n for i in range(5):\r\n x = series[i]\r\n if isna(x):\r\n expected[i] = pd.NaT\r\n else:\r\n expected[i] = to_datetime(x, cache=cache)\r\n\r\n tm.assert_series_equal(result, expected, check_names=False)\r\n assert result.name == \"foo\"\r\n\r\n tm.assert_series_equal(dresult, expected, check_names=False)\r\n assert dresult.name == \"foo\"\r\n\r\n @pytest.mark.parametrize(\r\n \"dtype\",\r\n [\r\n \"datetime64[h]\",\r\n \"datetime64[m]\",\r\n \"datetime64[s]\",\r\n \"datetime64[ms]\",\r\n \"datetime64[us]\",\r\n \"datetime64[ns]\",\r\n ],\r\n )\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_dti_constructor_numpy_timeunits(self, cache, dtype):\r\n # GH 9114\r\n base = pd.to_datetime(\r\n [\"2000-01-01T00:00\", \"2000-01-02T00:00\", \"NaT\"], cache=cache\r\n )\r\n\r\n values = base.values.astype(dtype)\r\n\r\n 
tm.assert_index_equal(DatetimeIndex(values), base)\r\n tm.assert_index_equal(to_datetime(values, cache=cache), base)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_dayfirst(self, cache):\r\n # GH 5917\r\n arr = [\"10/02/2014\", \"11/02/2014\", \"12/02/2014\"]\r\n expected = DatetimeIndex(\r\n [datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)]\r\n )\r\n idx1 = DatetimeIndex(arr, dayfirst=True)\r\n idx2 = DatetimeIndex(np.array(arr), dayfirst=True)\r\n idx3 = to_datetime(arr, dayfirst=True, cache=cache)\r\n idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache)\r\n idx5 = DatetimeIndex(Index(arr), dayfirst=True)\r\n idx6 = DatetimeIndex(Series(arr), dayfirst=True)\r\n tm.assert_index_equal(expected, idx1)\r\n tm.assert_index_equal(expected, idx2)\r\n tm.assert_index_equal(expected, idx3)\r\n tm.assert_index_equal(expected, idx4)\r\n tm.assert_index_equal(expected, idx5)\r\n tm.assert_index_equal(expected, idx6)\r\n\r\n @pytest.mark.parametrize(\"klass\", [DatetimeIndex, DatetimeArray])\r\n def test_to_datetime_dta_tz(self, klass):\r\n # GH#27733\r\n dti = date_range(\"2015-04-05\", periods=3).rename(\"foo\")\r\n expected = dti.tz_localize(\"UTC\")\r\n\r\n obj = klass(dti)\r\n expected = klass(expected)\r\n\r\n result = to_datetime(obj, utc=True)\r\n tm.assert_equal(result, expected)\r\n\r\n\r\nclass TestGuessDatetimeFormat:\r\n @td.skip_if_not_us_locale\r\n def test_guess_datetime_format_for_array(self):\r\n expected_format = \"%Y-%m-%d %H:%M:%S.%f\"\r\n dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)\r\n\r\n test_arrays = [\r\n np.array([dt_string, dt_string, dt_string], dtype=\"O\"),\r\n np.array([np.nan, np.nan, dt_string], dtype=\"O\"),\r\n np.array([dt_string, \"random_string\"], dtype=\"O\"),\r\n ]\r\n\r\n for test_array in test_arrays:\r\n assert tools._guess_datetime_format_for_array(test_array) == expected_format\r\n\r\n format_for_string_of_nans = tools._guess_datetime_format_for_array(\r\n np.array([np.nan, np.nan, np.nan], dtype=\"O\")\r\n )\r\n assert format_for_string_of_nans is None\r\n\r\n\r\nclass TestToDatetimeInferFormat:\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_infer_datetime_format_consistent_format(self, cache):\r\n s = Series(pd.date_range(\"20000101\", periods=50, freq=\"H\"))\r\n\r\n test_formats = [\"%m-%d-%Y\", \"%m/%d/%Y %H:%M:%S.%f\", \"%Y-%m-%dT%H:%M:%S.%f\"]\r\n\r\n for test_format in test_formats:\r\n s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))\r\n\r\n with_format = pd.to_datetime(\r\n s_as_dt_strings, format=test_format, cache=cache\r\n )\r\n no_infer = pd.to_datetime(\r\n s_as_dt_strings, infer_datetime_format=False, cache=cache\r\n )\r\n yes_infer = pd.to_datetime(\r\n s_as_dt_strings, infer_datetime_format=True, cache=cache\r\n )\r\n\r\n # Whether the format is explicitly passed, it is inferred, or\r\n # it is not inferred, the results should all be the same\r\n tm.assert_series_equal(with_format, no_infer)\r\n tm.assert_series_equal(no_infer, yes_infer)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_infer_datetime_format_inconsistent_format(self, cache):\r\n s = Series(\r\n np.array(\r\n [\"01/01/2011 00:00:00\", \"01-02-2011 00:00:00\", \"2011-01-03T00:00:00\"]\r\n )\r\n )\r\n\r\n # When the format is inconsistent, infer_datetime_format should just\r\n # fallback to the default parsing\r\n tm.assert_series_equal(\r\n pd.to_datetime(s, infer_datetime_format=False, cache=cache),\r\n 
pd.to_datetime(s, infer_datetime_format=True, cache=cache),\r\n )\r\n\r\n s = Series(np.array([\"Jan/01/2011\", \"Feb/01/2011\", \"Mar/01/2011\"]))\r\n\r\n tm.assert_series_equal(\r\n pd.to_datetime(s, infer_datetime_format=False, cache=cache),\r\n pd.to_datetime(s, infer_datetime_format=True, cache=cache),\r\n )\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_infer_datetime_format_series_with_nans(self, cache):\r\n s = Series(\r\n np.array([\"01/01/2011 00:00:00\", np.nan, \"01/03/2011 00:00:00\", np.nan])\r\n )\r\n tm.assert_series_equal(\r\n pd.to_datetime(s, infer_datetime_format=False, cache=cache),\r\n pd.to_datetime(s, infer_datetime_format=True, cache=cache),\r\n )\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_infer_datetime_format_series_start_with_nans(self, cache):\r\n s = Series(\r\n np.array(\r\n [\r\n np.nan,\r\n np.nan,\r\n \"01/01/2011 00:00:00\",\r\n \"01/02/2011 00:00:00\",\r\n \"01/03/2011 00:00:00\",\r\n ]\r\n )\r\n )\r\n\r\n tm.assert_series_equal(\r\n pd.to_datetime(s, infer_datetime_format=False, cache=cache),\r\n pd.to_datetime(s, infer_datetime_format=True, cache=cache),\r\n )\r\n\r\n @pytest.mark.parametrize(\r\n \"tz_name, offset\", [(\"UTC\", 0), (\"UTC-3\", 180), (\"UTC+3\", -180)]\r\n )\r\n def test_infer_datetime_format_tz_name(self, tz_name, offset):\r\n # GH 33133\r\n s = Series([f\"2019-02-02 08:07:13 {tz_name}\"])\r\n result = to_datetime(s, infer_datetime_format=True)\r\n expected = Series(\r\n [Timestamp(\"2019-02-02 08:07:13\").tz_localize(pytz.FixedOffset(offset))]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_to_datetime_iso8601_noleading_0s(self, cache):\r\n # GH 11871\r\n s = Series([\"2014-1-1\", \"2014-2-2\", \"2015-3-3\"])\r\n expected = Series(\r\n [\r\n Timestamp(\"2014-01-01\"),\r\n Timestamp(\"2014-02-02\"),\r\n Timestamp(\"2015-03-03\"),\r\n ]\r\n )\r\n tm.assert_series_equal(pd.to_datetime(s, cache=cache), expected)\r\n tm.assert_series_equal(\r\n pd.to_datetime(s, format=\"%Y-%m-%d\", cache=cache), expected\r\n )\r\n\r\n\r\nclass TestDaysInMonth:\r\n # tests for issue #10154\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_day_not_in_month_coerce(self, cache):\r\n assert isna(to_datetime(\"2015-02-29\", errors=\"coerce\", cache=cache))\r\n assert isna(\r\n to_datetime(\"2015-02-29\", format=\"%Y-%m-%d\", errors=\"coerce\", cache=cache)\r\n )\r\n assert isna(\r\n to_datetime(\"2015-02-32\", format=\"%Y-%m-%d\", errors=\"coerce\", cache=cache)\r\n )\r\n assert isna(\r\n to_datetime(\"2015-04-31\", format=\"%Y-%m-%d\", errors=\"coerce\", cache=cache)\r\n )\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_day_not_in_month_raise(self, cache):\r\n msg = \"day is out of range for month\"\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(\"2015-02-29\", errors=\"raise\", cache=cache)\r\n\r\n msg = \"time data 2015-02-29 doesn't match format specified\"\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(\"2015-02-29\", errors=\"raise\", format=\"%Y-%m-%d\", cache=cache)\r\n\r\n msg = \"time data 2015-02-32 doesn't match format specified\"\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(\"2015-02-32\", errors=\"raise\", format=\"%Y-%m-%d\", cache=cache)\r\n\r\n msg = \"time data 2015-04-31 doesn't match format specified\"\r\n with pytest.raises(ValueError, match=msg):\r\n to_datetime(\"2015-04-31\", 
errors=\"raise\", format=\"%Y-%m-%d\", cache=cache)\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_day_not_in_month_ignore(self, cache):\r\n assert to_datetime(\"2015-02-29\", errors=\"ignore\", cache=cache) == \"2015-02-29\"\r\n assert (\r\n to_datetime(\"2015-02-29\", errors=\"ignore\", format=\"%Y-%m-%d\", cache=cache)\r\n == \"2015-02-29\"\r\n )\r\n assert (\r\n to_datetime(\"2015-02-32\", errors=\"ignore\", format=\"%Y-%m-%d\", cache=cache)\r\n == \"2015-02-32\"\r\n )\r\n assert (\r\n to_datetime(\"2015-04-31\", errors=\"ignore\", format=\"%Y-%m-%d\", cache=cache)\r\n == \"2015-04-31\"\r\n )\r\n\r\n\r\nclass TestDatetimeParsingWrappers:\r\n @pytest.mark.parametrize(\r\n \"date_str,expected\",\r\n list(\r\n {\r\n \"2011-01-01\": datetime(2011, 1, 1),\r\n \"2Q2005\": datetime(2005, 4, 1),\r\n \"2Q05\": datetime(2005, 4, 1),\r\n \"2005Q1\": datetime(2005, 1, 1),\r\n \"05Q1\": datetime(2005, 1, 1),\r\n \"2011Q3\": datetime(2011, 7, 1),\r\n \"11Q3\": datetime(2011, 7, 1),\r\n \"3Q2011\": datetime(2011, 7, 1),\r\n \"3Q11\": datetime(2011, 7, 1),\r\n # quarterly without space\r\n \"2000Q4\": datetime(2000, 10, 1),\r\n \"00Q4\": datetime(2000, 10, 1),\r\n \"4Q2000\": datetime(2000, 10, 1),\r\n \"4Q00\": datetime(2000, 10, 1),\r\n \"2000q4\": datetime(2000, 10, 1),\r\n \"2000-Q4\": datetime(2000, 10, 1),\r\n \"00-Q4\": datetime(2000, 10, 1),\r\n \"4Q-2000\": datetime(2000, 10, 1),\r\n \"4Q-00\": datetime(2000, 10, 1),\r\n \"00q4\": datetime(2000, 10, 1),\r\n \"2005\": datetime(2005, 1, 1),\r\n \"2005-11\": datetime(2005, 11, 1),\r\n \"2005 11\": datetime(2005, 11, 1),\r\n \"11-2005\": datetime(2005, 11, 1),\r\n \"11 2005\": datetime(2005, 11, 1),\r\n \"200511\": datetime(2020, 5, 11),\r\n \"20051109\": datetime(2005, 11, 9),\r\n \"20051109 10:15\": datetime(2005, 11, 9, 10, 15),\r\n \"20051109 08H\": datetime(2005, 11, 9, 8, 0),\r\n \"2005-11-09 10:15\": datetime(2005, 11, 9, 10, 15),\r\n \"2005-11-09 08H\": datetime(2005, 11, 9, 8, 0),\r\n \"2005/11/09 10:15\": datetime(2005, 11, 9, 10, 15),\r\n \"2005/11/09 08H\": datetime(2005, 11, 9, 8, 0),\r\n \"Thu Sep 25 10:36:28 2003\": datetime(2003, 9, 25, 10, 36, 28),\r\n \"Thu Sep 25 2003\": datetime(2003, 9, 25),\r\n \"Sep 25 2003\": datetime(2003, 9, 25),\r\n \"January 1 2014\": datetime(2014, 1, 1),\r\n # GHE10537\r\n \"2014-06\": datetime(2014, 6, 1),\r\n \"06-2014\": datetime(2014, 6, 1),\r\n \"2014-6\": datetime(2014, 6, 1),\r\n \"6-2014\": datetime(2014, 6, 1),\r\n \"20010101 12\": datetime(2001, 1, 1, 12),\r\n \"20010101 1234\": datetime(2001, 1, 1, 12, 34),\r\n \"20010101 123456\": datetime(2001, 1, 1, 12, 34, 56),\r\n }.items()\r\n ),\r\n )\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_parsers(self, date_str, expected, cache):\r\n\r\n # dateutil >= 2.5.0 defaults to yearfirst=True\r\n # https://github.com/dateutil/dateutil/issues/217\r\n yearfirst = True\r\n\r\n result1, _ = parsing.parse_time_string(date_str, yearfirst=yearfirst)\r\n result2 = to_datetime(date_str, yearfirst=yearfirst)\r\n result3 = to_datetime([date_str], yearfirst=yearfirst)\r\n # result5 is used below\r\n result4 = to_datetime(\r\n np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache\r\n )\r\n result6 = DatetimeIndex([date_str], yearfirst=yearfirst)\r\n # result7 is used below\r\n result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)\r\n result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)\r\n\r\n for res in [result1, result2]:\r\n assert res == expected\r\n for res in [result3, 
result4, result6, result8, result9]:\r\n exp = DatetimeIndex([Timestamp(expected)])\r\n tm.assert_index_equal(res, exp)\r\n\r\n # these really need to have yearfirst, but we don't support\r\n if not yearfirst:\r\n result5 = Timestamp(date_str)\r\n assert result5 == expected\r\n result7 = date_range(date_str, freq=\"S\", periods=1, yearfirst=yearfirst)\r\n assert result7 == expected\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_na_values_with_cache(\r\n self, cache, unique_nulls_fixture, unique_nulls_fixture2\r\n ):\r\n # GH22305\r\n expected = Index([NaT, NaT], dtype=\"datetime64[ns]\")\r\n result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2], cache=cache)\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_parsers_nat(self):\r\n # Test that each of several string-accepting methods return pd.NaT\r\n result1, _ = parsing.parse_time_string(\"NaT\")\r\n result2 = to_datetime(\"NaT\")\r\n result3 = Timestamp(\"NaT\")\r\n result4 = DatetimeIndex([\"NaT\"])[0]\r\n assert result1 is NaT\r\n assert result2 is NaT\r\n assert result3 is NaT\r\n assert result4 is NaT\r\n\r\n @pytest.mark.parametrize(\"cache\", [True, False])\r\n def test_parsers_dayfirst_yearfirst(self, cache):\r\n # OK\r\n # 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00\r\n # 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00\r\n # 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00\r\n\r\n # OK\r\n # 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00\r\n # 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00\r\n # 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00\r\n\r\n # bug fix in 2.5.2\r\n # 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00\r\n # 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00\r\n # 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00\r\n\r\n # OK\r\n # 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00\r\n # 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00\r\n # 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00\r\n\r\n # OK\r\n # 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00\r\n # 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00\r\n # 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00\r\n\r\n # OK\r\n # 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00\r\n # 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00\r\n # 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00\r\n\r\n # revert of bug in 2.5.2\r\n # 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00\r\n # 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12\r\n # 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00\r\n\r\n # OK\r\n # 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00\r\n # 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00\r\n # 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00\r\n\r\n # str : dayfirst, yearfirst, expected\r\n cases = {\r\n \"10-11-12\": [\r\n (False, False, datetime(2012, 10, 11)),\r\n (True, False, datetime(2012, 11, 10)),\r\n (False, True, datetime(2010, 11, 12)),\r\n (True, True, datetime(2010, 12, 11)),\r\n ],\r\n \"20/12/21\": [\r\n (False, False, datetime(2021, 12, 20)),\r\n (True, False, datetime(2021, 12, 20)),\r\n (False, True, datetime(2020, 12, 21)),\r\n (True, True, datetime(2020, 12, 21)),\r\n ],\r\n }\r\n\r\n for date_str, values in 
cases.items():\r\n            for dayfirst, yearfirst, expected in values:\r\n\r\n                # compare with dateutil result\r\n                dateutil_result = parse(\r\n                    date_str, dayfirst=dayfirst, yearfirst=yearfirst\r\n                )\r\n                assert dateutil_result == expected\r\n\r\n                result1, _ = parsing.parse_time_string(\r\n                    date_str, dayfirst=dayfirst, yearfirst=yearfirst\r\n                )\r\n\r\n                # we don't support dayfirst/yearfirst here:\r\n                if not dayfirst and not yearfirst:\r\n                    result2 = Timestamp(date_str)\r\n                    assert result2 == expected\r\n\r\n                result3 = to_datetime(\r\n                    date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache\r\n                )\r\n\r\n                result4 = DatetimeIndex(\r\n                    [date_str], dayfirst=dayfirst, yearfirst=yearfirst\r\n                )[0]\r\n\r\n                assert result1 == expected\r\n                assert result3 == expected\r\n                assert result4 == expected\r\n\r\n    @pytest.mark.parametrize(\"cache\", [True, False])\r\n    def test_parsers_timestring(self, cache):\r\n        # must be the same as dateutil result\r\n        cases = {\r\n            \"10:15\": (parse(\"10:15\"), datetime(1, 1, 1, 10, 15)),\r\n            \"9:05\": (parse(\"9:05\"), datetime(1, 1, 1, 9, 5)),\r\n        }\r\n\r\n        for date_str, (exp_now, exp_def) in cases.items():\r\n            result1, _ = parsing.parse_time_string(date_str)\r\n            result2 = to_datetime(date_str)\r\n            result3 = to_datetime([date_str])\r\n            result4 = Timestamp(date_str)\r\n            result5 = DatetimeIndex([date_str])[0]\r\n            # parse time string return time string based on default date\r\n            # others are not, and can't be changed because it is used in\r\n            # time series plot\r\n            assert result1 == exp_def\r\n            assert result2 == exp_now\r\n            assert result3 == exp_now\r\n            assert result4 == exp_now\r\n            assert result5 == exp_now\r\n\r\n    @pytest.mark.parametrize(\"cache\", [True, False])\r\n    @pytest.mark.parametrize(\r\n        \"dt_string, tz, dt_string_repr\",\r\n        [\r\n            (\r\n                \"2013-01-01 05:45+0545\",\r\n                pytz.FixedOffset(345),\r\n                \"Timestamp('2013-01-01 05:45:00+0545', tz='pytz.FixedOffset(345)')\",\r\n            ),\r\n            (\r\n                \"2013-01-01 05:30+0530\",\r\n                pytz.FixedOffset(330),\r\n                \"Timestamp('2013-01-01 05:30:00+0530', tz='pytz.FixedOffset(330)')\",\r\n            ),\r\n        ],\r\n    )\r\n    def test_parsers_timezone_minute_offsets_roundtrip(\r\n        self, cache, dt_string, tz, dt_string_repr\r\n    ):\r\n        # GH11708\r\n        base = to_datetime(\"2013-01-01 00:00:00\", cache=cache)\r\n        base = base.tz_localize(\"UTC\").tz_convert(tz)\r\n        dt_time = to_datetime(dt_string, cache=cache)\r\n        assert base == dt_time\r\n        assert dt_string_repr == repr(dt_time)\r\n\r\n\r\n@pytest.fixture(params=[\"D\", \"s\", \"ms\", \"us\", \"ns\"])\r\ndef units(request):\r\n    \"\"\"Day and some time units.\r\n\r\n    * D\r\n    * s\r\n    * ms\r\n    * us\r\n    * ns\r\n    \"\"\"\r\n    return request.param\r\n\r\n\r\n@pytest.fixture\r\ndef epoch_1960():\r\n    \"\"\"Timestamp at 1960-01-01.\"\"\"\r\n    return Timestamp(\"1960-01-01\")\r\n\r\n\r\n@pytest.fixture\r\ndef units_from_epochs():\r\n    return list(range(5))\r\n\r\n\r\n@pytest.fixture(params=[\"timestamp\", \"pydatetime\", \"datetime64\", \"str_1960\"])\r\ndef epochs(epoch_1960, request):\r\n    \"\"\"Timestamp at 1960-01-01 in various forms.\r\n\r\n    * Timestamp\r\n    * datetime.datetime\r\n    * numpy.datetime64\r\n    * str\r\n    \"\"\"\r\n    assert request.param in {\"timestamp\", \"pydatetime\", \"datetime64\", \"str_1960\"}\r\n    if request.param == \"timestamp\":\r\n        return epoch_1960\r\n    elif request.param == \"pydatetime\":\r\n        return epoch_1960.to_pydatetime()\r\n    elif request.param == \"datetime64\":\r\n        return epoch_1960.to_datetime64()\r\n    else:\r\n        return str(epoch_1960)\r\n\r\n\r\n@pytest.fixture\r\ndef julian_dates():\r\n    return 
pd.date_range(\"2014-1-1\", periods=10).to_julian_date().values\r\n\r\n\r\nclass TestOrigin:\r\n def test_to_basic(self, julian_dates):\r\n # gh-11276, gh-11745\r\n # for origin as julian\r\n\r\n result = Series(pd.to_datetime(julian_dates, unit=\"D\", origin=\"julian\"))\r\n expected = Series(\r\n pd.to_datetime(julian_dates - Timestamp(0).to_julian_date(), unit=\"D\")\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n result = Series(pd.to_datetime([0, 1, 2], unit=\"D\", origin=\"unix\"))\r\n expected = Series(\r\n [Timestamp(\"1970-01-01\"), Timestamp(\"1970-01-02\"), Timestamp(\"1970-01-03\")]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n # default\r\n result = Series(pd.to_datetime([0, 1, 2], unit=\"D\"))\r\n expected = Series(\r\n [Timestamp(\"1970-01-01\"), Timestamp(\"1970-01-02\"), Timestamp(\"1970-01-03\")]\r\n )\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_julian_round_trip(self):\r\n result = pd.to_datetime(2456658, origin=\"julian\", unit=\"D\")\r\n assert result.to_julian_date() == 2456658\r\n\r\n # out-of-bounds\r\n msg = \"1 is Out of Bounds for origin='julian'\"\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(1, origin=\"julian\", unit=\"D\")\r\n\r\n def test_invalid_unit(self, units, julian_dates):\r\n\r\n # checking for invalid combination of origin='julian' and unit != D\r\n if units != \"D\":\r\n msg = \"unit must be 'D' for origin='julian'\"\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(julian_dates, unit=units, origin=\"julian\")\r\n\r\n def test_invalid_origin(self):\r\n\r\n # need to have a numeric specified\r\n msg = \"it must be numeric with a unit specified\"\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(\"2005-01-01\", origin=\"1960-01-01\")\r\n\r\n with pytest.raises(ValueError, match=msg):\r\n pd.to_datetime(\"2005-01-01\", origin=\"1960-01-01\", unit=\"D\")\r\n\r\n def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):\r\n\r\n expected = Series(\r\n [pd.Timedelta(x, unit=units) + epoch_1960 for x in units_from_epochs]\r\n )\r\n\r\n result = Series(pd.to_datetime(units_from_epochs, unit=units, origin=epochs))\r\n tm.assert_series_equal(result, expected)\r\n\r\n @pytest.mark.parametrize(\r\n \"origin, exc\",\r\n [\r\n (\"random_string\", ValueError),\r\n (\"epoch\", ValueError),\r\n (\"13-24-1990\", ValueError),\r\n (datetime(1, 1, 1), tslib.OutOfBoundsDatetime),\r\n ],\r\n )\r\n def test_invalid_origins(self, origin, exc, units, units_from_epochs):\r\n\r\n msg = f\"origin {origin} (is Out of Bounds|cannot be converted to a Timestamp)\"\r\n with pytest.raises(exc, match=msg):\r\n pd.to_datetime(units_from_epochs, unit=units, origin=origin)\r\n\r\n def test_invalid_origins_tzinfo(self):\r\n # GH16842\r\n with pytest.raises(ValueError, match=\"must be tz-naive\"):\r\n pd.to_datetime(1, unit=\"D\", origin=datetime(2000, 1, 1, tzinfo=pytz.utc))\r\n\r\n @pytest.mark.parametrize(\"format\", [None, \"%Y-%m-%d %H:%M:%S\"])\r\n def test_to_datetime_out_of_bounds_with_format_arg(self, format):\r\n # see gh-23830\r\n msg = \"Out of bounds nanosecond timestamp\"\r\n with pytest.raises(OutOfBoundsDatetime, match=msg):\r\n to_datetime(\"2417-10-27 00:00:00\", format=format)\r\n\r\n def test_processing_order(self):\r\n # make sure we handle out-of-bounds *before*\r\n # constructing the dates\r\n\r\n result = pd.to_datetime(200 * 365, unit=\"D\")\r\n expected = Timestamp(\"2169-11-13 00:00:00\")\r\n assert result == expected\r\n\r\n result = pd.to_datetime(200 * 365, 
unit=\"D\", origin=\"1870-01-01\")\r\n expected = Timestamp(\"2069-11-13 00:00:00\")\r\n assert result == expected\r\n\r\n result = pd.to_datetime(300 * 365, unit=\"D\", origin=\"1870-01-01\")\r\n expected = Timestamp(\"2169-10-20 00:00:00\")\r\n assert result == expected\r\n\r\n @pytest.mark.parametrize(\r\n \"offset,utc,exp\",\r\n [\r\n [\"Z\", True, \"2019-01-01T00:00:00.000Z\"],\r\n [\"Z\", None, \"2019-01-01T00:00:00.000Z\"],\r\n [\"-01:00\", True, \"2019-01-01T01:00:00.000Z\"],\r\n [\"-01:00\", None, \"2019-01-01T00:00:00.000-01:00\"],\r\n ],\r\n )\r\n def test_arg_tz_ns_unit(self, offset, utc, exp):\r\n # GH 25546\r\n arg = \"2019-01-01T00:00:00.000\" + offset\r\n result = to_datetime([arg], unit=\"ns\", utc=utc)\r\n expected = to_datetime([exp])\r\n tm.assert_index_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"listlike,do_caching\",\r\n [([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True)],\r\n)\r\ndef test_should_cache(listlike, do_caching):\r\n assert (\r\n tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7)\r\n == do_caching\r\n )\r\n\r\n\r\[email protected](\r\n \"unique_share,check_count, err_message\",\r\n [\r\n (0.5, 11, r\"check_count must be in next bounds: \\[0; len\\(arg\\)\\]\"),\r\n (10, 2, r\"unique_share must be in next bounds: \\(0; 1\\)\"),\r\n ],\r\n)\r\ndef test_should_cache_errors(unique_share, check_count, err_message):\r\n arg = [5] * 10\r\n\r\n with pytest.raises(AssertionError, match=err_message):\r\n tools.should_cache(arg, unique_share, check_count)\r\n\r\n\r\ndef test_nullable_integer_to_datetime():\r\n # Test for #30050\r\n ser = Series([1, 2, None, 2 ** 61, None])\r\n ser = ser.astype(\"Int64\")\r\n ser_copy = ser.copy()\r\n\r\n res = pd.to_datetime(ser, unit=\"ns\")\r\n\r\n expected = Series(\r\n [\r\n np.datetime64(\"1970-01-01 00:00:00.000000001\"),\r\n np.datetime64(\"1970-01-01 00:00:00.000000002\"),\r\n np.datetime64(\"NaT\"),\r\n np.datetime64(\"2043-01-25 23:56:49.213693952\"),\r\n np.datetime64(\"NaT\"),\r\n ]\r\n )\r\n tm.assert_series_equal(res, expected)\r\n # Check that ser isn't mutated\r\n tm.assert_series_equal(ser, ser_copy)\r\n\r\n\r\[email protected](\"klass\", [np.array, list])\r\ndef test_na_to_datetime(nulls_fixture, klass):\r\n result = pd.to_datetime(klass([nulls_fixture]))\r\n\r\n assert result[0] is pd.NaT\r\n\r\n\r\ndef test_empty_string_datetime_coerce__format():\r\n # GH13044\r\n td = Series([\"03/24/2016\", \"03/25/2016\", \"\"])\r\n format = \"%m/%d/%Y\"\r\n\r\n # coerce empty string to pd.NaT\r\n result = pd.to_datetime(td, format=format, errors=\"coerce\")\r\n expected = Series([\"2016-03-24\", \"2016-03-25\", pd.NaT], dtype=\"datetime64[ns]\")\r\n tm.assert_series_equal(expected, result)\r\n\r\n # raise an exception in case a format is given\r\n with pytest.raises(ValueError, match=\"does not match format\"):\r\n result = pd.to_datetime(td, format=format, errors=\"raise\")\r\n\r\n # don't raise an expection in case no format is given\r\n result = pd.to_datetime(td, errors=\"raise\")\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\ndef test_empty_string_datetime_coerce__unit():\r\n # GH13044\r\n # coerce empty string to pd.NaT\r\n result = pd.to_datetime([1, \"\"], unit=\"s\", errors=\"coerce\")\r\n expected = DatetimeIndex([\"1970-01-01 00:00:01\", \"NaT\"], dtype=\"datetime64[ns]\")\r\n tm.assert_index_equal(expected, result)\r\n\r\n # verify that no exception is raised even when errors='raise' is set\r\n result = pd.to_datetime([1, \"\"], 
unit=\"s\", errors=\"raise\")\r\n tm.assert_index_equal(expected, result)\r\n",
"from datetime import date\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas import (\r\n DatetimeIndex,\r\n MultiIndex,\r\n NaT,\r\n Series,\r\n Timestamp,\r\n date_range,\r\n period_range,\r\n)\r\nfrom pandas.core.indexing import IndexingError\r\nimport pandas.testing as tm\r\n\r\nfrom pandas.tseries.offsets import BDay\r\n\r\n\r\nclass TestSetitemDT64Values:\r\n def test_setitem_none_nan(self):\r\n series = Series(date_range(\"1/1/2000\", periods=10))\r\n series[3] = None\r\n assert series[3] is NaT\r\n\r\n series[3:5] = None\r\n assert series[4] is NaT\r\n\r\n series[5] = np.nan\r\n assert series[5] is NaT\r\n\r\n series[5:7] = np.nan\r\n assert series[6] is NaT\r\n\r\n def test_setitem_multiindex_empty_slice(self):\r\n # https://github.com/pandas-dev/pandas/issues/35878\r\n idx = MultiIndex.from_tuples([(\"a\", 1), (\"b\", 2)])\r\n result = Series([1, 2], index=idx)\r\n expected = result.copy()\r\n result.loc[[]] = 0\r\n tm.assert_series_equal(result, expected)\r\n\r\n def test_setitem_with_string_index(self):\r\n # GH#23451\r\n ser = Series([1, 2, 3], index=[\"Date\", \"b\", \"other\"])\r\n ser[\"Date\"] = date.today()\r\n assert ser.Date == date.today()\r\n assert ser[\"Date\"] == date.today()\r\n\r\n def test_setitem_with_different_tz_casts_to_object(self):\r\n # GH#24024\r\n ser = Series(date_range(\"2000\", periods=2, tz=\"US/Central\"))\r\n ser[0] = Timestamp(\"2000\", tz=\"US/Eastern\")\r\n expected = Series(\r\n [\r\n Timestamp(\"2000-01-01 00:00:00-05:00\", tz=\"US/Eastern\"),\r\n Timestamp(\"2000-01-02 00:00:00-06:00\", tz=\"US/Central\"),\r\n ],\r\n dtype=object,\r\n )\r\n tm.assert_series_equal(ser, expected)\r\n\r\n def test_setitem_tuple_with_datetimetz_values(self):\r\n # GH#20441\r\n arr = date_range(\"2017\", periods=4, tz=\"US/Eastern\")\r\n index = [(0, 1), (0, 2), (0, 3), (0, 4)]\r\n result = Series(arr, index=index)\r\n expected = result.copy()\r\n result[(0, 1)] = np.nan\r\n expected.iloc[0] = np.nan\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\nclass TestSetitemPeriodDtype:\r\n @pytest.mark.parametrize(\"na_val\", [None, np.nan])\r\n def test_setitem_na_period_dtype_casts_to_nat(self, na_val):\r\n ser = Series(period_range(\"2000-01-01\", periods=10, freq=\"D\"))\r\n\r\n ser[3] = na_val\r\n assert ser[3] is NaT\r\n\r\n ser[3:5] = na_val\r\n assert ser[4] is NaT\r\n\r\n\r\nclass TestSetitemBooleanMask:\r\n def test_setitem_boolean(self, string_series):\r\n mask = string_series > string_series.median()\r\n\r\n # similar indexed series\r\n result = string_series.copy()\r\n result[mask] = string_series * 2\r\n expected = string_series * 2\r\n tm.assert_series_equal(result[mask], expected[mask])\r\n\r\n # needs alignment\r\n result = string_series.copy()\r\n result[mask] = (string_series * 2)[0:5]\r\n expected = (string_series * 2)[0:5].reindex_like(string_series)\r\n expected[-mask] = string_series[mask]\r\n tm.assert_series_equal(result[mask], expected[mask])\r\n\r\n def test_setitem_boolean_corner(self, datetime_series):\r\n ts = datetime_series\r\n mask_shifted = ts.shift(1, freq=BDay()) > ts.median()\r\n\r\n msg = (\r\n r\"Unalignable boolean Series provided as indexer \\(index of \"\r\n r\"the boolean Series and of the indexed object do not match\"\r\n )\r\n with pytest.raises(IndexingError, match=msg):\r\n ts[mask_shifted] = 1\r\n\r\n with pytest.raises(IndexingError, match=msg):\r\n ts.loc[mask_shifted] = 1\r\n\r\n def test_setitem_boolean_different_order(self, string_series):\r\n ordered = string_series.sort_values()\r\n\r\n copy = 
string_series.copy()\r\n copy[ordered > 0] = 0\r\n\r\n expected = string_series.copy()\r\n expected[expected > 0] = 0\r\n\r\n tm.assert_series_equal(copy, expected)\r\n\r\n @pytest.mark.parametrize(\"func\", [list, np.array, Series])\r\n def test_setitem_boolean_python_list(self, func):\r\n # GH19406\r\n ser = Series([None, \"b\", None])\r\n mask = func([True, False, True])\r\n ser[mask] = [\"a\", \"c\"]\r\n expected = Series([\"a\", \"b\", \"c\"])\r\n tm.assert_series_equal(ser, expected)\r\n\r\n @pytest.mark.parametrize(\"value\", [None, NaT, np.nan])\r\n def test_setitem_boolean_td64_values_cast_na(self, value):\r\n # GH#18586\r\n series = Series([0, 1, 2], dtype=\"timedelta64[ns]\")\r\n mask = series == series[0]\r\n series[mask] = value\r\n expected = Series([NaT, 1, 2], dtype=\"timedelta64[ns]\")\r\n tm.assert_series_equal(series, expected)\r\n\r\n def test_setitem_boolean_nullable_int_types(self, any_numeric_dtype):\r\n # GH: 26468\r\n ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)\r\n ser[ser > 6] = Series(range(4), dtype=any_numeric_dtype)\r\n expected = Series([5, 6, 2, 3], dtype=any_numeric_dtype)\r\n tm.assert_series_equal(ser, expected)\r\n\r\n ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)\r\n ser.loc[ser > 6] = Series(range(4), dtype=any_numeric_dtype)\r\n tm.assert_series_equal(ser, expected)\r\n\r\n ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)\r\n loc_ser = Series(range(4), dtype=any_numeric_dtype)\r\n ser.loc[ser > 6] = loc_ser.loc[loc_ser > 1]\r\n tm.assert_series_equal(ser, expected)\r\n\r\n\r\nclass TestSetitemViewCopySemantics:\r\n def test_setitem_invalidates_datetime_index_freq(self):\r\n # GH#24096 altering a datetime64tz Series inplace invalidates the\r\n # `freq` attribute on the underlying DatetimeIndex\r\n\r\n dti = date_range(\"20130101\", periods=3, tz=\"US/Eastern\")\r\n ts = dti[1]\r\n ser = Series(dti)\r\n assert ser._values is not dti\r\n assert ser._values._data.base is not dti._data._data.base\r\n assert dti.freq == \"D\"\r\n ser.iloc[1] = NaT\r\n assert ser._values.freq is None\r\n\r\n # check that the DatetimeIndex was not altered in place\r\n assert ser._values is not dti\r\n assert ser._values._data.base is not dti._data._data.base\r\n assert dti[1] == ts\r\n assert dti.freq == \"D\"\r\n\r\n def test_dt64tz_setitem_does_not_mutate_dti(self):\r\n # GH#21907, GH#24096\r\n dti = date_range(\"2016-01-01\", periods=10, tz=\"US/Pacific\")\r\n ts = dti[0]\r\n ser = Series(dti)\r\n assert ser._values is not dti\r\n assert ser._values._data.base is not dti._data._data.base\r\n assert ser._mgr.blocks[0].values is not dti\r\n assert ser._mgr.blocks[0].values._data.base is not dti._data._data.base\r\n\r\n ser[::3] = NaT\r\n assert ser[0] is NaT\r\n assert dti[0] == ts\r\n\r\n\r\nclass TestSetitemCallable:\r\n def test_setitem_callable_key(self):\r\n # GH#12533\r\n ser = Series([1, 2, 3, 4], index=list(\"ABCD\"))\r\n ser[lambda x: \"A\"] = -1\r\n\r\n expected = Series([-1, 2, 3, 4], index=list(\"ABCD\"))\r\n tm.assert_series_equal(ser, expected)\r\n\r\n def test_setitem_callable_other(self):\r\n # GH#13299\r\n inc = lambda x: x + 1\r\n\r\n ser = Series([1, 2, -1, 4])\r\n ser[ser < 0] = inc\r\n\r\n expected = Series([1, 2, inc, 4])\r\n tm.assert_series_equal(ser, expected)\r\n\r\n\r\nclass TestSetitemCasting:\r\n def test_setitem_nan_casts(self):\r\n # these induce dtype changes\r\n expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])\r\n ser = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])\r\n ser[::2] = np.nan\r\n 
tm.assert_series_equal(ser, expected)\r\n\r\n # gets coerced to float, right?\r\n expected = Series([np.nan, 1, np.nan, 0])\r\n ser = Series([True, True, False, False])\r\n ser[::2] = np.nan\r\n tm.assert_series_equal(ser, expected)\r\n\r\n expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9])\r\n ser = Series(np.arange(10))\r\n ser[:5] = np.nan\r\n tm.assert_series_equal(ser, expected)\r\n\r\n\r\nclass TestSetitemWithExpansion:\r\n def test_setitem_empty_series(self):\r\n # GH#10193\r\n key = Timestamp(\"2012-01-01\")\r\n series = Series(dtype=object)\r\n series[key] = 47\r\n expected = Series(47, [key])\r\n tm.assert_series_equal(series, expected)\r\n\r\n def test_setitem_empty_series_datetimeindex_preserves_freq(self):\r\n # GH#33573 our index should retain its freq\r\n series = Series([], DatetimeIndex([], freq=\"D\"), dtype=object)\r\n key = Timestamp(\"2012-01-01\")\r\n series[key] = 47\r\n expected = Series(47, DatetimeIndex([key], freq=\"D\"))\r\n tm.assert_series_equal(series, expected)\r\n assert series.index.freq == expected.index.freq\r\n\r\n\r\ndef test_setitem_scalar_into_readonly_backing_data():\r\n # GH#14359: test that you cannot mutate a read only buffer\r\n\r\n array = np.zeros(5)\r\n array.flags.writeable = False # make the array immutable\r\n series = Series(array)\r\n\r\n for n in range(len(series)):\r\n msg = \"assignment destination is read-only\"\r\n with pytest.raises(ValueError, match=msg):\r\n series[n] = 1\r\n\r\n assert array[n] == 0\r\n\r\n\r\ndef test_setitem_slice_into_readonly_backing_data():\r\n # GH#14359: test that you cannot mutate a read only buffer\r\n\r\n array = np.zeros(5)\r\n array.flags.writeable = False # make the array immutable\r\n series = Series(array)\r\n\r\n msg = \"assignment destination is read-only\"\r\n with pytest.raises(ValueError, match=msg):\r\n series[1:3] = 1\r\n\r\n assert not array.any()\r\n",
"from typing import Dict, List, Tuple\r\n\r\nimport pandas._libs.json as json\r\nfrom pandas._typing import StorageOptions\r\n\r\nfrom pandas.io.excel._base import ExcelWriter\r\nfrom pandas.io.excel._util import validate_freeze_panes\r\n\r\n\r\nclass _XlsxStyler:\r\n # Map from openpyxl-oriented styles to flatter xlsxwriter representation\r\n # Ordering necessary for both determinism and because some are keyed by\r\n # prefixes of others.\r\n STYLE_MAPPING: Dict[str, List[Tuple[Tuple[str, ...], str]]] = {\r\n \"font\": [\r\n ((\"name\",), \"font_name\"),\r\n ((\"sz\",), \"font_size\"),\r\n ((\"size\",), \"font_size\"),\r\n ((\"color\", \"rgb\"), \"font_color\"),\r\n ((\"color\",), \"font_color\"),\r\n ((\"b\",), \"bold\"),\r\n ((\"bold\",), \"bold\"),\r\n ((\"i\",), \"italic\"),\r\n ((\"italic\",), \"italic\"),\r\n ((\"u\",), \"underline\"),\r\n ((\"underline\",), \"underline\"),\r\n ((\"strike\",), \"font_strikeout\"),\r\n ((\"vertAlign\",), \"font_script\"),\r\n ((\"vertalign\",), \"font_script\"),\r\n ],\r\n \"number_format\": [((\"format_code\",), \"num_format\"), ((), \"num_format\")],\r\n \"protection\": [((\"locked\",), \"locked\"), ((\"hidden\",), \"hidden\")],\r\n \"alignment\": [\r\n ((\"horizontal\",), \"align\"),\r\n ((\"vertical\",), \"valign\"),\r\n ((\"text_rotation\",), \"rotation\"),\r\n ((\"wrap_text\",), \"text_wrap\"),\r\n ((\"indent\",), \"indent\"),\r\n ((\"shrink_to_fit\",), \"shrink\"),\r\n ],\r\n \"fill\": [\r\n ((\"patternType\",), \"pattern\"),\r\n ((\"patterntype\",), \"pattern\"),\r\n ((\"fill_type\",), \"pattern\"),\r\n ((\"start_color\", \"rgb\"), \"fg_color\"),\r\n ((\"fgColor\", \"rgb\"), \"fg_color\"),\r\n ((\"fgcolor\", \"rgb\"), \"fg_color\"),\r\n ((\"start_color\",), \"fg_color\"),\r\n ((\"fgColor\",), \"fg_color\"),\r\n ((\"fgcolor\",), \"fg_color\"),\r\n ((\"end_color\", \"rgb\"), \"bg_color\"),\r\n ((\"bgColor\", \"rgb\"), \"bg_color\"),\r\n ((\"bgcolor\", \"rgb\"), \"bg_color\"),\r\n ((\"end_color\",), \"bg_color\"),\r\n ((\"bgColor\",), \"bg_color\"),\r\n ((\"bgcolor\",), \"bg_color\"),\r\n ],\r\n \"border\": [\r\n ((\"color\", \"rgb\"), \"border_color\"),\r\n ((\"color\",), \"border_color\"),\r\n ((\"style\",), \"border\"),\r\n ((\"top\", \"color\", \"rgb\"), \"top_color\"),\r\n ((\"top\", \"color\"), \"top_color\"),\r\n ((\"top\", \"style\"), \"top\"),\r\n ((\"top\",), \"top\"),\r\n ((\"right\", \"color\", \"rgb\"), \"right_color\"),\r\n ((\"right\", \"color\"), \"right_color\"),\r\n ((\"right\", \"style\"), \"right\"),\r\n ((\"right\",), \"right\"),\r\n ((\"bottom\", \"color\", \"rgb\"), \"bottom_color\"),\r\n ((\"bottom\", \"color\"), \"bottom_color\"),\r\n ((\"bottom\", \"style\"), \"bottom\"),\r\n ((\"bottom\",), \"bottom\"),\r\n ((\"left\", \"color\", \"rgb\"), \"left_color\"),\r\n ((\"left\", \"color\"), \"left_color\"),\r\n ((\"left\", \"style\"), \"left\"),\r\n ((\"left\",), \"left\"),\r\n ],\r\n }\r\n\r\n @classmethod\r\n def convert(cls, style_dict, num_format_str=None):\r\n \"\"\"\r\n converts a style_dict to an xlsxwriter format dict\r\n\r\n Parameters\r\n ----------\r\n style_dict : style dictionary to convert\r\n num_format_str : optional number format string\r\n \"\"\"\r\n # Create a XlsxWriter format object.\r\n props = {}\r\n\r\n if num_format_str is not None:\r\n props[\"num_format\"] = num_format_str\r\n\r\n if style_dict is None:\r\n return props\r\n\r\n if \"borders\" in style_dict:\r\n style_dict = style_dict.copy()\r\n style_dict[\"border\"] = style_dict.pop(\"borders\")\r\n\r\n for style_group_key, style_group in 
style_dict.items():\r\n for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):\r\n # src is a sequence of keys into a nested dict\r\n # dst is a flat key\r\n if dst in props:\r\n continue\r\n v = style_group\r\n for k in src:\r\n try:\r\n v = v[k]\r\n except (KeyError, TypeError):\r\n break\r\n else:\r\n props[dst] = v\r\n\r\n if isinstance(props.get(\"pattern\"), str):\r\n # TODO: support other fill patterns\r\n props[\"pattern\"] = 0 if props[\"pattern\"] == \"none\" else 1\r\n\r\n for k in [\"border\", \"top\", \"right\", \"bottom\", \"left\"]:\r\n if isinstance(props.get(k), str):\r\n try:\r\n props[k] = [\r\n \"none\",\r\n \"thin\",\r\n \"medium\",\r\n \"dashed\",\r\n \"dotted\",\r\n \"thick\",\r\n \"double\",\r\n \"hair\",\r\n \"mediumDashed\",\r\n \"dashDot\",\r\n \"mediumDashDot\",\r\n \"dashDotDot\",\r\n \"mediumDashDotDot\",\r\n \"slantDashDot\",\r\n ].index(props[k])\r\n except ValueError:\r\n props[k] = 2\r\n\r\n if isinstance(props.get(\"font_script\"), str):\r\n props[\"font_script\"] = [\"baseline\", \"superscript\", \"subscript\"].index(\r\n props[\"font_script\"]\r\n )\r\n\r\n if isinstance(props.get(\"underline\"), str):\r\n props[\"underline\"] = {\r\n \"none\": 0,\r\n \"single\": 1,\r\n \"double\": 2,\r\n \"singleAccounting\": 33,\r\n \"doubleAccounting\": 34,\r\n }[props[\"underline\"]]\r\n\r\n return props\r\n\r\n\r\nclass XlsxWriter(ExcelWriter):\r\n engine = \"xlsxwriter\"\r\n supported_extensions = (\".xlsx\",)\r\n\r\n def __init__(\r\n self,\r\n path,\r\n engine=None,\r\n date_format=None,\r\n datetime_format=None,\r\n mode: str = \"w\",\r\n storage_options: StorageOptions = None,\r\n **engine_kwargs,\r\n ):\r\n # Use the xlsxwriter module as the Excel writer.\r\n from xlsxwriter import Workbook\r\n\r\n if mode == \"a\":\r\n raise ValueError(\"Append mode is not supported with xlsxwriter!\")\r\n\r\n super().__init__(\r\n path,\r\n engine=engine,\r\n date_format=date_format,\r\n datetime_format=datetime_format,\r\n mode=mode,\r\n storage_options=storage_options,\r\n **engine_kwargs,\r\n )\r\n\r\n self.book = Workbook(self.handles.handle, **engine_kwargs)\r\n\r\n def save(self):\r\n \"\"\"\r\n Save workbook to disk.\r\n \"\"\"\r\n return self.book.close()\r\n\r\n def write_cells(\r\n self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None\r\n ):\r\n # Write the frame cells using xlsxwriter.\r\n sheet_name = self._get_sheet_name(sheet_name)\r\n\r\n if sheet_name in self.sheets:\r\n wks = self.sheets[sheet_name]\r\n else:\r\n wks = self.book.add_worksheet(sheet_name)\r\n self.sheets[sheet_name] = wks\r\n\r\n style_dict = {\"null\": None}\r\n\r\n if validate_freeze_panes(freeze_panes):\r\n wks.freeze_panes(*(freeze_panes))\r\n\r\n for cell in cells:\r\n val, fmt = self._value_with_fmt(cell.val)\r\n\r\n stylekey = json.dumps(cell.style)\r\n if fmt:\r\n stylekey += fmt\r\n\r\n if stylekey in style_dict:\r\n style = style_dict[stylekey]\r\n else:\r\n style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))\r\n style_dict[stylekey] = style\r\n\r\n if cell.mergestart is not None and cell.mergeend is not None:\r\n wks.merge_range(\r\n startrow + cell.row,\r\n startcol + cell.col,\r\n startrow + cell.mergestart,\r\n startcol + cell.mergeend,\r\n val,\r\n style,\r\n )\r\n else:\r\n wks.write(startrow + cell.row, startcol + cell.col, val, style)\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nfrom pandas import DataFrame, Series, concat, isna, notna\r\nimport pandas._testing as tm\r\n\r\nimport pandas.tseries.offsets as offsets\r\n\r\n\r\[email protected](\r\n \"compare_func, roll_func, kwargs\",\r\n [\r\n [np.mean, \"mean\", {}],\r\n [np.nansum, \"sum\", {}],\r\n pytest.param(\r\n lambda x: np.isfinite(x).astype(float).sum(),\r\n \"count\",\r\n {},\r\n marks=pytest.mark.filterwarnings(\"ignore:min_periods:FutureWarning\"),\r\n ),\r\n [np.median, \"median\", {}],\r\n [np.min, \"min\", {}],\r\n [np.max, \"max\", {}],\r\n [lambda x: np.std(x, ddof=1), \"std\", {}],\r\n [lambda x: np.std(x, ddof=0), \"std\", {\"ddof\": 0}],\r\n [lambda x: np.var(x, ddof=1), \"var\", {}],\r\n [lambda x: np.var(x, ddof=0), \"var\", {\"ddof\": 0}],\r\n ],\r\n)\r\ndef test_series(series, compare_func, roll_func, kwargs):\r\n result = getattr(series.rolling(50), roll_func)(**kwargs)\r\n assert isinstance(result, Series)\r\n tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))\r\n\r\n\r\[email protected](\r\n \"compare_func, roll_func, kwargs\",\r\n [\r\n [np.mean, \"mean\", {}],\r\n [np.nansum, \"sum\", {}],\r\n pytest.param(\r\n lambda x: np.isfinite(x).astype(float).sum(),\r\n \"count\",\r\n {},\r\n marks=pytest.mark.filterwarnings(\"ignore:min_periods:FutureWarning\"),\r\n ),\r\n [np.median, \"median\", {}],\r\n [np.min, \"min\", {}],\r\n [np.max, \"max\", {}],\r\n [lambda x: np.std(x, ddof=1), \"std\", {}],\r\n [lambda x: np.std(x, ddof=0), \"std\", {\"ddof\": 0}],\r\n [lambda x: np.var(x, ddof=1), \"var\", {}],\r\n [lambda x: np.var(x, ddof=0), \"var\", {\"ddof\": 0}],\r\n ],\r\n)\r\ndef test_frame(raw, frame, compare_func, roll_func, kwargs):\r\n result = getattr(frame.rolling(50), roll_func)(**kwargs)\r\n assert isinstance(result, DataFrame)\r\n tm.assert_series_equal(\r\n result.iloc[-1, :],\r\n frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),\r\n check_names=False,\r\n )\r\n\r\n\r\[email protected](\r\n \"compare_func, roll_func, kwargs, minp\",\r\n [\r\n [np.mean, \"mean\", {}, 10],\r\n [np.nansum, \"sum\", {}, 10],\r\n [lambda x: np.isfinite(x).astype(float).sum(), \"count\", {}, 0],\r\n [np.median, \"median\", {}, 10],\r\n [np.min, \"min\", {}, 10],\r\n [np.max, \"max\", {}, 10],\r\n [lambda x: np.std(x, ddof=1), \"std\", {}, 10],\r\n [lambda x: np.std(x, ddof=0), \"std\", {\"ddof\": 0}, 10],\r\n [lambda x: np.var(x, ddof=1), \"var\", {}, 10],\r\n [lambda x: np.var(x, ddof=0), \"var\", {\"ddof\": 0}, 10],\r\n ],\r\n)\r\ndef test_time_rule_series(series, compare_func, roll_func, kwargs, minp):\r\n win = 25\r\n ser = series[::2].resample(\"B\").mean()\r\n series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(\r\n **kwargs\r\n )\r\n last_date = series_result.index[-1]\r\n prev_date = last_date - 24 * offsets.BDay()\r\n\r\n trunc_series = series[::2].truncate(prev_date, last_date)\r\n tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))\r\n\r\n\r\[email protected](\r\n \"compare_func, roll_func, kwargs, minp\",\r\n [\r\n [np.mean, \"mean\", {}, 10],\r\n [np.nansum, \"sum\", {}, 10],\r\n [lambda x: np.isfinite(x).astype(float).sum(), \"count\", {}, 0],\r\n [np.median, \"median\", {}, 10],\r\n [np.min, \"min\", {}, 10],\r\n [np.max, \"max\", {}, 10],\r\n [lambda x: np.std(x, ddof=1), \"std\", {}, 10],\r\n [lambda x: np.std(x, ddof=0), \"std\", {\"ddof\": 0}, 10],\r\n [lambda x: np.var(x, ddof=1), \"var\", {}, 10],\r\n [lambda x: np.var(x, ddof=0), \"var\", {\"ddof\": 0}, 10],\r\n 
],\r\n)\r\ndef test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):\r\n win = 25\r\n frm = frame[::2].resample(\"B\").mean()\r\n frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(\r\n **kwargs\r\n )\r\n last_date = frame_result.index[-1]\r\n prev_date = last_date - 24 * offsets.BDay()\r\n\r\n trunc_frame = frame[::2].truncate(prev_date, last_date)\r\n tm.assert_series_equal(\r\n frame_result.xs(last_date),\r\n trunc_frame.apply(compare_func, raw=raw),\r\n check_names=False,\r\n )\r\n\r\n\r\[email protected](\r\n \"compare_func, roll_func, kwargs\",\r\n [\r\n [np.mean, \"mean\", {}],\r\n [np.nansum, \"sum\", {}],\r\n [np.median, \"median\", {}],\r\n [np.min, \"min\", {}],\r\n [np.max, \"max\", {}],\r\n [lambda x: np.std(x, ddof=1), \"std\", {}],\r\n [lambda x: np.std(x, ddof=0), \"std\", {\"ddof\": 0}],\r\n [lambda x: np.var(x, ddof=1), \"var\", {}],\r\n [lambda x: np.var(x, ddof=0), \"var\", {\"ddof\": 0}],\r\n ],\r\n)\r\ndef test_nans(compare_func, roll_func, kwargs):\r\n obj = Series(np.random.randn(50))\r\n obj[:10] = np.NaN\r\n obj[-10:] = np.NaN\r\n\r\n result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)\r\n tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))\r\n\r\n # min_periods is working correctly\r\n result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)\r\n assert isna(result.iloc[23])\r\n assert not isna(result.iloc[24])\r\n\r\n assert not isna(result.iloc[-6])\r\n assert isna(result.iloc[-5])\r\n\r\n obj2 = Series(np.random.randn(20))\r\n result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)\r\n assert isna(result.iloc[3])\r\n assert notna(result.iloc[4])\r\n\r\n if roll_func != \"sum\":\r\n result0 = getattr(obj.rolling(20, min_periods=0), roll_func)(**kwargs)\r\n result1 = getattr(obj.rolling(20, min_periods=1), roll_func)(**kwargs)\r\n tm.assert_almost_equal(result0, result1)\r\n\r\n\r\ndef test_nans_count():\r\n obj = Series(np.random.randn(50))\r\n obj[:10] = np.NaN\r\n obj[-10:] = np.NaN\r\n result = obj.rolling(50, min_periods=30).count()\r\n tm.assert_almost_equal(\r\n result.iloc[-1], np.isfinite(obj[10:-10]).astype(float).sum()\r\n )\r\n\r\n\r\[email protected](\r\n \"roll_func, kwargs\",\r\n [\r\n [\"mean\", {}],\r\n [\"sum\", {}],\r\n [\"median\", {}],\r\n [\"min\", {}],\r\n [\"max\", {}],\r\n [\"std\", {}],\r\n [\"std\", {\"ddof\": 0}],\r\n [\"var\", {}],\r\n [\"var\", {\"ddof\": 0}],\r\n ],\r\n)\r\[email protected](\"minp\", [0, 99, 100])\r\ndef test_min_periods(series, minp, roll_func, kwargs):\r\n result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)(\r\n **kwargs\r\n )\r\n expected = getattr(series.rolling(len(series), min_periods=minp), roll_func)(\r\n **kwargs\r\n )\r\n nan_mask = isna(result)\r\n tm.assert_series_equal(nan_mask, isna(expected))\r\n\r\n nan_mask = ~nan_mask\r\n tm.assert_almost_equal(result[nan_mask], expected[nan_mask])\r\n\r\n\r\ndef test_min_periods_count(series):\r\n result = series.rolling(len(series) + 1, min_periods=0).count()\r\n expected = series.rolling(len(series), min_periods=0).count()\r\n nan_mask = isna(result)\r\n tm.assert_series_equal(nan_mask, isna(expected))\r\n\r\n nan_mask = ~nan_mask\r\n tm.assert_almost_equal(result[nan_mask], expected[nan_mask])\r\n\r\n\r\[email protected](\r\n \"roll_func, kwargs, minp\",\r\n [\r\n [\"mean\", {}, 15],\r\n [\"sum\", {}, 15],\r\n [\"count\", {}, 0],\r\n [\"median\", {}, 15],\r\n [\"min\", {}, 15],\r\n [\"max\", {}, 15],\r\n [\"std\", {}, 15],\r\n 
[\"std\", {\"ddof\": 0}, 15],\r\n [\"var\", {}, 15],\r\n [\"var\", {\"ddof\": 0}, 15],\r\n ],\r\n)\r\ndef test_center(roll_func, kwargs, minp):\r\n obj = Series(np.random.randn(50))\r\n obj[:10] = np.NaN\r\n obj[-10:] = np.NaN\r\n\r\n result = getattr(obj.rolling(20, min_periods=minp, center=True), roll_func)(\r\n **kwargs\r\n )\r\n expected = getattr(\r\n concat([obj, Series([np.NaN] * 9)]).rolling(20, min_periods=minp), roll_func\r\n )(**kwargs)[9:].reset_index(drop=True)\r\n tm.assert_series_equal(result, expected)\r\n\r\n\r\[email protected](\r\n \"roll_func, kwargs, minp, fill_value\",\r\n [\r\n [\"mean\", {}, 10, None],\r\n [\"sum\", {}, 10, None],\r\n [\"count\", {}, 0, 0],\r\n [\"median\", {}, 10, None],\r\n [\"min\", {}, 10, None],\r\n [\"max\", {}, 10, None],\r\n [\"std\", {}, 10, None],\r\n [\"std\", {\"ddof\": 0}, 10, None],\r\n [\"var\", {}, 10, None],\r\n [\"var\", {\"ddof\": 0}, 10, None],\r\n ],\r\n)\r\ndef test_center_reindex_series(series, roll_func, kwargs, minp, fill_value):\r\n # shifter index\r\n s = [f\"x{x:d}\" for x in range(12)]\r\n\r\n series_xp = (\r\n getattr(\r\n series.reindex(list(series.index) + s).rolling(window=25, min_periods=minp),\r\n roll_func,\r\n )(**kwargs)\r\n .shift(-12)\r\n .reindex(series.index)\r\n )\r\n series_rs = getattr(\r\n series.rolling(window=25, min_periods=minp, center=True), roll_func\r\n )(**kwargs)\r\n if fill_value is not None:\r\n series_xp = series_xp.fillna(fill_value)\r\n tm.assert_series_equal(series_xp, series_rs)\r\n\r\n\r\[email protected](\r\n \"roll_func, kwargs, minp, fill_value\",\r\n [\r\n [\"mean\", {}, 10, None],\r\n [\"sum\", {}, 10, None],\r\n [\"count\", {}, 0, 0],\r\n [\"median\", {}, 10, None],\r\n [\"min\", {}, 10, None],\r\n [\"max\", {}, 10, None],\r\n [\"std\", {}, 10, None],\r\n [\"std\", {\"ddof\": 0}, 10, None],\r\n [\"var\", {}, 10, None],\r\n [\"var\", {\"ddof\": 0}, 10, None],\r\n ],\r\n)\r\ndef test_center_reindex_frame(frame, roll_func, kwargs, minp, fill_value):\r\n # shifter index\r\n s = [f\"x{x:d}\" for x in range(12)]\r\n\r\n frame_xp = (\r\n getattr(\r\n frame.reindex(list(frame.index) + s).rolling(window=25, min_periods=minp),\r\n roll_func,\r\n )(**kwargs)\r\n .shift(-12)\r\n .reindex(frame.index)\r\n )\r\n frame_rs = getattr(\r\n frame.rolling(window=25, min_periods=minp, center=True), roll_func\r\n )(**kwargs)\r\n if fill_value is not None:\r\n frame_xp = frame_xp.fillna(fill_value)\r\n tm.assert_frame_equal(frame_xp, frame_rs)\r\n",
"from datetime import datetime, timedelta\r\nfrom typing import List\r\nimport warnings\r\n\r\nfrom dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE # noqa\r\nimport numpy as np\r\n\r\nfrom pandas.errors import PerformanceWarning\r\n\r\nfrom pandas import DateOffset, DatetimeIndex, Series, Timestamp, concat, date_range\r\n\r\nfrom pandas.tseries.offsets import Day, Easter\r\n\r\n\r\ndef next_monday(dt: datetime) -> datetime:\r\n \"\"\"\r\n If holiday falls on Saturday, use following Monday instead;\r\n if holiday falls on Sunday, use Monday instead\r\n \"\"\"\r\n if dt.weekday() == 5:\r\n return dt + timedelta(2)\r\n elif dt.weekday() == 6:\r\n return dt + timedelta(1)\r\n return dt\r\n\r\n\r\ndef next_monday_or_tuesday(dt: datetime) -> datetime:\r\n \"\"\"\r\n For second holiday of two adjacent ones!\r\n If holiday falls on Saturday, use following Monday instead;\r\n if holiday falls on Sunday or Monday, use following Tuesday instead\r\n (because Monday is already taken by adjacent holiday on the day before)\r\n \"\"\"\r\n dow = dt.weekday()\r\n if dow == 5 or dow == 6:\r\n return dt + timedelta(2)\r\n elif dow == 0:\r\n return dt + timedelta(1)\r\n return dt\r\n\r\n\r\ndef previous_friday(dt: datetime) -> datetime:\r\n \"\"\"\r\n If holiday falls on Saturday or Sunday, use previous Friday instead.\r\n \"\"\"\r\n if dt.weekday() == 5:\r\n return dt - timedelta(1)\r\n elif dt.weekday() == 6:\r\n return dt - timedelta(2)\r\n return dt\r\n\r\n\r\ndef sunday_to_monday(dt: datetime) -> datetime:\r\n \"\"\"\r\n If holiday falls on Sunday, use day thereafter (Monday) instead.\r\n \"\"\"\r\n if dt.weekday() == 6:\r\n return dt + timedelta(1)\r\n return dt\r\n\r\n\r\ndef weekend_to_monday(dt: datetime) -> datetime:\r\n \"\"\"\r\n If holiday falls on Sunday or Saturday,\r\n use day thereafter (Monday) instead.\r\n Needed for holidays such as Christmas observation in Europe\r\n \"\"\"\r\n if dt.weekday() == 6:\r\n return dt + timedelta(1)\r\n elif dt.weekday() == 5:\r\n return dt + timedelta(2)\r\n return dt\r\n\r\n\r\ndef nearest_workday(dt: datetime) -> datetime:\r\n \"\"\"\r\n If holiday falls on Saturday, use day before (Friday) instead;\r\n if holiday falls on Sunday, use day thereafter (Monday) instead.\r\n \"\"\"\r\n if dt.weekday() == 5:\r\n return dt - timedelta(1)\r\n elif dt.weekday() == 6:\r\n return dt + timedelta(1)\r\n return dt\r\n\r\n\r\ndef next_workday(dt: datetime) -> datetime:\r\n \"\"\"\r\n returns next weekday used for observances\r\n \"\"\"\r\n dt += timedelta(days=1)\r\n while dt.weekday() > 4:\r\n # Mon-Fri are 0-4\r\n dt += timedelta(days=1)\r\n return dt\r\n\r\n\r\ndef previous_workday(dt: datetime) -> datetime:\r\n \"\"\"\r\n returns previous weekday used for observances\r\n \"\"\"\r\n dt -= timedelta(days=1)\r\n while dt.weekday() > 4:\r\n # Mon-Fri are 0-4\r\n dt -= timedelta(days=1)\r\n return dt\r\n\r\n\r\ndef before_nearest_workday(dt: datetime) -> datetime:\r\n \"\"\"\r\n returns previous workday after nearest workday\r\n \"\"\"\r\n return previous_workday(nearest_workday(dt))\r\n\r\n\r\ndef after_nearest_workday(dt: datetime) -> datetime:\r\n \"\"\"\r\n returns next workday after nearest workday\r\n needed for Boxing day or multiple holidays in a series\r\n \"\"\"\r\n return next_workday(nearest_workday(dt))\r\n\r\n\r\nclass Holiday:\r\n \"\"\"\r\n Class that defines a holiday with start/end dates and rules\r\n for observance.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n name,\r\n year=None,\r\n month=None,\r\n day=None,\r\n offset=None,\r\n 
observance=None,\r\n start_date=None,\r\n end_date=None,\r\n days_of_week=None,\r\n ):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n name : str\r\n Name of the holiday , defaults to class name\r\n offset : array of pandas.tseries.offsets or\r\n class from pandas.tseries.offsets\r\n computes offset from date\r\n observance: function\r\n computes when holiday is given a pandas Timestamp\r\n days_of_week:\r\n provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday\r\n Monday=0,..,Sunday=6\r\n\r\n Examples\r\n --------\r\n >>> from pandas.tseries.holiday import Holiday, nearest_workday\r\n >>> from dateutil.relativedelta import MO\r\n\r\n >>> USMemorialDay = Holiday(\r\n ... \"Memorial Day\", month=5, day=31, offset=pd.DateOffset(weekday=MO(-1))\r\n ... )\r\n >>> USMemorialDay\r\n Holiday: Memorial Day (month=5, day=31, offset=<DateOffset: weekday=MO(-1)>)\r\n\r\n >>> USLaborDay = Holiday(\r\n ... \"Labor Day\", month=9, day=1, offset=pd.DateOffset(weekday=MO(1))\r\n ... )\r\n >>> USLaborDay\r\n Holiday: Labor Day (month=9, day=1, offset=<DateOffset: weekday=MO(+1)>)\r\n\r\n >>> July3rd = Holiday(\"July 3rd\", month=7, day=3)\r\n >>> July3rd\r\n Holiday: July 3rd (month=7, day=3, )\r\n\r\n >>> NewYears = Holiday(\r\n ... \"New Years Day\", month=1, day=1, observance=nearest_workday\r\n ... )\r\n >>> NewYears # doctest: +SKIP\r\n Holiday: New Years Day (\r\n month=1, day=1, observance=<function nearest_workday at 0x66545e9bc440>\r\n )\r\n\r\n >>> July3rd = Holiday(\"July 3rd\", month=7, day=3, days_of_week=(0, 1, 2, 3))\r\n >>> July3rd\r\n Holiday: July 3rd (month=7, day=3, )\r\n \"\"\"\r\n if offset is not None and observance is not None:\r\n raise NotImplementedError(\"Cannot use both offset and observance.\")\r\n\r\n self.name = name\r\n self.year = year\r\n self.month = month\r\n self.day = day\r\n self.offset = offset\r\n self.start_date = (\r\n Timestamp(start_date) if start_date is not None else start_date\r\n )\r\n self.end_date = Timestamp(end_date) if end_date is not None else end_date\r\n self.observance = observance\r\n assert days_of_week is None or type(days_of_week) == tuple\r\n self.days_of_week = days_of_week\r\n\r\n def __repr__(self) -> str:\r\n info = \"\"\r\n if self.year is not None:\r\n info += f\"year={self.year}, \"\r\n info += f\"month={self.month}, day={self.day}, \"\r\n\r\n if self.offset is not None:\r\n info += f\"offset={self.offset}\"\r\n\r\n if self.observance is not None:\r\n info += f\"observance={self.observance}\"\r\n\r\n repr = f\"Holiday: {self.name} ({info})\"\r\n return repr\r\n\r\n def dates(self, start_date, end_date, return_name=False):\r\n \"\"\"\r\n Calculate holidays observed between start date and end date\r\n\r\n Parameters\r\n ----------\r\n start_date : starting date, datetime-like, optional\r\n end_date : ending date, datetime-like, optional\r\n return_name : bool, optional, default=False\r\n If True, return a series that has dates and holiday names.\r\n False will only return dates.\r\n \"\"\"\r\n start_date = Timestamp(start_date)\r\n end_date = Timestamp(end_date)\r\n\r\n filter_start_date = start_date\r\n filter_end_date = end_date\r\n\r\n if self.year is not None:\r\n dt = Timestamp(datetime(self.year, self.month, self.day))\r\n if return_name:\r\n return Series(self.name, index=[dt])\r\n else:\r\n return [dt]\r\n\r\n dates = self._reference_dates(start_date, end_date)\r\n holiday_dates = self._apply_rule(dates)\r\n if self.days_of_week is not None:\r\n holiday_dates = holiday_dates[\r\n np.in1d(holiday_dates.dayofweek, 
self.days_of_week)\r\n ]\r\n\r\n if self.start_date is not None:\r\n filter_start_date = max(\r\n self.start_date.tz_localize(filter_start_date.tz), filter_start_date\r\n )\r\n if self.end_date is not None:\r\n filter_end_date = min(\r\n self.end_date.tz_localize(filter_end_date.tz), filter_end_date\r\n )\r\n holiday_dates = holiday_dates[\r\n (holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)\r\n ]\r\n if return_name:\r\n return Series(self.name, index=holiday_dates)\r\n return holiday_dates\r\n\r\n def _reference_dates(self, start_date, end_date):\r\n \"\"\"\r\n Get reference dates for the holiday.\r\n\r\n Return reference dates for the holiday also returning the year\r\n prior to the start_date and year following the end_date. This ensures\r\n that any offsets to be applied will yield the holidays within\r\n the passed in dates.\r\n \"\"\"\r\n if self.start_date is not None:\r\n start_date = self.start_date.tz_localize(start_date.tz)\r\n\r\n if self.end_date is not None:\r\n end_date = self.end_date.tz_localize(start_date.tz)\r\n\r\n year_offset = DateOffset(years=1)\r\n reference_start_date = Timestamp(\r\n datetime(start_date.year - 1, self.month, self.day)\r\n )\r\n\r\n reference_end_date = Timestamp(\r\n datetime(end_date.year + 1, self.month, self.day)\r\n )\r\n # Don't process unnecessary holidays\r\n dates = date_range(\r\n start=reference_start_date,\r\n end=reference_end_date,\r\n freq=year_offset,\r\n tz=start_date.tz,\r\n )\r\n\r\n return dates\r\n\r\n def _apply_rule(self, dates):\r\n \"\"\"\r\n Apply the given offset/observance to a DatetimeIndex of dates.\r\n\r\n Parameters\r\n ----------\r\n dates : DatetimeIndex\r\n Dates to apply the given offset/observance rule\r\n\r\n Returns\r\n -------\r\n Dates with rules applied\r\n \"\"\"\r\n if self.observance is not None:\r\n return dates.map(lambda d: self.observance(d))\r\n\r\n if self.offset is not None:\r\n if not isinstance(self.offset, list):\r\n offsets = [self.offset]\r\n else:\r\n offsets = self.offset\r\n for offset in offsets:\r\n\r\n # if we are adding a non-vectorized value\r\n # ignore the PerformanceWarnings:\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", PerformanceWarning)\r\n dates += offset\r\n return dates\r\n\r\n\r\nholiday_calendars = {}\r\n\r\n\r\ndef register(cls):\r\n try:\r\n name = cls.name\r\n except AttributeError:\r\n name = cls.__name__\r\n holiday_calendars[name] = cls\r\n\r\n\r\ndef get_calendar(name):\r\n \"\"\"\r\n Return an instance of a calendar based on its name.\r\n\r\n Parameters\r\n ----------\r\n name : str\r\n Calendar name to return an instance of\r\n \"\"\"\r\n return holiday_calendars[name]()\r\n\r\n\r\nclass HolidayCalendarMetaClass(type):\r\n def __new__(cls, clsname, bases, attrs):\r\n calendar_class = super().__new__(cls, clsname, bases, attrs)\r\n register(calendar_class)\r\n return calendar_class\r\n\r\n\r\nclass AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass):\r\n \"\"\"\r\n Abstract interface to create holidays following certain rules.\r\n \"\"\"\r\n\r\n rules: List[Holiday] = []\r\n start_date = Timestamp(datetime(1970, 1, 1))\r\n end_date = Timestamp(datetime(2200, 12, 31))\r\n _cache = None\r\n\r\n def __init__(self, name=None, rules=None):\r\n \"\"\"\r\n Initializes holiday object with a given set a rules. 
Normally\r\n classes just have the rules defined within them.\r\n\r\n Parameters\r\n ----------\r\n name : str\r\n Name of the holiday calendar, defaults to class name\r\n rules : array of Holiday objects\r\n A set of rules used to create the holidays.\r\n \"\"\"\r\n super().__init__()\r\n if name is None:\r\n name = type(self).__name__\r\n self.name = name\r\n\r\n if rules is not None:\r\n self.rules = rules\r\n\r\n def rule_from_name(self, name):\r\n for rule in self.rules:\r\n if rule.name == name:\r\n return rule\r\n\r\n return None\r\n\r\n def holidays(self, start=None, end=None, return_name=False):\r\n \"\"\"\r\n Returns a curve with holidays between start_date and end_date\r\n\r\n Parameters\r\n ----------\r\n start : starting date, datetime-like, optional\r\n end : ending date, datetime-like, optional\r\n return_name : bool, optional\r\n If True, return a series that has dates and holiday names.\r\n False will only return a DatetimeIndex of dates.\r\n\r\n Returns\r\n -------\r\n DatetimeIndex of holidays\r\n \"\"\"\r\n if self.rules is None:\r\n raise Exception(\r\n f\"Holiday Calendar {self.name} does not have any rules specified\"\r\n )\r\n\r\n if start is None:\r\n start = AbstractHolidayCalendar.start_date\r\n\r\n if end is None:\r\n end = AbstractHolidayCalendar.end_date\r\n\r\n start = Timestamp(start)\r\n end = Timestamp(end)\r\n\r\n # If we don't have a cache or the dates are outside the prior cache, we\r\n # get them again\r\n if self._cache is None or start < self._cache[0] or end > self._cache[1]:\r\n pre_holidays = [\r\n rule.dates(start, end, return_name=True) for rule in self.rules\r\n ]\r\n if pre_holidays:\r\n holidays = concat(pre_holidays)\r\n else:\r\n holidays = Series(index=DatetimeIndex([]), dtype=object)\r\n\r\n self._cache = (start, end, holidays.sort_index())\r\n\r\n holidays = self._cache[2]\r\n holidays = holidays[start:end]\r\n\r\n if return_name:\r\n return holidays\r\n else:\r\n return holidays.index\r\n\r\n @staticmethod\r\n def merge_class(base, other):\r\n \"\"\"\r\n Merge holiday calendars together. The base calendar\r\n will take precedence to other. The merge will be done\r\n based on each holiday's name.\r\n\r\n Parameters\r\n ----------\r\n base : AbstractHolidayCalendar\r\n instance/subclass or array of Holiday objects\r\n other : AbstractHolidayCalendar\r\n instance/subclass or array of Holiday objects\r\n \"\"\"\r\n try:\r\n other = other.rules\r\n except AttributeError:\r\n pass\r\n\r\n if not isinstance(other, list):\r\n other = [other]\r\n other_holidays = {holiday.name: holiday for holiday in other}\r\n\r\n try:\r\n base = base.rules\r\n except AttributeError:\r\n pass\r\n\r\n if not isinstance(base, list):\r\n base = [base]\r\n base_holidays = {holiday.name: holiday for holiday in base}\r\n\r\n other_holidays.update(base_holidays)\r\n return list(other_holidays.values())\r\n\r\n def merge(self, other, inplace=False):\r\n \"\"\"\r\n Merge holiday calendars together. The caller's class\r\n rules take precedence. 
The merge will be done\r\n based on each holiday's name.\r\n\r\n Parameters\r\n ----------\r\n other : holiday calendar\r\n inplace : bool (default=False)\r\n If True set rule_table to holidays, else return array of Holidays\r\n \"\"\"\r\n holidays = self.merge_class(self, other)\r\n if inplace:\r\n self.rules = holidays\r\n else:\r\n return holidays\r\n\r\n\r\nUSMemorialDay = Holiday(\r\n \"Memorial Day\", month=5, day=31, offset=DateOffset(weekday=MO(-1))\r\n)\r\nUSLaborDay = Holiday(\"Labor Day\", month=9, day=1, offset=DateOffset(weekday=MO(1)))\r\nUSColumbusDay = Holiday(\r\n \"Columbus Day\", month=10, day=1, offset=DateOffset(weekday=MO(2))\r\n)\r\nUSThanksgivingDay = Holiday(\r\n \"Thanksgiving\", month=11, day=1, offset=DateOffset(weekday=TH(4))\r\n)\r\nUSMartinLutherKingJr = Holiday(\r\n \"Martin Luther King Jr. Day\",\r\n start_date=datetime(1986, 1, 1),\r\n month=1,\r\n day=1,\r\n offset=DateOffset(weekday=MO(3)),\r\n)\r\nUSPresidentsDay = Holiday(\r\n \"Presidents Day\", month=2, day=1, offset=DateOffset(weekday=MO(3))\r\n)\r\nGoodFriday = Holiday(\"Good Friday\", month=1, day=1, offset=[Easter(), Day(-2)])\r\n\r\nEasterMonday = Holiday(\"Easter Monday\", month=1, day=1, offset=[Easter(), Day(1)])\r\n\r\n\r\nclass USFederalHolidayCalendar(AbstractHolidayCalendar):\r\n \"\"\"\r\n US Federal Government Holiday Calendar based on rules specified by:\r\n https://www.opm.gov/policy-data-oversight/\r\n snow-dismissal-procedures/federal-holidays/\r\n \"\"\"\r\n\r\n rules = [\r\n Holiday(\"New Years Day\", month=1, day=1, observance=nearest_workday),\r\n USMartinLutherKingJr,\r\n USPresidentsDay,\r\n USMemorialDay,\r\n Holiday(\"July 4th\", month=7, day=4, observance=nearest_workday),\r\n USLaborDay,\r\n USColumbusDay,\r\n Holiday(\"Veterans Day\", month=11, day=11, observance=nearest_workday),\r\n USThanksgivingDay,\r\n Holiday(\"Christmas\", month=12, day=25, observance=nearest_workday),\r\n ]\r\n\r\n\r\ndef HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar):\r\n rules = AbstractHolidayCalendar.merge_class(base, other)\r\n calendar_class = type(name, (base_class,), {\"rules\": rules, \"name\": name})\r\n return calendar_class\r\n",
"import numpy as np\r\nimport pytest\r\n\r\nimport pandas as pd\r\nfrom pandas import Index, MultiIndex\r\nimport pandas._testing as tm\r\n\r\n\r\[email protected](\r\n \"other\", [Index([\"three\", \"one\", \"two\"]), Index([\"one\"]), Index([\"one\", \"three\"])]\r\n)\r\ndef test_join_level(idx, other, join_type):\r\n join_index, lidx, ridx = other.join(\r\n idx, how=join_type, level=\"second\", return_indexers=True\r\n )\r\n\r\n exp_level = other.join(idx.levels[1], how=join_type)\r\n assert join_index.levels[0].equals(idx.levels[0])\r\n assert join_index.levels[1].equals(exp_level)\r\n\r\n # pare down levels\r\n mask = np.array([x[1] in exp_level for x in idx], dtype=bool)\r\n exp_values = idx.values[mask]\r\n tm.assert_numpy_array_equal(join_index.values, exp_values)\r\n\r\n if join_type in (\"outer\", \"inner\"):\r\n join_index2, ridx2, lidx2 = idx.join(\r\n other, how=join_type, level=\"second\", return_indexers=True\r\n )\r\n\r\n assert join_index.equals(join_index2)\r\n tm.assert_numpy_array_equal(lidx, lidx2)\r\n tm.assert_numpy_array_equal(ridx, ridx2)\r\n tm.assert_numpy_array_equal(join_index2.values, exp_values)\r\n\r\n\r\ndef test_join_level_corner_case(idx):\r\n # some corner cases\r\n index = Index([\"three\", \"one\", \"two\"])\r\n result = index.join(idx, level=\"second\")\r\n assert isinstance(result, MultiIndex)\r\n\r\n with pytest.raises(TypeError, match=\"Join.*MultiIndex.*ambiguous\"):\r\n idx.join(idx, level=1)\r\n\r\n\r\ndef test_join_self(idx, join_type):\r\n joined = idx.join(idx, how=join_type)\r\n tm.assert_index_equal(joined, idx)\r\n\r\n\r\ndef test_join_multi():\r\n # GH 10665\r\n midx = pd.MultiIndex.from_product([np.arange(4), np.arange(4)], names=[\"a\", \"b\"])\r\n idx = Index([1, 2, 5], name=\"b\")\r\n\r\n # inner\r\n jidx, lidx, ridx = midx.join(idx, how=\"inner\", return_indexers=True)\r\n exp_idx = pd.MultiIndex.from_product([np.arange(4), [1, 2]], names=[\"a\", \"b\"])\r\n exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)\r\n exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)\r\n tm.assert_index_equal(jidx, exp_idx)\r\n tm.assert_numpy_array_equal(lidx, exp_lidx)\r\n tm.assert_numpy_array_equal(ridx, exp_ridx)\r\n # flip\r\n jidx, ridx, lidx = idx.join(midx, how=\"inner\", return_indexers=True)\r\n tm.assert_index_equal(jidx, exp_idx)\r\n tm.assert_numpy_array_equal(lidx, exp_lidx)\r\n tm.assert_numpy_array_equal(ridx, exp_ridx)\r\n\r\n # keep MultiIndex\r\n jidx, lidx, ridx = midx.join(idx, how=\"left\", return_indexers=True)\r\n exp_ridx = np.array(\r\n [-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1], dtype=np.intp\r\n )\r\n tm.assert_index_equal(jidx, midx)\r\n assert lidx is None\r\n tm.assert_numpy_array_equal(ridx, exp_ridx)\r\n # flip\r\n jidx, ridx, lidx = idx.join(midx, how=\"right\", return_indexers=True)\r\n tm.assert_index_equal(jidx, midx)\r\n assert lidx is None\r\n tm.assert_numpy_array_equal(ridx, exp_ridx)\r\n\r\n\r\ndef test_join_self_unique(idx, join_type):\r\n if idx.is_unique:\r\n joined = idx.join(idx, how=join_type)\r\n assert (idx == joined).all()\r\n\r\n\r\ndef test_join_multi_wrong_order():\r\n # GH 25760\r\n # GH 28956\r\n\r\n midx1 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=[\"a\", \"b\"])\r\n midx2 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=[\"b\", \"a\"])\r\n\r\n join_idx, lidx, ridx = midx1.join(midx2, return_indexers=True)\r\n\r\n exp_ridx = np.array([-1, -1, -1, -1], dtype=np.intp)\r\n\r\n tm.assert_index_equal(midx1, join_idx)\r\n assert lidx is None\r\n 
tm.assert_numpy_array_equal(ridx, exp_ridx)\r\n\r\n\r\ndef test_join_multi_return_indexers():\r\n # GH 34074\r\n\r\n midx1 = pd.MultiIndex.from_product([[1, 2], [3, 4], [5, 6]], names=[\"a\", \"b\", \"c\"])\r\n midx2 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=[\"a\", \"b\"])\r\n\r\n result = midx1.join(midx2, return_indexers=False)\r\n tm.assert_index_equal(result, midx1)\r\n",
"from datetime import date, datetime, time as dt_time, timedelta\r\nfrom typing import Dict, List, Optional, Tuple, Type\r\n\r\nfrom dateutil.tz import tzlocal\r\nimport numpy as np\r\nimport pytest\r\n\r\nfrom pandas._libs.tslibs import (\r\n NaT,\r\n OutOfBoundsDatetime,\r\n Timestamp,\r\n conversion,\r\n timezones,\r\n)\r\nimport pandas._libs.tslibs.offsets as liboffsets\r\nfrom pandas._libs.tslibs.offsets import ApplyTypeError, _get_offset, _offset_map\r\nfrom pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG\r\nfrom pandas.compat import IS64\r\nfrom pandas.compat.numpy import np_datetime64_compat\r\nfrom pandas.errors import PerformanceWarning\r\n\r\nimport pandas._testing as tm\r\nfrom pandas.core.indexes.datetimes import DatetimeIndex, date_range\r\nfrom pandas.core.series import Series\r\n\r\nfrom pandas.io.pickle import read_pickle\r\nfrom pandas.tseries.holiday import USFederalHolidayCalendar\r\nimport pandas.tseries.offsets as offsets\r\nfrom pandas.tseries.offsets import (\r\n FY5253,\r\n BaseOffset,\r\n BDay,\r\n BMonthBegin,\r\n BMonthEnd,\r\n BQuarterBegin,\r\n BQuarterEnd,\r\n BusinessHour,\r\n BYearBegin,\r\n BYearEnd,\r\n CBMonthBegin,\r\n CBMonthEnd,\r\n CDay,\r\n CustomBusinessDay,\r\n CustomBusinessHour,\r\n CustomBusinessMonthBegin,\r\n CustomBusinessMonthEnd,\r\n DateOffset,\r\n Day,\r\n Easter,\r\n FY5253Quarter,\r\n LastWeekOfMonth,\r\n MonthBegin,\r\n MonthEnd,\r\n Nano,\r\n QuarterBegin,\r\n QuarterEnd,\r\n SemiMonthBegin,\r\n SemiMonthEnd,\r\n Tick,\r\n Week,\r\n WeekOfMonth,\r\n YearBegin,\r\n YearEnd,\r\n)\r\n\r\nfrom .common import assert_is_on_offset, assert_offset_equal\r\n\r\n\r\nclass WeekDay:\r\n # TODO: Remove: This is not used outside of tests\r\n MON = 0\r\n TUE = 1\r\n WED = 2\r\n THU = 3\r\n FRI = 4\r\n SAT = 5\r\n SUN = 6\r\n\r\n\r\n#####\r\n# DateOffset Tests\r\n#####\r\n_ApplyCases = List[Tuple[BaseOffset, Dict[datetime, datetime]]]\r\n\r\n\r\nclass Base:\r\n _offset: Optional[Type[DateOffset]] = None\r\n d = Timestamp(datetime(2008, 1, 2))\r\n\r\n timezones = [\r\n None,\r\n \"UTC\",\r\n \"Asia/Tokyo\",\r\n \"US/Eastern\",\r\n \"dateutil/Asia/Tokyo\",\r\n \"dateutil/US/Pacific\",\r\n ]\r\n\r\n def _get_offset(self, klass, value=1, normalize=False):\r\n # create instance from offset class\r\n if klass is FY5253:\r\n klass = klass(\r\n n=value,\r\n startingMonth=1,\r\n weekday=1,\r\n variation=\"last\",\r\n normalize=normalize,\r\n )\r\n elif klass is FY5253Quarter:\r\n klass = klass(\r\n n=value,\r\n startingMonth=1,\r\n weekday=1,\r\n qtr_with_extra_week=1,\r\n variation=\"last\",\r\n normalize=normalize,\r\n )\r\n elif klass is LastWeekOfMonth:\r\n klass = klass(n=value, weekday=5, normalize=normalize)\r\n elif klass is WeekOfMonth:\r\n klass = klass(n=value, week=1, weekday=5, normalize=normalize)\r\n elif klass is Week:\r\n klass = klass(n=value, weekday=5, normalize=normalize)\r\n elif klass is DateOffset:\r\n klass = klass(days=value, normalize=normalize)\r\n else:\r\n klass = klass(value, normalize=normalize)\r\n return klass\r\n\r\n def test_apply_out_of_range(self, tz_naive_fixture):\r\n tz = tz_naive_fixture\r\n if self._offset is None:\r\n return\r\n if isinstance(tz, tzlocal) and not IS64:\r\n pytest.xfail(reason=\"OverflowError inside tzlocal past 2038\")\r\n\r\n # try to create an out-of-bounds result timestamp; if we can't create\r\n # the offset skip\r\n try:\r\n if self._offset in (BusinessHour, CustomBusinessHour):\r\n # Using 10000 in BusinessHour fails in tz check because of DST\r\n # difference\r\n offset = 
self._get_offset(self._offset, value=100000)\r\n else:\r\n offset = self._get_offset(self._offset, value=10000)\r\n\r\n result = Timestamp(\"20080101\") + offset\r\n assert isinstance(result, datetime)\r\n assert result.tzinfo is None\r\n\r\n # Check tz is preserved\r\n t = Timestamp(\"20080101\", tz=tz)\r\n result = t + offset\r\n assert isinstance(result, datetime)\r\n assert t.tzinfo == result.tzinfo\r\n\r\n except OutOfBoundsDatetime:\r\n pass\r\n except (ValueError, KeyError):\r\n # we are creating an invalid offset\r\n # so ignore\r\n pass\r\n\r\n def test_offsets_compare_equal(self):\r\n # root cause of GH#456: __ne__ was not implemented\r\n if self._offset is None:\r\n return\r\n offset1 = self._offset()\r\n offset2 = self._offset()\r\n assert not offset1 != offset2\r\n assert offset1 == offset2\r\n\r\n def test_rsub(self):\r\n if self._offset is None or not hasattr(self, \"offset2\"):\r\n # i.e. skip for TestCommon and YQM subclasses that do not have\r\n # offset2 attr\r\n return\r\n assert self.d - self.offset2 == (-self.offset2).apply(self.d)\r\n\r\n def test_radd(self):\r\n if self._offset is None or not hasattr(self, \"offset2\"):\r\n # i.e. skip for TestCommon and YQM subclasses that do not have\r\n # offset2 attr\r\n return\r\n assert self.d + self.offset2 == self.offset2 + self.d\r\n\r\n def test_sub(self):\r\n if self._offset is None or not hasattr(self, \"offset2\"):\r\n # i.e. skip for TestCommon and YQM subclasses that do not have\r\n # offset2 attr\r\n return\r\n off = self.offset2\r\n msg = \"Cannot subtract datetime from offset\"\r\n with pytest.raises(TypeError, match=msg):\r\n off - self.d\r\n\r\n assert 2 * off - off == off\r\n assert self.d - self.offset2 == self.d + self._offset(-2)\r\n assert self.d - self.offset2 == self.d - (2 * off - off)\r\n\r\n def testMult1(self):\r\n if self._offset is None or not hasattr(self, \"offset1\"):\r\n # i.e. 
skip for TestCommon and YQM subclasses that do not have\r\n # offset1 attr\r\n return\r\n assert self.d + 10 * self.offset1 == self.d + self._offset(10)\r\n assert self.d + 5 * self.offset1 == self.d + self._offset(5)\r\n\r\n def testMult2(self):\r\n if self._offset is None:\r\n return\r\n assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)\r\n assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)\r\n\r\n def test_compare_str(self):\r\n # GH#23524\r\n # comparing to strings that cannot be cast to DateOffsets should\r\n # not raise for __eq__ or __ne__\r\n if self._offset is None:\r\n return\r\n off = self._get_offset(self._offset)\r\n\r\n assert not off == \"infer\"\r\n assert off != \"foo\"\r\n # Note: inequalities are only implemented for Tick subclasses;\r\n # tests for this are in test_ticks\r\n\r\n\r\nclass TestCommon(Base):\r\n # exected value created by Base._get_offset\r\n # are applied to 2011/01/01 09:00 (Saturday)\r\n # used for .apply and .rollforward\r\n expecteds = {\r\n \"Day\": Timestamp(\"2011-01-02 09:00:00\"),\r\n \"DateOffset\": Timestamp(\"2011-01-02 09:00:00\"),\r\n \"BusinessDay\": Timestamp(\"2011-01-03 09:00:00\"),\r\n \"CustomBusinessDay\": Timestamp(\"2011-01-03 09:00:00\"),\r\n \"CustomBusinessMonthEnd\": Timestamp(\"2011-01-31 09:00:00\"),\r\n \"CustomBusinessMonthBegin\": Timestamp(\"2011-01-03 09:00:00\"),\r\n \"MonthBegin\": Timestamp(\"2011-02-01 09:00:00\"),\r\n \"BusinessMonthBegin\": Timestamp(\"2011-01-03 09:00:00\"),\r\n \"MonthEnd\": Timestamp(\"2011-01-31 09:00:00\"),\r\n \"SemiMonthEnd\": Timestamp(\"2011-01-15 09:00:00\"),\r\n \"SemiMonthBegin\": Timestamp(\"2011-01-15 09:00:00\"),\r\n \"BusinessMonthEnd\": Timestamp(\"2011-01-31 09:00:00\"),\r\n \"YearBegin\": Timestamp(\"2012-01-01 09:00:00\"),\r\n \"BYearBegin\": Timestamp(\"2011-01-03 09:00:00\"),\r\n \"YearEnd\": Timestamp(\"2011-12-31 09:00:00\"),\r\n \"BYearEnd\": Timestamp(\"2011-12-30 09:00:00\"),\r\n \"QuarterBegin\": Timestamp(\"2011-03-01 09:00:00\"),\r\n \"BQuarterBegin\": Timestamp(\"2011-03-01 09:00:00\"),\r\n \"QuarterEnd\": Timestamp(\"2011-03-31 09:00:00\"),\r\n \"BQuarterEnd\": Timestamp(\"2011-03-31 09:00:00\"),\r\n \"BusinessHour\": Timestamp(\"2011-01-03 10:00:00\"),\r\n \"CustomBusinessHour\": Timestamp(\"2011-01-03 10:00:00\"),\r\n \"WeekOfMonth\": Timestamp(\"2011-01-08 09:00:00\"),\r\n \"LastWeekOfMonth\": Timestamp(\"2011-01-29 09:00:00\"),\r\n \"FY5253Quarter\": Timestamp(\"2011-01-25 09:00:00\"),\r\n \"FY5253\": Timestamp(\"2011-01-25 09:00:00\"),\r\n \"Week\": Timestamp(\"2011-01-08 09:00:00\"),\r\n \"Easter\": Timestamp(\"2011-04-24 09:00:00\"),\r\n \"Hour\": Timestamp(\"2011-01-01 10:00:00\"),\r\n \"Minute\": Timestamp(\"2011-01-01 09:01:00\"),\r\n \"Second\": Timestamp(\"2011-01-01 09:00:01\"),\r\n \"Milli\": Timestamp(\"2011-01-01 09:00:00.001000\"),\r\n \"Micro\": Timestamp(\"2011-01-01 09:00:00.000001\"),\r\n \"Nano\": Timestamp(np_datetime64_compat(\"2011-01-01T09:00:00.000000001Z\")),\r\n }\r\n\r\n def test_immutable(self, offset_types):\r\n # GH#21341 check that __setattr__ raises\r\n offset = self._get_offset(offset_types)\r\n msg = \"objects is not writable|DateOffset objects are immutable\"\r\n with pytest.raises(AttributeError, match=msg):\r\n offset.normalize = True\r\n with pytest.raises(AttributeError, match=msg):\r\n offset.n = 91\r\n\r\n def test_return_type(self, offset_types):\r\n offset = self._get_offset(offset_types)\r\n\r\n # make sure that we are returning a Timestamp\r\n result = Timestamp(\"20080101\") + 
offset\r\n assert isinstance(result, Timestamp)\r\n\r\n # make sure that we are returning NaT\r\n assert NaT + offset is NaT\r\n assert offset + NaT is NaT\r\n\r\n assert NaT - offset is NaT\r\n assert (-offset).apply(NaT) is NaT\r\n\r\n def test_offset_n(self, offset_types):\r\n offset = self._get_offset(offset_types)\r\n assert offset.n == 1\r\n\r\n neg_offset = offset * -1\r\n assert neg_offset.n == -1\r\n\r\n mul_offset = offset * 3\r\n assert mul_offset.n == 3\r\n\r\n def test_offset_timedelta64_arg(self, offset_types):\r\n # check that offset._validate_n raises TypeError on a timedelt64\r\n # object\r\n off = self._get_offset(offset_types)\r\n\r\n td64 = np.timedelta64(4567, \"s\")\r\n with pytest.raises(TypeError, match=\"argument must be an integer\"):\r\n type(off)(n=td64, **off.kwds)\r\n\r\n def test_offset_mul_ndarray(self, offset_types):\r\n off = self._get_offset(offset_types)\r\n\r\n expected = np.array([[off, off * 2], [off * 3, off * 4]])\r\n\r\n result = np.array([[1, 2], [3, 4]]) * off\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n result = off * np.array([[1, 2], [3, 4]])\r\n tm.assert_numpy_array_equal(result, expected)\r\n\r\n def test_offset_freqstr(self, offset_types):\r\n offset = self._get_offset(offset_types)\r\n\r\n freqstr = offset.freqstr\r\n if freqstr not in (\"<Easter>\", \"<DateOffset: days=1>\", \"LWOM-SAT\"):\r\n code = _get_offset(freqstr)\r\n assert offset.rule_code == code\r\n\r\n def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=False):\r\n\r\n if normalize and issubclass(offset, Tick):\r\n # normalize=True disallowed for Tick subclasses GH#21427\r\n return\r\n\r\n offset_s = self._get_offset(offset, normalize=normalize)\r\n func = getattr(offset_s, funcname)\r\n\r\n result = func(dt)\r\n assert isinstance(result, Timestamp)\r\n assert result == expected\r\n\r\n result = func(Timestamp(dt))\r\n assert isinstance(result, Timestamp)\r\n assert result == expected\r\n\r\n # see gh-14101\r\n exp_warning = None\r\n ts = Timestamp(dt) + Nano(5)\r\n\r\n if (\r\n type(offset_s).__name__ == \"DateOffset\"\r\n and (funcname == \"apply\" or normalize)\r\n and ts.nanosecond > 0\r\n ):\r\n exp_warning = UserWarning\r\n\r\n # test nanosecond is preserved\r\n with tm.assert_produces_warning(exp_warning, check_stacklevel=False):\r\n result = func(ts)\r\n assert isinstance(result, Timestamp)\r\n if normalize is False:\r\n assert result == expected + Nano(5)\r\n else:\r\n assert result == expected\r\n\r\n if isinstance(dt, np.datetime64):\r\n # test tz when input is datetime or Timestamp\r\n return\r\n\r\n for tz in self.timezones:\r\n expected_localize = expected.tz_localize(tz)\r\n tz_obj = timezones.maybe_get_tz(tz)\r\n dt_tz = conversion.localize_pydatetime(dt, tz_obj)\r\n\r\n result = func(dt_tz)\r\n assert isinstance(result, Timestamp)\r\n assert result == expected_localize\r\n\r\n result = func(Timestamp(dt, tz=tz))\r\n assert isinstance(result, Timestamp)\r\n assert result == expected_localize\r\n\r\n # see gh-14101\r\n exp_warning = None\r\n ts = Timestamp(dt, tz=tz) + Nano(5)\r\n\r\n if (\r\n type(offset_s).__name__ == \"DateOffset\"\r\n and (funcname == \"apply\" or normalize)\r\n and ts.nanosecond > 0\r\n ):\r\n exp_warning = UserWarning\r\n\r\n # test nanosecond is preserved\r\n with tm.assert_produces_warning(exp_warning, check_stacklevel=False):\r\n result = func(ts)\r\n assert isinstance(result, Timestamp)\r\n if normalize is False:\r\n assert result == expected_localize + Nano(5)\r\n else:\r\n assert result == 
expected_localize\r\n\r\n def test_apply(self, offset_types):\r\n sdt = datetime(2011, 1, 1, 9, 0)\r\n ndt = np_datetime64_compat(\"2011-01-01 09:00Z\")\r\n\r\n for dt in [sdt, ndt]:\r\n expected = self.expecteds[offset_types.__name__]\r\n self._check_offsetfunc_works(offset_types, \"apply\", dt, expected)\r\n\r\n expected = Timestamp(expected.date())\r\n self._check_offsetfunc_works(\r\n offset_types, \"apply\", dt, expected, normalize=True\r\n )\r\n\r\n def test_rollforward(self, offset_types):\r\n expecteds = self.expecteds.copy()\r\n\r\n # result will not be changed if the target is on the offset\r\n no_changes = [\r\n \"Day\",\r\n \"MonthBegin\",\r\n \"SemiMonthBegin\",\r\n \"YearBegin\",\r\n \"Week\",\r\n \"Hour\",\r\n \"Minute\",\r\n \"Second\",\r\n \"Milli\",\r\n \"Micro\",\r\n \"Nano\",\r\n \"DateOffset\",\r\n ]\r\n for n in no_changes:\r\n expecteds[n] = Timestamp(\"2011/01/01 09:00\")\r\n\r\n expecteds[\"BusinessHour\"] = Timestamp(\"2011-01-03 09:00:00\")\r\n expecteds[\"CustomBusinessHour\"] = Timestamp(\"2011-01-03 09:00:00\")\r\n\r\n # but be changed when normalize=True\r\n norm_expected = expecteds.copy()\r\n for k in norm_expected:\r\n norm_expected[k] = Timestamp(norm_expected[k].date())\r\n\r\n normalized = {\r\n \"Day\": Timestamp(\"2011-01-02 00:00:00\"),\r\n \"DateOffset\": Timestamp(\"2011-01-02 00:00:00\"),\r\n \"MonthBegin\": Timestamp(\"2011-02-01 00:00:00\"),\r\n \"SemiMonthBegin\": Timestamp(\"2011-01-15 00:00:00\"),\r\n \"YearBegin\": Timestamp(\"2012-01-01 00:00:00\"),\r\n \"Week\": Timestamp(\"2011-01-08 00:00:00\"),\r\n \"Hour\": Timestamp(\"2011-01-01 00:00:00\"),\r\n \"Minute\": Timestamp(\"2011-01-01 00:00:00\"),\r\n \"Second\": Timestamp(\"2011-01-01 00:00:00\"),\r\n \"Milli\": Timestamp(\"2011-01-01 00:00:00\"),\r\n \"Micro\": Timestamp(\"2011-01-01 00:00:00\"),\r\n }\r\n norm_expected.update(normalized)\r\n\r\n sdt = datetime(2011, 1, 1, 9, 0)\r\n ndt = np_datetime64_compat(\"2011-01-01 09:00Z\")\r\n\r\n for dt in [sdt, ndt]:\r\n expected = expecteds[offset_types.__name__]\r\n self._check_offsetfunc_works(offset_types, \"rollforward\", dt, expected)\r\n expected = norm_expected[offset_types.__name__]\r\n self._check_offsetfunc_works(\r\n offset_types, \"rollforward\", dt, expected, normalize=True\r\n )\r\n\r\n def test_rollback(self, offset_types):\r\n expecteds = {\r\n \"BusinessDay\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"CustomBusinessDay\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"CustomBusinessMonthEnd\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"CustomBusinessMonthBegin\": Timestamp(\"2010-12-01 09:00:00\"),\r\n \"BusinessMonthBegin\": Timestamp(\"2010-12-01 09:00:00\"),\r\n \"MonthEnd\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"SemiMonthEnd\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"BusinessMonthEnd\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"BYearBegin\": Timestamp(\"2010-01-01 09:00:00\"),\r\n \"YearEnd\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"BYearEnd\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"QuarterBegin\": Timestamp(\"2010-12-01 09:00:00\"),\r\n \"BQuarterBegin\": Timestamp(\"2010-12-01 09:00:00\"),\r\n \"QuarterEnd\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"BQuarterEnd\": Timestamp(\"2010-12-31 09:00:00\"),\r\n \"BusinessHour\": Timestamp(\"2010-12-31 17:00:00\"),\r\n \"CustomBusinessHour\": Timestamp(\"2010-12-31 17:00:00\"),\r\n \"WeekOfMonth\": Timestamp(\"2010-12-11 09:00:00\"),\r\n \"LastWeekOfMonth\": Timestamp(\"2010-12-25 09:00:00\"),\r\n \"FY5253Quarter\": Timestamp(\"2010-10-26 09:00:00\"),\r\n \"FY5253\": 
Timestamp(\"2010-01-26 09:00:00\"),\r\n \"Easter\": Timestamp(\"2010-04-04 09:00:00\"),\r\n }\r\n\r\n # result will not be changed if the target is on the offset\r\n for n in [\r\n \"Day\",\r\n \"MonthBegin\",\r\n \"SemiMonthBegin\",\r\n \"YearBegin\",\r\n \"Week\",\r\n \"Hour\",\r\n \"Minute\",\r\n \"Second\",\r\n \"Milli\",\r\n \"Micro\",\r\n \"Nano\",\r\n \"DateOffset\",\r\n ]:\r\n expecteds[n] = Timestamp(\"2011/01/01 09:00\")\r\n\r\n # but be changed when normalize=True\r\n norm_expected = expecteds.copy()\r\n for k in norm_expected:\r\n norm_expected[k] = Timestamp(norm_expected[k].date())\r\n\r\n normalized = {\r\n \"Day\": Timestamp(\"2010-12-31 00:00:00\"),\r\n \"DateOffset\": Timestamp(\"2010-12-31 00:00:00\"),\r\n \"MonthBegin\": Timestamp(\"2010-12-01 00:00:00\"),\r\n \"SemiMonthBegin\": Timestamp(\"2010-12-15 00:00:00\"),\r\n \"YearBegin\": Timestamp(\"2010-01-01 00:00:00\"),\r\n \"Week\": Timestamp(\"2010-12-25 00:00:00\"),\r\n \"Hour\": Timestamp(\"2011-01-01 00:00:00\"),\r\n \"Minute\": Timestamp(\"2011-01-01 00:00:00\"),\r\n \"Second\": Timestamp(\"2011-01-01 00:00:00\"),\r\n \"Milli\": Timestamp(\"2011-01-01 00:00:00\"),\r\n \"Micro\": Timestamp(\"2011-01-01 00:00:00\"),\r\n }\r\n norm_expected.update(normalized)\r\n\r\n sdt = datetime(2011, 1, 1, 9, 0)\r\n ndt = np_datetime64_compat(\"2011-01-01 09:00Z\")\r\n\r\n for dt in [sdt, ndt]:\r\n expected = expecteds[offset_types.__name__]\r\n self._check_offsetfunc_works(offset_types, \"rollback\", dt, expected)\r\n\r\n expected = norm_expected[offset_types.__name__]\r\n self._check_offsetfunc_works(\r\n offset_types, \"rollback\", dt, expected, normalize=True\r\n )\r\n\r\n def test_is_on_offset(self, offset_types):\r\n dt = self.expecteds[offset_types.__name__]\r\n offset_s = self._get_offset(offset_types)\r\n assert offset_s.is_on_offset(dt)\r\n\r\n # when normalize=True, is_on_offset checks time is 00:00:00\r\n if issubclass(offset_types, Tick):\r\n # normalize=True disallowed for Tick subclasses GH#21427\r\n return\r\n offset_n = self._get_offset(offset_types, normalize=True)\r\n assert not offset_n.is_on_offset(dt)\r\n\r\n if offset_types in (BusinessHour, CustomBusinessHour):\r\n # In default BusinessHour (9:00-17:00), normalized time\r\n # cannot be in business hour range\r\n return\r\n date = datetime(dt.year, dt.month, dt.day)\r\n assert offset_n.is_on_offset(date)\r\n\r\n def test_add(self, offset_types, tz_naive_fixture):\r\n tz = tz_naive_fixture\r\n dt = datetime(2011, 1, 1, 9, 0)\r\n\r\n offset_s = self._get_offset(offset_types)\r\n expected = self.expecteds[offset_types.__name__]\r\n\r\n result_dt = dt + offset_s\r\n result_ts = Timestamp(dt) + offset_s\r\n for result in [result_dt, result_ts]:\r\n assert isinstance(result, Timestamp)\r\n assert result == expected\r\n\r\n expected_localize = expected.tz_localize(tz)\r\n result = Timestamp(dt, tz=tz) + offset_s\r\n assert isinstance(result, Timestamp)\r\n assert result == expected_localize\r\n\r\n # normalize=True, disallowed for Tick subclasses GH#21427\r\n if issubclass(offset_types, Tick):\r\n return\r\n offset_s = self._get_offset(offset_types, normalize=True)\r\n expected = Timestamp(expected.date())\r\n\r\n result_dt = dt + offset_s\r\n result_ts = Timestamp(dt) + offset_s\r\n for result in [result_dt, result_ts]:\r\n assert isinstance(result, Timestamp)\r\n assert result == expected\r\n\r\n expected_localize = expected.tz_localize(tz)\r\n result = Timestamp(dt, tz=tz) + offset_s\r\n assert isinstance(result, Timestamp)\r\n assert result == 
expected_localize\r\n\r\n def test_add_empty_datetimeindex(self, offset_types, tz_naive_fixture):\r\n # GH#12724, GH#30336\r\n offset_s = self._get_offset(offset_types)\r\n\r\n dti = DatetimeIndex([], tz=tz_naive_fixture)\r\n\r\n warn = None\r\n if isinstance(\r\n offset_s,\r\n (\r\n Easter,\r\n WeekOfMonth,\r\n LastWeekOfMonth,\r\n CustomBusinessDay,\r\n BusinessHour,\r\n CustomBusinessHour,\r\n CustomBusinessMonthBegin,\r\n CustomBusinessMonthEnd,\r\n FY5253,\r\n FY5253Quarter,\r\n ),\r\n ):\r\n # We don't have an optimized apply_index\r\n warn = PerformanceWarning\r\n\r\n with tm.assert_produces_warning(warn):\r\n result = dti + offset_s\r\n tm.assert_index_equal(result, dti)\r\n with tm.assert_produces_warning(warn):\r\n result = offset_s + dti\r\n tm.assert_index_equal(result, dti)\r\n\r\n dta = dti._data\r\n with tm.assert_produces_warning(warn):\r\n result = dta + offset_s\r\n tm.assert_equal(result, dta)\r\n with tm.assert_produces_warning(warn):\r\n result = offset_s + dta\r\n tm.assert_equal(result, dta)\r\n\r\n def test_pickle_roundtrip(self, offset_types):\r\n off = self._get_offset(offset_types)\r\n res = tm.round_trip_pickle(off)\r\n assert off == res\r\n if type(off) is not DateOffset:\r\n for attr in off._attributes:\r\n if attr == \"calendar\":\r\n # np.busdaycalendar __eq__ will return False;\r\n # we check holidays and weekmask attrs so are OK\r\n continue\r\n # Make sure nothings got lost from _params (which __eq__) is based on\r\n assert getattr(off, attr) == getattr(res, attr)\r\n\r\n def test_pickle_dateoffset_odd_inputs(self):\r\n # GH#34511\r\n off = DateOffset(months=12)\r\n res = tm.round_trip_pickle(off)\r\n assert off == res\r\n\r\n base_dt = datetime(2020, 1, 1)\r\n assert base_dt + off == base_dt + res\r\n\r\n def test_onOffset_deprecated(self, offset_types):\r\n # GH#30340 use idiomatic naming\r\n off = self._get_offset(offset_types)\r\n\r\n ts = Timestamp.now()\r\n with tm.assert_produces_warning(FutureWarning):\r\n result = off.onOffset(ts)\r\n\r\n expected = off.is_on_offset(ts)\r\n assert result == expected\r\n\r\n def test_isAnchored_deprecated(self, offset_types):\r\n # GH#30340 use idiomatic naming\r\n off = self._get_offset(offset_types)\r\n\r\n with tm.assert_produces_warning(FutureWarning):\r\n result = off.isAnchored()\r\n\r\n expected = off.is_anchored()\r\n assert result == expected\r\n\r\n def test_offsets_hashable(self, offset_types):\r\n # GH: 37267\r\n off = self._get_offset(offset_types)\r\n assert hash(off) is not None\r\n\r\n\r\nclass TestDateOffset(Base):\r\n def setup_method(self, method):\r\n self.d = Timestamp(datetime(2008, 1, 2))\r\n _offset_map.clear()\r\n\r\n def test_repr(self):\r\n repr(DateOffset())\r\n repr(DateOffset(2))\r\n repr(2 * DateOffset())\r\n repr(2 * DateOffset(months=2))\r\n\r\n def test_mul(self):\r\n assert DateOffset(2) == 2 * DateOffset(1)\r\n assert DateOffset(2) == DateOffset(1) * 2\r\n\r\n def test_constructor(self):\r\n\r\n assert (self.d + DateOffset(months=2)) == datetime(2008, 3, 2)\r\n assert (self.d - DateOffset(months=2)) == datetime(2007, 11, 2)\r\n\r\n assert (self.d + DateOffset(2)) == datetime(2008, 1, 4)\r\n\r\n assert not DateOffset(2).is_anchored()\r\n assert DateOffset(1).is_anchored()\r\n\r\n d = datetime(2008, 1, 31)\r\n assert (d + DateOffset(months=1)) == datetime(2008, 2, 29)\r\n\r\n def test_copy(self):\r\n assert DateOffset(months=2).copy() == DateOffset(months=2)\r\n\r\n def test_eq(self):\r\n offset1 = DateOffset(days=1)\r\n offset2 = DateOffset(days=365)\r\n\r\n assert offset1 != 
offset2\r\n\r\n\r\nclass TestBusinessDay(Base):\r\n _offset = BDay\r\n\r\n def setup_method(self, method):\r\n self.d = datetime(2008, 1, 1)\r\n\r\n self.offset = BDay()\r\n self.offset1 = self.offset\r\n self.offset2 = BDay(2)\r\n\r\n def test_different_normalize_equals(self):\r\n # GH#21404 changed __eq__ to return False when `normalize` does not match\r\n offset = self._offset()\r\n offset2 = self._offset(normalize=True)\r\n assert offset != offset2\r\n\r\n def test_repr(self):\r\n assert repr(self.offset) == \"<BusinessDay>\"\r\n assert repr(self.offset2) == \"<2 * BusinessDays>\"\r\n\r\n expected = \"<BusinessDay: offset=datetime.timedelta(days=1)>\"\r\n assert repr(self.offset + timedelta(1)) == expected\r\n\r\n def test_with_offset(self):\r\n offset = self.offset + timedelta(hours=2)\r\n\r\n assert (self.d + offset) == datetime(2008, 1, 2, 2)\r\n\r\n def test_with_offset_index(self):\r\n dti = DatetimeIndex([self.d])\r\n result = dti + (self.offset + timedelta(hours=2))\r\n\r\n expected = DatetimeIndex([datetime(2008, 1, 2, 2)])\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_eq(self):\r\n assert self.offset2 == self.offset2\r\n\r\n def test_mul(self):\r\n pass\r\n\r\n def test_hash(self):\r\n assert hash(self.offset2) == hash(self.offset2)\r\n\r\n def test_call(self):\r\n with tm.assert_produces_warning(FutureWarning):\r\n # GH#34171 DateOffset.__call__ is deprecated\r\n assert self.offset2(self.d) == datetime(2008, 1, 3)\r\n\r\n def testRollback1(self):\r\n assert BDay(10).rollback(self.d) == self.d\r\n\r\n def testRollback2(self):\r\n assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)\r\n\r\n def testRollforward1(self):\r\n assert BDay(10).rollforward(self.d) == self.d\r\n\r\n def testRollforward2(self):\r\n assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)\r\n\r\n def test_roll_date_object(self):\r\n offset = BDay()\r\n\r\n dt = date(2012, 9, 15)\r\n\r\n result = offset.rollback(dt)\r\n assert result == datetime(2012, 9, 14)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2012, 9, 17)\r\n\r\n offset = offsets.Day()\r\n result = offset.rollback(dt)\r\n assert result == datetime(2012, 9, 15)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2012, 9, 15)\r\n\r\n def test_is_on_offset(self):\r\n tests = [\r\n (BDay(), datetime(2008, 1, 1), True),\r\n (BDay(), datetime(2008, 1, 5), False),\r\n ]\r\n\r\n for offset, d, expected in tests:\r\n assert_is_on_offset(offset, d, expected)\r\n\r\n apply_cases: _ApplyCases = []\r\n apply_cases.append(\r\n (\r\n BDay(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 2),\r\n datetime(2008, 1, 4): datetime(2008, 1, 7),\r\n datetime(2008, 1, 5): datetime(2008, 1, 7),\r\n datetime(2008, 1, 6): datetime(2008, 1, 7),\r\n datetime(2008, 1, 7): datetime(2008, 1, 8),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n 2 * BDay(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 3),\r\n datetime(2008, 1, 4): datetime(2008, 1, 8),\r\n datetime(2008, 1, 5): datetime(2008, 1, 8),\r\n datetime(2008, 1, 6): datetime(2008, 1, 8),\r\n datetime(2008, 1, 7): datetime(2008, 1, 9),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n -BDay(),\r\n {\r\n datetime(2008, 1, 1): datetime(2007, 12, 31),\r\n datetime(2008, 1, 4): datetime(2008, 1, 3),\r\n datetime(2008, 1, 5): datetime(2008, 1, 4),\r\n datetime(2008, 1, 6): datetime(2008, 1, 4),\r\n datetime(2008, 1, 7): datetime(2008, 1, 4),\r\n datetime(2008, 1, 8): datetime(2008, 1, 7),\r\n },\r\n )\r\n 
)\r\n\r\n apply_cases.append(\r\n (\r\n -2 * BDay(),\r\n {\r\n datetime(2008, 1, 1): datetime(2007, 12, 28),\r\n datetime(2008, 1, 4): datetime(2008, 1, 2),\r\n datetime(2008, 1, 5): datetime(2008, 1, 3),\r\n datetime(2008, 1, 6): datetime(2008, 1, 3),\r\n datetime(2008, 1, 7): datetime(2008, 1, 3),\r\n datetime(2008, 1, 8): datetime(2008, 1, 4),\r\n datetime(2008, 1, 9): datetime(2008, 1, 7),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BDay(0),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 1),\r\n datetime(2008, 1, 4): datetime(2008, 1, 4),\r\n datetime(2008, 1, 5): datetime(2008, 1, 7),\r\n datetime(2008, 1, 6): datetime(2008, 1, 7),\r\n datetime(2008, 1, 7): datetime(2008, 1, 7),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", apply_cases)\r\n def test_apply(self, case):\r\n offset, cases = case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n def test_apply_large_n(self):\r\n dt = datetime(2012, 10, 23)\r\n\r\n result = dt + BDay(10)\r\n assert result == datetime(2012, 11, 6)\r\n\r\n result = dt + BDay(100) - BDay(100)\r\n assert result == dt\r\n\r\n off = BDay() * 6\r\n rs = datetime(2012, 1, 1) - off\r\n xp = datetime(2011, 12, 23)\r\n assert rs == xp\r\n\r\n st = datetime(2011, 12, 18)\r\n rs = st + off\r\n xp = datetime(2011, 12, 26)\r\n assert rs == xp\r\n\r\n off = BDay() * 10\r\n rs = datetime(2014, 1, 5) + off # see #5890\r\n xp = datetime(2014, 1, 17)\r\n assert rs == xp\r\n\r\n def test_apply_corner(self):\r\n msg = \"Only know how to combine business day with datetime or timedelta\"\r\n with pytest.raises(ApplyTypeError, match=msg):\r\n BDay().apply(BMonthEnd())\r\n\r\n\r\nclass TestBusinessHour(Base):\r\n _offset = BusinessHour\r\n\r\n def setup_method(self, method):\r\n self.d = datetime(2014, 7, 1, 10, 00)\r\n\r\n self.offset1 = BusinessHour()\r\n self.offset2 = BusinessHour(n=3)\r\n\r\n self.offset3 = BusinessHour(n=-1)\r\n self.offset4 = BusinessHour(n=-4)\r\n\r\n from datetime import time as dt_time\r\n\r\n self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))\r\n self.offset6 = BusinessHour(start=\"20:00\", end=\"05:00\")\r\n self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30))\r\n self.offset8 = BusinessHour(start=[\"09:00\", \"13:00\"], end=[\"12:00\", \"17:00\"])\r\n self.offset9 = BusinessHour(\r\n n=3, start=[\"09:00\", \"22:00\"], end=[\"13:00\", \"03:00\"]\r\n )\r\n self.offset10 = BusinessHour(\r\n n=-1, start=[\"23:00\", \"13:00\"], end=[\"02:00\", \"17:00\"]\r\n )\r\n\r\n @pytest.mark.parametrize(\r\n \"start,end,match\",\r\n [\r\n (\r\n dt_time(11, 0, 5),\r\n \"17:00\",\r\n \"time data must be specified only with hour and minute\",\r\n ),\r\n (\"AAA\", \"17:00\", \"time data must match '%H:%M' format\"),\r\n (\"14:00:05\", \"17:00\", \"time data must match '%H:%M' format\"),\r\n ([], \"17:00\", \"Must include at least 1 start time\"),\r\n (\"09:00\", [], \"Must include at least 1 end time\"),\r\n (\r\n [\"09:00\", \"11:00\"],\r\n \"17:00\",\r\n \"number of starting time and ending time must be the same\",\r\n ),\r\n (\r\n [\"09:00\", \"11:00\"],\r\n [\"10:00\"],\r\n \"number of starting time and ending time must be the same\",\r\n ),\r\n (\r\n [\"09:00\", \"11:00\"],\r\n [\"12:00\", \"20:00\"],\r\n r\"invalid starting and ending time\\(s\\): opening hours should not \"\r\n \"touch or overlap with one another\",\r\n ),\r\n (\r\n [\"12:00\", \"20:00\"],\r\n [\"09:00\", \"11:00\"],\r\n r\"invalid starting and ending time\\(s\\): opening hours 
should not \"\r\n \"touch or overlap with one another\",\r\n ),\r\n ],\r\n )\r\n def test_constructor_errors(self, start, end, match):\r\n with pytest.raises(ValueError, match=match):\r\n BusinessHour(start=start, end=end)\r\n\r\n def test_different_normalize_equals(self):\r\n # GH#21404 changed __eq__ to return False when `normalize` does not match\r\n offset = self._offset()\r\n offset2 = self._offset(normalize=True)\r\n assert offset != offset2\r\n\r\n def test_repr(self):\r\n assert repr(self.offset1) == \"<BusinessHour: BH=09:00-17:00>\"\r\n assert repr(self.offset2) == \"<3 * BusinessHours: BH=09:00-17:00>\"\r\n assert repr(self.offset3) == \"<-1 * BusinessHour: BH=09:00-17:00>\"\r\n assert repr(self.offset4) == \"<-4 * BusinessHours: BH=09:00-17:00>\"\r\n\r\n assert repr(self.offset5) == \"<BusinessHour: BH=11:00-14:30>\"\r\n assert repr(self.offset6) == \"<BusinessHour: BH=20:00-05:00>\"\r\n assert repr(self.offset7) == \"<-2 * BusinessHours: BH=21:30-06:30>\"\r\n assert repr(self.offset8) == \"<BusinessHour: BH=09:00-12:00,13:00-17:00>\"\r\n assert repr(self.offset9) == \"<3 * BusinessHours: BH=09:00-13:00,22:00-03:00>\"\r\n assert repr(self.offset10) == \"<-1 * BusinessHour: BH=13:00-17:00,23:00-02:00>\"\r\n\r\n def test_with_offset(self):\r\n expected = Timestamp(\"2014-07-01 13:00\")\r\n\r\n assert self.d + BusinessHour() * 3 == expected\r\n assert self.d + BusinessHour(n=3) == expected\r\n\r\n @pytest.mark.parametrize(\r\n \"offset_name\",\r\n [\"offset1\", \"offset2\", \"offset3\", \"offset4\", \"offset8\", \"offset9\", \"offset10\"],\r\n )\r\n def test_eq_attribute(self, offset_name):\r\n offset = getattr(self, offset_name)\r\n assert offset == offset\r\n\r\n @pytest.mark.parametrize(\r\n \"offset1,offset2\",\r\n [\r\n (BusinessHour(start=\"09:00\"), BusinessHour()),\r\n (\r\n BusinessHour(start=[\"23:00\", \"13:00\"], end=[\"12:00\", \"17:00\"]),\r\n BusinessHour(start=[\"13:00\", \"23:00\"], end=[\"17:00\", \"12:00\"]),\r\n ),\r\n ],\r\n )\r\n def test_eq(self, offset1, offset2):\r\n assert offset1 == offset2\r\n\r\n @pytest.mark.parametrize(\r\n \"offset1,offset2\",\r\n [\r\n (BusinessHour(), BusinessHour(-1)),\r\n (BusinessHour(start=\"09:00\"), BusinessHour(start=\"09:01\")),\r\n (\r\n BusinessHour(start=\"09:00\", end=\"17:00\"),\r\n BusinessHour(start=\"17:00\", end=\"09:01\"),\r\n ),\r\n (\r\n BusinessHour(start=[\"13:00\", \"23:00\"], end=[\"18:00\", \"07:00\"]),\r\n BusinessHour(start=[\"13:00\", \"23:00\"], end=[\"17:00\", \"12:00\"]),\r\n ),\r\n ],\r\n )\r\n def test_neq(self, offset1, offset2):\r\n assert offset1 != offset2\r\n\r\n @pytest.mark.parametrize(\r\n \"offset_name\",\r\n [\"offset1\", \"offset2\", \"offset3\", \"offset4\", \"offset8\", \"offset9\", \"offset10\"],\r\n )\r\n def test_hash(self, offset_name):\r\n offset = getattr(self, offset_name)\r\n assert offset == offset\r\n\r\n def test_call(self):\r\n with tm.assert_produces_warning(FutureWarning):\r\n # GH#34171 DateOffset.__call__ is deprecated\r\n assert self.offset1(self.d) == datetime(2014, 7, 1, 11)\r\n assert self.offset2(self.d) == datetime(2014, 7, 1, 13)\r\n assert self.offset3(self.d) == datetime(2014, 6, 30, 17)\r\n assert self.offset4(self.d) == datetime(2014, 6, 30, 14)\r\n assert self.offset8(self.d) == datetime(2014, 7, 1, 11)\r\n assert self.offset9(self.d) == datetime(2014, 7, 1, 22)\r\n assert self.offset10(self.d) == datetime(2014, 7, 1, 1)\r\n\r\n def test_sub(self):\r\n # we have to override test_sub here because self.offset2 is not\r\n # defined as self._offset(2)\r\n off = 
self.offset2\r\n msg = \"Cannot subtract datetime from offset\"\r\n with pytest.raises(TypeError, match=msg):\r\n off - self.d\r\n assert 2 * off - off == off\r\n\r\n assert self.d - self.offset2 == self.d + self._offset(-3)\r\n\r\n def testRollback1(self):\r\n assert self.offset1.rollback(self.d) == self.d\r\n assert self.offset2.rollback(self.d) == self.d\r\n assert self.offset3.rollback(self.d) == self.d\r\n assert self.offset4.rollback(self.d) == self.d\r\n assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)\r\n assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)\r\n assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)\r\n assert self.offset8.rollback(self.d) == self.d\r\n assert self.offset9.rollback(self.d) == self.d\r\n assert self.offset10.rollback(self.d) == datetime(2014, 7, 1, 2)\r\n\r\n d = datetime(2014, 7, 1, 0)\r\n assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)\r\n assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)\r\n assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)\r\n assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)\r\n assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)\r\n assert self.offset6.rollback(d) == d\r\n assert self.offset7.rollback(d) == d\r\n assert self.offset8.rollback(d) == datetime(2014, 6, 30, 17)\r\n assert self.offset9.rollback(d) == d\r\n assert self.offset10.rollback(d) == d\r\n\r\n assert self._offset(5).rollback(self.d) == self.d\r\n\r\n def testRollback2(self):\r\n assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(\r\n 2014, 7, 4, 17, 0\r\n )\r\n\r\n def testRollforward1(self):\r\n assert self.offset1.rollforward(self.d) == self.d\r\n assert self.offset2.rollforward(self.d) == self.d\r\n assert self.offset3.rollforward(self.d) == self.d\r\n assert self.offset4.rollforward(self.d) == self.d\r\n assert self.offset5.rollforward(self.d) == datetime(2014, 7, 1, 11, 0)\r\n assert self.offset6.rollforward(self.d) == datetime(2014, 7, 1, 20, 0)\r\n assert self.offset7.rollforward(self.d) == datetime(2014, 7, 1, 21, 30)\r\n assert self.offset8.rollforward(self.d) == self.d\r\n assert self.offset9.rollforward(self.d) == self.d\r\n assert self.offset10.rollforward(self.d) == datetime(2014, 7, 1, 13)\r\n\r\n d = datetime(2014, 7, 1, 0)\r\n assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)\r\n assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)\r\n assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)\r\n assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)\r\n assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)\r\n assert self.offset6.rollforward(d) == d\r\n assert self.offset7.rollforward(d) == d\r\n assert self.offset8.rollforward(d) == datetime(2014, 7, 1, 9)\r\n assert self.offset9.rollforward(d) == d\r\n assert self.offset10.rollforward(d) == d\r\n\r\n assert self._offset(5).rollforward(self.d) == self.d\r\n\r\n def testRollforward2(self):\r\n assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(\r\n 2014, 7, 7, 9\r\n )\r\n\r\n def test_roll_date_object(self):\r\n offset = BusinessHour()\r\n\r\n dt = datetime(2014, 7, 6, 15, 0)\r\n\r\n result = offset.rollback(dt)\r\n assert result == datetime(2014, 7, 4, 17)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2014, 7, 7, 9)\r\n\r\n normalize_cases = []\r\n normalize_cases.append(\r\n (\r\n BusinessHour(normalize=True),\r\n {\r\n datetime(2014, 7, 1, 8): datetime(2014, 7, 1),\r\n 
datetime(2014, 7, 1, 17): datetime(2014, 7, 2),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2),\r\n datetime(2014, 7, 1, 23): datetime(2014, 7, 2),\r\n datetime(2014, 7, 1, 0): datetime(2014, 7, 1),\r\n datetime(2014, 7, 4, 15): datetime(2014, 7, 4),\r\n datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),\r\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),\r\n datetime(2014, 7, 5, 23): datetime(2014, 7, 7),\r\n datetime(2014, 7, 6, 10): datetime(2014, 7, 7),\r\n },\r\n )\r\n )\r\n\r\n normalize_cases.append(\r\n (\r\n BusinessHour(-1, normalize=True),\r\n {\r\n datetime(2014, 7, 1, 8): datetime(2014, 6, 30),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1),\r\n datetime(2014, 7, 1, 10): datetime(2014, 6, 30),\r\n datetime(2014, 7, 1, 0): datetime(2014, 6, 30),\r\n datetime(2014, 7, 7, 10): datetime(2014, 7, 4),\r\n datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),\r\n datetime(2014, 7, 5, 23): datetime(2014, 7, 4),\r\n datetime(2014, 7, 6, 10): datetime(2014, 7, 4),\r\n },\r\n )\r\n )\r\n\r\n normalize_cases.append(\r\n (\r\n BusinessHour(1, normalize=True, start=\"17:00\", end=\"04:00\"),\r\n {\r\n datetime(2014, 7, 1, 8): datetime(2014, 7, 1),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1),\r\n datetime(2014, 7, 1, 23): datetime(2014, 7, 2),\r\n datetime(2014, 7, 2, 2): datetime(2014, 7, 2),\r\n datetime(2014, 7, 2, 3): datetime(2014, 7, 2),\r\n datetime(2014, 7, 4, 23): datetime(2014, 7, 5),\r\n datetime(2014, 7, 5, 2): datetime(2014, 7, 5),\r\n datetime(2014, 7, 7, 2): datetime(2014, 7, 7),\r\n datetime(2014, 7, 7, 17): datetime(2014, 7, 7),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", normalize_cases)\r\n def test_normalize(self, case):\r\n offset, cases = case\r\n for dt, expected in cases.items():\r\n assert offset.apply(dt) == expected\r\n\r\n on_offset_cases = []\r\n on_offset_cases.append(\r\n (\r\n BusinessHour(),\r\n {\r\n datetime(2014, 7, 1, 9): True,\r\n datetime(2014, 7, 1, 8, 59): False,\r\n datetime(2014, 7, 1, 8): False,\r\n datetime(2014, 7, 1, 17): True,\r\n datetime(2014, 7, 1, 17, 1): False,\r\n datetime(2014, 7, 1, 18): False,\r\n datetime(2014, 7, 5, 9): False,\r\n datetime(2014, 7, 6, 12): False,\r\n },\r\n )\r\n )\r\n\r\n on_offset_cases.append(\r\n (\r\n BusinessHour(start=\"10:00\", end=\"15:00\"),\r\n {\r\n datetime(2014, 7, 1, 9): False,\r\n datetime(2014, 7, 1, 10): True,\r\n datetime(2014, 7, 1, 15): True,\r\n datetime(2014, 7, 1, 15, 1): False,\r\n datetime(2014, 7, 5, 12): False,\r\n datetime(2014, 7, 6, 12): False,\r\n },\r\n )\r\n )\r\n\r\n on_offset_cases.append(\r\n (\r\n BusinessHour(start=\"19:00\", end=\"05:00\"),\r\n {\r\n datetime(2014, 7, 1, 9, 0): False,\r\n datetime(2014, 7, 1, 10, 0): False,\r\n datetime(2014, 7, 1, 15): False,\r\n datetime(2014, 7, 1, 15, 1): False,\r\n datetime(2014, 7, 5, 12, 0): False,\r\n datetime(2014, 7, 6, 12, 0): False,\r\n datetime(2014, 7, 1, 19, 0): True,\r\n datetime(2014, 7, 2, 0, 0): True,\r\n datetime(2014, 7, 4, 23): True,\r\n datetime(2014, 7, 5, 1): True,\r\n datetime(2014, 7, 5, 5, 0): True,\r\n datetime(2014, 7, 6, 23, 0): False,\r\n datetime(2014, 7, 7, 3, 0): False,\r\n },\r\n )\r\n )\r\n\r\n on_offset_cases.append(\r\n (\r\n BusinessHour(start=[\"09:00\", \"13:00\"], end=[\"12:00\", \"17:00\"]),\r\n {\r\n datetime(2014, 7, 1, 9): True,\r\n datetime(2014, 7, 1, 8, 59): False,\r\n datetime(2014, 7, 1, 8): False,\r\n datetime(2014, 7, 1, 17): True,\r\n datetime(2014, 7, 1, 17, 1): False,\r\n datetime(2014, 7, 1, 18): 
False,\r\n datetime(2014, 7, 5, 9): False,\r\n datetime(2014, 7, 6, 12): False,\r\n datetime(2014, 7, 1, 12, 30): False,\r\n },\r\n )\r\n )\r\n\r\n on_offset_cases.append(\r\n (\r\n BusinessHour(start=[\"19:00\", \"23:00\"], end=[\"21:00\", \"05:00\"]),\r\n {\r\n datetime(2014, 7, 1, 9, 0): False,\r\n datetime(2014, 7, 1, 10, 0): False,\r\n datetime(2014, 7, 1, 15): False,\r\n datetime(2014, 7, 1, 15, 1): False,\r\n datetime(2014, 7, 5, 12, 0): False,\r\n datetime(2014, 7, 6, 12, 0): False,\r\n datetime(2014, 7, 1, 19, 0): True,\r\n datetime(2014, 7, 2, 0, 0): True,\r\n datetime(2014, 7, 4, 23): True,\r\n datetime(2014, 7, 5, 1): True,\r\n datetime(2014, 7, 5, 5, 0): True,\r\n datetime(2014, 7, 6, 23, 0): False,\r\n datetime(2014, 7, 7, 3, 0): False,\r\n datetime(2014, 7, 4, 22): False,\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", on_offset_cases)\r\n def test_is_on_offset(self, case):\r\n offset, cases = case\r\n for dt, expected in cases.items():\r\n assert offset.is_on_offset(dt) == expected\r\n\r\n opening_time_cases = []\r\n # opening time should be affected by sign of n, not by n's value and\r\n # end\r\n opening_time_cases.append(\r\n (\r\n [\r\n BusinessHour(),\r\n BusinessHour(n=2),\r\n BusinessHour(n=4),\r\n BusinessHour(end=\"10:00\"),\r\n BusinessHour(n=2, end=\"4:00\"),\r\n BusinessHour(n=4, end=\"15:00\"),\r\n ],\r\n {\r\n datetime(2014, 7, 1, 11): (\r\n datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 1, 9),\r\n ),\r\n datetime(2014, 7, 1, 18): (\r\n datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 1, 9),\r\n ),\r\n datetime(2014, 7, 1, 23): (\r\n datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 1, 9),\r\n ),\r\n datetime(2014, 7, 2, 8): (\r\n datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 1, 9),\r\n ),\r\n # if timestamp is on opening time, next opening time is\r\n # as it is\r\n datetime(2014, 7, 2, 9): (\r\n datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 2, 9),\r\n ),\r\n datetime(2014, 7, 2, 10): (\r\n datetime(2014, 7, 3, 9),\r\n datetime(2014, 7, 2, 9),\r\n ),\r\n # 2014-07-05 is saturday\r\n datetime(2014, 7, 5, 10): (\r\n datetime(2014, 7, 7, 9),\r\n datetime(2014, 7, 4, 9),\r\n ),\r\n datetime(2014, 7, 4, 10): (\r\n datetime(2014, 7, 7, 9),\r\n datetime(2014, 7, 4, 9),\r\n ),\r\n datetime(2014, 7, 4, 23): (\r\n datetime(2014, 7, 7, 9),\r\n datetime(2014, 7, 4, 9),\r\n ),\r\n datetime(2014, 7, 6, 10): (\r\n datetime(2014, 7, 7, 9),\r\n datetime(2014, 7, 4, 9),\r\n ),\r\n datetime(2014, 7, 7, 5): (\r\n datetime(2014, 7, 7, 9),\r\n datetime(2014, 7, 4, 9),\r\n ),\r\n datetime(2014, 7, 7, 9, 1): (\r\n datetime(2014, 7, 8, 9),\r\n datetime(2014, 7, 7, 9),\r\n ),\r\n },\r\n )\r\n )\r\n\r\n opening_time_cases.append(\r\n (\r\n [\r\n BusinessHour(start=\"11:15\"),\r\n BusinessHour(n=2, start=\"11:15\"),\r\n BusinessHour(n=3, start=\"11:15\"),\r\n BusinessHour(start=\"11:15\", end=\"10:00\"),\r\n BusinessHour(n=2, start=\"11:15\", end=\"4:00\"),\r\n BusinessHour(n=3, start=\"11:15\", end=\"15:00\"),\r\n ],\r\n {\r\n datetime(2014, 7, 1, 11): (\r\n datetime(2014, 7, 1, 11, 15),\r\n datetime(2014, 6, 30, 11, 15),\r\n ),\r\n datetime(2014, 7, 1, 18): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 11, 15),\r\n ),\r\n datetime(2014, 7, 1, 23): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 11, 15),\r\n ),\r\n datetime(2014, 7, 2, 8): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 11, 15),\r\n ),\r\n datetime(2014, 7, 2, 9): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 11, 15),\r\n ),\r\n datetime(2014, 7, 2, 
10): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 11, 15),\r\n ),\r\n datetime(2014, 7, 2, 11, 15): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 2, 11, 15),\r\n ),\r\n datetime(2014, 7, 2, 11, 15, 1): (\r\n datetime(2014, 7, 3, 11, 15),\r\n datetime(2014, 7, 2, 11, 15),\r\n ),\r\n datetime(2014, 7, 5, 10): (\r\n datetime(2014, 7, 7, 11, 15),\r\n datetime(2014, 7, 4, 11, 15),\r\n ),\r\n datetime(2014, 7, 4, 10): (\r\n datetime(2014, 7, 4, 11, 15),\r\n datetime(2014, 7, 3, 11, 15),\r\n ),\r\n datetime(2014, 7, 4, 23): (\r\n datetime(2014, 7, 7, 11, 15),\r\n datetime(2014, 7, 4, 11, 15),\r\n ),\r\n datetime(2014, 7, 6, 10): (\r\n datetime(2014, 7, 7, 11, 15),\r\n datetime(2014, 7, 4, 11, 15),\r\n ),\r\n datetime(2014, 7, 7, 5): (\r\n datetime(2014, 7, 7, 11, 15),\r\n datetime(2014, 7, 4, 11, 15),\r\n ),\r\n datetime(2014, 7, 7, 9, 1): (\r\n datetime(2014, 7, 7, 11, 15),\r\n datetime(2014, 7, 4, 11, 15),\r\n ),\r\n },\r\n )\r\n )\r\n\r\n opening_time_cases.append(\r\n (\r\n [\r\n BusinessHour(-1),\r\n BusinessHour(n=-2),\r\n BusinessHour(n=-4),\r\n BusinessHour(n=-1, end=\"10:00\"),\r\n BusinessHour(n=-2, end=\"4:00\"),\r\n BusinessHour(n=-4, end=\"15:00\"),\r\n ],\r\n {\r\n datetime(2014, 7, 1, 11): (\r\n datetime(2014, 7, 1, 9),\r\n datetime(2014, 7, 2, 9),\r\n ),\r\n datetime(2014, 7, 1, 18): (\r\n datetime(2014, 7, 1, 9),\r\n datetime(2014, 7, 2, 9),\r\n ),\r\n datetime(2014, 7, 1, 23): (\r\n datetime(2014, 7, 1, 9),\r\n datetime(2014, 7, 2, 9),\r\n ),\r\n datetime(2014, 7, 2, 8): (\r\n datetime(2014, 7, 1, 9),\r\n datetime(2014, 7, 2, 9),\r\n ),\r\n datetime(2014, 7, 2, 9): (\r\n datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 2, 9),\r\n ),\r\n datetime(2014, 7, 2, 10): (\r\n datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 3, 9),\r\n ),\r\n datetime(2014, 7, 5, 10): (\r\n datetime(2014, 7, 4, 9),\r\n datetime(2014, 7, 7, 9),\r\n ),\r\n datetime(2014, 7, 4, 10): (\r\n datetime(2014, 7, 4, 9),\r\n datetime(2014, 7, 7, 9),\r\n ),\r\n datetime(2014, 7, 4, 23): (\r\n datetime(2014, 7, 4, 9),\r\n datetime(2014, 7, 7, 9),\r\n ),\r\n datetime(2014, 7, 6, 10): (\r\n datetime(2014, 7, 4, 9),\r\n datetime(2014, 7, 7, 9),\r\n ),\r\n datetime(2014, 7, 7, 5): (\r\n datetime(2014, 7, 4, 9),\r\n datetime(2014, 7, 7, 9),\r\n ),\r\n datetime(2014, 7, 7, 9): (\r\n datetime(2014, 7, 7, 9),\r\n datetime(2014, 7, 7, 9),\r\n ),\r\n datetime(2014, 7, 7, 9, 1): (\r\n datetime(2014, 7, 7, 9),\r\n datetime(2014, 7, 8, 9),\r\n ),\r\n },\r\n )\r\n )\r\n\r\n opening_time_cases.append(\r\n (\r\n [\r\n BusinessHour(start=\"17:00\", end=\"05:00\"),\r\n BusinessHour(n=3, start=\"17:00\", end=\"03:00\"),\r\n ],\r\n {\r\n datetime(2014, 7, 1, 11): (\r\n datetime(2014, 7, 1, 17),\r\n datetime(2014, 6, 30, 17),\r\n ),\r\n datetime(2014, 7, 1, 18): (\r\n datetime(2014, 7, 2, 17),\r\n datetime(2014, 7, 1, 17),\r\n ),\r\n datetime(2014, 7, 1, 23): (\r\n datetime(2014, 7, 2, 17),\r\n datetime(2014, 7, 1, 17),\r\n ),\r\n datetime(2014, 7, 2, 8): (\r\n datetime(2014, 7, 2, 17),\r\n datetime(2014, 7, 1, 17),\r\n ),\r\n datetime(2014, 7, 2, 9): (\r\n datetime(2014, 7, 2, 17),\r\n datetime(2014, 7, 1, 17),\r\n ),\r\n datetime(2014, 7, 4, 17): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 4, 17),\r\n ),\r\n datetime(2014, 7, 5, 10): (\r\n datetime(2014, 7, 7, 17),\r\n datetime(2014, 7, 4, 17),\r\n ),\r\n datetime(2014, 7, 4, 10): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 3, 17),\r\n ),\r\n datetime(2014, 7, 4, 23): (\r\n datetime(2014, 7, 7, 17),\r\n datetime(2014, 7, 4, 17),\r\n ),\r\n 
datetime(2014, 7, 6, 10): (\r\n datetime(2014, 7, 7, 17),\r\n datetime(2014, 7, 4, 17),\r\n ),\r\n datetime(2014, 7, 7, 5): (\r\n datetime(2014, 7, 7, 17),\r\n datetime(2014, 7, 4, 17),\r\n ),\r\n datetime(2014, 7, 7, 17, 1): (\r\n datetime(2014, 7, 8, 17),\r\n datetime(2014, 7, 7, 17),\r\n ),\r\n },\r\n )\r\n )\r\n\r\n opening_time_cases.append(\r\n (\r\n [\r\n BusinessHour(-1, start=\"17:00\", end=\"05:00\"),\r\n BusinessHour(n=-2, start=\"17:00\", end=\"03:00\"),\r\n ],\r\n {\r\n datetime(2014, 7, 1, 11): (\r\n datetime(2014, 6, 30, 17),\r\n datetime(2014, 7, 1, 17),\r\n ),\r\n datetime(2014, 7, 1, 18): (\r\n datetime(2014, 7, 1, 17),\r\n datetime(2014, 7, 2, 17),\r\n ),\r\n datetime(2014, 7, 1, 23): (\r\n datetime(2014, 7, 1, 17),\r\n datetime(2014, 7, 2, 17),\r\n ),\r\n datetime(2014, 7, 2, 8): (\r\n datetime(2014, 7, 1, 17),\r\n datetime(2014, 7, 2, 17),\r\n ),\r\n datetime(2014, 7, 2, 9): (\r\n datetime(2014, 7, 1, 17),\r\n datetime(2014, 7, 2, 17),\r\n ),\r\n datetime(2014, 7, 2, 16, 59): (\r\n datetime(2014, 7, 1, 17),\r\n datetime(2014, 7, 2, 17),\r\n ),\r\n datetime(2014, 7, 5, 10): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 7, 17),\r\n ),\r\n datetime(2014, 7, 4, 10): (\r\n datetime(2014, 7, 3, 17),\r\n datetime(2014, 7, 4, 17),\r\n ),\r\n datetime(2014, 7, 4, 23): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 7, 17),\r\n ),\r\n datetime(2014, 7, 6, 10): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 7, 17),\r\n ),\r\n datetime(2014, 7, 7, 5): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 7, 17),\r\n ),\r\n datetime(2014, 7, 7, 18): (\r\n datetime(2014, 7, 7, 17),\r\n datetime(2014, 7, 8, 17),\r\n ),\r\n },\r\n )\r\n )\r\n\r\n opening_time_cases.append(\r\n (\r\n [\r\n BusinessHour(start=[\"11:15\", \"15:00\"], end=[\"13:00\", \"20:00\"]),\r\n BusinessHour(n=3, start=[\"11:15\", \"15:00\"], end=[\"12:00\", \"20:00\"]),\r\n BusinessHour(start=[\"11:15\", \"15:00\"], end=[\"13:00\", \"17:00\"]),\r\n BusinessHour(n=2, start=[\"11:15\", \"15:00\"], end=[\"12:00\", \"03:00\"]),\r\n BusinessHour(n=3, start=[\"11:15\", \"15:00\"], end=[\"13:00\", \"16:00\"]),\r\n ],\r\n {\r\n datetime(2014, 7, 1, 11): (\r\n datetime(2014, 7, 1, 11, 15),\r\n datetime(2014, 6, 30, 15),\r\n ),\r\n datetime(2014, 7, 1, 18): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 15),\r\n ),\r\n datetime(2014, 7, 1, 23): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 15),\r\n ),\r\n datetime(2014, 7, 2, 8): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 15),\r\n ),\r\n datetime(2014, 7, 2, 9): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 15),\r\n ),\r\n datetime(2014, 7, 2, 10): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 1, 15),\r\n ),\r\n datetime(2014, 7, 2, 11, 15): (\r\n datetime(2014, 7, 2, 11, 15),\r\n datetime(2014, 7, 2, 11, 15),\r\n ),\r\n datetime(2014, 7, 2, 11, 15, 1): (\r\n datetime(2014, 7, 2, 15),\r\n datetime(2014, 7, 2, 11, 15),\r\n ),\r\n datetime(2014, 7, 5, 10): (\r\n datetime(2014, 7, 7, 11, 15),\r\n datetime(2014, 7, 4, 15),\r\n ),\r\n datetime(2014, 7, 4, 10): (\r\n datetime(2014, 7, 4, 11, 15),\r\n datetime(2014, 7, 3, 15),\r\n ),\r\n datetime(2014, 7, 4, 23): (\r\n datetime(2014, 7, 7, 11, 15),\r\n datetime(2014, 7, 4, 15),\r\n ),\r\n datetime(2014, 7, 6, 10): (\r\n datetime(2014, 7, 7, 11, 15),\r\n datetime(2014, 7, 4, 15),\r\n ),\r\n datetime(2014, 7, 7, 5): (\r\n datetime(2014, 7, 7, 11, 15),\r\n datetime(2014, 7, 4, 15),\r\n ),\r\n datetime(2014, 7, 7, 9, 1): (\r\n datetime(2014, 
7, 7, 11, 15),\r\n datetime(2014, 7, 4, 15),\r\n ),\r\n datetime(2014, 7, 7, 12): (\r\n datetime(2014, 7, 7, 15),\r\n datetime(2014, 7, 7, 11, 15),\r\n ),\r\n },\r\n )\r\n )\r\n\r\n opening_time_cases.append(\r\n (\r\n [\r\n BusinessHour(n=-1, start=[\"17:00\", \"08:00\"], end=[\"05:00\", \"10:00\"]),\r\n BusinessHour(n=-2, start=[\"08:00\", \"17:00\"], end=[\"10:00\", \"03:00\"]),\r\n ],\r\n {\r\n datetime(2014, 7, 1, 11): (\r\n datetime(2014, 7, 1, 8),\r\n datetime(2014, 7, 1, 17),\r\n ),\r\n datetime(2014, 7, 1, 18): (\r\n datetime(2014, 7, 1, 17),\r\n datetime(2014, 7, 2, 8),\r\n ),\r\n datetime(2014, 7, 1, 23): (\r\n datetime(2014, 7, 1, 17),\r\n datetime(2014, 7, 2, 8),\r\n ),\r\n datetime(2014, 7, 2, 8): (\r\n datetime(2014, 7, 2, 8),\r\n datetime(2014, 7, 2, 8),\r\n ),\r\n datetime(2014, 7, 2, 9): (\r\n datetime(2014, 7, 2, 8),\r\n datetime(2014, 7, 2, 17),\r\n ),\r\n datetime(2014, 7, 2, 16, 59): (\r\n datetime(2014, 7, 2, 8),\r\n datetime(2014, 7, 2, 17),\r\n ),\r\n datetime(2014, 7, 5, 10): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 7, 8),\r\n ),\r\n datetime(2014, 7, 4, 10): (\r\n datetime(2014, 7, 4, 8),\r\n datetime(2014, 7, 4, 17),\r\n ),\r\n datetime(2014, 7, 4, 23): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 7, 8),\r\n ),\r\n datetime(2014, 7, 6, 10): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 7, 8),\r\n ),\r\n datetime(2014, 7, 7, 5): (\r\n datetime(2014, 7, 4, 17),\r\n datetime(2014, 7, 7, 8),\r\n ),\r\n datetime(2014, 7, 7, 18): (\r\n datetime(2014, 7, 7, 17),\r\n datetime(2014, 7, 8, 8),\r\n ),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", opening_time_cases)\r\n def test_opening_time(self, case):\r\n _offsets, cases = case\r\n for offset in _offsets:\r\n for dt, (exp_next, exp_prev) in cases.items():\r\n assert offset._next_opening_time(dt) == exp_next\r\n assert offset._prev_opening_time(dt) == exp_prev\r\n\r\n apply_cases = []\r\n apply_cases.append(\r\n (\r\n BusinessHour(),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),\r\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),\r\n # out of business hours\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),\r\n # saturday\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),\r\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),\r\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),\r\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(4),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),\r\n datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),\r\n 
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),\r\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),\r\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),\r\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(-1),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),\r\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),\r\n datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),\r\n datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),\r\n datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),\r\n # out of business hours\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),\r\n # saturday\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),\r\n datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),\r\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),\r\n datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(-4),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),\r\n datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),\r\n datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),\r\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),\r\n datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(start=\"13:00\", end=\"16:00\"),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),\r\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),\r\n datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),\r\n datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),\r\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(n=2, start=\"13:00\", end=\"16:00\"),\r\n {\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),\r\n datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),\r\n datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),\r\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),\r\n datetime(2014, 7, 4, 14, 30): 
datetime(2014, 7, 7, 13, 30),\r\n datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(n=-1, start=\"13:00\", end=\"16:00\"),\r\n {\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),\r\n datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),\r\n datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),\r\n datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),\r\n datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),\r\n datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(n=-3, start=\"10:00\", end=\"16:00\"),\r\n {\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),\r\n datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),\r\n datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),\r\n datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),\r\n datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),\r\n datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),\r\n datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),\r\n datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(start=\"19:00\", end=\"05:00\"),\r\n {\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),\r\n datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),\r\n datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),\r\n datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),\r\n datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),\r\n datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),\r\n datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),\r\n datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),\r\n datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),\r\n datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(n=-1, start=\"19:00\", end=\"05:00\"),\r\n {\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),\r\n datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),\r\n datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),\r\n datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),\r\n datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),\r\n datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),\r\n datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),\r\n datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),\r\n datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),\r\n datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),\r\n datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n # long business hours (see gh-26381)\r\n apply_cases.append(\r\n (\r\n BusinessHour(n=4, start=\"00:00\", end=\"23:00\"),\r\n {\r\n datetime(2014, 7, 3, 22): datetime(2014, 7, 4, 3),\r\n 
datetime(2014, 7, 4, 22): datetime(2014, 7, 7, 3),\r\n datetime(2014, 7, 3, 22, 30): datetime(2014, 7, 4, 3, 30),\r\n datetime(2014, 7, 3, 22, 20): datetime(2014, 7, 4, 3, 20),\r\n datetime(2014, 7, 4, 22, 30, 30): datetime(2014, 7, 7, 3, 30, 30),\r\n datetime(2014, 7, 4, 22, 30, 20): datetime(2014, 7, 7, 3, 30, 20),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(n=-4, start=\"00:00\", end=\"23:00\"),\r\n {\r\n datetime(2014, 7, 4, 3): datetime(2014, 7, 3, 22),\r\n datetime(2014, 7, 7, 3): datetime(2014, 7, 4, 22),\r\n datetime(2014, 7, 4, 3, 30): datetime(2014, 7, 3, 22, 30),\r\n datetime(2014, 7, 4, 3, 20): datetime(2014, 7, 3, 22, 20),\r\n datetime(2014, 7, 7, 3, 30, 30): datetime(2014, 7, 4, 22, 30, 30),\r\n datetime(2014, 7, 7, 3, 30, 20): datetime(2014, 7, 4, 22, 30, 20),\r\n },\r\n )\r\n )\r\n\r\n # multiple business hours\r\n apply_cases.append(\r\n (\r\n BusinessHour(start=[\"09:00\", \"14:00\"], end=[\"12:00\", \"18:00\"]),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 17),\r\n datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 17, 30, 15),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 14),\r\n # out of business hours\r\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 15),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),\r\n # saturday\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),\r\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 9),\r\n datetime(2014, 7, 4, 17, 30): datetime(2014, 7, 7, 9, 30),\r\n datetime(2014, 7, 4, 17, 30, 30): datetime(2014, 7, 7, 9, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(n=4, start=[\"09:00\", \"14:00\"], end=[\"12:00\", \"18:00\"]),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 17),\r\n datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 10),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 11),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 14),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 17),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 15),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),\r\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),\r\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 11, 30),\r\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 11, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(n=-4, start=[\"09:00\", \"14:00\"], end=[\"12:00\", \"18:00\"]),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 16),\r\n datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),\r\n datetime(2014, 7, 1, 15): datetime(2014, 6, 30, 18),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 10),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 11),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 12),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 12),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 12),\r\n 
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 12),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 12),\r\n datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 12),\r\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 14, 30),\r\n datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 14, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n BusinessHour(n=-1, start=[\"19:00\", \"03:00\"], end=[\"01:00\", \"05:00\"]),\r\n {\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),\r\n datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),\r\n datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),\r\n datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),\r\n datetime(2014, 7, 2, 4): datetime(2014, 7, 2, 1),\r\n datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),\r\n datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),\r\n datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),\r\n datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),\r\n datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 0),\r\n datetime(2014, 7, 7, 3, 30): datetime(2014, 7, 5, 0, 30),\r\n datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 7, 4, 30),\r\n datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 7, 4, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", apply_cases)\r\n def test_apply(self, case):\r\n offset, cases = case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n apply_large_n_cases = []\r\n # A week later\r\n apply_large_n_cases.append(\r\n (\r\n BusinessHour(40),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),\r\n datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),\r\n datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),\r\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),\r\n datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n # 3 days and 1 hour before\r\n apply_large_n_cases.append(\r\n (\r\n BusinessHour(-25),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),\r\n datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),\r\n datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),\r\n datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),\r\n datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),\r\n datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),\r\n datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),\r\n datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),\r\n datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),\r\n datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n # 5 days and 3 hours later\r\n apply_large_n_cases.append(\r\n (\r\n BusinessHour(28, start=\"21:00\", end=\"02:00\"),\r\n {\r\n datetime(2014, 7, 1, 11): 
datetime(2014, 7, 9, 0),\r\n datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),\r\n datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),\r\n datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),\r\n datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),\r\n datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),\r\n datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),\r\n datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),\r\n datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),\r\n datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),\r\n datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),\r\n datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),\r\n },\r\n )\r\n )\r\n\r\n # large n for multiple opening hours (3 days and 1 hour before)\r\n apply_large_n_cases.append(\r\n (\r\n BusinessHour(n=-25, start=[\"09:00\", \"14:00\"], end=[\"12:00\", \"19:00\"]),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),\r\n datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 11),\r\n datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 18),\r\n datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 19),\r\n datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),\r\n datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 18),\r\n datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 18),\r\n datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 18),\r\n datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 18),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 18),\r\n datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 18),\r\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 18, 30),\r\n datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n # 5 days and 3 hours later\r\n apply_large_n_cases.append(\r\n (\r\n BusinessHour(28, start=[\"21:00\", \"03:00\"], end=[\"01:00\", \"04:00\"]),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),\r\n datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 3),\r\n datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),\r\n datetime(2014, 7, 2, 2): datetime(2014, 7, 9, 23),\r\n datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),\r\n datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),\r\n datetime(2014, 7, 4, 2): datetime(2014, 7, 11, 23),\r\n datetime(2014, 7, 4, 3): datetime(2014, 7, 11, 23),\r\n datetime(2014, 7, 4, 21): datetime(2014, 7, 12, 0),\r\n datetime(2014, 7, 5, 0): datetime(2014, 7, 14, 22),\r\n datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 23),\r\n datetime(2014, 7, 6, 18): datetime(2014, 7, 14, 23),\r\n datetime(2014, 7, 7, 1): datetime(2014, 7, 14, 23),\r\n datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", apply_large_n_cases)\r\n def test_apply_large_n(self, case):\r\n offset, cases = case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n def test_apply_nanoseconds(self):\r\n tests = []\r\n\r\n tests.append(\r\n (\r\n BusinessHour(),\r\n {\r\n Timestamp(\"2014-07-04 15:00\")\r\n + Nano(5): Timestamp(\"2014-07-04 16:00\")\r\n + Nano(5),\r\n Timestamp(\"2014-07-04 16:00\")\r\n + Nano(5): Timestamp(\"2014-07-07 09:00\")\r\n + Nano(5),\r\n Timestamp(\"2014-07-04 16:00\")\r\n - Nano(5): Timestamp(\"2014-07-04 17:00\")\r\n - Nano(5),\r\n },\r\n )\r\n )\r\n\r\n tests.append(\r\n (\r\n BusinessHour(-1),\r\n {\r\n Timestamp(\"2014-07-04 15:00\")\r\n + Nano(5): Timestamp(\"2014-07-04 14:00\")\r\n + Nano(5),\r\n Timestamp(\"2014-07-04 
10:00\")\r\n + Nano(5): Timestamp(\"2014-07-04 09:00\")\r\n + Nano(5),\r\n Timestamp(\"2014-07-04 10:00\")\r\n - Nano(5): Timestamp(\"2014-07-03 17:00\")\r\n - Nano(5),\r\n },\r\n )\r\n )\r\n\r\n for offset, cases in tests:\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n def test_datetimeindex(self):\r\n idx1 = date_range(start=\"2014-07-04 15:00\", end=\"2014-07-08 10:00\", freq=\"BH\")\r\n idx2 = date_range(start=\"2014-07-04 15:00\", periods=12, freq=\"BH\")\r\n idx3 = date_range(end=\"2014-07-08 10:00\", periods=12, freq=\"BH\")\r\n expected = DatetimeIndex(\r\n [\r\n \"2014-07-04 15:00\",\r\n \"2014-07-04 16:00\",\r\n \"2014-07-07 09:00\",\r\n \"2014-07-07 10:00\",\r\n \"2014-07-07 11:00\",\r\n \"2014-07-07 12:00\",\r\n \"2014-07-07 13:00\",\r\n \"2014-07-07 14:00\",\r\n \"2014-07-07 15:00\",\r\n \"2014-07-07 16:00\",\r\n \"2014-07-08 09:00\",\r\n \"2014-07-08 10:00\",\r\n ],\r\n freq=\"BH\",\r\n )\r\n for idx in [idx1, idx2, idx3]:\r\n tm.assert_index_equal(idx, expected)\r\n\r\n idx1 = date_range(start=\"2014-07-04 15:45\", end=\"2014-07-08 10:45\", freq=\"BH\")\r\n idx2 = date_range(start=\"2014-07-04 15:45\", periods=12, freq=\"BH\")\r\n idx3 = date_range(end=\"2014-07-08 10:45\", periods=12, freq=\"BH\")\r\n\r\n expected = DatetimeIndex(\r\n [\r\n \"2014-07-04 15:45\",\r\n \"2014-07-04 16:45\",\r\n \"2014-07-07 09:45\",\r\n \"2014-07-07 10:45\",\r\n \"2014-07-07 11:45\",\r\n \"2014-07-07 12:45\",\r\n \"2014-07-07 13:45\",\r\n \"2014-07-07 14:45\",\r\n \"2014-07-07 15:45\",\r\n \"2014-07-07 16:45\",\r\n \"2014-07-08 09:45\",\r\n \"2014-07-08 10:45\",\r\n ],\r\n freq=\"BH\",\r\n )\r\n expected = idx1\r\n for idx in [idx1, idx2, idx3]:\r\n tm.assert_index_equal(idx, expected)\r\n\r\n\r\nclass TestCustomBusinessHour(Base):\r\n _offset = CustomBusinessHour\r\n holidays = [\"2014-06-27\", datetime(2014, 6, 30), np.datetime64(\"2014-07-02\")]\r\n\r\n def setup_method(self, method):\r\n # 2014 Calendar to check custom holidays\r\n # Sun Mon Tue Wed Thu Fri Sat\r\n # 6/22 23 24 25 26 27 28\r\n # 29 30 7/1 2 3 4 5\r\n # 6 7 8 9 10 11 12\r\n self.d = datetime(2014, 7, 1, 10, 00)\r\n self.offset1 = CustomBusinessHour(weekmask=\"Tue Wed Thu Fri\")\r\n\r\n self.offset2 = CustomBusinessHour(holidays=self.holidays)\r\n\r\n def test_constructor_errors(self):\r\n from datetime import time as dt_time\r\n\r\n msg = \"time data must be specified only with hour and minute\"\r\n with pytest.raises(ValueError, match=msg):\r\n CustomBusinessHour(start=dt_time(11, 0, 5))\r\n msg = \"time data must match '%H:%M' format\"\r\n with pytest.raises(ValueError, match=msg):\r\n CustomBusinessHour(start=\"AAA\")\r\n msg = \"time data must match '%H:%M' format\"\r\n with pytest.raises(ValueError, match=msg):\r\n CustomBusinessHour(start=\"14:00:05\")\r\n\r\n def test_different_normalize_equals(self):\r\n # GH#21404 changed __eq__ to return False when `normalize` does not match\r\n offset = self._offset()\r\n offset2 = self._offset(normalize=True)\r\n assert offset != offset2\r\n\r\n def test_repr(self):\r\n assert repr(self.offset1) == \"<CustomBusinessHour: CBH=09:00-17:00>\"\r\n assert repr(self.offset2) == \"<CustomBusinessHour: CBH=09:00-17:00>\"\r\n\r\n def test_with_offset(self):\r\n expected = Timestamp(\"2014-07-01 13:00\")\r\n\r\n assert self.d + CustomBusinessHour() * 3 == expected\r\n assert self.d + CustomBusinessHour(n=3) == expected\r\n\r\n def test_eq(self):\r\n for offset in [self.offset1, self.offset2]:\r\n assert offset == offset\r\n\r\n assert 
CustomBusinessHour() != CustomBusinessHour(-1)\r\n assert CustomBusinessHour(start=\"09:00\") == CustomBusinessHour()\r\n assert CustomBusinessHour(start=\"09:00\") != CustomBusinessHour(start=\"09:01\")\r\n assert CustomBusinessHour(start=\"09:00\", end=\"17:00\") != CustomBusinessHour(\r\n start=\"17:00\", end=\"09:01\"\r\n )\r\n\r\n assert CustomBusinessHour(weekmask=\"Tue Wed Thu Fri\") != CustomBusinessHour(\r\n weekmask=\"Mon Tue Wed Thu Fri\"\r\n )\r\n assert CustomBusinessHour(holidays=[\"2014-06-27\"]) != CustomBusinessHour(\r\n holidays=[\"2014-06-28\"]\r\n )\r\n\r\n def test_sub(self):\r\n # override the Base.test_sub implementation because self.offset2 is\r\n # defined differently in this class than the test expects\r\n pass\r\n\r\n def test_hash(self):\r\n assert hash(self.offset1) == hash(self.offset1)\r\n assert hash(self.offset2) == hash(self.offset2)\r\n\r\n def test_call(self):\r\n with tm.assert_produces_warning(FutureWarning):\r\n # GH#34171 DateOffset.__call__ is deprecated\r\n assert self.offset1(self.d) == datetime(2014, 7, 1, 11)\r\n assert self.offset2(self.d) == datetime(2014, 7, 1, 11)\r\n\r\n def testRollback1(self):\r\n assert self.offset1.rollback(self.d) == self.d\r\n assert self.offset2.rollback(self.d) == self.d\r\n\r\n d = datetime(2014, 7, 1, 0)\r\n\r\n # 2014/07/01 is Tuesday, 06/30 is Monday(holiday)\r\n assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)\r\n\r\n # 2014/6/30 and 2014/6/27 are holidays\r\n assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)\r\n\r\n def testRollback2(self):\r\n assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(\r\n 2014, 7, 4, 17, 0\r\n )\r\n\r\n def testRollforward1(self):\r\n assert self.offset1.rollforward(self.d) == self.d\r\n assert self.offset2.rollforward(self.d) == self.d\r\n\r\n d = datetime(2014, 7, 1, 0)\r\n assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)\r\n assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)\r\n\r\n def testRollforward2(self):\r\n assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(\r\n 2014, 7, 7, 9\r\n )\r\n\r\n def test_roll_date_object(self):\r\n offset = BusinessHour()\r\n\r\n dt = datetime(2014, 7, 6, 15, 0)\r\n\r\n result = offset.rollback(dt)\r\n assert result == datetime(2014, 7, 4, 17)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2014, 7, 7, 9)\r\n\r\n normalize_cases = []\r\n normalize_cases.append(\r\n (\r\n CustomBusinessHour(normalize=True, holidays=holidays),\r\n {\r\n datetime(2014, 7, 1, 8): datetime(2014, 7, 1),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 3),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 3),\r\n datetime(2014, 7, 1, 23): datetime(2014, 7, 3),\r\n datetime(2014, 7, 1, 0): datetime(2014, 7, 1),\r\n datetime(2014, 7, 4, 15): datetime(2014, 7, 4),\r\n datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),\r\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),\r\n datetime(2014, 7, 5, 23): datetime(2014, 7, 7),\r\n datetime(2014, 7, 6, 10): datetime(2014, 7, 7),\r\n },\r\n )\r\n )\r\n\r\n normalize_cases.append(\r\n (\r\n CustomBusinessHour(-1, normalize=True, holidays=holidays),\r\n {\r\n datetime(2014, 7, 1, 8): datetime(2014, 6, 26),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1),\r\n datetime(2014, 7, 1, 10): datetime(2014, 6, 26),\r\n datetime(2014, 7, 1, 0): datetime(2014, 6, 26),\r\n datetime(2014, 7, 7, 10): datetime(2014, 7, 4),\r\n datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 
7),\r\n datetime(2014, 7, 5, 23): datetime(2014, 7, 4),\r\n datetime(2014, 7, 6, 10): datetime(2014, 7, 4),\r\n },\r\n )\r\n )\r\n\r\n normalize_cases.append(\r\n (\r\n CustomBusinessHour(\r\n 1, normalize=True, start=\"17:00\", end=\"04:00\", holidays=holidays\r\n ),\r\n {\r\n datetime(2014, 7, 1, 8): datetime(2014, 7, 1),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1),\r\n datetime(2014, 7, 1, 23): datetime(2014, 7, 2),\r\n datetime(2014, 7, 2, 2): datetime(2014, 7, 2),\r\n datetime(2014, 7, 2, 3): datetime(2014, 7, 3),\r\n datetime(2014, 7, 4, 23): datetime(2014, 7, 5),\r\n datetime(2014, 7, 5, 2): datetime(2014, 7, 5),\r\n datetime(2014, 7, 7, 2): datetime(2014, 7, 7),\r\n datetime(2014, 7, 7, 17): datetime(2014, 7, 7),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"norm_cases\", normalize_cases)\r\n def test_normalize(self, norm_cases):\r\n offset, cases = norm_cases\r\n for dt, expected in cases.items():\r\n assert offset.apply(dt) == expected\r\n\r\n def test_is_on_offset(self):\r\n tests = []\r\n\r\n tests.append(\r\n (\r\n CustomBusinessHour(start=\"10:00\", end=\"15:00\", holidays=self.holidays),\r\n {\r\n datetime(2014, 7, 1, 9): False,\r\n datetime(2014, 7, 1, 10): True,\r\n datetime(2014, 7, 1, 15): True,\r\n datetime(2014, 7, 1, 15, 1): False,\r\n datetime(2014, 7, 5, 12): False,\r\n datetime(2014, 7, 6, 12): False,\r\n },\r\n )\r\n )\r\n\r\n for offset, cases in tests:\r\n for dt, expected in cases.items():\r\n assert offset.is_on_offset(dt) == expected\r\n\r\n apply_cases = []\r\n apply_cases.append(\r\n (\r\n CustomBusinessHour(holidays=holidays),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),\r\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),\r\n datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),\r\n datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),\r\n # out of business hours\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),\r\n # saturday\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),\r\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),\r\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),\r\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n CustomBusinessHour(4, holidays=holidays),\r\n {\r\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),\r\n datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),\r\n datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),\r\n datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),\r\n datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),\r\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),\r\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),\r\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),\r\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),\r\n },\r\n )\r\n )\r\n\r\n 
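    # Illustrative sketch (not one of the parametrized cases above; the helper
    # name is made up for illustration only). The apply_cases entries encode how
    # CustomBusinessHour(holidays=...) rolls a timestamp forward: landing on the
    # 17:00 close moves to the next opening hour, and listed holidays are
    # skipped. The expected value mirrors the case
    # `datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9)` above.
    def _example_custom_business_hour(self):
        cbh = CustomBusinessHour(holidays=self.holidays)
        # 2014-07-01 16:00 + 1 business hour hits the close, and 2014-07-02 is a
        # listed holiday, so the result is the next opening hour on 2014-07-03.
        assert datetime(2014, 7, 1, 16) + cbh == datetime(2014, 7, 3, 9)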
@pytest.mark.parametrize(\"apply_case\", apply_cases)\r\n def test_apply(self, apply_case):\r\n offset, cases = apply_case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n nano_cases = []\r\n nano_cases.append(\r\n (\r\n CustomBusinessHour(holidays=holidays),\r\n {\r\n Timestamp(\"2014-07-01 15:00\")\r\n + Nano(5): Timestamp(\"2014-07-01 16:00\")\r\n + Nano(5),\r\n Timestamp(\"2014-07-01 16:00\")\r\n + Nano(5): Timestamp(\"2014-07-03 09:00\")\r\n + Nano(5),\r\n Timestamp(\"2014-07-01 16:00\")\r\n - Nano(5): Timestamp(\"2014-07-01 17:00\")\r\n - Nano(5),\r\n },\r\n )\r\n )\r\n\r\n nano_cases.append(\r\n (\r\n CustomBusinessHour(-1, holidays=holidays),\r\n {\r\n Timestamp(\"2014-07-01 15:00\")\r\n + Nano(5): Timestamp(\"2014-07-01 14:00\")\r\n + Nano(5),\r\n Timestamp(\"2014-07-01 10:00\")\r\n + Nano(5): Timestamp(\"2014-07-01 09:00\")\r\n + Nano(5),\r\n Timestamp(\"2014-07-01 10:00\")\r\n - Nano(5): Timestamp(\"2014-06-26 17:00\")\r\n - Nano(5),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"nano_case\", nano_cases)\r\n def test_apply_nanoseconds(self, nano_case):\r\n offset, cases = nano_case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n\r\nclass TestCustomBusinessDay(Base):\r\n _offset = CDay\r\n\r\n def setup_method(self, method):\r\n self.d = datetime(2008, 1, 1)\r\n self.nd = np_datetime64_compat(\"2008-01-01 00:00:00Z\")\r\n\r\n self.offset = CDay()\r\n self.offset1 = self.offset\r\n self.offset2 = CDay(2)\r\n\r\n def test_different_normalize_equals(self):\r\n # GH#21404 changed __eq__ to return False when `normalize` does not match\r\n offset = self._offset()\r\n offset2 = self._offset(normalize=True)\r\n assert offset != offset2\r\n\r\n def test_repr(self):\r\n assert repr(self.offset) == \"<CustomBusinessDay>\"\r\n assert repr(self.offset2) == \"<2 * CustomBusinessDays>\"\r\n\r\n expected = \"<BusinessDay: offset=datetime.timedelta(days=1)>\"\r\n assert repr(self.offset + timedelta(1)) == expected\r\n\r\n def test_with_offset(self):\r\n offset = self.offset + timedelta(hours=2)\r\n\r\n assert (self.d + offset) == datetime(2008, 1, 2, 2)\r\n\r\n def test_with_offset_index(self):\r\n dti = DatetimeIndex([self.d])\r\n result = dti + (self.offset + timedelta(hours=2))\r\n\r\n expected = DatetimeIndex([datetime(2008, 1, 2, 2)])\r\n tm.assert_index_equal(result, expected)\r\n\r\n def test_eq(self):\r\n assert self.offset2 == self.offset2\r\n\r\n def test_mul(self):\r\n pass\r\n\r\n def test_hash(self):\r\n assert hash(self.offset2) == hash(self.offset2)\r\n\r\n def test_call(self):\r\n with tm.assert_produces_warning(FutureWarning):\r\n # GH#34171 DateOffset.__call__ is deprecated\r\n assert self.offset2(self.d) == datetime(2008, 1, 3)\r\n assert self.offset2(self.nd) == datetime(2008, 1, 3)\r\n\r\n def testRollback1(self):\r\n assert CDay(10).rollback(self.d) == self.d\r\n\r\n def testRollback2(self):\r\n assert CDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)\r\n\r\n def testRollforward1(self):\r\n assert CDay(10).rollforward(self.d) == self.d\r\n\r\n def testRollforward2(self):\r\n assert CDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)\r\n\r\n def test_roll_date_object(self):\r\n offset = CDay()\r\n\r\n dt = date(2012, 9, 15)\r\n\r\n result = offset.rollback(dt)\r\n assert result == datetime(2012, 9, 14)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2012, 9, 17)\r\n\r\n offset = offsets.Day()\r\n result = 
offset.rollback(dt)\r\n assert result == datetime(2012, 9, 15)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2012, 9, 15)\r\n\r\n on_offset_cases = [\r\n (CDay(), datetime(2008, 1, 1), True),\r\n (CDay(), datetime(2008, 1, 5), False),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"case\", on_offset_cases)\r\n def test_is_on_offset(self, case):\r\n offset, d, expected = case\r\n assert_is_on_offset(offset, d, expected)\r\n\r\n apply_cases: _ApplyCases = []\r\n apply_cases.append(\r\n (\r\n CDay(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 2),\r\n datetime(2008, 1, 4): datetime(2008, 1, 7),\r\n datetime(2008, 1, 5): datetime(2008, 1, 7),\r\n datetime(2008, 1, 6): datetime(2008, 1, 7),\r\n datetime(2008, 1, 7): datetime(2008, 1, 8),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n 2 * CDay(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 3),\r\n datetime(2008, 1, 4): datetime(2008, 1, 8),\r\n datetime(2008, 1, 5): datetime(2008, 1, 8),\r\n datetime(2008, 1, 6): datetime(2008, 1, 8),\r\n datetime(2008, 1, 7): datetime(2008, 1, 9),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n -CDay(),\r\n {\r\n datetime(2008, 1, 1): datetime(2007, 12, 31),\r\n datetime(2008, 1, 4): datetime(2008, 1, 3),\r\n datetime(2008, 1, 5): datetime(2008, 1, 4),\r\n datetime(2008, 1, 6): datetime(2008, 1, 4),\r\n datetime(2008, 1, 7): datetime(2008, 1, 4),\r\n datetime(2008, 1, 8): datetime(2008, 1, 7),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n -2 * CDay(),\r\n {\r\n datetime(2008, 1, 1): datetime(2007, 12, 28),\r\n datetime(2008, 1, 4): datetime(2008, 1, 2),\r\n datetime(2008, 1, 5): datetime(2008, 1, 3),\r\n datetime(2008, 1, 6): datetime(2008, 1, 3),\r\n datetime(2008, 1, 7): datetime(2008, 1, 3),\r\n datetime(2008, 1, 8): datetime(2008, 1, 4),\r\n datetime(2008, 1, 9): datetime(2008, 1, 7),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n CDay(0),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 1),\r\n datetime(2008, 1, 4): datetime(2008, 1, 4),\r\n datetime(2008, 1, 5): datetime(2008, 1, 7),\r\n datetime(2008, 1, 6): datetime(2008, 1, 7),\r\n datetime(2008, 1, 7): datetime(2008, 1, 7),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", apply_cases)\r\n def test_apply(self, case):\r\n offset, cases = case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n def test_apply_large_n(self):\r\n dt = datetime(2012, 10, 23)\r\n\r\n result = dt + CDay(10)\r\n assert result == datetime(2012, 11, 6)\r\n\r\n result = dt + CDay(100) - CDay(100)\r\n assert result == dt\r\n\r\n off = CDay() * 6\r\n rs = datetime(2012, 1, 1) - off\r\n xp = datetime(2011, 12, 23)\r\n assert rs == xp\r\n\r\n st = datetime(2011, 12, 18)\r\n rs = st + off\r\n xp = datetime(2011, 12, 26)\r\n assert rs == xp\r\n\r\n def test_apply_corner(self):\r\n msg = (\r\n \"Only know how to combine trading day \"\r\n \"with datetime, datetime64 or timedelta\"\r\n )\r\n with pytest.raises(ApplyTypeError, match=msg):\r\n CDay().apply(BMonthEnd())\r\n\r\n def test_holidays(self):\r\n # Define a TradingDay offset\r\n holidays = [\"2012-05-01\", datetime(2013, 5, 1), np.datetime64(\"2014-05-01\")]\r\n tday = CDay(holidays=holidays)\r\n for year in range(2012, 2015):\r\n dt = datetime(year, 4, 30)\r\n xp = datetime(year, 5, 2)\r\n rs = dt + tday\r\n assert rs == xp\r\n\r\n def test_weekmask(self):\r\n weekmask_saudi = \"Sat Sun Mon Tue Wed\" # Thu-Fri Weekend\r\n weekmask_uae = \"1111001\" # Fri-Sat Weekend\r\n weekmask_egypt = [1, 1, 1, 1, 0, 
0, 1] # Fri-Sat Weekend\r\n bday_saudi = CDay(weekmask=weekmask_saudi)\r\n bday_uae = CDay(weekmask=weekmask_uae)\r\n bday_egypt = CDay(weekmask=weekmask_egypt)\r\n dt = datetime(2013, 5, 1)\r\n xp_saudi = datetime(2013, 5, 4)\r\n xp_uae = datetime(2013, 5, 2)\r\n xp_egypt = datetime(2013, 5, 2)\r\n assert xp_saudi == dt + bday_saudi\r\n assert xp_uae == dt + bday_uae\r\n assert xp_egypt == dt + bday_egypt\r\n xp2 = datetime(2013, 5, 5)\r\n assert xp2 == dt + 2 * bday_saudi\r\n assert xp2 == dt + 2 * bday_uae\r\n assert xp2 == dt + 2 * bday_egypt\r\n\r\n def test_weekmask_and_holidays(self):\r\n weekmask_egypt = \"Sun Mon Tue Wed Thu\" # Fri-Sat Weekend\r\n holidays = [\"2012-05-01\", datetime(2013, 5, 1), np.datetime64(\"2014-05-01\")]\r\n bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)\r\n dt = datetime(2013, 4, 30)\r\n xp_egypt = datetime(2013, 5, 5)\r\n assert xp_egypt == dt + 2 * bday_egypt\r\n\r\n @pytest.mark.filterwarnings(\"ignore:Non:pandas.errors.PerformanceWarning\")\r\n def test_calendar(self):\r\n calendar = USFederalHolidayCalendar()\r\n dt = datetime(2014, 1, 17)\r\n assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))\r\n\r\n def test_roundtrip_pickle(self):\r\n def _check_roundtrip(obj):\r\n unpickled = tm.round_trip_pickle(obj)\r\n assert unpickled == obj\r\n\r\n _check_roundtrip(self.offset)\r\n _check_roundtrip(self.offset2)\r\n _check_roundtrip(self.offset * 2)\r\n\r\n def test_pickle_compat_0_14_1(self, datapath):\r\n hdays = [datetime(2013, 1, 1) for ele in range(4)]\r\n pth = datapath(\"tseries\", \"offsets\", \"data\", \"cday-0.14.1.pickle\")\r\n cday0_14_1 = read_pickle(pth)\r\n cday = CDay(holidays=hdays)\r\n assert cday == cday0_14_1\r\n\r\n\r\nclass CustomBusinessMonthBase:\r\n def setup_method(self, method):\r\n self.d = datetime(2008, 1, 1)\r\n\r\n self.offset = self._offset()\r\n self.offset1 = self.offset\r\n self.offset2 = self._offset(2)\r\n\r\n def test_eq(self):\r\n assert self.offset2 == self.offset2\r\n\r\n def test_mul(self):\r\n pass\r\n\r\n def test_hash(self):\r\n assert hash(self.offset2) == hash(self.offset2)\r\n\r\n def test_roundtrip_pickle(self):\r\n def _check_roundtrip(obj):\r\n unpickled = tm.round_trip_pickle(obj)\r\n assert unpickled == obj\r\n\r\n _check_roundtrip(self._offset())\r\n _check_roundtrip(self._offset(2))\r\n _check_roundtrip(self._offset() * 2)\r\n\r\n def test_copy(self):\r\n # GH 17452\r\n off = self._offset(weekmask=\"Mon Wed Fri\")\r\n assert off == off.copy()\r\n\r\n\r\nclass TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):\r\n _offset = CBMonthEnd\r\n\r\n def test_different_normalize_equals(self):\r\n # GH#21404 changed __eq__ to return False when `normalize` does not match\r\n offset = self._offset()\r\n offset2 = self._offset(normalize=True)\r\n assert offset != offset2\r\n\r\n def test_repr(self):\r\n assert repr(self.offset) == \"<CustomBusinessMonthEnd>\"\r\n assert repr(self.offset2) == \"<2 * CustomBusinessMonthEnds>\"\r\n\r\n def test_call(self):\r\n with tm.assert_produces_warning(FutureWarning):\r\n # GH#34171 DateOffset.__call__ is deprecated\r\n assert self.offset2(self.d) == datetime(2008, 2, 29)\r\n\r\n def testRollback1(self):\r\n assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)\r\n\r\n def testRollback2(self):\r\n assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)\r\n\r\n def testRollforward1(self):\r\n assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)\r\n\r\n def test_roll_date_object(self):\r\n offset 
= CBMonthEnd()\r\n\r\n dt = date(2012, 9, 15)\r\n\r\n result = offset.rollback(dt)\r\n assert result == datetime(2012, 8, 31)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2012, 9, 28)\r\n\r\n offset = offsets.Day()\r\n result = offset.rollback(dt)\r\n assert result == datetime(2012, 9, 15)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2012, 9, 15)\r\n\r\n on_offset_cases = [\r\n (CBMonthEnd(), datetime(2008, 1, 31), True),\r\n (CBMonthEnd(), datetime(2008, 1, 1), False),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"case\", on_offset_cases)\r\n def test_is_on_offset(self, case):\r\n offset, d, expected = case\r\n assert_is_on_offset(offset, d, expected)\r\n\r\n apply_cases: _ApplyCases = []\r\n apply_cases.append(\r\n (\r\n CBMonthEnd(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 31),\r\n datetime(2008, 2, 7): datetime(2008, 2, 29),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n 2 * CBMonthEnd(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 2, 29),\r\n datetime(2008, 2, 7): datetime(2008, 3, 31),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n -CBMonthEnd(),\r\n {\r\n datetime(2008, 1, 1): datetime(2007, 12, 31),\r\n datetime(2008, 2, 8): datetime(2008, 1, 31),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n -2 * CBMonthEnd(),\r\n {\r\n datetime(2008, 1, 1): datetime(2007, 11, 30),\r\n datetime(2008, 2, 9): datetime(2007, 12, 31),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n CBMonthEnd(0),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 31),\r\n datetime(2008, 2, 7): datetime(2008, 2, 29),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", apply_cases)\r\n def test_apply(self, case):\r\n offset, cases = case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n def test_apply_large_n(self):\r\n dt = datetime(2012, 10, 23)\r\n\r\n result = dt + CBMonthEnd(10)\r\n assert result == datetime(2013, 7, 31)\r\n\r\n result = dt + CDay(100) - CDay(100)\r\n assert result == dt\r\n\r\n off = CBMonthEnd() * 6\r\n rs = datetime(2012, 1, 1) - off\r\n xp = datetime(2011, 7, 29)\r\n assert rs == xp\r\n\r\n st = datetime(2011, 12, 18)\r\n rs = st + off\r\n xp = datetime(2012, 5, 31)\r\n assert rs == xp\r\n\r\n def test_holidays(self):\r\n # Define a TradingDay offset\r\n holidays = [\"2012-01-31\", datetime(2012, 2, 28), np.datetime64(\"2012-02-29\")]\r\n bm_offset = CBMonthEnd(holidays=holidays)\r\n dt = datetime(2012, 1, 1)\r\n assert dt + bm_offset == datetime(2012, 1, 30)\r\n assert dt + 2 * bm_offset == datetime(2012, 2, 27)\r\n\r\n @pytest.mark.filterwarnings(\"ignore:Non:pandas.errors.PerformanceWarning\")\r\n def test_datetimeindex(self):\r\n from pandas.tseries.holiday import USFederalHolidayCalendar\r\n\r\n hcal = USFederalHolidayCalendar()\r\n freq = CBMonthEnd(calendar=hcal)\r\n\r\n assert date_range(start=\"20120101\", end=\"20130101\", freq=freq).tolist()[\r\n 0\r\n ] == datetime(2012, 1, 31)\r\n\r\n\r\nclass TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):\r\n _offset = CBMonthBegin\r\n\r\n def test_different_normalize_equals(self):\r\n # GH#21404 changed __eq__ to return False when `normalize` does not match\r\n offset = self._offset()\r\n offset2 = self._offset(normalize=True)\r\n assert offset != offset2\r\n\r\n def test_repr(self):\r\n assert repr(self.offset) == \"<CustomBusinessMonthBegin>\"\r\n assert repr(self.offset2) == \"<2 * CustomBusinessMonthBegins>\"\r\n\r\n def test_call(self):\r\n with 
tm.assert_produces_warning(FutureWarning):\r\n # GH#34171 DateOffset.__call__ is deprecated\r\n assert self.offset2(self.d) == datetime(2008, 3, 3)\r\n\r\n def testRollback1(self):\r\n assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)\r\n\r\n def testRollback2(self):\r\n assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)\r\n\r\n def testRollforward1(self):\r\n assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)\r\n\r\n def test_roll_date_object(self):\r\n offset = CBMonthBegin()\r\n\r\n dt = date(2012, 9, 15)\r\n\r\n result = offset.rollback(dt)\r\n assert result == datetime(2012, 9, 3)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2012, 10, 1)\r\n\r\n offset = offsets.Day()\r\n result = offset.rollback(dt)\r\n assert result == datetime(2012, 9, 15)\r\n\r\n result = offset.rollforward(dt)\r\n assert result == datetime(2012, 9, 15)\r\n\r\n on_offset_cases = [\r\n (CBMonthBegin(), datetime(2008, 1, 1), True),\r\n (CBMonthBegin(), datetime(2008, 1, 31), False),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"case\", on_offset_cases)\r\n def test_is_on_offset(self, case):\r\n offset, dt, expected = case\r\n assert_is_on_offset(offset, dt, expected)\r\n\r\n apply_cases: _ApplyCases = []\r\n apply_cases.append(\r\n (\r\n CBMonthBegin(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 2, 1),\r\n datetime(2008, 2, 7): datetime(2008, 3, 3),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n 2 * CBMonthBegin(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 3, 3),\r\n datetime(2008, 2, 7): datetime(2008, 4, 1),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n -CBMonthBegin(),\r\n {\r\n datetime(2008, 1, 1): datetime(2007, 12, 3),\r\n datetime(2008, 2, 8): datetime(2008, 2, 1),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n -2 * CBMonthBegin(),\r\n {\r\n datetime(2008, 1, 1): datetime(2007, 11, 1),\r\n datetime(2008, 2, 9): datetime(2008, 1, 1),\r\n },\r\n )\r\n )\r\n\r\n apply_cases.append(\r\n (\r\n CBMonthBegin(0),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 1),\r\n datetime(2008, 1, 7): datetime(2008, 2, 1),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", apply_cases)\r\n def test_apply(self, case):\r\n offset, cases = case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n def test_apply_large_n(self):\r\n dt = datetime(2012, 10, 23)\r\n\r\n result = dt + CBMonthBegin(10)\r\n assert result == datetime(2013, 8, 1)\r\n\r\n result = dt + CDay(100) - CDay(100)\r\n assert result == dt\r\n\r\n off = CBMonthBegin() * 6\r\n rs = datetime(2012, 1, 1) - off\r\n xp = datetime(2011, 7, 1)\r\n assert rs == xp\r\n\r\n st = datetime(2011, 12, 18)\r\n rs = st + off\r\n\r\n xp = datetime(2012, 6, 1)\r\n assert rs == xp\r\n\r\n def test_holidays(self):\r\n # Define a TradingDay offset\r\n holidays = [\"2012-02-01\", datetime(2012, 2, 2), np.datetime64(\"2012-03-01\")]\r\n bm_offset = CBMonthBegin(holidays=holidays)\r\n dt = datetime(2012, 1, 1)\r\n\r\n assert dt + bm_offset == datetime(2012, 1, 2)\r\n assert dt + 2 * bm_offset == datetime(2012, 2, 3)\r\n\r\n @pytest.mark.filterwarnings(\"ignore:Non:pandas.errors.PerformanceWarning\")\r\n def test_datetimeindex(self):\r\n hcal = USFederalHolidayCalendar()\r\n cbmb = CBMonthBegin(calendar=hcal)\r\n assert date_range(start=\"20120101\", end=\"20130101\", freq=cbmb).tolist()[\r\n 0\r\n ] == datetime(2012, 1, 3)\r\n\r\n\r\nclass TestWeek(Base):\r\n _offset = Week\r\n d = Timestamp(datetime(2008, 1, 2))\r\n 
offset1 = _offset()\r\n offset2 = _offset(2)\r\n\r\n def test_repr(self):\r\n assert repr(Week(weekday=0)) == \"<Week: weekday=0>\"\r\n assert repr(Week(n=-1, weekday=0)) == \"<-1 * Week: weekday=0>\"\r\n assert repr(Week(n=-2, weekday=0)) == \"<-2 * Weeks: weekday=0>\"\r\n\r\n def test_corner(self):\r\n with pytest.raises(ValueError, match=\"Day must be\"):\r\n Week(weekday=7)\r\n\r\n with pytest.raises(ValueError, match=\"Day must be\"):\r\n Week(weekday=-1)\r\n\r\n def test_is_anchored(self):\r\n assert Week(weekday=0).is_anchored()\r\n assert not Week().is_anchored()\r\n assert not Week(2, weekday=2).is_anchored()\r\n assert not Week(2).is_anchored()\r\n\r\n offset_cases = []\r\n # not business week\r\n offset_cases.append(\r\n (\r\n Week(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 8),\r\n datetime(2008, 1, 4): datetime(2008, 1, 11),\r\n datetime(2008, 1, 5): datetime(2008, 1, 12),\r\n datetime(2008, 1, 6): datetime(2008, 1, 13),\r\n datetime(2008, 1, 7): datetime(2008, 1, 14),\r\n },\r\n )\r\n )\r\n\r\n # Mon\r\n offset_cases.append(\r\n (\r\n Week(weekday=0),\r\n {\r\n datetime(2007, 12, 31): datetime(2008, 1, 7),\r\n datetime(2008, 1, 4): datetime(2008, 1, 7),\r\n datetime(2008, 1, 5): datetime(2008, 1, 7),\r\n datetime(2008, 1, 6): datetime(2008, 1, 7),\r\n datetime(2008, 1, 7): datetime(2008, 1, 14),\r\n },\r\n )\r\n )\r\n\r\n # n=0 -> roll forward. Mon\r\n offset_cases.append(\r\n (\r\n Week(0, weekday=0),\r\n {\r\n datetime(2007, 12, 31): datetime(2007, 12, 31),\r\n datetime(2008, 1, 4): datetime(2008, 1, 7),\r\n datetime(2008, 1, 5): datetime(2008, 1, 7),\r\n datetime(2008, 1, 6): datetime(2008, 1, 7),\r\n datetime(2008, 1, 7): datetime(2008, 1, 7),\r\n },\r\n )\r\n )\r\n\r\n # n=0 -> roll forward. Mon\r\n offset_cases.append(\r\n (\r\n Week(-2, weekday=1),\r\n {\r\n datetime(2010, 4, 6): datetime(2010, 3, 23),\r\n datetime(2010, 4, 8): datetime(2010, 3, 30),\r\n datetime(2010, 4, 5): datetime(2010, 3, 23),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", offset_cases)\r\n def test_offset(self, case):\r\n offset, cases = case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n @pytest.mark.parametrize(\"weekday\", range(7))\r\n def test_is_on_offset(self, weekday):\r\n offset = Week(weekday=weekday)\r\n\r\n for day in range(1, 8):\r\n date = datetime(2008, 1, day)\r\n\r\n if day % 7 == weekday:\r\n expected = True\r\n else:\r\n expected = False\r\n assert_is_on_offset(offset, date, expected)\r\n\r\n\r\nclass TestWeekOfMonth(Base):\r\n _offset = WeekOfMonth\r\n offset1 = _offset()\r\n offset2 = _offset(2)\r\n\r\n def test_constructor(self):\r\n with pytest.raises(ValueError, match=\"^Week\"):\r\n WeekOfMonth(n=1, week=4, weekday=0)\r\n\r\n with pytest.raises(ValueError, match=\"^Week\"):\r\n WeekOfMonth(n=1, week=-1, weekday=0)\r\n\r\n with pytest.raises(ValueError, match=\"^Day\"):\r\n WeekOfMonth(n=1, week=0, weekday=-1)\r\n\r\n with pytest.raises(ValueError, match=\"^Day\"):\r\n WeekOfMonth(n=1, week=0, weekday=-7)\r\n\r\n def test_repr(self):\r\n assert (\r\n repr(WeekOfMonth(weekday=1, week=2)) == \"<WeekOfMonth: week=2, weekday=1>\"\r\n )\r\n\r\n def test_offset(self):\r\n date1 = datetime(2011, 1, 4) # 1st Tuesday of Month\r\n date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month\r\n date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month\r\n date4 = datetime(2011, 1, 25) # 4th Tuesday of Month\r\n\r\n # see for loop for structure\r\n test_cases = [\r\n (-2, 2, 1, date1, datetime(2010, 11, 16)),\r\n (-2, 2, 1, 
date2, datetime(2010, 11, 16)),\r\n (-2, 2, 1, date3, datetime(2010, 11, 16)),\r\n (-2, 2, 1, date4, datetime(2010, 12, 21)),\r\n (-1, 2, 1, date1, datetime(2010, 12, 21)),\r\n (-1, 2, 1, date2, datetime(2010, 12, 21)),\r\n (-1, 2, 1, date3, datetime(2010, 12, 21)),\r\n (-1, 2, 1, date4, datetime(2011, 1, 18)),\r\n (0, 0, 1, date1, datetime(2011, 1, 4)),\r\n (0, 0, 1, date2, datetime(2011, 2, 1)),\r\n (0, 0, 1, date3, datetime(2011, 2, 1)),\r\n (0, 0, 1, date4, datetime(2011, 2, 1)),\r\n (0, 1, 1, date1, datetime(2011, 1, 11)),\r\n (0, 1, 1, date2, datetime(2011, 1, 11)),\r\n (0, 1, 1, date3, datetime(2011, 2, 8)),\r\n (0, 1, 1, date4, datetime(2011, 2, 8)),\r\n (0, 0, 1, date1, datetime(2011, 1, 4)),\r\n (0, 1, 1, date2, datetime(2011, 1, 11)),\r\n (0, 2, 1, date3, datetime(2011, 1, 18)),\r\n (0, 3, 1, date4, datetime(2011, 1, 25)),\r\n (1, 0, 0, date1, datetime(2011, 2, 7)),\r\n (1, 0, 0, date2, datetime(2011, 2, 7)),\r\n (1, 0, 0, date3, datetime(2011, 2, 7)),\r\n (1, 0, 0, date4, datetime(2011, 2, 7)),\r\n (1, 0, 1, date1, datetime(2011, 2, 1)),\r\n (1, 0, 1, date2, datetime(2011, 2, 1)),\r\n (1, 0, 1, date3, datetime(2011, 2, 1)),\r\n (1, 0, 1, date4, datetime(2011, 2, 1)),\r\n (1, 0, 2, date1, datetime(2011, 1, 5)),\r\n (1, 0, 2, date2, datetime(2011, 2, 2)),\r\n (1, 0, 2, date3, datetime(2011, 2, 2)),\r\n (1, 0, 2, date4, datetime(2011, 2, 2)),\r\n (1, 2, 1, date1, datetime(2011, 1, 18)),\r\n (1, 2, 1, date2, datetime(2011, 1, 18)),\r\n (1, 2, 1, date3, datetime(2011, 2, 15)),\r\n (1, 2, 1, date4, datetime(2011, 2, 15)),\r\n (2, 2, 1, date1, datetime(2011, 2, 15)),\r\n (2, 2, 1, date2, datetime(2011, 2, 15)),\r\n (2, 2, 1, date3, datetime(2011, 3, 15)),\r\n (2, 2, 1, date4, datetime(2011, 3, 15)),\r\n ]\r\n\r\n for n, week, weekday, dt, expected in test_cases:\r\n offset = WeekOfMonth(n, week=week, weekday=weekday)\r\n assert_offset_equal(offset, dt, expected)\r\n\r\n # try subtracting\r\n result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)\r\n assert result == datetime(2011, 1, 12)\r\n\r\n result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)\r\n assert result == datetime(2011, 2, 2)\r\n\r\n on_offset_cases = [\r\n (0, 0, datetime(2011, 2, 7), True),\r\n (0, 0, datetime(2011, 2, 6), False),\r\n (0, 0, datetime(2011, 2, 14), False),\r\n (1, 0, datetime(2011, 2, 14), True),\r\n (0, 1, datetime(2011, 2, 1), True),\r\n (0, 1, datetime(2011, 2, 8), False),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"case\", on_offset_cases)\r\n def test_is_on_offset(self, case):\r\n week, weekday, dt, expected = case\r\n offset = WeekOfMonth(week=week, weekday=weekday)\r\n assert offset.is_on_offset(dt) == expected\r\n\r\n\r\nclass TestLastWeekOfMonth(Base):\r\n _offset = LastWeekOfMonth\r\n offset1 = _offset()\r\n offset2 = _offset(2)\r\n\r\n def test_constructor(self):\r\n with pytest.raises(ValueError, match=\"^N cannot be 0\"):\r\n LastWeekOfMonth(n=0, weekday=1)\r\n\r\n with pytest.raises(ValueError, match=\"^Day\"):\r\n LastWeekOfMonth(n=1, weekday=-1)\r\n\r\n with pytest.raises(ValueError, match=\"^Day\"):\r\n LastWeekOfMonth(n=1, weekday=7)\r\n\r\n def test_offset(self):\r\n # Saturday\r\n last_sat = datetime(2013, 8, 31)\r\n next_sat = datetime(2013, 9, 28)\r\n offset_sat = LastWeekOfMonth(n=1, weekday=5)\r\n\r\n one_day_before = last_sat + timedelta(days=-1)\r\n assert one_day_before + offset_sat == last_sat\r\n\r\n one_day_after = last_sat + timedelta(days=+1)\r\n assert one_day_after + offset_sat == next_sat\r\n\r\n # Test On that day\r\n assert last_sat + offset_sat == 
next_sat\r\n\r\n # Thursday\r\n\r\n offset_thur = LastWeekOfMonth(n=1, weekday=3)\r\n last_thurs = datetime(2013, 1, 31)\r\n next_thurs = datetime(2013, 2, 28)\r\n\r\n one_day_before = last_thurs + timedelta(days=-1)\r\n assert one_day_before + offset_thur == last_thurs\r\n\r\n one_day_after = last_thurs + timedelta(days=+1)\r\n assert one_day_after + offset_thur == next_thurs\r\n\r\n # Test on that day\r\n assert last_thurs + offset_thur == next_thurs\r\n\r\n three_before = last_thurs + timedelta(days=-3)\r\n assert three_before + offset_thur == last_thurs\r\n\r\n two_after = last_thurs + timedelta(days=+2)\r\n assert two_after + offset_thur == next_thurs\r\n\r\n offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)\r\n assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)\r\n\r\n on_offset_cases = [\r\n (WeekDay.SUN, datetime(2013, 1, 27), True),\r\n (WeekDay.SAT, datetime(2013, 3, 30), True),\r\n (WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon\r\n (WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN\r\n (WeekDay.MON, datetime(2013, 2, 25), True),\r\n (WeekDay.SAT, datetime(2013, 11, 30), True),\r\n (WeekDay.SAT, datetime(2006, 8, 26), True),\r\n (WeekDay.SAT, datetime(2007, 8, 25), True),\r\n (WeekDay.SAT, datetime(2008, 8, 30), True),\r\n (WeekDay.SAT, datetime(2009, 8, 29), True),\r\n (WeekDay.SAT, datetime(2010, 8, 28), True),\r\n (WeekDay.SAT, datetime(2011, 8, 27), True),\r\n (WeekDay.SAT, datetime(2019, 8, 31), True),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"case\", on_offset_cases)\r\n def test_is_on_offset(self, case):\r\n weekday, dt, expected = case\r\n offset = LastWeekOfMonth(weekday=weekday)\r\n assert offset.is_on_offset(dt) == expected\r\n\r\n def test_repr(self):\r\n assert (\r\n repr(LastWeekOfMonth(n=2, weekday=1)) == \"<2 * LastWeekOfMonths: weekday=1>\"\r\n )\r\n\r\n\r\nclass TestSemiMonthEnd(Base):\r\n _offset = SemiMonthEnd\r\n offset1 = _offset()\r\n offset2 = _offset(2)\r\n\r\n def test_offset_whole_year(self):\r\n dates = (\r\n datetime(2007, 12, 31),\r\n datetime(2008, 1, 15),\r\n datetime(2008, 1, 31),\r\n datetime(2008, 2, 15),\r\n datetime(2008, 2, 29),\r\n datetime(2008, 3, 15),\r\n datetime(2008, 3, 31),\r\n datetime(2008, 4, 15),\r\n datetime(2008, 4, 30),\r\n datetime(2008, 5, 15),\r\n datetime(2008, 5, 31),\r\n datetime(2008, 6, 15),\r\n datetime(2008, 6, 30),\r\n datetime(2008, 7, 15),\r\n datetime(2008, 7, 31),\r\n datetime(2008, 8, 15),\r\n datetime(2008, 8, 31),\r\n datetime(2008, 9, 15),\r\n datetime(2008, 9, 30),\r\n datetime(2008, 10, 15),\r\n datetime(2008, 10, 31),\r\n datetime(2008, 11, 15),\r\n datetime(2008, 11, 30),\r\n datetime(2008, 12, 15),\r\n datetime(2008, 12, 31),\r\n )\r\n\r\n for base, exp_date in zip(dates[:-1], dates[1:]):\r\n assert_offset_equal(SemiMonthEnd(), base, exp_date)\r\n\r\n # ensure .apply_index works as expected\r\n s = DatetimeIndex(dates[:-1])\r\n with tm.assert_produces_warning(None):\r\n # GH#22535 check that we don't get a FutureWarning from adding\r\n # an integer array to PeriodIndex\r\n result = SemiMonthEnd() + s\r\n\r\n exp = DatetimeIndex(dates[1:])\r\n tm.assert_index_equal(result, exp)\r\n\r\n # ensure generating a range with DatetimeIndex gives same result\r\n result = date_range(start=dates[0], end=dates[-1], freq=\"SM\")\r\n exp = DatetimeIndex(dates, freq=\"SM\")\r\n tm.assert_index_equal(result, exp)\r\n\r\n offset_cases = []\r\n offset_cases.append(\r\n (\r\n SemiMonthEnd(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 15),\r\n datetime(2008, 1, 15): 
datetime(2008, 1, 31),\r\n datetime(2008, 1, 31): datetime(2008, 2, 15),\r\n datetime(2006, 12, 14): datetime(2006, 12, 15),\r\n datetime(2006, 12, 29): datetime(2006, 12, 31),\r\n datetime(2006, 12, 31): datetime(2007, 1, 15),\r\n datetime(2007, 1, 1): datetime(2007, 1, 15),\r\n datetime(2006, 12, 1): datetime(2006, 12, 15),\r\n datetime(2006, 12, 15): datetime(2006, 12, 31),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthEnd(day_of_month=20),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 20),\r\n datetime(2008, 1, 15): datetime(2008, 1, 20),\r\n datetime(2008, 1, 21): datetime(2008, 1, 31),\r\n datetime(2008, 1, 31): datetime(2008, 2, 20),\r\n datetime(2006, 12, 14): datetime(2006, 12, 20),\r\n datetime(2006, 12, 29): datetime(2006, 12, 31),\r\n datetime(2006, 12, 31): datetime(2007, 1, 20),\r\n datetime(2007, 1, 1): datetime(2007, 1, 20),\r\n datetime(2006, 12, 1): datetime(2006, 12, 20),\r\n datetime(2006, 12, 15): datetime(2006, 12, 20),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthEnd(0),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 15),\r\n datetime(2008, 1, 16): datetime(2008, 1, 31),\r\n datetime(2008, 1, 15): datetime(2008, 1, 15),\r\n datetime(2008, 1, 31): datetime(2008, 1, 31),\r\n datetime(2006, 12, 29): datetime(2006, 12, 31),\r\n datetime(2006, 12, 31): datetime(2006, 12, 31),\r\n datetime(2007, 1, 1): datetime(2007, 1, 15),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthEnd(0, day_of_month=16),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 16),\r\n datetime(2008, 1, 16): datetime(2008, 1, 16),\r\n datetime(2008, 1, 15): datetime(2008, 1, 16),\r\n datetime(2008, 1, 31): datetime(2008, 1, 31),\r\n datetime(2006, 12, 29): datetime(2006, 12, 31),\r\n datetime(2006, 12, 31): datetime(2006, 12, 31),\r\n datetime(2007, 1, 1): datetime(2007, 1, 16),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthEnd(2),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 31),\r\n datetime(2008, 1, 31): datetime(2008, 2, 29),\r\n datetime(2006, 12, 29): datetime(2007, 1, 15),\r\n datetime(2006, 12, 31): datetime(2007, 1, 31),\r\n datetime(2007, 1, 1): datetime(2007, 1, 31),\r\n datetime(2007, 1, 16): datetime(2007, 2, 15),\r\n datetime(2006, 11, 1): datetime(2006, 11, 30),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthEnd(-1),\r\n {\r\n datetime(2007, 1, 1): datetime(2006, 12, 31),\r\n datetime(2008, 6, 30): datetime(2008, 6, 15),\r\n datetime(2008, 12, 31): datetime(2008, 12, 15),\r\n datetime(2006, 12, 29): datetime(2006, 12, 15),\r\n datetime(2006, 12, 30): datetime(2006, 12, 15),\r\n datetime(2007, 1, 1): datetime(2006, 12, 31),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthEnd(-1, day_of_month=4),\r\n {\r\n datetime(2007, 1, 1): datetime(2006, 12, 31),\r\n datetime(2007, 1, 4): datetime(2006, 12, 31),\r\n datetime(2008, 6, 30): datetime(2008, 6, 4),\r\n datetime(2008, 12, 31): datetime(2008, 12, 4),\r\n datetime(2006, 12, 5): datetime(2006, 12, 4),\r\n datetime(2006, 12, 30): datetime(2006, 12, 4),\r\n datetime(2007, 1, 1): datetime(2006, 12, 31),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthEnd(-2),\r\n {\r\n datetime(2007, 1, 1): datetime(2006, 12, 15),\r\n datetime(2008, 6, 30): datetime(2008, 5, 31),\r\n datetime(2008, 3, 15): datetime(2008, 2, 15),\r\n datetime(2008, 12, 31): datetime(2008, 11, 30),\r\n datetime(2006, 12, 29): datetime(2006, 11, 30),\r\n datetime(2006, 12, 14): datetime(2006, 11, 15),\r\n datetime(2007, 1, 1): 
datetime(2006, 12, 15),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", offset_cases)\r\n def test_offset(self, case):\r\n offset, cases = case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n @pytest.mark.parametrize(\"case\", offset_cases)\r\n def test_apply_index(self, case):\r\n # https://github.com/pandas-dev/pandas/issues/34580\r\n offset, cases = case\r\n s = DatetimeIndex(cases.keys())\r\n exp = DatetimeIndex(cases.values())\r\n\r\n with tm.assert_produces_warning(None):\r\n # GH#22535 check that we don't get a FutureWarning from adding\r\n # an integer array to PeriodIndex\r\n result = offset + s\r\n tm.assert_index_equal(result, exp)\r\n\r\n with tm.assert_produces_warning(FutureWarning):\r\n result = offset.apply_index(s)\r\n tm.assert_index_equal(result, exp)\r\n\r\n on_offset_cases = [\r\n (datetime(2007, 12, 31), True),\r\n (datetime(2007, 12, 15), True),\r\n (datetime(2007, 12, 14), False),\r\n (datetime(2007, 12, 1), False),\r\n (datetime(2008, 2, 29), True),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"case\", on_offset_cases)\r\n def test_is_on_offset(self, case):\r\n dt, expected = case\r\n assert_is_on_offset(SemiMonthEnd(), dt, expected)\r\n\r\n @pytest.mark.parametrize(\"klass\", [Series, DatetimeIndex])\r\n def test_vectorized_offset_addition(self, klass):\r\n s = klass(\r\n [\r\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\r\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\r\n ],\r\n name=\"a\",\r\n )\r\n\r\n with tm.assert_produces_warning(None):\r\n # GH#22535 check that we don't get a FutureWarning from adding\r\n # an integer array to PeriodIndex\r\n result = s + SemiMonthEnd()\r\n result2 = SemiMonthEnd() + s\r\n\r\n exp = klass(\r\n [\r\n Timestamp(\"2000-01-31 00:15:00\", tz=\"US/Central\"),\r\n Timestamp(\"2000-02-29\", tz=\"US/Central\"),\r\n ],\r\n name=\"a\",\r\n )\r\n tm.assert_equal(result, exp)\r\n tm.assert_equal(result2, exp)\r\n\r\n s = klass(\r\n [\r\n Timestamp(\"2000-01-01 00:15:00\", tz=\"US/Central\"),\r\n Timestamp(\"2000-02-01\", tz=\"US/Central\"),\r\n ],\r\n name=\"a\",\r\n )\r\n\r\n with tm.assert_produces_warning(None):\r\n # GH#22535 check that we don't get a FutureWarning from adding\r\n # an integer array to PeriodIndex\r\n result = s + SemiMonthEnd()\r\n result2 = SemiMonthEnd() + s\r\n\r\n exp = klass(\r\n [\r\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\r\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\r\n ],\r\n name=\"a\",\r\n )\r\n tm.assert_equal(result, exp)\r\n tm.assert_equal(result2, exp)\r\n\r\n\r\nclass TestSemiMonthBegin(Base):\r\n _offset = SemiMonthBegin\r\n offset1 = _offset()\r\n offset2 = _offset(2)\r\n\r\n def test_offset_whole_year(self):\r\n dates = (\r\n datetime(2007, 12, 15),\r\n datetime(2008, 1, 1),\r\n datetime(2008, 1, 15),\r\n datetime(2008, 2, 1),\r\n datetime(2008, 2, 15),\r\n datetime(2008, 3, 1),\r\n datetime(2008, 3, 15),\r\n datetime(2008, 4, 1),\r\n datetime(2008, 4, 15),\r\n datetime(2008, 5, 1),\r\n datetime(2008, 5, 15),\r\n datetime(2008, 6, 1),\r\n datetime(2008, 6, 15),\r\n datetime(2008, 7, 1),\r\n datetime(2008, 7, 15),\r\n datetime(2008, 8, 1),\r\n datetime(2008, 8, 15),\r\n datetime(2008, 9, 1),\r\n datetime(2008, 9, 15),\r\n datetime(2008, 10, 1),\r\n datetime(2008, 10, 15),\r\n datetime(2008, 11, 1),\r\n datetime(2008, 11, 15),\r\n datetime(2008, 12, 1),\r\n datetime(2008, 12, 15),\r\n )\r\n\r\n for base, exp_date in zip(dates[:-1], dates[1:]):\r\n assert_offset_equal(SemiMonthBegin(), base, exp_date)\r\n\r\n 
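        # SemiMonthBegin anchors on the 1st and the 15th of each month by
        # default, so each date in `dates` above rolls forward to the next
        # anchor (e.g. 2008-01-15 -> 2008-02-01).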
# ensure .apply_index works as expected\r\n s = DatetimeIndex(dates[:-1])\r\n with tm.assert_produces_warning(None):\r\n # GH#22535 check that we don't get a FutureWarning from adding\r\n # an integer array to PeriodIndex\r\n result = SemiMonthBegin() + s\r\n\r\n exp = DatetimeIndex(dates[1:])\r\n tm.assert_index_equal(result, exp)\r\n\r\n # ensure generating a range with DatetimeIndex gives same result\r\n result = date_range(start=dates[0], end=dates[-1], freq=\"SMS\")\r\n exp = DatetimeIndex(dates, freq=\"SMS\")\r\n tm.assert_index_equal(result, exp)\r\n\r\n offset_cases = []\r\n offset_cases.append(\r\n (\r\n SemiMonthBegin(),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 15),\r\n datetime(2008, 1, 15): datetime(2008, 2, 1),\r\n datetime(2008, 1, 31): datetime(2008, 2, 1),\r\n datetime(2006, 12, 14): datetime(2006, 12, 15),\r\n datetime(2006, 12, 29): datetime(2007, 1, 1),\r\n datetime(2006, 12, 31): datetime(2007, 1, 1),\r\n datetime(2007, 1, 1): datetime(2007, 1, 15),\r\n datetime(2006, 12, 1): datetime(2006, 12, 15),\r\n datetime(2006, 12, 15): datetime(2007, 1, 1),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthBegin(day_of_month=20),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 20),\r\n datetime(2008, 1, 15): datetime(2008, 1, 20),\r\n datetime(2008, 1, 21): datetime(2008, 2, 1),\r\n datetime(2008, 1, 31): datetime(2008, 2, 1),\r\n datetime(2006, 12, 14): datetime(2006, 12, 20),\r\n datetime(2006, 12, 29): datetime(2007, 1, 1),\r\n datetime(2006, 12, 31): datetime(2007, 1, 1),\r\n datetime(2007, 1, 1): datetime(2007, 1, 20),\r\n datetime(2006, 12, 1): datetime(2006, 12, 20),\r\n datetime(2006, 12, 15): datetime(2006, 12, 20),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthBegin(0),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 1),\r\n datetime(2008, 1, 16): datetime(2008, 2, 1),\r\n datetime(2008, 1, 15): datetime(2008, 1, 15),\r\n datetime(2008, 1, 31): datetime(2008, 2, 1),\r\n datetime(2006, 12, 29): datetime(2007, 1, 1),\r\n datetime(2006, 12, 2): datetime(2006, 12, 15),\r\n datetime(2007, 1, 1): datetime(2007, 1, 1),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthBegin(0, day_of_month=16),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 1, 1),\r\n datetime(2008, 1, 16): datetime(2008, 1, 16),\r\n datetime(2008, 1, 15): datetime(2008, 1, 16),\r\n datetime(2008, 1, 31): datetime(2008, 2, 1),\r\n datetime(2006, 12, 29): datetime(2007, 1, 1),\r\n datetime(2006, 12, 31): datetime(2007, 1, 1),\r\n datetime(2007, 1, 5): datetime(2007, 1, 16),\r\n datetime(2007, 1, 1): datetime(2007, 1, 1),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthBegin(2),\r\n {\r\n datetime(2008, 1, 1): datetime(2008, 2, 1),\r\n datetime(2008, 1, 31): datetime(2008, 2, 15),\r\n datetime(2006, 12, 1): datetime(2007, 1, 1),\r\n datetime(2006, 12, 29): datetime(2007, 1, 15),\r\n datetime(2006, 12, 15): datetime(2007, 1, 15),\r\n datetime(2007, 1, 1): datetime(2007, 2, 1),\r\n datetime(2007, 1, 16): datetime(2007, 2, 15),\r\n datetime(2006, 11, 1): datetime(2006, 12, 1),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthBegin(-1),\r\n {\r\n datetime(2007, 1, 1): datetime(2006, 12, 15),\r\n datetime(2008, 6, 30): datetime(2008, 6, 15),\r\n datetime(2008, 6, 14): datetime(2008, 6, 1),\r\n datetime(2008, 12, 31): datetime(2008, 12, 15),\r\n datetime(2006, 12, 29): datetime(2006, 12, 15),\r\n datetime(2006, 12, 15): datetime(2006, 12, 1),\r\n datetime(2007, 1, 1): datetime(2006, 12, 15),\r\n },\r\n )\r\n 
)\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthBegin(-1, day_of_month=4),\r\n {\r\n datetime(2007, 1, 1): datetime(2006, 12, 4),\r\n datetime(2007, 1, 4): datetime(2007, 1, 1),\r\n datetime(2008, 6, 30): datetime(2008, 6, 4),\r\n datetime(2008, 12, 31): datetime(2008, 12, 4),\r\n datetime(2006, 12, 5): datetime(2006, 12, 4),\r\n datetime(2006, 12, 30): datetime(2006, 12, 4),\r\n datetime(2006, 12, 2): datetime(2006, 12, 1),\r\n datetime(2007, 1, 1): datetime(2006, 12, 4),\r\n },\r\n )\r\n )\r\n\r\n offset_cases.append(\r\n (\r\n SemiMonthBegin(-2),\r\n {\r\n datetime(2007, 1, 1): datetime(2006, 12, 1),\r\n datetime(2008, 6, 30): datetime(2008, 6, 1),\r\n datetime(2008, 6, 14): datetime(2008, 5, 15),\r\n datetime(2008, 12, 31): datetime(2008, 12, 1),\r\n datetime(2006, 12, 29): datetime(2006, 12, 1),\r\n datetime(2006, 12, 15): datetime(2006, 11, 15),\r\n datetime(2007, 1, 1): datetime(2006, 12, 1),\r\n },\r\n )\r\n )\r\n\r\n @pytest.mark.parametrize(\"case\", offset_cases)\r\n def test_offset(self, case):\r\n offset, cases = case\r\n for base, expected in cases.items():\r\n assert_offset_equal(offset, base, expected)\r\n\r\n @pytest.mark.parametrize(\"case\", offset_cases)\r\n def test_apply_index(self, case):\r\n offset, cases = case\r\n s = DatetimeIndex(cases.keys())\r\n\r\n with tm.assert_produces_warning(None):\r\n # GH#22535 check that we don't get a FutureWarning from adding\r\n # an integer array to PeriodIndex\r\n result = offset + s\r\n\r\n exp = DatetimeIndex(cases.values())\r\n tm.assert_index_equal(result, exp)\r\n\r\n on_offset_cases = [\r\n (datetime(2007, 12, 1), True),\r\n (datetime(2007, 12, 15), True),\r\n (datetime(2007, 12, 14), False),\r\n (datetime(2007, 12, 31), False),\r\n (datetime(2008, 2, 15), True),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"case\", on_offset_cases)\r\n def test_is_on_offset(self, case):\r\n dt, expected = case\r\n assert_is_on_offset(SemiMonthBegin(), dt, expected)\r\n\r\n @pytest.mark.parametrize(\"klass\", [Series, DatetimeIndex])\r\n def test_vectorized_offset_addition(self, klass):\r\n s = klass(\r\n [\r\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\r\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\r\n ],\r\n name=\"a\",\r\n )\r\n with tm.assert_produces_warning(None):\r\n # GH#22535 check that we don't get a FutureWarning from adding\r\n # an integer array to PeriodIndex\r\n result = s + SemiMonthBegin()\r\n result2 = SemiMonthBegin() + s\r\n\r\n exp = klass(\r\n [\r\n Timestamp(\"2000-02-01 00:15:00\", tz=\"US/Central\"),\r\n Timestamp(\"2000-03-01\", tz=\"US/Central\"),\r\n ],\r\n name=\"a\",\r\n )\r\n tm.assert_equal(result, exp)\r\n tm.assert_equal(result2, exp)\r\n\r\n s = klass(\r\n [\r\n Timestamp(\"2000-01-01 00:15:00\", tz=\"US/Central\"),\r\n Timestamp(\"2000-02-01\", tz=\"US/Central\"),\r\n ],\r\n name=\"a\",\r\n )\r\n with tm.assert_produces_warning(None):\r\n # GH#22535 check that we don't get a FutureWarning from adding\r\n # an integer array to PeriodIndex\r\n result = s + SemiMonthBegin()\r\n result2 = SemiMonthBegin() + s\r\n\r\n exp = klass(\r\n [\r\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\r\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\r\n ],\r\n name=\"a\",\r\n )\r\n tm.assert_equal(result, exp)\r\n tm.assert_equal(result2, exp)\r\n\r\n\r\ndef test_Easter():\r\n assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))\r\n assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))\r\n assert_offset_equal(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 
24))\r\n\r\n assert_offset_equal(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24))\r\n assert_offset_equal(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8))\r\n\r\n assert_offset_equal(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4))\r\n assert_offset_equal(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4))\r\n assert_offset_equal(-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12))\r\n\r\n assert_offset_equal(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12))\r\n assert_offset_equal(-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23))\r\n\r\n\r\nclass TestOffsetNames:\r\n def test_get_offset_name(self):\r\n assert BDay().freqstr == \"B\"\r\n assert BDay(2).freqstr == \"2B\"\r\n assert BMonthEnd().freqstr == \"BM\"\r\n assert Week(weekday=0).freqstr == \"W-MON\"\r\n assert Week(weekday=1).freqstr == \"W-TUE\"\r\n assert Week(weekday=2).freqstr == \"W-WED\"\r\n assert Week(weekday=3).freqstr == \"W-THU\"\r\n assert Week(weekday=4).freqstr == \"W-FRI\"\r\n\r\n assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == \"LWOM-SUN\"\r\n\r\n\r\ndef test_get_offset():\r\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\r\n _get_offset(\"gibberish\")\r\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\r\n _get_offset(\"QS-JAN-B\")\r\n\r\n pairs = [\r\n (\"B\", BDay()),\r\n (\"b\", BDay()),\r\n (\"bm\", BMonthEnd()),\r\n (\"Bm\", BMonthEnd()),\r\n (\"W-MON\", Week(weekday=0)),\r\n (\"W-TUE\", Week(weekday=1)),\r\n (\"W-WED\", Week(weekday=2)),\r\n (\"W-THU\", Week(weekday=3)),\r\n (\"W-FRI\", Week(weekday=4)),\r\n ]\r\n\r\n for name, expected in pairs:\r\n offset = _get_offset(name)\r\n assert offset == expected, (\r\n f\"Expected {repr(name)} to yield {repr(expected)} \"\r\n f\"(actual: {repr(offset)})\"\r\n )\r\n\r\n\r\ndef test_get_offset_legacy():\r\n pairs = [(\"w@Sat\", Week(weekday=5))]\r\n for name, expected in pairs:\r\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\r\n _get_offset(name)\r\n\r\n\r\nclass TestOffsetAliases:\r\n def setup_method(self, method):\r\n _offset_map.clear()\r\n\r\n def test_alias_equality(self):\r\n for k, v in _offset_map.items():\r\n if v is None:\r\n continue\r\n assert k == v.copy()\r\n\r\n def test_rule_code(self):\r\n lst = [\"M\", \"MS\", \"BM\", \"BMS\", \"D\", \"B\", \"H\", \"T\", \"S\", \"L\", \"U\"]\r\n for k in lst:\r\n assert k == _get_offset(k).rule_code\r\n # should be cached - this is kind of an internals test...\r\n assert k in _offset_map\r\n assert k == (_get_offset(k) * 3).rule_code\r\n\r\n suffix_lst = [\"MON\", \"TUE\", \"WED\", \"THU\", \"FRI\", \"SAT\", \"SUN\"]\r\n base = \"W\"\r\n for v in suffix_lst:\r\n alias = \"-\".join([base, v])\r\n assert alias == _get_offset(alias).rule_code\r\n assert alias == (_get_offset(alias) * 5).rule_code\r\n\r\n suffix_lst = [\r\n \"JAN\",\r\n \"FEB\",\r\n \"MAR\",\r\n \"APR\",\r\n \"MAY\",\r\n \"JUN\",\r\n \"JUL\",\r\n \"AUG\",\r\n \"SEP\",\r\n \"OCT\",\r\n \"NOV\",\r\n \"DEC\",\r\n ]\r\n base_lst = [\"A\", \"AS\", \"BA\", \"BAS\", \"Q\", \"QS\", \"BQ\", \"BQS\"]\r\n for base in base_lst:\r\n for v in suffix_lst:\r\n alias = \"-\".join([base, v])\r\n assert alias == _get_offset(alias).rule_code\r\n assert alias == (_get_offset(alias) * 5).rule_code\r\n\r\n\r\ndef test_dateoffset_misc():\r\n oset = offsets.DateOffset(months=2, days=4)\r\n # it works\r\n oset.freqstr\r\n\r\n assert not offsets.DateOffset(months=2) == 2\r\n\r\n\r\ndef test_freq_offsets():\r\n off = BDay(1, offset=timedelta(0, 1800))\r\n assert off.freqstr == 
\"B+30Min\"\r\n\r\n off = BDay(1, offset=timedelta(0, -1800))\r\n assert off.freqstr == \"B-30Min\"\r\n\r\n\r\nclass TestReprNames:\r\n def test_str_for_named_is_name(self):\r\n # look at all the amazing combinations!\r\n month_prefixes = [\"A\", \"AS\", \"BA\", \"BAS\", \"Q\", \"BQ\", \"BQS\", \"QS\"]\r\n names = [\r\n prefix + \"-\" + month\r\n for prefix in month_prefixes\r\n for month in [\r\n \"JAN\",\r\n \"FEB\",\r\n \"MAR\",\r\n \"APR\",\r\n \"MAY\",\r\n \"JUN\",\r\n \"JUL\",\r\n \"AUG\",\r\n \"SEP\",\r\n \"OCT\",\r\n \"NOV\",\r\n \"DEC\",\r\n ]\r\n ]\r\n days = [\"MON\", \"TUE\", \"WED\", \"THU\", \"FRI\", \"SAT\", \"SUN\"]\r\n names += [\"W-\" + day for day in days]\r\n names += [\"WOM-\" + week + day for week in (\"1\", \"2\", \"3\", \"4\") for day in days]\r\n _offset_map.clear()\r\n for name in names:\r\n offset = _get_offset(name)\r\n assert offset.freqstr == name\r\n\r\n\r\ndef get_utc_offset_hours(ts):\r\n # take a Timestamp and compute total hours of utc offset\r\n o = ts.utcoffset()\r\n return (o.days * 24 * 3600 + o.seconds) / 3600.0\r\n\r\n\r\nclass TestDST:\r\n \"\"\"\r\n test DateOffset additions over Daylight Savings Time\r\n \"\"\"\r\n\r\n # one microsecond before the DST transition\r\n ts_pre_fallback = \"2013-11-03 01:59:59.999999\"\r\n ts_pre_springfwd = \"2013-03-10 01:59:59.999999\"\r\n\r\n # test both basic names and dateutil timezones\r\n timezone_utc_offsets = {\r\n \"US/Eastern\": {\"utc_offset_daylight\": -4, \"utc_offset_standard\": -5},\r\n \"dateutil/US/Pacific\": {\"utc_offset_daylight\": -7, \"utc_offset_standard\": -8},\r\n }\r\n valid_date_offsets_singular = [\r\n \"weekday\",\r\n \"day\",\r\n \"hour\",\r\n \"minute\",\r\n \"second\",\r\n \"microsecond\",\r\n ]\r\n valid_date_offsets_plural = [\r\n \"weeks\",\r\n \"days\",\r\n \"hours\",\r\n \"minutes\",\r\n \"seconds\",\r\n \"milliseconds\",\r\n \"microseconds\",\r\n ]\r\n\r\n def _test_all_offsets(self, n, **kwds):\r\n valid_offsets = (\r\n self.valid_date_offsets_plural\r\n if n > 1\r\n else self.valid_date_offsets_singular\r\n )\r\n\r\n for name in valid_offsets:\r\n self._test_offset(offset_name=name, offset_n=n, **kwds)\r\n\r\n def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):\r\n offset = DateOffset(**{offset_name: offset_n})\r\n\r\n t = tstart + offset\r\n if expected_utc_offset is not None:\r\n assert get_utc_offset_hours(t) == expected_utc_offset\r\n\r\n if offset_name == \"weeks\":\r\n # dates should match\r\n assert t.date() == timedelta(days=7 * offset.kwds[\"weeks\"]) + tstart.date()\r\n # expect the same day of week, hour of day, minute, second, ...\r\n assert (\r\n t.dayofweek == tstart.dayofweek\r\n and t.hour == tstart.hour\r\n and t.minute == tstart.minute\r\n and t.second == tstart.second\r\n )\r\n elif offset_name == \"days\":\r\n # dates should match\r\n assert timedelta(offset.kwds[\"days\"]) + tstart.date() == t.date()\r\n # expect the same hour of day, minute, second, ...\r\n assert (\r\n t.hour == tstart.hour\r\n and t.minute == tstart.minute\r\n and t.second == tstart.second\r\n )\r\n elif offset_name in self.valid_date_offsets_singular:\r\n # expect the singular offset value to match between tstart and t\r\n datepart_offset = getattr(\r\n t, offset_name if offset_name != \"weekday\" else \"dayofweek\"\r\n )\r\n assert datepart_offset == offset.kwds[offset_name]\r\n else:\r\n # the offset should be the same as if it was done in UTC\r\n assert t == (tstart.tz_convert(\"UTC\") + offset).tz_convert(\"US/Pacific\")\r\n\r\n def _make_timestamp(self, 
string, hrs_offset, tz):\r\n if hrs_offset >= 0:\r\n offset_string = f\"{hrs_offset:02d}00\"\r\n else:\r\n offset_string = f\"-{(hrs_offset * -1):02}00\"\r\n return Timestamp(string + offset_string).tz_convert(tz)\r\n\r\n def test_springforward_plural(self):\r\n # test moving from standard to daylight savings\r\n for tz, utc_offsets in self.timezone_utc_offsets.items():\r\n hrs_pre = utc_offsets[\"utc_offset_standard\"]\r\n hrs_post = utc_offsets[\"utc_offset_daylight\"]\r\n self._test_all_offsets(\r\n n=3,\r\n tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),\r\n expected_utc_offset=hrs_post,\r\n )\r\n\r\n def test_fallback_singular(self):\r\n # in the case of singular offsets, we don't necessarily know which utc\r\n # offset the new Timestamp will wind up in (the tz for 1 month may be\r\n # different from 1 second) so we don't specify an expected_utc_offset\r\n for tz, utc_offsets in self.timezone_utc_offsets.items():\r\n hrs_pre = utc_offsets[\"utc_offset_standard\"]\r\n self._test_all_offsets(\r\n n=1,\r\n tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),\r\n expected_utc_offset=None,\r\n )\r\n\r\n def test_springforward_singular(self):\r\n for tz, utc_offsets in self.timezone_utc_offsets.items():\r\n hrs_pre = utc_offsets[\"utc_offset_standard\"]\r\n self._test_all_offsets(\r\n n=1,\r\n tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),\r\n expected_utc_offset=None,\r\n )\r\n\r\n offset_classes = {\r\n MonthBegin: [\"11/2/2012\", \"12/1/2012\"],\r\n MonthEnd: [\"11/2/2012\", \"11/30/2012\"],\r\n BMonthBegin: [\"11/2/2012\", \"12/3/2012\"],\r\n BMonthEnd: [\"11/2/2012\", \"11/30/2012\"],\r\n CBMonthBegin: [\"11/2/2012\", \"12/3/2012\"],\r\n CBMonthEnd: [\"11/2/2012\", \"11/30/2012\"],\r\n SemiMonthBegin: [\"11/2/2012\", \"11/15/2012\"],\r\n SemiMonthEnd: [\"11/2/2012\", \"11/15/2012\"],\r\n Week: [\"11/2/2012\", \"11/9/2012\"],\r\n YearBegin: [\"11/2/2012\", \"1/1/2013\"],\r\n YearEnd: [\"11/2/2012\", \"12/31/2012\"],\r\n BYearBegin: [\"11/2/2012\", \"1/1/2013\"],\r\n BYearEnd: [\"11/2/2012\", \"12/31/2012\"],\r\n QuarterBegin: [\"11/2/2012\", \"12/1/2012\"],\r\n QuarterEnd: [\"11/2/2012\", \"12/31/2012\"],\r\n BQuarterBegin: [\"11/2/2012\", \"12/3/2012\"],\r\n BQuarterEnd: [\"11/2/2012\", \"12/31/2012\"],\r\n Day: [\"11/4/2012\", \"11/4/2012 23:00\"],\r\n }.items()\r\n\r\n @pytest.mark.parametrize(\"tup\", offset_classes)\r\n def test_all_offset_classes(self, tup):\r\n offset, test_values = tup\r\n\r\n first = Timestamp(test_values[0], tz=\"US/Eastern\") + offset()\r\n second = Timestamp(test_values[1], tz=\"US/Eastern\")\r\n assert first == second\r\n\r\n\r\n# ---------------------------------------------------------------------\r\n\r\n\r\ndef test_valid_default_arguments(offset_types):\r\n # GH#19142 check that the calling the constructors without passing\r\n # any keyword arguments produce valid offsets\r\n cls = offset_types\r\n cls()\r\n\r\n\r\[email protected](\"kwd\", sorted(liboffsets._relativedelta_kwds))\r\ndef test_valid_month_attributes(kwd, month_classes):\r\n # GH#18226\r\n cls = month_classes\r\n # check that we cannot create e.g. 
MonthEnd(weeks=3)\r\n msg = rf\"__init__\\(\\) got an unexpected keyword argument '{kwd}'\"\r\n with pytest.raises(TypeError, match=msg):\r\n cls(**{kwd: 3})\r\n\r\n\r\ndef test_month_offset_name(month_classes):\r\n # GH#33757 off.name with n != 1 should not raise AttributeError\r\n obj = month_classes(1)\r\n obj2 = month_classes(2)\r\n assert obj2.name == obj.name\r\n\r\n\r\[email protected](\"kwd\", sorted(liboffsets._relativedelta_kwds))\r\ndef test_valid_relativedelta_kwargs(kwd):\r\n # Check that all the arguments specified in liboffsets._relativedelta_kwds\r\n # are in fact valid relativedelta keyword args\r\n DateOffset(**{kwd: 1})\r\n\r\n\r\[email protected](\"kwd\", sorted(liboffsets._relativedelta_kwds))\r\ndef test_valid_tick_attributes(kwd, tick_classes):\r\n # GH#18226\r\n cls = tick_classes\r\n # check that we cannot create e.g. Hour(weeks=3)\r\n msg = rf\"__init__\\(\\) got an unexpected keyword argument '{kwd}'\"\r\n with pytest.raises(TypeError, match=msg):\r\n cls(**{kwd: 3})\r\n\r\n\r\ndef test_validate_n_error():\r\n with pytest.raises(TypeError, match=\"argument must be an integer\"):\r\n DateOffset(n=\"Doh!\")\r\n\r\n with pytest.raises(TypeError, match=\"argument must be an integer\"):\r\n MonthBegin(n=timedelta(1))\r\n\r\n with pytest.raises(TypeError, match=\"argument must be an integer\"):\r\n BDay(n=np.array([1, 2], dtype=np.int64))\r\n\r\n\r\ndef test_require_integers(offset_types):\r\n cls = offset_types\r\n with pytest.raises(ValueError, match=\"argument must be an integer\"):\r\n cls(n=1.5)\r\n\r\n\r\ndef test_tick_normalize_raises(tick_classes):\r\n # check that trying to create a Tick object with normalize=True raises\r\n # GH#21427\r\n cls = tick_classes\r\n msg = \"Tick offset with `normalize=True` are not allowed.\"\r\n with pytest.raises(ValueError, match=msg):\r\n cls(n=3, normalize=True)\r\n\r\n\r\ndef test_weeks_onoffset():\r\n # GH#18510 Week with weekday = None, normalize = False should always\r\n # be is_on_offset\r\n offset = Week(n=2, weekday=None)\r\n ts = Timestamp(\"1862-01-13 09:03:34.873477378+0210\", tz=\"Africa/Lusaka\")\r\n fast = offset.is_on_offset(ts)\r\n slow = (ts + offset) - offset == ts\r\n assert fast == slow\r\n\r\n # negative n\r\n offset = Week(n=2, weekday=None)\r\n ts = Timestamp(\"1856-10-24 16:18:36.556360110-0717\", tz=\"Pacific/Easter\")\r\n fast = offset.is_on_offset(ts)\r\n slow = (ts + offset) - offset == ts\r\n assert fast == slow\r\n\r\n\r\ndef test_weekofmonth_onoffset():\r\n # GH#18864\r\n # Make sure that nanoseconds don't trip up is_on_offset (and with it apply)\r\n offset = WeekOfMonth(n=2, week=2, weekday=0)\r\n ts = Timestamp(\"1916-05-15 01:14:49.583410462+0422\", tz=\"Asia/Qyzylorda\")\r\n fast = offset.is_on_offset(ts)\r\n slow = (ts + offset) - offset == ts\r\n assert fast == slow\r\n\r\n # negative n\r\n offset = WeekOfMonth(n=-3, week=1, weekday=0)\r\n ts = Timestamp(\"1980-12-08 03:38:52.878321185+0500\", tz=\"Asia/Oral\")\r\n fast = offset.is_on_offset(ts)\r\n slow = (ts + offset) - offset == ts\r\n assert fast == slow\r\n\r\n\r\ndef test_last_week_of_month_on_offset():\r\n # GH#19036, GH#18977 _adjust_dst was incorrect for LastWeekOfMonth\r\n offset = LastWeekOfMonth(n=4, weekday=6)\r\n ts = Timestamp(\"1917-05-27 20:55:27.084284178+0200\", tz=\"Europe/Warsaw\")\r\n slow = (ts + offset) - offset == ts\r\n fast = offset.is_on_offset(ts)\r\n assert fast == slow\r\n\r\n # negative n\r\n offset = LastWeekOfMonth(n=-4, weekday=5)\r\n ts = Timestamp(\"2005-08-27 05:01:42.799392561-0500\", 
tz=\"America/Rainy_River\")\r\n slow = (ts + offset) - offset == ts\r\n fast = offset.is_on_offset(ts)\r\n assert fast == slow\r\n\r\n\r\ndef test_week_add_invalid():\r\n # Week with weekday should raise TypeError and _not_ AttributeError\r\n # when adding invalid offset\r\n offset = Week(weekday=1)\r\n other = Day()\r\n with pytest.raises(TypeError, match=\"Cannot add\"):\r\n offset + other\r\n\r\n\r\[email protected](\r\n \"attribute\",\r\n [\r\n \"hours\",\r\n \"days\",\r\n \"weeks\",\r\n \"months\",\r\n \"years\",\r\n ],\r\n)\r\ndef test_dateoffset_immutable(attribute):\r\n offset = DateOffset(**{attribute: 0})\r\n msg = \"DateOffset objects are immutable\"\r\n with pytest.raises(AttributeError, match=msg):\r\n setattr(offset, attribute, 5)\r\n"
] | [
[
"pandas.CategoricalIndex",
"pandas._config.config.option_context"
],
[
"pandas.MultiIndex",
"numpy.arange",
"pandas.Index",
"pandas.option_context",
"pandas.DataFrame",
"pandas.set_option",
"pandas._testing.reset_display_options"
],
[
"pandas.MultiIndex.from_tuples",
"numpy.random.randn",
"pandas.MultiIndex.from_product"
],
[
"pandas.errors.AbstractMethodError"
],
[
"pandas.to_datetime",
"pandas.Series",
"pandas._libs.tslibs.timezones.maybe_get_tz",
"pandas.PeriodIndex",
"numpy.asarray",
"pandas.DataFrame",
"numpy.random.randn",
"pandas._testing.get_locales",
"pandas._testing.assert_frame_equal",
"numpy.arange",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_produces_warning",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.Categorical",
"pandas.option_context",
"pandas.date_range",
"pandas._testing.external_error_raised",
"numpy.array",
"pandas.timedelta_range",
"pandas.TimedeltaIndex",
"pandas.period_range",
"pandas._testing.set_locale",
"pandas.Period",
"pandas.Timestamp"
],
[
"pandas.core.dtypes.cast.infer_dtype_from_array",
"pandas.Series",
"pandas.date_range",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.Categorical",
"numpy.dtype",
"numpy.datetime64",
"numpy.timedelta64",
"pandas.Timedelta",
"numpy.float_",
"pandas.Interval",
"pandas.Period",
"numpy.array",
"pandas.Timestamp"
],
[
"pandas.core.common.flatten",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.util._decorators.deprecate_kwarg",
"pandas.core.dtypes.common.is_extension_array_dtype",
"numpy.tile",
"pandas.core.dtypes.missing.notna",
"pandas.core.tools.numeric.to_numeric",
"pandas.core.dtypes.concat.concat_compat",
"pandas.core.arrays.Categorical",
"pandas.core.reshape.concat.concat",
"pandas.core.reshape.util.tile_compat"
],
[
"pandas.timedelta_range",
"pandas.TimedeltaIndex",
"pandas.Series",
"pandas.Index",
"pandas.Timedelta",
"pandas._testing.assert_index_equal"
],
[
"pandas.CategoricalIndex"
],
[
"pandas.CategoricalIndex",
"pandas._testing.assert_numpy_array_equal",
"numpy.random.seed",
"pandas.Timestamp",
"pandas.DatetimeIndex",
"numpy.datetime64",
"numpy.timedelta64",
"pandas.Timedelta",
"pandas._testing.assert_categorical_equal",
"pandas.Interval",
"pandas.date_range",
"numpy.array",
"pandas._testing.assert_index_equal"
],
[
"pandas._testing.assert_almost_equal",
"pandas.to_datetime",
"pandas.core.dtypes.common.is_datetime64_ns_dtype",
"pandas.Series",
"pandas.DataFrame",
"pandas._testing.set_timezone",
"pandas.isna",
"pandas.Timestamp.today",
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.core.tools.datetimes.should_cache",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas.core.tools.datetimes._guess_datetime_format_for_array",
"pandas.Timedelta",
"pandas.core.arrays.DatetimeArray",
"numpy.int64",
"pandas.date_range",
"numpy.array",
"pandas._testing.assert_equal",
"pandas._libs.tslib.array_to_datetime",
"pandas._libs.tslibs.parsing.parse_time_string",
"numpy.datetime64",
"pandas.Timestamp",
"numpy.empty"
],
[
"pandas.testing.assert_series_equal",
"pandas.Series",
"pandas.period_range",
"numpy.arange",
"pandas.MultiIndex.from_tuples",
"pandas.DatetimeIndex",
"pandas.date_range",
"pandas.Timestamp",
"numpy.zeros",
"pandas.tseries.offsets.BDay"
],
[
"pandas.io.excel._util.validate_freeze_panes",
"pandas._libs.json.dumps"
],
[
"pandas._testing.assert_almost_equal",
"pandas.notna",
"pandas.Series",
"numpy.isfinite",
"pandas._testing.assert_series_equal",
"numpy.std",
"numpy.random.randn",
"numpy.var",
"pandas.isna",
"pandas._testing.assert_frame_equal",
"pandas.tseries.offsets.BDay"
],
[
"pandas.tseries.offsets.Day",
"pandas.concat",
"pandas.DateOffset",
"pandas.Series",
"numpy.in1d",
"pandas.DatetimeIndex",
"pandas.tseries.offsets.Easter",
"pandas.date_range",
"pandas.Timestamp"
],
[
"pandas._testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.Index",
"pandas.MultiIndex.from_product",
"numpy.array",
"pandas._testing.assert_index_equal"
],
[
"pandas.tseries.offsets.Day",
"pandas.core.indexes.datetimes.DatetimeIndex",
"pandas._libs.tslibs.Timestamp",
"pandas.tseries.offsets.CBMonthBegin",
"pandas._libs.tslibs.timezones.maybe_get_tz",
"pandas.core.indexes.datetimes.date_range",
"pandas.tseries.offsets.BusinessHour",
"pandas._libs.tslibs.conversion.localize_pydatetime",
"pandas.tseries.offsets.BDay",
"pandas._testing.assert_numpy_array_equal",
"pandas._libs.tslibs.Timestamp.now",
"pandas.tseries.offsets.DateOffset",
"pandas.tseries.offsets.Easter",
"pandas._libs.tslibs.offsets._offset_map.clear",
"pandas.tseries.offsets.CustomBusinessHour",
"pandas.io.pickle.read_pickle",
"pandas._testing.assert_index_equal",
"pandas.tseries.offsets.SemiMonthBegin",
"pandas._testing.assert_produces_warning",
"pandas._libs.tslibs.offsets._get_offset",
"pandas.tseries.offsets.SemiMonthEnd",
"pandas.compat.numpy.np_datetime64_compat",
"pandas._libs.tslibs.offsets._offset_map.items",
"numpy.timedelta64",
"pandas.tseries.offsets.LastWeekOfMonth",
"pandas._testing.round_trip_pickle",
"pandas.tseries.holiday.USFederalHolidayCalendar",
"numpy.array",
"pandas._testing.assert_equal",
"pandas.tseries.offsets.WeekOfMonth",
"pandas.tseries.offsets.CDay",
"numpy.datetime64",
"pandas.tseries.offsets.Nano",
"pandas.tseries.offsets.BMonthEnd",
"pandas.tseries.offsets.Week",
"pandas.tseries.offsets.CBMonthEnd"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
dumpmemory/Transformer-Explainability | [
"951e112d24c1a642ceefeb0dd03a607040305383",
"951e112d24c1a642ceefeb0dd03a607040305383"
] | [
"utils/saver.py",
"BERT_explainability/modules/BERT/BERT.py"
] | [
"import os\nimport torch\nfrom collections import OrderedDict\nimport glob\n\n\nclass Saver(object):\n\n def __init__(self, args):\n self.args = args\n self.directory = os.path.join('run', args.train_dataset, args.checkname)\n self.runs = sorted(glob.glob(os.path.join(self.directory, 'experiment_*')))\n run_id = int(self.runs[-1].split('_')[-1]) + 1 if self.runs else 0\n\n self.experiment_dir = os.path.join(self.directory, 'experiment_{}'.format(str(run_id)))\n if not os.path.exists(self.experiment_dir):\n os.makedirs(self.experiment_dir)\n\n def save_checkpoint(self, state, filename='checkpoint.pth.tar'):\n \"\"\"Saves checkpoint to disk\"\"\"\n filename = os.path.join(self.experiment_dir, filename)\n torch.save(state, filename)\n\n def save_experiment_config(self):\n logfile = os.path.join(self.experiment_dir, 'parameters.txt')\n log_file = open(logfile, 'w')\n p = OrderedDict()\n p['train_dataset'] = self.args.train_dataset\n p['lr'] = self.args.lr\n p['epoch'] = self.args.epochs\n\n for key, val in p.items():\n log_file.write(key + ':' + str(val) + '\\n')\n log_file.close()\n",
"from __future__ import absolute_import\r\n\r\nimport torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\nimport math\r\nfrom transformers import BertConfig\r\nfrom transformers.modeling_outputs import BaseModelOutputWithPooling, BaseModelOutput\r\nfrom BERT_explainability.modules.layers_ours import *\r\nfrom transformers import (\r\n BertPreTrainedModel,\r\n PreTrainedModel,\r\n)\r\n\r\nACT2FN = {\r\n \"relu\": ReLU,\r\n \"tanh\": Tanh,\r\n \"gelu\": GELU,\r\n}\r\n\r\n\r\ndef get_activation(activation_string):\r\n if activation_string in ACT2FN:\r\n return ACT2FN[activation_string]\r\n else:\r\n raise KeyError(\"function {} not found in ACT2FN mapping {}\".format(activation_string, list(ACT2FN.keys())))\r\n\r\ndef compute_rollout_attention(all_layer_matrices, start_layer=0):\r\n # adding residual consideration\r\n num_tokens = all_layer_matrices[0].shape[1]\r\n batch_size = all_layer_matrices[0].shape[0]\r\n eye = torch.eye(num_tokens).expand(batch_size, num_tokens, num_tokens).to(all_layer_matrices[0].device)\r\n all_layer_matrices = [all_layer_matrices[i] + eye for i in range(len(all_layer_matrices))]\r\n all_layer_matrices = [all_layer_matrices[i] / all_layer_matrices[i].sum(dim=-1, keepdim=True)\r\n for i in range(len(all_layer_matrices))]\r\n joint_attention = all_layer_matrices[start_layer]\r\n for i in range(start_layer+1, len(all_layer_matrices)):\r\n joint_attention = all_layer_matrices[i].bmm(joint_attention)\r\n return joint_attention\r\n\r\nclass BertEmbeddings(nn.Module):\r\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\r\n\r\n def __init__(self, config):\r\n super().__init__()\r\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\r\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\r\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\r\n\r\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\r\n # any TensorFlow checkpoint file\r\n self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n self.dropout = Dropout(config.hidden_dropout_prob)\r\n\r\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\r\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\r\n\r\n self.add1 = Add()\r\n self.add2 = Add()\r\n\r\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\r\n if input_ids is not None:\r\n input_shape = input_ids.size()\r\n else:\r\n input_shape = inputs_embeds.size()[:-1]\r\n\r\n seq_length = input_shape[1]\r\n\r\n if position_ids is None:\r\n position_ids = self.position_ids[:, :seq_length]\r\n\r\n if token_type_ids is None:\r\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\r\n\r\n if inputs_embeds is None:\r\n inputs_embeds = self.word_embeddings(input_ids)\r\n position_embeddings = self.position_embeddings(position_ids)\r\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\r\n\r\n # embeddings = inputs_embeds + position_embeddings + token_type_embeddings\r\n embeddings = self.add1([token_type_embeddings, position_embeddings])\r\n embeddings = self.add2([embeddings, inputs_embeds])\r\n embeddings = self.LayerNorm(embeddings)\r\n embeddings = self.dropout(embeddings)\r\n return embeddings\r\n\r\n def relprop(self, cam, 
**kwargs):\r\n cam = self.dropout.relprop(cam, **kwargs)\r\n cam = self.LayerNorm.relprop(cam, **kwargs)\r\n\r\n # [inputs_embeds, position_embeddings, token_type_embeddings]\r\n (cam) = self.add2.relprop(cam, **kwargs)\r\n\r\n return cam\r\n\r\nclass BertEncoder(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.config = config\r\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n output_attentions=False,\r\n output_hidden_states=False,\r\n return_dict=False,\r\n ):\r\n all_hidden_states = () if output_hidden_states else None\r\n all_attentions = () if output_attentions else None\r\n for i, layer_module in enumerate(self.layer):\r\n if output_hidden_states:\r\n all_hidden_states = all_hidden_states + (hidden_states,)\r\n\r\n layer_head_mask = head_mask[i] if head_mask is not None else None\r\n\r\n if getattr(self.config, \"gradient_checkpointing\", False):\r\n\r\n def create_custom_forward(module):\r\n def custom_forward(*inputs):\r\n return module(*inputs, output_attentions)\r\n\r\n return custom_forward\r\n\r\n layer_outputs = torch.utils.checkpoint.checkpoint(\r\n create_custom_forward(layer_module),\r\n hidden_states,\r\n attention_mask,\r\n layer_head_mask,\r\n )\r\n else:\r\n layer_outputs = layer_module(\r\n hidden_states,\r\n attention_mask,\r\n layer_head_mask,\r\n output_attentions,\r\n )\r\n hidden_states = layer_outputs[0]\r\n if output_attentions:\r\n all_attentions = all_attentions + (layer_outputs[1],)\r\n\r\n if output_hidden_states:\r\n all_hidden_states = all_hidden_states + (hidden_states,)\r\n\r\n if not return_dict:\r\n return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)\r\n return BaseModelOutput(\r\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions\r\n )\r\n\r\n def relprop(self, cam, **kwargs):\r\n # assuming output_hidden_states is False\r\n for layer_module in reversed(self.layer):\r\n cam = layer_module.relprop(cam, **kwargs)\r\n return cam\r\n\r\n# not adding relprop since this is only pooling at the end of the network, does not impact tokens importance\r\nclass BertPooler(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = Linear(config.hidden_size, config.hidden_size)\r\n self.activation = Tanh()\r\n self.pool = IndexSelect()\r\n\r\n def forward(self, hidden_states):\r\n # We \"pool\" the model by simply taking the hidden state corresponding\r\n # to the first token.\r\n self._seq_size = hidden_states.shape[1]\r\n\r\n # first_token_tensor = hidden_states[:, 0]\r\n first_token_tensor = self.pool(hidden_states, 1, torch.tensor(0, device=hidden_states.device))\r\n first_token_tensor = first_token_tensor.squeeze(1)\r\n pooled_output = self.dense(first_token_tensor)\r\n pooled_output = self.activation(pooled_output)\r\n return pooled_output\r\n\r\n def relprop(self, cam, **kwargs):\r\n cam = self.activation.relprop(cam, **kwargs)\r\n #print(cam.sum())\r\n cam = self.dense.relprop(cam, **kwargs)\r\n #print(cam.sum())\r\n cam = cam.unsqueeze(1)\r\n cam = self.pool.relprop(cam, **kwargs)\r\n #print(cam.sum())\r\n\r\n return cam\r\n\r\nclass BertAttention(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.self = BertSelfAttention(config)\r\n self.output = BertSelfOutput(config)\r\n self.pruned_heads = 
set()\r\n self.clone = Clone()\r\n\r\n def prune_heads(self, heads):\r\n if len(heads) == 0:\r\n return\r\n heads, index = find_pruneable_heads_and_indices(\r\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\r\n )\r\n\r\n # Prune linear layers\r\n self.self.query = prune_linear_layer(self.self.query, index)\r\n self.self.key = prune_linear_layer(self.self.key, index)\r\n self.self.value = prune_linear_layer(self.self.value, index)\r\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\r\n\r\n # Update hyper params and store pruned heads\r\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\r\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\r\n self.pruned_heads = self.pruned_heads.union(heads)\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n output_attentions=False,\r\n ):\r\n h1, h2 = self.clone(hidden_states, 2)\r\n self_outputs = self.self(\r\n h1,\r\n attention_mask,\r\n head_mask,\r\n encoder_hidden_states,\r\n encoder_attention_mask,\r\n output_attentions,\r\n )\r\n attention_output = self.output(self_outputs[0], h2)\r\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\r\n return outputs\r\n\r\n def relprop(self, cam, **kwargs):\r\n # assuming that we don't ouput the attentions (outputs = (attention_output,)), self_outputs=(context_layer,)\r\n (cam1, cam2) = self.output.relprop(cam, **kwargs)\r\n #print(cam1.sum(), cam2.sum(), (cam1 + cam2).sum())\r\n cam1 = self.self.relprop(cam1, **kwargs)\r\n #print(cam1.sum(), cam2.sum(), (cam1 + cam2).sum())\r\n\r\n return self.clone.relprop((cam1, cam2), **kwargs)\r\n\r\nclass BertSelfAttention(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\r\n raise ValueError(\r\n \"The hidden size (%d) is not a multiple of the number of attention \"\r\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\r\n )\r\n\r\n self.num_attention_heads = config.num_attention_heads\r\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\r\n self.all_head_size = self.num_attention_heads * self.attention_head_size\r\n\r\n self.query = Linear(config.hidden_size, self.all_head_size)\r\n self.key = Linear(config.hidden_size, self.all_head_size)\r\n self.value = Linear(config.hidden_size, self.all_head_size)\r\n\r\n self.dropout = Dropout(config.attention_probs_dropout_prob)\r\n\r\n self.matmul1 = MatMul()\r\n self.matmul2 = MatMul()\r\n self.softmax = Softmax(dim=-1)\r\n self.add = Add()\r\n self.mul = Mul()\r\n self.head_mask = None\r\n self.attention_mask = None\r\n self.clone = Clone()\r\n\r\n self.attn_cam = None\r\n self.attn = None\r\n self.attn_gradients = None\r\n\r\n def get_attn(self):\r\n return self.attn\r\n\r\n def save_attn(self, attn):\r\n self.attn = attn\r\n\r\n def save_attn_cam(self, cam):\r\n self.attn_cam = cam\r\n\r\n def get_attn_cam(self):\r\n return self.attn_cam\r\n\r\n def save_attn_gradients(self, attn_gradients):\r\n self.attn_gradients = attn_gradients\r\n\r\n def get_attn_gradients(self):\r\n return self.attn_gradients\r\n\r\n def transpose_for_scores(self, x):\r\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\r\n x = x.view(*new_x_shape)\r\n return x.permute(0, 2, 1, 
3)\r\n\r\n def transpose_for_scores_relprop(self, x):\r\n return x.permute(0, 2, 1, 3).flatten(2)\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n output_attentions=False,\r\n ):\r\n self.head_mask = head_mask\r\n self.attention_mask = attention_mask\r\n\r\n h1, h2, h3 = self.clone(hidden_states, 3)\r\n mixed_query_layer = self.query(h1)\r\n\r\n # If this is instantiated as a cross-attention module, the keys\r\n # and values come from an encoder; the attention mask needs to be\r\n # such that the encoder's padding tokens are not attended to.\r\n if encoder_hidden_states is not None:\r\n mixed_key_layer = self.key(encoder_hidden_states)\r\n mixed_value_layer = self.value(encoder_hidden_states)\r\n attention_mask = encoder_attention_mask\r\n else:\r\n mixed_key_layer = self.key(h2)\r\n mixed_value_layer = self.value(h3)\r\n\r\n query_layer = self.transpose_for_scores(mixed_query_layer)\r\n key_layer = self.transpose_for_scores(mixed_key_layer)\r\n value_layer = self.transpose_for_scores(mixed_value_layer)\r\n\r\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\r\n attention_scores = self.matmul1([query_layer, key_layer.transpose(-1, -2)])\r\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\r\n if attention_mask is not None:\r\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\r\n attention_scores = self.add([attention_scores, attention_mask])\r\n\r\n # Normalize the attention scores to probabilities.\r\n attention_probs = self.softmax(attention_scores)\r\n\r\n self.save_attn(attention_probs)\r\n attention_probs.register_hook(self.save_attn_gradients)\r\n\r\n # This is actually dropping out entire tokens to attend to, which might\r\n # seem a bit unusual, but is taken from the original Transformer paper.\r\n attention_probs = self.dropout(attention_probs)\r\n\r\n # Mask heads if we want to\r\n if head_mask is not None:\r\n attention_probs = attention_probs * head_mask\r\n\r\n context_layer = self.matmul2([attention_probs, value_layer])\r\n\r\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\r\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\r\n context_layer = context_layer.view(*new_context_layer_shape)\r\n\r\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\r\n return outputs\r\n\r\n def relprop(self, cam, **kwargs):\r\n # Assume output_attentions == False\r\n cam = self.transpose_for_scores(cam)\r\n\r\n # [attention_probs, value_layer]\r\n (cam1, cam2) = self.matmul2.relprop(cam, **kwargs)\r\n cam1 /= 2\r\n cam2 /= 2\r\n if self.head_mask is not None:\r\n # [attention_probs, head_mask]\r\n (cam1, _)= self.mul.relprop(cam1, **kwargs)\r\n\r\n\r\n self.save_attn_cam(cam1)\r\n\r\n cam1 = self.dropout.relprop(cam1, **kwargs)\r\n\r\n cam1 = self.softmax.relprop(cam1, **kwargs)\r\n\r\n if self.attention_mask is not None:\r\n # [attention_scores, attention_mask]\r\n (cam1, _) = self.add.relprop(cam1, **kwargs)\r\n\r\n # [query_layer, key_layer.transpose(-1, -2)]\r\n (cam1_1, cam1_2) = self.matmul1.relprop(cam1, **kwargs)\r\n cam1_1 /= 2\r\n cam1_2 /= 2\r\n\r\n # query\r\n cam1_1 = self.transpose_for_scores_relprop(cam1_1)\r\n cam1_1 = self.query.relprop(cam1_1, **kwargs)\r\n\r\n # key\r\n cam1_2 = self.transpose_for_scores_relprop(cam1_2.transpose(-1, -2))\r\n cam1_2 = self.key.relprop(cam1_2, 
**kwargs)\r\n\r\n # value\r\n cam2 = self.transpose_for_scores_relprop(cam2)\r\n cam2 = self.value.relprop(cam2, **kwargs)\r\n\r\n cam = self.clone.relprop((cam1_1, cam1_2, cam2), **kwargs)\r\n\r\n return cam\r\n\r\n\r\nclass BertSelfOutput(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = Linear(config.hidden_size, config.hidden_size)\r\n self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n self.dropout = Dropout(config.hidden_dropout_prob)\r\n self.add = Add()\r\n\r\n def forward(self, hidden_states, input_tensor):\r\n hidden_states = self.dense(hidden_states)\r\n hidden_states = self.dropout(hidden_states)\r\n add = self.add([hidden_states, input_tensor])\r\n hidden_states = self.LayerNorm(add)\r\n return hidden_states\r\n\r\n def relprop(self, cam, **kwargs):\r\n cam = self.LayerNorm.relprop(cam, **kwargs)\r\n # [hidden_states, input_tensor]\r\n (cam1, cam2) = self.add.relprop(cam, **kwargs)\r\n cam1 = self.dropout.relprop(cam1, **kwargs)\r\n cam1 = self.dense.relprop(cam1, **kwargs)\r\n\r\n return (cam1, cam2)\r\n\r\n\r\nclass BertIntermediate(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = Linear(config.hidden_size, config.intermediate_size)\r\n if isinstance(config.hidden_act, str):\r\n self.intermediate_act_fn = ACT2FN[config.hidden_act]()\r\n else:\r\n self.intermediate_act_fn = config.hidden_act\r\n\r\n def forward(self, hidden_states):\r\n hidden_states = self.dense(hidden_states)\r\n hidden_states = self.intermediate_act_fn(hidden_states)\r\n return hidden_states\r\n\r\n def relprop(self, cam, **kwargs):\r\n cam = self.intermediate_act_fn.relprop(cam, **kwargs) # FIXME only ReLU\r\n #print(cam.sum())\r\n cam = self.dense.relprop(cam, **kwargs)\r\n #print(cam.sum())\r\n return cam\r\n\r\n\r\nclass BertOutput(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = Linear(config.intermediate_size, config.hidden_size)\r\n self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\r\n self.dropout = Dropout(config.hidden_dropout_prob)\r\n self.add = Add()\r\n\r\n def forward(self, hidden_states, input_tensor):\r\n hidden_states = self.dense(hidden_states)\r\n hidden_states = self.dropout(hidden_states)\r\n add = self.add([hidden_states, input_tensor])\r\n hidden_states = self.LayerNorm(add)\r\n return hidden_states\r\n\r\n def relprop(self, cam, **kwargs):\r\n # print(\"in\", cam.sum())\r\n cam = self.LayerNorm.relprop(cam, **kwargs)\r\n #print(cam.sum())\r\n # [hidden_states, input_tensor]\r\n (cam1, cam2)= self.add.relprop(cam, **kwargs)\r\n # print(\"add\", cam1.sum(), cam2.sum(), cam1.sum() + cam2.sum())\r\n cam1 = self.dropout.relprop(cam1, **kwargs)\r\n #print(cam1.sum())\r\n cam1 = self.dense.relprop(cam1, **kwargs)\r\n # print(\"dense\", cam1.sum())\r\n\r\n # print(\"out\", cam1.sum() + cam2.sum(), cam1.sum(), cam2.sum())\r\n return (cam1, cam2)\r\n\r\n\r\nclass BertLayer(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n self.attention = BertAttention(config)\r\n self.intermediate = BertIntermediate(config)\r\n self.output = BertOutput(config)\r\n self.clone = Clone()\r\n\r\n def forward(\r\n self,\r\n hidden_states,\r\n attention_mask=None,\r\n head_mask=None,\r\n output_attentions=False,\r\n ):\r\n self_attention_outputs = self.attention(\r\n hidden_states,\r\n attention_mask,\r\n head_mask,\r\n output_attentions=output_attentions,\r\n )\r\n attention_output = self_attention_outputs[0]\r\n outputs = 
self_attention_outputs[1:] # add self attentions if we output attention weights\r\n\r\n ao1, ao2 = self.clone(attention_output, 2)\r\n intermediate_output = self.intermediate(ao1)\r\n layer_output = self.output(intermediate_output, ao2)\r\n\r\n outputs = (layer_output,) + outputs\r\n return outputs\r\n\r\n def relprop(self, cam, **kwargs):\r\n (cam1, cam2) = self.output.relprop(cam, **kwargs)\r\n # print(\"output\", cam1.sum(), cam2.sum(), cam1.sum() + cam2.sum())\r\n cam1 = self.intermediate.relprop(cam1, **kwargs)\r\n # print(\"intermediate\", cam1.sum())\r\n cam = self.clone.relprop((cam1, cam2), **kwargs)\r\n # print(\"clone\", cam.sum())\r\n cam = self.attention.relprop(cam, **kwargs)\r\n # print(\"attention\", cam.sum())\r\n return cam\r\n\r\n\r\nclass BertModel(BertPreTrainedModel):\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.config = config\r\n\r\n self.embeddings = BertEmbeddings(config)\r\n self.encoder = BertEncoder(config)\r\n self.pooler = BertPooler(config)\r\n\r\n self.init_weights()\r\n\r\n def get_input_embeddings(self):\r\n return self.embeddings.word_embeddings\r\n\r\n def set_input_embeddings(self, value):\r\n self.embeddings.word_embeddings = value\r\n\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n token_type_ids=None,\r\n position_ids=None,\r\n head_mask=None,\r\n inputs_embeds=None,\r\n encoder_hidden_states=None,\r\n encoder_attention_mask=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\r\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\r\n if the model is configured as a decoder.\r\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\r\n Mask to avoid performing attention on the padding token indices of the encoder input. 
This mask\r\n is used in the cross-attention if the model is configured as a decoder.\r\n Mask values selected in ``[0, 1]``:\r\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\r\n \"\"\"\r\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\r\n output_hidden_states = (\r\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\r\n )\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n if input_ids is not None and inputs_embeds is not None:\r\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\r\n elif input_ids is not None:\r\n input_shape = input_ids.size()\r\n elif inputs_embeds is not None:\r\n input_shape = inputs_embeds.size()[:-1]\r\n else:\r\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\r\n\r\n device = input_ids.device if input_ids is not None else inputs_embeds.device\r\n\r\n if attention_mask is None:\r\n attention_mask = torch.ones(input_shape, device=device)\r\n if token_type_ids is None:\r\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\r\n\r\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\r\n # ourselves in which case we just need to make it broadcastable to all heads.\r\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\r\n\r\n # If a 2D or 3D attention mask is provided for the cross-attention\r\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\r\n if self.config.is_decoder and encoder_hidden_states is not None:\r\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\r\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\r\n if encoder_attention_mask is None:\r\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\r\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\r\n else:\r\n encoder_extended_attention_mask = None\r\n\r\n # Prepare head mask if needed\r\n # 1.0 in head_mask indicate we keep the head\r\n # attention_probs has shape bsz x n_heads x N x N\r\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\r\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\r\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\r\n\r\n embedding_output = self.embeddings(\r\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\r\n )\r\n\r\n encoder_outputs = self.encoder(\r\n embedding_output,\r\n attention_mask=extended_attention_mask,\r\n head_mask=head_mask,\r\n encoder_hidden_states=encoder_hidden_states,\r\n encoder_attention_mask=encoder_extended_attention_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n sequence_output = encoder_outputs[0]\r\n pooled_output = self.pooler(sequence_output)\r\n\r\n if not return_dict:\r\n return (sequence_output, pooled_output) + encoder_outputs[1:]\r\n\r\n return BaseModelOutputWithPooling(\r\n last_hidden_state=sequence_output,\r\n pooler_output=pooled_output,\r\n hidden_states=encoder_outputs.hidden_states,\r\n attentions=encoder_outputs.attentions,\r\n )\r\n\r\n def relprop(self, cam, **kwargs):\r\n cam = 
self.pooler.relprop(cam, **kwargs)\r\n # print(\"111111111111\",cam.sum())\r\n cam = self.encoder.relprop(cam, **kwargs)\r\n # print(\"222222222222222\", cam.sum())\r\n # print(\"conservation: \", cam.sum())\r\n return cam\r\n\r\n\r\nif __name__ == '__main__':\r\n class Config:\r\n def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob):\r\n self.hidden_size = hidden_size\r\n self.num_attention_heads = num_attention_heads\r\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\r\n\r\n model = BertSelfAttention(Config(1024, 4, 0.1))\r\n x = torch.rand(2, 20, 1024)\r\n x.requires_grad_()\r\n\r\n model.eval()\r\n\r\n y = model.forward(x)\r\n\r\n relprop = model.relprop(torch.rand(2, 20, 1024), (torch.rand(2, 20, 1024),))\r\n\r\n print(relprop[1][0].shape)\r\n"
] | [
[
"torch.save"
],
[
"torch.ones",
"torch.zeros",
"torch.eye",
"torch.nn.Embedding",
"torch.tensor",
"torch.rand",
"torch.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kujaku11/mth5 | [
"b7681335871f3cd1b652276fd93c08554c7538ff",
"b7681335871f3cd1b652276fd93c08554c7538ff"
] | [
"tests/test_filters.py",
"mth5/groups/filter_groups/zpk_filter_group.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 11 10:35:30 2021\n\n:copyright: \n Jared Peacock ([email protected])\n\n:license: MIT\n\n\"\"\"\n# =============================================================================\n# Imports\n# =============================================================================\nimport unittest\nfrom pathlib import Path\nimport numpy as np\n\nfrom mth5 import mth5\nfrom mt_metadata.timeseries.filters import PoleZeroFilter, CoefficientFilter\n\nfn_path = Path(__file__).parent\n# =============================================================================\nmth5.helpers.close_open_files()\n\n\nclass TestFilters(unittest.TestCase):\n \"\"\"\n Test filters to make sure get out what is put in\n \"\"\"\n\n def setUp(self):\n self.fn = fn_path.joinpath(\"filter_test.h5\")\n self.m_obj = mth5.MTH5()\n self.m_obj.open_mth5(self.fn, \"w\")\n self.filter_group = self.m_obj.filters_group\n\n self.zpk = PoleZeroFilter()\n self.zpk.units_in = \"counts\"\n self.zpk.units_out = \"mv\"\n self.zpk.name = \"zpk_test\"\n self.zpk.poles = np.array([1 + 2j, 0, 1 - 2j])\n self.zpk.zeros = np.array([10 - 1j, 10 + 1j])\n\n self.coefficient = CoefficientFilter()\n self.coefficient.units_in = \"volts\"\n self.coefficient.units_out = \"millivolts per meter\"\n self.coefficient.name = \"coefficient_test\"\n self.coefficient.gain = 10.0\n\n self.zpk_group = self.filter_group.add_filter(self.zpk)\n self.coefficient_group = self.filter_group.add_filter(self.coefficient)\n\n def test_zpk_in(self):\n\n self.assertIn(\"zpk_test\", self.filter_group.zpk_group.groups_list)\n\n def test_zpk_name(self):\n self.assertEqual(self.zpk_group.attrs[\"name\"], self.zpk.name)\n\n def test_zpk_units_in(self):\n self.assertEqual(self.zpk_group.attrs[\"units_in\"], self.zpk.units_in)\n\n def test_zpk_units_out(self):\n self.assertEqual(self.zpk_group.attrs[\"units_out\"], self.zpk.units_out)\n\n def test_zpk_poles(self):\n self.assertTrue(\n np.allclose(self.zpk_group[\"poles\"][\"real\"][()], self.zpk.poles.real)\n )\n self.assertTrue(\n np.allclose(self.zpk_group[\"poles\"][\"imag\"][()], self.zpk.poles.imag)\n )\n\n def test_zpk_zeros(self):\n self.assertTrue(\n np.allclose(self.zpk_group[\"zeros\"][\"real\"][()], self.zpk.zeros.real)\n )\n self.assertTrue(\n np.allclose(self.zpk_group[\"zeros\"][\"imag\"][()], self.zpk.zeros.imag)\n )\n\n def test_zpk_out(self):\n new_zpk = self.filter_group.to_filter_object(self.zpk.name)\n\n self.assertTrue(new_zpk == self.zpk)\n\n def test_coefficient_in(self):\n\n self.assertIn(\n \"coefficient_test\", self.filter_group.coefficient_group.groups_list\n )\n\n def test_coefficient_name(self):\n self.assertEqual(self.coefficient_group.attrs[\"name\"], self.coefficient.name)\n\n def test_coefficient_units_in(self):\n self.assertEqual(\n self.coefficient_group.attrs[\"units_in\"], self.coefficient.units_in\n )\n\n def test_coefficient_units_out(self):\n self.assertEqual(\n self.coefficient_group.attrs[\"units_out\"], self.coefficient.units_out\n )\n\n def test_coefficient_out(self):\n new_coefficient = self.filter_group.to_filter_object(self.coefficient.name)\n\n self.assertTrue(new_coefficient == self.coefficient)\n\n def tearDown(self):\n self.m_obj.close_mth5()\n self.fn.unlink()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 9 08:55:16 2021\n\n:copyright: \n Jared Peacock ([email protected])\n\n:license: MIT\n\n\"\"\"\n\n# =============================================================================\n# Imports\n# =============================================================================\nimport numpy as np\n\nfrom mt_metadata.timeseries.filters import PoleZeroFilter\n\nfrom mth5.groups.base import BaseGroup\n\n# =============================================================================\n# ZPK Group\n# =============================================================================\nclass ZPKGroup(BaseGroup):\n \"\"\"\n Container for ZPK type filters\n\n \"\"\"\n\n def __init__(self, group, **kwargs):\n super().__init__(group, **kwargs)\n\n @property\n def filter_dict(self):\n \"\"\"\n\n Dictionary of available ZPK filters\n\n :return: DESCRIPTION\n :rtype: TYPE\n \"\"\"\n f_dict = {}\n for key in self.hdf5_group.keys():\n zpk_group = self.hdf5_group[key]\n f_dict[key] = {\"type\": zpk_group.attrs[\"type\"], \"hdf5_ref\": zpk_group.ref}\n\n return f_dict\n\n def add_filter(self, name, poles, zeros, zpk_metadata):\n \"\"\"\n create an HDF5 group/dataset from information given.\n\n :param name: Nane of the filter\n :type name: string\n :param poles: poles of the filter as complex numbers\n :type poles: np.ndarray(dtype=complex)\n :param zeros: zeros of the filter as complex numbers\n :type zeros: np.ndarray(dtype=comples)\n :param zpk_metadata: metadata dictionary see\n :class:`mt_metadata.timeseries.filters.PoleZeroFilter` for details on entries\n :type zpk_metadata: dictionary\n\n \"\"\"\n # create a group for the filter by the name\n zpk_filter_group = self.hdf5_group.create_group(name)\n\n # create datasets for the poles and zeros\n poles_ds = zpk_filter_group.create_dataset(\n \"poles\",\n poles.shape,\n dtype=np.dtype([(\"real\", float), (\"imag\", float)]),\n **self.dataset_options,\n )\n zeros_ds = zpk_filter_group.create_dataset(\n \"zeros\",\n zeros.shape,\n dtype=np.dtype([(\"real\", float), (\"imag\", float)]),\n **self.dataset_options,\n )\n\n # when filling data need to fill the full row for what ever reason.\n poles_ds[:] = [(pr, pi) for pr, pi in zip(poles.real, poles.imag)]\n zeros_ds[:] = [(pr, pi) for pr, pi in zip(zeros.real, zeros.imag)]\n\n # fill in the metadata\n zpk_filter_group.attrs.update(zpk_metadata)\n\n return zpk_filter_group\n\n def remove_filter(self):\n pass\n\n def get_filter(self, name):\n \"\"\"\n Get a filter from the name\n\n :param name: name of the filter\n :type name: string\n\n :return: HDF5 group of the ZPK filter\n \"\"\"\n return self.hdf5_group[name]\n\n def from_object(self, zpk_object):\n \"\"\"\n make a filter from a :class:`mt_metadata.timeseries.filters.PoleZeroFilter`\n\n :param zpk_object: MT metadata PoleZeroFilter\n :type zpk_object: :class:`mt_metadata.timeseries.filters.PoleZeroFilter`\n\n \"\"\"\n\n if not isinstance(zpk_object, PoleZeroFilter):\n msg = f\"Filter must be a PoleZeroFilter not {type(zpk_object)}\"\n self.logger.error(msg)\n raise TypeError(msg)\n\n input_dict = zpk_object.to_dict(single=True, required=False)\n input_dict.pop(\"poles\")\n input_dict.pop(\"zeros\")\n for k, v in input_dict.items():\n if v is None:\n input_dict[k] = str(v)\n\n zpk_group = self.add_filter(\n zpk_object.name,\n zpk_object.poles,\n zpk_object.zeros,\n input_dict,\n )\n return zpk_group\n\n def to_object(self, name):\n \"\"\"\n make a :class:`mt_metadata.timeseries.filters.pole_zeros_filter` object\n 
:return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n zpk_group = self.get_filter(name)\n\n zpk_obj = PoleZeroFilter(**zpk_group.attrs)\n\n try:\n zpk_obj.poles = (\n zpk_group[\"poles\"][\"real\"][:] + zpk_group[\"poles\"][\"imag\"][:] * 1j\n )\n except TypeError:\n self.logger.debug(f\"ZPK filter {name} has no poles\")\n zpk_obj.poles = []\n\n try:\n zpk_obj.zeros = (\n zpk_group[\"zeros\"][\"real\"][:] + zpk_group[\"zeros\"][\"imag\"][:] * 1j\n )\n except TypeError:\n self.logger.debug(f\"ZPK filter {name} has no zeros\")\n zpk_obj.zeros = []\n\n return zpk_obj\n"
] | [
[
"numpy.array",
"numpy.allclose"
],
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ghgh3269/compression | [
"3e920bc49fa32d79c1c2917583ffb663c6ebac85"
] | [
"tensorflow_compression/python/ops/math_ops_test.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright 2018 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the math operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport tensorflow as tf\nimport tensorflow_compression as tfc\n\n\nclass MathTest(tf.test.TestCase):\n\n def _test_upper_bound(self, gradient):\n inputs = tf.placeholder(dtype=tf.float32)\n outputs = tfc.upper_bound(inputs, 0, gradient=gradient)\n pgrads, = tf.gradients([outputs], [inputs], [tf.ones_like(inputs)])\n ngrads, = tf.gradients([outputs], [inputs], [-tf.ones_like(inputs)])\n\n inputs_feed = [-1, 1]\n outputs_expected = [-1, 0]\n if gradient == \"disconnected\":\n pgrads_expected = [1, 0]\n ngrads_expected = [-1, 0]\n elif gradient == \"identity\":\n pgrads_expected = [1, 1]\n ngrads_expected = [-1, -1]\n else:\n pgrads_expected = [1, 1]\n ngrads_expected = [-1, 0]\n\n with self.test_session() as sess:\n outputs, pgrads, ngrads = sess.run(\n [outputs, pgrads, ngrads], {inputs: inputs_feed})\n self.assertAllEqual(outputs, outputs_expected)\n self.assertAllEqual(pgrads, pgrads_expected)\n self.assertAllEqual(ngrads, ngrads_expected)\n\n def test_upper_bound_disconnected(self):\n self._test_upper_bound(\"disconnected\")\n\n def test_upper_bound_identity(self):\n self._test_upper_bound(\"identity\")\n\n def test_upper_bound_identity_if_towards(self):\n self._test_upper_bound(\"identity_if_towards\")\n\n def test_upper_bound_invalid(self):\n with self.assertRaises(ValueError):\n self._test_upper_bound(\"invalid\")\n\n def _test_lower_bound(self, gradient):\n inputs = tf.placeholder(dtype=tf.float32)\n outputs = tfc.lower_bound(inputs, 0, gradient=gradient)\n pgrads, = tf.gradients([outputs], [inputs], [tf.ones_like(inputs)])\n ngrads, = tf.gradients([outputs], [inputs], [-tf.ones_like(inputs)])\n\n inputs_feed = [-1, 1]\n outputs_expected = [0, 1]\n if gradient == \"disconnected\":\n pgrads_expected = [0, 1]\n ngrads_expected = [0, -1]\n elif gradient == \"identity\":\n pgrads_expected = [1, 1]\n ngrads_expected = [-1, -1]\n else:\n pgrads_expected = [0, 1]\n ngrads_expected = [-1, -1]\n\n with self.test_session() as sess:\n outputs, pgrads, ngrads = sess.run(\n [outputs, pgrads, ngrads], {inputs: inputs_feed})\n self.assertAllEqual(outputs, outputs_expected)\n self.assertAllEqual(pgrads, pgrads_expected)\n self.assertAllEqual(ngrads, ngrads_expected)\n\n def test_lower_bound_disconnected(self):\n self._test_lower_bound(\"disconnected\")\n\n def test_lower_bound_identity(self):\n self._test_lower_bound(\"identity\")\n\n def test_lower_bound_identity_if_towards(self):\n self._test_lower_bound(\"identity_if_towards\")\n\n def test_lower_bound_invalid(self):\n with self.assertRaises(ValueError):\n self._test_lower_bound(\"invalid\")\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.ones_like",
"tensorflow.placeholder",
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
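The MathTest row above pins down the semantics of the three gradient modes of tfc.upper_bound / tfc.lower_bound through its expected gradients. As a reading aid, here is a minimal sketch, assuming TF 1.x and tf.custom_gradient, of an upper-bound op whose "identity_if_towards" gradient reproduces those expectations; the function name is made up and this is not the tensorflow_compression implementation.

import tensorflow as tf

def upper_bound_identity_if_towards(inputs, bound):
    # Sketch only: min(inputs, bound) with an "identity_if_towards" gradient.
    @tf.custom_gradient
    def _op(x):
        y = tf.minimum(x, bound)
        def grad(dy):
            # Pass dy where x is already below the bound, or where the descent
            # step (-dy) would move a clipped x back down towards the bound.
            keep = tf.logical_or(x <= bound, dy > 0)
            return tf.where(keep, dy, tf.zeros_like(dy))
        return y, grad
    return _op(inputs)

Fed with the test's inputs [-1, 1] and bound 0, this yields outputs [-1, 0], positive-gradient [1, 1] and negative-gradient [-1, 0], matching the "identity_if_towards" expectations in _test_upper_bound.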
MECLabTUDA/OOD-Gen | [
"f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e",
"f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e"
] | [
"mp/agents/segmentation_semisup_domain_pred_IRM_seg_agent.py",
"mp/data/pytorch/domain_prediction_dataset_wrapper.py"
] | [
"import torch\n\nfrom mp.agents.segmentation_semisup_domain_pred_agent import SegmentationSemisupDomainPredictionAgent\nfrom mp.data.pytorch.domain_prediction_dataset_wrapper import DomainPredictionDatasetWrapper\nfrom mp.eval.accumulator import Accumulator\nfrom mp.eval.inference.predict import softmax\nfrom mp.utils.domain_prediction_utils import perform_stage1_training_epoch\nfrom mp.utils.early_stopping import EarlyStopping\nfrom mp.utils.helper_functions import zip_longest_with_cycle\n\n\nclass SegmentationSemisupDomainPredictionIRMAgent(SegmentationSemisupDomainPredictionAgent):\n r\"\"\"An Agent for segmentation models using a classifier for the domain space using the features from the encoder\"\"\"\n\n def perform_stage2_training_epoch(self, optimizer_model,\n optimizer_domain_predictor,\n optimizer_encoder,\n irm_loss_f_classifier,\n loss_f_domain_pred,\n loss_f_encoder,\n train_dataloaders_seg,\n train_dataloaders_dp,\n beta,\n print_run_loss=False):\n r\"\"\"Perform a stage 2 training epoch,\n meaning that the encoder, classifier and domain predictor are all trained one after the other\n\n Args:\n print_run_loss (bool): whether a running loss should be tracked and printed.\n \"\"\"\n # The main difference in this semi-sup version is that\n # the domain predictor is trained using another set of dataloaders\n\n acc = Accumulator('loss')\n # We zip the dataloaders for segmentor and domain predictor\n # Each of these lists of dataloaders contains a dataloader per dataset\n for data_list_seg, data_list_dp in zip(zip_longest_with_cycle(*train_dataloaders_seg),\n zip_longest_with_cycle(*train_dataloaders_dp)):\n classifier_losses = []\n classifier_penalties = []\n\n # For each dataloader\n for data_seg in data_list_seg:\n # Get data for the segmentor\n inputs, targets = self.get_inputs_targets(data_seg)\n\n # Forward pass for the classification\n # Here we cannot use self.get_outputs(inputs)\n feature = self.model.get_features_from_encoder(inputs)\n classifier_output = softmax(self.model.get_classification_from_features(feature))\n\n # Store losses and predictions\n classifier_losses.append(irm_loss_f_classifier.erm(classifier_output, targets))\n classifier_penalties.append(irm_loss_f_classifier(classifier_output, targets))\n\n # Model Optimization step\n optimizer_model.zero_grad()\n\n loss = irm_loss_f_classifier.finalize_loss(classifier_losses, classifier_penalties)\n acc.add('loss', float(loss.detach().cpu()))\n\n loss.backward(retain_graph=True)\n optimizer_model.step()\n\n # Domain Predictor Optimization step\n data_lengths = [] # Is used to produce the domain targets on the fly\n features = []\n # For each dataloader\n for data_dp in data_list_dp:\n # Get data\n inputs, _ = self.get_inputs_targets(data_dp)\n features.append(self.model.get_features_from_encoder(inputs))\n data_lengths.append(inputs.shape[0])\n\n optimizer_domain_predictor.zero_grad()\n features = torch.cat(features, dim=0)\n domain_pred = self.model.get_domain_prediction_from_features(features.detach())\n\n domain_targets = self._create_domain_targets(data_lengths)\n\n loss_dm = loss_f_domain_pred(domain_pred, domain_targets)\n loss_dm.backward(retain_graph=False)\n optimizer_domain_predictor.step()\n\n # Encoder Optimization step based on domain prediction loss\n features = []\n for data_dp in data_list_dp:\n # Get data\n inputs, _ = self.get_inputs_targets(data_dp)\n feature = self.model.get_features_from_encoder(inputs)\n features.append(feature)\n features = torch.cat(features, dim=0)\n\n 
optimizer_encoder.zero_grad()\n domain_pred = self.model.get_domain_prediction_from_features(features)\n loss_encoder = beta * loss_f_encoder(domain_pred, domain_targets)\n loss_encoder.backward(retain_graph=False)\n optimizer_encoder.step()\n\n if print_run_loss:\n print('\\nRunning loss: {}'.format(acc.mean('loss')))\n",
"from mp.data.pytorch.pytorch_dataset import PytorchDataset\nfrom mp.data.datasets.dataset import Instance\nimport copy\nimport torch\n\n\nclass DomainPredictionDatasetWrapper(PytorchDataset):\n r\"\"\"Wraps a PytorchDataset to reuse its instances.x and replacing the labels\"\"\"\n\n def __init__(self, pytorch_ds, target_idx):\n \"\"\"\n Args:\n pytorch_ds (PytorchSegmentationDataset): the Dataset that need to be wrapped\n target_idx (int): the target idx for domain prediction, corresponding to this dataset\n \"\"\"\n\n class Dummy:\n def __init__(self):\n self.instances = pytorch_ds.instances\n self.hold_out_ixs = []\n\n self.original_ds = pytorch_ds\n\n # Ugly\n # noinspection PyTypeChecker\n super().__init__(dataset=Dummy(), size=pytorch_ds.size)\n # Copy the predictor, but prevent it from reshaping the prediction\n self.predictor = copy.copy(pytorch_ds.predictor)\n self.predictor.reshape_pred = False\n\n # Create new target as one hot encoded\n # self.target = torch.zeros((1, target_cnt), dtype=self.instances[0].y.tensor.dtype)\n # self.target[:, target_idx] = 1\n self.target = torch.tensor([target_idx], dtype=self.instances[0].y.tensor.dtype)\n\n # Modify instances\n self.instances = [Instance(inst.x, self.target, inst.name, inst.class_ix, inst.group_id)\n for inst in self.instances]\n\n def get_subject_dataloader(self, subject_ix):\n r\"\"\"Get a list of input/target pairs equivalent to those if the dataset\n was only of subject with index subject_ix. For evaluation purposes.\n \"\"\"\n # Generate the original subject dataloader and replace the target\n subject_dataloader = self.original_ds.get_subject_dataloader(subject_ix)\n return [(x, self.target) for x, _ in subject_dataloader]\n"
] | [
[
"torch.cat"
],
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
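The semi-supervised agent in the row above calls self._create_domain_targets(data_lengths), which is not included in this row; the domain-prediction loss only needs one integer domain label per sample of the concatenated feature batch. A small sketch of what such a helper could look like (the name and signature here are hypothetical, not the repository's actual method):

import torch

def create_domain_targets(data_lengths, device='cpu'):
    # Hypothetical stand-in for _create_domain_targets: dataset i contributed
    # data_lengths[i] samples to the concatenated batch, so it receives
    # data_lengths[i] copies of the integer label i.
    labels = [torch.full((n,), i, dtype=torch.long, device=device)
              for i, n in enumerate(data_lengths)]
    return torch.cat(labels, dim=0)

# create_domain_targets([4, 2, 3]) -> tensor([0, 0, 0, 0, 1, 1, 2, 2, 2])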
Jean1995/Masterarbeit | [
"d7d6c4a2031f715fa788cc8d498f339d2df675ee"
] | [
"shower/plot_shower.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport sys\nimport matplotlib\nimport matplotlib.cm as cm\nfrom matplotlib.lines import Line2D\n\nfrom matplotlibconfig import *\n\nx_i_list, y_i_list, z_i_list, x_f_list, y_f_list, z_f_list, ID_list, energy_i_list = np.genfromtxt(sys.argv[1], unpack=True)\n\nconversion_y = 100 # cm in m\nconversion_z = 100000 # cm in km\n\nx_i_list /= conversion_y\ny_i_list /= conversion_y\nz_i_list /= conversion_z\nx_f_list /= conversion_y\ny_f_list /= conversion_y\nz_f_list /= conversion_z\n\nvec_i = np.array([x_i_list, y_i_list, z_i_list]).T\nvec_f = np.array([x_f_list, y_f_list, z_f_list]).T\n\n## choose projection plane here\nprojection_vec_1 = np.array([1,0,0])\nprojection_vec_2 = np.array([0,0,1])\n\nproj_1_i_list = np.matmul(vec_i, projection_vec_1)\nproj_2_i_list = np.matmul(vec_i, projection_vec_2)\n\nproj_1_f_list = np.matmul(vec_f, projection_vec_1)\nproj_2_f_list = np.matmul(vec_f, projection_vec_2)\n\n\ncount_electron = 0\ncount_positron = 0\ncount_photon = 0\n\nplt.figure(figsize=(2.7, 5))\nplt.rcParams.update(params)\n\nfor proj_1_i, proj_2_i, proj_1_f, proj_2_f, ID, energy_i in zip(proj_1_i_list, proj_2_i_list, proj_1_f_list, proj_2_f_list, ID_list, energy_i_list):\n\n\tif(energy_i < 50):\n\t\tcontinue\n\n\tif(ID == 0 or ID == 3):\n\t\tplt.plot([proj_1_i,proj_1_f], [proj_2_i,proj_2_f], 'b-', linewidth=0.3, alpha = 1)\n\t\tcount_photon+=1\n\telif(ID == 1):\n\t\tplt.plot([proj_1_i,proj_1_f], [proj_2_i,proj_2_f], 'g-', linewidth=0.3, alpha = 1)\n\t\tcount_electron+=1\n\telif(ID == 2):\n\t\tplt.plot([proj_1_i,proj_1_f], [proj_2_i,proj_2_f], 'r-', linewidth=0.3, alpha = 1)\n\t\tcount_positron+=1\n\telse:\n\t\tprint(\"Unknown particle_id\")\n\n\n## custom legend\n\nprint(\"Showing \" + str(count_photon) + \" Photons, \" + str(count_electron) + \" electrons and \" + str(count_positron) + \" positrons\")\n\ncustom_lines = [Line2D([0], [0], color='b', lw=2),\n Line2D([0], [0], color='g', lw=2),\n Line2D([0], [0], color='r', lw=2)]\n\nplt.legend(custom_lines, ['Photon', 'Electron', 'Positron'], loc='best')\n\nplt.grid(True)\nplt.xlabel(r'$y \\,/\\, \\si{\\metre}$')\nplt.ylabel(r'$z \\,/\\, \\si{\\kilo\\metre}$', labelpad=0)\nplt.xlim(-10000/conversion_y, 10000/conversion_y)\nplt.ylim(200000/conversion_z, 1000000/conversion_z)\nplt.tight_layout()\n\nif(len(sys.argv)==2):\n\tplt.show()\nelse:\n\tplt.savefig(sys.argv[2], bbox_inches='tight', dpi=500)\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"matplotlib.lines.Line2D",
"numpy.matmul",
"matplotlib.pyplot.savefig",
"numpy.genfromtxt",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
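plot_shower.py in the row above reduces each 3D track segment to two plot coordinates by taking dot products of the start and end points with a pair of projection vectors (the x-z plane in the script). A tiny sketch of that projection step in isolation, with illustrative numbers:

import numpy as np

points = np.array([[1.0, 2.0, 3.0],
                   [4.0, 5.0, 6.0]])   # (N, 3) track endpoints
e1 = np.array([1.0, 0.0, 0.0])         # horizontal plot axis
e2 = np.array([0.0, 0.0, 1.0])         # vertical plot axis
u = np.matmul(points, e1)              # -> array([1., 4.])
v = np.matmul(points, e2)              # -> array([3., 6.])
# Swapping e1 for [0, 1, 0] would select the y-z viewing plane instead.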
ZhekehZ/catboost | [
"de75c6af12cf490700e76c22072fbdc15b35d679",
"3f774da539b8e57cca25686b89c473cbd1f61a6c"
] | [
"contrib/python/scipy/scipy/sparse/csr.py",
"catboost/pytest/cuda_tests/test_gpu.py"
] | [
"\"\"\"Compressed Sparse Row matrix format\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\n__docformat__ = \"restructuredtext en\"\n\n__all__ = ['csr_matrix', 'isspmatrix_csr']\n\n\nimport numpy as np\nfrom scipy._lib.six import xrange\n\nfrom .base import spmatrix\n\nfrom ._sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \\\n get_csr_submatrix, csr_sample_values\nfrom .sputils import (upcast, isintlike, IndexMixin, issequence,\n get_index_dtype, ismatrix)\n\nfrom .compressed import _cs_matrix\n\n\nclass csr_matrix(_cs_matrix, IndexMixin):\n \"\"\"\n Compressed Sparse Row matrix\n\n This can be instantiated in several ways:\n csr_matrix(D)\n with a dense matrix or rank-2 ndarray D\n\n csr_matrix(S)\n with another sparse matrix S (equivalent to S.tocsr())\n\n csr_matrix((M, N), [dtype])\n to construct an empty matrix with shape (M, N)\n dtype is optional, defaulting to dtype='d'.\n\n csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)])\n where ``data``, ``row_ind`` and ``col_ind`` satisfy the\n relationship ``a[row_ind[k], col_ind[k]] = data[k]``.\n\n csr_matrix((data, indices, indptr), [shape=(M, N)])\n is the standard CSR representation where the column indices for\n row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their\n corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.\n If the shape parameter is not supplied, the matrix dimensions\n are inferred from the index arrays.\n\n Attributes\n ----------\n dtype : dtype\n Data type of the matrix\n shape : 2-tuple\n Shape of the matrix\n ndim : int\n Number of dimensions (this is always 2)\n nnz\n Number of nonzero elements\n data\n CSR format data array of the matrix\n indices\n CSR format index array of the matrix\n indptr\n CSR format index pointer array of the matrix\n has_sorted_indices\n Whether indices are sorted\n\n Notes\n -----\n\n Sparse matrices can be used in arithmetic operations: they support\n addition, subtraction, multiplication, division, and matrix power.\n\n Advantages of the CSR format\n - efficient arithmetic operations CSR + CSR, CSR * CSR, etc.\n - efficient row slicing\n - fast matrix vector products\n\n Disadvantages of the CSR format\n - slow column slicing operations (consider CSC)\n - changes to the sparsity structure are expensive (consider LIL or DOK)\n\n Examples\n --------\n\n >>> import numpy as np\n >>> from scipy.sparse import csr_matrix\n >>> csr_matrix((3, 4), dtype=np.int8).toarray()\n array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=int8)\n\n >>> row = np.array([0, 0, 1, 2, 2, 2])\n >>> col = np.array([0, 2, 2, 0, 1, 2])\n >>> data = np.array([1, 2, 3, 4, 5, 6])\n >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()\n array([[1, 0, 2],\n [0, 0, 3],\n [4, 5, 6]])\n\n >>> indptr = np.array([0, 2, 3, 6])\n >>> indices = np.array([0, 2, 2, 0, 1, 2])\n >>> data = np.array([1, 2, 3, 4, 5, 6])\n >>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()\n array([[1, 0, 2],\n [0, 0, 3],\n [4, 5, 6]])\n\n As an example of how to construct a CSR matrix incrementally,\n the following snippet builds a term-document matrix from texts:\n\n >>> docs = [[\"hello\", \"world\", \"hello\"], [\"goodbye\", \"cruel\", \"world\"]]\n >>> indptr = [0]\n >>> indices = []\n >>> data = []\n >>> vocabulary = {}\n >>> for d in docs:\n ... for term in d:\n ... index = vocabulary.setdefault(term, len(vocabulary))\n ... indices.append(index)\n ... data.append(1)\n ... 
indptr.append(len(indices))\n ...\n >>> csr_matrix((data, indices, indptr), dtype=int).toarray()\n array([[2, 1, 0, 0],\n [0, 1, 1, 1]])\n\n \"\"\"\n format = 'csr'\n\n def transpose(self, axes=None, copy=False):\n if axes is not None:\n raise ValueError((\"Sparse matrices do not support \"\n \"an 'axes' parameter because swapping \"\n \"dimensions is the only logical permutation.\"))\n\n M, N = self.shape\n\n from .csc import csc_matrix\n return csc_matrix((self.data, self.indices,\n self.indptr), shape=(N, M), copy=copy)\n\n transpose.__doc__ = spmatrix.transpose.__doc__\n\n def tolil(self, copy=False):\n from .lil import lil_matrix\n lil = lil_matrix(self.shape,dtype=self.dtype)\n\n self.sum_duplicates()\n ptr,ind,dat = self.indptr,self.indices,self.data\n rows, data = lil.rows, lil.data\n\n for n in xrange(self.shape[0]):\n start = ptr[n]\n end = ptr[n+1]\n rows[n] = ind[start:end].tolist()\n data[n] = dat[start:end].tolist()\n\n return lil\n\n tolil.__doc__ = spmatrix.tolil.__doc__\n\n def tocsr(self, copy=False):\n if copy:\n return self.copy()\n else:\n return self\n\n tocsr.__doc__ = spmatrix.tocsr.__doc__\n\n def tocsc(self, copy=False):\n idx_dtype = get_index_dtype((self.indptr, self.indices),\n maxval=max(self.nnz, self.shape[0]))\n indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)\n indices = np.empty(self.nnz, dtype=idx_dtype)\n data = np.empty(self.nnz, dtype=upcast(self.dtype))\n\n csr_tocsc(self.shape[0], self.shape[1],\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n self.data,\n indptr,\n indices,\n data)\n\n from .csc import csc_matrix\n A = csc_matrix((data, indices, indptr), shape=self.shape)\n A.has_sorted_indices = True\n return A\n\n tocsr.__doc__ = spmatrix.tocsr.__doc__\n\n def tobsr(self, blocksize=None, copy=True):\n from .bsr import bsr_matrix\n\n if blocksize is None:\n from .spfuncs import estimate_blocksize\n return self.tobsr(blocksize=estimate_blocksize(self))\n\n elif blocksize == (1,1):\n arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)\n return bsr_matrix(arg1, shape=self.shape, copy=copy)\n\n else:\n R,C = blocksize\n M,N = self.shape\n\n if R < 1 or C < 1 or M % R != 0 or N % C != 0:\n raise ValueError('invalid blocksize %s' % blocksize)\n\n blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices)\n\n idx_dtype = get_index_dtype((self.indptr, self.indices),\n maxval=max(N//C, blks))\n indptr = np.empty(M//R+1, dtype=idx_dtype)\n indices = np.empty(blks, dtype=idx_dtype)\n data = np.zeros((blks,R,C), dtype=self.dtype)\n\n csr_tobsr(M, N, R, C,\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n self.data,\n indptr, indices, data.ravel())\n\n return bsr_matrix((data,indices,indptr), shape=self.shape)\n\n tobsr.__doc__ = spmatrix.tobsr.__doc__\n\n # these functions are used by the parent class (_cs_matrix)\n # to remove redudancy between csc_matrix and csr_matrix\n def _swap(self,x):\n \"\"\"swap the members of x if this is a column-oriented matrix\n \"\"\"\n return (x[0],x[1])\n\n def __getitem__(self, key):\n def asindices(x):\n try:\n x = np.asarray(x)\n\n # Check index contents, to avoid creating 64-bit arrays needlessly\n idx_dtype = get_index_dtype((x,), check_contents=True)\n if idx_dtype != x.dtype:\n x = x.astype(idx_dtype)\n except:\n raise IndexError('invalid index')\n else:\n return x\n\n def check_bounds(indices, N):\n if indices.size == 0:\n return (0, 0)\n\n max_indx = indices.max()\n if max_indx >= N:\n raise IndexError('index (%d) out of range' % max_indx)\n\n min_indx = 
indices.min()\n if min_indx < -N:\n raise IndexError('index (%d) out of range' % (N + min_indx))\n\n return (min_indx,max_indx)\n\n def extractor(indices,N):\n \"\"\"Return a sparse matrix P so that P*self implements\n slicing of the form self[[1,2,3],:]\n \"\"\"\n indices = asindices(indices)\n\n (min_indx,max_indx) = check_bounds(indices,N)\n\n if min_indx < 0:\n indices = indices.copy()\n indices[indices < 0] += N\n\n indptr = np.arange(len(indices)+1, dtype=indices.dtype)\n data = np.ones(len(indices), dtype=self.dtype)\n shape = (len(indices),N)\n\n return csr_matrix((data,indices,indptr), shape=shape)\n\n row, col = self._unpack_index(key)\n\n # First attempt to use original row optimized methods\n # [1, ?]\n if isintlike(row):\n # [i, j]\n if isintlike(col):\n return self._get_single_element(row, col)\n # [i, 1:2]\n elif isinstance(col, slice):\n return self._get_row_slice(row, col)\n # [i, [1, 2]]\n elif issequence(col):\n P = extractor(col,self.shape[1]).T\n return self[row, :] * P\n elif isinstance(row, slice):\n # [1:2,??]\n if ((isintlike(col) and row.step in (1, None)) or\n (isinstance(col, slice) and\n col.step in (1, None) and\n row.step in (1, None))):\n # col is int or slice with step 1, row is slice with step 1.\n return self._get_submatrix(row, col)\n elif issequence(col):\n # row is slice, col is sequence.\n P = extractor(col,self.shape[1]).T # [1:2,[1,2]]\n sliced = self\n if row != slice(None, None, None):\n sliced = sliced[row,:]\n return sliced * P\n\n elif issequence(row):\n # [[1,2],??]\n if isintlike(col) or isinstance(col,slice):\n P = extractor(row, self.shape[0]) # [[1,2],j] or [[1,2],1:2]\n extracted = P * self\n if col == slice(None, None, None):\n return extracted\n else:\n return extracted[:,col]\n\n elif ismatrix(row) and issequence(col):\n if len(row[0]) == 1 and isintlike(row[0][0]):\n # [[[1],[2]], [1,2]], outer indexing\n row = asindices(row)\n P_row = extractor(row[:,0], self.shape[0])\n P_col = extractor(col, self.shape[1]).T\n return P_row * self * P_col\n\n if not (issequence(col) and issequence(row)):\n # Sample elementwise\n row, col = self._index_to_arrays(row, col)\n\n row = asindices(row)\n col = asindices(col)\n if row.shape != col.shape:\n raise IndexError('number of row and column indices differ')\n assert row.ndim <= 2\n\n num_samples = np.size(row)\n if num_samples == 0:\n return csr_matrix(np.atleast_2d(row).shape, dtype=self.dtype)\n check_bounds(row, self.shape[0])\n check_bounds(col, self.shape[1])\n\n val = np.empty(num_samples, dtype=self.dtype)\n csr_sample_values(self.shape[0], self.shape[1],\n self.indptr, self.indices, self.data,\n num_samples, row.ravel(), col.ravel(), val)\n if row.ndim == 1:\n # row and col are 1d\n return np.asmatrix(val)\n return self.__class__(val.reshape(row.shape))\n\n def getrow(self, i):\n \"\"\"Returns a copy of row i of the matrix, as a (1 x n)\n CSR matrix (row vector).\n \"\"\"\n return self._get_submatrix(i, slice(None))\n\n def getcol(self, i):\n \"\"\"Returns a copy of column i of the matrix, as a (m x 1)\n CSR matrix (column vector).\n \"\"\"\n return self._get_submatrix(slice(None), i)\n\n def _get_row_slice(self, i, cslice):\n \"\"\"Returns a copy of row self[i, cslice]\n \"\"\"\n if i < 0:\n i += self.shape[0]\n\n if i < 0 or i >= self.shape[0]:\n raise IndexError('index (%d) out of range' % i)\n\n start, stop, stride = cslice.indices(self.shape[1])\n\n if stride == 1:\n # for stride == 1, _get_submatrix is ~30% faster than below\n row_slice = self._get_submatrix(i, cslice)\n\n else:\n # 
other strides need new code\n row_indices = self.indices[self.indptr[i]:self.indptr[i + 1]]\n row_data = self.data[self.indptr[i]:self.indptr[i + 1]]\n\n if stride > 0:\n ind = (row_indices >= start) & (row_indices < stop)\n elif stride < 0:\n ind = (row_indices <= start) & (row_indices > stop)\n\n if abs(stride) > 1:\n ind = ind & ((row_indices - start) % stride == 0)\n\n row_indices = (row_indices[ind] - start) // stride\n row_data = row_data[ind]\n row_indptr = np.array([0, len(row_indices)])\n\n if stride < 0:\n row_data = row_data[::-1]\n row_indices = abs(row_indices[::-1])\n\n shape = (1, int(np.ceil(float(stop - start) / stride)))\n\n row_slice = csr_matrix((row_data, row_indices, row_indptr),\n shape=shape)\n\n return row_slice\n\n def _get_submatrix(self, row_slice, col_slice):\n \"\"\"Return a submatrix of this matrix (new matrix is created).\"\"\"\n\n M,N = self.shape\n\n def process_slice(sl, num):\n if isinstance(sl, slice):\n if sl.step not in (1, None):\n raise ValueError('slicing with step != 1 not supported')\n i0, i1 = sl.start, sl.stop\n if i0 is None:\n i0 = 0\n elif i0 < 0:\n i0 = num + i0\n\n if i1 is None:\n i1 = num\n elif i1 < 0:\n i1 = num + i1\n return i0, i1\n\n elif isintlike(sl):\n if sl < 0:\n sl += num\n return sl, sl + 1\n else:\n raise TypeError('expected slice or scalar')\n\n def check_bounds(i0, i1, num):\n if not (0 <= i0 <= num) or not (0 <= i1 <= num) or not (i0 <= i1):\n raise IndexError(\n \"index out of bounds: 0 <= %d <= %d, 0 <= %d <= %d,\"\n \" %d <= %d\" % (i0, num, i1, num, i0, i1))\n\n i0, i1 = process_slice(row_slice, M)\n j0, j1 = process_slice(col_slice, N)\n check_bounds(i0, i1, M)\n check_bounds(j0, j1, N)\n\n indptr, indices, data = get_csr_submatrix(M, N,\n self.indptr, self.indices, self.data,\n int(i0), int(i1), int(j0), int(j1))\n\n shape = (i1 - i0, j1 - j0)\n\n return self.__class__((data,indices,indptr), shape=shape)\n\ndef isspmatrix_csr(x):\n return isinstance(x, csr_matrix)\n",
"import catboost\nimport filecmp\nimport json\nimport numpy as np\nimport os\nimport pytest\nimport re\nimport yatest.common\n\nfrom catboost_pytest_lib import (\n append_params_to_cmdline,\n apply_catboost,\n compare_evals_with_precision,\n compare_fit_evals_with_precision,\n compare_metrics_with_diff,\n data_file,\n execute_catboost_fit,\n format_crossvalidation,\n get_limited_precision_dsv_diff_tool,\n local_canonical_file,\n)\n\nCATBOOST_PATH = yatest.common.binary_path(\"catboost/app/catboost\")\nBOOSTING_TYPE = ['Ordered', 'Plain']\nMULTICLASS_LOSSES = ['MultiClass', 'MultiClassOneVsAll']\nNONSYMMETRIC = ['Lossguide', 'Depthwise']\nGROW_POLICIES = ['SymmetricTree'] + NONSYMMETRIC\nSCORE_FUNCTIONS = [\n 'L2', 'Cosine',\n 'NewtonL2', 'NewtonCosine',\n 'SolarL2', 'LOOL2'\n]\n\nTEXT_FEATURE_ESTIMATORS = [\n 'BoW',\n 'NaiveBayes',\n 'BM25',\n 'BoW,NaiveBayes',\n 'BoW,NaiveBayes,BM25'\n]\n\n\ndef generate_concatenated_random_labeled_dataset(nrows, nvals, labels, seed=20181219, prng=None):\n if prng is None:\n prng = np.random.RandomState(seed=seed)\n label = prng.choice(labels, [nrows, 1])\n feature = prng.random_sample([nrows, nvals])\n return np.concatenate([label, feature], axis=1)\n\n\ndef diff_tool(threshold=2e-7):\n return get_limited_precision_dsv_diff_tool(threshold, True)\n\n\[email protected](scope='module', autouse=True)\ndef skipif_no_cuda():\n for flag in pytest.config.option.flags:\n if re.match('HAVE_CUDA=(0|no|false)', flag, flags=re.IGNORECASE):\n return pytest.mark.skipif(True, reason=flag)\n\n return pytest.mark.skipif(False, reason='None')\n\n\npytestmark = skipif_no_cuda()\n\n\ndef fit_catboost_gpu(params, devices='0'):\n execute_catboost_fit(\n task_type='GPU',\n params=params,\n devices=devices,\n )\n\n\n# currently only works on CPU\ndef fstr_catboost_cpu(params):\n cmd = list()\n cmd.append(CATBOOST_PATH)\n cmd.append('fstr')\n append_params_to_cmdline(cmd, params)\n yatest.common.execute(cmd)\n\n\ndef test_eval_metric_equals_loss_metric():\n output_model_path = 'model.bin'\n train_dir_path = 'trainDir'\n params = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '--eval-metric', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--train-dir', train_dir_path,\n )\n fit_catboost_gpu(params)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('qwise_loss', ['QueryRMSE', 'RMSE'])\ndef test_queryrmse(boosting_type, qwise_loss):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')\n predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')\n\n learn_file = data_file('querywise', 'train')\n cd_file = data_file('querywise', 'train.cd')\n test_file = data_file('querywise', 'test')\n params = {\"--loss-function\": qwise_loss,\n \"-f\": learn_file,\n \"-t\": test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '100',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--use-best-model': 'false'\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)\n 
apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)\n\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),\n local_canonical_file(test_error_path, diff_tool=diff_tool()),\n local_canonical_file(predictions_path_learn, diff_tool=diff_tool()),\n local_canonical_file(predictions_path_test, diff_tool=diff_tool()),\n ]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_boosting_type(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n train_file = data_file('adult', 'train_small')\n test_file = data_file('adult', 'test_small')\n cd_file = data_file('adult', 'train.cd')\n\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': train_file,\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '-m': output_model_path,\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_rsm_with_default_value(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': data_file('adult', 'test_small'),\n '--column-description': data_file('adult', 'train.cd'),\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '--rsm': 1,\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n\n\[email protected](reason='Need fixing')\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_rsm_with_pairwise(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'PairLogitPairwise',\n '-f': data_file('querywise', 'train'),\n '--learn-pairs': data_file('querywise', 'train.pairs'),\n '--column-description': data_file('querywise', 'train.cd'),\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '--rsm': 0.5,\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n\n\ndef combine_dicts(first, *vargs):\n combined = first.copy()\n for rest in vargs:\n combined.update(rest)\n return combined\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_bootstrap(boosting_type):\n bootstrap_option = {\n 'no': {'--bootstrap-type': 'No'},\n 'bayes': {'--bootstrap-type': 'Bayesian', '--bagging-temperature': '0.0'},\n 'bernoulli': {'--bootstrap-type': 'Bernoulli', '--subsample': '1.0'}\n }\n\n test_file = data_file('adult', 'test_small')\n cd_file = data_file('adult', 'train.cd')\n\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n }\n\n for bootstrap in bootstrap_option:\n model_path = yatest.common.test_output_path('model_' + bootstrap + '.bin')\n eval_path = yatest.common.test_output_path('test_' + bootstrap + '.eval')\n model_option = {'-m': model_path}\n\n run_params = combine_dicts(params,\n bootstrap_option[bootstrap],\n model_option)\n\n fit_catboost_gpu(run_params)\n apply_catboost(model_path, test_file, cd_file, eval_path)\n\n ref_eval_path = yatest.common.test_output_path('test_no.eval')\n assert 
(filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bayes.eval')))\n assert (filecmp.cmp(ref_eval_path, yatest.common.test_output_path('test_bernoulli.eval')))\n\n return [local_canonical_file(ref_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode_forbidden(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_file = data_file('adult', 'test_small')\n learn_file = data_file('adult', 'train_small')\n cd_file = data_file('adult', 'train.cd')\n params = {\n '-f': learn_file,\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '20',\n '-T': '4',\n '-m': output_model_path,\n '--nan-mode': 'Forbidden',\n '--use-best-model': 'false',\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_overfit_detector_iter(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult', 'train.cd')\n test_file = data_file('adult', 'test_small')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '2000',\n '-T': '4',\n '-m': output_model_path,\n '-x': '1',\n '-n': '8',\n '-w': '0.5',\n '--od-type': 'Iter',\n '--od-wait': '2',\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_overfit_detector_inc_to_dec(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult', 'train.cd')\n test_file = data_file('adult', 'test_small')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '2000',\n '-T': '4',\n '-m': output_model_path,\n '-x': '1',\n '-n': '8',\n '-w': '0.5',\n '--od-pval': '0.5',\n '--od-type': 'IncToDec',\n '--od-wait': '2',\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path)]\n\n\nNAN_MODE = ['Min', 'Max']\n\n\[email protected]('nan_mode', NAN_MODE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_nan_mode(nan_mode, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n test_file = data_file('adult_nan', 'test_small')\n cd_file = data_file('adult_nan', 'train.cd')\n\n params = {\n '--use-best-model': 'false',\n '-f': data_file('adult_nan', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '20',\n '-T': '4',\n '-m': output_model_path,\n '--nan-mode': nan_mode\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email 
protected]('boosting_type', BOOSTING_TYPE)\ndef test_use_best_model(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cd_file = data_file('adult', 'train.cd')\n test_file = data_file('adult', 'test_small')\n params = {\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '100',\n '-T': '4',\n '-m': output_model_path,\n '-x': '1',\n '-n': '8',\n '-w': '1',\n '--od-pval': '0.99',\n '--use-best-model': 'true'\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path)]\n\n\nLOSS_FUNCTIONS = ['RMSE', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson', 'MAPE']\nLEAF_ESTIMATION_METHOD = ['Gradient', 'Newton']\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_crossentropy(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cd_file = data_file('adult_crossentropy', 'train.cd')\n test_file = data_file('adult_crossentropy', 'test_proba')\n params = {\n '--loss-function': 'CrossEntropy',\n '-f': data_file('adult_crossentropy', 'train_proba'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '-m': output_model_path,\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_permutation_block(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cd_file = data_file('adult_crossentropy', 'train.cd')\n test_file = data_file('adult_crossentropy', 'test_proba')\n params = {\n '--loss-function': 'CrossEntropy',\n '-f': data_file('adult_crossentropy', 'train_proba'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '--fold-permutation-block': '8',\n '-m': output_model_path,\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_ignored_features(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n test_file = data_file('adult', 'test_small')\n cd_file = data_file('adult', 'train.cd')\n params = {\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '-m': output_model_path,\n '-I': '0:1:3:5-7:10000',\n '--use-best-model': 'false',\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\ndef test_ignored_features_not_read():\n output_model_path = yatest.common.test_output_path('model.bin')\n input_cd_path = data_file('adult', 'train.cd')\n cd_file = yatest.common.test_output_path('train.cd')\n\n with 
open(input_cd_path, \"rt\") as f:\n cd_lines = f.readlines()\n with open(cd_file, \"wt\") as f:\n for cd_line in cd_lines:\n # Corrupt some features by making them 'Num'\n if cd_line.split() == ('5', 'Categ'): # column 5 --> feature 4\n cd_line = cd_line.replace('Categ', 'Num')\n if cd_line.split() == ('7', 'Categ'): # column 7 --> feature 6\n cd_line = cd_line.replace('Categ', 'Num')\n f.write(cd_line)\n\n test_file = data_file('adult', 'test_small')\n params = {\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '-m': output_model_path,\n '-I': '4:6',\n '--use-best-model': 'false',\n }\n\n fit_catboost_gpu(params)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_baseline(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cd_file = data_file('train_adult_baseline.cd')\n test_file = data_file('adult_weight', 'test_weight')\n params = {\n '--loss-function': 'Logloss',\n '-f': data_file('adult_weight', 'train_weight'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '-m': output_model_path,\n '--use-best-model': 'false',\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('loss_function', ['RMSE', 'Logloss', 'CrossEntropy'])\ndef test_boost_from_average(boosting_type, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_calc_eval_path = yatest.common.test_output_path('test_calc.eval')\n output_eval_path_with_avg = yatest.common.test_output_path('test_avg.eval')\n output_eval_path_with_baseline = yatest.common.test_output_path('test_baseline.eval')\n baselined_train = yatest.common.test_output_path('baselined_train')\n baselined_test = yatest.common.test_output_path('baselined_test')\n baselined_cd = yatest.common.test_output_path('baselined.cd')\n\n train_path = data_file('adult', 'train_small')\n test_path = data_file('adult', 'test_small')\n original_cd = data_file('adult', 'train.cd')\n\n # use float32 beacause we use float in C++\n sum_target = np.float32(0)\n obj_count = np.float32(0)\n with open(train_path) as train_f:\n for line in train_f:\n obj_count += 1\n sum_target += np.float32(line.split()[1])\n\n mean_target = sum_target / obj_count\n if loss_function in ['Logloss', 'CrossEntropy']:\n mean_target = -np.log(1 / mean_target - 1)\n mean_target_str = str(mean_target)\n\n def append_baseline_to_pool(source, target):\n with open(source) as source_f, open(target, 'w') as target_f:\n for line in source_f:\n target_f.write(line.rstrip('\\n') + '\\t' + mean_target_str + '\\n')\n\n append_baseline_to_pool(train_path, baselined_train)\n append_baseline_to_pool(test_path, baselined_test)\n\n with open(baselined_cd, 'w') as cd_output, open(original_cd) as cd_input:\n for line in cd_input:\n cd_output.write(line)\n cd_output.write('18\\tBaseline\\n')\n\n baseline_boost_params = {\n '--loss-function': loss_function,\n '--boosting-type': boosting_type,\n '-i': '30',\n '-w': '0.03',\n '-T': '4',\n '-m': output_model_path,\n '-f': baselined_train,\n '-t': baselined_test,\n '--boost-from-average': '0',\n '--column-description': 
baselined_cd,\n '--eval-file': output_eval_path_with_baseline,\n }\n avg_boost_params = {\n '--loss-function': loss_function,\n '--boosting-type': boosting_type,\n '-i': '30',\n '-w': '0.03',\n '-T': '4',\n '-m': output_model_path,\n '-f': train_path,\n '-t': test_path,\n '--boost-from-average': '1',\n '--column-description': original_cd,\n '--eval-file': output_eval_path_with_avg,\n }\n fit_catboost_gpu(baseline_boost_params)\n fit_catboost_gpu(avg_boost_params)\n\n apply_catboost(output_model_path, test_path, original_cd, output_calc_eval_path)\n\n assert compare_fit_evals_with_precision(output_eval_path_with_avg, output_eval_path_with_baseline)\n assert compare_evals_with_precision(output_eval_path_with_avg, output_calc_eval_path)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_weights(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult_weight', 'train.cd')\n test_file = data_file('adult_weight', 'test_weight')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult_weight', 'train_weight'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_weights_without_bootstrap(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult_weight', 'train.cd')\n test_file = data_file('adult_weight', 'test_weight')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult_weight', 'train_weight'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '--bootstrap-type': 'No',\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('leaf_estimation', [\"Newton\", \"Gradient\"])\ndef test_weighted_pool_leaf_estimation_method(boosting_type, leaf_estimation):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult_weight', 'train.cd')\n test_file = data_file('adult_weight', 'test_weight')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult_weight', 'train_weight'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-T': '4',\n '--leaf-estimation-method': leaf_estimation,\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('leaf_estimation', [\"Newton\", \"Gradient\"])\ndef test_leaf_estimation_method(boosting_type, leaf_estimation):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = 
yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult', 'train.cd')\n test_file = data_file('adult', 'test_small')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-T': '4',\n '--leaf-estimation-method': leaf_estimation,\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_one_hot_max_size(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult', 'train.cd')\n test_file = data_file('adult', 'test_small')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '--one-hot-max-size': 64,\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_l2_reg_size(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult', 'train.cd')\n test_file = data_file('adult', 'test_small')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-T': '4',\n '--l2-leaf-reg': 10,\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_has_time(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult', 'train.cd')\n test_file = data_file('adult', 'test_small')\n params = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', test_file,\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--has-time',\n '-m', output_model_path,\n )\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_logloss_with_not_binarized_target(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult_not_binarized', 'train.cd')\n test_file = data_file('adult_not_binarized', 'test_small')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult_not_binarized', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '--target-border': '0.5',\n 
'-m': output_model_path,\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_fold_len_mult():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult_not_binarized', 'train.cd')\n test_file = data_file('adult_not_binarized', 'test_small')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult_not_binarized', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': 'Ordered',\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '--fold-len-multiplier': 1.2,\n '--target-border': '0.5',\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_random_strength():\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n cd_file = data_file('adult_not_binarized', 'train.cd')\n test_file = data_file('adult_not_binarized', 'test_small')\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': data_file('adult_not_binarized', 'train_small'),\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': 'Ordered',\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '--random-strength': 122,\n '--target-border': '0.5',\n '-m': output_model_path,\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path)]\n\n\[email protected]('loss_function', LOSS_FUNCTIONS)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_all_targets(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n test_file = data_file('adult', 'test_small')\n cd_file = data_file('adult', 'train.cd')\n params = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult', 'train_small'),\n '-t', test_file,\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n params = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 10),\n '--eval-file', output_eval_path,\n )\n fit_catboost_gpu(params)\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_query(is_inverted, boosting_type):\n 
output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n params = (\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n fit_catboost_gpu(params)\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('is_inverted', [False, True], ids=['', 'inverted'])\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_cv_for_pairs(is_inverted, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n params = (\n '--use-best-model', 'false',\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '--column-description', data_file('querywise', 'train.cd'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--cv', format_crossvalidation(is_inverted, 2, 7),\n '--eval-file', output_eval_path,\n )\n fit_catboost_gpu(params)\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_priors(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n test_file = data_file('adult', 'test_small')\n cd_file = data_file('adult', 'train.cd')\n params = (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', test_file,\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--ctr', 'Borders:Prior=-2:Prior=0:Prior=8/3:Prior=1:Prior=-1:Prior=3,'\n 'FeatureFreq:Prior=0',\n '--per-feature-ctr', '4:Borders:Prior=0.444,FeatureFreq:Prior=0.444;'\n '6:Borders:Prior=0.666,FeatureFreq:Prior=0.666;'\n '8:Borders:Prior=-0.888:Prior=2/3,FeatureFreq:Prior=-0.888:Prior=0.888'\n )\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\nCTR_TYPES = ['Borders', 'Buckets', 'FloatTargetMeanValue',\n 'Borders,FloatTargetMeanValue', 'Buckets,Borders']\n\n\[email protected]('ctr_type', CTR_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_ctr_type(ctr_type, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cd_file = data_file('adult_crossentropy', 'train.cd')\n test_file = data_file('adult_crossentropy', 'test_proba')\n params = (\n '--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', test_file,\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '3',\n '-T', '4',\n '-m', output_model_path,\n '--ctr', ctr_type\n )\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\ndef test_train_dir():\n output_model_path = 'model.bin'\n train_dir_path = 'trainDir'\n params = (\n 
'--use-best-model', 'false',\n '--loss-function', 'RMSE',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n '--train-dir', train_dir_path,\n )\n fit_catboost_gpu(params)\n outputs = ['time_left.tsv', 'learn_error.tsv', 'test_error.tsv', output_model_path]\n for output in outputs:\n assert os.path.isfile(train_dir_path + '/' + output)\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('qwise_loss', ['QueryRMSE', 'RMSE'])\ndef test_train_on_binarized_equal_train_on_float(boosting_type, qwise_loss):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_model_path_binarized = yatest.common.test_output_path('model_binarized.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n\n borders_file = yatest.common.test_output_path('borders.tsv')\n borders_file_output = borders_file + '.out'\n predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')\n predictions_path_learn_binarized = yatest.common.test_output_path('predictions_learn_binarized.tsv')\n predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')\n predictions_path_test_binarized = yatest.common.test_output_path('predictions_test_binarized.tsv')\n\n learn_file = data_file('querywise', 'train')\n cd_file = data_file('querywise', 'train.cd')\n test_file = data_file('querywise', 'test')\n params = {\"--loss-function\": qwise_loss,\n \"-f\": learn_file,\n \"-t\": test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '100',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--use-best-model': 'false',\n '--output-borders-file': borders_file_output,\n }\n\n params_binarized = dict(params)\n params_binarized['--input-borders-file'] = borders_file_output\n params_binarized['--output-borders-file'] = borders_file\n params_binarized['-m'] = output_model_path_binarized\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)\n apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)\n\n fit_catboost_gpu(params_binarized)\n\n apply_catboost(output_model_path_binarized, learn_file, cd_file, predictions_path_learn_binarized)\n apply_catboost(output_model_path_binarized, test_file, cd_file, predictions_path_test_binarized)\n\n assert (filecmp.cmp(predictions_path_learn, predictions_path_learn_binarized))\n assert (filecmp.cmp(predictions_path_test, predictions_path_test_binarized))\n\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),\n local_canonical_file(test_error_path, diff_tool=diff_tool()),\n local_canonical_file(predictions_path_test, diff_tool=diff_tool()),\n local_canonical_file(predictions_path_learn, diff_tool=diff_tool()),\n local_canonical_file(borders_file, diff_tool=diff_tool())]\n\n\nFSTR_TYPES = ['PredictionValuesChange', 'InternalFeatureImportance', 'InternalInteraction', 'Interaction', 'ShapValues']\n\n\[email protected]('fstr_type', FSTR_TYPES)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_fstr(fstr_type, boosting_type):\n model_path = yatest.common.test_output_path('adult_model.bin')\n output_fstr_path = yatest.common.test_output_path('fstr.tsv')\n\n fit_params = (\n '--use-best-model', 'false',\n 
'--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '--one-hot-max-size', '10',\n '-m', model_path\n )\n\n if fstr_type == 'ShapValues':\n fit_params += ('--max-ctr-complexity', '1')\n\n fit_catboost_gpu(fit_params)\n\n fstr_params = (\n '--input-path', data_file('adult', 'train_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-m', model_path,\n '-o', output_fstr_path,\n '--fstr-type', fstr_type\n )\n fstr_catboost_cpu(fstr_params)\n\n return local_canonical_file(output_fstr_path)\n\n\nLOSS_FUNCTIONS_NO_MAPE = ['RMSE', 'Logloss', 'MAE', 'CrossEntropy', 'Quantile', 'LogLinQuantile', 'Poisson']\n\n\[email protected]('loss_function', LOSS_FUNCTIONS_NO_MAPE)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_quantized_pool(loss_function, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n quantized_train_file = 'quantized://' + data_file('quantized_adult', 'train.qbin')\n quantized_test_file = 'quantized://' + data_file('quantized_adult', 'test.qbin')\n params = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', quantized_train_file,\n '-t', quantized_test_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n )\n\n fit_catboost_gpu(params)\n cd_file = data_file('quantized_adult', 'pool.cd')\n test_file = data_file('quantized_adult', 'test_small.tsv')\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool(1.e-5))]\n\n\ndef execute_fit_for_test_quantized_pool(loss_function, pool_path, test_path, cd_path, eval_path,\n border_count=128, other_options=()):\n model_path = yatest.common.test_output_path('model.bin')\n\n params = (\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', pool_path,\n '-t', test_path,\n '--cd', cd_path,\n '-i', '10',\n '-w', '0.03',\n '-T', '4',\n '-x', str(border_count),\n '--feature-border-type', 'GreedyLogSum',\n '-m', model_path,\n '--eval-file', eval_path,\n )\n fit_catboost_gpu(params + other_options)\n\n\[email protected](reason='TODO(kirillovs): Not yet implemented. 
MLTOOLS-2636.')\ndef test_quantized_pool_with_large_grid():\n test_path = data_file('querywise', 'test')\n\n tsv_eval_path = yatest.common.test_output_path('tsv.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path=data_file('querywise', 'train'),\n test_path=test_path,\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=tsv_eval_path,\n border_count=1024\n )\n\n quantized_eval_path = yatest.common.test_output_path('quantized.eval')\n execute_fit_for_test_quantized_pool(\n loss_function='PairLogitPairwise',\n pool_path='quantized://' + data_file('querywise', 'train.quantized_x1024'),\n test_path='quantized://' + data_file('querywise', 'test.quantized_x1024'),\n cd_path=data_file('querywise', 'train.cd.query_id'),\n eval_path=quantized_eval_path\n )\n\n assert (compare_evals_with_precision(tsv_eval_path, quantized_eval_path))\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('used_ram_limit', ['1Kb', '550Mb'])\ndef test_allow_writing_files_and_used_ram_limit(boosting_type, used_ram_limit):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n cd_file = data_file('airlines_5K', 'cd')\n\n params = (\n '--use-best-model', 'false',\n '--allow-writing-files', 'false',\n '--used-ram-limit', used_ram_limit,\n '--loss-function', 'Logloss',\n '--max-ctr-complexity', '8',\n '--depth', '10',\n '-f', data_file('airlines_5K', 'train'),\n '-t', data_file('airlines_5K', 'test'),\n '--column-description', cd_file,\n '--has-header',\n '--boosting-type', boosting_type,\n '-i', '20',\n '-w', '0.03',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n )\n fit_catboost_gpu(params)\n\n test_file = data_file('airlines_5K', 'test')\n apply_catboost(output_model_path, test_file, cd_file,\n output_eval_path, has_header=True)\n\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\ndef test_pairs_generation():\n output_model_path = yatest.common.test_output_path('model.bin')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')\n predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')\n\n cd_file = data_file('querywise', 'train.cd')\n learn_file = data_file('querywise', 'train')\n test_file = data_file('querywise', 'test')\n\n params = [\n '--loss-function', 'PairLogit',\n '--eval-metric', 'PairAccuracy',\n '-f', learn_file,\n '-t', test_file,\n '--column-description', cd_file,\n '--l2-leaf-reg', '0',\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--learn-err-log', learn_error_path,\n '--test-err-log', test_error_path,\n '--use-best-model', 'false'\n ]\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)\n apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)\n\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),\n local_canonical_file(test_error_path, diff_tool=diff_tool()),\n local_canonical_file(predictions_path_learn, diff_tool=diff_tool()),\n local_canonical_file(predictions_path_test, diff_tool=diff_tool()),\n ]\n\n\n# def test_pairs_generation_with_max_pairs():\n# output_model_path = yatest.common.test_output_path('model.bin')\n# test_error_path = yatest.common.test_output_path('test_error.tsv')\n# 
learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n# predictions_path_learn = yatest.common.test_output_path('predictions_learn.tsv')\n# predictions_path_test = yatest.common.test_output_path('predictions_test.tsv')\n#\n# cd_file = data_file('querywise', 'train.cd')\n# learn_file = data_file('querywise', 'train')\n# test_file = data_file('querywise', 'test')\n#\n# params = [\n# '--loss-function', 'PairLogit:max_pairs=30',\n# '--eval-metric', 'PairAccuracy',\n# '-f', learn_file,\n# '-t', test_file,\n# '--column-description', cd_file,\n# '--l2-leaf-reg', '0',\n# '-i', '20',\n# '-T', '4',\n# '-m', output_model_path,\n# '--learn-err-log', learn_error_path,\n# '--test-err-log', test_error_path,\n# '--use-best-model', 'false'\n# ]\n# fit_catboost_gpu(params)\n# apply_catboost(output_model_path, learn_file, cd_file, predictions_path_learn)\n# apply_catboost(output_model_path, test_file, cd_file, predictions_path_test)\n#\n# return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),\n# local_canonical_file(test_error_path, diff_tool=diff_tool()),\n# local_canonical_file(predictions_path_learn, diff_tool=diff_tool()),\n# local_canonical_file(predictions_path_test, diff_tool=diff_tool()),\n# ]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_pairlogit_no_target(boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n params = [\n '--loss-function', 'PairLogit',\n '-f', data_file('querywise', 'train'),\n '-t', data_file('querywise', 'test'),\n '--column-description', data_file('querywise', 'train.cd.no_target'),\n '--learn-pairs', data_file('querywise', 'train.pairs'),\n '--test-pairs', data_file('querywise', 'test.pairs'),\n '--boosting-type', boosting_type,\n '-i', '20',\n '-T', '4',\n '-m', output_model_path,\n '--eval-file', output_eval_path,\n '--use-best-model', 'false',\n ]\n fit_catboost_gpu(params)\n\n return [\n local_canonical_file(\n output_eval_path,\n # TODO(akhropov): why such result instability for Plain. 
MLTOOLS-2801\n diff_tool=diff_tool(threshold={'Plain': 0.07, 'Ordered': 1.e-7}[boosting_type])\n )\n ]\n\n\[email protected]('task_type', ['CPU', 'GPU'])\ndef test_learn_without_header_eval_with_header(task_type):\n train_path = yatest.common.test_output_path('airlines_without_header')\n with open(data_file('airlines_5K', 'train'), 'r') as with_header_file:\n with open(train_path, 'w') as without_header_file:\n without_header_file.writelines(with_header_file.readlines()[1:])\n\n model_path = yatest.common.test_output_path('model.bin')\n\n fit_params = [\n '--loss-function', 'Logloss',\n '-f', train_path,\n '--cd', data_file('airlines_5K', 'cd'),\n '-i', '10',\n '-m', model_path\n ]\n execute_catboost_fit(\n task_type=task_type,\n params=fit_params,\n devices='0'\n )\n\n cmd_calc = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('airlines_5K', 'test'),\n '--cd', data_file('airlines_5K', 'cd'),\n '-m', model_path,\n '--has-header'\n )\n yatest.common.execute(cmd_calc)\n\n\ndef test_group_weights_file():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n first_model_path = yatest.common.test_output_path('first_model.bin')\n second_model_path = yatest.common.test_output_path('second_model.bin')\n\n def run_catboost(eval_path, model_path, cd_file, is_additional_query_weights):\n cd_file_path = data_file('querywise', cd_file)\n fit_params = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', data_file('querywise', 'train'),\n '--column-description', cd_file_path,\n '-i', '5',\n '-T', '4',\n '-m', model_path,\n ]\n if is_additional_query_weights:\n fit_params += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n fit_catboost_gpu(fit_params)\n apply_catboost(model_path, data_file('querywise', 'test'), cd_file_path, eval_path)\n\n run_catboost(first_eval_path, first_model_path, 'train.cd', True)\n run_catboost(second_eval_path, second_model_path, 'train.cd.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\ndef test_group_weights_file_quantized():\n first_eval_path = yatest.common.test_output_path('first.eval')\n second_eval_path = yatest.common.test_output_path('second.eval')\n first_model_path = yatest.common.test_output_path('first_model.bin')\n second_model_path = yatest.common.test_output_path('second_model.bin')\n\n def run_catboost(eval_path, model_path, train, is_additional_query_weights):\n fit_params = [\n '--use-best-model', 'false',\n '--loss-function', 'QueryRMSE',\n '-f', 'quantized://' + data_file('querywise', train),\n '-i', '5',\n '-T', '4',\n '-m', model_path,\n ]\n if is_additional_query_weights:\n fit_params += [\n '--learn-group-weights', data_file('querywise', 'train.group_weights'),\n '--test-group-weights', data_file('querywise', 'test.group_weights'),\n ]\n fit_catboost_gpu(fit_params)\n apply_catboost(model_path, data_file('querywise', 'test'), data_file('querywise', 'train.cd.group_weight'), eval_path)\n\n run_catboost(first_eval_path, first_model_path, 'train.quantized', True)\n run_catboost(second_eval_path, second_model_path, 'train.quantized.group_weight', False)\n assert filecmp.cmp(first_eval_path, second_eval_path)\n\n return [local_canonical_file(first_eval_path)]\n\n\nNO_RANDOM_PARAMS = {\n '--random-strength': '0',\n '--bootstrap-type': 'No',\n '--has-time': '',\n 
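    # Presumably these options strip the stochastic parts of training (no bootstrap,
    # zero random strength, time-ordered input) so that separate CPU and GPU fits on
    # the same pool produce directly comparable metrics.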
'--set-metadata-from-freeargs': ''\n}\n\nMETRIC_CHECKING_MULTICLASS_NO_WEIGHTS = 'Accuracy'\nMETRIC_CHECKING_MULTICLASS_WITH_WEIGHTS = 'Accuracy:use_weights=false'\n\nCAT_COMPARE_PARAMS = {\n '--counter-calc-method': 'SkipTest',\n '--simple-ctr': 'Buckets',\n '--max-ctr-complexity': 1\n}\n\n\ndef eval_metric(model_path, metrics, data_path, cd_path, output_log, eval_period='1'):\n cmd = [\n CATBOOST_PATH,\n 'eval-metrics',\n '--metrics', metrics,\n '-m', model_path,\n '--input-path', data_path,\n '--cd', cd_path,\n '--output-path', output_log,\n '--eval-period', eval_period\n ]\n\n yatest.common.execute(cmd)\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_class_weight_multiclass(loss_function):\n model_path = yatest.common.test_output_path('model.bin')\n\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n learn_path = data_file('adult', 'train_small')\n test_path = data_file('adult', 'test_small')\n cd_path = data_file('adult', 'train.cd')\n\n fit_params = {\n '--use-best-model': 'false',\n '--loss-function': loss_function,\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--class-weights': '0.5,2',\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--custom-metric': METRIC_CHECKING_MULTICLASS_WITH_WEIGHTS\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, METRIC_CHECKING_MULTICLASS_WITH_WEIGHTS, test_path, cd_path, eval_error_path)\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_WITH_WEIGHTS, test_error_path, eval_error_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('leaf_estimation_method', LEAF_ESTIMATION_METHOD)\ndef test_multi_leaf_estimation_method(leaf_estimation_method):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_test_error_path = yatest.common.test_output_path('eval_test_error.tsv')\n\n train_path = data_file('cloudness_small', 'train_small')\n test_path = data_file('cloudness_small', 'test_small')\n cd_path = data_file('cloudness_small', 'train.cd')\n\n fit_params = {\n '--loss-function': 'MultiClass',\n '-f': train_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '-m': output_model_path,\n '--leaf-estimation-method': leaf_estimation_method,\n '--leaf-estimation-iterations': '2',\n '--use-best-model': 'false',\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--custom-metric': METRIC_CHECKING_MULTICLASS_NO_WEIGHTS\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n fit_catboost_gpu(fit_params)\n\n eval_metric(output_model_path, METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_path, cd_path, eval_test_error_path)\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_error_path, eval_test_error_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline(loss_function):\n labels = [0, 1, 2, 3]\n\n cd_path = yatest.common.test_output_path('cd.txt')\n 
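    # Hand-written column description: column 0 is the target and columns 1-4 hold
    # per-class baseline values, one for each label in `labels` above.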
np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline'], [3, 'Baseline'], [4, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 1000, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 1000, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n fit_params = {\n '--loss-function': loss_function,\n '--learning-rate': '0.03',\n '-f': train_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '--use-best-model': 'false',\n '--classes-count': '4',\n '--custom-metric': METRIC_CHECKING_MULTICLASS_NO_WEIGHTS,\n '--test-err-log': eval_error_path\n }\n\n fit_params.update(NO_RANDOM_PARAMS)\n\n execute_catboost_fit('CPU', fit_params)\n\n fit_params['--learn-err-log'] = learn_error_path\n fit_params['--test-err-log'] = test_error_path\n fit_catboost_gpu(fit_params)\n\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_error_path, eval_error_path)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multiclass_baseline_lost_class(loss_function):\n num_objects = 1000\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target'], [1, 'Baseline'], [2, 'Baseline']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(num_objects, 10, labels=[1, 2], prng=prng), fmt='%.5f', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(num_objects, 10, labels=[0, 1, 2, 3], prng=prng), fmt='%.5f', delimiter='\\t')\n\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n custom_metric = 'Accuracy:use_weights=false'\n\n fit_params = {\n '--loss-function': loss_function,\n '-f': train_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '--custom-metric': custom_metric,\n '--test-err-log': eval_error_path,\n '--use-best-model': 'false',\n '--classes-count': '4'\n }\n\n fit_params.update(NO_RANDOM_PARAMS)\n\n with pytest.raises(yatest.common.ExecutionError):\n execute_catboost_fit('CPU', fit_params)\n\n\ndef test_ctr_buckets():\n model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n learn_path = data_file('adult', 'train_small')\n test_path = data_file('adult', 'test_small')\n cd_path = data_file('adult', 'train.cd')\n\n fit_params = {\n '--use-best-model': 'false',\n '--loss-function': 'MultiClass',\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--learn-err-log': 
learn_error_path,\n '--test-err-log': test_error_path,\n '--custom-metric': METRIC_CHECKING_MULTICLASS_NO_WEIGHTS\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_path, cd_path, eval_error_path)\n\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_error_path, eval_error_path)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_multi_targets(loss_function):\n model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n learn_path = data_file('cloudness_small', 'train_small')\n test_path = data_file('cloudness_small', 'test_small')\n cd_path = data_file('cloudness_small', 'train.cd')\n\n fit_params = {\n '--use-best-model': 'false',\n '--loss-function': loss_function,\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--custom-metric': METRIC_CHECKING_MULTICLASS_NO_WEIGHTS\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_path, cd_path, eval_error_path)\n\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_error_path, eval_error_path)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_custom_loss_for_multiclassification():\n model_path = yatest.common.test_output_path('model.bin')\n\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n learn_path = data_file('cloudness_small', 'train_small')\n test_path = data_file('cloudness_small', 'test_small')\n cd_path = data_file('cloudness_small', 'train.cd')\n\n custom_metric = [\n 'Accuracy',\n 'Precision',\n 'Recall',\n 'F1',\n 'TotalF1',\n 'MCC',\n 'Kappa',\n 'WKappa',\n 'ZeroOneLoss',\n 'HammingLoss',\n 'HingeLoss'\n ]\n\n custom_metric_string = ','.join(custom_metric)\n\n fit_params = {\n '--use-best-model': 'false',\n '--loss-function': 'MultiClass',\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--custom-metric': custom_metric_string,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, custom_metric_string, test_path, cd_path, eval_error_path)\n compare_metrics_with_diff(custom_metric, test_error_path, eval_error_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_custom_loss_for_classification(boosting_type):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n model_path = 
yatest.common.test_output_path('model.bin')\n\n learn_path = data_file('adult', 'train_small')\n test_path = data_file('adult', 'test_small')\n cd_path = data_file('adult', 'train.cd')\n\n custom_metric = [\n 'AUC',\n 'CrossEntropy',\n 'Accuracy',\n 'Precision',\n 'Recall',\n 'F1',\n 'TotalF1',\n 'MCC',\n 'BalancedAccuracy',\n 'BalancedErrorRate',\n 'Kappa',\n 'WKappa',\n 'BrierScore',\n 'ZeroOneLoss',\n 'HammingLoss',\n 'HingeLoss'\n ]\n\n custom_metric_string = ','.join(custom_metric)\n\n fit_params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': boosting_type,\n '-w': '0.03',\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--custom-metric': custom_metric_string,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, custom_metric_string, test_path, cd_path, eval_error_path)\n compare_metrics_with_diff(custom_metric, test_error_path, eval_error_path, 1e-6)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_class_names_multiclass(loss_function):\n model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n learn_path = data_file('precipitation_small', 'train_small')\n test_path = data_file('precipitation_small', 'test_small')\n cd_path = data_file('precipitation_small', 'train.cd')\n\n fit_params = {\n '--use-best-model': 'false',\n '--loss-function': loss_function,\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--custom-metric': METRIC_CHECKING_MULTICLASS_NO_WEIGHTS,\n '--class-names': '0.,0.5,1.,0.25,0.75'\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_path, cd_path, eval_error_path)\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_error_path, eval_error_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_lost_class(loss_function):\n model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n learn_path = data_file('cloudness_lost_class', 'train_small')\n test_path = data_file('cloudness_lost_class', 'test_small')\n cd_path = data_file('cloudness_lost_class', 'train.cd')\n\n fit_params = {\n '--use-best-model': 'false',\n '--loss-function': loss_function,\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--custom-metric': METRIC_CHECKING_MULTICLASS_NO_WEIGHTS,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--classes-count': '3'\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n 
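    # The cloudness_lost_class pool appears to be missing one of the three declared
    # classes in its training data; passing --classes-count 3 keeps the full class
    # space so the learn/test error logs and the eval-metrics output stay comparable.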
fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_path, cd_path, eval_error_path)\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_error_path, eval_error_path)\n\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_class_weight_with_lost_class():\n model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n learn_path = data_file('cloudness_lost_class', 'train_small')\n test_path = data_file('cloudness_lost_class', 'test_small')\n cd_path = data_file('cloudness_lost_class', 'train.cd')\n\n fit_params = {\n '--use-best-model': 'false',\n '--loss-function': 'MultiClass',\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--classes-count': '3',\n '--class-weights': '0.5,2,2',\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--custom-metric': METRIC_CHECKING_MULTICLASS_NO_WEIGHTS+':use_weights=false'\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, METRIC_CHECKING_MULTICLASS_NO_WEIGHTS+':use_weights=false', test_path, cd_path, eval_error_path)\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_NO_WEIGHTS+':use_weights=false', test_error_path, eval_error_path)\n\n return [local_canonical_file(eval_error_path)]\n\n\[email protected]('metric_period', ['1', '2'])\[email protected]('metric', ['MultiClass', 'MultiClassOneVsAll', 'F1', 'Accuracy', 'TotalF1', 'MCC', 'Precision', 'Recall'])\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('dataset', ['cloudness_small', 'cloudness_lost_class'])\ndef test_eval_metrics_multiclass(metric, loss_function, dataset, metric_period):\n if loss_function == 'MultiClass' and metric == 'MultiClassOneVsAll' or loss_function == 'MultiClassOneVsAll' and metric == 'MultiClass':\n return\n\n learn_path = data_file(dataset, 'train_small')\n test_path = data_file(dataset, 'test_small')\n cd_path = data_file(dataset, 'train.cd')\n\n model_path = yatest.common.test_output_path('model.bin')\n\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n fit_params = {\n '--loss-function': loss_function,\n '--custom-metric': metric,\n '--boosting-type': 'Plain',\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--use-best-model': 'false',\n '--classes-count': '3',\n '--metric-period': metric_period\n }\n\n fit_params.update(CAT_COMPARE_PARAMS)\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, metric, test_path, cd_path, eval_error_path, metric_period)\n\n idx_test_metric = 1 if metric == loss_function else 2\n\n first_metrics = np.loadtxt(test_error_path, skiprows=1)[:, idx_test_metric]\n second_metrics = np.loadtxt(eval_error_path, skiprows=1)[:, 1]\n assert np.allclose(first_metrics, second_metrics, atol=1e-5)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef 
test_eval_metrics_class_names():\n labels = ['a', 'b', 'c', 'd']\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n custom_metric = 'TotalF1,MultiClass'\n\n fit_params = {\n '--loss-function': 'MultiClass',\n '--custom-metric': custom_metric,\n '--boosting-type': 'Plain',\n '-f': train_path,\n '-t': test_path,\n '--column-description': cd_path,\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--use-best-model': 'false',\n '--class-names': ','.join(labels)\n }\n\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, custom_metric, test_path, cd_path, eval_error_path)\n\n first_metrics = np.round(np.loadtxt(test_error_path, skiprows=1)[:, 2], 5)\n second_metrics = np.round(np.loadtxt(eval_error_path, skiprows=1)[:, 1], 5)\n assert np.all(first_metrics == second_metrics)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n\n\ndef test_fit_multiclass_with_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n learn_path = yatest.common.test_output_path('train.txt')\n np.savetxt(learn_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n fit_params = {\n '--loss-function': 'MultiClass',\n '--boosting-type': 'Plain',\n '--custom-metric': METRIC_CHECKING_MULTICLASS_NO_WEIGHTS,\n '--class-names': ','.join(labels),\n '-f': learn_path,\n '-t': test_path,\n '--column-description': cd_path,\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--use-best-model': 'false',\n '--test-err-log': test_error_path\n }\n\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_path, cd_path, eval_error_path)\n\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_error_path, eval_error_path)\n\n return [local_canonical_file(test_error_path)]\n\n\ndef test_extract_multiclass_labels_from_class_names():\n labels = ['a', 'b', 'c', 'd']\n\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = 
yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, labels, prng=prng), fmt='%s', delimiter='\\t')\n\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n eval_error_path = yatest.common.test_output_path('eval_error.tsv')\n\n fit_params = {\n '--loss-function': 'MultiClass',\n '--class-names': ','.join(labels),\n '--boosting-type': 'Plain',\n '--custom-metric': METRIC_CHECKING_MULTICLASS_NO_WEIGHTS,\n '-f': train_path,\n '-t': test_path,\n '--column-description': cd_path,\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--use-best-model': 'false',\n '--test-err-log': test_error_path\n }\n\n fit_catboost_gpu(fit_params)\n\n eval_metric(model_path, METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_path, cd_path, eval_error_path)\n compare_metrics_with_diff(METRIC_CHECKING_MULTICLASS_NO_WEIGHTS, test_error_path, eval_error_path)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'String'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [0, 1, 2, 3]\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == ['a', 'b', 'c', 'd']\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 0\n\n assert json.loads(py_catboost.get_metadata()['params'])['data_processing_options']['class_names'] == ['a', 'b', 'c', 'd']\n\n return [local_canonical_file(test_error_path)]\n\n\[email protected]('loss_function', MULTICLASS_LOSSES)\[email protected]('prediction_type', ['Probability', 'RawFormulaVal', 'Class'])\ndef test_save_and_apply_multiclass_labels_from_classes_count(loss_function, prediction_type):\n model_path = yatest.common.test_output_path('model.bin')\n\n cd_path = yatest.common.test_output_path('cd.txt')\n np.savetxt(cd_path, [[0, 'Target']], fmt='%s', delimiter='\\t')\n\n prng = np.random.RandomState(seed=0)\n\n train_path = yatest.common.test_output_path('train.txt')\n np.savetxt(train_path, generate_concatenated_random_labeled_dataset(100, 10, [1, 2], prng=prng), fmt='%s', delimiter='\\t')\n\n test_path = yatest.common.test_output_path('test.txt')\n np.savetxt(test_path, generate_concatenated_random_labeled_dataset(100, 10, [0, 1, 2, 3], prng=prng), fmt='%s', delimiter='\\t')\n\n eval_path = yatest.common.test_output_path('eval.txt')\n\n fit_params = {\n '--loss-function': loss_function,\n '--boosting-type': 'Plain',\n '--classes-count': '4',\n '-f': train_path,\n '--column-description': cd_path,\n '-i': '10',\n '-T': '4',\n '-m': model_path,\n '--use-best-model': 'false'\n }\n\n fit_catboost_gpu(fit_params)\n\n py_catboost = catboost.CatBoost()\n py_catboost.load_model(model_path)\n\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_label_type'] == 'Integer'\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_to_label'] == [1, 2]\n assert json.loads(py_catboost.get_metadata()['class_params'])['classes_count'] == 4\n assert json.loads(py_catboost.get_metadata()['class_params'])['class_names'] == []\n\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test_path,\n '--column-description', cd_path,\n '-m', model_path,\n '--output-path', eval_path,\n '--prediction-type', prediction_type\n )\n\n 
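    # The model was trained with --classes-count 4 but only ever saw labels 1 and 2,
    # so the checks below expect the two unseen classes to get -inf raw values,
    # (near-)zero probabilities, and never to appear in the Class predictions.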
yatest.common.execute(calc_cmd)\n\n if prediction_type == 'RawFormulaVal':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert float(line[:-1].split()[1]) == float('-inf') and float(line[:-1].split()[4]) == float('-inf') # fictitious approxes must be negative infinity\n\n if prediction_type == 'Probability':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\t{}:Class=0\\t{}:Class=1\\t{}:Class=2\\t{}:Class=3' \\\n .format(prediction_type, prediction_type, prediction_type, prediction_type)\n else:\n assert abs(float(line[:-1].split()[1])) < 1e-307 \\\n and abs(float(line[:-1].split()[4])) < 1e-307 # fictitious probabilities must be virtually zero\n\n if prediction_type == 'Class':\n with open(eval_path, \"rt\") as f:\n for i, line in enumerate(f):\n if i == 0:\n assert line[:-1] == 'SampleId\\tClass'\n else:\n assert float(line[:-1].split()[1]) in [1, 2] # probability of 0,3 classes appearance must be zero\n\n return [local_canonical_file(eval_path)]\n\n\nREG_LOSS_FUNCTIONS = ['RMSE', 'MAE', 'Lq:q=1', 'Lq:q=1.5', 'Lq:q=3']\nCUSTOM_METRIC = [\"MAE,Lq:q=2.5,NumErrors:greater_than=0.1,NumErrors:greater_than=0.01,NumErrors:greater_than=0.5\"]\n\n\[email protected]('loss_function', REG_LOSS_FUNCTIONS)\[email protected]('custom_metric', CUSTOM_METRIC)\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_reg_targets(loss_function, boosting_type, custom_metric):\n test_error_path = yatest.common.test_output_path(\"test_error.tsv\")\n params = [\n '--use-best-model', 'false',\n '--loss-function', loss_function,\n '-f', data_file('adult_crossentropy', 'train_proba'),\n '-t', data_file('adult_crossentropy', 'test_proba'),\n '--column-description', data_file('adult_crossentropy', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--counter-calc-method', 'SkipTest',\n '--custom-metric', custom_metric,\n '--test-err-log', test_error_path,\n '--boosting-type', boosting_type\n ]\n fit_catboost_gpu(params)\n\n return [local_canonical_file(test_error_path, diff_tool=diff_tool(1e-5))]\n\n\ndef test_eval_result_on_different_pool_type():\n output_eval_path = yatest.common.test_output_path('test.eval')\n output_quantized_eval_path = yatest.common.test_output_path('test.eval.quantized')\n\n def get_params(train, test, eval_path):\n return (\n '--use-best-model', 'false',\n '--loss-function', 'Logloss',\n '-f', train,\n '-t', test,\n '--cd', data_file('querywise', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--target-border', '0.5',\n '--eval-file', eval_path,\n )\n\n def get_pool_path(set_name, is_quantized=False):\n path = data_file('querywise', set_name)\n return 'quantized://' + path + '.quantized' if is_quantized else path\n\n fit_catboost_gpu(get_params(get_pool_path('train'), get_pool_path('test'), output_eval_path))\n fit_catboost_gpu(get_params(get_pool_path('train', True), get_pool_path('test', True), output_quantized_eval_path))\n\n assert filecmp.cmp(output_eval_path, output_quantized_eval_path)\n return [local_canonical_file(output_eval_path)]\n\n\ndef test_convert_model_to_json_without_cat_features():\n output_model_path = yatest.common.test_output_path('model.json')\n output_eval_path = yatest.common.test_output_path('test.eval')\n fit_params = [\n '--use-best-model', 'false',\n '-f', data_file('higgs', 'train_small'),\n '-t', 
data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train.cd'),\n '-i', '20',\n '-T', '4',\n '-r', '0',\n '--eval-file', output_eval_path,\n '-m', output_model_path,\n '--model-format', 'Json'\n ]\n fit_catboost_gpu(fit_params)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', data_file('higgs', 'test_small'),\n '--column-description', data_file('higgs', 'train.cd'),\n '-m', output_model_path,\n '--model-format', 'Json',\n '--output-path', formula_predict_path\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path))\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('loss_function', ('YetiRankPairwise', 'PairLogitPairwise'))\ndef test_pairwise(loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n train_file = data_file('querywise', 'train')\n test_file = data_file('querywise', 'test')\n train_pairs = data_file('querywise', 'train.pairs')\n test_pairs = data_file('querywise', 'test.pairs')\n cd_file = data_file('querywise', 'train.cd')\n\n params = [\n '--loss-function', loss_function,\n '-f', train_file,\n '-t', test_file,\n '--learn-pairs', train_pairs,\n '--test-pairs', test_pairs,\n '--column-description', cd_file,\n '-i', '10',\n '-T', '4',\n '-m', output_model_path,\n ]\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n diff_precision = 1e-2 if loss_function == 'YetiRankPairwise' else 1e-5\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool(diff_precision))]\n\n\[email protected](\n 'loss_function,eval_metric,boosting_type',\n [\n ('QueryRMSE', 'NDCG', 'Plain'),\n ('QueryRMSE', 'NDCG', 'Ordered'),\n # Boosting type 'Ordered' is not supported for YetiRankPairwise and PairLogitPairwise\n ('YetiRankPairwise', 'NDCG', 'Plain'),\n ('PairLogit', 'PairAccuracy', 'Plain'),\n ('PairLogitPairwise', 'NDCG', 'Plain'),\n ('PairLogitPairwise', 'PairAccuracy', 'Plain'),\n ],\n ids=[\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=QueryRMSE,eval_metric=NDCG,boosting_type=Ordered',\n 'loss_function=YetiRankPairwise,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=PairLogit,eval_metric=PairAccuracy,boosting_type=Plain',\n 'loss_function=PairLogitPairwise,eval_metric=NDCG,boosting_type=Plain',\n 'loss_function=PairLogitPairwise,eval_metric=PairAccuracy,boosting_type=Plain'\n ]\n)\ndef test_groupwise_with_cat_features(loss_function, eval_metric, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n train_file = data_file('black_friday', 'train')\n test_file = data_file('black_friday', 'test')\n cd_file = data_file('black_friday', 'cd')\n\n params = [\n '--loss-function', loss_function,\n '--has-header',\n '-f', train_file,\n '-t', test_file,\n '--column-description', cd_file,\n '--boosting-type', boosting_type,\n '-i', '10',\n '-T', '4',\n '--ctr-history-unit', 'Sample',\n '--eval-metric', eval_metric,\n '-m', output_model_path,\n ]\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n diff_precision = 1e-2 if loss_function == 'YetiRankPairwise' else 1e-5\n return [local_canonical_file(output_eval_path, 
diff_tool=diff_tool(diff_precision))]\n\n\[email protected](\n 'border_count',\n [1, 3, 10],\n ids=lambda border_count: 'border_count=%d' % border_count\n)\[email protected](\n 'boosting_type',\n BOOSTING_TYPE,\n ids=lambda boosting_type: 'boosting_type=%s' % boosting_type\n)\ndef test_ctr_target_quantization(border_count, boosting_type):\n output_model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n train_file = data_file('adult_crossentropy', 'train_proba')\n test_file = data_file('adult_crossentropy', 'test_proba')\n cd_file = data_file('adult_crossentropy', 'train.cd')\n\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'RMSE',\n '-f': train_file,\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '3',\n '-T': '4',\n '-m': output_model_path,\n '--ctr-target-border-count': str(border_count)\n }\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, output_eval_path)\n return [local_canonical_file(output_eval_path, diff_tool=diff_tool())]\n\n\[email protected]('grow_policy', NONSYMMETRIC)\ndef test_apply_with_grow_policy(grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n train_file = data_file('adult', 'train_small')\n test_file = data_file('adult', 'test_small')\n cd_file = data_file('adult', 'train.cd')\n\n params = {\n '--use-best-model': 'false',\n '--loss-function': 'Logloss',\n '-f': train_file,\n '-t': test_file,\n '--column-description': cd_file,\n '--boosting-type': 'Plain',\n '-i': '10',\n '-w': '0.03',\n '-T': '4',\n '-m': output_model_path,\n '--grow-policy': grow_policy,\n '--eval-file': test_eval_path,\n '--output-columns': 'RawFormulaVal',\n '--counter-calc-method': 'SkipTest',\n }\n\n fit_catboost_gpu(params)\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert(compare_evals_with_precision(test_eval_path, calc_eval_path, skip_last_column_in_fit=False))\n\n\[email protected]('loss_function', ('YetiRank', 'YetiRankPairwise'))\ndef test_yetirank_default_metric(loss_function):\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n\n train_file = data_file('black_friday', 'train')\n test_file = data_file('black_friday', 'test')\n cd_file = data_file('black_friday', 'cd')\n\n params = [\n '--loss-function', loss_function,\n '--has-header',\n '-f', train_file,\n '-t', test_file,\n '--column-description', cd_file,\n '--boosting-type', 'Plain',\n '-i', '10',\n '-T', '4',\n '--test-err-log', test_error_path,\n ]\n\n fit_catboost_gpu(params)\n\n diff_precision = 2e-3 if loss_function == 'YetiRankPairwise' else 1e-5\n return [local_canonical_file(test_error_path, diff_tool=diff_tool(diff_precision))]\n\n\ndef is_valid_gpu_params(boosting_type, grow_policy, score_function, loss_func):\n correlation_scores = ['Cosine', 'NewtonCosine']\n second_order_scores = ['NewtonL2', 'NewtonCosine']\n\n is_correct = True\n\n # compatibility with ordered boosting\n if (grow_policy in NONSYMMETRIC) or (score_function not in correlation_scores) or (loss_func in MULTICLASS_LOSSES):\n is_correct = boosting_type in ['Plain', 'Default']\n\n if loss_func in MULTICLASS_LOSSES and score_function in second_order_scores:\n is_correct = False\n\n return is_correct\n\n\[email protected]('boosting_type', BOOSTING_TYPE + 
['Default'])\[email protected]('grow_policy', GROW_POLICIES)\[email protected]('score_function', SCORE_FUNCTIONS)\[email protected]('loss_func', ['RMSE', 'Logloss', 'MultiClass', 'YetiRank'])\ndef test_grow_policies(boosting_type, grow_policy, score_function, loss_func):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n model_path = yatest.common.test_output_path('model.bin')\n output_eval_path = yatest.common.test_output_path('test.eval')\n\n if loss_func in ['RMSE', 'Logloss']:\n learn = data_file('adult', 'train_small')\n test = data_file('adult', 'test_small')\n cd = data_file('adult', 'train.cd')\n elif loss_func == 'MultiClass':\n learn = data_file('cloudness_small', 'train_small')\n test = data_file('cloudness_small', 'test_small')\n cd = data_file('cloudness_small', 'train.cd')\n elif loss_func == 'YetiRank':\n learn = data_file('querywise', 'train')\n test = data_file('querywise', 'test')\n cd = data_file('querywise', 'train.cd')\n else:\n assert False\n\n params = {\n '--loss-function': loss_func,\n '--grow-policy': grow_policy,\n '--score-function': score_function,\n '-m': model_path,\n '-f': learn,\n '-t': test,\n '--column-description': cd,\n '-i': '20',\n '-T': '4',\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--eval-file': output_eval_path,\n '--use-best-model': 'false',\n }\n\n if boosting_type != 'Default':\n params['--boosting-type'] = boosting_type\n if grow_policy == 'Lossguide':\n params['--depth'] = 100\n\n # try:\n if is_valid_gpu_params(boosting_type, grow_policy, score_function, loss_func):\n fit_catboost_gpu(params)\n else:\n return\n # except Exception:\n # assert not is_valid_gpu_params(boosting_type, grow_policy, score_function, loss_func)\n # return\n #\n assert is_valid_gpu_params(boosting_type, grow_policy, score_function, loss_func)\n\n formula_predict_path = yatest.common.test_output_path('predict_test.eval')\n calc_cmd = (\n CATBOOST_PATH,\n 'calc',\n '--input-path', test,\n '--column-description', cd,\n '-m', model_path,\n '--output-path', formula_predict_path\n )\n yatest.common.execute(calc_cmd)\n assert (compare_evals_with_precision(output_eval_path, formula_predict_path, rtol=1e-4))\n\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),\n local_canonical_file(test_error_path, diff_tool=diff_tool())]\n\n\ndef test_output_options():\n output_options_path = 'training_options.json'\n train_dir = 'catboost_info'\n\n params = (\n '--loss-function', 'Logloss',\n '-f', data_file('adult', 'train_small'),\n '-t', data_file('adult', 'test_small'),\n '--column-description', data_file('adult', 'train.cd'),\n '-i', '10',\n '-T', '4',\n '--train-dir', train_dir,\n '--training-options-file', output_options_path,\n )\n fit_catboost_gpu(params)\n return local_canonical_file(os.path.join(train_dir, output_options_path))\n\n\ndef model_based_eval_catboost_gpu(params):\n cmd = [CATBOOST_PATH, 'model-based-eval', '--task-type', 'GPU']\n append_params_to_cmdline(cmd, params)\n yatest.common.execute(cmd)\n\n\[email protected](\n 'dataset',\n [{'base': 'querywise', 'cd': 'train.cd'}, {'base': 'adult', 'train': 'train_small', 'test': 'test_small', 'cd': 'train.cd'}],\n ids=['querywise', 'adult']\n)\ndef test_model_based_eval(dataset):\n test_err_log = 'test_error.log'\n\n def get_table_path(table):\n return data_file(dataset['base'], dataset.get(table, table))\n\n def get_params():\n return (\n '--data-partition', 'DocParallel',\n 
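            # Base options shared by the baseline fits and the model-based-eval runs below.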
'--permutations', '1',\n '--loss-function', 'RMSE',\n '-f', get_table_path('train'),\n '-t', get_table_path('test'),\n '--cd', get_table_path('cd'),\n '-i', '100',\n '-T', '4',\n '-w', '0.01',\n '--test-err-log', test_err_log,\n )\n\n fit_catboost_gpu(\n get_params() + (\n '--snapshot-file', 'baseline_model_snapshot',\n '-I', '10:11:12:13:15:20:31',\n '--train-dir', 'zero_out_tested',\n ))\n\n model_based_eval_catboost_gpu(\n get_params() + (\n '--baseline-model-snapshot', 'baseline_model_snapshot',\n '--features-to-evaluate', '10,11,12,13;15,20,31',\n '--offset', '20',\n '--experiment-size', '10',\n '--experiment-count', '2',\n '--train-dir', 'zero_out_tested',\n ))\n\n fit_catboost_gpu(\n get_params() + (\n '--snapshot-file', 'baseline_model_snapshot',\n '--train-dir', 'use_tested',\n ))\n\n model_based_eval_catboost_gpu(\n get_params() + (\n '--baseline-model-snapshot', 'baseline_model_snapshot',\n '--features-to-evaluate', '10,11,12,13;15,20,31',\n '--offset', '20',\n '--experiment-size', '10',\n '--experiment-count', '2',\n '--use-evaluated-features-in-baseline-model',\n '--train-dir', 'use_tested',\n ))\n\n return [\n local_canonical_file(os.path.join('zero_out_tested', 'feature_set0_fold0', test_err_log), diff_tool=diff_tool()),\n local_canonical_file(os.path.join('zero_out_tested', 'feature_set0_fold1', test_err_log), diff_tool=diff_tool()),\n local_canonical_file(os.path.join('zero_out_tested', 'feature_set1_fold0', test_err_log), diff_tool=diff_tool()),\n local_canonical_file(os.path.join('zero_out_tested', 'feature_set1_fold1', test_err_log), diff_tool=diff_tool()),\n local_canonical_file(os.path.join('use_tested', 'feature_set0_fold0', test_err_log), diff_tool=diff_tool()),\n local_canonical_file(os.path.join('use_tested', 'feature_set0_fold1', test_err_log), diff_tool=diff_tool()),\n local_canonical_file(os.path.join('use_tested', 'feature_set1_fold0', test_err_log), diff_tool=diff_tool()),\n local_canonical_file(os.path.join('use_tested', 'feature_set1_fold1', test_err_log), diff_tool=diff_tool())\n ]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\[email protected]('feature_estimators', TEXT_FEATURE_ESTIMATORS)\ndef test_fit_binclass_with_text_features(boosting_type, feature_estimators):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n params = {\n '--loss-function': 'Logloss',\n '--eval-metric': 'AUC',\n '-f': data_file(pool_name, 'train'),\n '-t': test_file,\n '--text-processing': json.dumps(text_processing),\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '20',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--eval-file': test_eval_path,\n '--output-columns': 
'RawFormulaVal',\n '--use-best-model': 'false',\n }\n fit_catboost_gpu(params)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert(compare_evals_with_precision(test_eval_path, calc_eval_path, rtol=1e-4, skip_last_column_in_fit=False))\n\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),\n local_canonical_file(test_error_path, diff_tool=diff_tool())]\n\n\[email protected]('feature_estimators', TEXT_FEATURE_ESTIMATORS)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_fit_multiclass_with_text_features(feature_estimators, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n params = {\n '--loss-function': loss_function,\n '--eval-metric': 'Accuracy',\n '-f': data_file(pool_name, 'train'),\n '-t': test_file,\n '--text-processing': json.dumps(text_processing),\n '--column-description': cd_file,\n '--boosting-type': 'Plain',\n '-i': '20',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--eval-file': test_eval_path,\n '--output-columns': 'RawFormulaVal',\n '--use-best-model': 'false',\n }\n fit_catboost_gpu(params)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert(\n compare_evals_with_precision(\n test_eval_path,\n calc_eval_path,\n rtol=1e-4,\n atol=1e-6,\n skip_last_column_in_fit=False\n )\n )\n\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),\n local_canonical_file(test_error_path, diff_tool=diff_tool())]\n\n\[email protected]('grow_policy', GROW_POLICIES)\ndef test_shrink_model_with_text_features(grow_policy):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n loss_function = 'MultiClass'\n feature_estimators = 'BoW,NaiveBayes,BM25'\n\n dictionaries = [{'dictionary_id': 'Word'}, {'dictionary_id': 'Bigram', 'gram_order': '2'}]\n dicts = {'BoW': ['Bigram', 'Word'], 'NaiveBayes': ['Word'], 'BM25': ['Word']}\n feature_processing = [{'feature_calcers': [calcer], 'dictionaries_names': dicts[calcer]} for calcer in feature_estimators.split(',')]\n\n text_processing = {'feature_processing': {'default': feature_processing}, 'dictionaries': dictionaries}\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n params = {\n '--loss-function': loss_function,\n '--eval-metric': 'Accuracy',\n '-f': 
data_file(pool_name, 'train'),\n '-t': test_file,\n '--column-description': cd_file,\n '--text-processing': json.dumps(text_processing),\n '--grow-policy': grow_policy,\n '--boosting-type': 'Plain',\n '-i': '20',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--eval-file': test_eval_path,\n '--output-columns': 'RawFormulaVal',\n '--use-best-model': 'true',\n }\n fit_catboost_gpu(params)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert(compare_evals_with_precision(test_eval_path, calc_eval_path, rtol=1e-4, skip_last_column_in_fit=False))\n\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),\n local_canonical_file(test_error_path, diff_tool=diff_tool())]\n\n\nDICTIONARIES_OPTIONS = [\n {\n \"Simple\": \"token_level_type=Word:occurrence_lower_bound=50\"\n },\n {\n \"UniGramOccur5\": \"occurrence_lower_bound=5:token_level_type=Letter\",\n \"BiGramOccur2\": \"occurrence_lower_bound=2:gram_order=2:token_level_type=Letter\",\n \"WordDictOccur1\": \"occurrence_lower_bound=1:token_level_type=Word\",\n \"WordDictOccur2\": \"occurrence_lower_bound=2:token_level_type=Word\",\n \"WordDictOccur3\": \"occurrence_lower_bound=3:token_level_type=Word\"\n },\n {\n \"Unigram\": \"gram_order=1:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Bigram\": \"gram_order=2:token_level_type=Letter:occurrence_lower_bound=50\",\n \"Trigram\": \"gram_order=3:token_level_type=Letter:occurrence_lower_bound=50\"\n },\n {\n \"Letter\": \"token_level_type=Letter:occurrence_lower_bound=50\",\n \"Word\": \"token_level_type=Word:occurrence_lower_bound=50\"\n }\n]\n\n\[email protected]('dictionaries', DICTIONARIES_OPTIONS)\[email protected]('loss_function', MULTICLASS_LOSSES)\ndef test_text_processing_options(dictionaries, loss_function):\n output_model_path = yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n dictionaries = ','.join([key + ':' + value for key, value in dictionaries.items()])\n feature_estimators = 'BM25,BoW,NaiveBayes'\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd')\n params = {\n '--loss-function': loss_function,\n '--eval-metric': 'Accuracy',\n '-f': data_file(pool_name, 'train'),\n '-t': test_file,\n '--column-description': cd_file,\n '--dictionaries': dictionaries,\n '--feature-calcers': feature_estimators,\n '--boosting-type': 'Plain',\n '-i': '20',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--eval-file': test_eval_path,\n '--output-columns': 'RawFormulaVal',\n '--use-best-model': 'false',\n }\n fit_catboost_gpu(params)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert(\n compare_evals_with_precision(\n test_eval_path,\n calc_eval_path,\n rtol=1e-4,\n atol=1e-6,\n skip_last_column_in_fit=False\n )\n )\n\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool(1e-6)),\n local_canonical_file(test_error_path, diff_tool=diff_tool(1e-6))]\n\n\[email protected]('boosting_type', BOOSTING_TYPE)\ndef test_fit_with_per_feature_text_options(boosting_type):\n output_model_path = 
yatest.common.test_output_path('model.bin')\n learn_error_path = yatest.common.test_output_path('learn.tsv')\n test_error_path = yatest.common.test_output_path('test.tsv')\n\n test_eval_path = yatest.common.test_output_path('test.eval')\n calc_eval_path = yatest.common.test_output_path('calc.eval')\n\n text_processing = {\n 'tokenizers': [\n {'tokenizer_id': 'Space', 'delimiter': ' '},\n {'tokenizer_id': 'Comma', 'delimiter': ','},\n ],\n 'dictionaries': [\n {'dictionary_id': 'Word', 'token_level_type': 'Word', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Bigram', 'token_level_type': 'Word', 'gram_order': '2', 'occurrence_lower_bound': '50'},\n {'dictionary_id': 'Trigram', 'token_level_type': 'Letter', 'gram_order': '3', 'occurrence_lower_bound': '50'},\n ],\n 'feature_processing': {\n '0': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n '1': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word'], 'feature_calcers': ['BoW', 'NaiveBayes', 'BM25']},\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Trigram'], 'feature_calcers': ['BoW', 'BM25']},\n ],\n '2': [\n {'tokenizers_names': ['Space'], 'dictionaries_names': ['Word', 'Bigram', 'Trigram'], 'feature_calcers': ['BoW']},\n ],\n }\n }\n\n pool_name = 'rotten_tomatoes'\n test_file = data_file(pool_name, 'test')\n cd_file = data_file(pool_name, 'cd_binclass')\n params = {\n '--loss-function': 'Logloss',\n '--eval-metric': 'AUC',\n '-f': data_file(pool_name, 'train'),\n '-t': test_file,\n '--text-processing': json.dumps(text_processing),\n '--column-description': cd_file,\n '--boosting-type': boosting_type,\n '-i': '20',\n '-T': '4',\n '-m': output_model_path,\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--eval-file': test_eval_path,\n '--output-columns': 'RawFormulaVal',\n '--use-best-model': 'false',\n }\n fit_catboost_gpu(params)\n\n apply_catboost(output_model_path, test_file, cd_file, calc_eval_path, output_columns=['RawFormulaVal'])\n assert(compare_evals_with_precision(test_eval_path, calc_eval_path, rtol=1e-4, skip_last_column_in_fit=False))\n\n return [local_canonical_file(learn_error_path, diff_tool=diff_tool()),\n local_canonical_file(test_error_path, diff_tool=diff_tool())]\n\n\[email protected]('task_type', ['CPU', 'GPU'])\ndef test_eval_feature(task_type):\n output_eval_path = yatest.common.test_output_path('feature.eval')\n test_err_log = 'test_error.log'\n cmd = (\n CATBOOST_PATH,\n 'eval-feature',\n '--task-type', task_type,\n '--loss-function', 'Logloss',\n '-f', data_file('higgs', 'train_small'),\n '--cd', data_file('higgs', 'train.cd'),\n '--features-to-evaluate', '0-6;21-27',\n '--feature-eval-mode', 'OneVsOthers',\n '-i', '40',\n '-T', '4',\n '-w', '0.01',\n '--feature-eval-output-file', output_eval_path,\n '--offset', '2',\n '--fold-count', '2',\n '--fold-size-unit', 'Object',\n '--fold-size', '20',\n '--test-err-log', test_err_log,\n '--train-dir', '.',\n )\n if task_type == 'GPU':\n cmd += (\n '--permutations', '1',\n '--data-partition', 'DocParallel',\n '--bootstrap-type', 'No',\n '--random-strength', '0',\n )\n\n yatest.common.execute(cmd)\n\n def get_best_metric(test_err_path):\n return np.amin(np.loadtxt(test_err_path, skiprows=1)[:, 1])\n\n best_metrics = [\n get_best_metric(os.path.join('Baseline_set_1_fold_3', test_err_log)),\n 
get_best_metric(os.path.join('Baseline_set_1_fold_2', test_err_log)),\n get_best_metric(os.path.join('Baseline_set_0_fold_3', test_err_log)),\n get_best_metric(os.path.join('Baseline_set_0_fold_2', test_err_log)),\n get_best_metric(os.path.join('Testing_set_1_fold_3', test_err_log)),\n get_best_metric(os.path.join('Testing_set_1_fold_2', test_err_log)),\n get_best_metric(os.path.join('Testing_set_0_fold_3', test_err_log)),\n get_best_metric(os.path.join('Testing_set_0_fold_2', test_err_log)),\n ]\n\n best_metrics_path = 'best_metrics.txt'\n np.savetxt(best_metrics_path, best_metrics)\n\n return [\n local_canonical_file(\n best_metrics_path,\n diff_tool=get_limited_precision_dsv_diff_tool(2e-2, False)\n )\n ]\n\n\[email protected]('dataset_has_weights', [True, False], ids=['dataset_has_weights=True', 'dataset_has_weights=False'])\ndef test_metric_description(dataset_has_weights):\n learn_error_path = yatest.common.test_output_path('learn_error.tsv')\n test_error_path = yatest.common.test_output_path('test_error.tsv')\n if dataset_has_weights:\n train_pool_filename = data_file('adult_weight', 'train_weight')\n test_pool_filename = data_file('adult_weight', 'test_weight')\n pool_cd_filename = data_file('adult_weight', 'train.cd')\n else:\n train_pool_filename = data_file('adult', 'train_small')\n test_pool_filename = data_file('adult', 'test_small')\n pool_cd_filename = data_file('adult', 'train.cd')\n eval_metric = 'AUC:hints=skip_train~false'\n\n custom_metric_loss = 'Precision'\n custom_metric = 'Precision'\n\n params = {\n '--loss-function': 'Logloss',\n '-f': train_pool_filename,\n '-t': test_pool_filename,\n '--cd': pool_cd_filename,\n '-i': '10',\n '--learn-err-log': learn_error_path,\n '--test-err-log': test_error_path,\n '--eval-metric': eval_metric,\n '--custom-metric': custom_metric\n }\n fit_catboost_gpu(params)\n for filename in [learn_error_path, test_error_path]:\n with open(filename, 'r') as f:\n metrics_descriptions = f.readline().split('\\t')[1:] # without 'iter' column\n metrics_descriptions[-1] = metrics_descriptions[-1][:-1]\n unique_metrics_descriptions = set([s.lower() for s in metrics_descriptions])\n assert len(metrics_descriptions) == len(unique_metrics_descriptions)\n expected_objective_metric_description = 'Logloss'\n expected_eval_metric_description = 'AUC'\n if dataset_has_weights:\n expected_custom_metrics_descriptions = [custom_metric_loss + ':use_weights=False', custom_metric_loss + ':use_weights=True']\n else:\n expected_custom_metrics_descriptions = [custom_metric_loss]\n assert unique_metrics_descriptions == set(s.lower() for s in [expected_objective_metric_description] + [expected_eval_metric_description] + expected_custom_metrics_descriptions)\n return [local_canonical_file(learn_error_path), local_canonical_file(test_error_path)]\n"
] | [
[
"numpy.asarray",
"numpy.asmatrix",
"numpy.atleast_2d",
"numpy.size",
"scipy._lib.six.xrange",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.log",
"numpy.allclose",
"numpy.concatenate",
"numpy.all",
"numpy.float32",
"numpy.savetxt",
"numpy.random.RandomState",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NVlabs/oscar | [
"df778a4173a118f10627cb2ef4021c26303231fc"
] | [
"oscar/controllers/joint_tor.py"
] | [
"# ---------------------------------------------------------------\n# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the NVIDIA Source Code License\n# for OSCAR. To view a copy of this license, see the LICENSE file.\n# ---------------------------------------------------------------\n\nfrom isaacgym import gymapi\nimport torch\nfrom .base_controller import Controller\n\n\nclass JointTorqueController(Controller):\n \"\"\"\n Joint Torque Controller.\n\n This controller expects D-DOF commands either in delta form (dq1, dq2, ..., dqD), or absolute form\n (q1, q2, ..., qD), as specified by the @use_delta argument.\n\n Args:\n input_min (int, float, or array): Minimum values below which received commands will be clipped\n input_max (int, float, or array): Maximum values above which received commands will be clipped\n output_min (int, float, or array): Lower end of range that received commands will be mapped to\n output_max (int, float, or array): Upper end of range that received commands will be mapped to\n control_min (int, float, or array): Minimum control values below which outputted controls will be clipped\n control_max (int, float, or array): Maximum control values above which outputted controls will be clipped\n control_noise (float): Amount of noise to apply. Should be in [0, 1)\n control_dim (int): Outputted control dimension -- should be number of joints from base to eef body frame\n device (str): Which device to send all tensors to by default\n use_delta (bool): Whether to expect received commands to be delta or absolute joint positions\n normalize_control (bool): Whether or not to normalize outputted controls to (-1, 1) range\n \"\"\"\n def __init__(\n self,\n input_min,\n input_max,\n output_min,\n output_max,\n control_min,\n control_max,\n control_noise,\n control_dim,\n device,\n use_delta=True,\n normalize_control=True,\n **kwargs, # hacky way to sink extraneous args\n ):\n # Run super init first\n super().__init__(\n command_dim=control_dim,\n input_min=input_min,\n input_max=input_max,\n output_min=output_min,\n output_max=output_max,\n control_min=control_min,\n control_max=control_max,\n control_noise=control_noise,\n control_dim=control_dim,\n device=device,\n normalize_control=normalize_control,\n )\n\n # Store internal vars\n self.use_delta = use_delta\n\n # Initialize internal vars\n self.n_envs = None\n self.goal_torque = None\n\n def update_goal(self, control_dict, command, env_ids=None, train=False):\n \"\"\"\n Updates the internal goal (absolute joint torques) based on the inputted joint command\n\n NOTE: received joints from @control_dict can be greater than control_dim; we assume the first control_dim\n indexes correspond to the relevant elements to be used for joint torque goal setting\n\n Args:\n control_dict (dict): Dictionary of keyword-mapped tensors including relevant control\n information (eef state, q states, etc.)\n\n Expected keys:\n eef_state: shape of (N, 13), the (lin_pos, quat_ori, lin_vel, ang_vel) state of the eef body\n\n command (tensor): D-DOF joint torque command -- should be (dq1, dq2, ..., dqD), or absolute form\n (q1, q2, ..., qD) if self.use_delta is False.\n\n env_ids (None or tensor): If specified, should be (integer) IDs corresponding to the\n specific env instances of this robot that should be reset\n\n train (bool): If True, will assume env_ids is None and will NOT index specific goals so we avoid inplace\n operations and so that we can backprop later\n \"\"\"\n # Scale the commands 
appropriately\n cmd = self.scale_command(command)\n\n # Set n_envs, goal_pos, and goal_ori if we haven't done so already\n if self.n_envs is None:\n self.n_envs = command.shape[0]\n self.goal_torque = torch.zeros(self.n_envs, self.control_dim, device=self.device)\n\n # If we're training, make sure env_ids is None\n if train:\n assert env_ids is None or len(env_ids) == self.n_envs, \"When in training mode, env_ids must be None or len of n_envs!\"\n # Directly set goals\n self.goal_torque = self.goal_torque + cmd if self.use_delta else cmd\n else:\n # If env_ids is None, we update all the envs\n if env_ids is None:\n env_ids = torch.arange(start=0, end=self.n_envs, device=self.device, dtype=torch.uint32)\n\n # Update goal\n self.goal_torque[env_ids] = self.goal_torque[env_ids] + cmd[env_ids] if self.use_delta else cmd[env_ids]\n\n def compute_control(self, control_dict):\n \"\"\"\n Computes low-level joint torque controls.\n\n Since we are directly using joint-torque control, this simply is equivalent to returning the\n internal goal state\n\n Args:\n control_dict (dict): Dictionary of state tensors including relevant info for controller computation\n\n Expected keys:\n eef_state: shape of (N, 13), the (lin_pos, quat_ori, lin_vel, ang_vel) state of the eef body\n\n Returns:\n tensor: Processed low-level joint position control actions\n \"\"\"\n # Post-process internal goal (clipping + normalization)\n u = self.postprocess_control(self.goal_torque)\n\n # Return the control joint positions\n return u\n\n def reset(self, control_dict, env_ids=None):\n \"\"\"\n Reset the internal vars associated with this controller\n\n Args:\n control_dict (dict): Dictionary of state tensors including relevant info for controller computation\n\n Expected keys:\n eef_state: shape of (N, 13), the (lin_pos, quat_ori, lin_vel, ang_vel) state of the eef body\n\n env_ids (None or tensor): If specified, should be (integer) IDs corresponding to the\n specific env instances of this policy that should be reset\n \"\"\"\n # Clear n_envs, goal if we're now controlling a new set of envs\n n_cmds = control_dict[\"eef_state\"].shape[0]\n if self.n_envs != n_cmds:\n self.n_envs = None\n self.goal_torque = None\n # Reset corresponding envs to current positions\n cmd = torch.zeros(n_cmds, self.command_dim, device=self.device)\n self.update_goal(\n control_dict=control_dict,\n command=cmd,\n env_ids=env_ids\n )\n\n def get_flattened_goals(self):\n \"\"\"\n Returns the current goal command in a serialized 2D form\n\n Returns:\n torch.tensor: (N, -1) current goals in this controller\n \"\"\"\n return self.goal_torque\n\n @property\n def goal_dim(self):\n # This is the same as the control dimension\n return self.control_dim\n\n @property\n def control_type(self):\n # This controller outputs joint positions\n return gymapi.DOF_MODE_EFFORT\n\n @property\n def differentiable(self):\n # We can backprop through all computations\n return True\n"
] | [
[
"torch.arange",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rmunoz12/tensorpack | [
"60f4c6df7c4a27b553469352dd6ce73333db1ec6"
] | [
"tensorpack/tfutils/tower.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: tower.py\n\n\nimport tensorflow as tf\nfrom six.moves import zip\n\nfrom ..utils import logger\nfrom ..utils.argtools import call_only_once\nfrom ..utils.naming import MOVING_SUMMARY_OPS_KEY\nfrom ..utils.develop import HIDE_DOC\nfrom .collection import CollectionGuard\nfrom .common import get_tf_version_number, get_op_or_tensor_by_name, get_op_tensor_name\n\n__all__ = ['get_current_tower_context', 'TowerContext', 'TowerFuncWrapper',\n 'TowerTensorHandle', 'TowerTensorHandles']\n\n_CurrentTowerContext = None\n\n\nclass TowerContext(object):\n \"\"\" A context where the current model is being built in. \"\"\"\n\n def __init__(self, tower_name, is_training, index=0, vs_name=''):\n \"\"\"\n Args:\n tower_name (str): The name scope of the tower.\n is_training (bool):\n index (int): index of this tower, only used in training.\n vs_name (str): Open a new variable scope with this name.\n \"\"\"\n self._name = tower_name\n self._is_training = bool(is_training)\n\n if not self._is_training:\n assert index == 0, \\\n \"TowerContext(index) is only valid in training!\"\n\n self._index = int(index)\n self._vs_name = vs_name\n if len(vs_name):\n assert len(tower_name), \"TowerContext(vs_name) cannot be used with an empty tower_name!\"\n\n self._initial_vs_reuse = tf.get_variable_scope().reuse\n if self.has_own_variables:\n assert not self._initial_vs_reuse, \\\n \"Cannot create tower {} with reuse=True!\".format(tower_name)\n\n self._collection_guard = CollectionGuard(\n self._name,\n check_diff=not self.is_main_training_tower,\n freeze_keys=self._keys_to_freeze())\n\n @property\n def is_main_training_tower(self):\n return self.is_training and self._index == 0\n\n @property\n def is_training(self):\n return self._is_training\n\n @property\n def has_own_variables(self):\n \"\"\"\n Whether this tower is supposed to have its own variables.\n \"\"\"\n return self.is_main_training_tower or \\\n (self.is_training and len(self._vs_name) > 0) or \\\n (not self.is_training and not self._initial_vs_reuse)\n\n @property\n def name(self):\n return self._name\n\n @property\n def vs_name(self):\n return self._vs_name\n\n @property\n def ns_name(self):\n return self._name\n\n def get_collection_in_tower(self, key):\n \"\"\"\n Get items from this collection that are added in the current tower.\n \"\"\"\n return self._collection_guard.get_collection_in_tower(key)\n\n # TODO currently only used in StagingInput\n @property\n def index(self):\n return self._index\n\n @call_only_once\n def _get_scopes(self):\n if not len(self._name):\n # work around https://github.com/tensorflow/tensorflow/issues/14703\n return [tf.variable_scope(tf.get_variable_scope())]\n ret = []\n\n # either the Tower was originally created with reuse,\n # or a training tower without vs has to use reuse.\n reuse = (self.is_training and self._index > 0 and not\n self.has_own_variables) or self._initial_vs_reuse\n\n if len(self._vs_name):\n ret.append(tf.variable_scope(self._vs_name, reuse=reuse))\n else:\n if reuse:\n ret.append(tf.variable_scope(\n tf.get_variable_scope(), reuse=True))\n else:\n # work around https://github.com/tensorflow/tensorflow/issues/14703\n ret.append(tf.variable_scope(tf.get_variable_scope()))\n # always clear existing ns # TODO check existing ns\n if len(self._name) and self._name != self._vs_name:\n ret.append(tf.name_scope(self._name + '/'))\n return ret\n\n def _keys_to_freeze(self):\n if self.is_main_training_tower:\n return []\n if self.is_training:\n return 
[tf.GraphKeys.SUMMARIES, MOVING_SUMMARY_OPS_KEY]\n # freeze UPDATE_OPS during inference because they should never be used\n return [tf.GraphKeys.SUMMARIES, MOVING_SUMMARY_OPS_KEY, tf.GraphKeys.UPDATE_OPS]\n\n def __enter__(self):\n global _CurrentTowerContext\n assert _CurrentTowerContext is None, \"Cannot nest TowerContext!\"\n _CurrentTowerContext = self\n if self.is_training:\n curr_vs = tf.get_variable_scope()\n assert curr_vs.name == '', \"In training, cannot nest TowerContext with an existing variable scope!\"\n\n self._ctxs = self._get_scopes()\n self._ctxs.append(self._collection_guard)\n for c in self._ctxs:\n c.__enter__()\n\n if get_tf_version_number() >= 1.2:\n # check that ns_name is always the same as _name\n ns = tf.get_default_graph().get_name_scope()\n assert ns == self._name, \\\n \"Name conflict: name_scope inside tower '{}' becomes '{}'!\".format(self._name, ns) \\\n + \" You may need a different name for the tower!\"\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n global _CurrentTowerContext\n _CurrentTowerContext = None\n\n if not self.has_own_variables:\n diff_trainable_vars = self._collection_guard.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES)\n assert len(diff_trainable_vars) == 0, \\\n \"New TRAINABLE_VARIABLES shouldn't be created in {}: \".format(\n self._name) + ', '.join([k.name for k in diff_trainable_vars])\n for c in self._ctxs[::-1]:\n c.__exit__(exc_type, exc_val, exc_tb)\n return False\n\n def __str__(self):\n return \"TowerContext(name={}, is_training={})\".format(\n self._name, self._is_training)\n\n\ndef get_current_tower_context():\n return _CurrentTowerContext\n\n\nclass TowerFuncWrapper(object):\n \"\"\"\n A wrapper around a tower function (function which builds one tower, i.e. one replicate of the model).\n It keeps track of the name scope, variable scope and input/output tensors\n each time the function is called.\n\n :class:`TowerTrainer` needs this so that it knows how to build a predictor.\n \"\"\"\n\n def __init__(self, tower_fn, inputs_desc):\n \"\"\"\n Args:\n tower_func: a function which builds one tower in the graph.\n It takes several input tensors and could return anything.\n inputs_desc ([InputDesc]): use this to figure out the right name for the input tensors.\n \"\"\"\n assert callable(tower_fn), tower_fn\n inputs_desc_names = [k.name for k in inputs_desc]\n assert len(set(inputs_desc_names)) == len(inputs_desc_names), \\\n \"Duplicated names in inputs_desc! 
\" + str(inputs_desc_names)\n self._tower_fn = tower_fn\n self._inputs_desc = inputs_desc\n\n self._handles = []\n\n def __new__(cls, tower_fn, inputs_desc):\n # to avoid double-wrapping a function\n if isinstance(tower_fn, TowerFuncWrapper):\n return tower_fn\n else:\n return super(TowerFuncWrapper, cls).__new__(cls)\n\n def __call__(self, *args):\n ctx = get_current_tower_context()\n assert ctx is not None, \"Function must be called under TowerContext!\"\n output = self._tower_fn(*args)\n handle = TowerTensorHandle(ctx, args, output, self._inputs_desc)\n self._handles.append(handle)\n return output\n\n @property\n def towers(self):\n \"\"\"\n Returns:\n a :class:`TowerTensorHandles` object, that can\n access the tower handles by either indices or names.\n \"\"\"\n return TowerTensorHandles(self._handles)\n\n @property\n def inputs_desc(self):\n return self._inputs_desc\n\n\nclass TowerTensorHandles(object):\n \"\"\"\n Wrap a list of :class:`TowerTensorHandle`,\n to support access to them by index or names.\n \"\"\"\n def __init__(self, handles):\n self._handles = handles\n self._name_to_handle = {k.ns_name: k for k in handles}\n\n def __getitem__(self, name_or_index):\n \"\"\"\n Args:\n name_or_index (str or int):\n\n Returns:\n a :class:`TowerTensorHandle`.\n \"\"\"\n if isinstance(name_or_index, int):\n return self._handles[name_or_index]\n return self._name_to_handle[name_or_index]\n\n def training(self):\n \"\"\"\n Returns:\n A :class:`TowerTensorHandles`, containing only the training towers.\n \"\"\"\n handles = [h for h in self._handles if h.is_training]\n return TowerTensorHandles(handles)\n\n def inference(self):\n \"\"\"\n Returns:\n A :class:`TowerTensorHandles`, containing only the inference towers.\n \"\"\"\n handles = [h for h in self._handles if not h.is_training]\n return TowerTensorHandles(handles)\n\n\nclass TowerTensorHandle(object):\n \"\"\"\n When a function is called multiple times under each tower,\n it becomes hard to keep track of the scope and access those tensors\n in each tower.\n This class provides easy access to the tensors as well as the\n inputs/outputs created in each tower.\n \"\"\"\n\n @HIDE_DOC\n def __init__(self, ctx, input, output, inputs_desc=None):\n self._ctx = ctx\n\n self._extra_tensor_names = {}\n if inputs_desc is not None:\n assert len(inputs_desc) == len(input)\n self._extra_tensor_names = {\n get_op_tensor_name(x.name)[1]: y for x, y in zip(inputs_desc, input)}\n self._input = input\n self._output = output\n\n @property\n def vs_name(self):\n return self._ctx.vs_name\n\n @property\n def ns_name(self):\n return self._ctx.ns_name\n\n def get_tensor(self, name):\n \"\"\"\n Get a tensor in this tower. The name can be:\n 1. The name of the tensor without any tower prefix.\n 2. 
The name of an :class:`InputDesc`, if it is used when building the tower.\n \"\"\"\n name = get_op_tensor_name(name)[1]\n if len(self.ns_name):\n name_with_ns = self.ns_name + \"/\" + name\n else:\n name_with_ns = name\n\n try:\n ret = get_op_or_tensor_by_name(name_with_ns)\n except KeyError:\n if name in self._extra_tensor_names:\n return self._extra_tensor_names[name]\n raise\n else:\n if name in self._extra_tensor_names:\n logger.warn(\n \"'{}' may refer to both the tensor '{}' or the input '{}'.\".format(\n name, ret.name, self._extra_tensor_names[name].name) +\n \"Assuming it is the tensor '{}'.\".format(ret.name))\n return ret\n\n def get_tensors(self, names):\n return [self.get_tensor(name) for name in names]\n\n def __getitem__(self, name):\n return self.get_tensor(name)\n\n def get_variable(self, name):\n \"\"\"\n Get a variable used in this tower.\n \"\"\"\n name = get_op_tensor_name(name)[1]\n if len(self.vs_name):\n name_with_vs = self.vs_name + \"/\" + name\n else:\n name_with_vs = name\n return get_op_or_tensor_by_name(name_with_vs)\n\n @property\n def input(self):\n \"\"\"\n The list of input tensors used to build the tower.\n \"\"\"\n return self._input\n\n @property\n def output(self):\n \"\"\"\n The output returned by the tower function.\n \"\"\"\n return self._output\n\n @property\n def is_training(self):\n return self._ctx.is_training\n"
] | [
[
"tensorflow.variable_scope",
"tensorflow.get_default_graph",
"tensorflow.get_variable_scope",
"tensorflow.name_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
ZLLentz/lcls2 | [
"3edbea556779f619944ee9b97fb33cd815a19a37"
] | [
"psana/psana/detector/UtilsGraphics.py"
] | [
"\"\"\"\n Wrapper for graphical utils.\n\n from psana.psana.detector.UtilsGraphics import *\n from psana.psana.detector.UtilsGraphics import gr, fleximage, arr_median_limits\n\n img = det.raw.image(evt)\n arr = det.raw.calib(evt)\n amin, amax = arr_median_limits(arr, nneg=1, npos=3)\n\n flimg = fleximage(img, arr=arr, h_in=8, nneg=1, npos=3)\n flimg.update(img, arr=arr)\n\n gr.show(mode='DO NOT HOLD')\n\nCreated on 2020-11-09\n\"\"\"\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport numpy as np\nimport psana.pyalgos.generic.Graphics as gr\n#from psana.pyalgos.generic.NDArrUtils import info_ndarr\n#----\n\ndef arr_median_limits(arr, amin=None, amax=None, nneg=None, npos=None, fraclo=0.05, frachi=0.95):\n \"\"\" returns tuple of intensity limits (amin, amax) evaluated from arr or passed directly.\n \"\"\"\n if not(None in (amin, amax)): return amin, amax\n\n if None in (nneg, npos):\n qlo = np.quantile(arr, fraclo, interpolation='linear')\n qhi = np.quantile(arr, frachi, interpolation='linear')\n logger.debug('quantile(%.3f):%.1f quantile(%.3f):%.1f' % (fraclo, qlo, frachi, qhi))\n return qlo, qhi\n else:\n med = np.median(arr)\n spr = np.median(np.abs(arr-med))\n _amin, _amax = med-nneg*spr, med+npos*spr\n logger.debug('median:%.1f spread:%.1f amin:%.1f amax:%.1f' % (med, spr, _amin, _amax))\n return _amin, _amax\n\n\nclass flexbase:\n def __init__(self, **kwa):\n self.amin = kwa.setdefault('amin', None)\n self.amax = kwa.setdefault('amax', None)\n self.nneg = kwa.setdefault('nneg', None)\n self.npos = kwa.setdefault('npos', None)\n self.fraclo = kwa.setdefault('fraclo', 0.05)\n self.frachi = kwa.setdefault('frachi', 0.95)\n #self.alimits = kwa.setdefault('alimits', None)\n\n def _intensity_limits(self, a, kwa):\n \"\"\" returns tuple of intensity limits (amin, amax)\n NOTE: kwa is MUTABLE dict (NOT **kwa) because it needs (???) 
to be cleaned up of parameters not used in other places\n \"\"\"\n return arr_median_limits(\n arr = kwa.get('arr', a),\\\n amin = kwa.pop('amin', self.amin),\n amax = kwa.pop('amax', self.amax),\n nneg = kwa.pop('nneg', self.nneg),\n npos = kwa.pop('npos', self.npos),\n fraclo = kwa.pop('fraclo', self.fraclo),\n frachi = kwa.pop('frachi', self.frachi))\n\n\n def move(self, x0=100, y0=10):\n gr.move_fig(self.fig, x0, y0)\n\n\n def axtitle(self, title=''):\n gr.add_title_labels_to_axes(self.axim, title=title, fstit=10) \n #, xlabel=None, ylabel=None, fslab=14, fstit=20, color='k')\n\n\nclass fleximage(flexbase):\n def __init__(self, img, **kwa):\n \"\"\"\n \"\"\"\n flexbase.__init__(self, **kwa)\n arr = kwa.setdefault('arr', img)\n amin, amax = self._intensity_limits(arr, kwa)\n w_in = kwa.pop('w_in', 9)\n h_in = kwa.pop('h_in', 8)\n\n aspratio = float(img.shape[0])/float(img.shape[1]) # heigh/width\n\n kwfig = {}\n _fig=gr.plt.figure(\\\n num = kwa.get('num',None),\\\n figsize = kwa.get('figsize',(w_in, h_in)),\\\n dpi = kwa.get('dpi',80),\\\n facecolor = kwa.get('facecolor','w'),\\\n edgecolor = kwa.get('edgecolor','w'),\\\n frameon = kwa.get('frameon',True),\\\n clear = kwa.get('clear',False),\\\n **kwfig)\n\n kwfica={}\n self.fig, self.axim, self.axcb = gr.fig_img_cbar_axes(\\\n fig=_fig,\\\n win_axim = kwa.get('win_axim', (0.05,0.03,0.87,0.94)),\\\n win_axcb = kwa.get('win_axcb', (0.915,0.03,0.01,0.94)), **kwfica)\n\n kwic={'amin':amin,\n 'amax':amax,\n 'extent' :kwa.get('extent', None),\n 'interpolation':kwa.get('interpolation','nearest'),\n 'aspect' :kwa.get('aspect','equal'),\n 'origin' :kwa.get('origin','upper'),\n 'orientation' :kwa.get('orientation','vertical'),\n 'cmap' :kwa.get('cmap','inferno'),\n }\n self.imsh, self.cbar = gr.imshow_cbar(self.fig, self.axim, self.axcb, img, **kwic)\n\n gr.draw_fig(self.fig)\n #gr.show(mode=1)\n\n\n def update(self, img, **kwa):\n \"\"\"use kwa: arr=arr, nneg=1, npos=3 OR arr, fraclo=0.05, frachi=0.95\n \"\"\"\n amin, amax = self._intensity_limits(img, kwa)\n self.imsh.set_data(img)\n self.imsh.set_clim(amin, amax)\n #gr.show(mode=1)\n\n\nclass flexhist(flexbase):\n def __init__(self, arr, **kwa):\n \"\"\"\n \"\"\"\n flexbase.__init__(self, **kwa)\n w_in = kwa.pop('w_in', 6)\n h_in = kwa.pop('h_in', 5)\n\n kwfig = {}\n _fig=gr.plt.figure(num = kwa.get('num',None),\\\n figsize = kwa.get('figsize',(w_in, h_in)),\\\n dpi = kwa.get('dpi',80),\\\n facecolor = kwa.get('facecolor','w'),\\\n edgecolor = kwa.get('edgecolor','w'),\\\n frameon = kwa.get('frameon',True),\\\n clear = kwa.get('clear',False),\\\n **kwfig)\n\n kwfia={}\n self.fig, self.axhi = gr.fig_img_axes(\\\n fig=_fig,\\\n win_axim = kwa.get('win_axhi', (0.10, 0.05, 0.87, 0.90)),\\\n **kwfia)\n\n self.update(arr, **kwa)\n\n gr.draw_fig(self.fig)\n\n\n def update(self, arr, **kwa):\n \"\"\"use kwa: arr=arr, nneg=1, npos=3 OR arr, fraclo=0.05, frachi=0.95\n \"\"\"\n amin, amax = self._intensity_limits(arr, kwa)\n self.axhi.cla()\n kwh={'amp_range' : (amin, amax),\\\n 'bins' : kwa.get('bins',100),\\\n 'weights' : kwa.get('weights',None),\\\n 'color' : kwa.get('color',None),\\\n 'log' : kwa.get('log',False),\\\n }\n self.his = gr.hist(self.axhi, arr, **kwh)\n\n\n def axtitle(self, title=''):\n gr.add_title_labels_to_axes(self.axhi, title=title, fstit=10) \n #, xlabel=None, ylabel=None, fslab=14, fstit=20, color='k')\n\n\n\nclass fleximagespec(flexbase):\n def __init__(self, img, **kwa):\n \"\"\"\n \"\"\"\n flexbase.__init__(self, **kwa)\n arr = kwa.setdefault('arr', img)\n amin, amax = 
self._intensity_limits(arr, kwa)\n w_in = kwa.pop('w_in', 11)\n h_in = kwa.pop('h_in', 8)\n self.hcolor = kwa.get('color', 'lightgreen')\n self.hbins = kwa.get('bins', 100)\n\n #aspratio = float(img.shape[0])/img.shape[1] # heigh/width\n\n kwfig = {}\n _fig=gr.plt.figure(\\\n num = kwa.get('num',None),\\\n figsize = kwa.get('figsize',(w_in, h_in)),\\\n dpi = kwa.get('dpi',80),\\\n facecolor = kwa.get('facecolor','w'),\\\n edgecolor = kwa.get('edgecolor','w'),\\\n frameon = kwa.get('frameon',True),\\\n clear = kwa.get('clear',False),\\\n **kwfig)\n\n kwfica={}\n fymin, fymax = 0.050, 0.90\n self.fig, self.axim, self.axcb, self.axhi = gr.fig_img_cbar_hist_axes(\\\n fig=_fig,\\\n win_axim = kwa.get('win_axim', (0.02, fymin, 0.8, fymax)),\\\n win_axhi = kwa.get('win_axhi', (0.76, fymin, 0.15, fymax)),\\\n win_axcb = kwa.get('win_axcb', (0.915, fymin, 0.01, fymax)), **kwfica)\n\n kwic={'amin':amin,\n 'amax':amax,\n 'extent' :kwa.get('extent', None),\n 'interpolation':kwa.get('interpolation','nearest'),\n 'aspect' :kwa.get('aspect','equal'),\n 'origin' :kwa.get('origin','upper'),\n 'orientation' :kwa.get('orientation','vertical'),\n 'cmap' :kwa.get('cmap','inferno'),\n }\n self.imsh, self.cbar = gr.imshow_cbar(self.fig, self.axim, self.axcb, img, **kwic)\n\n self.update_his(arr, **kwa)\n\n gr.draw_fig(self.fig)\n\n\n def update_his(self, nda, **kwa):\n \"\"\"use kwa: arr=arr, nneg=1, npos=3 OR arr, fraclo=0.05, frachi=0.95\n \"\"\"\n amp_range = amin, amax = self._intensity_limits(nda, kwa)\n\n self.axhi.cla()\n self.axhi.invert_xaxis() # anvert x-axis direction\n self.axhi.set_ylim(amp_range)\n self.axhi.set_yticklabels([]) # removes axes labels, not ticks\n self.axhi.tick_params(axis='y', direction='in')\n self.axhi.set_ylim(amp_range)\n #self.axhi.set_ylabel('V')\n #self.axhi.get_yaxis().set_visible(False) # hides axes labels and ticks\n\n kwh={'bins' : kwa.get('bins',self.hbins),\\\n 'range' : kwa.get('range',amp_range),\\\n 'weights' : kwa.get('weights',None),\\\n 'color' : kwa.get('color', self.hcolor),\\\n 'log' : kwa.get('log',False),\\\n 'bottom' : kwa.get('bottom', 0),\\\n 'align' : kwa.get('align', 'mid'),\\\n 'histtype' : kwa.get('histtype',u'bar'),\\\n 'label' : kwa.get('label', ''),\\\n 'orientation': kwa.get('orientation',u'horizontal'),\\\n }\n \n #self.his = gr.hist(self.axhi, nda, **kwh)\n self.his = pp_hist(self.axhi, nda.ravel(), **kwh)\n wei, bins, patches = self.his\n gr.add_stat_text(self.axhi, wei, bins)\n\n\n def update(self, img, **kwa):\n \"\"\"\n \"\"\" \n amin, amax = self._intensity_limits(img, kwa)\n self.imsh.set_data(img)\n self.imsh.set_clim(amin, amax)\n self.axcb.set_ylim(amin, amax)\n\n arr = kwa.get('arr', img)\n self.update_his(arr, **kwa)\n\n\ndef pp_hist(ax, x, **kwa):\n \"\"\" matplotlib.pyplot.hist(x, \n bins=10, \n range=None, \n normed=False, \n weights=None, \n cumulative=False, \n bottom=None, \n histtype=u'bar', \n align=u'mid', \n orientation=u'vertical', \n rwidth=None, \n log=False, \n color=None, \n label=None, \n stacked=False, \n hold=None, \n **kwargs)\n \"\"\"\n return ax.hist(x, **kwa)\n\n# EOF\n"
] | [
[
"numpy.median",
"numpy.quantile",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AIRI-Institute/uncertainty_transformers | [
"982b5ae8b39cb484ce3559a72f95d18f30487e38",
"982b5ae8b39cb484ce3559a72f95d18f30487e38"
] | [
"src/run_ner_ood.py",
"src/ue4nlp/dropout_dpp.py"
] | [
"\"\"\" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).\"\"\"\n\nimport warnings\n\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\n\nimport os\nimport sys\nimport dataclasses\nfrom dataclasses import dataclass, field\nfrom typing import Callable, Dict, Optional\nfrom tqdm import tqdm\nimport json\nimport numpy as np\nfrom pathlib import Path\nimport random\nimport torch\nimport hydra\nimport pickle\n\nfrom utils.utils_wandb import init_wandb, wandb\n\nfrom ue4nlp.transformers_cached import (\n ElectraForSequenceClassificationCached,\n BertForSequenceClassificationCached,\n ElectraForTokenClassificationCached,\n)\nfrom ue4nlp.dropconnect_mc import (\n LinearDropConnectMC,\n activate_mc_dropconnect,\n convert_to_mc_dropconnect,\n hide_dropout,\n)\nfrom ue4nlp.dropout_mc import DropoutMC, activate_mc_dropout, convert_to_mc_dropout\nfrom ue4nlp.dropout_dpp import DropoutDPP, DropoutDPP_v2\nfrom ue4nlp.sequence_tagger import SequenceTagger\nfrom utils.utils_heads import ElectraNERHeadCustom\n\nfrom transformers import (\n AutoConfig,\n AutoModelForTokenClassification,\n AutoTokenizer,\n EvalPrediction,\n PretrainedConfig,\n DataCollatorForTokenClassification,\n PreTrainedTokenizerFast,\n Trainer,\n TrainingArguments,\n set_seed,\n ElectraForTokenClassification,\n)\n\nfrom ue4nlp.ue_estimator_mc import UeEstimatorMc, convert_dropouts\nfrom ue4nlp.ue_estimator_sngp import UeEstimatorSngp\nfrom ue4nlp.ue_estimator_mcdpp import UeEstimatorMcDpp\nfrom ue4nlp.ue_estimator_nuq import UeEstimatorNUQ\nfrom ue4nlp.ue_estimator_mahalanobis import UeEstimatorMahalanobis\n\nfrom datasets import load_metric, load_dataset, concatenate_datasets\n\nfrom utils.utils_dropout import set_last_dropout, get_last_dropout, set_last_dropconnect\nimport ue4nlp.alpaca_calibrator as calibrator\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\n \"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"\n }\n )\n config_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Pretrained config name or path if not the same as model_name\"\n },\n )\n tokenizer_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Pretrained tokenizer name or path if not the same as model_name\"\n },\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"\n },\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\n \"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"\n },\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n task_name: Optional[str] = field(\n default=\"ner\", metadata={\"help\": \"The name of the task (ner, pos...).\"}\n )\n max_seq_length: int = field(\n default=128,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n dataset_name: Optional[str] = field(\n default=\"conll2003\",\n metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"},\n )\n dataset_config_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The configuration name of the dataset to use (via the datasets library).\"\n },\n )\n train_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"The input training data file (a csv or JSON file).\"},\n )\n validation_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input evaluation data file to evaluate on (a csv or JSON file).\"\n },\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input test data file to predict on (a csv or JSON file).\"\n },\n )\n overwrite_cache: bool = field(\n default=False,\n metadata={\"help\": \"Overwrite the cached training and evaluation sets\"},\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to model maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n max_predict_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of prediction examples to this \"\n \"value if set.\"\n },\n )\n label_all_tokens: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to put the label for one word on all tokens of generated by that word or just on the \"\n \"one (in which case the other tokens will have a padding index).\"\n },\n )\n return_entity_level_metrics: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to return all the entity levels during evaluation or just the overall ones.\"\n },\n )\n\n def __post_init__(self):\n if (\n self.dataset_name is None\n and self.train_file is None\n and self.validation_file is None\n ):\n raise ValueError(\n \"Need either a dataset name or a training/validation file.\"\n )\n else:\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\n \"csv\",\n \"json\",\n ], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\n \"csv\",\n \"json\",\n ], \"`validation_file` should be a csv or a json file.\"\n self.task_name = self.task_name.lower()\n\n\ndef compute_metrics(p, metric, return_entity_level_metrics=False):\n predictions, labels = p\n predictions = np.argmax(predictions, axis=2)\n labels = labels.reshape(predictions.shape)\n label_list = [\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n \"B-MISC\",\n \"I-MISC\",\n ]\n true_predictions 
= [\n [label_list[p] for (p, l) in zip(prediction, label) if l != -100]\n for prediction, label in zip(predictions, labels)\n ]\n true_labels = [\n [label_list[l] for (p, l) in zip(prediction, label) if l != -100]\n for prediction, label in zip(predictions, labels)\n ]\n\n results = metric.compute(predictions=true_predictions, references=true_labels)\n if return_entity_level_metrics:\n final_results = {}\n for key, value in results.items():\n if isinstance(value, dict):\n for n, v in value.items():\n final_results[f\"{key}_{n}\"] = v\n else:\n final_results[key] = value\n return final_results\n else:\n return {\n \"precision\": results[\"overall_precision\"],\n \"recall\": results[\"overall_recall\"],\n \"f1\": results[\"overall_f1\"],\n \"accuracy\": results[\"overall_accuracy\"],\n }\n\n\ndef create_model(num_labels, model_args, data_args, ue_args, config):\n\n model_config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=data_args.task_name,\n cache_dir=config.cache_dir,\n )\n\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.model_name_or_path,\n cache_dir=config.cache_dir,\n use_fast=True,\n )\n\n if ue_args.use_cache:\n if \"electra\" in model_args.model_name_or_path: # TODO:\n model = ElectraForTokenClassificationCached.from_pretrained(\n model_args.model_name_or_path,\n from_tf=False,\n config=model_config,\n cache_dir=config.cache_dir,\n )\n model.use_cache = True\n model.classifier = ElectraNERHeadCustom(model)\n log.info(\"Replaced ELECTRA's head\")\n else:\n raise ValueError(\n f\"{model_args.model_name_or_path} does not have a cached option.\"\n )\n\n else:\n if \"electra\" in model_args.model_name_or_path:\n model = ElectraForTokenClassification.from_pretrained(\n model_args.model_name_or_path,\n from_tf=False,\n config=model_config,\n cache_dir=config.cache_dir,\n )\n model.classifier = ElectraNERHeadCustom(model)\n log.info(\"Replaced ELECTRA's head\")\n\n return model, tokenizer\n\n\ndef load_ood_dataset(dataset_path, data_args, tokenizer, cache_dir=None):\n log.info(\"Load out-of-domain dataset.\")\n datasets_ood = load_dataset(\n dataset_path, ignore_verifications=True, cache_dir=cache_dir\n )\n log.info(\"Done with loading the dataset.\")\n\n log.info(\"Preprocessing the dataset...\")\n\n text_column_name, label_column_name = \"tokens\", \"ner_tags\"\n label_to_id = {0: 0}\n f_preprocess = lambda examples: tokenize_and_align_labels(\n tokenizer,\n examples,\n text_column_name,\n label_column_name,\n data_args=data_args,\n label_to_id=label_to_id,\n )\n\n datasets_ood = datasets_ood.map(\n f_preprocess,\n batched=True,\n load_from_cache_file=True, # TODO: add config\n )\n\n ood_dataset = datasets_ood[\"test\"].select(\n list(range(1000))\n ) # TODO: What is this ???\n # TODO: Why to take test dataset, we can take train dataset\n ood_dataset = ood_dataset.remove_columns([\"text\", \"label\"])\n log.info(\"Done with preprocessing the dataset.\")\n\n return ood_dataset\n\n\ndef load_ood_dataset_test(dataset_path, data_args, tokenizer, cache_dir=None):\n log.info(\"Load out-of-domain dataset.\")\n datasets_ood = load_dataset(\n dataset_path, \"en\", ignore_verifications=True, cache_dir=cache_dir\n )\n log.info(\"Done with loading the dataset.\")\n\n log.info(\"Preprocessing the dataset...\")\n\n column_names = datasets_ood[\"train\"].column_names\n features = datasets_ood[\"train\"].features\n\n text_column_name = \"tokens\" if \"tokens\" in column_names else column_names[0]\n label_column_name = \"ner_tags\" 
if \"ner_tags\" in column_names else column_names[1]\n\n def get_label_list(labels):\n unique_labels = set()\n for label in labels:\n unique_labels = unique_labels | set(label)\n label_list = list(unique_labels)\n label_list.sort()\n return label_list\n\n label_list = features[label_column_name].feature.names\n label_to_id = {i: 0 for i in range(len(label_list))}\n num_labels = len(label_list)\n\n f_preprocess = lambda examples: tokenize_and_align_labels(\n tokenizer,\n examples,\n text_column_name,\n label_column_name,\n data_args=data_args,\n label_to_id=label_to_id,\n )\n\n datasets_ood = datasets_ood.map(\n f_preprocess,\n batched=True,\n load_from_cache_file=True, # TODO: add config\n )\n\n ood_dataset = datasets_ood[\"train\"].select(list(range(3000)))\n ood_dataset = ood_dataset.remove_columns([\"langs\", \"spans\"])\n log.info(\"Done with preprocessing the dataset.\")\n\n return ood_dataset\n\n\ndef create_ue_estimator(\n model,\n ue_args,\n eval_metric,\n calibration_dataset,\n train_dataset,\n cache_dir,\n config=None,\n data_args=None,\n):\n if ue_args.ue_type == \"sngp\":\n return UeEstimatorSngp(model, ue_args, eval_metric)\n\n elif ue_args.ue_type == \"mc\" or ue_args.ue_type == \"mc-dc\":\n return UeEstimatorMc(\n model, ue_args, eval_metric, calibration_dataset, train_dataset\n )\n\n elif ue_args.ue_type == \"mc-dpp\":\n if ue_args.dropout.dry_run_dataset == \"eval\":\n dry_run_dataset = \"eval\"\n elif ue_args.dropout.dry_run_dataset == \"train\":\n dry_run_dataset = train_dataset\n elif ue_args.dropout.dry_run_dataset == \"val\":\n dry_run_dataset = calibration_dataset\n else:\n raise ValueError()\n\n ood_dataset = None\n if ue_args.dropout.use_ood_sampling:\n ood_dataset = load_ood_dataset(\n \"imdb\", data_args, model._bpe_tokenizer, cache_dir\n )\n\n return UeEstimatorMcDpp(\n model,\n ue_args,\n eval_metric,\n calibration_dataset,\n dry_run_dataset,\n ood_dataset=ood_dataset,\n )\n elif ue_args.ue_type == \"nuq\":\n return UeEstimatorNUQ(\n model, ue_args, config, train_dataset, calibration_dataset\n )\n elif ue_args.ue_type == \"maha\":\n return UeEstimatorMahalanobis(model, ue_args, config, train_dataset)\n else:\n raise ValueError()\n\n\ndef do_predict_eval(\n model,\n tokenizer,\n trainer,\n eval_dataset,\n validation_dataset,\n train_dataset,\n metric,\n config,\n data_args,\n work_dir,\n model_dir,\n metric_fn,\n):\n if config.ue.use_cache:\n model.enable_cache()\n\n tagger = SequenceTagger(\n model, tokenizer, training_args=config.training, trainer=trainer\n )\n eval_results = {}\n\n ood_dataset = load_ood_dataset_test(\n \"wikiann\", data_args, tokenizer, config.cache_dir\n )\n\n eval_dataset = eval_dataset.remove_columns([\"id\", \"pos_tags\", \"chunk_tags\"])\n validation_dataset = validation_dataset.remove_columns(\n [\"id\", \"pos_tags\", \"chunk_tags\"]\n )\n\n eval_results[\"ood_labels\"] = [0] * len(eval_dataset) + [1] * len(ood_dataset)\n\n ood_dataset.cast(eval_dataset.features)\n eval_dataset = concatenate_datasets([eval_dataset, ood_dataset])\n\n true_labels = [example[\"labels\"] for example in eval_dataset]\n eval_results[\"true_labels\"] = true_labels\n\n if config.do_eval:\n if config.ue.calibrate:\n tagger.predict(validation_dataset, calibrate=True)\n log.info(f\"Calibration temperature = {tagger.temperature}\")\n\n log.info(\"*** Evaluate ***\")\n\n res = tagger.predict(eval_dataset)\n preds, probs = res[:2]\n\n eval_score = metric_fn([probs, np.asarray(true_labels)])\n\n log.info(f\"Eval score: {eval_score}\")\n eval_results[\"eval_score\"] = 
eval_score\n eval_results[\"probabilities\"] = probs.tolist()\n eval_results[\"answers\"] = preds.tolist()\n\n if config.do_ue_estimate:\n dry_run_dataset = None\n\n ue_estimator = create_ue_estimator(\n tagger,\n config.ue,\n metric,\n calibration_dataset=validation_dataset,\n train_dataset=train_dataset,\n cache_dir=config.cache_dir,\n config=config,\n data_args=data_args,\n )\n\n ue_results = ue_estimator(eval_dataset, true_labels)\n eval_results.update(ue_results)\n\n with open(Path(work_dir) / \"dev_inference.json\", \"w\") as res:\n json.dump(eval_results, res)\n\n if wandb.run is not None:\n wandb.save(str(Path(work_dir) / \"dev_inference.json\"))\n\n\ndef tokenize_and_align_labels(\n tokenizer,\n examples,\n text_column_name,\n label_column_name,\n data_args,\n label_to_id,\n padding=\"max_length\",\n):\n if text_column_name not in examples:\n examples[text_column_name] = [exp.split(\" \") for exp in examples[\"text\"]]\n examples[label_column_name] = [\n [0] * len(exp.split(\" \")) for exp in examples[\"text\"]\n ]\n\n tokenized_inputs = tokenizer(\n examples[text_column_name],\n padding=padding,\n max_length=data_args.max_seq_length,\n truncation=True,\n # We use this argument because the texts in our dataset are lists of words (with a label for each word).\n is_split_into_words=True,\n )\n labels = []\n for i, label in enumerate(examples[label_column_name]):\n word_ids = tokenized_inputs.word_ids(batch_index=i)\n previous_word_idx = None\n label_ids = []\n for word_idx in word_ids:\n # Special tokens have a word id that is None. We set the label to -100 so they are automatically\n # ignored in the loss function.\n if word_idx is None:\n label_ids.append(-100)\n # We set the label for the first token of each word.\n elif word_idx != previous_word_idx:\n label_ids.append(label_to_id[label[word_idx]])\n # For the other tokens in a word, we set the label to either the current label or -100, depending on\n # the label_all_tokens flag.\n else:\n label_ids.append(\n label_to_id[label[word_idx]] if data_args.label_all_tokens else -100\n )\n\n previous_word_idx = word_idx\n\n labels.append(label_ids)\n tokenized_inputs[\"labels\"] = labels\n return tokenized_inputs\n\n\ndef train_eval_conll2003_model(config, training_args, data_args, work_dir):\n ue_args = config.ue\n model_args = config.model\n\n log.info(f\"Seed: {config.seed}\")\n set_seed(config.seed)\n random.seed(config.seed)\n\n log.info(\"Load dataset.\")\n datasets = load_dataset(config.data.task_name, cache_dir=config.cache_dir)\n log.info(\"Done with loading the dataset.\")\n\n if config.do_train:\n column_names = datasets[\"train\"].column_names\n features = datasets[\"train\"].features\n else:\n column_names = datasets[\"validation\"].column_names\n features = datasets[\"validation\"].features\n\n text_column_name = \"tokens\" if \"tokens\" in column_names else column_names[0]\n label_column_name = \"ner_tags\" if \"ner_tags\" in column_names else column_names[1]\n\n def get_label_list(labels):\n unique_labels = set()\n for label in labels:\n unique_labels = unique_labels | set(label)\n label_list = list(unique_labels)\n label_list.sort()\n return label_list\n\n label_list = features[label_column_name].feature.names\n label_to_id = {i: i for i in range(len(label_list))}\n num_labels = len(label_list)\n\n model, tokenizer = create_model(num_labels, model_args, data_args, ue_args, config)\n\n if not isinstance(tokenizer, PreTrainedTokenizerFast):\n raise ValueError(\n \"This example script only works for models that have a 
fast tokenizer. Checkout the big table of models \"\n \"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this \"\n \"requirement\"\n )\n\n f_preprocess = lambda examples: tokenize_and_align_labels(\n tokenizer, examples, text_column_name, label_column_name, data_args, label_to_id\n )\n datasets = datasets.map(\n f_preprocess,\n batched=True,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n columns_to_return = [\"input_ids\", \"labels\", \"attention_mask\"]\n datasets.set_format(columns=columns_to_return)\n\n train_dataset = None\n if config.do_train or (\n config.ue.dropout_type == \"DPP\" and config.ue.dropout.dry_run_dataset != \"eval\"\n ):\n train_dataset = datasets[\"train\"]\n\n train_indexes = None\n if config.do_train:\n train_indexes = list(range(len(train_dataset)))\n\n if config.data.subsample_perc > 0:\n train_indexes = random.sample(\n train_indexes, int(len(train_dataset) * config.data.subsample_perc)\n )\n train_dataset = torch.utils.data.Subset(train_dataset, train_indexes)\n\n with open(Path(work_dir) / \"training_indexes.pkl\", \"wb\") as f:\n pickle.dump(train_indexes, f)\n\n log.info(f\"Training dataset size: {len(train_dataset)}\")\n\n elif (\n config.ue.dropout_type == \"DPP\" and config.ue.dropout.dry_run_dataset != \"eval\"\n ):\n training_indexes_path = (\n Path(config.model.model_name_or_path) / \"training_indexes.pkl\"\n )\n with open(training_indexes_path, \"rb\") as f:\n train_indexes = pickle.load(f)\n\n train_dataset = torch.utils.data.Subset(train_dataset, train_indexes)\n log.info(f\"Training dataset size: {len(train_dataset)}\")\n\n validation_dataset = datasets[\"validation\"]\n eval_dataset = datasets[\"test\"] if config.do_eval else None\n\n training_args.save_steps = 0\n if config.do_train:\n training_args.warmup_steps = int(\n training_args.warmup_ratio\n * len(train_dataset)\n * training_args.num_train_epochs\n / training_args.train_batch_size\n )\n log.info(f\"Warmup steps: {training_args.warmup_steps}\")\n\n data_collator = DataCollatorForTokenClassification(\n tokenizer, pad_to_multiple_of=8, max_length=data_args.max_seq_length\n )\n\n metric = load_metric(\"seqeval\", keep_in_memory=True, cache_dir=config.cache_dir)\n metric_fn = lambda p: compute_metrics(\n p, metric, data_args.return_entity_level_metrics\n )\n\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n compute_metrics=metric_fn,\n data_collator=data_collator,\n )\n\n if config.do_train:\n trainer.train(\n model_path=model_args.model_name_or_path\n if os.path.isdir(model_args.model_name_or_path)\n else None\n )\n trainer.save_model(work_dir)\n tokenizer.save_pretrained(work_dir)\n\n if config.do_eval:\n do_predict_eval(\n model,\n tokenizer,\n trainer,\n eval_dataset,\n validation_dataset,\n train_dataset,\n metric,\n config,\n data_args,\n work_dir,\n model_args.model_name_or_path,\n metric_fn,\n )\n\n\ndef update_config(cfg_old, cfg_new):\n for k, v in cfg_new.items():\n if k in cfg_old.__dict__:\n setattr(cfg_old, k, v)\n\n return cfg_old\n\n\ndef fix_config(config):\n if config.ue.dropout_subs == \"all\":\n config.ue.use_cache = False\n\n if config.ue.ue_type == \"mc-dpp\":\n config.ue.dropout_type = \"DPP\"\n\n if config.ue.ue_type == \"mc-dc\":\n config.ue.dropout_type = \"DC_MC\"\n\n\[email protected](\n config_path=os.path.dirname(os.environ[\"HYDRA_CONFIG_PATH\"]),\n config_name=os.path.basename(os.environ[\"HYDRA_CONFIG_PATH\"]),\n)\ndef main(config):\n 
os.environ[\"WANDB_WATCH\"] = \"False\" # To disable Huggingface logging\n\n auto_generated_dir = os.getcwd()\n log.info(f\"Work dir: {auto_generated_dir}\")\n os.chdir(hydra.utils.get_original_cwd())\n\n wandb_run = init_wandb(auto_generated_dir, config)\n\n fix_config(config)\n\n args_train = TrainingArguments(output_dir=auto_generated_dir)\n args_train = update_config(args_train, config.training)\n\n args_data = DataTrainingArguments(task_name=config.data.task_name)\n args_data = update_config(args_data, config.data)\n\n if not os.path.exists(Path(auto_generated_dir) / \"dev_inference.json\"):\n train_eval_conll2003_model(config, args_train, args_data, auto_generated_dir)\n else:\n log.info(\n f\"Result file: {auto_generated_dir}/dev_inference.json already exists \\n\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n",
"import torch\nfrom alpaca.uncertainty_estimator.masks import build_mask\nfrom .dppmask_ext import build_mask_ext\n\nfrom .dropout_mc import DropoutMC\nimport numpy as np\n\nimport time\nimport datetime\nimport random\nimport os\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\nclass DropoutDPP_v3(DropoutMC):\n dropout_id = -1\n\n @classmethod\n def update(cls):\n cls.dropout_id += 1\n return cls.dropout_id\n \n def __init__(\n self,\n p: float,\n activate=False,\n mask_name=\"ht_dpp\",\n max_n=100,\n max_frac=0.4,\n coef=1.0,\n is_reused_mask=False,\n inference_step=0,\n mask_name_for_mask=\"rbf\",\n calib_temp=1.\n ):\n super().__init__(p=p, activate=activate)\n self.curr_dropout_id = DropoutDPP_v3.update()\n\n self.mask = (\n build_mask_ext(mask_name)\n if mask_name != \"dpp\"\n else build_mask_ext(mask_name)[\"dpp\"]\n )\n self.max_n = max_n\n self.max_frac = max_frac\n self.coef = coef\n self.calib_temp = calib_temp\n self.init_change_mask = 0\n\n self.is_reused_mask = is_reused_mask\n if self.is_reused_mask:\n self.saved_masks = []\n self.calib_temps = []\n self.dpp_masks = (\n build_mask_ext(mask_name_for_mask)\n if mask_name_for_mask != \"dpp\"\n else build_mask_ext(mask_name_for_mask)[\"dpp\"]\n )\n self.inference_step = inference_step\n self.used_mask_id = 0\n self.diverse_masks = None\n\n log.debug(f\"Dropout id: {self.curr_dropout_id}\")\n\n def _get_mask(self, x: torch.Tensor):\n if x.dim() == 2:\n return self.mask(\n x, dropout_rate=self.p, layer_num=self.curr_dropout_id\n ).float()\n\n return self.mask(\n x.view(x.shape[0] * x.shape[1], -1),\n dropout_rate=self.p,\n layer_num=self.curr_dropout_id,\n ).float() # [None, None, :]\n\n def _calc_non_zero_neurons(self, sum_mask):\n frac_nonzero = (sum_mask != 0).sum(axis=-1).item() / sum_mask.shape[-1]\n return frac_nonzero\n\n def _predict_with_sampled_mask(self, x: torch.Tensor):\n sum_mask = self._get_mask(x)\n norm = 1.0\n i = 1\n frac_nonzero = self._calc_non_zero_neurons(sum_mask)\n while i < self.max_n and frac_nonzero < self.max_frac:\n mask = self._get_mask(x)\n\n # sum_mask = self.coef * sum_mask + mask\n sum_mask += mask\n i += 1\n # norm = self.coef * norm + 1\n\n frac_nonzero = self._calc_non_zero_neurons(sum_mask)\n log.debug(\n f\"==========Non zero neurons: {frac_nonzero} iter: {i}*****************\"\n )\n\n log.debug(f\"Number of averaged DPP masks: {i}\")\n\n sum_mask /= i\n # sum_mask /= norm\n res = x * sum_mask\n\n if self.is_reused_mask:\n self.saved_masks.append(sum_mask.cpu())\n\n return res\n \n def construct_pool_of_masks(self, sampling=True):\n self.saved_masks = torch.stack(self.saved_masks).T\n self.saved_masks_clean = self.saved_masks.clone()\n\n if sampling:\n n = 7 # TODO:\n mask_indices = torch.zeros(self.saved_masks.shape[1])\n for i in range(n):\n msk_idx = self.dpp_masks(\n self.saved_masks,\n dropout_rate=self.p,\n layer_num=self.curr_dropout_id,\n ).float()\n\n self.diverse_masks = self.saved_masks_clean[:, msk_idx > 0]\n else:\n self.diverse_masks = self.saved_masks_clean\n\n max_n = 200\n self.diverse_masks = self.diverse_masks[:, :max_n]\n if not len(self.calib_temps):\n self.calib_temps = [1.] 
* self.diverse_masks.shape[1]\n\n        log.debug(f\"\\n\\nself.diverse_masks: {self.diverse_masks.shape}\")\n\n        self.used_mask_id = 0\n    \n    def get_calib_temp(self):\n        return self.calib_temps[self.used_mask_id] if self.is_reused_mask and self.inference_step else self.calib_temp\n    \n    def change_mask(self, mask_id=None, on_calibration=False):\n        if mask_id is not None:\n            # validate the requested index against the pool of diverse masks\n            assert mask_id < self.diverse_masks.shape[1]\n            \n            self.used_mask_id = mask_id\n            return mask_id\n\n        if on_calibration:\n            self.init_change_mask = 1\n        else:\n            self.used_mask_id += 1 \n            self.used_mask_id %= self.diverse_masks.shape[1]\n        return self.used_mask_id\n    \n    def _predict_with_reused_mask(self, x: torch.Tensor):\n        if self.diverse_masks is None:\n            self.construct_pool_of_masks()\n        \n        mask = self.diverse_masks[:, self.used_mask_id].to(device=x.device)\n        if self.init_change_mask:\n            self.change_mask(on_calibration=False)\n            self.init_change_mask = 0\n        return x * mask\n    \n    def forward(self, x: torch.Tensor):\n        if self.training:\n            return torch.nn.functional.dropout(x, self.p, training=True)\n        \n        else:\n            if not self.activate:\n                return x\n            \n            if self.is_reused_mask and self.inference_step:\n                return self._predict_with_reused_mask(x)\n\n            else:\n                return self._predict_with_sampled_mask(x)"
] | [
[
"torch.utils.data.Subset",
"numpy.argmax",
"numpy.asarray"
],
[
"torch.stack",
"torch.nn.functional.dropout",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
psorianom/rescuescore | [
"9a647cbc1fc94f9a1ea265443295e84170da81f6"
] | [
"rescuetime_wrapper.py"
] | [
"# coding: utf-8\nfrom datetime import date, timedelta\n\nimport pandas as pd\n\nfrom rescuetime.api.service import Service\nfrom rescuetime.api.access import AnalyticApiKey\n\n\ndef get_apikey():\n with open(\"apikey\", \"r\") as fileo:\n key = fileo.read()\n return key\n\n\napikey = get_apikey()\n\n\ndef get_efficiency():\n try:\n today_date = date.today().strftime(\"%Y-%m-%d\")\n tomorrow_date = (date.today() + timedelta(1)).strftime(\"%Y-%m-%d\")\n s = Service.Service()\n k = AnalyticApiKey.AnalyticApiKey(apikey, s)\n p = {'restrict_begin': today_date,\n 'restrict_end': tomorrow_date,\n 'restrict_kind': 'efficiency',\n 'perspective': 'interval'}\n #YYYY-MM-DD\n d = s.fetch_data(k, p)\n\n df = pd.DataFrame(d['rows'], columns=d['row_headers'])\n efficiency = df[\"Efficiency (percent)\"]\n dates = df[\"Date\"]\n return int(efficiency.tail(1)), str(dates.tail(1))\n except:\n return \"F\", \"F\"\n\n\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ETHZ-TEC/exot_eengine | [
"7b7ce6cb949e1b0a02e716b03f2f9af751713b29",
"7b7ce6cb949e1b0a02e716b03f2f9af751713b29",
"7b7ce6cb949e1b0a02e716b03f2f9af751713b29"
] | [
"exot/util/plotting.py",
"exot/channel/rnndecoder/_mixins.py",
"exot/util/analysers.py"
] | [
"# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# \n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n\"\"\"Plotting support\"\"\"\n\nimport contextlib\nimport inspect\nimport pathlib\nimport typing as t\n\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as tx\nimport numpy as np\nimport pandas\nimport seaborn as sns\nfrom matplotlib.collections import LineCollection\n\nfrom exot.experiment.frequency_sweep import FrequencySweepRun\nfrom exot.experiment.performance import PerformanceRun\nfrom exot.util.attributedict import AttributeDict\nfrom exot.util.scinum import is_fitted, unpack_array\n\n__all__ = (\"_save_path_helper\", \"remove_spine\", \"add_spine\", \"rugplot\")\n\n\ndef _save_path_helper(path: t.Union[pathlib.Path, str]) -> pathlib.Path:\n \"\"\"A helper function for save paths\n \n Args:\n path (t.Union[pathlib.Path, str]): The save path\n \n Raises:\n TypeError: Wrong type supplied\n ValueError: Provided a file instead of a directory\n RuntimeError: Directory was not created\n \n Returns:\n pathlib.Path: The save path\n \"\"\"\n # Check and normalise variable type\n if not isinstance(path, (str, pathlib.Path)):\n raise TypeError(f\"wrong type supplied for save directory path\", type(path))\n if not isinstance(path, pathlib.Path):\n path = pathlib.Path(path)\n\n if path.exists() and not path.is_dir():\n raise ValueError(\"provided a file instead of a directory\", path)\n\n # Directory creation can fail, raising, for example, a PermissionError.\n if not path.exists():\n path.mkdir(parents=True)\n\n if not path.exists() and path.is_dir():\n raise RuntimeError(\"postcondition failed: directory is created and available\")\n\n return path\n\n\ndef remove_spine(axis, which: str, ticks_only: bool = False) -> None:\n \"\"\"Removes the spine from an axis\n \n Args:\n axis: The matplotlib axis\n which (str): Which spine to remove? (top, bottom)\n ticks_only (bool, optional): Remove only the ticks?. 
Defaults to False.\n \"\"\"\n if not ticks_only:\n axis.spines[which].set_color(\"none\")\n if which in [\"top\", \"bottom\"]:\n axis.tick_params(axis=\"x\", color=(0, 0, 0, 0))\n else:\n axis.tick_params(axis=\"y\", color=(0, 0, 0, 0))\n\n\ndef add_spine(axis, which: str, ticks_only: bool = False):\n \"\"\"Adds a spine to an axis\n \n Args:\n axis: The matplotlib axis\n which (str): Which spine to add? (top, bottom)\n ticks_only (bool, optional): Add only the ticks?. Defaults to False.\n \"\"\"\n if not ticks_only:\n axis.spines[which].set_color((0, 0, 0, 1))\n params = {which: True}\n if which in [\"top\", \"bottom\"]:\n axis.tick_params(axis=\"x\", color=(0, 0, 0, 1), **params)\n else:\n axis.tick_params(axis=\"y\", color=(0, 0, 0, 1), **params)\n\n\ndef rugplot(a, height=0.05, axis=\"x\", ax=None, top=False, **kwargs):\n \"\"\"Plot datapoints in an array as sticks on an axis. Adapted from seaborn.\n\n Args:\n a (vector): 1D array of observations.\n height (scalar, optional): Height of ticks as proportion of the axis.\n axis ({'x' | 'y'}, optional): Axis to draw rugplot on.\n ax (matplotlib axes, optional): Axes to draw plot into; otherwise grabs current axes.\n **kwargs: Other keyword arguments are passed to ``LineCollection``\n\n Returns:\n ax (matplotlib axes): The Axes object with the plot on it.\n \"\"\"\n if ax is None:\n ax = plt.gca()\n a = np.asarray(a)\n vertical = kwargs.pop(\"vertical\", axis == \"y\")\n\n alias_map = dict(linewidth=\"lw\", linestyle=\"ls\", color=\"c\")\n for attr, alias in alias_map.items():\n if alias in kwargs:\n kwargs[attr] = kwargs.pop(alias)\n kwargs.setdefault(\"linewidth\", 1)\n\n line = [0, height] if not top else [1, 1 - height]\n\n if vertical:\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([np.tile(line, len(a)), np.repeat(a, 2)])\n else:\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([np.repeat(a, 2), np.tile(line, len(a))])\n line_segs = xy_pairs.reshape([len(a), 2, 2])\n ax.add_collection(LineCollection(line_segs, transform=trans, **kwargs))\n\n ax.autoscale_view(scalex=not vertical, scaley=vertical)\n\n return ax\n",
"# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# \n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n\"\"\"Mixins for the recurrent neural network signal decoder analysis\"\"\"\n\nimport os\nimport pickle\nfrom datetime import datetime\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef cut_minibatch(label, freqv, minibatch_length):\n \"\"\"Cut minibatches to equal length.\"\"\"\n cut_length = len(freqv) % minibatch_length\n zip_array = list(zip(label, freqv))\n zip_array.sort(key=lambda x: len(x[1]))\n if cut_length == 0:\n return (label, freqv)\n else:\n del zip_array[-cut_length:]\n return zip(*zip_array)\n\n\ndef tf_count(t, val):\n \"\"\"Count how many values in a tensor a equal to a value val.\"\"\"\n cnt_equal_elem = tf.equal(t, val)\n cnt_equal_elem = tf.cast(cnt_equal_elem, tf.int32)\n return tf.reduce_sum(cnt_equal_elem, 1)\n\n\ndef stdr(dataseq):\n \"\"\"Make data zero mean and with unit standard deviation.\"\"\"\n flat_dataseq = np.array([y for x in dataseq for y in x])\n dataseq = np.array(dataseq)\n mean = np.mean(flat_dataseq)\n std = np.std(flat_dataseq)\n return (dataseq - mean) / std, mean, std\n\n\ndef stdr_val(val_freqv, mean, std):\n \"\"\"Make data zero mean and with unit variance with given mean and standard deviation.\"\"\"\n return (val_freqv - mean) / std\n\n\ndef separate_val(freqv, label):\n \"\"\"Separate the training and the validation dataset\"\"\"\n val_index = np.random.choice(len(freqv), len(freqv) // 10, replace=False)\n val_freqv = freqv[val_index]\n val_label = label[val_index]\n freqv = np.delete(freqv, val_index, 0)\n label = np.delete(label, val_index, 0)\n return freqv, val_freqv, label, val_label, val_index\n",
"# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# \n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n\"\"\"Misc helpers\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom exot.util.attributedict import LabelMapping\n\n\ndef all_labels_in_dataset(experiment, sort_str=False, **kwargs):\n for key in [\"phase\", \"labelcolumn\", \"io\"]:\n if key not in kwargs.keys():\n raise ValueError(f\"key {key} not specified in kwargs!\")\n ingest_args = kwargs.copy()\n labels = np.array([])\n # for rep in range(1):\n for rep in range(experiment.config.EXPERIMENT.PHASES[ingest_args[\"phase\"]].repetitions):\n ingest_args[\"io\"][\"rep\"] = rep\n # for cur_run in [experiment.phases[ingest_args['phase']]['antutu2']]:\n for cur_run in experiment.phases[ingest_args[\"phase\"]].values():\n try:\n cur_run.ingest(**ingest_args)\n labels = np.concatenate(\n (labels, cur_run.i_rawstream[ingest_args[\"labelcolumn\"]].unique())\n )\n except:\n print(\"Could not ingest run\", cur_run)\n\n labels = pd.DataFrame(labels)[0].unique().flatten()\n if sort_str:\n return _sort_str_labels(labels)\n else:\n return labels\n\n\ndef sort_str_labels(labels):\n convert = lambda x: str(x)\n return np.array(list(map(convert, labels)))\n\n\ndef generate_unique_labels_mapping(labels_keys, labels_str):\n if labels_str is None:\n for key in labels_keys:\n if key not in labels_str.keys():\n labels_str[key] = key\n labels_map = dict(\n [\n (y, {\"int\": x, \"str\": str(labels_keys[x])})\n for x, y in enumerate(set(labels_keys))\n ]\n )\n else:\n labels_map = dict(\n [(y, {\"int\": x, \"str\": labels_str[x]}) for x, y in enumerate(set(labels_keys))]\n )\n return LabelMapping(labels_map)\n"
] | [
[
"matplotlib.pyplot.gca",
"numpy.asarray",
"matplotlib.collections.LineCollection",
"numpy.repeat",
"matplotlib.transforms.blended_transform_factory"
],
[
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"numpy.std",
"numpy.delete",
"numpy.mean",
"numpy.array"
],
[
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"1.4",
"2.2",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
sokunmin/mmdetection | [
"2d8ef6ad36dae912dba71f83934e7dd5a0ced3eb",
"2d8ef6ad36dae912dba71f83934e7dd5a0ced3eb"
] | [
"mmdet/models/dense_heads/corner_head.py",
"mmdet/models/backbones/dla.py"
] | [
"from abc import abstractmethod\nfrom math import ceil, log\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, bias_init_with_prob\nfrom mmcv.ops import CornerPool, batched_nms\n\nfrom mmdet.core import multi_apply\nfrom ..builder import HEADS, build_loss\nfrom ..utils import gaussian_radius, gen_gaussian_target\nfrom .base_dense_head import BaseDenseHead\n\n\nclass BiCornerPool(nn.Module):\n \"\"\"Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)\n\n Args:\n in_channels (int): Input channels of module.\n out_channels (int): Output channels of module.\n feat_channels (int): Feature channels of module.\n directions (list[str]): Directions of two CornerPools.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n \"\"\"\n\n def __init__(self,\n in_channels,\n directions,\n feat_channels=128,\n out_channels=128,\n norm_cfg=dict(type='BN', requires_grad=True)):\n super(BiCornerPool, self).__init__()\n self.direction1_conv = ConvModule(\n in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n self.direction2_conv = ConvModule(\n in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n\n self.aftpool_conv = ConvModule(\n feat_channels,\n out_channels,\n 3,\n padding=1,\n norm_cfg=norm_cfg,\n act_cfg=None)\n\n self.conv1 = ConvModule(\n in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)\n self.conv2 = ConvModule(\n in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)\n\n self.direction1_pool = CornerPool(directions[0])\n self.direction2_pool = CornerPool(directions[1])\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n \"\"\"Forward features from the upstream network.\n\n Args:\n x (tensor): Input feature of BiCornerPool.\n\n Returns:\n conv2 (tensor): Output feature of BiCornerPool.\n \"\"\"\n direction1_conv = self.direction1_conv(x)\n direction2_conv = self.direction2_conv(x)\n direction1_feat = self.direction1_pool(direction1_conv)\n direction2_feat = self.direction2_pool(direction2_conv)\n aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)\n conv1 = self.conv1(x)\n relu = self.relu(aftpool_conv + conv1)\n conv2 = self.conv2(relu)\n return conv2\n\nUSE_MMDET_OFFICIAL = False\n\nclass CornerBaseHead(BaseDenseHead):\n \"\"\"Base class for DenseHeads.\"\"\"\n\n def __init__(self,\n num_classes,\n in_channels,\n train_cfg=None,\n test_cfg=None):\n super(CornerBaseHead, self).__init__()\n self.num_classes = num_classes\n self.in_channels = in_channels\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self._init_layers()\n\n @abstractmethod\n def _init_layers(self):\n pass\n\n def build_convs(self, in_channel, feat_channel, stacked_convs):\n head_convs = []\n for i in range(stacked_convs):\n chn = in_channel if i == 0 else feat_channel\n head_convs.append(ConvModule(\n chn, feat_channel, 3,\n padding=1, bias=True, act_cfg=dict(type='ReLU', inplace=True)))\n return head_convs\n\n def build_share_convs(self, in_channel, feat_channel, stacked_convs):\n if stacked_convs == 0:\n return nn.Identity()\n return nn.Sequential(*self.build_convs(in_channel, feat_channel, stacked_convs))\n\n def build_head(self, in_channel, feat_channel, stacked_convs, out_channel):\n head_convs = self.build_convs(in_channel, feat_channel, stacked_convs)\n head_convs.append(nn.Conv2d(feat_channel, out_channel, 1))\n return nn.Sequential(*head_convs)\n\n def _gather_feat(self, feat, ind, mask=None):\n \"\"\"Gather feature according to index.\n\n Args:\n feat (Tensor): Target 
feature map.\n ind (Tensor): Target coord index.\n mask (Tensor | None): Mask of featuremap. Default: None.\n\n Returns:\n feat (Tensor): Gathered feature.\n \"\"\"\n dim = feat.size(2)\n if USE_MMDET_OFFICIAL:\n # [old]\n ind = ind.unsqueeze(2).repeat(1, 1, dim)\n else:\n # [new]\n ind = ind.unsqueeze(len(ind.shape)).expand(*ind.shape, dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\n def _local_maximum(self, heat, kernel=3):\n \"\"\"Extract local maximum pixel with given kernal.\n\n Args:\n heat (Tensor): Target heatmap.\n kernel (int): Kernel size of max pooling. Default: 3.\n\n Returns:\n heat (Tensor): A heatmap where local maximum pixels maintain its\n own value and other positions are 0.\n \"\"\"\n pad = (kernel - 1) // 2\n hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)\n keep = (hmax == heat).float()\n return heat * keep\n\n def _transpose_and_gather_feat(self, feat, ind):\n \"\"\"Transpose and gather feature according to index.\n\n Args:\n feat (Tensor): Target feature map.\n ind (Tensor): Target coord index.\n\n Returns:\n feat (Tensor): Transposed and gathered feature.\n \"\"\"\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = self._gather_feat(feat, ind)\n return feat\n\n def _topk(self, scores, k=20):\n \"\"\"Get top k positions from heatmap.\n\n Args:\n scores (Tensor): Target heatmap with shape\n [batch, num_classes, height, width].\n k (int): Target number. Default: 20.\n\n Returns:\n tuple[torch.Tensor]: Scores, indexes, categories and coords of\n topk keypoint. Containing following Tensors:\n\n - topk_scores (Tensor): Max scores of each topk keypoint.\n - topk_inds (Tensor): Indexes of each topk keypoint.\n - topk_clses (Tensor): Categories of each topk keypoint.\n - topk_ys (Tensor): Y-coord of each topk keypoint.\n - topk_xs (Tensor): X-coord of each topk keypoint.\n \"\"\"\n # [old]\n if USE_MMDET_OFFICIAL:\n batch, _, height, width = scores.size()\n topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)\n topk_clses = topk_inds // (height * width)\n topk_inds = topk_inds % (height * width)\n topk_ys = topk_inds // width\n topk_xs = (topk_inds % width).int().float()\n else:\n # [new]\n batch, channel, height, width = scores.size()\n topk_scores, topk_inds = torch.topk(scores.view(batch, channel, -1), k)\n\n topk_inds = topk_inds % (height * width) # (B, #cls, K)\n topk_ys = (topk_inds // width).float() # (B, #cls, K)\n topk_xs = (topk_inds % width).float() # (B, #cls, K)\n\n topk_scores, topk_ind = torch.topk(topk_scores.view(batch, -1), k)\n topk_clses = topk_ind // k\n topk_inds = self._gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, k)\n topk_ys = self._gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, k)\n topk_xs = self._gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, k)\n return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs\n\n\[email protected]_module()\nclass CornerHead(CornerBaseHead):\n \"\"\"Head of CornerNet: Detecting Objects as Paired Keypoints.\n\n Code is modified from the `official github repo\n <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/\n kp.py#L73>`_ .\n\n More details can be found in the `paper\n <https://arxiv.org/abs/1808.01244>`_ .\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature 
map.\n num_feat_levels (int): Levels of feature from the previous module. 2\n for HourglassNet-104 and 1 for HourglassNet-52. Because\n HourglassNet-104 outputs the final feature and intermediate\n supervision feature and HourglassNet-52 only outputs the final\n feature. Default: 2.\n corner_emb_channels (int): Channel of embedding vector. Default: 1.\n train_cfg (dict | None): Training config. Useless in CornerHead,\n but we keep this variable for SingleStageDetector. Default: None.\n test_cfg (dict | None): Testing config of CornerHead. Default: None.\n loss_heatmap (dict | None): Config of corner heatmap loss. Default:\n GaussianFocalLoss.\n loss_embedding (dict | None): Config of corner embedding loss. Default:\n AssociativeEmbeddingLoss.\n loss_offset (dict | None): Config of corner offset loss. Default:\n SmoothL1Loss.\n \"\"\"\n\n def __init__(self,\n *args,\n num_feat_levels=2,\n corner_emb_channels=1,\n loss_heatmap=dict(\n type='GaussianFocalLoss',\n alpha=2.0,\n gamma=4.0,\n loss_weight=1),\n loss_embedding=dict(\n type='AssociativeEmbeddingLoss',\n pull_weight=0.25,\n push_weight=0.25),\n loss_offset=dict(\n type='SmoothL1Loss', beta=1.0, loss_weight=1),\n **kwargs):\n self.corner_emb_channels = corner_emb_channels\n self.with_corner_emb = self.corner_emb_channels > 0\n self.corner_offset_channels = 2\n self.num_feat_levels = num_feat_levels\n super(CornerHead, self).__init__(*args, **kwargs)\n self.loss_heatmap = build_loss(\n loss_heatmap) if loss_heatmap is not None else None\n self.loss_embedding = build_loss(\n loss_embedding) if loss_embedding is not None else None\n self.loss_offset = build_loss(\n loss_offset) if loss_offset is not None else None\n\n def _make_layers(self, out_channels, in_channels=256, feat_channels=256):\n \"\"\"Initialize conv sequential for CornerHead.\"\"\"\n return nn.Sequential(\n ConvModule(in_channels, feat_channels, 3, padding=1),\n ConvModule(\n feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None))\n\n def _init_corner_kpt_layers(self):\n \"\"\"Initialize corner keypoint layers.\n\n Including corner heatmap branch and corner offset branch. 
Each branch\n has two parts: prefix `tl_` for top-left and `br_` for bottom-right.\n \"\"\"\n self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()\n self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()\n self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()\n\n for _ in range(self.num_feat_levels):\n self.tl_pool.append(\n BiCornerPool(\n self.in_channels, ['top', 'left'],\n out_channels=self.in_channels))\n self.br_pool.append(\n BiCornerPool(\n self.in_channels, ['bottom', 'right'],\n out_channels=self.in_channels))\n\n self.tl_heat.append(\n self._make_layers(\n out_channels=self.num_classes,\n in_channels=self.in_channels))\n self.br_heat.append(\n self._make_layers(\n out_channels=self.num_classes,\n in_channels=self.in_channels))\n\n self.tl_off.append(\n self._make_layers(\n out_channels=self.corner_offset_channels,\n in_channels=self.in_channels))\n self.br_off.append(\n self._make_layers(\n out_channels=self.corner_offset_channels,\n in_channels=self.in_channels))\n\n def _init_corner_emb_layers(self):\n \"\"\"Initialize corner embedding layers.\n\n Only include corner embedding branch with two parts: prefix `tl_` for\n top-left and `br_` for bottom-right.\n \"\"\"\n self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()\n\n for _ in range(self.num_feat_levels):\n self.tl_emb.append(\n self._make_layers(\n out_channels=self.corner_emb_channels,\n in_channels=self.in_channels))\n self.br_emb.append(\n self._make_layers(\n out_channels=self.corner_emb_channels,\n in_channels=self.in_channels))\n\n def _init_layers(self):\n \"\"\"Initialize layers for CornerHead.\n\n Including two parts: corner keypoint layers and corner embedding layers\n \"\"\"\n self._init_corner_kpt_layers()\n if self.with_corner_emb:\n self._init_corner_emb_layers()\n\n def init_weights(self):\n \"\"\"Initialize weights of the head.\"\"\"\n bias_init = bias_init_with_prob(0.1)\n for i in range(self.num_feat_levels):\n # The initialization of parameters are different between nn.Conv2d\n # and ConvModule. Our experiments show that using the original\n # initialization of nn.Conv2d increases the final mAP by about 0.2%\n self.tl_heat[i][-1].conv.reset_parameters()\n self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)\n self.br_heat[i][-1].conv.reset_parameters()\n self.br_heat[i][-1].conv.bias.data.fill_(bias_init)\n self.tl_off[i][-1].conv.reset_parameters()\n self.br_off[i][-1].conv.reset_parameters()\n if self.with_corner_emb:\n self.tl_emb[i][-1].conv.reset_parameters()\n self.br_emb[i][-1].conv.reset_parameters()\n\n def forward(self, feats):\n \"\"\"Forward features from the upstream network.\n\n Args:\n feats (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: Usually a tuple of corner heatmaps, offset heatmaps and\n embedding heatmaps.\n - tl_heats (list[Tensor]): Top-left corner heatmaps for all\n levels, each is a 4D-tensor, the channels number is\n num_classes.\n - br_heats (list[Tensor]): Bottom-right corner heatmaps for all\n levels, each is a 4D-tensor, the channels number is\n num_classes.\n - tl_embs (list[Tensor] | list[None]): Top-left embedding\n heatmaps for all levels, each is a 4D-tensor or None.\n If not None, the channels number is corner_emb_channels.\n - br_embs (list[Tensor] | list[None]): Bottom-right embedding\n heatmaps for all levels, each is a 4D-tensor or None.\n If not None, the channels number is corner_emb_channels.\n - tl_offs (list[Tensor]): Top-left offset heatmaps for all\n levels, each is a 4D-tensor. 
The channels number is\n corner_offset_channels.\n - br_offs (list[Tensor]): Bottom-right offset heatmaps for all\n levels, each is a 4D-tensor. The channels number is\n corner_offset_channels.\n \"\"\"\n lvl_ind = list(range(self.num_feat_levels))\n return multi_apply(self.forward_single, feats, lvl_ind)\n\n def forward_single(self, x, lvl_ind, return_pool=False):\n \"\"\"Forward feature of a single level.\n\n Args:\n x (Tensor): Feature of a single level.\n lvl_ind (int): Level index of current feature.\n return_pool (bool): Return corner pool feature or not.\n\n Returns:\n tuple[Tensor]: A tuple of CornerHead's output for current feature\n level. Containing the following Tensors:\n\n - tl_heat (Tensor): Predicted top-left corner heatmap.\n - br_heat (Tensor): Predicted bottom-right corner heatmap.\n - tl_emb (Tensor | None): Predicted top-left embedding heatmap.\n None for `self.with_corner_emb == False`.\n - br_emb (Tensor | None): Predicted bottom-right embedding\n heatmap. None for `self.with_corner_emb == False`.\n - tl_off (Tensor): Predicted top-left offset heatmap.\n - br_off (Tensor): Predicted bottom-right offset heatmap.\n - tl_pool (Tensor): Top-left corner pool feature. Not must\n have.\n - br_pool (Tensor): Bottom-right corner pool feature. Not must\n have.\n \"\"\"\n tl_pool = self.tl_pool[lvl_ind](x)\n tl_heat = self.tl_heat[lvl_ind](tl_pool)\n br_pool = self.br_pool[lvl_ind](x)\n br_heat = self.br_heat[lvl_ind](br_pool)\n\n tl_emb, br_emb = None, None\n if self.with_corner_emb:\n tl_emb = self.tl_emb[lvl_ind](tl_pool)\n br_emb = self.br_emb[lvl_ind](br_pool)\n\n tl_off = self.tl_off[lvl_ind](tl_pool)\n br_off = self.br_off[lvl_ind](br_pool)\n\n result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]\n if return_pool:\n result_list.append(tl_pool)\n result_list.append(br_pool)\n\n return result_list\n\n def get_targets(self,\n gt_bboxes,\n gt_labels,\n feat_shape,\n img_shape,\n with_corner_emb=False,\n with_guiding_shift=False,\n with_centripetal_shift=False):\n \"\"\"Generate corner targets.\n\n Including corner heatmap, corner offset.\n\n Optional: corner embedding, corner guiding shift, centripetal shift.\n\n For CornerNet, we generate corner heatmap, corner offset and corner\n embedding from this function.\n\n For CentripetalNet, we generate corner heatmap, corner offset, guiding\n shift and centripetal shift from this function.\n\n Args:\n gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each\n has shape (num_gt, 4).\n gt_labels (list[Tensor]): Ground truth labels of each box, each has\n shape (num_gt,).\n feat_shape (list[int]): Shape of output feature,\n [batch, channel, height, width].\n img_shape (list[int]): Shape of input image,\n [height, width, channel].\n with_corner_emb (bool): Generate corner embedding target or not.\n Default: False.\n with_guiding_shift (bool): Generate guiding shift target or not.\n Default: False.\n with_centripetal_shift (bool): Generate centripetal shift target or\n not. Default: False.\n\n Returns:\n dict: Ground truth of corner heatmap, corner offset, corner\n embedding, guiding shift and centripetal shift. Containing the\n following keys:\n\n - topleft_heatmap (Tensor): Ground truth top-left corner\n heatmap.\n - bottomright_heatmap (Tensor): Ground truth bottom-right\n corner heatmap.\n - topleft_offset (Tensor): Ground truth top-left corner offset.\n - bottomright_offset (Tensor): Ground truth bottom-right corner\n offset.\n - corner_embedding (list[list[list[int]]]): Ground truth corner\n embedding. 
Not must have.\n - topleft_guiding_shift (Tensor): Ground truth top-left corner\n guiding shift. Not must have.\n - bottomright_guiding_shift (Tensor): Ground truth bottom-right\n corner guiding shift. Not must have.\n - topleft_centripetal_shift (Tensor): Ground truth top-left\n corner centripetal shift. Not must have.\n - bottomright_centripetal_shift (Tensor): Ground truth\n bottom-right corner centripetal shift. Not must have.\n \"\"\"\n batch_size, _, height, width = feat_shape # feat size (128, 128)\n img_h, img_w = img_shape[:2] # > output size (511, 511)\n # > feat_size / output_size\n width_ratio = float(width / img_w)\n height_ratio = float(height / img_h)\n # > `gt_bboxes`: (#img, #obj, 4), `H/W`: 128\n gt_tl_heatmap = gt_bboxes[-1].new_zeros(\n [batch_size, self.num_classes, height, width]) # > (#img, #cls, H, W)\n gt_br_heatmap = gt_bboxes[-1].new_zeros(\n [batch_size, self.num_classes, height, width]) # > (#img, #cls, H, W)\n gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) # > (#img, 2, H, W)\n gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width]) # > (#img, 2, H, W)\n\n if with_corner_emb:\n match = []\n\n # Guiding shift is a kind of offset, from center to corner\n if with_guiding_shift:\n gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(\n [batch_size, 2, height, width])\n gt_br_guiding_shift = gt_bboxes[-1].new_zeros(\n [batch_size, 2, height, width])\n # Centripetal shift is also a kind of offset, from center to corner\n # and normalized by log.\n if with_centripetal_shift:\n gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(\n [batch_size, 2, height, width])\n gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(\n [batch_size, 2, height, width])\n\n for batch_id in range(batch_size): # > #img\n # Ground truth of corner embedding per image is a list of coord set\n corner_match = []\n for box_id in range(len(gt_labels[batch_id])): # > #obj\n left, top, right, bottom = gt_bboxes[batch_id][box_id]\n center_x = (left + right) / 2.0\n center_y = (top + bottom) / 2.0\n label = gt_labels[batch_id][box_id]\n\n # Use coords in the feature level to generate ground truth\n scale_left = left * width_ratio\n scale_right = right * width_ratio\n scale_top = top * height_ratio\n scale_bottom = bottom * height_ratio\n scale_center_x = center_x * width_ratio\n scale_center_y = center_y * height_ratio\n\n # Int coords on feature map/ground truth tensor\n left_idx = int(min(scale_left, width - 1))\n right_idx = int(min(scale_right, width - 1))\n top_idx = int(min(scale_top, height - 1))\n bottom_idx = int(min(scale_bottom, height - 1))\n\n # Generate gaussian heatmap\n scale_box_width = ceil(scale_right - scale_left)\n scale_box_height = ceil(scale_bottom - scale_top)\n radius = gaussian_radius((scale_box_height, scale_box_width),\n min_overlap=0.3)\n radius = max(0, int(radius))\n gt_tl_heatmap[batch_id, label] = gen_gaussian_target(\n gt_tl_heatmap[batch_id, label], [left_idx, top_idx],\n radius) # > (#img, #cls, H, W) -> (H, W)\n gt_br_heatmap[batch_id, label] = gen_gaussian_target(\n gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],\n radius) # > (#img, #cls, H, W) -> (H, W)\n\n # Generate corner offset\n left_offset = scale_left - left_idx\n top_offset = scale_top - top_idx\n right_offset = scale_right - right_idx\n bottom_offset = scale_bottom - bottom_idx\n gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset # > (#img, 2, H, W)\n gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset # > (#img, 2, H, W)\n gt_br_offset[batch_id, 0, 
bottom_idx, right_idx] = right_offset # > (#img, 2, H, W)\n gt_br_offset[batch_id, 1, bottom_idx,\n right_idx] = bottom_offset # > (#img, 2, H, W)\n\n # Generate corner embedding\n if with_corner_emb:\n corner_match.append([[top_idx, left_idx],\n [bottom_idx, right_idx]])\n # Generate guiding shift\n if with_guiding_shift:\n gt_tl_guiding_shift[batch_id, 0, top_idx,\n left_idx] = scale_center_x - left_idx\n gt_tl_guiding_shift[batch_id, 1, top_idx,\n left_idx] = scale_center_y - top_idx\n gt_br_guiding_shift[batch_id, 0, bottom_idx,\n right_idx] = right_idx - scale_center_x\n gt_br_guiding_shift[\n batch_id, 1, bottom_idx,\n right_idx] = bottom_idx - scale_center_y\n # Generate centripetal shift\n if with_centripetal_shift:\n gt_tl_centripetal_shift[batch_id, 0, top_idx,\n left_idx] = log(scale_center_x -\n scale_left)\n gt_tl_centripetal_shift[batch_id, 1, top_idx,\n left_idx] = log(scale_center_y -\n scale_top)\n gt_br_centripetal_shift[batch_id, 0, bottom_idx,\n right_idx] = log(scale_right -\n scale_center_x)\n gt_br_centripetal_shift[batch_id, 1, bottom_idx,\n right_idx] = log(scale_bottom -\n scale_center_y)\n\n if with_corner_emb:\n match.append(corner_match)\n\n target_result = dict(\n topleft_heatmap=gt_tl_heatmap,\n topleft_offset=gt_tl_offset,\n bottomright_heatmap=gt_br_heatmap,\n bottomright_offset=gt_br_offset)\n\n if with_corner_emb:\n target_result.update(corner_embedding=match)\n if with_guiding_shift:\n target_result.update(\n topleft_guiding_shift=gt_tl_guiding_shift,\n bottomright_guiding_shift=gt_br_guiding_shift)\n if with_centripetal_shift:\n target_result.update(\n topleft_centripetal_shift=gt_tl_centripetal_shift,\n bottomright_centripetal_shift=gt_br_centripetal_shift)\n\n return target_result\n\n def loss(self,\n tl_heats,\n br_heats,\n tl_embs,\n br_embs,\n tl_offs,\n br_offs,\n gt_bboxes,\n gt_labels,\n img_metas,\n gt_bboxes_ignore=None):\n \"\"\"Compute losses of the head.\n\n Args:\n tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n with shape (N, num_classes, H, W).\n br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n level with shape (N, num_classes, H, W).\n tl_embs (list[Tensor]): Top-left corner embeddings for each level\n with shape (N, corner_emb_channels, H, W).\n br_embs (list[Tensor]): Bottom-right corner embeddings for each\n level with shape (N, corner_emb_channels, H, W).\n tl_offs (list[Tensor]): Top-left corner offsets for each level\n with shape (N, corner_offset_channels, H, W).\n br_offs (list[Tensor]): Bottom-right corner offsets for each level\n with shape (N, corner_offset_channels, H, W).\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [left, top, right, bottom] format.\n gt_labels (list[Tensor]): Class indices corresponding to each box.\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (list[Tensor] | None): Specify which bounding\n boxes can be ignored when computing the loss.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components. 
Containing the\n following losses:\n\n - det_loss (list[Tensor]): Corner keypoint losses of all\n feature levels.\n - pull_loss (list[Tensor]): Part one of AssociativeEmbedding\n losses of all feature levels.\n - push_loss (list[Tensor]): Part two of AssociativeEmbedding\n losses of all feature levels.\n - off_loss (list[Tensor]): Corner offset losses of all feature\n levels.\n \"\"\"\n targets = self.get_targets(\n gt_bboxes,\n gt_labels,\n tl_heats[-1].shape,\n img_metas[0]['pad_shape'],\n with_corner_emb=self.with_corner_emb)\n mlvl_targets = [targets for _ in range(self.num_feat_levels)]\n det_losses, pull_losses, push_losses, off_losses = multi_apply(\n self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,\n br_offs, mlvl_targets)\n loss_dict = dict(det_loss=det_losses, off_loss=off_losses)\n if self.with_corner_emb:\n loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)\n return loss_dict\n\n def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,\n targets):\n \"\"\"Compute losses for single level.\n\n Args:\n tl_hmp (Tensor): Top-left corner heatmap for current level with\n shape (N, num_classes, H, W).\n br_hmp (Tensor): Bottom-right corner heatmap for current level with\n shape (N, num_classes, H, W).\n tl_emb (Tensor): Top-left corner embedding for current level with\n shape (N, corner_emb_channels, H, W).\n br_emb (Tensor): Bottom-right corner embedding for current level\n with shape (N, corner_emb_channels, H, W).\n tl_off (Tensor): Top-left corner offset for current level with\n shape (N, corner_offset_channels, H, W).\n br_off (Tensor): Bottom-right corner offset for current level with\n shape (N, corner_offset_channels, H, W).\n targets (dict): Corner target generated by `get_targets`.\n\n Returns:\n tuple[torch.Tensor]: Losses of the head's differnet branches\n containing the following losses:\n\n - det_loss (Tensor): Corner keypoint loss.\n - pull_loss (Tensor): Part one of AssociativeEmbedding loss.\n - push_loss (Tensor): Part two of AssociativeEmbedding loss.\n - off_loss (Tensor): Corner offset loss.\n \"\"\"\n gt_tl_hmp = targets['topleft_heatmap']\n gt_br_hmp = targets['bottomright_heatmap']\n gt_tl_off = targets['topleft_offset']\n gt_br_off = targets['bottomright_offset']\n gt_embedding = targets['corner_embedding']\n\n # Detection loss\n tl_det_loss = self.loss_heatmap(\n tl_hmp.sigmoid(),\n gt_tl_hmp,\n avg_factor=max(1,\n gt_tl_hmp.eq(1).sum()))\n br_det_loss = self.loss_heatmap(\n br_hmp.sigmoid(),\n gt_br_hmp,\n avg_factor=max(1,\n gt_br_hmp.eq(1).sum()))\n det_loss = (tl_det_loss + br_det_loss) / 2.0\n\n # AssociativeEmbedding loss\n if self.with_corner_emb and self.loss_embedding is not None:\n pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,\n gt_embedding)\n else:\n pull_loss, push_loss = None, None\n\n # Offset loss\n # We only compute the offset loss at the real corner position.\n # The value of real corner would be 1 in heatmap ground truth.\n # The mask is computed in class agnostic mode and its shape is\n # batch * 1 * width * height.\n tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n gt_tl_hmp)\n br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n gt_br_hmp)\n tl_off_loss = self.loss_offset(\n tl_off,\n gt_tl_off,\n tl_off_mask,\n avg_factor=max(1, tl_off_mask.sum()))\n br_off_loss = self.loss_offset(\n br_off,\n gt_br_off,\n br_off_mask,\n avg_factor=max(1, br_off_mask.sum()))\n\n off_loss = (tl_off_loss + br_off_loss) / 2.0\n\n return det_loss, pull_loss, push_loss, 
off_loss\n\n def get_bboxes(self,\n tl_heats,\n br_heats,\n tl_embs,\n br_embs,\n tl_offs,\n br_offs,\n img_metas,\n rescale=False,\n with_nms=True):\n \"\"\"Transform network output for a batch into bbox predictions.\n\n Args:\n tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n with shape (N, num_classes, H, W).\n br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n level with shape (N, num_classes, H, W).\n tl_embs (list[Tensor]): Top-left corner embeddings for each level\n with shape (N, corner_emb_channels, H, W).\n br_embs (list[Tensor]): Bottom-right corner embeddings for each\n level with shape (N, corner_emb_channels, H, W).\n tl_offs (list[Tensor]): Top-left corner offsets for each level\n with shape (N, corner_offset_channels, H, W).\n br_offs (list[Tensor]): Bottom-right corner offsets for each level\n with shape (N, corner_offset_channels, H, W).\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before return boxes.\n Default: True.\n \"\"\"\n assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)\n result_list = []\n for img_id in range(len(img_metas)): # > #img\n result_list.append(\n self._get_bboxes_single(\n tl_heats[-1][img_id:img_id + 1, :],\n br_heats[-1][img_id:img_id + 1, :],\n tl_offs[-1][img_id:img_id + 1, :],\n br_offs[-1][img_id:img_id + 1, :],\n img_metas[img_id],\n tl_emb=tl_embs[-1][img_id:img_id + 1, :],\n br_emb=br_embs[-1][img_id:img_id + 1, :],\n rescale=rescale,\n with_nms=with_nms))\n\n return result_list\n\n def _get_bboxes_single(self,\n tl_heat,\n br_heat,\n tl_off,\n br_off,\n img_meta,\n tl_emb=None,\n br_emb=None,\n tl_centripetal_shift=None,\n br_centripetal_shift=None,\n rescale=False,\n with_nms=True):\n \"\"\"Transform outputs for a single batch item into bbox predictions.\n\n Args:\n tl_heat (Tensor): Top-left corner heatmap for current level with\n shape (N, num_classes, H, W).\n br_heat (Tensor): Bottom-right corner heatmap for current level\n with shape (N, num_classes, H, W).\n tl_off (Tensor): Top-left corner offset for current level with\n shape (N, corner_offset_channels, H, W).\n br_off (Tensor): Bottom-right corner offset for current level with\n shape (N, corner_offset_channels, H, W).\n img_meta (dict): Meta information of current image, e.g.,\n image size, scaling factor, etc.\n tl_emb (Tensor): Top-left corner embedding for current level with\n shape (N, corner_emb_channels, H, W).\n br_emb (Tensor): Bottom-right corner embedding for current level\n with shape (N, corner_emb_channels, H, W).\n tl_centripetal_shift: Top-left corner's centripetal shift for\n current level with shape (N, 2, H, W).\n br_centripetal_shift: Bottom-right corner's centripetal shift for\n current level with shape (N, 2, H, W).\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before return boxes.\n Default: True.\n \"\"\"\n if isinstance(img_meta, (list, tuple)):\n img_meta = img_meta[0]\n\n batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(\n tl_heat=tl_heat.sigmoid(),\n br_heat=br_heat.sigmoid(),\n tl_off=tl_off,\n br_off=br_off,\n tl_emb=tl_emb,\n br_emb=br_emb,\n tl_centripetal_shift=tl_centripetal_shift,\n br_centripetal_shift=br_centripetal_shift,\n img_meta=img_meta,\n k=self.test_cfg.corner_topk,\n kernel=self.test_cfg.local_maximum_kernel,\n 
distance_threshold=self.test_cfg.distance_threshold)\n\n if rescale:\n batch_bboxes /= img_meta['scale_factor']\n\n bboxes = batch_bboxes.view([-1, 4])\n scores = batch_scores.view([-1, 1])\n clses = batch_clses.view([-1, 1])\n\n idx = scores.argsort(dim=0, descending=True)\n bboxes = bboxes[idx].view([-1, 4])\n scores = scores[idx].view(-1)\n clses = clses[idx].view(-1)\n\n detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)\n keepinds = (detections[:, -1] > -0.1)\n detections = detections[keepinds]\n labels = clses[keepinds]\n\n if with_nms:\n detections, labels = self._bboxes_nms(detections, labels,\n self.test_cfg)\n\n return detections, labels\n\n def _bboxes_nms(self, bboxes, labels, cfg):\n out_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1], labels,\n cfg.nms_cfg)\n out_labels = labels[keep]\n\n if len(out_bboxes) > 0:\n idx = torch.argsort(out_bboxes[:, -1], descending=True)\n idx = idx[:cfg.max_per_img]\n out_bboxes = out_bboxes[idx]\n out_labels = out_labels[idx]\n\n return out_bboxes, out_labels\n\n def decode_heatmap(self,\n tl_heat,\n br_heat,\n tl_off,\n br_off,\n tl_emb=None,\n br_emb=None,\n tl_centripetal_shift=None,\n br_centripetal_shift=None,\n img_meta=None,\n k=100,\n kernel=3,\n distance_threshold=0.5,\n num_dets=1000):\n \"\"\"Transform outputs for a single batch item into raw bbox predictions.\n\n Args:\n tl_heat (Tensor): Top-left corner heatmap for current level with\n shape (N, num_classes, H, W).\n br_heat (Tensor): Bottom-right corner heatmap for current level\n with shape (N, num_classes, H, W).\n tl_off (Tensor): Top-left corner offset for current level with\n shape (N, corner_offset_channels, H, W).\n br_off (Tensor): Bottom-right corner offset for current level with\n shape (N, corner_offset_channels, H, W).\n tl_emb (Tensor | None): Top-left corner embedding for current\n level with shape (N, corner_emb_channels, H, W).\n br_emb (Tensor | None): Bottom-right corner embedding for current\n level with shape (N, corner_emb_channels, H, W).\n tl_centripetal_shift (Tensor | None): Top-left centripetal shift\n for current level with shape (N, 2, H, W).\n br_centripetal_shift (Tensor | None): Bottom-right centripetal\n shift for current level with shape (N, 2, H, W).\n img_meta (dict): Meta information of current image, e.g.,\n image size, scaling factor, etc.\n k (int): Get top k corner keypoints from heatmap.\n kernel (int): Max pooling kernel for extract local maximum pixels.\n distance_threshold (float): Distance threshold. 
Top-left and\n bottom-right corner keypoints with feature distance less than\n the threshold will be regarded as keypoints from same object.\n num_dets (int): Num of raw boxes before doing nms.\n\n Returns:\n tuple[torch.Tensor]: Decoded output of CornerHead, containing the\n following Tensors:\n\n - bboxes (Tensor): Coords of each box.\n - scores (Tensor): Scores of each box.\n - clses (Tensor): Categories of each box.\n \"\"\"\n with_embedding = tl_emb is not None and br_emb is not None\n with_centripetal_shift = (\n tl_centripetal_shift is not None\n and br_centripetal_shift is not None)\n assert with_embedding + with_centripetal_shift == 1\n batch, _, height, width = tl_heat.size()\n inp_h, inp_w, _ = img_meta['pad_shape']\n\n # perform nms on heatmaps\n tl_heat = self._local_maximum(tl_heat, kernel=kernel)\n br_heat = self._local_maximum(br_heat, kernel=kernel)\n\n tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(tl_heat, k=k)\n br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(br_heat, k=k)\n\n # We use repeat instead of expand here because expand is a\n # shallow-copy function. Thus it could cause unexpected testing result\n # sometimes. Using expand will decrease about 10% mAP during testing\n # compared to repeat.\n tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)\n tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)\n br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)\n br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)\n\n tl_off = self._transpose_and_gather_feat(tl_off, tl_inds)\n tl_off = tl_off.view(batch, k, 1, 2)\n br_off = self._transpose_and_gather_feat(br_off, br_inds)\n br_off = br_off.view(batch, 1, k, 2)\n\n tl_xs = tl_xs + tl_off[..., 0]\n tl_ys = tl_ys + tl_off[..., 1]\n br_xs = br_xs + br_off[..., 0]\n br_ys = br_ys + br_off[..., 1]\n\n if with_centripetal_shift:\n tl_centripetal_shift = self._transpose_and_gather_feat(\n tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()\n br_centripetal_shift = self._transpose_and_gather_feat(\n br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()\n\n tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]\n tl_ctys = tl_ys + tl_centripetal_shift[..., 1]\n br_ctxs = br_xs - br_centripetal_shift[..., 0]\n br_ctys = br_ys - br_centripetal_shift[..., 1]\n\n # all possible boxes based on top k corners (ignoring class)\n tl_xs *= (inp_w / width)\n tl_ys *= (inp_h / height)\n br_xs *= (inp_w / width)\n br_ys *= (inp_h / height)\n\n if with_centripetal_shift:\n tl_ctxs *= (inp_w / width)\n tl_ctys *= (inp_h / height)\n br_ctxs *= (inp_w / width)\n br_ctys *= (inp_h / height)\n\n x_off = img_meta['border'][2]\n y_off = img_meta['border'][0]\n\n tl_xs -= x_off\n tl_ys -= y_off\n br_xs -= x_off\n br_ys -= y_off\n\n tl_xs *= tl_xs.gt(0.0).type_as(tl_xs)\n tl_ys *= tl_ys.gt(0.0).type_as(tl_ys)\n br_xs *= br_xs.gt(0.0).type_as(br_xs)\n br_ys *= br_ys.gt(0.0).type_as(br_ys)\n\n bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)\n area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()\n\n if with_centripetal_shift:\n tl_ctxs -= x_off\n tl_ctys -= y_off\n br_ctxs -= x_off\n br_ctys -= y_off\n\n tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)\n tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)\n br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)\n br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)\n\n ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),\n dim=3)\n area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()\n\n rcentral = torch.zeros_like(ct_bboxes)\n # magic nums from paper section 4.1\n mu = torch.ones_like(area_bboxes) / 
2.4\n mu[area_bboxes > 3500] = 1 / 2.1 # large bbox have smaller mu\n\n bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2\n bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2\n rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -\n bboxes[..., 0]) / 2\n rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -\n bboxes[..., 1]) / 2\n rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -\n bboxes[..., 0]) / 2\n rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -\n bboxes[..., 1]) / 2\n area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *\n (rcentral[..., 3] - rcentral[..., 1])).abs()\n dists = area_ct_bboxes / area_rcentral\n\n tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (\n ct_bboxes[..., 0] >= rcentral[..., 2])\n tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (\n ct_bboxes[..., 1] >= rcentral[..., 3])\n br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (\n ct_bboxes[..., 2] >= rcentral[..., 2])\n br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (\n ct_bboxes[..., 3] >= rcentral[..., 3])\n\n if with_embedding:\n tl_emb = self._transpose_and_gather_feat(tl_emb, tl_inds)\n tl_emb = tl_emb.view(batch, k, 1)\n br_emb = self._transpose_and_gather_feat(br_emb, br_inds)\n br_emb = br_emb.view(batch, 1, k)\n dists = torch.abs(tl_emb - br_emb)\n\n tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)\n br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)\n\n scores = (tl_scores + br_scores) / 2 # scores for all possible boxes\n\n # tl and br should have same class\n tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)\n br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)\n cls_inds = (tl_clses != br_clses)\n\n # reject boxes based on distances\n dist_inds = dists > distance_threshold\n\n # reject boxes based on widths and heights\n width_inds = (br_xs <= tl_xs)\n height_inds = (br_ys <= tl_ys)\n\n scores[cls_inds] = -1\n scores[width_inds] = -1\n scores[height_inds] = -1\n scores[dist_inds] = -1\n if with_centripetal_shift:\n scores[tl_ctx_inds] = -1\n scores[tl_cty_inds] = -1\n scores[br_ctx_inds] = -1\n scores[br_cty_inds] = -1\n\n scores = scores.view(batch, -1)\n scores, inds = torch.topk(scores, num_dets)\n scores = scores.unsqueeze(2)\n\n bboxes = bboxes.view(batch, -1, 4)\n bboxes = self._gather_feat(bboxes, inds)\n\n clses = tl_clses.contiguous().view(batch, -1, 1)\n clses = self._gather_feat(clses, inds).float()\n\n return bboxes, scores, clses",
"import os\nimport torch.nn as nn\n\nfrom mmcv.cnn import constant_init, kaiming_init\nfrom mmcv.ops import ModulatedDeformConv2dPack\nfrom mmcv.runner import load_checkpoint\n\nfrom ..builder import BACKBONES\n\nimport math\nimport logging\nimport numpy as np\nfrom os.path import join\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\n\nBN_MOMENTUM = 0.1\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, inplanes, planes, stride=1, dilation=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,\n stride=stride, padding=dilation,\n bias=False, dilation=dilation)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=1, padding=dilation,\n bias=False, dilation=dilation)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.stride = stride\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 2\n\n def __init__(self, inplanes, planes, stride=1, dilation=1):\n super(Bottleneck, self).__init__()\n expansion = Bottleneck.expansion\n bottle_planes = planes // expansion\n self.conv1 = nn.Conv2d(inplanes, bottle_planes,\n kernel_size=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(bottle_planes, bottle_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n bias=False,\n dilation=dilation)\n self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(bottle_planes, planes,\n kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.stride = stride\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass BottleneckX(nn.Module):\n expansion = 2\n cardinality = 32\n\n def __init__(self, inplanes, planes, stride=1, dilation=1):\n super(BottleneckX, self).__init__()\n cardinality = BottleneckX.cardinality\n # dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))\n # bottle_planes = dim * cardinality\n bottle_planes = planes * cardinality // 32\n self.conv1 = nn.Conv2d(inplanes, bottle_planes,\n kernel_size=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,\n stride=stride,\n padding=dilation,\n bias=False,\n dilation=dilation,\n groups=cardinality)\n self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(bottle_planes, planes,\n kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.stride = stride\n\n def forward(self, x, residual=None):\n if residual is None:\n residual = x\n\n out = self.conv1(x)\n out 
= self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Root(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, residual):\n super(Root, self).__init__()\n self.conv = nn.Conv2d(\n in_channels, out_channels, 1,\n stride=1, bias=False, padding=(kernel_size - 1) // 2)\n self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.residual = residual\n\n def forward(self, *x):\n children = x\n x = self.conv(torch.cat(x, 1))\n x = self.bn(x)\n if self.residual:\n x += children[0]\n x = self.relu(x)\n\n return x\n\n\nclass Tree(nn.Module):\n def __init__(self, levels, block, in_channels, out_channels, stride=1,\n level_root=False, root_dim=0, root_kernel_size=1,\n dilation=1, root_residual=False):\n super(Tree, self).__init__()\n if root_dim == 0:\n root_dim = 2 * out_channels\n if level_root:\n root_dim += in_channels\n if levels == 1:\n self.tree1 = block(in_channels, out_channels, stride,\n dilation=dilation)\n self.tree2 = block(out_channels, out_channels, 1,\n dilation=dilation)\n else:\n self.tree1 = Tree(levels - 1, block, in_channels, out_channels,\n stride,\n root_dim=0,\n root_kernel_size=root_kernel_size,\n dilation=dilation,\n root_residual=root_residual)\n self.tree2 = Tree(levels - 1, block, out_channels, out_channels,\n root_dim=root_dim + out_channels,\n root_kernel_size=root_kernel_size,\n dilation=dilation,\n root_residual=root_residual)\n if levels == 1:\n self.root = Root(root_dim, out_channels, root_kernel_size,\n root_residual)\n self.level_root = level_root\n self.root_dim = root_dim\n self.downsample = None\n self.project = None\n self.levels = levels\n if stride > 1:\n self.downsample = nn.MaxPool2d(stride, stride=stride)\n if in_channels != out_channels:\n self.project = nn.Sequential(\n nn.Conv2d(in_channels, out_channels,\n kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)\n )\n\n def forward(self, x, residual=None, children=None):\n children = [] if children is None else children\n bottom = self.downsample(x) if self.downsample else x\n residual = self.project(bottom) if self.project else bottom\n if self.level_root:\n children.append(bottom)\n x1 = self.tree1(x, residual)\n if self.levels == 1:\n x2 = self.tree2(x1)\n x = self.root(x2, x1, *children)\n else:\n children.append(x1)\n x = self.tree2(x1, children=children)\n return x\n\n\nclass DLA(nn.Module):\n def __init__(self, levels, channels, num_classes=1000,\n block=BasicBlock, residual_root=False):\n super(DLA, self).__init__()\n self.channels = channels # [16, 32, 64, 128, 256, 512]\n self.num_classes = num_classes\n self.base_layer = nn.Sequential(\n nn.Conv2d(3, channels[0],\n kernel_size=7,\n stride=1,\n padding=3,\n bias=False),\n nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True))\n self.level0 = self._make_conv_level(channels[0], channels[0], levels[0])\n self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2)\n self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,\n level_root=False,\n root_residual=residual_root)\n self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,\n level_root=True,\n root_residual=residual_root)\n self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,\n level_root=True,\n root_residual=residual_root)\n self.level5 = 
Tree(levels[5], block, channels[4], channels[5], 2,\n level_root=True,\n root_residual=residual_root)\n\n def _make_level(self, block, inplanes, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or inplanes != planes:\n downsample = nn.Sequential(\n nn.MaxPool2d(stride, stride=stride),\n nn.Conv2d(inplanes, planes,\n kernel_size=1,\n stride=1,\n bias=False),\n nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),\n )\n\n layers = []\n layers.append(block(inplanes, planes, stride, downsample=downsample))\n for i in range(1, blocks):\n layers.append(block(inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):\n modules = []\n for i in range(convs):\n modules.extend([\n nn.Conv2d(inplanes, planes,\n kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation,\n bias=False,\n dilation=dilation),\n nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)])\n inplanes = planes\n return nn.Sequential(*modules)\n\n def forward(self, x):\n y = []\n x = self.base_layer(x)\n for i in range(6):\n x = getattr(self, 'level{}'.format(i))(x)\n y.append(x)\n return y\n\n\nclass Identity(nn.Module):\n\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\ndef fill_fc_weights(layers):\n for m in layers.modules():\n if isinstance(m, nn.Conv2d):\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n\ndef fill_up_weights(up):\n w = up.weight.data\n f = math.ceil(w.size(2) / 2)\n c = (2 * f - 1 - f % 2) / (2. * f)\n for i in range(w.size(2)):\n for j in range(w.size(3)):\n w[0, 0, i, j] = \\\n (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))\n for c in range(1, w.size(0)):\n w[c, 0, :, :] = w[0, 0, :, :]\n\n\nclass DeformConv(nn.Module):\n def __init__(self, chi, cho):\n super(DeformConv, self).__init__()\n self.actf = nn.Sequential(\n nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),\n nn.ReLU(inplace=True)\n )\n self.conv = ModulatedDeformConv2dPack(chi, cho, kernel_size=(3, 3), stride=1, padding=1,\n dilation=1, deformable_groups=1)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.actf(x)\n return x\n\n\nclass IDAUp(nn.Module):\n\n def __init__(self, o, channels, up_f):\n super(IDAUp, self).__init__()\n for i in range(1, len(channels)):\n c = channels[i]\n f = int(up_f[i])\n proj = DeformConv(c, o)\n node = DeformConv(o, o)\n\n up = nn.ConvTranspose2d(o, o, f * 2, stride=f,\n padding=f // 2, output_padding=0,\n groups=o, bias=False)\n fill_up_weights(up)\n\n setattr(self, 'proj_' + str(i), proj)\n setattr(self, 'up_' + str(i), up)\n setattr(self, 'node_' + str(i), node)\n\n def forward(self, layers, startp, endp):\n for i in range(startp + 1, endp): # > (1, 3)\n upsample = getattr(self, 'up_' + str(i - startp))\n project = getattr(self, 'proj_' + str(i - startp))\n layers[i] = upsample(project(layers[i])) # 128x64x64 -> 64x64x64 -> 64x128x128\n node = getattr(self, 'node_' + str(i - startp))\n layers[i] = node(layers[i] + layers[i - 1])\n\n\nclass DLAUp(nn.Module):\n def __init__(self, startp, channels, scales, in_channels=None):\n super(DLAUp, self).__init__()\n self.startp = startp\n if in_channels is None:\n in_channels = channels\n self.channels = channels\n channels = list(channels)\n scales = np.array(scales, dtype=int)\n for i in range(len(channels) - 1):\n j = -i - 2\n setattr(self, 'ida_{}'.format(i),\n IDAUp(channels[j], in_channels[j:],\n scales[j:] // scales[j]))\n scales[j + 1:] = scales[j]\n in_channels[j + 1:] = [channels[j] for _ in 
channels[j + 1:]]\n\n def forward(self, layers):\n out = [layers[-1]] # start with 32\n for i in range(len(layers) - self.startp - 1):\n ida = getattr(self, 'ida_{}'.format(i))\n ida(layers, len(layers) - i - 2, len(layers))\n out.insert(0, layers[-1])\n return out\n\n\nclass Interpolate(nn.Module):\n def __init__(self, scale, mode):\n super(Interpolate, self).__init__()\n self.scale = scale\n self.mode = mode\n\n def forward(self, x):\n x = F.interpolate(x, scale_factor=self.scale, mode=self.mode, align_corners=False)\n return x\n\n\[email protected]_module\nclass DLASeg(nn.Module):\n\n def __init__(self, levels, channels, down_ratio,\n last_level, out_channel=0, zero_init_residual=True):\n super(DLASeg, self).__init__()\n assert down_ratio in [2, 4, 8, 16]\n self.zero_init_residual = zero_init_residual\n self.first_level = int(np.log2(down_ratio))\n self.last_level = last_level\n self.base = DLA(levels, channels, block=BasicBlock)\n channels = self.base.channels\n scales = [2 ** i for i in range(len(channels[self.first_level:]))]\n self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales)\n\n if out_channel == 0:\n out_channel = channels[self.first_level]\n\n self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level],\n [2 ** i for i in range(self.last_level - self.first_level)])\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n load_checkpoint(self.base, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n constant_init(m, 1)\n\n for m in self.modules():\n if isinstance(m, (BasicBlock, Bottleneck)) and hasattr(m, 'conv_offset'):\n constant_init(m.conv_offset, 0)\n\n if self.zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n constant_init(m.bn3, 0)\n elif isinstance(m, BasicBlock):\n constant_init(m.bn2, 0)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x):\n x = self.base(x)\n x = self.dla_up(x)\n\n y = []\n for i in range(self.last_level - self.first_level): # > #lvl=3\n y.append(x[i].clone())\n self.ida_up(y, 0, len(y)) # all(3) -> (64x128x128) x 3\n\n return y\n"
] | [
[
"torch.nn.Sequential",
"torch.abs",
"torch.argsort",
"torch.topk",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.Identity",
"torch.stack",
"torch.nn.ReLU",
"torch.nn.functional.max_pool2d",
"torch.ones_like"
],
[
"torch.nn.Sequential",
"numpy.log2",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Flsahkong/faster-rcnn.pytorch | [
"f2b4753f3ca86a19c2701d28aa135a81b2a21c25",
"f2b4753f3ca86a19c2701d28aa135a81b2a21c25"
] | [
"test_net.py",
"lib/model/rpn/rpn.py"
] | [
"# --------------------------------------------------------\n# Pytorch Multi-GPU Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\n\nimport cv2\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport pickle\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\n# from model.nms.nms_wrapper import nms\nfrom model.roi_layers import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\nfrom model.faster_rcnn.vgg16 import vgg16\nfrom model.faster_rcnn.resnet import resnet\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='pascal_voc', type=str)\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default='cfgs/vgg16.yml', type=str)\n parser.add_argument('--net', dest='net',\n help='vgg16, res50, res101, res152',\n default='res101', type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models', default=\"models\",\n type=str)\n parser.add_argument('--cuda', dest='cuda',\n help='whether use CUDA',\n action='store_true')\n parser.add_argument('--ls', dest='large_scale',\n help='whether use large imag scale',\n action='store_true')\n parser.add_argument('--mGPUs', dest='mGPUs',\n help='whether use multiple GPUs',\n action='store_true')\n parser.add_argument('--cag', dest='class_agnostic',\n help='whether perform class_agnostic bbox regression',\n action='store_true')\n parser.add_argument('--parallel_type', dest='parallel_type',\n help='which part of model to parallel, 0: all, 1: model before roi pooling',\n default=0, type=int)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=1, type=int)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=1, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=10021, type=int)\n parser.add_argument('--vis', dest='vis',\n help='visualization mode',\n action='store_true')\n args = parser.parse_args()\n return args\n\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = cfg.TRAIN.WEIGHT_DECAY\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if torch.cuda.is_available() and not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n np.random.seed(cfg.RNG_SEED)\n if args.dataset == \"pascal_voc\":\n args.imdb_name = \"voc_2007_trainval\"\n 
args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"pascal_voc_0712\":\n args.imdb_name = \"voc_2007_trainval+voc_2012_trainval\"\n args.imdbval_name = \"voc_2007_test\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"coco\":\n args.imdb_name = \"coco_2014_train+coco_2014_valminusminival\"\n args.imdbval_name = \"coco_2014_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"imagenet\":\n args.imdb_name = \"imagenet_train\"\n args.imdbval_name = \"imagenet_val\"\n args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n elif args.dataset == \"vg\":\n args.imdb_name = \"vg_150-50-50_minitrain\"\n args.imdbval_name = \"vg_150-50-50_minival\"\n args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']\n\n args.cfg_file = \"cfgs/{}_ls.yml\".format(args.net) if args.large_scale else \"cfgs/{}.yml\".format(args.net)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n # Unlike training, where this flag is set to True\n cfg.TRAIN.USE_FLIPPED = False\n # Prepare the data; combined_roidb(args.imdb_name) is the core of the data preparation.\n # The returned imdb is an instance of the pascal_voc class; only some of its paths are used later, so it matters little. roidb contains all the information needed to train the network; its construction is shown below\n imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)\n # Note this\n imdb.competition_mode(on=True)\n\n print('{:d} roidb entries'.format(len(roidb)))\n\n # Load the model checkpoint\n input_dir = args.load_dir + \"/\" + args.net + \"/\" + args.dataset\n if not os.path.exists(input_dir):\n raise Exception('There is no input directory for loading network from ' + input_dir)\n load_name = os.path.join(input_dir,\n 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n\n # initialize the network here.\n if args.net == 'vgg16':\n fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res101':\n fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res50':\n fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res152':\n fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n fasterRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name)\n fasterRCNN.load_state_dict(checkpoint['model'])\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n\n print('load model successfully!')\n # initialize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n\n # ship to cuda\n if args.cuda:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data)\n im_info = Variable(im_info)\n num_boxes = Variable(num_boxes)\n gt_boxes = Variable(gt_boxes)\n\n if args.cuda:\n cfg.CUDA = True\n\n if args.cuda:\n # Move all model parameters and buffers to the GPU\n fasterRCNN.cuda()\n\n start = time.time()\n max_per_image = 100\n\n # vis means visualization mode\n vis = args.vis\n\n if vis:\n thresh = 0.05\n 
else:\n thresh = 0.0\n\n save_name = 'faster_rcnn_10'\n # num_images is 4952\n num_images = len(imdb.image_index)\n all_boxes = [[[] for _ in xrange(num_images)]\n for _ in xrange(imdb.num_classes)]\n\n output_dir = get_output_dir(imdb, save_name)\n dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \\\n imdb.num_classes, training=False, normalize=False)\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,\n shuffle=False, num_workers=0,\n pin_memory=True)\n\n data_iter = iter(dataloader)\n\n _t = {'im_detect': time.time(), 'misc': time.time()}\n det_file = os.path.join(output_dir, 'detections.pkl')\n\n fasterRCNN.eval()\n empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))\n for i in range(num_images):\n\n data = next(data_iter)\n with torch.no_grad():\n im_data.resize_(data[0].size()).copy_(data[0])\n im_info.resize_(data[1].size()).copy_(data[1])\n gt_boxes.resize_(data[2].size()).copy_(data[2])\n num_boxes.resize_(data[3].size()).copy_(data[3])\n\n det_tic = time.time()\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)\n\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))\n # This shows that the final bbox prediction module of Faster R-CNN predicts the offsets tx, ty, tw, th on top of the rois\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n # Clip the transformed boxes here\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n pred_boxes /= data[1][0][2].item()\n\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n det_toc = time.time()\n detect_time = det_toc - det_tic\n misc_tic = time.time()\n if vis:\n im = cv2.imread(imdb.image_path_at(i))\n im2show = np.copy(im)\n for j in xrange(1, imdb.num_classes):\n inds = torch.nonzero(scores[:, j] > thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n cls_scores = scores[:, j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]\n\n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n if vis:\n im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)\n all_boxes[j][i] = cls_dets.cpu().numpy()\n else:\n all_boxes[j][i] = empty_array\n\n # Limit to max_per_image detections *over all classes*\n if max_per_image > 0:\n image_scores = np.hstack([all_boxes[j][i][:, -1]\n for j in xrange(1, imdb.num_classes)])\n if len(image_scores) > max_per_image:\n image_thresh = np.sort(image_scores)[-max_per_image]\n for j in xrange(1, 
imdb.num_classes):\n keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]\n all_boxes[j][i] = all_boxes[j][i][keep, :]\n\n misc_toc = time.time()\n nms_time = misc_toc - misc_tic\n\n sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \\r' \\\n .format(i + 1, num_images, detect_time, nms_time))\n sys.stdout.flush()\n\n if vis:\n cv2.imwrite('result.png', im2show)\n # pdb.set_trace()\n # cv2.imshow('test', im2show)\n # cv2.waitKey(0)\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n print('Evaluating detections')\n imdb.evaluate_detections(all_boxes, output_dir)\n\n end = time.time()\n print(\"test time: %0.4fs\" % (end - start))\n",
"from __future__ import absolute_import\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom model.utils.config import cfg\nfrom .proposal_layer import _ProposalLayer\nfrom .anchor_target_layer import _AnchorTargetLayer\nfrom model.utils.net_utils import _smooth_l1_loss\n\nimport numpy as np\nimport math\nimport pdb\nimport time\n\nclass _RPN(nn.Module):\n \"\"\" region proposal network \"\"\"\n def __init__(self, din):\n super(_RPN, self).__init__()\n \n self.din = din # get depth of input feature map, e.g., 512\n self.anchor_scales = cfg.ANCHOR_SCALES\n self.anchor_ratios = cfg.ANCHOR_RATIOS\n # Feature stride for RPN. FEAT_STRIDE is 16 because VGG16 shrinks the input image by a factor of 16; it is used to map coordinates back to the original image\n self.feat_stride = cfg.FEAT_STRIDE[0]\n\n # Define a 3*3 convolution with stride 1, corresponding to the first conv layer of the RPN\n # define the convrelu layers processing input feature map\n self.RPN_Conv = nn.Conv2d(self.din, 512, 3, 1, 1, bias=True)\n\n # define bg/fg classification score layer; the output has 18 channels, corresponding to the upper branch\n # 18 here means every feature-map location has 9 anchors, and each anchor has two outputs, positive and negative\n # positive is the foreground (fg) and negative is the background (bg)\n self.nc_score_out = len(self.anchor_scales) * len(self.anchor_ratios) * 2 # 2(bg/fg) * 9 (anchors)\n self.RPN_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)\n\n # define anchor box offset prediction layer; the output has 36 channels, corresponding to the lower branch\n # The bbox branch predicts tx, ty, tw, th, not actual box coordinates; these t values are combined with the anchors to obtain the real boxes\n self.nc_bbox_out = len(self.anchor_scales) * len(self.anchor_ratios) * 4 # 4(coords) * 9 (anchors)\n self.RPN_bbox_pred = nn.Conv2d(512, self.nc_bbox_out, 1, 1, 0)\n\n # define proposal layer\n self.RPN_proposal = _ProposalLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)\n\n # define anchor target layer\n self.RPN_anchor_target = _AnchorTargetLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)\n\n self.rpn_loss_cls = 0\n self.rpn_loss_box = 0\n\n # A static method does not take self as a parameter\n @staticmethod\n def reshape(x, d):\n input_shape = x.size()\n x = x.view(\n input_shape[0],\n int(d),\n int(float(input_shape[1] * input_shape[2]) / float(d)),\n input_shape[3]\n )\n return x\n\n def forward(self, base_feat, im_info, gt_boxes, num_boxes):\n\n batch_size = base_feat.size(0)\n\n\n # inplace means the computed result overwrites the input in place\n # return feature map after convrelu layer\n rpn_conv1 = F.relu(self.RPN_Conv(base_feat), inplace=True)\n # First the 3*3 conv above, then this 1*1 conv\n # get rpn classification score\n rpn_cls_score = self.RPN_cls_score(rpn_conv1)\n # Then pass through the reshape layer\n rpn_cls_score_reshape = self.reshape(rpn_cls_score, 2)\n # Use softmax for binary classification to decide which of the 9 anchors at each feature-map location contains an object (foreground)\n # The second argument is dim, the dimension along which softmax is applied; 1 means the second dimension\n rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, 1)\n rpn_cls_prob = self.reshape(rpn_cls_prob_reshape, self.nc_score_out)\n # li = rpn_cls_prob_reshape.view(rpn_cls_prob_reshape.size()[0],rpn_cls_prob_reshape.size()[2]\n # ,rpn_cls_prob_reshape.size()[3],\n # rpn_cls_prob_reshape.size()[1])\n #\n # print(li)\n # print(li.size())\n # print(rpn_cls_prob_reshape.size())\n # get rpn offsets to the anchor boxes\n # The commented-out code above shows that each anchor's softmax output lies between 0 and 1, yet the values do not sum to 1; why not?\n # Testing shows view cannot simply be used here; view and reshape give the same result, and swapping dimensions requires torch.transpose\n\n # After the 1*1 layer the output is 4*9, where 4 stands for the predicted tx, ty, tw, th\n rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv1)\n\n # proposal layer\n cfg_key = 'TRAIN' if self.training else 'TEST'\n\n # rois has shape [batch, 2000, 5]; the coordinates are the last four entries of the third dimension and are the top-left and bottom-right corners\n rois = self.RPN_proposal((rpn_cls_prob.data, rpn_bbox_pred.data,\n im_info, cfg_key))\n\n self.rpn_loss_cls = 0\n self.rpn_loss_box = 0\n\n\n # generating 
training labels and build the rpn loss\n if self.training:\n assert gt_boxes is not None\n\n # This rpn_data is used when computing the loss\n # rpn_cls_score is used here; it is the 18-channel output of the first 1*1 conv branch, before softmax\n rpn_data = self.RPN_anchor_target((rpn_cls_score.data, gt_boxes, im_info, num_boxes))\n # The returned rpn_data contains (labels, bbox_targets, bbox_inside_weights, bbox_outside_weights)\n\n # compute classification loss\n rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)\n rpn_label = rpn_data[0].view(batch_size, -1)\n\n # Note: the batches are flattened together here\n\n # torch.ne compares input and other, giving 1 where they differ and 0 where they are equal\n # This filters out the irrelevant labels, keeping only foreground and background\n rpn_keep = Variable(rpn_label.view(-1).ne(-1).nonzero().view(-1))\n rpn_cls_score = torch.index_select(rpn_cls_score.view(-1,2), 0, rpn_keep)\n rpn_label = torch.index_select(rpn_label.view(-1), 0, rpn_keep.data)\n rpn_label = Variable(rpn_label.long())\n # The loss pushes the scores toward the labels\n self.rpn_loss_cls = F.cross_entropy(rpn_cls_score, rpn_label)\n fg_cnt = torch.sum(rpn_label.data.ne(0))\n\n rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]\n\n # compute bbox regression loss\n rpn_bbox_inside_weights = Variable(rpn_bbox_inside_weights)\n rpn_bbox_outside_weights = Variable(rpn_bbox_outside_weights)\n rpn_bbox_targets = Variable(rpn_bbox_targets)\n\n self.rpn_loss_box = _smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,\n rpn_bbox_outside_weights, sigma=3, dim=[1,2,3])\n\n return rois, self.rpn_loss_cls, self.rpn_loss_box\n"
] | [
[
"torch.LongTensor",
"numpy.random.seed",
"torch.load",
"torch.utils.data.DataLoader",
"numpy.tile",
"numpy.sort",
"numpy.copy",
"torch.FloatTensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.sort",
"torch.nonzero",
"numpy.array",
"numpy.where",
"torch.autograd.Variable"
],
[
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.nn.Conv2d",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
meraviverma/Spark | [
"d4c6ec6ba77effeda78302e13a637e3fe4205e80"
] | [
"python/pyspark/sql/pandas/conversion.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport sys\nimport warnings\nif sys.version >= '3':\n basestring = unicode = str\n xrange = range\nelse:\n from itertools import izip as zip\n\nfrom pyspark import since\nfrom pyspark.rdd import _load_from_socket\nfrom pyspark.sql.pandas.serializers import ArrowCollectSerializer\nfrom pyspark.sql.types import IntegralType\nfrom pyspark.sql.types import *\nfrom pyspark.traceback_utils import SCCallSiteSync\nfrom pyspark.util import _exception_message\n\n\nclass PandasConversionMixin(object):\n \"\"\"\n Min-in for the conversion from Spark to pandas. Currently, only :class:`DataFrame`\n can use this class.\n \"\"\"\n\n @since(1.3)\n def toPandas(self):\n \"\"\"\n Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.\n\n This is only available if Pandas is installed and available.\n\n .. note:: This method should only be used if the resulting Pandas's :class:`DataFrame` is\n expected to be small, as all the data is loaded into the driver's memory.\n\n .. 
note:: Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.\n\n >>> df.toPandas() # doctest: +SKIP\n age name\n 0 2 Alice\n 1 5 Bob\n \"\"\"\n from pyspark.sql.dataframe import DataFrame\n\n assert isinstance(self, DataFrame)\n\n from pyspark.sql.pandas.utils import require_minimum_pandas_version\n require_minimum_pandas_version()\n\n import numpy as np\n import pandas as pd\n\n timezone = self.sql_ctx._conf.sessionLocalTimeZone()\n\n if self.sql_ctx._conf.arrowPySparkEnabled():\n use_arrow = True\n try:\n from pyspark.sql.pandas.types import to_arrow_schema\n from pyspark.sql.pandas.utils import require_minimum_pyarrow_version\n\n require_minimum_pyarrow_version()\n to_arrow_schema(self.schema)\n except Exception as e:\n\n if self.sql_ctx._conf.arrowPySparkFallbackEnabled():\n msg = (\n \"toPandas attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, \"\n \"failed by the reason below:\\n %s\\n\"\n \"Attempting non-optimization as \"\n \"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to \"\n \"true.\" % _exception_message(e))\n warnings.warn(msg)\n use_arrow = False\n else:\n msg = (\n \"toPandas attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has \"\n \"reached the error below and will not continue because automatic fallback \"\n \"with 'spark.sql.execution.arrow.pyspark.fallback.enabled' has been set to \"\n \"false.\\n %s\" % _exception_message(e))\n warnings.warn(msg)\n raise\n\n # Try to use Arrow optimization when the schema is supported and the required version\n # of PyArrow is found, if 'spark.sql.execution.arrow.pyspark.enabled' is enabled.\n if use_arrow:\n try:\n from pyspark.sql.pandas.types import _check_dataframe_localize_timestamps\n import pyarrow\n batches = self._collect_as_arrow()\n if len(batches) > 0:\n table = pyarrow.Table.from_batches(batches)\n # Pandas DataFrame created from PyArrow uses datetime64[ns] for date type\n # values, but we should use datetime.date to match the behavior with when\n # Arrow optimization is disabled.\n pdf = table.to_pandas(date_as_object=True)\n return _check_dataframe_localize_timestamps(pdf, timezone)\n else:\n return pd.DataFrame.from_records([], columns=self.columns)\n except Exception as e:\n # We might have to allow fallback here as well but multiple Spark jobs can\n # be executed. So, simply fail in this case for now.\n msg = (\n \"toPandas attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has \"\n \"reached the error below and can not continue. Note that \"\n \"'spark.sql.execution.arrow.pyspark.fallback.enabled' does not have an \"\n \"effect on failures in the middle of \"\n \"computation.\\n %s\" % _exception_message(e))\n warnings.warn(msg)\n raise\n\n # Below is toPandas without Arrow optimization.\n pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)\n\n dtype = {}\n for field in self.schema:\n pandas_type = PandasConversionMixin._to_corrected_pandas_type(field.dataType)\n # SPARK-21766: if an integer field is nullable and has null values, it can be\n # inferred by pandas as float column. Once we convert the column with NaN back\n # to integer type e.g., np.int16, we will hit exception. 
So we use the inferred\n # float type, not the corrected type from the schema in this case.\n if pandas_type is not None and \\\n not(isinstance(field.dataType, IntegralType) and field.nullable and\n pdf[field.name].isnull().any()):\n dtype[field.name] = pandas_type\n # Ensure we fall back to nullable numpy types, even when whole column is null:\n if isinstance(field.dataType, IntegralType) and pdf[field.name].isnull().any():\n dtype[field.name] = np.float64\n if isinstance(field.dataType, BooleanType) and pdf[field.name].isnull().any():\n dtype[field.name] = np.object\n\n for f, t in dtype.items():\n pdf[f] = pdf[f].astype(t, copy=False)\n\n if timezone is None:\n return pdf\n else:\n from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz\n for field in self.schema:\n # TODO: handle nested timestamps, such as ArrayType(TimestampType())?\n if isinstance(field.dataType, TimestampType):\n pdf[field.name] = \\\n _check_series_convert_timestamps_local_tz(pdf[field.name], timezone)\n return pdf\n\n @staticmethod\n def _to_corrected_pandas_type(dt):\n \"\"\"\n When converting Spark SQL records to Pandas :class:`DataFrame`, the inferred data type\n may be wrong. This method gets the corrected data type for Pandas if that type may be\n inferred incorrectly.\n \"\"\"\n import numpy as np\n if type(dt) == ByteType:\n return np.int8\n elif type(dt) == ShortType:\n return np.int16\n elif type(dt) == IntegerType:\n return np.int32\n elif type(dt) == LongType:\n return np.int64\n elif type(dt) == FloatType:\n return np.float32\n elif type(dt) == DoubleType:\n return np.float64\n elif type(dt) == BooleanType:\n return np.bool\n elif type(dt) == TimestampType:\n return np.datetime64\n else:\n return None\n\n def _collect_as_arrow(self):\n \"\"\"\n Returns all records as a list of ArrowRecordBatches, pyarrow must be installed\n and available on driver and worker Python environments.\n\n .. note:: Experimental.\n \"\"\"\n from pyspark.sql.dataframe import DataFrame\n\n assert isinstance(self, DataFrame)\n\n with SCCallSiteSync(self._sc):\n port, auth_secret, jsocket_auth_server = self._jdf.collectAsArrowToPython()\n\n # Collect list of un-ordered batches where last element is a list of correct order indices\n try:\n results = list(_load_from_socket((port, auth_secret), ArrowCollectSerializer()))\n finally:\n # Join serving thread and raise any exceptions from collectAsArrowToPython\n jsocket_auth_server.getResult()\n\n # Separate RecordBatches from batch order indices in results\n batches = results[:-1]\n batch_order = results[-1]\n\n # Re-order the batch list using the correct order\n return [batches[i] for i in batch_order]\n\n\nclass SparkConversionMixin(object):\n \"\"\"\n Min-in for the conversion from pandas to Spark. 
Currently, only :class:`SparkSession`\n can use this class.\n \"\"\"\n def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):\n from pyspark.sql import SparkSession\n\n assert isinstance(self, SparkSession)\n\n from pyspark.sql.pandas.utils import require_minimum_pandas_version\n require_minimum_pandas_version()\n\n timezone = self._wrapped._conf.sessionLocalTimeZone()\n\n # If no schema supplied by user then get the names of columns only\n if schema is None:\n schema = [str(x) if not isinstance(x, basestring) else\n (x.encode('utf-8') if not isinstance(x, str) else x)\n for x in data.columns]\n\n if self._wrapped._conf.arrowPySparkEnabled() and len(data) > 0:\n try:\n return self._create_from_pandas_with_arrow(data, schema, timezone)\n except Exception as e:\n from pyspark.util import _exception_message\n\n if self._wrapped._conf.arrowPySparkFallbackEnabled():\n msg = (\n \"createDataFrame attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, \"\n \"failed by the reason below:\\n %s\\n\"\n \"Attempting non-optimization as \"\n \"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to \"\n \"true.\" % _exception_message(e))\n warnings.warn(msg)\n else:\n msg = (\n \"createDataFrame attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has \"\n \"reached the error below and will not continue because automatic \"\n \"fallback with 'spark.sql.execution.arrow.pyspark.fallback.enabled' \"\n \"has been set to false.\\n %s\" % _exception_message(e))\n warnings.warn(msg)\n raise\n data = self._convert_from_pandas(data, schema, timezone)\n return self._create_dataframe(data, schema, samplingRatio, verifySchema)\n\n def _convert_from_pandas(self, pdf, schema, timezone):\n \"\"\"\n Convert a pandas.DataFrame to list of records that can be used to make a DataFrame\n :return list of records\n \"\"\"\n from pyspark.sql import SparkSession\n\n assert isinstance(self, SparkSession)\n\n if timezone is not None:\n from pyspark.sql.pandas.types import _check_series_convert_timestamps_tz_local\n copied = False\n if isinstance(schema, StructType):\n for field in schema:\n # TODO: handle nested timestamps, such as ArrayType(TimestampType())?\n if isinstance(field.dataType, TimestampType):\n s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)\n if s is not pdf[field.name]:\n if not copied:\n # Copy once if the series is modified to prevent the original\n # Pandas DataFrame from being updated\n pdf = pdf.copy()\n copied = True\n pdf[field.name] = s\n else:\n for column, series in pdf.iteritems():\n s = _check_series_convert_timestamps_tz_local(series, timezone)\n if s is not series:\n if not copied:\n # Copy once if the series is modified to prevent the original\n # Pandas DataFrame from being updated\n pdf = pdf.copy()\n copied = True\n pdf[column] = s\n\n # Convert pandas.DataFrame to list of numpy records\n np_records = pdf.to_records(index=False)\n\n # Check if any columns need to be fixed for Spark to infer properly\n if len(np_records) > 0:\n record_dtype = self._get_numpy_record_dtype(np_records[0])\n if record_dtype is not None:\n return [r.astype(record_dtype).tolist() for r in np_records]\n\n # Convert list of numpy records to python lists\n return [r.tolist() for r in np_records]\n\n def _get_numpy_record_dtype(self, rec):\n \"\"\"\n Used when converting a pandas.DataFrame to Spark using to_records(), this will correct\n the dtypes of 
fields in a record so they can be properly loaded into Spark.\n :param rec: a numpy record to check field dtypes\n :return corrected dtype for a numpy.record or None if no correction needed\n \"\"\"\n import numpy as np\n cur_dtypes = rec.dtype\n col_names = cur_dtypes.names\n record_type_list = []\n has_rec_fix = False\n for i in xrange(len(cur_dtypes)):\n curr_type = cur_dtypes[i]\n # If type is a datetime64 timestamp, convert to microseconds\n # NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,\n # conversion from [us] or lower will lead to py datetime objects, see SPARK-22417\n if curr_type == np.dtype('datetime64[ns]'):\n curr_type = 'datetime64[us]'\n has_rec_fix = True\n record_type_list.append((str(col_names[i]), curr_type))\n return np.dtype(record_type_list) if has_rec_fix else None\n\n def _create_from_pandas_with_arrow(self, pdf, schema, timezone):\n \"\"\"\n Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting\n to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the\n data types will be used to coerce the data in Pandas to Arrow conversion.\n \"\"\"\n from pyspark.sql import SparkSession\n from pyspark.sql.dataframe import DataFrame\n\n assert isinstance(self, SparkSession)\n\n from pyspark.sql.pandas.serializers import ArrowStreamPandasSerializer\n from pyspark.sql.types import TimestampType\n from pyspark.sql.pandas.types import from_arrow_type, to_arrow_type\n from pyspark.sql.pandas.utils import require_minimum_pandas_version, \\\n require_minimum_pyarrow_version\n\n require_minimum_pandas_version()\n require_minimum_pyarrow_version()\n\n from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype\n import pyarrow as pa\n\n # Create the Spark schema from list of names passed in with Arrow types\n if isinstance(schema, (list, tuple)):\n arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)\n struct = StructType()\n for name, field in zip(schema, arrow_schema):\n struct.add(name, from_arrow_type(field.type), nullable=field.nullable)\n schema = struct\n\n # Determine arrow types to coerce data when creating batches\n if isinstance(schema, StructType):\n arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]\n elif isinstance(schema, DataType):\n raise ValueError(\"Single data type %s is not supported with Arrow\" % str(schema))\n else:\n # Any timestamps must be coerced to be compatible with Spark\n arrow_types = [to_arrow_type(TimestampType())\n if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None\n for t in pdf.dtypes]\n\n # Slice the DataFrame to be batched\n step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up\n pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))\n\n # Create list of Arrow (columns, type) for serializer dump_stream\n arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]\n for pdf_slice in pdf_slices]\n\n jsqlContext = self._wrapped._jsqlContext\n\n safecheck = self._wrapped._conf.arrowSafeTypeConversion()\n col_by_name = True # col by name only applies to StructType columns, can't happen here\n ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)\n\n def reader_func(temp_filename):\n return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)\n\n def create_RDD_server():\n return self._jvm.ArrowRDDServer(jsqlContext)\n\n # Create Spark DataFrame from Arrow stream file, using one batch per partition\n 
jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)\n jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)\n df = DataFrame(jdf, self._wrapped)\n df._schema = schema\n return df\n\n\ndef _test():\n import doctest\n from pyspark.sql import SparkSession\n import pyspark.sql.pandas.conversion\n globs = pyspark.sql.pandas.conversion.__dict__.copy()\n spark = SparkSession.builder\\\n .master(\"local[4]\")\\\n .appName(\"sql.pandas.conversion tests\")\\\n .getOrCreate()\n globs['spark'] = spark\n (failure_count, test_count) = doctest.testmod(\n pyspark.sql.pandas.conversion, globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.api.types.is_datetime64tz_dtype",
"pandas.api.types.is_datetime64_dtype",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
njustkmg/PaddleMM | [
"92ae66d6e27c7a666820bc7baf8fd8fa2bd74aa5",
"92ae66d6e27c7a666820bc7baf8fd8fa2bd74aa5"
] | [
"torchmm/models/retrieval/sgraf.py",
"paddlemm/engines/base_trainer.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\n\nfrom .layers.contrastive import ContrastiveLoss\nfrom .layers.utils import l1norm, l2norm\nfrom .layers.img_enc import EncoderImage\nfrom .layers.txt_enc import EncoderText\n\n\nclass VisualSA(nn.Module):\n \"\"\"\n Build global image representations by self-attention.\n Args: - local: local region embeddings, shape: (batch_size, 36, 1024)\n - raw_global: raw image by averaging regions, shape: (batch_size, 1024)\n Returns: - new_global: final image by self-attention, shape: (batch_size, 1024).\n \"\"\"\n def __init__(self, embed_dim, dropout_rate, num_region):\n super(VisualSA, self).__init__()\n\n self.embedding_local = nn.Sequential(nn.Linear(embed_dim, embed_dim),\n nn.BatchNorm1d(num_region),\n nn.Tanh(), nn.Dropout(dropout_rate))\n self.embedding_global = nn.Sequential(nn.Linear(embed_dim, embed_dim),\n nn.BatchNorm1d(embed_dim),\n nn.Tanh(), nn.Dropout(dropout_rate))\n self.embedding_common = nn.Sequential(nn.Linear(embed_dim, 1))\n\n self.init_weights()\n self.softmax = nn.Softmax(dim=1)\n\n def init_weights(self):\n for embeddings in self.children():\n for m in embeddings:\n if isinstance(m, nn.Linear):\n r = np.sqrt(6.) / np.sqrt(m.in_features + m.out_features)\n m.weight.data.uniform_(-r, r)\n m.bias.data.fill_(0)\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, local, raw_global):\n # compute embedding of local regions and raw global image\n l_emb = self.embedding_local(local)\n g_emb = self.embedding_global(raw_global)\n\n # compute the normalized weights, shape: (batch_size, 36)\n g_emb = g_emb.unsqueeze(1).repeat(1, l_emb.size(1), 1)\n common = l_emb.mul(g_emb)\n weights = self.embedding_common(common).squeeze(2)\n weights = self.softmax(weights)\n\n # compute final image, shape: (batch_size, 1024)\n new_global = (weights.unsqueeze(2) * local).sum(dim=1)\n new_global = l2norm(new_global, dim=-1)\n\n return new_global\n\n\nclass TextSA(nn.Module):\n \"\"\"\n Build global text representations by self-attention.\n Args: - local: local word embeddings, shape: (batch_size, L, 1024)\n - raw_global: raw text by averaging words, shape: (batch_size, 1024)\n Returns: - new_global: final text by self-attention, shape: (batch_size, 1024).\n \"\"\"\n\n def __init__(self, embed_dim, dropout_rate):\n super(TextSA, self).__init__()\n\n self.embedding_local = nn.Sequential(nn.Linear(embed_dim, embed_dim),\n nn.Tanh(), nn.Dropout(dropout_rate))\n self.embedding_global = nn.Sequential(nn.Linear(embed_dim, embed_dim),\n nn.Tanh(), nn.Dropout(dropout_rate))\n self.embedding_common = nn.Sequential(nn.Linear(embed_dim, 1))\n\n self.init_weights()\n self.softmax = nn.Softmax(dim=1)\n\n def init_weights(self):\n for embeddings in self.children():\n for m in embeddings:\n if isinstance(m, nn.Linear):\n r = np.sqrt(6.) 
/ np.sqrt(m.in_features + m.out_features)\n m.weight.data.uniform_(-r, r)\n m.bias.data.fill_(0)\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, local, raw_global):\n # compute embedding of local words and raw global text\n l_emb = self.embedding_local(local)\n g_emb = self.embedding_global(raw_global)\n\n # compute the normalized weights, shape: (batch_size, L)\n g_emb = g_emb.unsqueeze(1).repeat(1, l_emb.size(1), 1)\n common = l_emb.mul(g_emb)\n weights = self.embedding_common(common).squeeze(2)\n weights = self.softmax(weights)\n\n # compute final text, shape: (batch_size, 1024)\n new_global = (weights.unsqueeze(2) * local).sum(dim=1)\n new_global = l2norm(new_global, dim=-1)\n\n return new_global\n\n\nclass GraphReasoning(nn.Module):\n \"\"\"\n Perform the similarity graph reasoning with a full-connected graph\n Args: - sim_emb: global and local alignments, shape: (batch_size, L+1, 256)\n Returns; - sim_sgr: reasoned graph nodes after several steps, shape: (batch_size, L+1, 256)\n \"\"\"\n def __init__(self, sim_dim):\n super(GraphReasoning, self).__init__()\n\n self.graph_query_w = nn.Linear(sim_dim, sim_dim)\n self.graph_key_w = nn.Linear(sim_dim, sim_dim)\n self.sim_graph_w = nn.Linear(sim_dim, sim_dim)\n self.relu = nn.ReLU()\n\n self.init_weights()\n\n def forward(self, sim_emb):\n sim_query = self.graph_query_w(sim_emb)\n sim_key = self.graph_key_w(sim_emb)\n sim_edge = torch.softmax(torch.bmm(sim_query, sim_key.permute(0, 2, 1)), dim=-1)\n sim_sgr = torch.bmm(sim_edge, sim_emb)\n sim_sgr = self.relu(self.sim_graph_w(sim_sgr))\n return sim_sgr\n\n def init_weights(self):\n for m in self.children():\n if isinstance(m, nn.Linear):\n r = np.sqrt(6.) / np.sqrt(m.in_features + m.out_features)\n m.weight.data.uniform_(-r, r)\n m.bias.data.fill_(0)\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\nclass AttentionFiltration(nn.Module):\n \"\"\"\n Perform the similarity Attention Filtration with a gate-based attention\n Args: - sim_emb: global and local alignments, shape: (batch_size, L+1, 256)\n Returns; - sim_saf: aggregated alignment after attention filtration, shape: (batch_size, 256)\n \"\"\"\n def __init__(self, sim_dim):\n super(AttentionFiltration, self).__init__()\n\n self.attn_sim_w = nn.Linear(sim_dim, 1)\n self.bn = nn.BatchNorm1d(1)\n\n self.init_weights()\n\n def forward(self, sim_emb):\n sim_attn = l1norm(torch.sigmoid(self.bn(self.attn_sim_w(sim_emb).permute(0, 2, 1))), dim=-1)\n sim_saf = torch.matmul(sim_attn, sim_emb)\n sim_saf = l2norm(sim_saf.squeeze(1), dim=-1)\n return sim_saf\n\n def init_weights(self):\n for m in self.children():\n if isinstance(m, nn.Linear):\n r = np.sqrt(6.) 
/ np.sqrt(m.in_features + m.out_features)\n m.weight.data.uniform_(-r, r)\n m.bias.data.fill_(0)\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\nclass EncoderSimilarity(nn.Module):\n \"\"\"\n Compute the image-text similarity by SGR, SAF, AVE\n Args: - img_emb: local region embeddings, shape: (batch_size, 36, 1024)\n - cap_emb: local word embeddings, shape: (batch_size, L, 1024)\n Returns:\n - sim_all: final image-text similarities, shape: (batch_size, batch_size).\n \"\"\"\n def __init__(self, embed_size, sim_dim, module_name='AVE', sgr_step=3):\n super(EncoderSimilarity, self).__init__()\n self.module_name = module_name\n\n self.v_global_w = VisualSA(embed_size, 0.4, 36)\n self.t_global_w = TextSA(embed_size, 0.4)\n\n self.sim_tranloc_w = nn.Linear(embed_size, sim_dim)\n self.sim_tranglo_w = nn.Linear(embed_size, sim_dim)\n\n self.sim_eval_w = nn.Linear(sim_dim, 1)\n self.sigmoid = nn.Sigmoid()\n\n if module_name == 'SGR':\n self.SGR_module = nn.ModuleList([GraphReasoning(sim_dim) for i in range(sgr_step)])\n elif module_name == 'SAF':\n self.SAF_module = AttentionFiltration(sim_dim)\n else:\n raise ValueError('Invalid input of opt.module_name in opts.py')\n\n self.init_weights()\n\n def forward(self, img_emb, cap_emb, cap_lens):\n sim_all = []\n n_image = img_emb.size(0)\n n_caption = cap_emb.size(0)\n\n # get enhanced global images by self-attention\n img_ave = torch.mean(img_emb, 1)\n img_glo = self.v_global_w(img_emb, img_ave)\n\n for i in range(n_caption):\n # get the i-th sentence\n n_word = cap_lens[i]\n cap_i = cap_emb[i, :n_word, :].unsqueeze(0)\n cap_i_expand = cap_i.repeat(n_image, 1, 1)\n\n # get enhanced global i-th text by self-attention\n cap_ave_i = torch.mean(cap_i, 1)\n cap_glo_i = self.t_global_w(cap_i, cap_ave_i)\n\n # local-global alignment construction\n Context_img = SCAN_attention(cap_i_expand, img_emb, smooth=9.0)\n sim_loc = torch.pow(torch.sub(Context_img, cap_i_expand), 2)\n sim_loc = l2norm(self.sim_tranloc_w(sim_loc), dim=-1)\n\n sim_glo = torch.pow(torch.sub(img_glo, cap_glo_i), 2)\n sim_glo = l2norm(self.sim_tranglo_w(sim_glo), dim=-1)\n\n # concat the global and local alignments\n sim_emb = torch.cat([sim_glo.unsqueeze(1), sim_loc], 1)\n\n # compute the final similarity vector\n if self.module_name == 'SGR':\n for module in self.SGR_module:\n sim_emb = module(sim_emb)\n sim_vec = sim_emb[:, 0, :]\n else:\n sim_vec = self.SAF_module(sim_emb)\n\n # compute the final similarity score\n sim_i = self.sigmoid(self.sim_eval_w(sim_vec))\n sim_all.append(sim_i)\n\n # (n_image, n_caption)\n sim_all = torch.cat(sim_all, 1)\n\n return sim_all\n\n def init_weights(self):\n for m in self.children():\n if isinstance(m, nn.Linear):\n r = np.sqrt(6.) 
/ np.sqrt(m.in_features + m.out_features)\n m.weight.data.uniform_(-r, r)\n m.bias.data.fill_(0)\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\ndef SCAN_attention(query, context, smooth, eps=1e-8):\n \"\"\"\n query: (n_context, queryL, d)\n context: (n_context, sourceL, d)\n \"\"\"\n # --> (batch, d, queryL)\n queryT = torch.transpose(query, 1, 2)\n\n # (batch, sourceL, d)(batch, d, queryL)\n # --> (batch, sourceL, queryL)\n attn = torch.bmm(context, queryT)\n\n attn = nn.LeakyReLU(0.1)(attn)\n attn = l2norm(attn, 2)\n\n # --> (batch, queryL, sourceL)\n attn = torch.transpose(attn, 1, 2).contiguous()\n # --> (batch, queryL, sourceL\n attn = F.softmax(attn*smooth, dim=2)\n\n # --> (batch, sourceL, queryL)\n attnT = torch.transpose(attn, 1, 2).contiguous()\n\n # --> (batch, d, sourceL)\n contextT = torch.transpose(context, 1, 2)\n # (batch x d x sourceL)(batch x sourceL x queryL)\n # --> (batch, d, queryL)\n weightedContext = torch.bmm(contextT, attnT)\n # --> (batch, queryL, d)\n weightedContext = torch.transpose(weightedContext, 1, 2)\n weightedContext = l2norm(weightedContext, dim=-1)\n\n return weightedContext\n\n\nclass SGRAF(nn.Module):\n \"\"\"\n Similarity Reasoning and Filtration (SGRAF) Network\n \"\"\"\n def __init__(self,\n model_name,\n module_name,\n sgr_step,\n embed_size,\n sim_dim,\n vocab_size,\n word_dim,\n num_layers,\n image_dim,\n margin,\n max_violation,\n use_bi_gru=True,\n image_norm=True,\n text_norm=True,\n **kwargs):\n\n super(SGRAF, self).__init__()\n\n # Build Models\n self.img_enc = EncoderImage(model_name, image_dim, embed_size, image_norm)\n self.txt_enc = EncoderText(model_name, vocab_size, word_dim, embed_size, num_layers,\n use_bi_gru=use_bi_gru, text_norm=text_norm)\n self.sim_enc = EncoderSimilarity(embed_size, sim_dim, module_name, sgr_step)\n\n # Loss and Optimizer\n self.criterion = ContrastiveLoss(margin=margin, max_violation=max_violation)\n\n def forward_emb(self, batch):\n \"\"\"Compute the image and caption embeddings\"\"\"\n\n images = batch['image_feat']\n captions = batch['text_token']\n lengths = batch['text_len']\n\n if torch.cuda.is_available():\n images = images.cuda()\n captions = captions.cuda()\n lengths = lengths.tolist()\n\n # Forward feature encoding\n img_embs = self.img_enc(images)\n cap_embs = self.txt_enc(captions, lengths)\n return img_embs, cap_embs, lengths\n\n def forward_sim(self, batch):\n img_embs, cap_embs, cap_lens = batch\n # Forward similarity encoding\n sims = self.sim_enc(img_embs, cap_embs, cap_lens)\n return sims\n\n def forward(self, batch):\n images = batch['image_feat']\n captions = batch['text_token']\n lengths = batch['text_len']\n\n if torch.cuda.is_available():\n images = images.cuda()\n captions = captions.cuda()\n lengths = lengths.tolist()\n\n img_embs = self.img_enc(images)\n cap_embs = self.txt_enc(captions, lengths)\n sims = self.sim_enc(img_embs, cap_embs, lengths)\n loss = self.criterion(sims)\n\n return loss\n\n @staticmethod\n def cal_sim(model, img_embs, cap_embs, cap_lens, **kwargs):\n shard_size = kwargs.get('shard_size', 100)\n\n n_im_shard = (len(img_embs) - 1) // shard_size + 1\n n_cap_shard = (len(cap_embs) - 1) // shard_size + 1\n\n sims = np.zeros((len(img_embs), len(cap_embs)))\n for i in range(n_im_shard):\n im_start, im_end = shard_size * i, min(shard_size * (i + 1), len(img_embs))\n for j in range(n_cap_shard):\n ca_start, ca_end = shard_size * j, min(shard_size * (j + 1), len(cap_embs))\n\n with torch.no_grad():\n im = 
torch.FloatTensor(img_embs[im_start:im_end])\n ca = torch.FloatTensor(cap_embs[ca_start:ca_end])\n l = cap_lens[ca_start:ca_end].tolist()\n\n if torch.cuda.is_available():\n im = im.cuda()\n ca = ca.cuda()\n\n sim = model.forward_sim((im, ca, l))\n\n sims[im_start:im_end, ca_start:ca_end] = sim.cpu().detach().numpy()\n return sims\n\n",
"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom abc import ABCMeta, abstractmethod\r\n\r\nimport paddle\r\nimport paddle.nn as nn\r\nfrom paddle.io import DataLoader\r\n\r\nfrom paddlemm.models import CMML, NIC, SCAN, SGRAF, AoANet, EarlyFusion, LateFusion, LMFFusion, TMCFusion, VSEPP, IMRAM\r\nfrom paddlemm.datasets import BasicDataset, SemiDataset, PretrainDataset, SampleDataset\r\n\r\n\r\nDatasetMap = {\r\n 'basic': BasicDataset,\r\n 'semi': SemiDataset,\r\n 'sample': SampleDataset,\r\n 'pretrain': PretrainDataset\r\n}\r\n\r\nModelMap = {\r\n 'cmml': CMML,\r\n 'nic': NIC,\r\n 'scan': SCAN,\r\n 'vsepp': VSEPP,\r\n 'imram': IMRAM,\r\n 'sgraf': SGRAF,\r\n 'aoanet': AoANet,\r\n 'earlyfusion': EarlyFusion,\r\n 'latefusion': LateFusion,\r\n 'lmffusion': LMFFusion,\r\n 'tmcfusion': TMCFusion\r\n}\r\n\r\n\r\nclass BaseTrainer(metaclass=ABCMeta):\r\n\r\n def __init__(self, opt):\r\n\r\n self.model_name = opt.model_name.lower()\r\n\r\n self.out_root = opt.out_root\r\n self.logger = opt.logger\r\n\r\n self.num_epochs = opt.num_epochs\r\n self.batch_size = opt.batch_size\r\n self.learning_rate = opt.learning_rate\r\n self.task = opt.task\r\n self.weight_decay = opt.get('weight_decay', 0.)\r\n self.pretrain_epochs = opt.get('pretrain_epochs', 0)\r\n self.num_workers = opt.get('num_workers', 0)\r\n self.val_epoch = opt.get('val_epoch', 1)\r\n\r\n # choose metric for select best model during training\r\n self.select_metric = opt.get('select_metric', 'loss')\r\n\r\n self.dataset = DatasetMap[opt.data_mode](**opt)\r\n\r\n opt.vocab_size = self.dataset.vocab_size\r\n opt.vocab = str(self.dataset.word2idx)\r\n self.model = ModelMap[opt.model_name.lower()](**opt)\r\n\r\n self.grad_clip = opt.get('grad_clip', 0)\r\n if self.grad_clip:\r\n self.grad_clip = nn.clip.ClipGradByValue(opt.grad_clip)\r\n else:\r\n self.grad_clip = None\r\n\r\n self.step_size = opt.get('step_size', 0)\r\n self.gamma = opt.get('gamma', 0.1)\r\n if self.step_size:\r\n self.scheduler = paddle.optimizer.lr.StepDecay(learning_rate=self.learning_rate, step_size=self.step_size,\r\n gamma=self.gamma)\r\n self.optimizer = paddle.optimizer.Adam(parameters=self.model.parameters(),\r\n learning_rate=self.scheduler,\r\n weight_decay=self.weight_decay,\r\n grad_clip=self.grad_clip)\r\n else:\r\n self.optimizer = paddle.optimizer.Adam(parameters=self.model.parameters(),\r\n learning_rate=self.learning_rate,\r\n weight_decay=self.weight_decay,\r\n grad_clip=self.grad_clip)\r\n\r\n def train(self):\r\n\r\n if self.pretrain_epochs > 0:\r\n self.pretrain()\r\n\r\n for epoch in range(1, self.num_epochs + 1):\r\n all_loss = []\r\n self.model.train()\r\n\r\n train_loader = DataLoader(self.dataset.train_(),\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers)\r\n\r\n train_tqdm = tqdm(train_loader(), ncols=80)\r\n for idx, batch in enumerate(train_tqdm):\r\n batch['epoch'] = epoch\r\n loss = self.model(batch)\r\n loss.backward()\r\n \r\n self.optimizer.step()\r\n self.optimizer.clear_grad()\r\n\r\n all_loss.append(loss.item())\r\n train_tqdm.set_description(\"Epoch: {} | Loss: {:.3f}\".format(epoch, loss.item()))\r\n train_tqdm.close()\r\n\r\n if self.step_size:\r\n self.scheduler.step()\r\n\r\n paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'temp.pdparams'))\r\n if epoch % self.val_epoch == 0:\r\n val_res = self.evaluate()\r\n if self.select_metric == 'loss':\r\n if 
val_res['loss'] < self.best_loss:\r\n self.best_loss = val_res['loss']\r\n paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'best_model.pdparams'))\r\n self.logger.info(\"Epoch: {}, valid loss: {:.3f}, Best: {:.3f}\".format(epoch, val_res['loss'], self.best_loss))\r\n else:\r\n if val_res[self.select_metric] > self.best_score:\r\n self.best_score = val_res[self.select_metric]\r\n paddle.save(self.model.state_dict(), os.path.join(self.out_root, 'best_model.pdparams'))\r\n self.logger.info(\"Epoch: {}, valid score: {:.3f}, Best: {:.3f}\".format(epoch, val_res[self.select_metric],\r\n self.best_score))\r\n\r\n def pretrain(self):\r\n # for cmml pretraining\r\n\r\n self.model.train()\r\n for epoch in range(1, self.pretrain_epochs + 1):\r\n all_loss = []\r\n\r\n train_loader = DataLoader(self.dataset.train_(),\r\n batch_size=self.batch_size * 8, # mul 8 to train total supervised data\r\n shuffle=True,\r\n num_workers=self.num_workers)\r\n train_tqdm = tqdm(train_loader(), ncols=80)\r\n\r\n for idx, batch in enumerate(train_tqdm):\r\n self.optimizer.clear_grad()\r\n loss = self.model.pretrain(batch)\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\n all_loss.append(loss.item())\r\n train_tqdm.set_description(\"Pretrain epoch: {} | Loss: {:.3f}\".format(epoch, np.mean(all_loss)))\r\n\r\n @abstractmethod\r\n def evaluate(self):\r\n pass\r\n\r\n @abstractmethod\r\n def test(self):\r\n pass\r\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.functional.softmax",
"torch.transpose",
"torch.mean",
"numpy.sqrt",
"torch.cat",
"torch.no_grad",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.nn.Dropout",
"torch.nn.Sigmoid",
"torch.bmm",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.sub",
"torch.nn.Tanh",
"torch.matmul",
"torch.nn.ReLU"
],
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aianaconda/pytorch-GNN-1st | [
"0c911e23fcc6cad07ac17cb16c1bb769f9cef63e"
] | [
"code_28_GCN.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: 代码医生工作室\r\n@公众号:xiangyuejiqiren (内有更多优秀文章及学习资料)\r\n@来源: <PyTorch深度学习和图神经网络(卷 1)——基础知识>配套代码 \r\n@配套代码技术支持:bbs.aianaconda.com \r\nCreated on Sat Oct 19 20:03:44 2019\r\n\"\"\"\r\n\r\nfrom pathlib import Path #提升路径的兼容性\r\n#引入矩阵运算相关库\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.sparse import coo_matrix,csr_matrix,diags,eye\r\n\r\n#引入深度学习框架库\r\nimport torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\n#引入绘图库\r\nimport matplotlib.pyplot as plt\r\n\r\n'''\r\nconda install pandas\r\n'''\r\n\r\n\r\n#输出运算资源请况\r\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\r\nprint(device)\r\n\r\n#输出样本路径\r\npath = Path('data/cora')\r\nprint(path)\r\n\r\n#读取论文内容数据,并将其转化为数组\r\npaper_features_label = np.genfromtxt(path/'cora.content', dtype=np.str)\r\nprint(paper_features_label,np.shape(paper_features_label))\r\n\r\n#取出数据的第一列:论文的ID\r\npapers = paper_features_label[:,0].astype(np.int32)\r\nprint(papers)\r\n#为论文重新编号,{31336: 0, 1061127: 1,……\r\npaper2idx = {k:v for v,k in enumerate(papers)}\r\n\r\n\r\n#将数据中间部分的字标签取出,转化成矩阵\r\nfeatures = csr_matrix(paper_features_label[:, 1:-1], dtype=np.float32)\r\nprint(np.shape(features))\r\n\r\n#将最后一项的论文分类属性取出,并转化为分类索引\r\nlabels = paper_features_label[:, -1]\r\nlbl2idx = {k:v for v,k in enumerate(sorted(np.unique(labels)))}\r\nlabels = [lbl2idx[e] for e in labels]\r\nprint(lbl2idx,labels[:5])\r\n\r\n\r\n#读取论文关系数据,并将其转化为数组\r\nedges = np.genfromtxt(path/'cora.cites', dtype=np.int32)\r\nprint(edges,np.shape(edges))\r\n#转化为新编号节点间的关系\r\nedges = np.asarray([paper2idx[e] for e in edges.flatten()], np.int32).reshape(edges.shape)\r\nprint(edges,edges.shape)\r\n\r\n# 计算邻接矩阵(Adjacency matrix) ,行列都为论文个数\r\nadj = coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\r\n shape=(len(labels), len(labels)), dtype=np.float32)\r\n\r\n# Symmetric adjacency matrix\r\n#adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\r\n#生成无向图对称矩阵\r\nadj_long = adj.multiply(adj.T < adj)\r\nadj = adj_long+adj_long.T\r\n\r\n\r\n##############################\r\n\r\ndef normalize(mx):#定义函数,对矩阵数据进行归一化\r\n '''Row-normalize sparse matrix'''\r\n rowsum = np.array(mx.sum(1))#每一篇论文的字数\r\n r_inv = (rowsum ** -1).flatten() #取总字数的倒数\r\n r_inv[np.isinf(r_inv)] = 0.#将Nan值设为0\r\n r_mat_inv = diags(r_inv)#将总字数的倒数做成对角矩阵\r\n mx = r_mat_inv.dot(mx)#左乘一个矩阵,相当于每个元素除以总数\r\n return mx\r\n\r\n#对 features矩阵进行归一化(每行的总和为1)\r\nfeatures = normalize(features)\r\n\r\n\r\n# 对邻接矩阵对角线添加1,将其变为自循环图。同时再对其进行归一化\r\nadj = normalize(adj + eye(adj.shape[0]))\r\n################################################\r\n\r\n\r\n\r\n# Data as tensors\r\nadj = torch.FloatTensor(adj.todense()) #节点间的关系\r\nfeatures = torch.FloatTensor(features.todense())#节点自身的特征\r\nlabels = torch.LongTensor(labels) #每个节点的分类标签\r\n\r\n#划分数据集\r\nn_train = 200\r\nn_val = 300\r\nn_test = len(features) - n_train - n_val\r\nnp.random.seed(34)\r\nidxs = np.random.permutation(len(features))#将原有索引打乱顺序\r\n#计算每个数据集的索引\r\nidx_train = torch.LongTensor(idxs[:n_train])\r\nidx_val = torch.LongTensor(idxs[n_train:n_train+n_val])\r\nidx_test = torch.LongTensor(idxs[n_train+n_val:])\r\n\r\n\r\n#分配运算资源\r\nadj = adj.to(device)\r\nfeatures = features.to(device)\r\nlabels = labels.to(device)\r\nidx_train = idx_train.to(device)\r\nidx_val = idx_val.to(device)\r\nidx_test = idx_test.to(device)\r\n\r\n\r\ndef mish(x):\t\t\t\t\t#Mish激活函数\r\n return x *( torch.tanh(F.softplus(x)))\r\n\r\n#图卷积类\r\nclass GraphConvolution(nn.Module):\r\n def __init__(self, f_in, f_out, 
use_bias=True, activation= mish):\r\n super().__init__()\r\n self.f_in = f_in\r\n self.f_out = f_out\r\n self.use_bias = use_bias\r\n self.activation = activation\r\n self.weight = nn.Parameter(torch.FloatTensor(f_in, f_out))\r\n self.bias = nn.Parameter(torch.FloatTensor(f_out)) if use_bias else None\r\n self.initialize_weights()\r\n \r\n def initialize_weights(self):\r\n if self.activation is None: \r\n nn.init.xavier_uniform_(self.weight)\r\n else: \r\n nn.init.kaiming_uniform_(self.weight, nonlinearity='leaky_relu')\r\n if self.use_bias: \r\n nn.init.zeros_(self.bias)\r\n \r\n def forward(self, input, adj):\r\n support = torch.mm(input, self.weight)\r\n output = torch.mm(adj, support)\r\n if self.use_bias: \r\n output.add_(self.bias)\r\n \r\n if self.activation is not None: \r\n output = self.activation(output)\r\n return output\r\n\r\nclass GCN(nn.Module):\r\n def __init__(self, f_in, n_classes, hidden=[16], dropout_p=0.5):\r\n super().__init__()\r\n layers = []\r\n for f_in,f_out in zip([f_in]+hidden[:-1], hidden):\r\n layers += [GraphConvolution(f_in, f_out)]\r\n \r\n self.layers = nn.Sequential(*layers)\r\n self.dropout_p = dropout_p\r\n \r\n self.out_layer = GraphConvolution(f_out, n_classes, activation=None)\r\n \r\n def forward(self, x, adj):\r\n for layer in self.layers:\r\n x = layer(x, adj)\r\n F.dropout(x, self.dropout_p, training=self.training, inplace=True) #函数方式调用dropout必须用training标志\r\n \r\n return self.out_layer(x, adj)\r\n\r\n\r\n\r\nn_labels = labels.max().item() + 1 #分类个数 7\r\nn_features = features.shape[1] #节点个数 1433\r\nprint(n_labels, n_features)\r\n\r\n\r\ndef accuracy(output,y):\r\n return (output.argmax(1) == y).type(torch.float32).mean().item()\r\n\r\ndef step():\r\n model.train()\r\n optimizer.zero_grad()\r\n output = model(features, adj)\r\n loss = F.cross_entropy(output[idx_train], labels[idx_train])\r\n acc = accuracy(output[idx_train], labels[idx_train])\r\n loss.backward()\r\n optimizer.step()\r\n return loss.item(), acc\r\n\r\ndef evaluate(idx):\r\n model.eval()\r\n output = model(features, adj)\r\n loss = F.cross_entropy(output[idx], labels[idx]).item()\r\n return loss, accuracy(output[idx], labels[idx])\r\n\r\n\r\n\r\nmodel = GCN(n_features, n_labels, hidden=[16, 32, 16]).to(device)\r\n\r\n\r\nfrom ranger import *\r\noptimizer = Ranger(model.parameters())\r\n\r\n\r\n\r\n\r\nfrom tqdm import tqdm #pip install tqdm\r\n#训练模型\r\nepochs = 1000#400#500\r\n\r\nprint_steps = 50\r\ntrain_loss, train_acc = [], []\r\nval_loss, val_acc = [], []\r\nfor i in tqdm(range(epochs)):\r\n tl, ta = step()\r\n train_loss += [tl]\r\n train_acc += [ta]\r\n if (i+1)%print_steps == 0 or i == 0:\r\n tl, ta = evaluate(idx_train)\r\n vl, va = evaluate(idx_val)\r\n val_loss += [vl]\r\n val_acc += [va]\r\n print(f'{i+1:6d}/{epochs}: train_loss={tl:.4f}, train_acc={ta:.4f}'+\r\n f', val_loss={vl:.4f}, val_acc={va:.4f}')\r\n\r\n#输出最终结果\r\nfinal_train, final_val, final_test = evaluate(idx_train), evaluate(idx_val), evaluate(idx_test)\r\nprint(f'Train : loss={final_train[0]:.4f}, accuracy={final_train[1]:.4f}')\r\nprint(f'Validation: loss={final_val[0]:.4f}, accuracy={final_val[1]:.4f}')\r\nprint(f'Test : loss={final_test[0]:.4f}, accuracy={final_test[1]:.4f}')\r\n\r\n\r\n#可视化训练过程\r\nfig, axes = plt.subplots(1, 2, figsize=(15,5))\r\nax = axes[0]\r\naxes[0].plot(train_loss[::print_steps] + [train_loss[-1]], label='Train')\r\naxes[0].plot(val_loss, label='Validation')\r\naxes[1].plot(train_acc[::print_steps] + [train_acc[-1]], label='Train')\r\naxes[1].plot(val_acc, 
label='Validation')\r\nfor ax,t in zip(axes, ['Loss', 'Accuracy']): ax.legend(), ax.set_title(t, size=15)\r\n\r\n\r\n\r\n#输出模型预测结果\r\noutput = model(features, adj)\r\n\r\nsamples = 10\r\nidx_sample = idx_test[torch.randperm(len(idx_test))[:samples]]\r\n\r\nidx2lbl = {v:k for k,v in lbl2idx.items()}\r\ndf = pd.DataFrame({'Real': [idx2lbl[e] for e in labels[idx_sample].tolist()],\r\n 'Pred': [idx2lbl[e] for e in output[idx_sample].argmax(1).tolist()]})\r\nprint(df)\r\n\r\n\r\n"
] | [
[
"torch.nn.functional.dropout",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.device",
"torch.mm",
"numpy.unique",
"scipy.sparse.diags",
"torch.nn.functional.softplus",
"torch.nn.Sequential",
"torch.LongTensor",
"scipy.sparse.csr_matrix",
"numpy.genfromtxt",
"torch.nn.init.zeros_",
"numpy.random.seed",
"scipy.sparse.eye",
"torch.nn.functional.cross_entropy",
"matplotlib.pyplot.subplots",
"numpy.ones",
"torch.nn.init.kaiming_uniform_",
"numpy.shape",
"torch.nn.init.xavier_uniform_",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
rkingsbury/atomate | [
"d26b65d5c46882d9585f14188514d9a65276336c",
"d26b65d5c46882d9585f14188514d9a65276336c"
] | [
"atomate/qchem/tests/test_drones.py",
"atomate/vasp/analysis/phonopy.py"
] | [
"# Copyright (c) Materials Virtual Lab.\n# Distributed under the terms of the BSD License.\n\nimport os\nimport unittest\nfrom monty.serialization import loadfn\nfrom atomate.qchem.drones import QChemDrone\nfrom pymatgen.core.structure import Molecule\nimport numpy as np\nfrom pymatgen.analysis.local_env import OpenBabelNN\nfrom pymatgen.analysis.graphs import MoleculeGraph\n\n__author__ = \"Samuel Blau\"\n__copyright__ = \"Copyright 2019, The Materials Project\"\n__version__ = \"0.1\"\n__maintainer__ = \"Samuel Blau\"\n__email__ = \"[email protected]\"\n__status__ = \"Alpha\"\n__date__ = \"4/29/18\"\n__credits__ = \"Brandon Wood, Shyam Dwaraknath\"\n\nmodule_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))\n\n\nclass QChemDroneTest(unittest.TestCase):\n def test_assimilate_opt(self):\n drone = QChemDrone()\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"FF_working\"),\n input_file=\"test.qin.opt_1\",\n output_file=\"test.qout.opt_1\",\n multirun=False,\n )\n self.assertEqual(doc[\"input\"][\"job_type\"], \"opt\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"opt\")\n self.assertEqual(doc[\"output\"][\"final_energy\"], -348.652462579636)\n self.assertEqual(doc[\"walltime\"], 62.83)\n self.assertEqual(doc[\"cputime\"], 715.76)\n self.assertEqual(doc[\"smiles\"], \"O1[C](O[Li])OC=C1\")\n self.assertEqual(doc[\"formula_pretty\"], \"LiH2(CO)3\")\n self.assertEqual(doc[\"formula_anonymous\"], \"AB2C3D3\")\n self.assertEqual(doc[\"chemsys\"], \"C-H-Li-O\")\n self.assertEqual(doc[\"pointgroup\"], \"Cs\")\n self.assertIn(\"custodian\", doc)\n self.assertIn(\"calcs_reversed\", doc)\n self.assertIn(\"initial_molecule\", doc[\"input\"])\n self.assertIn(\"initial_molecule\", doc[\"output\"])\n self.assertIn(\"optimized_molecule\", doc[\"output\"])\n self.assertIn(\"last_updated\", doc)\n self.assertIn(\"dir_name\", doc)\n self.assertEqual(len(doc[\"calcs_reversed\"]), 1)\n\n def test_assimilate_freq(self):\n drone = QChemDrone()\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"FF_working\"),\n input_file=\"test.qin.freq_1\",\n output_file=\"test.qout.freq_1\",\n multirun=False,\n )\n self.assertEqual(doc[\"input\"][\"job_type\"], \"freq\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"freq\")\n test_freqs = np.array(\n [\n 12.52,\n 45.28,\n 260.96,\n 329.08,\n 531.01,\n 582.11,\n 744.91,\n 779.2,\n 800.47,\n 863.15,\n 928.68,\n 969.0,\n 1092.86,\n 1124.0,\n 1147.64,\n 1209.1,\n 1387.39,\n 1693.97,\n 1913.05,\n 3316.2,\n 3341.73,\n ]\n )\n for ii in enumerate(test_freqs):\n self.assertEqual(test_freqs[ii[0]], doc[\"output\"][\"frequencies\"][ii[0]])\n self.assertEqual(doc[\"output\"][\"enthalpy\"], 37.547)\n self.assertEqual(doc[\"output\"][\"entropy\"], 83.81)\n self.assertEqual(doc[\"walltime\"], 394.45)\n self.assertEqual(doc[\"cputime\"], 997.39)\n self.assertEqual(doc[\"smiles\"], \"O1[C](O[Li])OC=C1\")\n self.assertEqual(doc[\"formula_pretty\"], \"LiH2(CO)3\")\n self.assertEqual(doc[\"formula_anonymous\"], \"AB2C3D3\")\n self.assertEqual(doc[\"chemsys\"], \"C-H-Li-O\")\n self.assertEqual(doc[\"pointgroup\"], \"Cs\")\n self.assertIn(\"custodian\", doc)\n self.assertIn(\"calcs_reversed\", doc)\n self.assertIn(\"initial_molecule\", doc[\"input\"])\n self.assertIn(\"initial_molecule\", doc[\"output\"])\n self.assertIn(\"last_updated\", doc)\n self.assertIn(\"dir_name\", doc)\n self.assertEqual(len(doc[\"calcs_reversed\"]), 1)\n self.assertEqual(doc[\"output\"][\"final_energy\"], -348.6524625796)\n\n def 
test_assimilate_FF(self):\n drone = QChemDrone(\n runs=[\n \"opt_0\",\n \"freq_0\",\n \"opt_1\",\n \"freq_1\",\n \"opt_2\",\n \"freq_2\",\n \"opt_3\",\n \"freq_3\",\n ],\n additional_fields={\"special_run_type\": \"frequency_flattener\"},\n )\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"FF_working\"),\n input_file=\"test.qin\",\n output_file=\"test.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"special_run_type\"], \"frequency_flattener\")\n self.assertEqual(doc[\"input\"][\"job_type\"], \"opt\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"freq\")\n test_freqs = np.array(\n [\n 12.52,\n 45.28,\n 260.96,\n 329.08,\n 531.01,\n 582.11,\n 744.91,\n 779.2,\n 800.47,\n 863.15,\n 928.68,\n 969.0,\n 1092.86,\n 1124.0,\n 1147.64,\n 1209.1,\n 1387.39,\n 1693.97,\n 1913.05,\n 3316.2,\n 3341.73,\n ]\n )\n for ii in enumerate(test_freqs):\n self.assertEqual(test_freqs[ii[0]], doc[\"output\"][\"frequencies\"][ii[0]])\n self.assertEqual(\n doc[\"output\"][\"frequencies\"][ii[0]],\n doc[\"calcs_reversed\"][0][\"frequencies\"][ii[0]],\n )\n self.assertEqual(doc[\"output\"][\"enthalpy\"], 37.547)\n self.assertEqual(doc[\"output\"][\"entropy\"], 83.81)\n self.assertEqual(doc[\"num_frequencies_flattened\"], 1)\n self.assertEqual(doc[\"walltime\"], 935.29)\n self.assertEqual(doc[\"cputime\"], 3616.6400000000003)\n self.assertEqual(doc[\"smiles\"], \"O1[C](O[Li])OC=C1\")\n self.assertEqual(doc[\"formula_pretty\"], \"LiH2(CO)3\")\n self.assertEqual(doc[\"formula_anonymous\"], \"AB2C3D3\")\n self.assertEqual(doc[\"chemsys\"], \"C-H-Li-O\")\n self.assertEqual(doc[\"pointgroup\"], \"Cs\")\n self.assertIn(\"custodian\", doc)\n self.assertIn(\"calcs_reversed\", doc)\n self.assertIn(\"initial_molecule\", doc[\"input\"])\n self.assertIn(\"initial_molecule\", doc[\"output\"])\n self.assertIn(\"optimized_molecule\", doc[\"output\"])\n self.assertIn(\"last_updated\", doc)\n self.assertIn(\"dir_name\", doc)\n self.assertEqual(len(doc[\"calcs_reversed\"]), 4)\n self.assertEqual(\n list(doc[\"calcs_reversed\"][0].keys()), list(doc[\"calcs_reversed\"][2].keys())\n )\n self.assertEqual(\n list(doc[\"calcs_reversed\"][1].keys()), list(doc[\"calcs_reversed\"][3].keys())\n )\n\n def test_assimilate_bad_FF(self):\n drone = QChemDrone(\n additional_fields={\"special_run_type\": \"frequency_flattener\"}\n )\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"launcher_bad_FF\"),\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"special_run_type\"], \"frequency_flattener\")\n self.assertEqual(doc[\"input\"][\"job_type\"], \"opt\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"freq\")\n self.assertEqual(doc[\"state\"], \"unsuccessful\")\n\n def test_multirun(self):\n drone = QChemDrone()\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"julian_nt\"),\n input_file=\"julian.qin\",\n output_file=\"julian.qout\",\n multirun=True,\n )\n self.assertEqual(doc[\"input\"][\"job_type\"], \"optimization\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"frequency\")\n test_freqs = np.array(\n [\n -69.17,\n 117.81,\n 244.67,\n 257.93,\n 530.0,\n 579.64,\n 737.42,\n 771.1,\n 787.32,\n 869.29,\n 924.77,\n 962.67,\n 1084.55,\n 1117.49,\n 1143.1,\n 1196.27,\n 1378.76,\n 1696.26,\n 1860.75,\n 3321.43,\n ]\n )\n for ii in enumerate(test_freqs):\n self.assertEqual(test_freqs[ii[0]], doc[\"output\"][\"frequencies\"][ii[0]])\n self.assertEqual(\n 
doc[\"output\"][\"frequencies\"][ii[0]],\n doc[\"calcs_reversed\"][0][\"frequencies\"][ii[0]],\n )\n self.assertEqual(doc[\"output\"][\"enthalpy\"], 36.755)\n self.assertEqual(doc[\"output\"][\"entropy\"], 74.989)\n self.assertEqual(doc[\"walltime\"], 684.6300000000001)\n self.assertEqual(doc[\"cputime\"], 4039.37)\n self.assertEqual(doc[\"smiles\"], \"O1[C](O[Li])OC=C1\")\n self.assertEqual(doc[\"formula_pretty\"], \"LiH2(CO)3\")\n self.assertEqual(doc[\"formula_anonymous\"], \"AB2C3D3\")\n self.assertEqual(doc[\"chemsys\"], \"C-H-Li-O\")\n self.assertEqual(doc[\"pointgroup\"], \"C2\")\n self.assertIn(\"calcs_reversed\", doc)\n self.assertIn(\"initial_molecule\", doc[\"input\"])\n self.assertIn(\"initial_molecule\", doc[\"output\"])\n self.assertIn(\"optimized_molecule\", doc[\"output\"])\n self.assertIn(\"last_updated\", doc)\n self.assertIn(\"dir_name\", doc)\n self.assertEqual(len(doc[\"calcs_reversed\"]), 3)\n self.assertEqual(doc[\"calcs_reversed\"][0][\"task\"][\"name\"], \"calc2\")\n self.assertEqual(doc[\"calcs_reversed\"][-1][\"task\"][\"name\"], \"calc0\")\n\n def test_assimilate_unstable_opt(self):\n drone = QChemDrone(\n runs=[\n \"opt_0\",\n \"freq_0\",\n \"opt_1\",\n \"freq_1\",\n \"opt_2\",\n \"freq_2\",\n \"opt_3\",\n \"freq_3\",\n ],\n additional_fields={\"special_run_type\": \"frequency_flattener\"},\n )\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"2620_complete\"),\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"input\"][\"job_type\"], \"opt\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"opt\")\n self.assertEqual(doc[\"output\"][\"final_energy\"], \"unstable\")\n self.assertEqual(doc[\"smiles\"], \"[S](=O)[N]S[C]\")\n self.assertEqual(doc[\"state\"], \"unsuccessful\")\n self.assertEqual(doc[\"walltime\"], None)\n self.assertEqual(doc[\"cputime\"], None)\n self.assertEqual(doc[\"formula_pretty\"], \"CS2NO\")\n self.assertEqual(doc[\"formula_anonymous\"], \"ABCD2\")\n self.assertEqual(doc[\"chemsys\"], \"C-N-O-S\")\n self.assertEqual(doc[\"pointgroup\"], \"C1\")\n self.assertEqual(doc[\"orig\"][\"rem\"], doc[\"calcs_reversed\"][-1][\"input\"][\"rem\"])\n self.assertEqual(\n doc[\"orig\"][\"molecule\"], doc[\"calcs_reversed\"][-1][\"input\"][\"molecule\"]\n )\n orig_molgraph = MoleculeGraph.with_local_env_strategy(\n Molecule.from_dict(doc[\"orig\"][\"molecule\"]), OpenBabelNN()\n )\n initial_molgraph = MoleculeGraph.with_local_env_strategy(\n Molecule.from_dict(doc[\"input\"][\"initial_molecule\"]), OpenBabelNN()\n )\n self.assertEqual(orig_molgraph.isomorphic_to(initial_molgraph), True)\n\n def test_assimilate_opt_with_hidden_changes_from_handler(self):\n drone = QChemDrone(\n additional_fields={\"special_run_type\": \"frequency_flattener\"}\n )\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"1746_complete\"),\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"input\"][\"job_type\"], \"opt\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"freq\")\n self.assertEqual(doc[\"output\"][\"final_energy\"], -303.835532370106)\n self.assertEqual(doc[\"smiles\"], \"O1C(=CC1=O)[CH]\")\n self.assertEqual(doc[\"state\"], \"successful\")\n self.assertEqual(doc[\"num_frequencies_flattened\"], 0)\n self.assertEqual(doc[\"walltime\"], 631.54)\n self.assertEqual(doc[\"cputime\"], 7471.17)\n self.assertEqual(doc[\"formula_pretty\"], \"HC2O\")\n self.assertEqual(doc[\"formula_anonymous\"], \"ABC2\")\n 
self.assertEqual(doc[\"chemsys\"], \"C-H-O\")\n self.assertEqual(doc[\"pointgroup\"], \"C1\")\n self.assertEqual(doc[\"orig\"][\"rem\"], doc[\"calcs_reversed\"][-1][\"input\"][\"rem\"])\n orig_molgraph = MoleculeGraph.with_local_env_strategy(\n Molecule.from_dict(doc[\"orig\"][\"molecule\"]), OpenBabelNN()\n )\n initial_molgraph = MoleculeGraph.with_local_env_strategy(\n Molecule.from_dict(doc[\"input\"][\"initial_molecule\"]), OpenBabelNN()\n )\n self.assertEqual(orig_molgraph.isomorphic_to(initial_molgraph), False)\n\n def test_assimilate_disconnected_opt(self):\n drone = QChemDrone(\n additional_fields={\"special_run_type\": \"frequency_flattener\"}\n )\n doc = drone.assimilate(\n path=os.path.join(\n module_dir, \"..\", \"test_files\", \"disconnected_but_converged\"\n ),\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"input\"][\"job_type\"], \"opt\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"freq\")\n self.assertEqual(doc[\"output\"][\"final_energy\"], -303.07602688705)\n self.assertEqual(doc[\"smiles\"], \"O=C.O=C=O\")\n self.assertEqual(doc[\"state\"], \"successful\")\n self.assertEqual(doc[\"num_frequencies_flattened\"], 0)\n self.assertEqual(doc[\"walltime\"], 492.42999999999995)\n self.assertEqual(doc[\"cputime\"], 8825.76)\n self.assertEqual(doc[\"formula_pretty\"], \"H2C2O3\")\n self.assertEqual(doc[\"formula_anonymous\"], \"A2B2C3\")\n self.assertEqual(doc[\"chemsys\"], \"C-H-O\")\n self.assertEqual(doc[\"pointgroup\"], \"C1\")\n self.assertEqual(doc[\"orig\"][\"rem\"], doc[\"calcs_reversed\"][-1][\"input\"][\"rem\"])\n self.assertEqual(\n doc[\"calcs_reversed\"][-1][\"structure_change\"], \"unconnected_fragments\"\n )\n\n def test_assimilate_sp(self):\n drone = QChemDrone()\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"launcher_sp\"),\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"input\"][\"job_type\"], \"sp\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"sp\")\n self.assertEqual(doc[\"output\"][\"final_energy\"], -75.1151765884)\n self.assertEqual(doc[\"walltime\"], 4.69)\n self.assertEqual(doc[\"cputime\"], 134.03)\n self.assertEqual(doc[\"smiles\"], \"[O]\")\n self.assertEqual(doc[\"formula_pretty\"], \"O2\")\n self.assertEqual(doc[\"formula_anonymous\"], \"A\")\n self.assertEqual(doc[\"chemsys\"], \"O\")\n self.assertEqual(doc[\"pointgroup\"], \"Kh\")\n self.assertIn(\"custodian\", doc)\n self.assertIn(\"calcs_reversed\", doc)\n self.assertIn(\"initial_molecule\", doc[\"input\"])\n self.assertIn(\"initial_molecule\", doc[\"output\"])\n self.assertIn(\"last_updated\", doc)\n self.assertIn(\"dir_name\", doc)\n self.assertEqual(len(doc[\"calcs_reversed\"]), 1)\n\n def test_sp_with_orig(self):\n drone = QChemDrone()\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"launcher_bad_sp\"),\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"input\"][\"job_type\"], \"sp\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"sp\")\n self.assertEqual(doc[\"output\"][\"final_energy\"], -74.540726551)\n self.assertEqual(doc[\"walltime\"], 3.9)\n self.assertEqual(doc[\"cputime\"], 113.27)\n self.assertEqual(doc[\"smiles\"], \"[O]\")\n self.assertEqual(doc[\"formula_pretty\"], \"O2\")\n self.assertEqual(doc[\"formula_anonymous\"], \"A\")\n self.assertEqual(doc[\"chemsys\"], \"O\")\n self.assertEqual(doc[\"pointgroup\"], \"Kh\")\n 
self.assertIn(\"custodian\", doc)\n self.assertIn(\"calcs_reversed\", doc)\n self.assertIn(\"initial_molecule\", doc[\"input\"])\n self.assertIn(\"initial_molecule\", doc[\"output\"])\n self.assertIn(\"last_updated\", doc)\n self.assertIn(\"dir_name\", doc)\n self.assertEqual(len(doc[\"calcs_reversed\"]), 1)\n\n def test_FF_switching(self):\n drone = QChemDrone(\n additional_fields={\"special_run_type\": \"frequency_flattener\"}\n )\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"FF_switching\"),\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"special_run_type\"], \"frequency_flattener\")\n self.assertEqual(doc[\"input\"][\"job_type\"], \"opt\")\n self.assertEqual(doc[\"output\"][\"job_type\"], \"freq\")\n self.assertEqual(doc[\"num_frequencies_flattened\"], 1)\n self.assertEqual(doc[\"warnings\"][\"energy_increased\"], True)\n\n def test_FF_with_error_correction(self):\n drone = QChemDrone(\n additional_fields={\n \"special_run_type\": \"frequency_flattener\",\n \"linked\": True,\n }\n )\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"LiH4C2SO4\"),\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"opt_trajectory\"][0][\"energy\"], -784.934084124)\n self.assertEqual(doc[\"opt_trajectory\"][-1][\"energy\"], -784.934280182)\n self.assertEqual(\n doc[\"warnings\"],\n {\n \"missing_analytical_derivates\": True,\n \"mkl\": True,\n \"hessian_local_structure\": True,\n \"internal_coordinates\": True,\n \"diagonalizing_BBt\": True,\n \"eigenvalue_magnitude\": True,\n \"positive_definiteness_endangered\": True,\n \"energy_increased\": True,\n },\n )\n\n def test_custom_smd(self):\n drone = QChemDrone(\n additional_fields={\n \"special_run_type\": \"frequency_flattener\",\n \"linked\": True,\n }\n )\n doc = drone.assimilate(\n path=os.path.join(module_dir, \"..\", \"test_files\", \"custom_smd\"),\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n self.assertEqual(doc[\"custom_smd\"], \"18.5,1.415,0.00,0.735,20.2,0.00,0.00\")\n\n def test_assimilate_critic(self):\n crit_ex_path = os.path.join(\n module_dir, \"..\", \"test_files\", \"critic_test_files\", \"critic_example\"\n )\n drone = QChemDrone()\n doc = drone.assimilate(\n path=crit_ex_path,\n input_file=\"mol.qin\",\n output_file=\"mol.qout\",\n multirun=False,\n )\n # dumpfn(doc[\"critic2\"],os.path.join(crit_ex_path, \"critic2_drone_ref.json\"))\n critic2_drone_ref = loadfn(os.path.join(crit_ex_path, \"critic2_drone_ref.json\"))\n self.assertEqual(doc[\"critic2\"], critic2_drone_ref)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import numpy as np\n\n__author__ = \"Kiran Mathew\"\n__email__ = \"[email protected]\"\n\n# TODO: @matk86 - unit tests?\n\n\ndef get_phonopy_gibbs(\n energies,\n volumes,\n force_constants,\n structure,\n t_min,\n t_step,\n t_max,\n mesh,\n eos,\n pressure=0,\n):\n \"\"\"\n Compute QHA gibbs free energy using the phonopy interface.\n\n Args:\n energies (list):\n volumes (list):\n force_constants (list):\n structure (Structure):\n t_min (float): min temperature\n t_step (float): temperature step\n t_max (float): max temperature\n mesh (list/tuple): reciprocal space density\n eos (str): equation of state used for fitting the energies and the volumes.\n options supported by phonopy: vinet, murnaghan, birch_murnaghan\n pressure (float): in GPa, optional.\n\n Returns:\n (numpy.ndarray, numpy.ndarray): Gibbs free energy, Temperature\n \"\"\"\n\n # quasi-harmonic approx\n phonopy_qha = get_phonopy_qha(\n energies,\n volumes,\n force_constants,\n structure,\n t_min,\n t_step,\n t_max,\n mesh,\n eos,\n pressure=pressure,\n )\n\n # gibbs free energy and temperature\n max_t_index = phonopy_qha._qha._max_t_index\n G = phonopy_qha.get_gibbs_temperature()[:max_t_index]\n T = phonopy_qha._qha._temperatures[:max_t_index]\n return G, T\n\n\ndef get_phonopy_qha(\n energies,\n volumes,\n force_constants,\n structure,\n t_min,\n t_step,\n t_max,\n mesh,\n eos,\n pressure=0,\n):\n \"\"\"\n Return phonopy QHA interface.\n\n Args:\n energies (list):\n volumes (list):\n force_constants (list):\n structure (Structure):\n t_min (float): min temperature\n t_step (float): temperature step\n t_max (float): max temperature\n mesh (list/tuple): reciprocal space density\n eos (str): equation of state used for fitting the energies and the volumes.\n options supported by phonopy: vinet, murnaghan, birch_murnaghan\n pressure (float): in GPa, optional.\n\n Returns:\n PhonopyQHA\n \"\"\"\n from phonopy import Phonopy\n from phonopy.structure.atoms import Atoms as PhonopyAtoms\n from phonopy import PhonopyQHA\n from phonopy.units import EVAngstromToGPa\n\n phon_atoms = PhonopyAtoms(\n symbols=[str(s.specie) for s in structure],\n scaled_positions=structure.frac_coords,\n cell=structure.lattice.matrix,\n )\n scell = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n phonon = Phonopy(phon_atoms, scell)\n # compute the required phonon thermal properties\n temperatures = []\n free_energy = []\n entropy = []\n cv = []\n for f in force_constants:\n phonon.set_force_constants(-np.array(f))\n phonon.set_mesh(list(mesh))\n phonon.set_thermal_properties(t_step=t_step, t_min=t_min, t_max=t_max)\n t, g, e, c = phonon.get_thermal_properties()\n temperatures.append(t)\n free_energy.append(g)\n entropy.append(e)\n cv.append(c)\n\n # add pressure contribution\n energies = np.array(energies) + np.array(volumes) * pressure / EVAngstromToGPa\n # quasi-harmonic approx\n return PhonopyQHA(\n volumes,\n energies,\n eos=eos,\n temperatures=temperatures[0],\n free_energy=np.array(free_energy).T,\n cv=np.array(cv).T,\n entropy=np.array(entropy).T,\n t_max=np.max(temperatures[0]),\n )\n\n\ndef get_phonopy_thermal_expansion(\n energies,\n volumes,\n force_constants,\n structure,\n t_min,\n t_step,\n t_max,\n mesh,\n eos,\n pressure=0,\n):\n \"\"\"\n Compute QHA thermal expansion coefficient using the phonopy interface.\n\n Args:\n energies (list):\n volumes (list):\n force_constants (list):\n structure (Structure):\n t_min (float): min temperature\n t_step (float): temperature step\n t_max (float): max temperature\n mesh (list/tuple): reciprocal space density\n eos 
(str): equation of state used for fitting the energies and the volumes.\n options supported by phonopy: vinet, murnaghan, birch_murnaghan\n pressure (float): in GPa, optional.\n\n Returns:\n (numpy.ndarray, numpy.ndarray): thermal expansion coefficient, Temperature\n \"\"\"\n\n # quasi-harmonic approx\n phonopy_qha = get_phonopy_qha(\n energies,\n volumes,\n force_constants,\n structure,\n t_min,\n t_step,\n t_max,\n mesh,\n eos,\n pressure=pressure,\n )\n\n # thermal expansion coefficient and temperature\n max_t_index = phonopy_qha._qha._max_t_index\n alpha = phonopy_qha.get_thermal_expansion()[:max_t_index]\n T = phonopy_qha._qha._temperatures[:max_t_index]\n return alpha, T\n"
] | [
[
"numpy.array"
],
[
"numpy.max",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Research-lab-KUMS/NeoAnalysis | [
"c5f25b71e16997f3a05f70b1eead11f99a3b7e2b",
"c5f25b71e16997f3a05f70b1eead11f99a3b7e2b",
"c5f25b71e16997f3a05f70b1eead11f99a3b7e2b"
] | [
"NeoAnalysis_Py2.7/NeoAnalysis/neo/io/nixio.py",
"NeoAnalysis_Py2.7/NeoAnalysis/neo/test/iotest/test_pynnio.py",
"NeoAnalysis_Py2.7/NeoAnalysis/graphics.py"
] | [
"# -*- coding: utf-8 -*-\n# Copyright (c) 2016, German Neuroinformatics Node (G-Node)\n# Achilleas Koutsou <[email protected]>\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted under the terms of the BSD License. See\n# LICENSE file in the root of the Project.\n\"\"\"\nModule for reading data from files in the NIX format.\n\nAuthor: Achilleas Koutsou\n\nThis IO supports both writing and reading of NIX files. Reading is supported\nonly if the NIX file was created using this IO.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport time\nfrom datetime import datetime\nfrom collections import Iterable\nimport itertools\nfrom hashlib import md5\n\nimport quantities as pq\nimport numpy as np\n\nfrom ...neo.io.baseio import BaseIO\nfrom ...neo.core import (Block, Segment, ChannelIndex, AnalogSignal,\n IrregularlySampledSignal, Epoch, Event, SpikeTrain, Unit)\nfrom ...neo.io.tools import LazyList\n\ntry:\n import nixio as nix\n HAVE_NIX = True\nexcept ImportError:\n HAVE_NIX = False\n\ntry:\n string_types = basestring\nexcept NameError:\n string_types = str\n\n\ndef stringify(value):\n if value is None:\n return value\n if isinstance(value, bytes):\n value = value.decode()\n return str(value)\n\n\ndef calculate_timestamp(dt):\n if isinstance(dt, datetime):\n return int(time.mktime(dt.timetuple()))\n return int(dt)\n\n\nclass NixIO(BaseIO):\n \"\"\"\n Class for reading and writing NIX files.\n \"\"\"\n\n is_readable = True\n is_writable = True\n\n supported_objects = [Block, Segment, ChannelIndex,\n AnalogSignal, IrregularlySampledSignal,\n Epoch, Event, SpikeTrain, Unit]\n readable_objects = [Block]\n writeable_objects = [Block]\n\n name = \"NIX\"\n extensions = [\"h5\"]\n mode = \"file\"\n\n _container_map = {\n \"segments\": \"groups\",\n \"analogsignals\": \"data_arrays\",\n \"irregularlysampledsignals\": \"data_arrays\",\n \"events\": \"multi_tags\",\n \"epochs\": \"multi_tags\",\n \"spiketrains\": \"multi_tags\",\n \"channel_indexes\": \"sources\",\n \"units\": \"sources\"\n }\n\n def __init__(self, filename, mode=\"rw\"):\n \"\"\"\n Initialise IO instance and NIX file.\n\n :param filename: Full path to the file\n \"\"\"\n\n if not HAVE_NIX:\n raise Exception(\"Failed to import NIX. \"\n \"The NixIO requires the Python bindings for NIX \"\n \"(nixio on PyPi). Try `pip install nixio`.\")\n\n BaseIO.__init__(self, filename)\n self.filename = filename\n if mode == \"ro\":\n filemode = nix.FileMode.ReadOnly\n elif mode == \"rw\":\n filemode = nix.FileMode.ReadWrite\n elif mode == \"ow\":\n filemode = nix.FileMode.Overwrite\n else:\n raise ValueError(\"Invalid mode specified '{}'. 
\"\n \"Valid modes: 'ro' (ReadOnly)', 'rw' (ReadWrite),\"\n \" 'ow' (Overwrite).\".format(mode))\n self.nix_file = nix.File.open(self.filename, filemode, backend=\"h5py\")\n self._object_map = dict()\n self._lazy_loaded = list()\n self._object_hashes = dict()\n self._block_read_counter = 0\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n\n def read_all_blocks(self, cascade=True, lazy=False):\n blocks = list()\n for blk in self.nix_file.blocks:\n blocks.append(self.read_block(\"/\" + blk.name, cascade, lazy))\n return blocks\n\n def read_block(self, path=\"/\", cascade=True, lazy=False):\n if path == \"/\":\n try:\n # Use yield?\n nix_block = self.nix_file.blocks[self._block_read_counter]\n path += nix_block.name\n self._block_read_counter += 1\n except KeyError:\n return None\n else:\n nix_block = self._get_object_at(path)\n neo_block = self._block_to_neo(nix_block)\n neo_block.path = path\n if cascade:\n self._read_cascade(nix_block, path, cascade, lazy)\n self._update_maps(neo_block, lazy)\n return neo_block\n\n def read_segment(self, path, cascade=True, lazy=False):\n nix_group = self._get_object_at(path)\n neo_segment = self._group_to_neo(nix_group)\n neo_segment.path = path\n if cascade:\n self._read_cascade(nix_group, path, cascade, lazy)\n self._update_maps(neo_segment, lazy)\n nix_parent = self._get_parent(path)\n neo_parent = self._get_mapped_object(nix_parent)\n if neo_parent:\n neo_segment.block = neo_parent\n return neo_segment\n\n def read_channelindex(self, path, cascade=True, lazy=False):\n nix_source = self._get_object_at(path)\n neo_rcg = self._source_chx_to_neo(nix_source)\n neo_rcg.path = path\n if cascade:\n self._read_cascade(nix_source, path, cascade, lazy)\n self._update_maps(neo_rcg, lazy)\n nix_parent = self._get_parent(path)\n neo_parent = self._get_mapped_object(nix_parent)\n neo_rcg.block = neo_parent\n return neo_rcg\n\n def read_signal(self, path, lazy=False):\n nix_data_arrays = list()\n parent_group = self._get_parent(path)\n parent_container = parent_group.data_arrays\n signal_group_name = path.split(\"/\")[-1]\n for idx in itertools.count():\n signal_name = \"{}.{}\".format(signal_group_name, idx)\n if signal_name in parent_container:\n nix_data_arrays.append(parent_container[signal_name])\n else:\n break\n # check metadata segment\n group_section = nix_data_arrays[0].metadata\n for da in nix_data_arrays:\n assert da.metadata == group_section,\\\n \"DataArray {} is not a member of signal group {}\".format(\n da.name, group_section.name\n )\n neo_signal = self._signal_da_to_neo(nix_data_arrays, lazy)\n neo_signal.path = path\n if self._find_lazy_loaded(neo_signal) is None:\n self._update_maps(neo_signal, lazy)\n nix_parent = self._get_parent(path)\n neo_parent = self._get_mapped_object(nix_parent)\n neo_signal.segment = neo_parent\n return neo_signal\n\n def read_analogsignal(self, path, cascade=True, lazy=False):\n return self.read_signal(path, lazy)\n\n def read_irregularlysampledsignal(self, path, cascade=True, lazy=False):\n return self.read_signal(path, lazy)\n\n def read_eest(self, path, lazy=False):\n nix_mtag = self._get_object_at(path)\n neo_eest = self._mtag_eest_to_neo(nix_mtag, lazy)\n neo_eest.path = path\n self._update_maps(neo_eest, lazy)\n nix_parent = self._get_parent(path)\n neo_parent = self._get_mapped_object(nix_parent)\n neo_eest.segment = neo_parent\n return neo_eest\n\n def read_epoch(self, path, cascade=True, lazy=False):\n return self.read_eest(path, lazy)\n\n def read_event(self, path, 
cascade=True, lazy=False):\n return self.read_eest(path, lazy)\n\n def read_spiketrain(self, path, cascade=True, lazy=False):\n return self.read_eest(path, lazy)\n\n def read_unit(self, path, cascade=True, lazy=False):\n nix_source = self._get_object_at(path)\n neo_unit = self._source_unit_to_neo(nix_source)\n neo_unit.path = path\n if cascade:\n self._read_cascade(nix_source, path, cascade, lazy)\n self._update_maps(neo_unit, lazy)\n nix_parent = self._get_parent(path)\n neo_parent = self._get_mapped_object(nix_parent)\n neo_unit.channel_index = neo_parent\n return neo_unit\n\n def _block_to_neo(self, nix_block):\n neo_attrs = self._nix_attr_to_neo(nix_block)\n neo_block = Block(**neo_attrs)\n self._object_map[nix_block.id] = neo_block\n return neo_block\n\n def _group_to_neo(self, nix_group):\n neo_attrs = self._nix_attr_to_neo(nix_group)\n neo_segment = Segment(**neo_attrs)\n self._object_map[nix_group.id] = neo_segment\n return neo_segment\n\n def _source_chx_to_neo(self, nix_source):\n neo_attrs = self._nix_attr_to_neo(nix_source)\n chx = list(self._nix_attr_to_neo(c)\n for c in nix_source.sources\n if c.type == \"neo.channelindex\")\n neo_attrs[\"channel_names\"] = np.array([c[\"name\"] for c in chx],\n dtype=\"S\")\n neo_attrs[\"index\"] = np.array([c[\"index\"] for c in chx])\n if \"coordinates\" in chx[0]:\n coord_units = chx[0][\"coordinates.units\"]\n coord_values = list(c[\"coordinates\"] for c in chx)\n neo_attrs[\"coordinates\"] = pq.Quantity(coord_values, coord_units)\n rcg = ChannelIndex(**neo_attrs)\n self._object_map[nix_source.id] = rcg\n return rcg\n\n def _source_unit_to_neo(self, nix_unit):\n neo_attrs = self._nix_attr_to_neo(nix_unit)\n neo_unit = Unit(**neo_attrs)\n self._object_map[nix_unit.id] = neo_unit\n return neo_unit\n\n def _signal_da_to_neo(self, nix_da_group, lazy):\n \"\"\"\n Convert a group of NIX DataArrays to a Neo signal. 
This method expects\n a list of data arrays that all represent the same, multidimensional\n Neo Signal object.\n This returns either an AnalogSignal or IrregularlySampledSignal.\n\n :param nix_da_group: a list of NIX DataArray objects\n :return: a Neo Signal object\n \"\"\"\n nix_da_group = sorted(nix_da_group, key=lambda d: d.name)\n neo_attrs = self._nix_attr_to_neo(nix_da_group[0])\n metadata = nix_da_group[0].metadata\n neo_attrs[\"name\"] = stringify(metadata.name)\n neo_type = nix_da_group[0].type\n\n unit = nix_da_group[0].unit\n if lazy:\n signaldata = pq.Quantity(np.empty(0), unit)\n lazy_shape = (len(nix_da_group[0]), len(nix_da_group))\n else:\n signaldata = np.array([d[:] for d in nix_da_group]).transpose()\n signaldata = pq.Quantity(signaldata, unit)\n lazy_shape = None\n timedim = self._get_time_dimension(nix_da_group[0])\n if (neo_type == \"neo.analogsignal\" or\n isinstance(timedim, nix.pycore.SampledDimension)):\n if lazy:\n sampling_period = pq.Quantity(1, timedim.unit)\n t_start = pq.Quantity(0, timedim.unit)\n else:\n if \"sampling_interval.units\" in metadata.props:\n sample_units = metadata[\"sampling_interval.units\"]\n else:\n sample_units = timedim.unit\n sampling_period = pq.Quantity(timedim.sampling_interval,\n sample_units)\n if \"t_start.units\" in metadata.props:\n tsunits = metadata[\"t_start.units\"]\n else:\n tsunits = timedim.unit\n t_start = pq.Quantity(timedim.offset, tsunits)\n neo_signal = AnalogSignal(\n signal=signaldata, sampling_period=sampling_period,\n t_start=t_start, **neo_attrs\n )\n elif neo_type == \"neo.irregularlysampledsignal\"\\\n or isinstance(timedim, nix.pycore.RangeDimension):\n if lazy:\n times = pq.Quantity(np.empty(0), timedim.unit)\n else:\n times = pq.Quantity(timedim.ticks, timedim.unit)\n neo_signal = IrregularlySampledSignal(\n signal=signaldata, times=times, **neo_attrs\n )\n else:\n return None\n for da in nix_da_group:\n self._object_map[da.id] = neo_signal\n if lazy_shape:\n neo_signal.lazy_shape = lazy_shape\n return neo_signal\n\n def _mtag_eest_to_neo(self, nix_mtag, lazy):\n neo_attrs = self._nix_attr_to_neo(nix_mtag)\n neo_type = nix_mtag.type\n\n time_unit = nix_mtag.positions.unit\n if lazy:\n times = pq.Quantity(np.empty(0), time_unit)\n lazy_shape = np.shape(nix_mtag.positions)\n else:\n times = pq.Quantity(nix_mtag.positions, time_unit)\n lazy_shape = None\n if neo_type == \"neo.epoch\":\n if lazy:\n durations = pq.Quantity(np.empty(0), nix_mtag.extents.unit)\n labels = np.empty(0, dtype='S')\n else:\n durations = pq.Quantity(nix_mtag.extents,\n nix_mtag.extents.unit)\n labels = np.array(nix_mtag.positions.dimensions[0].labels,\n dtype=\"S\")\n eest = Epoch(times=times, durations=durations, labels=labels,\n **neo_attrs)\n elif neo_type == \"neo.event\":\n if lazy:\n labels = np.empty(0, dtype='S')\n else:\n labels = np.array(nix_mtag.positions.dimensions[0].labels,\n dtype=\"S\")\n eest = Event(times=times, labels=labels, **neo_attrs)\n elif neo_type == \"neo.spiketrain\":\n if \"t_start\" in neo_attrs:\n if \"t_start.units\" in neo_attrs:\n t_start_units = neo_attrs[\"t_start.units\"]\n del neo_attrs[\"t_start.units\"]\n else:\n t_start_units = time_unit\n t_start = pq.Quantity(neo_attrs[\"t_start\"], t_start_units)\n del neo_attrs[\"t_start\"]\n else:\n t_start = None\n if \"t_stop\" in neo_attrs:\n if \"t_stop.units\" in neo_attrs:\n t_stop_units = neo_attrs[\"t_stop.units\"]\n del neo_attrs[\"t_stop.units\"]\n else:\n t_stop_units = time_unit\n t_stop = pq.Quantity(neo_attrs[\"t_stop\"], t_stop_units)\n del 
neo_attrs[\"t_stop\"]\n else:\n t_stop = None\n if \"sampling_interval.units\" in neo_attrs:\n interval_units = neo_attrs[\"sampling_interval.units\"]\n del neo_attrs[\"sampling_interval.units\"]\n else:\n interval_units = None\n if \"left_sweep.units\" in neo_attrs:\n left_sweep_units = neo_attrs[\"left_sweep.units\"]\n del neo_attrs[\"left_sweep.units\"]\n else:\n left_sweep_units = None\n eest = SpikeTrain(times=times, t_start=t_start,\n t_stop=t_stop, **neo_attrs)\n if len(nix_mtag.features):\n wfda = nix_mtag.features[0].data\n wftime = self._get_time_dimension(wfda)\n if lazy:\n eest.waveforms = pq.Quantity(np.empty((0, 0, 0)),\n wfda.unit)\n eest.sampling_period = pq.Quantity(1, wftime.unit)\n eest.left_sweep = pq.Quantity(0, wftime.unit)\n else:\n eest.waveforms = pq.Quantity(wfda, wfda.unit)\n if interval_units is None:\n interval_units = wftime.unit\n eest.sampling_period = pq.Quantity(\n wftime.sampling_interval, interval_units\n )\n if left_sweep_units is None:\n left_sweep_units = wftime.unit\n if \"left_sweep\" in wfda.metadata:\n eest.left_sweep = pq.Quantity(\n wfda.metadata[\"left_sweep\"], left_sweep_units\n )\n else:\n return None\n self._object_map[nix_mtag.id] = eest\n if lazy_shape:\n eest.lazy_shape = lazy_shape\n return eest\n\n def _read_cascade(self, nix_obj, path, cascade, lazy):\n neo_obj = self._object_map[nix_obj.id]\n for neocontainer in getattr(neo_obj, \"_child_containers\", []):\n nixcontainer = self._container_map[neocontainer]\n if not hasattr(nix_obj, nixcontainer):\n continue\n if neocontainer == \"channel_indexes\":\n neotype = \"channelindex\"\n else:\n neotype = neocontainer[:-1]\n chpaths = list(path + \"/\" + neocontainer + \"/\" + c.name\n for c in getattr(nix_obj, nixcontainer)\n if c.type == \"neo.\" + neotype)\n if neocontainer in (\"analogsignals\",\n \"irregularlysampledsignals\"):\n chpaths = self._group_signals(chpaths)\n if cascade != \"lazy\":\n read_func = getattr(self, \"read_\" + neotype)\n children = list(read_func(cp, cascade, lazy)\n for cp in chpaths)\n else:\n children = LazyList(self, lazy, chpaths)\n setattr(neo_obj, neocontainer, children)\n\n if isinstance(neo_obj, ChannelIndex):\n # set references to signals\n parent_block_path = \"/\" + path.split(\"/\")[1]\n parent_block = self._get_object_at(parent_block_path)\n ref_das = self._get_referers(nix_obj, parent_block.data_arrays)\n ref_signals = self._get_mapped_objects(ref_das)\n # deduplicate by name\n ref_signals = list(dict((s.name, s) for s in ref_signals).values())\n for sig in ref_signals:\n if isinstance(sig, AnalogSignal):\n neo_obj.analogsignals.append(sig)\n elif isinstance(sig, IrregularlySampledSignal):\n neo_obj.irregularlysampledsignals.append(sig)\n sig.channel_index = neo_obj\n\n elif isinstance(neo_obj, Unit):\n # set references to spiketrains\n parent_block_path = \"/\" + path.split(\"/\")[1]\n parent_block = self._get_object_at(parent_block_path)\n ref_mtags = self._get_referers(nix_obj, parent_block.multi_tags)\n ref_sts = self._get_mapped_objects(ref_mtags)\n for st in ref_sts:\n neo_obj.spiketrains.append(st)\n st.unit = neo_obj\n\n def get(self, path, cascade, lazy):\n parts = path.split(\"/\")\n if len(parts) > 2:\n neotype = parts[-2][:-1]\n else:\n neotype = \"block\"\n if neotype == \"channel_indexe\":\n neotype = \"channelindex\"\n read_func = getattr(self, \"read_\" + neotype)\n return read_func(path, cascade, lazy)\n\n def load_lazy_object(self, obj):\n return self.get(obj.path, cascade=False, lazy=False)\n\n def load_lazy_cascade(self, path, 
lazy):\n \"\"\"\n Loads the object at the location specified by the path and all\n children. Data is loaded if lazy is False.\n\n :param path: Location of object in file\n :param lazy: Do not load data if True\n :return: The loaded object\n \"\"\"\n neoobj = self.get(path, cascade=True, lazy=lazy)\n return neoobj\n\n def write_all_blocks(self, neo_blocks):\n \"\"\"\n Convert all ``neo_blocks`` to the NIX equivalent and write them to the\n file.\n\n :param neo_blocks: List (or iterable) containing Neo blocks\n :return: A list containing the new NIX Blocks\n \"\"\"\n self.resolve_name_conflicts(neo_blocks)\n for bl in neo_blocks:\n self.write_block(bl)\n\n def _write_object(self, obj, loc=\"\"):\n if isinstance(obj, Block):\n containerstr = \"/\"\n else:\n objtype = type(obj).__name__.lower()\n if objtype == \"channelindex\":\n containerstr = \"/channel_indexes/\"\n else:\n containerstr = \"/\" + type(obj).__name__.lower() + \"s/\"\n self.resolve_name_conflicts(obj)\n objpath = loc + containerstr + obj.name\n oldhash = self._object_hashes.get(objpath)\n if oldhash is None:\n try:\n oldobj = self.get(objpath, cascade=False, lazy=False)\n oldhash = self._hash_object(oldobj)\n except (KeyError, IndexError):\n oldhash = None\n newhash = self._hash_object(obj)\n if oldhash != newhash:\n attr = self._neo_attr_to_nix(obj)\n if isinstance(obj, pq.Quantity):\n attr.update(self._neo_data_to_nix(obj))\n if oldhash is None:\n nixobj = self._create_nix_obj(loc, attr)\n else:\n nixobj = self._get_object_at(objpath)\n self._write_attr_annotations(nixobj, attr, objpath)\n if isinstance(obj, pq.Quantity):\n self._write_data(nixobj, attr, objpath)\n else:\n nixobj = self._get_object_at(objpath)\n self._object_map[id(obj)] = nixobj\n self._object_hashes[objpath] = newhash\n self._write_cascade(obj, objpath)\n\n def _create_nix_obj(self, loc, attr):\n parentobj = self._get_object_at(loc)\n if attr[\"type\"] == \"block\":\n nixobj = parentobj.create_block(attr[\"name\"], \"neo.block\")\n elif attr[\"type\"] == \"segment\":\n nixobj = parentobj.create_group(attr[\"name\"], \"neo.segment\")\n elif attr[\"type\"] == \"channelindex\":\n nixobj = parentobj.create_source(attr[\"name\"],\n \"neo.channelindex\")\n elif attr[\"type\"] in (\"analogsignal\", \"irregularlysampledsignal\"):\n blockpath = \"/\" + loc.split(\"/\")[1]\n parentblock = self._get_object_at(blockpath)\n nixobj = list()\n typestr = \"neo.\" + attr[\"type\"]\n parentmd = self._get_or_init_metadata(parentobj, loc)\n sigmd = parentmd.create_section(attr[\"name\"], typestr+\".metadata\")\n for idx, datarow in enumerate(attr[\"data\"]):\n name = \"{}.{}\".format(attr[\"name\"], idx)\n da = parentblock.create_data_array(name, typestr, data=datarow)\n da.metadata = sigmd\n nixobj.append(da)\n parentobj.data_arrays.extend(nixobj)\n elif attr[\"type\"] in (\"epoch\", \"event\", \"spiketrain\"):\n blockpath = \"/\" + loc.split(\"/\")[1]\n parentblock = self._get_object_at(blockpath)\n timesda = parentblock.create_data_array(\n attr[\"name\"]+\".times\", \"neo.\"+attr[\"type\"]+\".times\",\n data=attr[\"data\"]\n )\n nixobj = parentblock.create_multi_tag(\n attr[\"name\"], \"neo.\"+attr[\"type\"], timesda\n )\n parentobj.multi_tags.append(nixobj)\n elif attr[\"type\"] == \"unit\":\n nixobj = parentobj.create_source(attr[\"name\"], \"neo.unit\")\n else:\n raise ValueError(\"Unable to create NIX object. 
Invalid type.\")\n return nixobj\n\n def write_block(self, bl, loc=\"\"):\n \"\"\"\n Convert ``bl`` to the NIX equivalent and write it to the file.\n\n :param bl: Neo block to be written\n :param loc: Unused for blocks\n \"\"\"\n self._write_object(bl, loc)\n self._create_references(bl)\n\n def write_channelindex(self, chx, loc=\"\"):\n \"\"\"\n Convert the provided ``chx`` (ChannelIndex) to a NIX Source and write\n it to the NIX file at the location defined by ``loc``.\n\n :param chx: The Neo ChannelIndex to be written\n :param loc: Path to the parent of the new CHX\n \"\"\"\n self._write_object(chx, loc)\n\n def write_segment(self, seg, loc=\"\"):\n \"\"\"\n Convert the provided ``seg`` to a NIX Group and write it to the NIX\n file at the location defined by ``loc``.\n\n :param seg: Neo seg to be written\n :param loc: Path to the parent of the new Segment\n \"\"\"\n self._write_object(seg, loc)\n\n def write_indices(self, chx, loc=\"\"):\n \"\"\"\n Create NIX Source objects to represent individual indices based on the\n provided ``chx`` (ChannelIndex) write them to the NIX file at\n the parent ChannelIndex object.\n\n :param chx: The Neo ChannelIndex\n :param loc: Path to the CHX\n \"\"\"\n nixsource = self._get_mapped_object(chx)\n for idx, channel in enumerate(chx.index):\n if len(chx.channel_names):\n channame = stringify(chx.channel_names[idx])\n else:\n channame = \"{}.ChannelIndex{}\".format(chx.name, idx)\n if channame in nixsource.sources:\n nixchan = nixsource.sources[channame]\n else:\n nixchan = nixsource.create_source(channame,\n \"neo.channelindex\")\n nixchan.definition = nixsource.definition\n chanpath = loc + \"/channelindex/\" + channame\n chanmd = self._get_or_init_metadata(nixchan, chanpath)\n chanmd[\"index\"] = nix.Value(int(channel))\n if chx.coordinates is not None:\n coords = chx.coordinates[idx]\n coordunits = stringify(coords[0].dimensionality)\n nixcoords = tuple(\n nix.Value(c.rescale(coordunits).magnitude.item())\n for c in coords\n )\n if \"coordinates\" in chanmd:\n del chanmd[\"coordinates\"]\n chanprop = chanmd.create_property(\"coordinates\", nixcoords)\n chanprop.unit = coordunits\n\n def write_analogsignal(self, anasig, loc=\"\"):\n \"\"\"\n Convert the provided ``anasig`` (AnalogSignal) to a list of NIX\n DataArray objects and write them to the NIX file at the location\n defined by ``loc``. All DataArray objects created from the same\n AnalogSignal have their metadata section point to the same object.\n\n :param anasig: The Neo AnalogSignal to be written\n :param loc: Path to the parent of the new AnalogSignal\n \"\"\"\n self._write_object(anasig, loc)\n\n def write_irregularlysampledsignal(self, irsig, loc=\"\"):\n \"\"\"\n Convert the provided ``irsig`` (IrregularlySampledSignal) to a list of\n NIX DataArray objects and write them to the NIX file at the location\n defined by ``loc``. 
All DataArray objects created from the same\n IrregularlySampledSignal have their metadata section point to the same\n object.\n\n :param irsig: The Neo IrregularlySampledSignal to be written\n :param loc: Path to the parent of the new\n :return: The newly created NIX DataArray\n \"\"\"\n self._write_object(irsig, loc)\n\n def write_epoch(self, ep, loc=\"\"):\n \"\"\"\n Convert the provided ``ep`` (Epoch) to a NIX MultiTag and write it to\n the NIX file at the location defined by ``loc``.\n\n :param ep: The Neo Epoch to be written\n :param loc: Path to the parent of the new MultiTag\n \"\"\"\n self._write_object(ep, loc)\n\n def write_event(self, ev, loc=\"\"):\n \"\"\"\n Convert the provided ``ev`` (Event) to a NIX MultiTag and write it to\n the NIX file at the location defined by ``loc``.\n\n :param ev: The Neo Event to be written\n :param loc: Path to the parent of the new MultiTag\n \"\"\"\n self._write_object(ev, loc)\n\n def write_spiketrain(self, sptr, loc=\"\"):\n \"\"\"\n Convert the provided ``sptr`` (SpikeTrain) to a NIX MultiTag and write\n it to the NIX file at the location defined by ``loc``.\n\n :param sptr: The Neo SpikeTrain to be written\n :param loc: Path to the parent of the new MultiTag\n \"\"\"\n self._write_object(sptr, loc)\n\n def write_unit(self, ut, loc=\"\"):\n \"\"\"\n Convert the provided ``ut`` (Unit) to a NIX Source and write it to the\n NIX file at the parent RCG.\n\n :param ut: The Neo Unit to be written\n :param loc: Path to the parent of the new Source\n \"\"\"\n self._write_object(ut, loc)\n\n def _write_cascade(self, neoobj, path=\"\"):\n if isinstance(neoobj, ChannelIndex):\n containers = [\"units\"]\n self.write_indices(neoobj, path)\n elif isinstance(neoobj, Unit):\n containers = []\n else:\n containers = getattr(neoobj, \"_child_containers\", [])\n for neocontainer in containers:\n if neocontainer == \"channel_indexes\":\n neotype = \"channelindex\"\n else:\n neotype = neocontainer[:-1]\n children = getattr(neoobj, neocontainer)\n write_func = getattr(self, \"write_\" + neotype)\n for ch in children:\n write_func(ch, path)\n\n def _create_references(self, block):\n \"\"\"\n Create references between NIX objects according to the supplied Neo\n Block.\n MultiTags reference DataArrays of the same Group.\n DataArrays reference ChannelIndexs as sources, based on Neo\n RCG -> Signal relationships.\n MultiTags (SpikeTrains) reference ChannelIndexs and Units as\n sources, based on Neo RCG -> Unit -> SpikeTrain relationships.\n\n :param block: A Neo Block that has already been converted and mapped to\n NIX objects.\n \"\"\"\n for seg in block.segments:\n group = self._get_mapped_object(seg)\n group_signals = self._get_contained_signals(group)\n for mtag in group.multi_tags:\n if mtag.type in (\"neo.epoch\", \"neo.event\"):\n mtag.references.extend([sig for sig in group_signals\n if sig not in mtag.references])\n for rcg in block.channel_indexes:\n rcgsource = self._get_mapped_object(rcg)\n das = self._get_mapped_objects(rcg.analogsignals +\n rcg.irregularlysampledsignals)\n # flatten nested lists\n das = [da for dalist in das for da in dalist]\n for da in das:\n if rcgsource not in da.sources:\n da.sources.append(rcgsource)\n for unit in rcg.units:\n unitsource = self._get_mapped_object(unit)\n for st in unit.spiketrains:\n stmtag = self._get_mapped_object(st)\n if rcgsource not in stmtag.sources:\n stmtag.sources.append(rcgsource)\n if unitsource not in stmtag.sources:\n stmtag.sources.append(unitsource)\n\n def _get_or_init_metadata(self, nix_obj, 
path):\n \"\"\"\n Creates a metadata Section for the provided NIX object if it doesn't\n have one already. Returns the new or existing metadata section.\n\n :param nix_obj: The object to which the Section is attached\n :param path: Path to nix_obj\n :return: The metadata section of the provided object\n \"\"\"\n parent_parts = path.split(\"/\")[:-2]\n parent_path = \"/\".join(parent_parts)\n if nix_obj.metadata is None:\n if len(parent_parts) == 0: # nix_obj is root block\n parent_metadata = self.nix_file\n else:\n obj_parent = self._get_object_at(parent_path)\n parent_metadata = self._get_or_init_metadata(obj_parent,\n parent_path)\n nix_obj.metadata = parent_metadata.create_section(\n nix_obj.name, nix_obj.type+\".metadata\"\n )\n return nix_obj.metadata\n\n def _get_object_at(self, path):\n \"\"\"\n Returns the object at the location defined by the path.\n ``path`` is a '/' delimited string. Each part of the string alternates\n between an object name and a container.\n\n If the requested object is an AnalogSignal or IrregularlySampledSignal,\n identified by the second-to-last part of the path string, a list of\n (DataArray) objects is returned.\n\n Example path: /block_1/segments/segment_a/events/event_a1\n\n :param path: Path string\n :return: The object at the location defined by the path\n \"\"\"\n if path in (\"\", \"/\"):\n return self.nix_file\n parts = path.split(\"/\")\n if parts[0]:\n ValueError(\"Invalid object path: {}\".format(path))\n if len(parts) == 2: # root block\n return self.nix_file.blocks[parts[1]]\n parent_obj = self._get_parent(path)\n container_name = self._container_map[parts[-2]]\n parent_container = getattr(parent_obj, container_name)\n objname = parts[-1]\n if parts[-2] in [\"analogsignals\", \"irregularlysampledsignals\"]:\n obj = list()\n for idx in itertools.count():\n name = \"{}.{}\".format(objname, idx)\n if name in parent_container:\n obj.append(parent_container[name])\n else:\n break\n else:\n obj = parent_container[objname]\n return obj\n\n def _get_parent(self, path):\n parts = path.split(\"/\")\n parent_path = \"/\".join(parts[:-2])\n parent_obj = self._get_object_at(parent_path)\n return parent_obj\n\n def _get_mapped_objects(self, object_list):\n return list(map(self._get_mapped_object, object_list))\n\n def _get_mapped_object(self, obj):\n # We could use paths here instead\n try:\n if hasattr(obj, \"id\"):\n return self._object_map[obj.id]\n else:\n return self._object_map[id(obj)]\n except KeyError:\n # raise KeyError(\"Failed to find mapped object for {}. 
\"\n # \"Object not yet converted.\".format(obj))\n return None\n\n def _write_attr_annotations(self, nixobj, attr, path):\n if isinstance(nixobj, list):\n for obj in nixobj:\n obj.definition = attr[\"definition\"]\n self._write_attr_annotations(nixobj[0], attr, path)\n return\n else:\n nixobj.definition = attr[\"definition\"]\n if \"created_at\" in attr:\n nixobj.force_created_at(calculate_timestamp(attr[\"created_at\"]))\n if \"file_datetime\" in attr:\n metadata = self._get_or_init_metadata(nixobj, path)\n self._write_property(metadata,\n \"file_datetime\", attr[\"file_datetime\"])\n # metadata[\"file_datetime\"] = attr[\"file_datetime\"]\n if \"rec_datetime\" in attr and attr[\"rec_datetime\"]:\n metadata = self._get_or_init_metadata(nixobj, path)\n # metadata[\"rec_datetime\"] = attr[\"rec_datetime\"]\n self._write_property(metadata,\n \"rec_datetime\", attr[\"rec_datetime\"])\n\n if \"annotations\" in attr:\n metadata = self._get_or_init_metadata(nixobj, path)\n for k, v in attr[\"annotations\"].items():\n self._write_property(metadata, k, v)\n\n def _write_data(self, nixobj, attr, path):\n if isinstance(nixobj, list):\n metadata = self._get_or_init_metadata(nixobj[0], path)\n metadata[\"t_start.units\"] = nix.Value(attr[\"t_start.units\"])\n for obj in nixobj:\n obj.unit = attr[\"data.units\"]\n if attr[\"type\"] == \"analogsignal\":\n timedim = obj.append_sampled_dimension(\n attr[\"sampling_interval\"]\n )\n timedim.unit = attr[\"sampling_interval.units\"]\n elif attr[\"type\"] == \"irregularlysampledsignal\":\n timedim = obj.append_range_dimension(attr[\"times\"])\n timedim.unit = attr[\"times.units\"]\n timedim.label = \"time\"\n timedim.offset = attr[\"t_start\"]\n else:\n nixobj.positions.unit = attr[\"data.units\"]\n blockpath = \"/\" + path.split(\"/\")[1]\n parentblock = self._get_object_at(blockpath)\n if \"extents\" in attr:\n extname = nixobj.name + \".durations\"\n exttype = nixobj.type + \".durations\"\n if extname in parentblock.data_arrays:\n del parentblock.data_arrays[extname]\n extents = parentblock.create_data_array(\n extname,\n exttype,\n data=attr[\"extents\"]\n )\n extents.unit = attr[\"extents.units\"]\n nixobj.extents = extents\n if \"labels\" in attr:\n labeldim = nixobj.positions.append_set_dimension()\n labeldim.labels = attr[\"labels\"]\n metadata = self._get_or_init_metadata(nixobj, path)\n if \"t_start\" in attr:\n self._write_property(metadata, \"t_start\", attr[\"t_start\"])\n if \"t_stop\" in attr:\n self._write_property(metadata, \"t_stop\", attr[\"t_stop\"])\n if \"waveforms\" in attr:\n wfname = nixobj.name + \".waveforms\"\n if wfname in parentblock.data_arrays:\n del parentblock.data_arrays[wfname]\n del nixobj.features[0]\n wfda = parentblock.create_data_array(\n wfname, \"neo.waveforms\",\n data=attr[\"waveforms\"]\n )\n wfda.unit = attr[\"waveforms.units\"]\n nixobj.create_feature(wfda, nix.LinkType.Indexed)\n wfda.append_set_dimension()\n wfda.append_set_dimension()\n wftime = wfda.append_sampled_dimension(\n attr[\"sampling_interval\"]\n )\n metadata[\"sampling_interval.units\"] =\\\n attr[\"sampling_interval.units\"]\n wftime.unit = attr[\"times.units\"]\n wftime.label = \"time\"\n if wfname in metadata.sections:\n wfda.metadata = metadata.sections[wfname]\n else:\n wfpath = path + \"/waveforms/\" + wfname\n wfda.metadata = self._get_or_init_metadata(wfda, wfpath)\n if \"left_sweep\" in attr:\n self._write_property(wfda.metadata, \"left_sweep\",\n attr[\"left_sweep\"])\n\n def _update_maps(self, obj, lazy):\n objidx = 
self._find_lazy_loaded(obj)\n if lazy and objidx is None:\n self._lazy_loaded.append(obj)\n elif not lazy and objidx is not None:\n self._lazy_loaded.pop(objidx)\n if not lazy:\n self._object_hashes[obj.path] = self._hash_object(obj)\n\n def _find_lazy_loaded(self, obj):\n \"\"\"\n Finds the index of an object in the _lazy_loaded list by comparing the\n path attribute. Returns None if the object is not in the list.\n\n :param obj: The object to find\n :return: The index of the object in the _lazy_loaded list or None if it\n was not added\n \"\"\"\n for idx, llobj in enumerate(self._lazy_loaded):\n if llobj.path == obj.path:\n return idx\n else:\n return None\n\n @classmethod\n def resolve_name_conflicts(cls, objects):\n \"\"\"\n Given a list of neo objects, change their names such that no two\n objects share the same name. Objects with no name are renamed based on\n their type.\n If a container object is supplied (Block, Segment, or RCG), conflicts\n are resolved for the child objects.\n\n :param objects: List of Neo objects or Neo container object\n \"\"\"\n if isinstance(objects, list):\n if not len(objects):\n return\n names = [obj.name for obj in objects]\n for idx, cn in enumerate(names):\n if not cn:\n cn = cls._generate_name(objects[idx])\n else:\n names[idx] = \"\"\n if cn not in names:\n newname = cn\n else:\n suffix = 1\n newname = \"{}-{}\".format(cn, suffix)\n while newname in names:\n suffix += 1\n newname = \"{}-{}\".format(cn, suffix)\n names[idx] = newname\n for obj, n in zip(objects, names):\n obj.name = n\n return\n if not objects.name:\n objects.name = cls._generate_name(objects)\n if isinstance(objects, Block):\n block = objects\n allchildren = block.segments + block.channel_indexes\n cls.resolve_name_conflicts(allchildren)\n allchildren = list()\n for seg in block.segments:\n allchildren.extend(seg.analogsignals +\n seg.irregularlysampledsignals +\n seg.events +\n seg.epochs +\n seg.spiketrains)\n cls.resolve_name_conflicts(allchildren)\n elif isinstance(objects, Segment):\n seg = objects\n cls.resolve_name_conflicts(seg.analogsignals +\n seg.irregularlysampledsignals +\n seg.events +\n seg.epochs +\n seg.spiketrains)\n elif isinstance(objects, ChannelIndex):\n rcg = objects\n cls.resolve_name_conflicts(rcg.units)\n\n @staticmethod\n def _generate_name(neoobj):\n neotype = type(neoobj).__name__\n return \"neo.{}\".format(neotype)\n\n @staticmethod\n def _neo_attr_to_nix(neoobj):\n neotype = type(neoobj).__name__\n attrs = dict()\n attrs[\"name\"] = neoobj.name\n attrs[\"type\"] = neotype.lower()\n attrs[\"definition\"] = neoobj.description\n if isinstance(neoobj, (Block, Segment)):\n attrs[\"rec_datetime\"] = neoobj.rec_datetime\n if neoobj.rec_datetime:\n attrs[\"created_at\"] = neoobj.rec_datetime\n if neoobj.file_datetime:\n attrs[\"file_datetime\"] = neoobj.file_datetime\n if neoobj.annotations:\n attrs[\"annotations\"] = neoobj.annotations\n return attrs\n\n @classmethod\n def _neo_data_to_nix(cls, neoobj):\n attr = dict()\n attr[\"data\"] = np.transpose(neoobj.magnitude)\n attr[\"data.units\"] = cls._get_units(neoobj)\n if isinstance(neoobj, IrregularlySampledSignal):\n attr[\"times\"] = neoobj.times.magnitude\n attr[\"times.units\"] = cls._get_units(neoobj.times)\n else:\n attr[\"times.units\"] = cls._get_units(neoobj.times, True)\n if hasattr(neoobj, \"t_start\"):\n attr[\"t_start\"] = neoobj.t_start.magnitude.item()\n attr[\"t_start.units\"] = cls._get_units(neoobj.t_start)\n if hasattr(neoobj, \"t_stop\"):\n attr[\"t_stop\"] = neoobj.t_stop.magnitude.item()\n 
attr[\"t_stop.units\"] = cls._get_units(neoobj.t_stop)\n if hasattr(neoobj, \"sampling_period\"):\n attr[\"sampling_interval\"] = neoobj.sampling_period.magnitude.item()\n attr[\"sampling_interval.units\"] = cls._get_units(\n neoobj.sampling_period\n )\n if hasattr(neoobj, \"durations\"):\n attr[\"extents\"] = neoobj.durations\n attr[\"extents.units\"] = cls._get_units(neoobj.durations)\n if hasattr(neoobj, \"labels\"):\n attr[\"labels\"] = neoobj.labels.tolist()\n if hasattr(neoobj, \"waveforms\") and neoobj.waveforms is not None:\n attr[\"waveforms\"] = list(wf.magnitude for wf in\n list(wfgroup for wfgroup in\n neoobj.waveforms))\n attr[\"waveforms.units\"] = cls._get_units(neoobj.waveforms)\n if hasattr(neoobj, \"left_sweep\") and neoobj.left_sweep is not None:\n attr[\"left_sweep\"] = neoobj.left_sweep.magnitude\n attr[\"left_sweep.units\"] = cls._get_units(neoobj.left_sweep)\n return attr\n\n def _write_property(self, section, name, v):\n \"\"\"\n Create a metadata property with a given name and value on the provided\n metadata section.\n\n :param section: The metadata section to hold the new property\n :param name: The name of the property\n :param v: The value to write\n :return: The newly created property\n \"\"\"\n\n if isinstance(v, pq.Quantity):\n if len(v.shape):\n section[name] = list(nix.Value(vv) for vv in v.magnitude)\n else:\n section[name] = nix.Value(v.magnitude.item())\n section.props[name].unit = str(v.dimensionality)\n elif isinstance(v, datetime):\n section[name] = nix.Value(calculate_timestamp(v))\n elif isinstance(v, string_types):\n section[name] = nix.Value(v)\n elif isinstance(v, bytes):\n section[name] = nix.Value(v.decode())\n elif isinstance(v, Iterable):\n values = []\n unit = None\n for item in v:\n if isinstance(item, pq.Quantity):\n unit = str(item.dimensionality)\n item = nix.Value(item.magnitude.item())\n elif isinstance(item, Iterable):\n self.logger.warn(\"Multidimensional arrays and nested \"\n \"containers are not currently supported \"\n \"when writing to NIX.\")\n return None\n elif type(item).__module__ == \"numpy\":\n item = nix.Value(item.item())\n else:\n item = nix.Value(item)\n values.append(item)\n section[name] = values\n section.props[name].unit = unit\n elif type(v).__module__ == \"numpy\":\n section[name] = nix.Value(v.item())\n else:\n section[name] = nix.Value(v)\n return section.props[name]\n\n @staticmethod\n def _get_contained_signals(obj):\n return list(\n da for da in obj.data_arrays\n if da.type in [\"neo.analogsignal\", \"neo.irregularlysampledsignal\"]\n )\n\n @staticmethod\n def _get_units(quantity, simplify=False):\n \"\"\"\n Returns the units of a quantity value or array as a string, or None if\n it is dimensionless.\n\n :param quantity: Quantity scalar or array\n :param simplify: True/False Simplify units\n :return: Units of the quantity or None if dimensionless\n \"\"\"\n units = quantity.units.dimensionality\n if simplify:\n units = units.simplified\n units = stringify(units)\n if units == \"dimensionless\":\n units = None\n return units\n\n @staticmethod\n def _nix_attr_to_neo(nix_obj):\n neo_attrs = dict()\n neo_attrs[\"name\"] = stringify(nix_obj.name)\n\n neo_attrs[\"description\"] = stringify(nix_obj.definition)\n if nix_obj.metadata:\n for prop in nix_obj.metadata.props:\n values = prop.values\n values = list(v.value for v in values)\n if prop.unit:\n values = pq.Quantity(values, prop.unit)\n if len(values) == 1:\n neo_attrs[prop.name] = values[0]\n else:\n neo_attrs[prop.name] = values\n\n if isinstance(nix_obj, 
(nix.pycore.Block, nix.pycore.Group)):\n if \"rec_datetime\" not in neo_attrs:\n neo_attrs[\"rec_datetime\"] = None\n neo_attrs[\"rec_datetime\"] = datetime.fromtimestamp(\n nix_obj.created_at\n )\n if \"file_datetime\" in neo_attrs:\n neo_attrs[\"file_datetime\"] = datetime.fromtimestamp(\n neo_attrs[\"file_datetime\"]\n )\n # neo_attrs[\"file_origin\"] = os.path.basename(self.filename)\n return neo_attrs\n\n @staticmethod\n def _group_signals(paths):\n \"\"\"\n Groups data arrays that were generated by the same Neo Signal object.\n\n :param paths: A list of paths (strings) of all the signals to be\n grouped :return: A list of paths (strings) of signal groups. The last\n part of each path is the common name of the signals in the group.\n \"\"\"\n grouppaths = list(\".\".join(p.split(\".\")[:-1])\n for p in paths)\n # deduplicating paths\n uniquepaths = []\n for path in grouppaths:\n if path not in uniquepaths:\n uniquepaths.append(path)\n return uniquepaths\n\n @staticmethod\n def _get_referers(nix_obj, obj_list):\n ref_list = list()\n for ref in obj_list:\n if nix_obj.name in list(src.name for src in ref.sources):\n ref_list.append(ref)\n return ref_list\n\n @staticmethod\n def _get_time_dimension(obj):\n for dim in obj.dimensions:\n if hasattr(dim, \"label\") and dim.label == \"time\":\n return dim\n return None\n\n @staticmethod\n def _hash_object(obj):\n \"\"\"\n Computes an MD5 hash of a Neo object based on its attribute values and\n data objects. Child objects are not counted.\n\n :param obj: A Neo object\n :return: MD5 sum\n \"\"\"\n objhash = md5()\n\n def strupdate(a):\n objhash.update(str(a).encode())\n\n def dupdate(d):\n if isinstance(d, np.ndarray) and not d.flags[\"C_CONTIGUOUS\"]:\n d = d.copy(order=\"C\")\n objhash.update(d)\n\n # attributes\n strupdate(obj.name)\n strupdate(obj.description)\n\n # annotations\n for k, v in sorted(obj.annotations.items()):\n strupdate(k)\n strupdate(v)\n\n # data objects and type-specific attributes\n if isinstance(obj, (Block, Segment)):\n strupdate(obj.rec_datetime)\n strupdate(obj.file_datetime)\n elif isinstance(obj, ChannelIndex):\n for idx in obj.index:\n strupdate(idx)\n for n in obj.channel_names:\n strupdate(n)\n if obj.coordinates is not None:\n for coord in obj.coordinates:\n for c in coord:\n strupdate(c)\n elif isinstance(obj, AnalogSignal):\n dupdate(obj)\n dupdate(obj.units)\n dupdate(obj.t_start)\n dupdate(obj.sampling_rate)\n dupdate(obj.t_stop)\n elif isinstance(obj, IrregularlySampledSignal):\n dupdate(obj)\n dupdate(obj.times)\n dupdate(obj.units)\n elif isinstance(obj, Event):\n dupdate(obj.times)\n for l in obj.labels:\n strupdate(l)\n elif isinstance(obj, Epoch):\n dupdate(obj.times)\n dupdate(obj.durations)\n for l in obj.labels:\n strupdate(l)\n elif isinstance(obj, SpikeTrain):\n dupdate(obj.times)\n dupdate(obj.units)\n dupdate(obj.t_stop)\n dupdate(obj.t_start)\n if obj.waveforms is not None:\n dupdate(obj.waveforms)\n dupdate(obj.sampling_rate)\n if obj.left_sweep is not None:\n strupdate(obj.left_sweep)\n\n # type\n strupdate(type(obj).__name__)\n\n return objhash.hexdigest()\n\n def close(self):\n \"\"\"\n Closes the open nix file and resets maps.\n \"\"\"\n if (hasattr(self, \"nix_file\") and\n self.nix_file and self.nix_file.is_open()):\n self.nix_file.close()\n self.nix_file = None\n self._object_map = None\n self._lazy_loaded = None\n self._object_hashes = None\n self._block_read_counter = None\n\n def __del__(self):\n self.close()\n",
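# --- Usage sketch (added for illustration; not part of the quoted sources) ---
# A minimal, hedged example of driving the writer methods shown above
# (write_all_blocks() and close()). Assumptions: the methods belong to the
# NIX backend class of the neo library, importable as neo.io.NixIO and
# constructed from a filename; the class header and __init__ are outside this
# excerpt, and the example file name is arbitrary.
import numpy as np
import quantities as pq
from neo.core import Block, Segment, AnalogSignal
from neo.io import NixIO  # assumed import path for the class whose methods appear above

# Build a tiny Neo hierarchy: one Block -> one Segment -> one AnalogSignal.
block = Block(name="example-block")
segment = Segment(name="segment-0")
signal = AnalogSignal(np.random.randn(1000, 4),
                      units="mV", sampling_rate=1 * pq.kHz)
segment.analogsignals.append(signal)
block.segments.append(segment)

io = NixIO("example.nix")
try:
    # write_all_blocks() first resolves name conflicts, then converts each
    # Block recursively via _write_object()/_write_cascade(), as shown above.
    io.write_all_blocks([block])
finally:
    io.close()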
"# -*- coding: utf-8 -*-\n\"\"\"\nTests of the neo.io.pynnio.PyNNNumpyIO and neo.io.pynnio.PyNNTextIO classes\n\"\"\"\n\n# needed for python 3 compatibility\nfrom __future__ import absolute_import, division\n\nimport os\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nimport numpy as np\nimport quantities as pq\n\nfrom ...neo.core import Segment, AnalogSignal, SpikeTrain\nfrom ...neo.io import PyNNNumpyIO, PyNNTextIO\nfrom numpy.testing import assert_array_equal\nfrom ...neo.test.tools import assert_arrays_equal, assert_file_contents_equal\nfrom ...neo.test.iotest.common_io_test import BaseTestIO\n\n#class CommonTestPyNNNumpyIO(BaseTestIO, unittest.TestCase):\n# ioclass = PyNNNumpyIO\n\nNCELLS = 5\n\n\nclass CommonTestPyNNTextIO(BaseTestIO, unittest.TestCase):\n ioclass = PyNNTextIO\n read_and_write_is_bijective = False\n\n\ndef read_test_file(filename):\n contents = np.load(filename)\n data = contents[\"data\"]\n metadata = {}\n for name, value in contents['metadata']:\n try:\n metadata[name] = eval(value)\n except Exception:\n metadata[name] = value\n return data, metadata\nread_test_file.__test__ = False\n\n\nclass BaseTestPyNNIO(object):\n __test__ = False\n\n def tearDown(self):\n if os.path.exists(self.test_file):\n os.remove(self.test_file)\n\n def test_write_segment(self):\n in_ = self.io_cls(self.test_file)\n write_test_file = \"write_test.%s\" % self.file_extension\n out = self.io_cls(write_test_file)\n out.write_segment(in_.read_segment(lazy=False, cascade=True))\n assert_file_contents_equal(self.test_file, write_test_file)\n if os.path.exists(write_test_file):\n os.remove(write_test_file)\n\n def build_test_data(self, variable='v'):\n metadata = {\n 'size': NCELLS,\n 'first_index': 0,\n 'first_id': 0,\n 'n': 505,\n 'variable': variable,\n 'last_id': NCELLS - 1,\n 'last_index': NCELLS - 1,\n 'dt': 0.1,\n 'label': \"population0\",\n }\n if variable == 'v':\n metadata['units'] = 'mV'\n elif variable == 'spikes':\n metadata['units'] = 'ms'\n data = np.empty((505, 2))\n for i in range(NCELLS):\n # signal\n data[i*101:(i+1)*101, 0] = np.arange(i, i+101, dtype=float)\n # index\n data[i*101:(i+1)*101, 1] = i*np.ones((101,), dtype=float)\n return data, metadata\n build_test_data.__test__ = False\n\n\nclass BaseTestPyNNIO_Signals(BaseTestPyNNIO):\n def setUp(self):\n self.test_file = \"test_file_v.%s\" % self.file_extension\n self.write_test_file(\"v\")\n\n def test_read_segment_containing_analogsignals_using_eager_cascade(self):\n # eager == not lazy\n io = self.io_cls(self.test_file)\n segment = io.read_segment(lazy=False, cascade=True)\n self.assertIsInstance(segment, Segment)\n self.assertEqual(len(segment.analogsignals), 1)\n\n as0 = segment.analogsignals[0]\n self.assertIsInstance(as0, AnalogSignal)\n self.assertEqual(as0.shape, (101, NCELLS))\n assert_array_equal(as0[:, 0],\n AnalogSignal(np.arange(0, 101, dtype=float),\n sampling_period=0.1*pq.ms,\n t_start=0*pq.s,\n units=pq.mV))\n as4 = as0[:, 4]\n self.assertIsInstance(as4, AnalogSignal)\n assert_array_equal(as4,\n AnalogSignal(np.arange(4, 105, dtype=float),\n sampling_period=0.1*pq.ms,\n t_start=0*pq.s,\n units=pq.mV))\n # test annotations (stuff from file metadata)\n\n def test_read_analogsignal_using_eager(self):\n io = self.io_cls(self.test_file)\n sig = io.read_analogsignal(lazy=False)\n self.assertIsInstance(sig, AnalogSignal)\n assert_array_equal(sig[:, 3],\n AnalogSignal(np.arange(3, 104, dtype=float),\n sampling_period=0.1*pq.ms,\n t_start=0*pq.s,\n units=pq.mV))\n # should test 
annotations: 'channel_index', etc.\n\n def test_read_spiketrain_should_fail_with_analogsignal_file(self):\n io = self.io_cls(self.test_file)\n self.assertRaises(TypeError, io.read_spiketrain, channel_index=0)\n\n\nclass BaseTestPyNNIO_Spikes(BaseTestPyNNIO):\n def setUp(self):\n self.test_file = \"test_file_spikes.%s\" % self.file_extension\n self.write_test_file(\"spikes\")\n\n def test_read_segment_containing_spiketrains_using_eager_cascade(self):\n io = self.io_cls(self.test_file)\n segment = io.read_segment(lazy=False, cascade=True)\n self.assertIsInstance(segment, Segment)\n self.assertEqual(len(segment.spiketrains), NCELLS)\n st0 = segment.spiketrains[0]\n self.assertIsInstance(st0, SpikeTrain)\n assert_arrays_equal(st0,\n SpikeTrain(np.arange(0, 101, dtype=float),\n t_start=0*pq.s,\n t_stop=101*pq.ms,\n units=pq.ms))\n st4 = segment.spiketrains[4]\n self.assertIsInstance(st4, SpikeTrain)\n assert_arrays_equal(st4,\n SpikeTrain(np.arange(4, 105, dtype=float),\n t_start=0*pq.s,\n t_stop=105*pq.ms,\n units=pq.ms))\n # test annotations (stuff from file metadata)\n\n def test_read_spiketrain_using_eager(self):\n io = self.io_cls(self.test_file)\n st3 = io.read_spiketrain(lazy=False, channel_index=3)\n self.assertIsInstance(st3, SpikeTrain)\n assert_arrays_equal(st3,\n SpikeTrain(np.arange(3, 104, dtype=float),\n t_start=0*pq.s,\n t_stop=104*pq.s,\n units=pq.ms))\n # should test annotations: 'channel_index', etc.\n\n def test_read_analogsignal_should_fail_with_spiketrain_file(self):\n io = self.io_cls(self.test_file)\n self.assertRaises(TypeError, io.read_analogsignal, channel_index=2)\n\n\nclass BaseTestPyNNNumpyIO(object):\n io_cls = PyNNNumpyIO\n file_extension = \"npz\"\n\n def write_test_file(self, variable='v', check=False):\n data, metadata = self.build_test_data(variable)\n metadata_array = np.array(sorted(metadata.items()))\n np.savez(self.test_file, data=data, metadata=metadata_array)\n if check:\n data1, metadata1 = read_test_file(self.test_file)\n assert metadata == metadata1, \"%s != %s\" % (metadata, metadata1)\n assert data.shape == data1.shape == (505, 2), \\\n \"%s, %s, (505, 2)\" % (data.shape, data1.shape)\n assert (data == data1).all()\n assert metadata[\"n\"] == 505\n write_test_file.__test__ = False\n\n\nclass BaseTestPyNNTextIO(object):\n io_cls = PyNNTextIO\n file_extension = \"txt\"\n\n def write_test_file(self, variable='v', check=False):\n data, metadata = self.build_test_data(variable)\n with open(self.test_file, 'wb') as f:\n for item in sorted(metadata.items()):\n f.write((\"# %s = %s\\n\" % item).encode('utf8'))\n np.savetxt(f, data)\n if check:\n raise NotImplementedError\n write_test_file.__test__ = False\n\n\nclass TestPyNNNumpyIO_Signals(BaseTestPyNNNumpyIO, BaseTestPyNNIO_Signals,\n unittest.TestCase):\n __test__ = True\n\n\nclass TestPyNNNumpyIO_Spikes(BaseTestPyNNNumpyIO, BaseTestPyNNIO_Spikes,\n unittest.TestCase):\n __test__ = True\n\n\nclass TestPyNNTextIO_Signals(BaseTestPyNNTextIO, BaseTestPyNNIO_Signals,\n unittest.TestCase):\n __test__ = True\n\n\nclass TestPyNNTextIO_Spikes(BaseTestPyNNTextIO, BaseTestPyNNIO_Spikes,\n unittest.TestCase):\n __test__ = True\n\n\nif __name__ == '__main__':\n unittest.main()\n",
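# --- Format sketch (added for illustration; not part of the quoted test module) ---
# A small, self-contained round-trip of the .npz layout that the tests above
# assume: a (505, 2) float 'data' array plus a 'metadata' array of
# (name, value) string pairs, mirroring write_test_file()/read_test_file().
# The file name and metadata values here are illustrative only.
import numpy as np

metadata = {"size": 5, "dt": 0.1, "variable": "v", "units": "mV", "label": "population0"}
data = np.zeros((505, 2))

# Write: the same structure np.savez() receives in BaseTestPyNNNumpyIO.write_test_file().
np.savez("pynn_roundtrip_example.npz",
         data=data,
         metadata=np.array(sorted(metadata.items())))

# Read back: numpy stores the metadata pairs as strings, so values need
# re-parsing (read_test_file() above uses eval() with a plain-string fallback).
contents = np.load("pynn_roundtrip_example.npz")
restored = {name: value for name, value in contents["metadata"]}
assert contents["data"].shape == (505, 2)
assert restored["variable"] == "v"
assert float(restored["dt"]) == 0.1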
"# -*- coding: utf-8 -*-\n'''\nThe module for groupping data into a table on a trial-by-trial basis according to experimental conditions,\nand then providing access for users to perform analysis like plotting PSTH and other common application.\nThis work is based on:\n * Bo Zhang, Ji Dai - first version\n'''\nimport pandas as pd\nimport numpy as np\nimport math\nfrom matplotlib import mlab\nimport matplotlib.pyplot as plt\nfrom .readio.h5_io import h5_io\nimport copy\nimport h5py as hp\nfrom scipy import signal, ndimage\nimport os\nfrom . import func_tools as ftl\nfrom .separate_trials import separate_trials\nfrom sklearn.neighbors import KernelDensity\nfrom scipy import signal, ndimage, fftpack, stats\nfrom scipy.signal import sosfilt\nfrom scipy.signal import zpk2sos\nimport seaborn\nseaborn.set_style(style='white')\n\n\nclass Graphics():\n '''\n The Class for analyzing data according to experimental conditions. It can analyze spike train,local field potential \n and behavioral data (e.g. saccade, reaction time) using different display methods.\n \n Args\n filename (string):\n file name (with or without extension)\n trial_start_mark (string):\n The event marker representing the start of a trial, which is used to separate the raw\n data into different trials. \n comment_expr (string):\n This parameter tells the program how experimental condition and parameters are stored in the data.\n For example, a experimental condition, patch direction, is stored in the way 'patch_direction:degree'.\n By setting the comment_expr as \"key:value\", the program decodes the key as 'patch_direction', and \n the value for a particular trial is the degree of that trial.\n spike_to_load (string or list):\n Define the spike channels and units.\n If 'all', spikes in all channels and all units will be loaded.\n If 'none', spike data will not be loaded.\n If set to be a string like 'spike_26_1', spike of unit 1 in 26 channel will be loaded.\n If set to be a list like ['spike_26_1','spike_23_2'], spike of unit 1 in 26 channel and spike of unit 2 in 23 channel will be loaded.\n Default: 'all'\n analog_to_load (string or list):\n Define the analog signal channels.\n if 'all', analog signals in all channels will be loaded.\n if 'none', analog signals will not be loaded.\n if set to be a string like 'analog_25', analog signals in channel 25 will be loaded.\n if set to be a list like ['analog_25','analog_20'], analog signals in channel 25 and channel 20 will be loaded.\n Default: 'none'\n Returns\n -\n Examples\n >>> gh = Graphics('myfile.h5','64715','key:value')\n In this example, event marker '64715' is used to separate the raw data into different trials.\n 'key:value' is used to extract experimental condition information.\n This command initiates the Graphics class, and groups all data into a table, wherein each row represents a trial,\n and each column represents a specific data, e.g. stimulus onset time, offset time, reaction time, spike,\n LFP, etc. 
\n The command gh.data_df displays the data table\n '''\n def __init__(self,filename,trial_start_mark,comment_expr,spike_to_load='all',analog_to_load='all'):\n \"\"\"\n Initialize the Graphics class.\n \"\"\"\n if not filename.endswith('.h5'):\n filename = filename+'.h5'\n self.filename = filename\n # find available spike channnel and analog channel in file\n spike_avail = list()\n analog_avail = list()\n with hp.File(filename,'r') as f:\n for key in f.keys():\n if key == 'spikes':\n for tem_key in f[key].keys():\n spike_avail.append(tem_key)\n if key == 'analogs':\n for tem_key in f[key].keys():\n analog_avail.append(tem_key)\n # load spike data and analog signals from the file\n spike_to_load = self.__spike_to_load(spike_to_load, spike_avail)\n analog_to_load = self.__analog_to_load(analog_to_load, analog_avail)\n events,comments,spikes,analogs = h5_io(filename, spike_to_load, analog_to_load)\n self.data_df = separate_trials(events,comments,spikes,analogs,trial_start_mark,comment_expr)\n sampling_rate = dict()\n for chn in analogs.keys():\n sampling_rate[chn] = analogs[chn]['sampling_rate']\n if len(sampling_rate)>0:\n self.sampling_rate = sampling_rate\n \n def __spike_to_load(self,spike_to_load, spike_avail):\n if isinstance(spike_to_load,str):\n if spike_to_load == 'none':\n spike_to_load = []\n elif spike_to_load == 'all':\n spike_to_load = spike_avail\n elif spike_to_load in spike_avail:\n spike_to_load = [spike_to_load] \n elif spike_to_load in [ite.replace('spike_','') for ite in spike_avail]:\n spike_to_load = ['spike_'+spike_to_load]\n else:\n raise ValueError(\"Invalid specification of spike_to_load\")\n elif spike_to_load is None:\n spike_to_load = []\n\n if spike_to_load:\n for i,ite in enumerate(spike_to_load):\n if not ite.startswith('spike_'):\n spike_to_load[i] = 'spike_'+ite\n if spike_to_load[i] not in spike_avail:\n raise ValueError('%s not avaliable'%spike_to_load[i])\n return spike_to_load\n\n def __analog_to_load(self,analog_to_load, analog_avail):\n if isinstance(analog_to_load,str):\n if analog_to_load == 'none':\n analog_to_load = []\n elif analog_to_load == 'all':\n analog_to_load = analog_avail\n elif analog_to_load in analog_avail:\n analog_to_load = [analog_to_load]\n elif analog_to_load in [ite.replace('analog_','') for ite in analog_avail]:\n analog_to_load = ['analog_'+analog_to_load]\n else:\n raise ValueError(\"Invalid specification of analog_to_load\")\n elif analog_to_load is None:\n analog_to_load = []\n\n if analog_to_load:\n for i,ite in enumerate(analog_to_load):\n if not ite.startswith('analog_'):\n analog_to_load[i] = 'analog_'+ite\n if analog_to_load[i] not in analog_avail:\n raise ValueError('%s not avaliable'%analog_to_load[i])\n return analog_to_load\n\n # sort data table according to certain columns\n def __sort_by(self,data,sort_by):\n data_group = data.groupby(sort_by)\n return data_group.groups\n\n # calculate firing rate of spikes in certain channel\n def __cal_firing_rate(self,data,channel,bin_size,overlap,pre_time,post_time):\n bins_left = [pre_time]\n while bins_left[-1] < post_time:\n bins_left.append(bins_left[-1]+bin_size-overlap)\n bins_left = np.array(bins_left)\n bins_right = bins_left+bin_size\n bins_mean = (bins_left+bins_right)/2.0\n zero_offset = bins_mean[bins_mean>0][0]\n bins_left = bins_left - zero_offset\n bins_right = bins_right - zero_offset\n bins_mean = bins_mean - zero_offset\n\n bins_left = bins_left[bins_right<=post_time]\n bins_mean = bins_mean[bins_right<=post_time]\n bins_right = 
bins_right[bins_right<=post_time]\n\n bins_mean = bins_mean[bins_left>=pre_time]\n bins_right = bins_right[bins_left>=pre_time]\n bins_left = bins_left[bins_left>=pre_time]\n def cal_fr(ite_spike):\n ite_fr = list()\n for i in range(bins_left.shape[0]):\n ite_fr_i = ite_spike[(ite_spike>=bins_left[i])&(ite_spike<bins_right[i])].shape[0]\n ite_fr.append(ite_fr_i)\n ite_fr = np.array(ite_fr)\n ite_fr = ite_fr*1000.0/bin_size\n return ite_fr\n firing_rate = data[channel].apply(cal_fr)\n return firing_rate, bins_mean\n \n # Group data by experimental conditions and plot PSTH and raster of each condition\n def plot_spike(self,channel,sort_by,align_to,pre_time,post_time,bin_size=30,overlap=0,Mean=3,Sigma=10,limit=False,filter_nan=False,fig_marker=[0],fig_size=[12,7],fig_column=4,fig_pad=0.5,fig_wspace=0.02,fig_hspace=0.15,figure=True):\n '''\n Args\n channel (string): \n define the spike channel and unit separated by a dash. Example: chanel_unit\n sort_by (list):\n define the conditions used to sort data\n align_to (string):\n event marker used to align each trial's spikes\n pre_time (int):\n Set the time(msec) before the align_to to be covered\n post_time (int):\n Set the Time(msec) after the align_to to be covered\n bin_size (int):\n bin size (msec) used to calculate PSTH\n Default: 30\n overlap (int):\n overlap (msec) between adjacent bins\n Default: 0\n Mean (float):\n mean of the gaussian kernal used to smooth the PSTH\n Default: 3\n Sigma (float):\n sigma of the gaussian kernal used to smooth the PSTH\n Default: 10\n limit (string):\n an expression used to filtering the data by certain conditions.\n Default: False\n filter_nan (list):\n trials with NaN value in the listed columns will be excluded\n Default: False\n fig_marker (list):\n Define the positions of the reference vertical lines by setting some time points in the list.\n Default: [0]\n fig_size (list):\n define the size of the figure\n Default: [12,7]\n fig_column (int):\n Define the number of sub-plots in each row\n Default: 4\n fig_pad (float):\n the space of padding of the figure\n Default: 0.5\n fig_wspace (float):\n the width reserved for blank space between subplots\n Default: 0.02\n fig_hspace (float):\n the height reserved for white space between subplots\n Default: 0.15\n figure (Boolean):\n if True, the figure will be displayed.\n Default: True\n Returns\n {'data':{condition_1:PSTH,\n condition_2:PSTH,\n .\n .\n .},\n 'time':firing rate time}\n Examples\n >>> firingRate = gh.plot_spike(channel = ‘spike_26_1’, sort_by = [‘patch_direction'], align_to = ’dig_64721’, \n pre_time = -300, post_time = 2000, bin_size = 30, overlap = 10, filter_nan = \n [‘dig_64721’,’dig_64722'], fig_column = 4, fig_marker = [0])\n '''\n if pre_time>0:\n raise ValueError('pre_time must <= 0')\n if post_time<=0:\n raise ValueError('post_time must >0')\n data = copy.deepcopy(self.data_df)\n # limit, filter_nan\n data = ftl.f_filter_limit(data,limit)\n data = ftl.f_filter_nan(data,filter_nan)\n # align spike in plot channel to align_to\n data[channel] = data[channel] - data[align_to]\n # sort data by certain experimental conditions in sort_by\n data_group = self.__sort_by(data,sort_by)\n firing_rate,fr_time = self.__cal_firing_rate(data,channel,bin_size,overlap,pre_time,post_time)\n firing_rates_mean = dict()\n spikes_select = dict()\n # build the gaussian filter\n fr_buffer = signal.gaussian(Mean,Sigma)\n scatter_max = list()\n for k,v in data_group.iteritems():\n firing_rate_select = firing_rate.loc[v]\n spikes_select[k] = data[channel].loc[v]\n 
firing_rate_select = firing_rate_select.mean(axis=0)\n firing_rate_select[fr_time>0] = ndimage.filters.convolve1d(firing_rate_select[fr_time>0], fr_buffer/fr_buffer.sum())\n firing_rate_select[fr_time<=0] = ndimage.filters.convolve1d(firing_rate_select[fr_time<=0], fr_buffer/fr_buffer.sum())\n firing_rates_mean[k] = firing_rate_select\n scatter_max.append(len(v))\n fr_max = max([v.max() for k, v in firing_rates_mean.iteritems()])\n fr_max = (int(fr_max / 10) + 1) * 10\n scatter_max = max(scatter_max)\n scatter_height = fr_max*1.0/scatter_max\n group_keys = data_group.keys()\n group_keys = pd.DataFrame(group_keys, columns=sort_by)\n group_keys = group_keys.sort_values(sort_by)\n group_keys.index = range(group_keys.shape[0])\n x_lim = [pre_time,post_time]\n if figure is True:\n if group_keys.shape[1] == 1:\n block_num = 1\n block_in_num = group_keys.shape[0] / block_num\n row_num = int(math.ceil(float(block_in_num) / fig_column))\n row_nums = row_num\n fig_all = plt.figure(figsize=fig_size)\n for i in group_keys.index:\n current_block = 1\n block_in_i = i - (current_block - 1) * block_in_num + 1\n i_pos = (current_block - 1) * row_num * fig_column + block_in_i\n plt.subplot(row_nums, fig_column, i_pos)\n cond_i = group_keys.loc[i].values[0]\n plt.plot(fr_time, firing_rates_mean[cond_i], linewidth=2.5)\n for i_spike, ite_spike in enumerate(spikes_select[cond_i]):\n plt.vlines(ite_spike,fr_max+i_spike*scatter_height,fr_max+i_spike*scatter_height+scatter_height*0.618,color='k',linewidth=0.3,alpha=1)\n plt.vlines(fig_marker, 0, fr_max * 2, color='red')\n if i_pos%fig_column ==1:\n plt.yticks([0,fr_max/2,fr_max])\n elif i_pos%fig_column != 1:\n plt.yticks([0,fr_max/2,fr_max],[])\n plt.yticks([0,fr_max/2,fr_max])\n plt.xlim(x_lim)\n plt.ylim(0, fr_max*2)\n plt.title(str(cond_i))\n fig_all.tight_layout(pad=fig_pad)\n fig_all.subplots_adjust(wspace=fig_wspace,hspace=fig_hspace)\n elif group_keys.shape[1] == 2:\n block_num = group_keys[sort_by[0]].unique().shape[0]\n block_in_num = group_keys.shape[0]/block_num\n row_num = int(math.ceil(float(block_in_num) / fig_column))\n row_nums = block_num * row_num\n fig_all = plt.figure(figsize=fig_size)\n for i in group_keys.index:\n current_block = int(math.floor(i / block_in_num)) + 1\n block_in_i = i - (current_block - 1) * block_in_num + 1\n i_pos = (current_block - 1) * row_num * \\\n fig_column + block_in_i\n plt.subplot(row_nums, fig_column, i_pos)\n cond_i = tuple(group_keys.loc[i].values)\n plt.plot(fr_time, firing_rates_mean[cond_i], linewidth=2.5)\n for i_spike, ite_spike in enumerate(spikes_select[cond_i]):\n plt.vlines(ite_spike,fr_max+i_spike*scatter_height,fr_max+i_spike*scatter_height+scatter_height*0.618,color='k',linewidth=1)\n plt.vlines(fig_marker, 0, fr_max * 2, color='red')\n if i_pos%fig_column ==1:\n plt.yticks([0,fr_max/2,fr_max])\n elif i_pos%fig_column != 1:\n plt.yticks([0,fr_max/2,fr_max],[])\n # plt.yticks([0,fr_max/2,fr_max])\n plt.xlim(x_lim)\n plt.ylim(0, fr_max * 2)\n plt.title([str(ite) for ite in cond_i])\n fig_all.tight_layout(pad=fig_pad)\n fig_all.subplots_adjust(wspace=fig_wspace,hspace=fig_hspace)\n return {'data':firing_rates_mean,'time':fr_time}\n\n # Sort data by experimental conditions and plot the spike count during a given period\n def plot_spike_count(self,channel,sort_by,align_to,timebin,limit=False,filter_nan=False,figure=True):\n '''\n Args\n channel (string): \n define the spike channel and unit separated by a dash. 
Example: chanel_unit\n sort_by (list):\n experimental conditions used to sort data\n align_to (string):\n event marker used to align each trial's spikes\n timebin (list):\n Define the period for calculating spike counts.\n limit (string):\n an expression used to filter the data by certain conditions.\n Default: False\n filter_nan (list):\n trials with NaN value in the listed columns will be excluded\n Default: False\n figure (Boolean):\n if True, the figure will be displayed.\n Default: True\n Returns\n {condition_1: {'mean':value,\n 'sem':value}\n condition_2: {'mean':value,\n 'sem':value}\n '\n '\n '\n }\n\n Examples:\n spk_count = gh.plot_spike_count(channel = ’spike_26_1’, sort_by = [‘patch_direction’], align_to = \n ’dig_64721’, timebin=[0,700]) \n '''\n data = copy.deepcopy(self.data_df)\n # limit, filter_nan\n data = ftl.f_filter_limit(data,limit)\n data = ftl.f_filter_nan(data,filter_nan)\n # align spike in plot channel to align_to\n data[channel] = data[channel] - data[align_to]\n # sort data by certain experimental conditions in sort_by\n data_group = self.__sort_by(data,sort_by)\n spikes_count = dict()\n spikes_sem = dict()\n for k,v in data_group.iteritems():\n spikes_count[k] = list()\n data_select = data[channel].loc[v]\n for i in data_select.index:\n tem_spike = data_select.loc[i]\n spike_inbin = np.logical_and(tem_spike >= timebin[0], tem_spike <= timebin[1])\n spikes_count[k].append(spike_inbin.sum())\n spikes_sem[k] = np.std(spikes_count[k],ddof=1)/math.sqrt(len(spikes_count[k]))\n spikes_count[k] = np.mean(spikes_count[k])\n ymin = np.min([v.min() for k,v in spikes_count.iteritems() ])\n ymax = np.max([v.max() for k,v in spikes_count.iteritems() ])\n ymin = 0.9*ymin\n ymax = 1.1*ymax\n group_keys = np.array(data_group.keys())\n group_keys = pd.DataFrame(group_keys, columns=sort_by)\n group_keys = group_keys.sort_values(sort_by)\n group_keys.index = range(group_keys.shape[0])\n if figure is True:\n plt.figure()\n if group_keys.shape[1] == 1:\n xs = list()\n xticks = list()\n ys = list()\n err = list()\n for i,col_1 in enumerate(group_keys[sort_by[0]].unique()):\n xs.append(i)\n xticks.append(col_1)\n ys.append(spikes_count[col_1])\n err.append(spikes_sem[col_1])\n plt.errorbar(xs,ys,yerr=err)\n plt.xlim([xs[0]-1, xs[-1]+1])\n plt.xticks(xs,xticks)\n plt.ylim([ymin,ymax])\n plt.legend(framealpha=0,labelspacing=0.01)\n plt.xlabel(sort_by[0])\n plt.ylabel('numbers of spikes')\n elif group_keys.shape[1] == 2:\n for col_2 in group_keys[sort_by[1]].unique():\n xs = list()\n xticks = list()\n ys = list()\n err = list()\n for i,col_1 in enumerate(group_keys[sort_by[0]].unique()):\n if (col_1, col_2) in spikes_count.keys():\n xs.append(i)\n xticks.append(col_1)\n ys.append(spikes_count[(col_1, col_2)])\n err.append(spikes_sem[(col_1, col_2)])\n plt.errorbar(xs,ys,yerr=err,label=col_2)\n plt.xlim(xs[0]-1,xs[-1]+1)\n plt.xticks(xs,xticks)\n plt.ylim([ymin,ymax])\n plt.legend(framealpha=0,labelspacing=0.01)\n plt.xlabel(sort_by[0])\n plt.ylabel('numbers of spikes')\n return {k:{'mean':spikes_count[k],'sem':spikes_sem[k]} for k,v in data_group.iteritems()}\n\n # Sort data by experimental conditions and plot scalar data in lineplot (e.g. 
reaction time) \n def plot_line(self,target,sort_by,limit=False,filter_nan=False):\n '''\n Args\n target (string):\n the name of the scalar data to be analyzed\n sort_by (list):\n experimental conditions used to sort data\n limit (string):\n an expression used to filter the data by certain conditions.\n Default: False\n filter_nan (list):\n trials with NaN value in the listed columns will be excluded\n Default: False\n Returns\n {condition_1: {'mean':value,\n 'sem':value,\n 'num':value}\n condition_2: {'mean':value,\n 'sem':value,\n 'num':value}\n '\n '\n '\n }\n \n Examples:\n Reaction_time=gh.plot_line('Reaction_time',sort_by=['a','A'],limit='Reaction_time<500')\n '''\n data = copy.deepcopy(self.data_df)\n # limit, filter_nan\n data = ftl.f_filter_limit(data,limit)\n data = ftl.f_filter_nan(data,filter_nan)\n # sort data by certain experimental conditions in sort_by\n data_group = self.__sort_by(data,sort_by)\n target_select = {}\n target_mean = dict()\n target_sem = dict()\n target_num = dict()\n for k, v in data_group.iteritems():\n target_select[k] = data[target].loc[v]\n target_select[k] = pd.to_numeric(target_select[k])\n target_mean[k] = target_select[k].mean(axis=0)\n target_sem[k] = target_select[k].std(axis=0, ddof=1) / np.sqrt(target_select[k].shape[0])\n target_num[k] = target_select[k].shape[0]\n target_select[k] = [target_select[k].mean(axis=0), target_select[k].std(axis=0, ddof=1) / np.sqrt(target_select[k].shape[0]), target_select[k].shape[0]]\n group_keys = np.array(data_group.keys())\n group_keys = pd.DataFrame(group_keys,columns=sort_by)\n group_keys = group_keys.sort_values(sort_by)\n group_keys.index = range(group_keys.shape[0])\n\n plt.figure()\n if group_keys.shape[1] == 1:\n xs = list()\n xticks = list()\n ys = list()\n err = list()\n for i,col_1 in enumerate(group_keys[sort_by[0]].unique()):\n xs.append(i)\n xticks.append(col_1)\n ys.append(target_mean[col_1])\n err.append(target_sem[col_1])\n plt.errorbar(xs,ys,yerr=err)\n plt.xlim([xs[0]-1, xs[-1]+1])\n plt.xticks(xs,xticks)\n plt.legend(framealpha=0,labelspacing=0.01)\n plt.xlabel(sort_by[0])\n elif group_keys.shape[1] == 2:\n for col_2 in group_keys[sort_by[1]].unique():\n xs = list()\n xticks = list()\n ys = list()\n err = list()\n for i,col_1 in enumerate(group_keys[sort_by[0]].unique()):\n if (col_1, col_2) in target_mean.keys():\n xs.append(i)\n xticks.append(col_1)\n ys.append(target_mean[(col_1, col_2)])\n err.append(target_sem[(col_1, col_2)])\n plt.errorbar(xs,ys,yerr=err,label=col_2)\n plt.xlim(xs[0]-1,xs[-1]+1)\n plt.xticks(xs,xticks)\n plt.legend(framealpha=0,labelspacing=0.01)\n plt.xlabel(sort_by[0])\n return {k:{'mean':target_mean[k],'sem':target_sem[k],'num':target_num[k]} for k,v in data_group.iteritems()}\n\n def plot_bar(self,target,sort_by,limit=False,filter_nan=False,ci=95,kind='bar'):\n '''\n Args\n target (string):\n the name of the scalar data to be analyzed\n sort_by (list):\n experimental conditions used to sort data\n limit (string):\n an expression used to filter the data by certain conditions.\n Default: False\n filter_nan (list):\n trials with NaN value in the listed columns will be excluded\n Default: False\n ci (float):\n confidence interval\n defaule: 95\n kind (str):\n The kind of plot to draw., link 'bar', 'point'\n Default: 'bar'\n Returns\n {condition_1: {'mean':value,\n 'sem':value,\n 'num':value}\n condition_2: {'mean':value,\n 'sem':value,\n 'num':value}\n '\n '\n '\n }\n \n Examples:\n 
Reaction_time=gh.plot_line('Reaction_time',sort_by=['a','A'],limit='Reaction_time<500')\n '''\n data = copy.deepcopy(self.data_df)\n # limit, filter_nan\n data = ftl.f_filter_limit(data,limit)\n data = ftl.f_filter_nan(data,filter_nan)\n # sort data by certain experimental conditions in sort_by\n data_group = self.__sort_by(data,sort_by)\n target_select = {}\n target_mean = dict()\n target_sem = dict()\n target_num = dict()\n for k, v in data_group.iteritems():\n target_select[k] = data[target].loc[v]\n target_select[k] = pd.to_numeric(target_select[k])\n target_mean[k] = target_select[k].mean(axis=0)\n target_sem[k] = target_select[k].std(axis=0, ddof=1) / np.sqrt(target_select[k].shape[0])\n target_num[k] = target_select[k].shape[0]\n target_select[k] = [target_select[k].mean(axis=0), target_select[k].std(axis=0, ddof=1) / np.sqrt(target_select[k].shape[0]), target_select[k].shape[0]]\n group_keys = list(data_group.keys())\n group_keys = pd.DataFrame(group_keys,columns=sort_by)\n group_keys = group_keys.sort_values(sort_by)\n group_keys.index = range(group_keys.shape[0])\n if group_keys.shape[1] == 2:\n g = seaborn.factorplot(x=sort_by[0],y=target,hue=sort_by[1],data=data,size=6,kind=kind,palette='muted',ci=ci,\n order=sorted(list(set(data[sort_by[0]].values))),hue_order=sorted(list(set(data[sort_by[1]].values))))\n g.set_ylabels('')\n elif group_keys.shape[1] == 1:\n if isinstance(sort_by,list):\n x_id = sort_by[0]\n elif isinstance(sort_by,str):\n x_id = sort_by\n g = seaborn.factorplot(x=x_id,y=target,data=data,size=6,kind=kind,palette='muted',ci=ci,\n order=sorted(list(set(data[sort_by[0]].values))))\n g.set_ylabels('')\n \n return {k:{'mean':target_mean[k],'sem':target_sem[k],'num':target_num[k]} for k,v in data_group.iteritems()}\n\n # convert data type in certain columns to numberic type\n def to_numeric(self,columns):\n '''\n Args\n columns (string or list):\n column names needed to be converted\n Returns\n -\n '''\n if isinstance(columns,str):\n self.data_df[columns] = pd.to_numeric(self.data_df[columns],errors='coerce')\n elif isinstance(columns,list):\n for column in columns:\n self.data_df[column] = pd.to_numeric(self.data_df[column],errors='coerce')\n\n # rename certain columns\n def rename(self,names_dict):\n '''\n Args\n names_dict (dict):\n {'old_name_1':'new_name_1',\n 'old_name_2':'new_name_2',\n .\n .\n .}\n Returns\n -\n '''\n self.data_df = self.data_df.rename(columns=names_dict)\n\n # This function performs adding to a given column\n def df_add(self,column,added_info):\n '''\n Args\n column (string):\n the column name to be played with\n added_info (string, int, float or pandas.DataFrame):\n The information to be added to the selected column can be string, int, float, or \n pandas.DataFrame\n Returns\n -\n '''\n if isinstance(added_info,str):\n self.data_df[column] = self.data_df[column] + self.data_df[added_info]\n elif isinstance(added_info,(int,float)):\n self.data_df[column] = self.data_df[column] + added_info\n elif isinstance(added_info,(pd.Series,pd.DataFrame)):\n self.data_df[column] = self.data_df[column] + added_info\n\n # This function performs minus to a given column\n def df_minus(self,column,minus_info):\n '''\n Args\n column (string):\n the column name to be played with\n minus_info (string, int, float or pandas.DataFrame):\n information to be subtracted from the selected column\n Returns\n -\n '''\n if isinstance(minus_info,str):\n self.data_df[column] = self.data_df[column] - self.data_df[minus_info]\n elif isinstance(minus_info,(int,float)):\n 
self.data_df[column] = self.data_df[column] - minus_info\n elif isinstance(added_info,(pd.Series,pd.DataFrame)):\n self.data_df[column] = self.data_df[column] - added_info\n\n # This function multiplys the selected column with certain factor\n def df_multiply(self,column,multiply_info):\n '''\n Args\n column (string):\n the column name to be played with\n multiply_info (string, int, float or pandas.DataFrame):\n information to be used for multiplying\n Returns\n -\n '''\n if isinstance(multiply_info,str):\n self.data_df[column] = self.data_df[column] * self.data_df[multiply_info]\n elif isinstance(multiply_info,(int,float)):\n self.data_df[column] = self.data_df[column] * multiply_info\n elif isinstance(added_info,(pd.Series,pd.DataFrame)):\n self.data_df[column] = self.data_df[column] * added_info\n\n # This function divides the selected column by certain factor\n def df_division(self,column,division_info):\n '''\n Args\n column (string):\n the column name to be played with\n division_info (string, int, float or pandas.DataFrame):\n information to be used for dividing\n Returns\n -\n '''\n if isinstance(division_info,str):\n self.data_df[column] = self.data_df[column] / self.data_df[division_info]\n elif isinstance(division_info,(int,float)):\n self.data_df[column] = self.data_df[column] / float(division_info)\n elif isinstance(added_info,(pd.Series,pd.DataFrame)):\n self.data_df[column] = self.data_df[column] / added_info\n\n # delete certain trials in the data table\n def del_trials(self,trials):\n '''\n Args\n trials (list):\n indexs of trials to be deleted\n Returns\n -\n '''\n self.data_df.drop(trials,axis=0,inplace=True)\n self.data_df.index = np.arange(self.data_df.shape[0])\n\n # delete certain columns in the data table\n def del_columns(self,columns):\n '''\n Args\n columns (string, list):\n List the column names to be deleted\n Returns\n -\n '''\n self.data_df.drop(columns,axis=1,inplace=True)\n\n # add certain column to the data table\n def add_column(self,name,add_data):\n '''\n Args\n name (string, list):\n define the name(s) for the newly added column\n add_data (int, float, string, list, pandas.Series, pandas.DataFrame):\n if int, float or string, all rows of this new column will be filled with this value\n if list, pandas.Series or pandas.DataFrame, their dimensions need to be consistent with the data table\n Returns\n -\n '''\n self.data_df[name] = add_data\n \n # Sort data by experimental conditions and plot analog signals (e.g. 
LFP)\n def plot_analog(self,channel,sort_by,align_to,pre_time,post_time,limit=False,filter_nan=False,normalize=True,fig_marker=[0],fig_size=[12,7],fig_column=4):\n '''\n Args\n channel (string): \n define the analog channel\n sort_by (list):\n experimental conditions used to sort data\n align_to (string):\n event marker used to align each trial's signals\n pre_time (int):\n Set the time(msec) before the align_to to be covered\n post_time (int):\n Set the time(msec) after the align_to to be covered\n limit (string):\n an expression used to filter the data by certain conditions.\n Default: False\n filter_nan (list):\n trials with NaN value in the listed columns will be excluded\n Default: False\n fig_marker (list):\n Defines the positions of the reference vertical lines by setting some time points in the list.\n Default: [0]\n fig_size (list):\n the size of the figure\n Default: [12,7]\n fig_column (int):\n number of sub-plots in one row\n Default: 4\n Returns\n {'time': analog signal time\n 'data': {'condition_1':signal data,\n 'condition_2':signal data,\n .\n .\n .}\n }\n '''\n if pre_time>0:\n raise ValueError('pre_time must <= 0')\n if post_time<=0:\n raise ValueError('post_time must >0')\n data = copy.deepcopy(self.data_df)\n samp_time = 1000.0/self.sampling_rate[channel]\n # limit, filter_nan\n data = ftl.f_filter_limit(data,limit)\n data = ftl.f_filter_nan(data,filter_nan)\n # align signal time in plot channel to align_to\n # sort data by certain experimental conditions in sort_by\n data_group = self.__sort_by(data,sort_by)\n # align analog_time to zero mark\n points_num = int((post_time-pre_time)/samp_time)\n ana_timestamp = np.linspace(pre_time,pre_time+points_num*samp_time,points_num,endpoint=False)\n start_points = data[align_to]/samp_time - abs(pre_time/samp_time)\n pre_time_num = int(abs(pre_time/samp_time))\n start_points = start_points.astype(int)\n ana_select=list()\n if normalize is True:\n for i in data.index:\n start_point = start_points.loc[i]\n end_point = start_point+points_num\n temp_data = data[channel].loc[i][start_point:end_point]\n # print temp_data[:pre_time_num]\n # each trial, minus average before zero point\n if pre_time_num>0:\n temp_data = temp_data - temp_data[:pre_time_num].mean()\n ana_select.append(temp_data)\n elif normalize is False:\n for i in data.index:\n start_point = start_points.loc[i]\n end_point = start_point+points_num\n temp_data = data[channel].loc[i][start_point:end_point]\n ana_select.append(temp_data) \n\n data[channel] = ana_select\n target_mean = dict()\n for k,v in data_group.iteritems():\n temp_ana = data[channel].loc[v]\n target_mean[k] = temp_ana.mean(axis=0)\n\n target_max = max([v.max() for _,v in target_mean.iteritems()])\n # target_max = (int(target_max/10.0)+1)*10\n target_min = min([v.min() for _,v in target_mean.iteritems()])\n # target_min = (int(target_min/10.0)-1)*10\n\n plt.figure(figsize=fig_size)\n for key,value in target_mean.iteritems():\n plt.plot(ana_timestamp,value,label=str(key))\n plt.vlines(fig_marker,target_min,target_max)\n plt.ylim(target_min,target_max)\n plt.xlabel('Time [ms]',fontsize=16)\n plt.ylabel('mV', fontsize=16)\n plt.legend(labelspacing=0)\n return {'time':ana_timestamp,'data':target_mean}\n\n # Sort data by experimental conditions and plot spectrum of analog signals (e.g. 
LFP)\n def plot_spectral(self,channel,sort_by,align_to,pre_time,post_time,limit=False,filter_nan=False,x_lim=[1,100],y_lim=False,log=False,window=\"hann\", nfft=None,detrend=\"constant\",scaling=\"density\",fig_size=[12,7]):\n '''\n Args\n channel (string): \n define the analog channel\n sort_by (list):\n experimental conditions used to sort data\n align_to (string):\n event marker used to align each trial's signals\n pre_time (int):\n Set the time(msec) before the align_to to be covered\n post_time (int):\n Set the time(msec) after the align_to to be covered\n limit (string):\n an expression used to filter the data by certain conditions.\n Default: False\n filter_nan (list):\n trials with NaN value in the listed columns will be excluded\n Default: False\n x_lim (list):\n set limits of x-axis\n Default: [0,100]\n y_lim (list):\n set limits of y-axis\n Default: False\n window (str,tuple or array_like):\n Desired window to use. see parameter window in scipy.signal.periodogram\n Default: \"hanning\"\n nfft (int):\n length of the FFT used. If None the length of \"x\" will be used\n detrend (str, function or False, optional):\n Specifies how to detrend `x` prior to computing the spectrum. see parameter detrend in scipy.signal.periodogram.\n Default: \"constant\"\n scaling (\"density\",\"spectrum\"):\n if \"density\": V**2/Hz\n if \"spectrum\": V**2\n see scaling paramter in scipy.signal.periodogram\n fig_size (list):\n the size of the figure\n Default: [12,7]\n\n Returns\n {'frequency': frequency\n 'data': {'condition_1':signal data,\n 'condition_2':signal data,\n .\n .\n .}\n }\n '''\n if pre_time>0:\n raise ValueError('pre_time must <= 0')\n if post_time<=0:\n raise ValueError('post_time must >0')\n\n data = copy.deepcopy(self.data_df)\n samp_time = 1000.0/self.sampling_rate[channel]\n fs = 1000.0/samp_time\n # limit, filter_nan\n data = ftl.f_filter_limit(data,limit)\n data = ftl.f_filter_nan(data,filter_nan)\n # sort data by certain experimental conditions in sort_by\n data_group = self.__sort_by(data,sort_by)\n # align analog_time to zero mark\n points_num = int((post_time-pre_time)/samp_time)\n ana_timestamp = np.linspace(pre_time,pre_time+points_num*samp_time,points_num,endpoint=False)\n start_points = data[align_to]/samp_time - abs(pre_time/samp_time)\n start_points = start_points.astype(int)\n ana_select=list()\n for i in data.index:\n start_point = start_points.loc[i]\n end_point = start_point+points_num\n ana_select.append(data[channel].loc[i][start_point:end_point])\n f_spec = lambda ite:signal.periodogram(ite,fs,window=window,nfft=nfft,detrend=detrend,scaling=scaling)\n ana_spec = map(f_spec,ana_select)\n freq = ana_spec[0][0]\n f_temp = lambda ite:ite[1]\n ana_spec = map(f_temp,ana_spec)\n data[channel] = ana_select\n data[channel+'_spec'] = ana_spec \n\n target_mean = dict()\n for k,v in data_group.iteritems():\n temp_ana = data[channel+'_spec'].loc[v]\n target_mean[k] = temp_ana.mean(axis=0)\n\n mask = (freq>x_lim[0]) & (freq<x_lim[1])\n plt.figure(figsize=fig_size)\n for key,value in target_mean.iteritems():\n if log is True:\n plt.semilogy(freq[mask],value[mask],label=str(key))\n else:\n plt.plot(freq[mask],value[mask],label=str(key))\n\n plt.xlabel('Frequency [Hz]',fontsize=16)\n plt.ylabel('PSD (mV**2/Hz)',fontsize=16)\n plt.legend(labelspacing=0)\n return {'frequency':freq,'data':target_mean}\n\n def __nearest_pow_2(self,x):\n \"\"\"\n Find power of two nearest to x\n >>> _nearest_pow_2(3)\n 2.0\n >>> _nearest_pow_2(15)\n 16.0\n :type x: float\n :param x: Number\n :rtype: 
Int\n :return: Nearest power of 2 to x\n \"\"\"\n a = math.pow(2, math.ceil(np.log2(x)))\n b = math.pow(2, math.floor(np.log2(x)))\n if abs(a - x) < abs(b - x):\n return a\n else:\n return b\n \n # calculate spectrogram of signals\n def __spectrogram(self,data,samp_rate,window,per_lap,wlen,mult): \n samp_rate = float(samp_rate)\n if not wlen:\n wlen = samp_rate/100.0\n \n npts=len(data)\n nfft = int(self.__nearest_pow_2(wlen * samp_rate))\n if nfft > npts:\n nfft = int(self.__nearest_pow_2(npts / 8.0))\n if mult is not None:\n mult = int(self.__nearest_pow_2(mult))\n mult = mult * nfft\n nlap = int(nfft * float(per_lap))\n end = npts / samp_rate\n\n window = signal.get_window(window,nfft)\n specgram, freq, time = mlab.specgram(data, Fs=samp_rate,window=window,NFFT=nfft,\n pad_to=mult, noverlap=nlap)\n return specgram,freq,time\n\n # Sort data by experimental conditions and plot spectrogram for analog signals (e.g. LFP)\n def plot_spectrogram(self,channel,sort_by,align_to,pre_time,post_time,limit=False,filter_nan=False,y_lim=[0,100],normalize=True,window=\"hann\",per_lap=0.9,wlen=None,mult=8.0,fig_mark=[0],fig_size=[12,7],color_bar=True,fig_column=4):\n '''\n Args\n channel (string): \n define the analog channel\n sort_by (list):\n experimental conditions used to sort data\n align_to (string):\n event marker used to align each trial' signals\n pre_time (int):\n Set the time(msec) before the align_to to be covered\n post_time (int):\n Set the time(msec) after the align_to to be covered\n limit (string):\n an expression used to filter the data by certain conditions.\n Default: False\n filter_nan (list):\n trials with NaN value in the listed columns will be excluded\n Default: False\n y_lim (list):\n set limits of y-axis\n Default: [0, 100]\n window (str,tuple or array_like):\n Desired window to use. see parameter window in scipy.signal.spectrogram\n Default: \"hann\"\n per_lap (float):\n percentage of overlap of sliding window, range (0,1)\n Default: 0.9,\n wlen (int, float):\n Window length for fft in seconds. 
\n If None, wlen = samp_rate/100.0\n Default: None\n mult: Pad zeros to length mult * wlen, which makes spectrogram smoother.\n Default: 8.0\n\n fig_mark (list):\n Draw vertical lines at the time points set in the list.\n Default: [0]\n fig_size (list):\n the size of the figure\n Default: [12,7]\n fig_column (int):\n number of sub-plots in one row\n Default: 4\n Returns\n {'frequency':frequency,\n 'time':analog signal time,\n 'data':{\n 'condition_1': spectrogram value,\n 'condition_2': spectrogram value,\n .\n .\n .\n }}\n '''\n if pre_time>0:\n raise ValueError('pre_time must <= 0')\n if post_time<=0:\n raise ValueError('post_time must >0')\n data = copy.deepcopy(self.data_df)\n samp_time = 1000.0/self.sampling_rate[channel]\n fs = self.sampling_rate[channel]\n if fig_mark:\n fig_mark = [ite/1000.0 for ite in fig_mark]\n xtime_offset = pre_time/1000.0\n # limit, filter_nan\n data = ftl.f_filter_limit(data,limit)\n data = ftl.f_filter_nan(data,filter_nan)\n # sort data by certain experimental conditions in sort_by\n data_group = self.__sort_by(data,sort_by)\n # align analog_time to align_to marker\n points_num = int((post_time-pre_time)/samp_time)\n ana_timestamp = np.linspace(pre_time,pre_time+points_num*samp_time,points_num,endpoint=False)\n start_points = data[align_to]/samp_time - abs(pre_time/samp_time)\n start_points = start_points.astype(int)\n # pre_time_num = int(abs(pre_time/samp_time))\n ana_select=list()\n for i in data.index:\n start_point = start_points.loc[i]\n end_point = start_point+points_num\n ana_select.append(data[channel].loc[i][start_point:end_point])\n\n specg_select = list()\n if normalize is True:\n for i,value in enumerate(ana_select):\n i_specg,i_freq,i_time = self.__spectrogram(value,fs,window=window,per_lap=per_lap,wlen=wlen,mult=mult)\n # i_freq,i_time,i_specg = signal.spectrogram(value,fs,window=window,nperseg=nperseg,noverlap=noverlap,detrend=detrend,scaling=scaling)\n temp_time = 0 - pre_time/1000.0\n mask_time = i_time<temp_time\n temp_specg = i_specg[:,mask_time]\n if temp_specg.shape[1]>0:\n # check if temp_specg is empty\n temp_specg = temp_specg.mean(axis=1)\n temp_specg = temp_specg[:,np.newaxis]\n # normalize spectrogram of each trial\n i_specg = i_specg - temp_specg \n specg_select.append(i_specg)\n elif normalize is False:\n for i,value in enumerate(ana_select):\n i_specg,i_freq,i_time = self.__spectrogram(value,fs,window=window,per_lap=per_lap,wlen=wlen,mult=mult)\n # i_freq,i_time,i_specg = signal.spectrogram(value,fs,window=window,nperseg=nperseg,noverlap=noverlap,detrend=detrend,scaling=scaling)\n specg_select.append(i_specg)\n\n freq = i_freq\n time = i_time\n data[channel+'_spectrogram'] = specg_select\n\n target_mean = dict()\n for k,v in data_group.iteritems():\n temp_specg = data[channel+'_spectrogram'].loc[v]\n target_mean[k] = temp_specg.mean(axis=0)\n \n if len(y_lim) != 2:\n raise ValueError('y_lim should be a list with two elements')\n vmin = min([value[(y_lim[0]<freq) & (freq<y_lim[1])].min() for _,value in target_mean.iteritems()])\n vmax = max([value[(y_lim[0]<freq) & (freq<y_lim[1])].max() for _,value in target_mean.iteritems()])\n group_keys = np.array(data_group.keys())\n group_keys = pd.DataFrame(group_keys, columns=sort_by)\n group_keys = group_keys.sort_values(sort_by)\n group_keys.index = range(group_keys.shape[0])\n # return_val = dict()\n # return_val['data'] = dict()\n\n \n if group_keys.shape[1] == 1:\n block_num = 1\n block_in_num = group_keys.shape[0] / block_num\n row_num = int(math.ceil(float(block_in_num) / 
fig_column))\n row_nums = row_num\n fig_all = plt.figure(figsize=fig_size)\n for i in group_keys.index:\n current_block = 1\n block_in_i = i - (current_block - 1) * block_in_num + 1\n i_pos = (current_block - 1) * row_num * fig_column + block_in_i\n ax = plt.subplot(row_nums, fig_column, i_pos)\n cond_i = group_keys.loc[i].values[0]\n\n # plt.pcolormesh(time,freq,target_mean[cond_i])\n\n halfbin_time = (time[1] - time[0]) / 2.0\n halfbin_freq = (freq[1] - freq[0]) / 2.0\n specgram = np.flipud(target_mean[cond_i])\n # center bin\n extent = (time[0] - halfbin_time + xtime_offset, time[-1] + halfbin_time +xtime_offset,\n freq[0] - halfbin_freq, freq[-1] + halfbin_freq)\n plt.imshow(specgram, interpolation=\"nearest\", extent=extent,cmap=plt.cm.jet,vmin=vmin,vmax=vmax)\n plt.vlines(fig_mark,y_lim[0],y_lim[1],linewidth=0.8)\n # if scale_bar is True:\n # plt.colorbar(pad=0.01,aspect=50)\n ax.axis('tight')\n plt.ylim(y_lim[0],y_lim[1])\n \n if i_pos%fig_column ==1:\n pass\n elif i_pos%fig_column != 1:\n ax.set_yticks([])\n plt.title(str(cond_i))\n fig_all.tight_layout(pad=0.8)\n fig_all.subplots_adjust(wspace=0.05,hspace=0.15,right=0.9)\n if color_bar is True:\n cax = plt.axes([0.91, 0.1, 0.012, 0.8])\n plt.colorbar(cax=cax)\n cax.set_ylabel('PSD (mV**2/Hz)')\n plt.show()\n\n elif group_keys.shape[1] == 2:\n block_num = group_keys[sort_by[0]].unique().shape[0]\n block_in_num = group_keys.shape[0]/block_num\n row_num = int(math.ceil(float(block_in_num) / fig_column))\n row_nums = block_num * row_num\n fig_all = plt.figure(figsize=fig_size)\n for i in group_keys.index:\n current_block = int(math.floor(i / block_in_num)) + 1\n block_in_i = i - (current_block - 1) * block_in_num + 1\n i_pos = (current_block - 1) * row_num * \\\n fig_column + block_in_i\n ax=plt.subplot(row_nums, fig_column, i_pos)\n cond_i = tuple(group_keys.loc[i].values)\n\n # plt.pcolormesh(time,freq,target_mean[cond_i])\n\n halfbin_time = (time[1] - time[0]) / 2.0\n halfbin_freq = (freq[1] - freq[0]) / 2.0\n specgram = np.flipud(target_mean[cond_i])\n # center bin\n extent = (time[0] - halfbin_time + xtime_offset, time[-1] + halfbin_time +xtime_offset,\n freq[0] - halfbin_freq, freq[-1] + halfbin_freq)\n plt.imshow(specgram, interpolation=\"nearest\", extent=extent,cmap=plt.cm.jet,vmin=vmin,vmax=vmax)\n plt.vlines(fig_mark,y_lim[0],y_lim[1],linewidth=0.8)\n # if scale_bar is True:\n # plt.colorbar(pad=0.01,aspect=50)\n ax.axis('tight')\n plt.ylim(y_lim[0],y_lim[1])\n \n if i_pos%fig_column ==1:\n pass\n elif i_pos%fig_column != 1:\n ax.set_yticks([])\n plt.title(str(cond_i))\n fig_all.tight_layout(pad=0.8)\n fig_all.subplots_adjust(wspace=0.05,hspace=0.15,right=0.9)\n\n if color_bar is True:\n cax = plt.axes([0.91, 0.1, 0.012, 0.8])\n plt.colorbar(cax=cax)\n cax.set_ylabel('PSD (mV**2/Hz)')\n plt.show()\n\n return {'frequency':freq,'time':time+xtime_offset,'data':target_mean}\n \n # save analysis results to the workspace for population level analysis \n def save_data(self,space_name,data,key,replace=False):\n '''\n Args\n space_name (string):\n file path of the work space for storing analysis results\n data (dict):\n analysis results to be stored\n key (string):\n name the stored results\n replace (Boolean):\n if True, stored results will be rewritted if their key has already existed.\n Returns\n -\n '''\n super_key = self.filename.split('/')[-1][:-3]\n flat_data = ftl.flatten_dict(data)\n keys_in = list()\n if replace is False:\n with hp.File(space_name,'a') as f:\n for ite_k,ite_v in flat_data.iteritems():\n item_key = 
super_key+'/'+key+'/'+ite_k\n if item_key in f:\n keys_in.append(item_key)\n\n if len(keys_in) > 0:\n print(keys_in)\n raise ValueError(\"Those data alreadly in file, please set replace=True to update these data\")\n else:\n with hp.File(space_name,'a') as f:\n for ite_k,ite_v in flat_data.iteritems():\n item_key = super_key+'/'+key+'/'+ite_k\n if item_key in f:\n del f[item_key]\n f[item_key] = ite_v\n f.flush()\n print(\"Data are stored now\")\n\n\n # filter analog signals\n def __band_filter(self,ite_data,fs,order,lowcut,highcut,zerophase,btype,ftype,rps=None):\n fe = fs/2.0\n low = lowcut/fe\n high = highcut/fe\n if low<0:\n low=0\n if high>1:\n high=1\n if ftype == \"cheby1\":\n rp = rps\n z,p,k = signal.iirfilter(order,[low,high],btype=btype,ftype=ftype,output=\"zpk\",rp=rp)\n elif ftype == \"cheby2\":\n rs = rps\n z,p,k = signal.iirfilter(order,[low,high],btype=btype,ftype=ftype,output=\"zpk\",rs=rs)\n elif ftype == \"ellip\":\n rp = rps[0]\n rs = rps[1]\n z,p,k = signal.iirfilter(order,[low,high],btype=btype,ftype=ftype,output=\"zpk\",rp=rp,rs=rs)\n else:\n z,p,k = signal.iirfilter(order,[low,high],btype=btype,ftype=ftype,output=\"zpk\")\n sos = signal.zpk2sos(z,p,k)\n ite_data = signal.sosfilt(sos,ite_data)\n if zerophase:\n ite_data = signal.sosfilt(sos,ite_data[::-1])[::-1]\n return ite_data\n def __highpass_filter(self,ite_data,fs,order,lowcut,zerophase,btype,ftype,rps=None):\n fe = fs/2.0\n low = lowcut/fe\n if low<0:\n low=0\n if ftype == \"cheby1\":\n rp = rps\n z,p,k = signal.iirfilter(order,low,btype=btype,ftype=ftype,output=\"zpk\",rp=rp)\n elif ftype == \"cheby2\":\n rs = rps\n z,p,k = signal.iirfilter(order,low,btype=btype,ftype=ftype,output=\"zpk\",rs=rs)\n elif ftype == \"ellip\":\n rp = rps[0]\n rs = rps[1]\n z,p,k = signal.iirfilter(order,low,btype=btype,ftype=ftype,output=\"zpk\",rp=rp,rs=rs)\n else:\n z,p,k = signal.iirfilter(order,low,btype=btype,ftype=ftype,output=\"zpk\")\n sos = signal.zpk2sos(z,p,k)\n ite_data = signal.sosfilt(sos,ite_data)\n if zerophase:\n ite_data = signal.sosfilt(sos,ite_data[::-1])[::-1]\n return ite_data\n def __lowpass_filter(self,ite_data,fs,order,highcut,zerophase,btype,ftype,rps=None):\n fe = fs/2.0\n high = highcut/fe\n if high>1:\n high=1\n if ftype == \"cheby1\":\n rp = rps\n z,p,k = signal.iirfilter(order,high,btype=btype,ftype=ftype,output=\"zpk\",rp=rp)\n elif ftype == \"cheby2\":\n rs = rps\n z,p,k = signal.iirfilter(order,high,btype=btype,ftype=ftype,output=\"zpk\",rs=rs)\n elif ftype == \"ellip\":\n rp = rps[0]\n rs = rps[1]\n z,p,k = signal.iirfilter(order,high,btype=btype,ftype=ftype,output=\"zpk\",rp=rp,rs=rs)\n else:\n z,p,k = signal.iirfilter(order,high,btype=btype,ftype=ftype,output=\"zpk\")\n sos = signal.zpk2sos(z,p,k)\n ite_data = signal.sosfilt(sos,ite_data)\n if zerophase:\n ite_data = signal.sosfilt(sos,ite_data[::-1])[::-1]\n return ite_data\n\n def analog_filter(self,channel,btype,ftype=\"butter\",order=6,zerophase=True,**args):\n '''\n Args\n channel (string):\n define the analog channel\n btype (string): {‘bandpass’, ‘lowpass’, ‘highpass’, ‘bandstop’}\n ftype : str, optional\n The type of IIR filter to design:\n Butterworth : ‘butter’\n Chebyshev I : ‘cheby1’\n Chebyshev II : ‘cheby2’\n Cauer/elliptic: ‘ellip’\n Bessel/Thomson: ‘bessel’\n Default: \"butter\"\n order (int): the order of the filter\n zerophase (bool): \n If True, apply filter once forwards and once backwards.\n This results in twice the filter order but zero phase shift in the resulting filtered trace.\n Default: True\n **args:\n if btype is 
bandpass or bandstop:\n if ftype is butter or bessel:\n highcut, lowcut\n if ftype is cheby1:\n highcut, lowcut, rp\n if ftype is cheby2:\n highcut, lowcut, rs\n if ftype is ellip:\n highcut, lowcut, rp, rs\n if btype is lowpass:\n if ftype is butter or bessel:\n highcut\n if ftype is cheby1:\n highcut, rp\n if ftype is cheby2:\n highcut, rs\n if ftype is ellip:\n highcut, rp, rs\n if btype is highpass:\n if ftype is butter or bessel:\n lowcut\n if ftype is cheby1:\n lowcut, rp\n if ftype is cheby2:\n lowcut, rs\n if ftype is ellip:\n lowcut, rp, rs\n Returns\n -\n '''\n\n fs = self.sampling_rate[channel]\n if btype in [\"bandpass\",\"bandstop\"]:\n lowcut = args[\"lowcut\"]\n highcut = args[\"highcut\"]\n if ftype in [\"butter\",\"bessel\"]:\n self.data_df[channel] = self.data_df[channel].apply(self.__band_filter,args=(fs,order,lowcut,highcut,zerophase,btype,ftype))\n elif ftype == \"cheby1\":\n rps = args[\"rp\"]\n self.data_df[channel] = self.data_df[channel].apply(self.__band_filter,args=(fs,order,lowcut,highcut,zerophase,btype,ftype,rps))\n elif ftype == \"cheby2\":\n rps = args[\"rs\"]\n self.data_df[channel] = self.data_df[channel].apply(self.__band_filter,args=(fs,order,lowcut,highcut,zerophase,btype,ftype,rps))\n elif ftype == \"ellip\":\n rps = [args[\"rp\"],args[\"rs\"]]\n self.data_df[channel] = self.data_df[channel].apply(self.__band_filter,args=(fs,order,lowcut,highcut,zerophase,btype,ftype,rps))\n elif btype == \"highpass\":\n lowcut = args[\"lowcut\"]\n if ftype in [\"butter\",\"bessel\"]:\n self.data_df[channel] = self.data_df[channel].apply(self.__highpass_filter,args=(fs,order,lowcut,zerophase,btype,ftype))\n elif ftype == \"cheby1\":\n rps = args[\"rp\"]\n self.data_df[channel] = self.data_df[channel].apply(self.__highpass_filter,args=(fs,order,lowcut,zerophase,btype,ftype,rps))\n elif ftype == \"cheby2\":\n rps = args[\"rs\"]\n self.data_df[channel] = self.data_df[channel].apply(self.__highpass_filter,args=(fs,order,lowcut,zerophase,btype,ftype,rps))\n elif ftype == \"ellip\":\n rps = [args[\"rp\"],args[\"rs\"]]\n self.data_df[channel] = self.data_df[channel].apply(self.__highpass_filter,args=(fs,order,lowcut,zerophase,btype,ftype,rps))\n elif btype == \"lowpass\":\n highcut = args[\"highcut\"]\n if ftype in [\"butter\",\"bessel\"]:\n self.data_df[channel] = self.data_df[channel].apply(self.__lowpass_filter,args=(fs,order,highcut,zerophase,btype,ftype))\n elif ftype == \"cheby1\":\n rps = args[\"rp\"]\n self.data_df[channel] = self.data_df[channel].apply(self.__lowpass_filter,args=(fs,order,highcut,zerophase,btype,ftype,rps))\n elif ftype == \"cheby2\":\n rps = args[\"rs\"]\n self.data_df[channel] = self.data_df[channel].apply(self.__lowpass_filter,args=(fs,order,highcut,zerophase,btype,ftype,rps))\n elif ftype == \"ellip\":\n rps = [args[\"rp\"],args[\"rs\"]]\n self.data_df[channel] = self.data_df[channel].apply(self.__lowpass_filter,args=(fs,order,highcut,zerophase,btype,ftype,rps)) # # filter analog signals\n # def analog_filter(self,channel,band_pass=None,band_stop=None,zerophase=True):\n # '''\n # Args\n # channel (string):\n # define the analog channel\n # band_pass (list):\n # Set the frequency range for band pass filtering.\n # Default: None\n # band_stop (list):\n # Set frequency for band-stop filtering.\n # Default: None\n # Returns\n # -\n # '''\n # if band_pass is not None:\n # self.__band_pass(channel,band_pass[0],band_pass[1],zerophase=zerophase)\n # if band_stop is not None:\n # self.__band_stop(channel,band_stop[0],band_stop[1],zerophase=zerophase)\n\n 
# def __band_pass(self,channel,freqmin,freqmax,corners=32,zerophase=True):\n # fe = self.sampling_rate[channel]\n # low = freqmin/fe\n # high = freqmax/fe\n # # raise for some bad scenarios\n # if high - 1.0 > -1e-6:\n # msg = (\"Selected high corner frequency ({}) of bandpass is at or \"\n # \"above Nyquist ({}). Applying a high-pass instead.\").format(\n # freqmax, fe)\n # raise ValueError(msg)\n \n # if low > 1:\n # msg = \"Selected low corner frequency is above Nyquist.\"\n # raise ValueError(msg)\n # z, p, k = signal.iirfilter(corners, [low, high], btype='band',\n # ftype='butter', output='zpk',)\n # sos = zpk2sos(z, p, k)\n # def bandpass(ite,sos=sos):\n # ite = sosfilt(sos, ite)\n # if zerophase:\n # ite = sosfilt(sos,ite[::-1])[::-1]\n # return ite\n # self.data_df[channel] = self.data_df[channel].apply(bandpass)\n\n # def __band_stop(self,channel,freqmin,freqmax,corners=4,zerophase=True):\n # fe = self.sampling_rate[channel]\n # low = freqmin/fe\n # high = freqmax/fe\n # # raise for some bad scenarios\n # if high - 1.0 > -1e-6:\n # msg = (\"Selected high corner frequency ({}) of bandpass is at or \"\n # \"above Nyquist ({}). Applying a high-pass instead.\").format(\n # freqmax, fe)\n # raise ValueError(msg)\n \n # if low > 1:\n # msg = \"Selected low corner frequency is above Nyquist.\"\n # raise ValueError(msg)\n # z, p, k = signal.iirfilter(corners, [low, high], btype='bandstop',\n # ftype='butter', output='zpk')\n # sos = zpk2sos(z, p, k)\n # def bandpass(ite,sos=sos):\n # ite = sosfilt(sos, ite)\n # if zerophase:\n # ite = sosfilt(sos,ite[::-1])[::-1]\n # return ite\n # self.data_df[channel] = self.data_df[channel].apply(bandpass)\n\n # smooth eye movement trajectory and realign eye position to a relatively stable period of time, e.g. during fixation.\n def calibrate_eye(self,eye_channel,realign_mark,realign_timebin,eye_medfilt_win=21,eye_gausfilt_sigma=3):\n '''\n Args\n eye_channel (list):\n the first element is the channel name for the horizontal eye position\n the second element is the channel name for the vertial eye position\n realign_mark (string):\n event marker used to align eye positions\n realign_timebin (list):\n a period of time relative to the realign_mark.\n Example: [0,100]\n eye_medfilt_win (int):\n parameter for the median filter to smooth the eye movement trajectory\n eye_gausfilt_sigma (int):\n sigma of the gaussian kernel to smooth the eye movement trajectory\n Return:\n -\n '''\n samp_time = 1000.0/self.sampling_rate[eye_channel[0]]\n # medfilt eye x, y position\n lamb_medfilt = lambda ite:signal.medfilt(ite,eye_medfilt_win)\n self.data_df[eye_channel[0]] = self.data_df[eye_channel[0]].apply(lamb_medfilt)\n self.data_df[eye_channel[1]] = self.data_df[eye_channel[1]].apply(lamb_medfilt)\n # gaussian filt eye x,y position\n lamb_gausfilt = lambda ite:ndimage.filters.gaussian_filter1d(ite,eye_gausfilt_sigma)\n self.data_df[eye_channel[0]] = self.data_df[eye_channel[0]].apply(lamb_gausfilt)\n self.data_df[eye_channel[1]] = self.data_df[eye_channel[1]].apply(lamb_gausfilt)\n # align eye to realign_mark, realign_timebin uses realign_mark as reference\n realign_poinnum = (self.data_df[realign_mark]/samp_time).values\n start_points = realign_poinnum + realign_timebin[0]/samp_time\n points_num = int((realign_timebin[1]-realign_timebin[0])/samp_time)\n for channel in eye_channel:\n align_points = list()\n for idx in self.data_df.index:\n start_point = start_points[idx]\n if ~np.isnan(start_point):\n start_point = int(start_point)\n end_point = start_point + 
points_num\n align_point = self.data_df[channel].loc[idx][start_point:end_point]\n align_point = align_point.mean()\n else:\n align_point = np.nan\n align_points.append(align_point)\n self.data_df[channel] = self.data_df[channel] - align_points\n\n # find all saccades for all trials\n def find_saccade(self,eye_channel,eye_speed_win=5,sac_speed_threshold=100,sac_duration_threshold=10,sac_displacement_threshold=2):\n '''\n Args\n eye_channel (list):\n the first element is the channel name for the horizontal eye position\n the second element is the channel name for the vertial eye position\n eye_speed_wind (int):\n Number of points to calculate eye movement speed\n sac_speed_threshold (int):\n Set the speed threshold for a valid saccade\n Default: 100\n sac_duration_threshold (int):\n Set the (minimum) duration threshold for a valid saccade.\n Default: 10 (msec)\n sac_displacement_threshold (int):\n Set the minimum saccade amplitude for a valid saccade\n Default: 2\n Returns\n -\n '''\n data = copy.deepcopy(self.data_df[eye_channel])\n samp_time = 1000.0/self.sampling_rate[eye_channel[0]]\n\n eye_x = list()\n eye_y = list()\n eye_t = list()\n for idx in data.index:\n eye_x.append(data[eye_channel[0]].loc[idx])\n eye_y.append(data[eye_channel[1]].loc[idx])\n temp_num = data[eye_channel[0]].loc[idx].shape[0]\n temp_time = np.linspace(0, temp_num*samp_time, temp_num, endpoint=False)\n eye_t.append(temp_time)\n x_distance = [ite[eye_speed_win:]-ite[:-eye_speed_win] for ite in eye_x]\n y_distance = [ite[eye_speed_win:]-ite[:-eye_speed_win] for ite in eye_y]\n eye_speed = list()\n eye_freq = 1000.0/samp_time\n tim_dur = eye_speed_win*1.0/eye_freq\n for idx in self.data_df.index:\n speed = (x_distance[idx]**2 + y_distance[idx]**2)**0.5/tim_dur\n eye_speed.append(speed)\n eye_speed_t = [ite[eye_speed_win:] for ite in eye_t]\n eye_x_speed_pos = [ite[eye_speed_win:] for ite in eye_x]\n eye_y_speed_pos = [ite[eye_speed_win:] for ite in eye_y]\n # find saccade\n def saccade_find(trial_i):\n sac_half_speed_threshold = sac_speed_threshold/2.0\n tem_1 = np.where(eye_speed[trial_i] > sac_speed_threshold)[0]\n tem_2 = tem_1[1:] - tem_1[:-1]\n tem_3 = np.where(tem_2 > 1)[0]\n tem_3 = np.append(0, tem_3)\n tem_3 = np.append(tem_3, tem_2.shape[0])\n tem_4 = tem_3[1:] - tem_3[:-1]\n \n tem_5 = np.where(\n tem_4 > sac_duration_threshold * eye_freq / 1000.0)[0]\n tem_3_new = tem_3 + 1\n saccade_start = []\n saccade_end = []\n saccade_from = []\n saccade_to = []\n saccade_amp = []\n for tem_sac in tem_5:\n tem_6 = tem_3_new[tem_sac:tem_sac + 2]\n tem_7 = [tem_1[tem_6[0]], tem_1[tem_6[1] - 1]]\n tem_8 = [np.arange(tem_7[0]), np.arange(\n tem_7[1], eye_speed[trial_i].shape[0])]\n\n if len(np.where(eye_speed[trial_i][tem_8[0]] < sac_half_speed_threshold)[0]) > 0:\n sac_start = np.where(eye_speed[trial_i][\n tem_8[0]] < sac_half_speed_threshold)[0][-1]\n \n else:\n sac_start = 0\n\n if len(np.where(eye_speed[trial_i][tem_8[1]] < sac_half_speed_threshold)[0]) > 0:\n sac_end = np.where(eye_speed[trial_i][tem_8[1]] < sac_half_speed_threshold)[\n 0][0] + tem_7[1]\n else:\n sac_end = eye_speed[trial_i].shape[0] - 1\n if (sac_end - sac_start) * 1000.0 / eye_freq >= sac_duration_threshold:\n sac_amp = ((eye_x_speed_pos[trial_i][sac_end] - eye_x_speed_pos[trial_i][sac_start])**2 + (\n eye_y_speed_pos[trial_i][sac_end] - eye_y_speed_pos[trial_i][sac_start])**2)**0.5\n if sac_amp > sac_displacement_threshold:\n saccade_start.append(eye_speed_t[\n trial_i][sac_start])\n saccade_end.append(eye_speed_t[\n trial_i][sac_end])\n 
saccade_from.append(\n [eye_x_speed_pos[trial_i][sac_start], eye_y_speed_pos[trial_i][sac_start]])\n saccade_to.append(\n [eye_x_speed_pos[trial_i][sac_end], eye_y_speed_pos[trial_i][sac_end]])\n saccade_amp.append(sac_amp)\n return [saccade_start, saccade_end, saccade_from, saccade_to, saccade_amp]\n \n eye_saccade = [saccade_find(i) for i in range(len(eye_speed))]\n saccade_start = [np.array(ite[0]) for ite in eye_saccade]\n saccade_end = [np.array(ite[1]) for ite in eye_saccade]\n saccade_from = [np.array(ite[2]) for ite in eye_saccade]\n saccade_to = [np.array(ite[3]) for ite in eye_saccade]\n saccade_amp = [np.array(ite[4]) for ite in eye_saccade]\n self.data_df['saccade_start'] = saccade_start\n self.data_df['saccade_end'] = saccade_end\n self.data_df['saccade_from'] = saccade_from\n self.data_df['saccade_to'] = saccade_to\n self.data_df['saccade_amp'] = saccade_amp\n\n # choose saccades in each trial that happened within a certain period and of certain amplitude\n def choose_saccade(self, align_to, timebin, ampbin=False):\n '''\n Args\n align_to (string):\n event marker as zero point time\n timebin (list):\n time period relative to the zero point time\n Select saccades happened within the set period\n ampbin (list):\n amplitude range\n Selec saccades of set amplitude\n Default: False\n Return:\n -\n '''\n saccade_start = list()\n saccade_end = list()\n saccade_from = list()\n saccade_to = list()\n saccade_amp = list()\n for idx in self.data_df.index:\n sac_ids = range(len(self.data_df['saccade_start'].loc[idx]))\n temp_sac_start = list()\n temp_sac_end = list()\n temp_sac_from = list()\n temp_sac_to = list()\n temp_sac_amp = list()\n # time period relative to align_to marker\n timebin_0 = self.data_df[align_to].loc[idx] + timebin[0]\n timebin_1 = self.data_df[align_to].loc[idx] + timebin[1]\n for sac_id in sac_ids:\n if timebin_0 < self.data_df['saccade_start'].loc[idx][sac_id] < timebin_1:\n if ampbin is not False:\n if ampbin[0]<self.data_df['saccade_amp'].loc[idx][sac_id]<ampbin[1]:\n temp_sac_start.append(self.data_df['saccade_start'].loc[idx][sac_id])\n temp_sac_end.append(self.data_df['saccade_end'].loc[idx][sac_id])\n temp_sac_from.append(self.data_df['saccade_from'].loc[idx][sac_id])\n temp_sac_to.append(self.data_df['saccade_to'].loc[idx][sac_id])\n temp_sac_amp.append(self.data_df['saccade_amp'].loc[idx][sac_id])\n else:\n temp_sac_start.append(self.data_df['saccade_start'].loc[idx][sac_id])\n temp_sac_end.append(self.data_df['saccade_end'].loc[idx][sac_id])\n temp_sac_from.append(self.data_df['saccade_from'].loc[idx][sac_id])\n temp_sac_to.append(self.data_df['saccade_to'].loc[idx][sac_id])\n temp_sac_amp.append(self.data_df['saccade_amp'].loc[idx][sac_id])\n \n saccade_start.append(temp_sac_start)\n saccade_end.append(temp_sac_end)\n saccade_from.append(temp_sac_from)\n saccade_to.append(temp_sac_to)\n saccade_amp.append(temp_sac_amp)\n self.data_df['saccade_start_1'] = saccade_start\n self.data_df['saccade_end_1'] = saccade_end\n self.data_df['saccade_from_1'] = saccade_from\n self.data_df['saccade_to_1'] = saccade_to\n self.data_df['saccade_amp_1'] = saccade_amp\n\n # Reallocate the storage space that the occupied by the file, then release extra storage space.\n def reclaim_space(self,file_name):\n '''\n Args\n file_name (string):\n the name of the work space \n Return\n -\n '''\n f = hp.File(file_name,'r')\n f2 = hp.File(file_name.split('.h5')[0]+'_reclaim.h5','w')\n used_keys = list()\n def valid_key(name):\n if isinstance(f[name],hp.Group):\n pass\n else:\n 
used_keys.append(name)\n f.visit(valid_key)\n for key in used_keys:\n f2[key] = f[key].value\n f.flush()\n f2.flush()\n f.close()\n f2.close()\n os.remove(file_name)\n os.rename(file_name.split('.h5')[0]+'_reclaim.h5',file_name)\n print('Space is reclaimed now')\n"
] | [
[
"numpy.array",
"numpy.shape",
"numpy.empty",
"numpy.transpose"
],
[
"numpy.savez",
"numpy.arange",
"numpy.ones",
"numpy.savetxt",
"numpy.load",
"numpy.empty"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"scipy.signal.sosfilt",
"numpy.sqrt",
"numpy.linspace",
"scipy.signal.get_window",
"numpy.flipud",
"pandas.DataFrame",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.plot",
"matplotlib.mlab.specgram",
"numpy.mean",
"numpy.where",
"scipy.signal.medfilt",
"numpy.arange",
"numpy.std",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.vlines",
"scipy.signal.periodogram",
"pandas.to_numeric",
"matplotlib.pyplot.figure",
"numpy.isnan",
"matplotlib.pyplot.ylim",
"numpy.append",
"scipy.signal.zpk2sos",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.array",
"matplotlib.pyplot.show",
"scipy.signal.iirfilter",
"numpy.logical_and",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.yticks",
"numpy.log2",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"scipy.signal.gaussian"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.19",
"0.18",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
iclavera/meta-mb | [
"a1204e573c1415161129403cfb287bf120488fd0",
"a1204e573c1415161129403cfb287bf120488fd0",
"a1204e573c1415161129403cfb287bf120488fd0",
"a1204e573c1415161129403cfb287bf120488fd0"
] | [
"meta_mb/envs/mb_envs/swimmer.py",
"run_scripts/ppo_run_sweep.py",
"run_scripts/exploration_exp/mbmpo_w_exploration_run_sweep.py",
"meta_mb/envs/blue/peg_blue_env.py"
] | [
"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom gym import utils\nfrom gym.envs.mujoco import mujoco_env\nfrom meta_mb.meta_envs.base import MetaEnv\n\n\nclass SwimmerEnv(MetaEnv, mujoco_env.MujocoEnv, utils.EzPickle):\n\n def __init__(self, frame_skip=4):\n self.prev_qpos = None\n dir_path = os.path.dirname(os.path.abspath(__file__))\n mujoco_env.MujocoEnv.__init__(\n self, '%s/assets/swimmer.xml' % dir_path, frame_skip=frame_skip\n )\n utils.EzPickle.__init__(self)\n\n def step(self, action):\n old_ob = self._get_obs()\n self.do_simulation(action, self.frame_skip)\n\n if getattr(self, 'action_space', None):\n action = np.clip(action, self.action_space.low,\n self.action_space.high)\n ob = self._get_obs()\n\n reward_ctrl = -0.0001 * np.square(action).sum()\n reward_run = old_ob[3]\n reward = reward_run + reward_ctrl\n\n done = False\n return ob, reward, done, {}\n\n def _get_obs(self):\n return np.concatenate([\n # (self.model.data.qpos.flat[:1] - self.prev_qpos[:1]) / self.dt,\n # self.get_body_comvel(\"torso\")[:1],\n self.sim.data.qpos.flat[2:],\n self.sim.data.qvel.flat,\n ])\n\n def reset_model(self):\n self.set_state(\n self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),\n self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)\n )\n self.prev_qpos = np.copy(self.sim.data.qpos.flat)\n return self._get_obs()\n\n def reward(self, obs, acts, next_obs):\n assert obs.ndim == 2\n assert obs.shape == next_obs.shape\n assert obs.shape[0] == acts.shape[0]\n reward_ctrl = -0.0001 * np.sum(np.square(acts), axis=1)\n reward_run = obs[:, 3]\n reward = reward_run + reward_ctrl\n return reward\n\n def tf_reward(self, obs, acts, next_obs):\n reward_ctrl = -0.0001 * tf.reduce_sum(tf.square(acts), axis=1)\n reward_run = obs[:, 3]\n reward = reward_run + reward_ctrl\n return reward\n\nif __name__ == \"__main__\":\n env = SwimmerEnv()\n env.reset()\n for _ in range(1000):\n _ = env.render()\n ob, rew, done, info = env.step(env.action_space.sample()) # take a random action\n",
"import os\nimport json\nimport tensorflow as tf\nimport numpy as np\nfrom experiment_utils.run_sweep import run_sweep\nfrom meta_mb.utils.utils import set_seed, ClassEncoder\nfrom meta_mb.baselines.linear_baseline import LinearFeatureBaseline\nfrom meta_mb.envs.cassie.cassie_env import CassieEnv\nfrom meta_mb.envs.normalized_env import normalize\nfrom meta_mb.algos.ppo import PPO\nfrom meta_mb.envs.blue.full_blue_env import FullBlueEnv\nfrom meta_mb.trainers.mf_trainer import Trainer\nfrom meta_mb.samplers.sampler import Sampler\nfrom meta_mb.samplers.single_sample_processor import SingleSampleProcessor\nfrom meta_mb.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom meta_mb.logger import logger\n\nINSTANCE_TYPE = 'c4.2xlarge'\nEXP_NAME = 'cassie-balancing-entropy'\n\n\ndef run_experiment(**kwargs):\n exp_dir = os.getcwd() + '/data/' + EXP_NAME\n logger.configure(dir=exp_dir, format_strs=['stdout', 'log', 'csv'], snapshot_mode='last')\n json.dump(kwargs, open(exp_dir + '/params.json', 'w'), indent=2, sort_keys=True, cls=ClassEncoder)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = kwargs.get('gpu_frac', 0.95)\n sess = tf.Session(config=config)\n with sess.as_default() as sess:\n\n # Instantiate classes\n set_seed(kwargs['seed'])\n\n baseline = kwargs['baseline']()\n\n env = normalize(kwargs['env']())\n\n policy = GaussianMLPPolicy(\n name=\"policy\",\n obs_dim=np.prod(env.observation_space.shape),\n action_dim=np.prod(env.action_space.shape),\n hidden_sizes=kwargs['hidden_sizes'],\n learn_std=kwargs['learn_std'],\n hidden_nonlinearity=kwargs['hidden_nonlinearity'],\n output_nonlinearity=kwargs['output_nonlinearity'],\n init_std=kwargs['init_std'],\n )\n\n # Load policy here\n\n sampler = Sampler(\n env=env,\n policy=policy,\n num_rollouts=kwargs['num_rollouts'],\n max_path_length=kwargs['max_path_length'],\n n_parallel=kwargs['n_parallel'],\n )\n\n sample_processor = SingleSampleProcessor(\n baseline=baseline,\n discount=kwargs['discount'],\n gae_lambda=kwargs['gae_lambda'],\n normalize_adv=kwargs['normalize_adv'],\n positive_adv=kwargs['positive_adv'],\n )\n\n algo = PPO(\n policy=policy,\n learning_rate=kwargs['learning_rate'],\n clip_eps=kwargs['clip_eps'],\n max_epochs=kwargs['num_ppo_steps'],\n entropy_bonus=kwargs['entropy_bonus'],\n )\n\n trainer = Trainer(\n algo=algo,\n policy=policy,\n env=env,\n sampler=sampler,\n sample_processor=sample_processor,\n n_itr=kwargs['n_itr'],\n sess=sess,\n )\n\n trainer.train()\n\nif __name__ == '__main__':\n sweep_params = {\n 'algo': ['ppo'],\n 'seed': [1, 2],\n\n 'baseline': [LinearFeatureBaseline],\n\n 'env': [An],\n\n 'num_rollouts': [100],\n 'max_path_length': [200, 300],\n 'n_parallel': [10],\n\n 'discount': [0.99],\n 'gae_lambda': [.975, 0.5],\n 'normalize_adv': [True],\n 'positive_adv': [False],\n\n 'hidden_sizes': [(256, 256)],\n 'learn_std': [True],\n 'hidden_nonlinearity': [tf.nn.tanh],\n 'output_nonlinearity': [None],\n 'init_std': [1.],\n\n 'learning_rate': [1e-3],\n 'num_ppo_steps': [2, 5],\n 'num_minibatches': [1],\n 'clip_eps': [.3],\n 'entropy_bonus': [1e-2, 1e-3, 1e-4, 1e-5],\n\n 'n_itr': [5000],\n 'scope': [None],\n\n 'exp_tag': ['v0']\n }\n\n run_sweep(run_experiment, sweep_params, EXP_NAME, INSTANCE_TYPE)\n",
"import os\nimport json\nimport tensorflow as tf\nimport numpy as np\nfrom experiment_utils.run_sweep import run_sweep\nfrom meta_mb.utils.utils import set_seed, ClassEncoder\nfrom meta_mb.baselines.linear_baseline import LinearFeatureBaseline\nfrom meta_mb.envs.mb_envs import *\nfrom meta_mb.meta_algos.trpo_maml import TRPOMAML\nfrom meta_mb.trainers.mbmpo_w_exploration_trainer import Trainer\nfrom meta_mb.samplers.sampler import Sampler\nfrom meta_mb.samplers.meta_samplers.maml_sample_processor import MAMLSampleProcessor\nfrom meta_mb.samplers.mb_sample_processor import ModelSampleProcessor\nfrom meta_mb.samplers.mbmpo_samplers.mbmpo_sampler import MBMPOSampler\nfrom meta_mb.policies.meta_gaussian_mlp_policy import MetaGaussianMLPPolicy\nfrom meta_mb.dynamics.mlp_dynamics_ensemble import MLPDynamicsEnsemble\nfrom meta_mb.logger import logger\n\nINSTANCE_TYPE = 'c4.xlarge'\nEXP_NAME = 'exploration-mb-mpo'\n\n\ndef run_experiment(**kwargs):\n exp_dir = os.getcwd() + '/data/parallel_mb_ppo/' + EXP_NAME + kwargs.get('exp_name', '')\n logger.configure(dir=exp_dir, format_strs=['csv', 'stdout', 'log'], snapshot_mode='last')\n json.dump(kwargs, open(exp_dir + '/params.json', 'w'), indent=2, sort_keys=True, cls=ClassEncoder)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = kwargs.get('gpu_frac', 0.95)\n sess = tf.Session(config=config)\n with sess.as_default() as sess:\n\n # Instantiate classes\n set_seed(kwargs['seed'])\n\n baseline = kwargs['baseline']()\n\n env = kwargs['env']() # Wrappers?\n\n policy = MetaGaussianMLPPolicy(\n name=\"meta-policy\",\n obs_dim=np.prod(env.observation_space.shape),\n action_dim=np.prod(env.action_space.shape),\n meta_batch_size=kwargs['meta_batch_size'],\n hidden_sizes=kwargs['policy_hidden_sizes'],\n learn_std=kwargs['policy_learn_std'],\n hidden_nonlinearity=kwargs['policy_hidden_nonlinearity'],\n output_nonlinearity=kwargs['policy_output_nonlinearity'],\n )\n\n dynamics_model = MLPDynamicsEnsemble('dynamics-ensemble',\n env=env,\n num_models=kwargs['num_models'],\n hidden_nonlinearity=kwargs['dyanmics_hidden_nonlinearity'],\n hidden_sizes=kwargs['dynamics_hidden_sizes'],\n output_nonlinearity=kwargs['dyanmics_output_nonlinearity'],\n learning_rate=kwargs['dynamics_learning_rate'],\n batch_size=kwargs['dynamics_batch_size'],\n buffer_size=kwargs['dynamics_buffer_size'],\n rolling_average_persitency=kwargs['rolling_average_persitency']\n )\n env_sampler = Sampler(\n env=env,\n policy=policy,\n num_rollouts=kwargs['meta_batch_size'],\n max_path_length=kwargs['max_path_length'],\n n_parallel=kwargs['n_parallel'],\n )\n\n model_sampler = MBMPOSampler(\n env=env,\n policy=policy,\n rollouts_per_meta_task=kwargs['rollouts_per_meta_task'],\n meta_batch_size=kwargs['meta_batch_size'],\n max_path_length=kwargs['max_path_length'],\n dynamics_model=dynamics_model,\n deterministic=kwargs['deterministic'],\n )\n\n dynamics_sample_processor = ModelSampleProcessor(\n baseline=baseline,\n discount=kwargs['discount'],\n gae_lambda=kwargs['gae_lambda'],\n normalize_adv=kwargs['normalize_adv'],\n positive_adv=kwargs['positive_adv'],\n )\n\n model_sample_processor = MAMLSampleProcessor(\n baseline=baseline,\n discount=kwargs['discount'],\n gae_lambda=kwargs['gae_lambda'],\n normalize_adv=kwargs['normalize_adv'],\n positive_adv=kwargs['positive_adv'],\n )\n\n algo = TRPOMAML(\n policy=policy,\n step_size=kwargs['step_size'],\n inner_type=kwargs['inner_type'],\n inner_lr=kwargs['inner_lr'],\n 
meta_batch_size=kwargs['meta_batch_size'],\n num_inner_grad_steps=kwargs['num_inner_grad_steps'],\n exploration=kwargs['exploration'],\n )\n\n trainer = Trainer(\n algo=algo,\n policy=policy,\n env=env,\n model_sampler=model_sampler,\n env_sampler=env_sampler,\n model_sample_processor=model_sample_processor,\n dynamics_sample_processor=dynamics_sample_processor,\n dynamics_model=dynamics_model,\n n_itr=kwargs['n_itr'],\n num_rollouts_per_iter=kwargs['num_rollouts'],\n num_inner_grad_steps=kwargs['num_inner_grad_steps'],\n dynamics_model_max_epochs=kwargs['dynamics_max_epochs'],\n log_real_performance=kwargs['log_real_performance'],\n meta_steps_per_iter=kwargs['meta_steps_per_iter'],\n sample_from_buffer=kwargs['sample_from_buffer'],\n sess=sess,\n )\n\n trainer.train()\n\n\nif __name__ == '__main__':\n\n sweep_params = {\n 'seed': [1, 2],\n\n 'algo': ['mbmpo'],\n 'baseline': [LinearFeatureBaseline],\n 'env': [AntEnv, Walker2dEnv, HalfCheetahEnv, HopperEnv],\n\n # Problem Conf\n 'n_itr': [500],\n 'max_path_length': [200],\n 'discount': [0.99],\n 'gae_lambda': [1],\n 'normalize_adv': [True],\n 'positive_adv': [False],\n 'log_real_performance': [False],\n 'meta_steps_per_iter': [(50, 50)],\n\n # Real Env Sampling\n 'num_rollouts': [5, 10, 20],\n 'n_parallel': [1],\n 'fraction_meta_batch_size': [1.],\n\n # Dynamics Model\n 'num_models': [5],\n 'dynamics_hidden_sizes': [(512, 512, 512)],\n 'dyanmics_hidden_nonlinearity': ['relu'],\n 'dyanmics_output_nonlinearity': [None],\n 'dynamics_max_epochs': [200],\n 'dynamics_learning_rate': [1e-3],\n 'dynamics_batch_size': [256],\n 'dynamics_buffer_size': [25000],\n 'rolling_average_persitency': [0.9, 0.4, 0.1],\n 'deterministic': [False],\n\n # Policy\n 'policy_hidden_sizes': [(64, 64)],\n 'policy_learn_std': [True],\n 'policy_hidden_nonlinearity': [tf.tanh],\n 'policy_output_nonlinearity': [None],\n\n # Meta-Algo\n 'meta_batch_size': [20], # Note: It has to be multiple of num_models\n 'rollouts_per_meta_task': [50],\n 'num_inner_grad_steps': [1],\n 'inner_lr': [0.001],\n 'inner_type': ['log_likelihood'],\n 'step_size': [0.01],\n 'exploration': [False],\n 'sample_from_buffer': [True],\n\n 'scope': [None],\n 'exp_tag': ['mbmpo_all'], # For changes besides hyperparams\n }\n\n run_sweep(run_experiment, sweep_params, EXP_NAME, INSTANCE_TYPE)\n\n",
"import numpy as np\nfrom gym.envs.mujoco import mujoco_env\nfrom gym import utils\nimport os\nfrom scipy.spatial.distance import euclidean\nfrom meta_mb.meta_envs.base import RandomEnv\n# from mujoco-py.mujoco_py.pxd.mujoco import local\nimport mujoco_py\n\n\nclass PegArmBlueEnv(RandomEnv, utils.EzPickle):\n def __init__(self, goal_dist=0.1, log_rand=0, frame_skip=25):\n utils.EzPickle.__init__(**locals())\n\n xml_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'assets', 'blue_right_peg_v2.xml')\n\n self.peg_loc = np.zeros(3)\n self.reach_goal = np.zeros(3)\n self.peg_board = np.zeros(3)\n self.goal_dist = goal_dist # permissible distance from goal\n\n self.reached = False\n\n RandomEnv.__init__(self, log_rand, xml_file, frame_skip)\n\n\n def _get_obs(self):\n return np.concatenate([\n self.sim.data.qpos.flat,\n self.sim.data.qvel.flat,\n self.get_body_com(\"peg\"),\n self.peg_location() - self.peg_board\n ])\n\n def step(self, action):\n self.do_simulation(action, self.frame_skip)\n self.peg_loc = self.peg_location()\n\n if not self.reached:\n reward_dist = -1.5 * self.reach_dist()\n else:\n reward_dist = -self.peg_insertion()\n\n joint_velocities = self.sim.data.qvel\n reward_ctrl = -np.square(joint_velocities).sum()\n reward = reward_dist + 1.25e-3 * reward_ctrl\n\n observation = self._get_obs()\n done = False\n info = dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)\n return observation, reward, done, info\n\n def reset_model(self):\n qpos = self.init_qpos + self.np_random.uniform(low=-0.01, high=0.01, size=self.model.nq)\n qvel = self.init_qvel + self.np_random.uniform(low=-0.01, high=0.01, size=self.model.nv)\n\n self.peg_board = self.random_pos()\n self.sim.model.body_pos[-4] = self.peg_board\n self.sim.model.body_pos[-3] = self.g1()\n self.sim.model.body_pos[-2] = self.g2()\n self.sim.model.body_pos[-1] = self.g3()\n\n observation = self._get_obs()\n return observation\n\n def reward(self, obs, act, obs_next):\n assert obs.ndim == act.ndim == obs_next.ndim\n if obs.ndim == 2:\n assert obs.shape == obs_next.shape and act.shape[0] == obs.shape[0]\n joint_velocities = self.sim.data.qvel\n reward_ctrl = -np.sum(np.square(joint_velocities), axis=1)\n if not self.reached:\n reward_dist = -1.5 * self.reach_dist()\n else:\n reward_dist = -self.peg_insertion()\n reward = reward_dist + 1.25e-3 * reward_ctrl\n return np.clip(reward, -1e2, 1e2)\n elif obs.ndim == 1:\n assert obs.shape == obs_next.shape\n reward_ctrl = -np.sum(np.square(act))\n reward_dist = -self.peg_dist()\n reward = reward_dist + 1.25e-4 * reward_ctrl\n return np.clip(reward, -1e2, 1e2)\n else:\n raise NotImplementedError\n\n def random_pos(self):\n x = np.random.uniform(low=-0.3, high=0.3)\n y = np.random.uniform(low=-0.25, high=0.25)\n if abs(x) < 0.1:\n sign = x / abs(x)\n x += 0.2 * sign\n if abs(y) < 0.1:\n sign = y / abs(y)\n y += 0.2 * sign\n return np.array([x, y, 0.01])\n\n def peg_location(self):\n return self.get_body_com(\"peg\")\n\n def peg_orient(self):\n return self.data.get_body_xquat(\"peg\")\n\n def reach_dist(self):\n center = self.get_body_com(\"center\")\n g1 = self.get_body_com(\"g1\")\n\n dist = euclidean(center, g1)\n if dist < self.goal_dist:\n self.reached = True\n return dist\n\n def peg_insertion(self):\n top = self.get_body_com(\"top\")\n bottom = self.get_body_com(\"bottom\")\n g2 = self.get_body_com(\"g2\")\n g3 = self.get_body_com(\"g3\")\n\n return euclidean(top, g2) + euclidean(bottom, g3)\n\n def g1(self):\n x = 0.092\n y = 0\n z = 0.75\n return np.array([x, y, 
z])\n\n def g2(self):\n x = 0.092\n y = 0 \n z = 0.078\n return np.array([x, y, z])\n\n def g3(self):\n x = 0.092\n y = 0\n z = 0\n return np.array([x, y, z])\n\n def viewer_setup(self):\n self.viewer.cam.distance = self.model.stat.extent * 2\n self.viewer.cam.elevation = -20\n self.viewer.cam.type = 0\n self.viewer.cam.azimuth = 180\n\n\nif __name__ == \"__main__\":\n env = PegArmBlueEnv()\n while True:\n env.reset()\n for _ in range(500):\n action = env.action_space.sample()\n env.step(action)\n env.render()"
] | [
[
"numpy.square",
"numpy.clip",
"numpy.concatenate",
"numpy.copy",
"tensorflow.square"
],
[
"tensorflow.ConfigProto",
"numpy.prod",
"tensorflow.Session"
],
[
"tensorflow.ConfigProto",
"numpy.prod",
"tensorflow.Session"
],
[
"numpy.square",
"numpy.clip",
"scipy.spatial.distance.euclidean",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
ai-xiaotong/face-benchmark | [
"4a4a336be6df72699120d6f0a4b7160904375cb0"
] | [
"run_evaluation.py"
] | [
"import os\nfrom itertools import islice\nimport numpy as np\nimport argparse\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nfrom helper import roc, find_TAR_and_TH_by_FAR\n\n\ndef get_roc(path, start=1., stop=100., step=0.1):\n batch_size = 1000\n scores, gts = [], []\n with open(path, 'r') as f:\n while True:\n next_n_lines = list(islice(f, batch_size))\n if not next_n_lines:\n break\n for line in next_n_lines:\n tokens = line.strip().split(',')\n score = float(tokens[-1])\n pair = ','.join(tokens[:-1]).split(':')\n gt = pair[0].split('/')[-2] == pair[1].split('/')[-2]\n scores.append(score)\n gts.append(gt)\n\n scores = np.array(scores)\n gts = np.array(gts)\n FARs, TARs, THs = roc(gts, scores, start=start, stop=stop, step=step)\n return FARs, TARs, THs\n\n\ndef main(csv_list):\n platforms = [os.path.basename(csv.split('.')[0]) for csv in csv_list]\n for platform, csv_path in zip(platforms, csv_list):\n print(platform)\n FARs, TARs, THs = get_roc(csv_path, start=0., stop=100., step=0.1)\n far_2, tar_2, th_2 = find_TAR_and_TH_by_FAR(FARs, TARs, THs, target_FAR=1.e-2)\n print('FAR: {}, TAR: {}, TH: {}'.format(far_2, tar_2, th_2))\n far_3, tar_3, th_3 = find_TAR_and_TH_by_FAR(FARs, TARs, THs, target_FAR=1.e-3)\n print('FAR: {}, TAR: {}, TH: {}'.format(far_3, tar_3, th_3))\n far_4, tar_4, th_4 = find_TAR_and_TH_by_FAR(FARs, TARs, THs, target_FAR=1.e-4)\n print('FAR: {}, TAR: {}, TH: {}'.format(far_4, tar_4, th_4))\n # Plot ROC\n plt.plot(FARs, TARs)\n\n plt.title('Face verification ROC')\n plt.xscale('log')\n plt.xlabel('FAR')\n plt.ylabel('TAR')\n plt.legend([os.path.basename(csv.split('.')[0]) for csv in csv_list])\n plt.grid(True)\n plt.savefig('roc.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('score', nargs='+', help='score csv file path list', type=str)\n args = parser.parse_args()\n\n main(args.score)\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.use",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
willBear/willBear-Fundamental_Analysis | [
"bc67eb1e69dcf6765c0b77314d37f7f165a7318f",
"bc67eb1e69dcf6765c0b77314d37f7f165a7318f",
"4071dde86e33434e1bee8304fa62074949f813cc",
"bc67eb1e69dcf6765c0b77314d37f7f165a7318f",
"bc67eb1e69dcf6765c0b77314d37f7f165a7318f",
"bc67eb1e69dcf6765c0b77314d37f7f165a7318f",
"bc67eb1e69dcf6765c0b77314d37f7f165a7318f",
"bc67eb1e69dcf6765c0b77314d37f7f165a7318f"
] | [
"venv/lib/python3.8/site-packages/pyqtgraph/graphicsItems/CurvePoint.py",
"venv/lib/python3.8/site-packages/mpl_toolkits/axes_grid1/axes_size.py",
"venv/lib/python3.8/site-packages/pandas/tests/tseries/offsets/test_yqm_offsets.py",
"venv/lib/python3.8/site-packages/pyqtgraph/examples/RemoteSpeedTest.py",
"venv/lib/python3.8/site-packages/pyqtgraph/examples/PlotSpeedTest.py",
"venv/lib/python3.8/site-packages/mpl_toolkits/axes_grid1/axes_divider.py",
"venv/lib/python3.8/site-packages/pyqtgraph/examples/GLMeshItem.py",
"venv/lib/python3.8/site-packages/matplotlib/tests/test_bbox_tight.py"
] | [
"from ..Qt import QtGui, QtCore\nfrom . import ArrowItem\nimport numpy as np\nfrom ..Point import Point\nimport weakref\nfrom .GraphicsObject import GraphicsObject\n\n__all__ = ['CurvePoint', 'CurveArrow']\nclass CurvePoint(GraphicsObject):\n \"\"\"A GraphicsItem that sets its location to a point on a PlotCurveItem.\n Also rotates to be tangent to the curve.\n The position along the curve is a Qt property, and thus can be easily animated.\n \n Note: This class does not display anything; see CurveArrow for an applied example\n \"\"\"\n \n def __init__(self, curve, index=0, pos=None, rotate=True):\n \"\"\"Position can be set either as an index referring to the sample number or\n the position 0.0 - 1.0\n If *rotate* is True, then the item rotates to match the tangent of the curve.\n \"\"\"\n \n GraphicsObject.__init__(self)\n #QObjectWorkaround.__init__(self)\n self._rotate = rotate\n self.curve = weakref.ref(curve)\n self.setParentItem(curve)\n self.setProperty('position', 0.0)\n self.setProperty('index', 0)\n \n if hasattr(self, 'ItemHasNoContents'):\n self.setFlags(self.flags() | self.ItemHasNoContents)\n \n if pos is not None:\n self.setPos(pos)\n else:\n self.setIndex(index)\n \n def setPos(self, pos):\n self.setProperty('position', float(pos))## cannot use numpy types here, MUST be python float.\n \n def setIndex(self, index):\n self.setProperty('index', int(index)) ## cannot use numpy types here, MUST be python int.\n \n def event(self, ev):\n if not isinstance(ev, QtCore.QDynamicPropertyChangeEvent) or self.curve() is None:\n return False\n \n if ev.propertyName() == 'index':\n index = self.property('index')\n if 'QVariant' in repr(index):\n index = index.toInt()[0]\n elif ev.propertyName() == 'position':\n index = None\n else:\n return False\n \n (x, y) = self.curve().getData()\n if index is None:\n #print ev.propertyName(), self.property('position').toDouble()[0], self.property('position').typeName()\n pos = self.property('position')\n if 'QVariant' in repr(pos): ## need to support 2 APIs :(\n pos = pos.toDouble()[0]\n index = (len(x)-1) * np.clip(pos, 0.0, 1.0)\n \n if index != int(index): ## interpolate floating-point values\n i1 = int(index)\n i2 = np.clip(i1+1, 0, len(x)-1)\n s2 = index-i1\n s1 = 1.0-s2\n newPos = (x[i1]*s1+x[i2]*s2, y[i1]*s1+y[i2]*s2)\n else:\n index = int(index)\n i1 = np.clip(index-1, 0, len(x)-1)\n i2 = np.clip(index+1, 0, len(x)-1)\n newPos = (x[index], y[index])\n \n p1 = self.parentItem().mapToScene(QtCore.QPointF(x[i1], y[i1]))\n p2 = self.parentItem().mapToScene(QtCore.QPointF(x[i2], y[i2]))\n ang = np.arctan2(p2.y()-p1.y(), p2.x()-p1.x()) ## returns radians\n self.resetTransform()\n if self._rotate:\n self.rotate(180+ ang * 180 / np.pi) ## takes degrees\n QtGui.QGraphicsItem.setPos(self, *newPos)\n return True\n \n def boundingRect(self):\n return QtCore.QRectF()\n \n def paint(self, *args):\n pass\n \n def makeAnimation(self, prop='position', start=0.0, end=1.0, duration=10000, loop=1):\n # In Python 3, a bytes object needs to be used as a property name in\n # QPropertyAnimation. 
PyQt stopped automatically encoding a str when a\n # QByteArray was expected in v5.5 (see qbytearray.sip).\n if not isinstance(prop, bytes):\n prop = prop.encode('latin-1')\n anim = QtCore.QPropertyAnimation(self, prop)\n anim.setDuration(duration)\n anim.setStartValue(start)\n anim.setEndValue(end)\n anim.setLoopCount(loop)\n return anim\n\n\nclass CurveArrow(CurvePoint):\n \"\"\"Provides an arrow that points to any specific sample on a PlotCurveItem.\n Provides properties that can be animated.\"\"\"\n \n def __init__(self, curve, index=0, pos=None, **opts):\n CurvePoint.__init__(self, curve, index=index, pos=pos)\n if opts.get('pxMode', True):\n opts['pxMode'] = False\n self.setFlags(self.flags() | self.ItemIgnoresTransformations)\n opts['angle'] = 0\n self.arrow = ArrowItem.ArrowItem(**opts)\n self.arrow.setParentItem(self)\n \n def setStyle(self, **opts):\n return self.arrow.setStyle(**opts)\n \n",
"\"\"\"\nProvides classes of simple units that will be used with AxesDivider\nclass (or others) to determine the size of each axes. The unit\nclasses define `get_size` method that returns a tuple of two floats,\nmeaning relative and absolute sizes, respectively.\n\nNote that this class is nothing more than a simple tuple of two\nfloats. Take a look at the Divider class to see how these two\nvalues are used.\n\"\"\"\n\nfrom numbers import Number\n\nfrom matplotlib import cbook\nfrom matplotlib.axes import Axes\n\n\nclass _Base:\n \"Base class\"\n\n def __rmul__(self, other):\n float(other) # just to check if number if given\n return Fraction(other, self)\n\n def __add__(self, other):\n if isinstance(other, _Base):\n return Add(self, other)\n else:\n float(other)\n other = Fixed(other)\n return Add(self, other)\n\n\nclass Add(_Base):\n def __init__(self, a, b):\n self._a = a\n self._b = b\n\n def get_size(self, renderer):\n a_rel_size, a_abs_size = self._a.get_size(renderer)\n b_rel_size, b_abs_size = self._b.get_size(renderer)\n return a_rel_size + b_rel_size, a_abs_size + b_abs_size\n\n\nclass AddList(_Base):\n def __init__(self, add_list):\n self._list = add_list\n\n def get_size(self, renderer):\n sum_rel_size = sum([a.get_size(renderer)[0] for a in self._list])\n sum_abs_size = sum([a.get_size(renderer)[1] for a in self._list])\n return sum_rel_size, sum_abs_size\n\n\nclass Fixed(_Base):\n \"\"\"\n Simple fixed size with absolute part = *fixed_size* and relative part = 0.\n \"\"\"\n def __init__(self, fixed_size):\n self.fixed_size = fixed_size\n\n def get_size(self, renderer):\n rel_size = 0.\n abs_size = self.fixed_size\n return rel_size, abs_size\n\n\nclass Scaled(_Base):\n \"\"\"\n Simple scaled(?) size with absolute part = 0 and\n relative part = *scalable_size*.\n \"\"\"\n\n def __init__(self, scalable_size):\n self._scalable_size = scalable_size\n\n def get_size(self, renderer):\n rel_size = self._scalable_size\n abs_size = 0.\n return rel_size, abs_size\n\nScalable = Scaled\n\n\ndef _get_axes_aspect(ax):\n aspect = ax.get_aspect()\n # when aspec is \"auto\", consider it as 1.\n if aspect in ('normal', 'auto'):\n aspect = 1.\n elif aspect == \"equal\":\n aspect = 1\n else:\n aspect = float(aspect)\n\n return aspect\n\n\nclass AxesX(_Base):\n \"\"\"\n Scaled size whose relative part corresponds to the data width\n of the *axes* multiplied by the *aspect*.\n \"\"\"\n def __init__(self, axes, aspect=1., ref_ax=None):\n self._axes = axes\n self._aspect = aspect\n if aspect == \"axes\" and ref_ax is None:\n raise ValueError(\"ref_ax must be set when aspect='axes'\")\n self._ref_ax = ref_ax\n\n def get_size(self, renderer):\n l1, l2 = self._axes.get_xlim()\n if self._aspect == \"axes\":\n ref_aspect = _get_axes_aspect(self._ref_ax)\n aspect = ref_aspect / _get_axes_aspect(self._axes)\n else:\n aspect = self._aspect\n\n rel_size = abs(l2-l1)*aspect\n abs_size = 0.\n return rel_size, abs_size\n\n\nclass AxesY(_Base):\n \"\"\"\n Scaled size whose relative part corresponds to the data height\n of the *axes* multiplied by the *aspect*.\n \"\"\"\n def __init__(self, axes, aspect=1., ref_ax=None):\n self._axes = axes\n self._aspect = aspect\n if aspect == \"axes\" and ref_ax is None:\n raise ValueError(\"ref_ax must be set when aspect='axes'\")\n self._ref_ax = ref_ax\n\n def get_size(self, renderer):\n l1, l2 = self._axes.get_ylim()\n\n if self._aspect == \"axes\":\n ref_aspect = _get_axes_aspect(self._ref_ax)\n aspect = _get_axes_aspect(self._axes)\n else:\n aspect = self._aspect\n\n 
rel_size = abs(l2-l1)*aspect\n abs_size = 0.\n return rel_size, abs_size\n\n\nclass MaxExtent(_Base):\n \"\"\"\n Size whose absolute part is the largest width (or height) of\n the given *artist_list*.\n \"\"\"\n def __init__(self, artist_list, w_or_h):\n self._artist_list = artist_list\n\n cbook._check_in_list([\"width\", \"height\"], w_or_h=w_or_h)\n self._w_or_h = w_or_h\n\n def add_artist(self, a):\n self._artist_list.append(a)\n\n def get_size(self, renderer):\n rel_size = 0.\n w_list, h_list = [], []\n for a in self._artist_list:\n bb = a.get_window_extent(renderer)\n w_list.append(bb.width)\n h_list.append(bb.height)\n dpi = a.get_figure().get_dpi()\n if self._w_or_h == \"width\":\n abs_size = max(w_list)/dpi\n elif self._w_or_h == \"height\":\n abs_size = max(h_list)/dpi\n\n return rel_size, abs_size\n\n\nclass MaxWidth(_Base):\n \"\"\"\n Size whose absolute part is the largest width of\n the given *artist_list*.\n \"\"\"\n def __init__(self, artist_list):\n self._artist_list = artist_list\n\n def add_artist(self, a):\n self._artist_list.append(a)\n\n def get_size(self, renderer):\n rel_size = 0.\n w_list = []\n for a in self._artist_list:\n bb = a.get_window_extent(renderer)\n w_list.append(bb.width)\n dpi = a.get_figure().get_dpi()\n abs_size = max(w_list)/dpi\n\n return rel_size, abs_size\n\n\nclass MaxHeight(_Base):\n \"\"\"\n Size whose absolute part is the largest height of\n the given *artist_list*.\n \"\"\"\n def __init__(self, artist_list):\n self._artist_list = artist_list\n\n def add_artist(self, a):\n self._artist_list.append(a)\n\n def get_size(self, renderer):\n rel_size = 0.\n h_list = []\n for a in self._artist_list:\n bb = a.get_window_extent(renderer)\n h_list.append(bb.height)\n dpi = a.get_figure().get_dpi()\n abs_size = max(h_list)/dpi\n\n return rel_size, abs_size\n\n\nclass Fraction(_Base):\n \"\"\"\n An instance whose size is a *fraction* of the *ref_size*.\n\n >>> s = Fraction(0.3, AxesX(ax))\n\n \"\"\"\n def __init__(self, fraction, ref_size):\n self._fraction_ref = ref_size\n self._fraction = fraction\n\n def get_size(self, renderer):\n if self._fraction_ref is None:\n return self._fraction, 0.\n else:\n r, a = self._fraction_ref.get_size(renderer)\n rel_size = r*self._fraction\n abs_size = a*self._fraction\n return rel_size, abs_size\n\n\nclass Padded(_Base):\n \"\"\"\n Return a instance where the absolute part of *size* is\n increase by the amount of *pad*.\n \"\"\"\n def __init__(self, size, pad):\n self._size = size\n self._pad = pad\n\n def get_size(self, renderer):\n r, a = self._size.get_size(renderer)\n rel_size = r\n abs_size = a + self._pad\n return rel_size, abs_size\n\n\ndef from_any(size, fraction_ref=None):\n \"\"\"\n Creates Fixed unit when the first argument is a float, or a\n Fraction unit if that is a string that ends with %. 
The second\n argument is only meaningful when Fraction unit is created.::\n\n >>> a = Size.from_any(1.2) # => Size.Fixed(1.2)\n >>> Size.from_any(\"50%\", a) # => Size.Fraction(0.5, a)\n\n \"\"\"\n if isinstance(size, Number):\n return Fixed(size)\n elif isinstance(size, str):\n if size[-1] == \"%\":\n return Fraction(float(size[:-1]) / 100, fraction_ref)\n\n raise ValueError(\"Unknown format\")\n\n\nclass SizeFromFunc(_Base):\n def __init__(self, func):\n self._func = func\n\n def get_size(self, renderer):\n rel_size = 0.\n\n bb = self._func(renderer)\n dpi = renderer.points_to_pixels(72.)\n abs_size = bb/dpi\n\n return rel_size, abs_size\n\n\nclass GetExtentHelper:\n _get_func_map = {\n \"left\": lambda self, axes_bbox: axes_bbox.xmin - self.xmin,\n \"right\": lambda self, axes_bbox: self.xmax - axes_bbox.xmax,\n \"bottom\": lambda self, axes_bbox: axes_bbox.ymin - self.ymin,\n \"top\": lambda self, axes_bbox: self.ymax - axes_bbox.ymax,\n }\n\n def __init__(self, ax, direction):\n cbook._check_in_list(self._get_func_map, direction=direction)\n self._ax_list = [ax] if isinstance(ax, Axes) else ax\n self._direction = direction\n\n def __call__(self, renderer):\n get_func = self._get_func_map[self._direction]\n vl = [get_func(ax.get_tightbbox(renderer, call_axes_locator=False),\n ax.bbox)\n for ax in self._ax_list]\n return max(vl)\n",
"\"\"\"\nTests for Year, Quarter, and Month-based DateOffset subclasses\n\"\"\"\nfrom datetime import datetime\n\nimport pytest\n\nimport pandas as pd\nfrom pandas import Timestamp\n\nfrom pandas.tseries.offsets import (\n BMonthBegin,\n BMonthEnd,\n BQuarterBegin,\n BQuarterEnd,\n BYearBegin,\n BYearEnd,\n MonthBegin,\n MonthEnd,\n QuarterBegin,\n QuarterEnd,\n YearBegin,\n YearEnd,\n)\n\nfrom .common import assert_is_on_offset, assert_offset_equal\nfrom .test_offsets import Base\n\n# --------------------------------------------------------------------\n# Misc\n\n\ndef test_quarterly_dont_normalize():\n date = datetime(2012, 3, 31, 5, 30)\n\n offsets = (QuarterBegin, QuarterEnd, BQuarterEnd, BQuarterBegin)\n\n for klass in offsets:\n result = date + klass()\n assert result.time() == date.time()\n\n\[email protected](\"n\", [-2, 1])\[email protected](\n \"cls\",\n [\n MonthBegin,\n MonthEnd,\n BMonthBegin,\n BMonthEnd,\n QuarterBegin,\n QuarterEnd,\n BQuarterBegin,\n BQuarterEnd,\n YearBegin,\n YearEnd,\n BYearBegin,\n BYearEnd,\n ],\n)\ndef test_apply_index(cls, n):\n offset = cls(n=n)\n rng = pd.date_range(start=\"1/1/2000\", periods=100000, freq=\"T\")\n ser = pd.Series(rng)\n\n res = rng + offset\n res_v2 = offset.apply_index(rng)\n assert (res == res_v2).all()\n assert res[0] == rng[0] + offset\n assert res[-1] == rng[-1] + offset\n res2 = ser + offset\n # apply_index is only for indexes, not series, so no res2_v2\n assert res2.iloc[0] == ser.iloc[0] + offset\n assert res2.iloc[-1] == ser.iloc[-1] + offset\n\n\[email protected](\n \"offset\", [QuarterBegin(), QuarterEnd(), BQuarterBegin(), BQuarterEnd()]\n)\ndef test_on_offset(offset):\n dates = [\n datetime(2016, m, d)\n for m in [10, 11, 12]\n for d in [1, 2, 3, 28, 29, 30, 31]\n if not (m == 11 and d == 31)\n ]\n for date in dates:\n res = offset.is_on_offset(date)\n slow_version = date == (date + offset) - offset\n assert res == slow_version\n\n\n# --------------------------------------------------------------------\n# Months\n\n\nclass TestMonthBegin(Base):\n _offset = MonthBegin\n\n offset_cases = []\n # NOTE: I'm not entirely happy with the logic here for Begin -ss\n # see thread 'offset conventions' on the ML\n offset_cases.append(\n (\n MonthBegin(),\n {\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2008, 2, 1): datetime(2008, 3, 1),\n datetime(2006, 12, 31): datetime(2007, 1, 1),\n datetime(2006, 12, 1): datetime(2007, 1, 1),\n datetime(2007, 1, 31): datetime(2007, 2, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthBegin(0),\n {\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2006, 12, 3): datetime(2007, 1, 1),\n datetime(2007, 1, 31): datetime(2007, 2, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthBegin(2),\n {\n datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 1, 31): datetime(2008, 3, 1),\n datetime(2006, 12, 31): datetime(2007, 2, 1),\n datetime(2007, 12, 28): datetime(2008, 2, 1),\n datetime(2007, 1, 1): datetime(2007, 3, 1),\n datetime(2006, 11, 1): datetime(2007, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthBegin(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 1),\n datetime(2008, 5, 31): datetime(2008, 5, 1),\n datetime(2008, 12, 31): datetime(2008, 12, 1),\n datetime(2006, 12, 29): datetime(2006, 12, 1),\n datetime(2006, 1, 2): datetime(2006, 1, 1),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n 
assert_offset_equal(offset, base, expected)\n\n\nclass TestMonthEnd(Base):\n _offset = MonthEnd\n\n def test_day_of_month(self):\n dt = datetime(2007, 1, 1)\n offset = MonthEnd()\n\n result = dt + offset\n assert result == Timestamp(2007, 1, 31)\n\n result = result + offset\n assert result == Timestamp(2007, 2, 28)\n\n def test_normalize(self):\n dt = datetime(2007, 1, 1, 3)\n\n result = dt + MonthEnd(normalize=True)\n expected = dt.replace(hour=0) + MonthEnd()\n assert result == expected\n\n offset_cases = []\n offset_cases.append(\n (\n MonthEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 2, 29),\n datetime(2006, 12, 29): datetime(2006, 12, 31),\n datetime(2006, 12, 31): datetime(2007, 1, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 31),\n datetime(2006, 12, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2006, 12, 29): datetime(2006, 12, 31),\n datetime(2006, 12, 31): datetime(2006, 12, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthEnd(2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 29),\n datetime(2008, 1, 31): datetime(2008, 3, 31),\n datetime(2006, 12, 29): datetime(2007, 1, 31),\n datetime(2006, 12, 31): datetime(2007, 2, 28),\n datetime(2007, 1, 1): datetime(2007, 2, 28),\n datetime(2006, 11, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthEnd(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 5, 31),\n datetime(2008, 12, 31): datetime(2008, 11, 30),\n datetime(2006, 12, 29): datetime(2006, 11, 30),\n datetime(2006, 12, 30): datetime(2006, 11, 30),\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (MonthEnd(), datetime(2007, 12, 31), True),\n (MonthEnd(), datetime(2008, 1, 1), False),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestBMonthBegin(Base):\n _offset = BMonthBegin\n\n def test_offsets_compare_equal(self):\n # root cause of #456\n offset1 = BMonthBegin()\n offset2 = BMonthBegin()\n assert not offset1 != offset2\n\n offset_cases = []\n offset_cases.append(\n (\n BMonthBegin(),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 1),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2006, 12, 29): datetime(2007, 1, 1),\n datetime(2006, 12, 31): datetime(2007, 1, 1),\n datetime(2006, 9, 1): datetime(2006, 10, 2),\n datetime(2007, 1, 1): datetime(2007, 2, 1),\n datetime(2006, 12, 1): datetime(2007, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthBegin(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2006, 10, 2): datetime(2006, 10, 2),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2006, 12, 29): datetime(2007, 1, 1),\n datetime(2006, 12, 31): datetime(2007, 1, 1),\n datetime(2006, 9, 15): datetime(2006, 10, 2),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthBegin(2),\n {\n datetime(2008, 1, 1): datetime(2008, 3, 3),\n datetime(2008, 1, 15): datetime(2008, 3, 3),\n datetime(2006, 12, 29): datetime(2007, 2, 1),\n datetime(2006, 12, 31): datetime(2007, 2, 
1),\n datetime(2007, 1, 1): datetime(2007, 3, 1),\n datetime(2006, 11, 1): datetime(2007, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthBegin(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 1),\n datetime(2008, 6, 30): datetime(2008, 6, 2),\n datetime(2008, 6, 1): datetime(2008, 5, 1),\n datetime(2008, 3, 10): datetime(2008, 3, 3),\n datetime(2008, 12, 31): datetime(2008, 12, 1),\n datetime(2006, 12, 29): datetime(2006, 12, 1),\n datetime(2006, 12, 30): datetime(2006, 12, 1),\n datetime(2007, 1, 1): datetime(2006, 12, 1),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (BMonthBegin(), datetime(2007, 12, 31), False),\n (BMonthBegin(), datetime(2008, 1, 1), True),\n (BMonthBegin(), datetime(2001, 4, 2), True),\n (BMonthBegin(), datetime(2008, 3, 3), True),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestBMonthEnd(Base):\n _offset = BMonthEnd\n\n def test_normalize(self):\n dt = datetime(2007, 1, 1, 3)\n\n result = dt + BMonthEnd(normalize=True)\n expected = dt.replace(hour=0) + BMonthEnd()\n assert result == expected\n\n def test_offsets_compare_equal(self):\n # root cause of #456\n offset1 = BMonthEnd()\n offset2 = BMonthEnd()\n assert not offset1 != offset2\n\n offset_cases = []\n offset_cases.append(\n (\n BMonthEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 2, 29),\n datetime(2006, 12, 29): datetime(2007, 1, 31),\n datetime(2006, 12, 31): datetime(2007, 1, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 31),\n datetime(2006, 12, 1): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2006, 12, 29): datetime(2006, 12, 29),\n datetime(2006, 12, 31): datetime(2007, 1, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthEnd(2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 29),\n datetime(2008, 1, 31): datetime(2008, 3, 31),\n datetime(2006, 12, 29): datetime(2007, 2, 28),\n datetime(2006, 12, 31): datetime(2007, 2, 28),\n datetime(2007, 1, 1): datetime(2007, 2, 28),\n datetime(2006, 11, 1): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthEnd(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 29),\n datetime(2008, 6, 30): datetime(2008, 5, 30),\n datetime(2008, 12, 31): datetime(2008, 11, 28),\n datetime(2006, 12, 29): datetime(2006, 11, 30),\n datetime(2006, 12, 30): datetime(2006, 12, 29),\n datetime(2007, 1, 1): datetime(2006, 12, 29),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (BMonthEnd(), datetime(2007, 12, 31), True),\n (BMonthEnd(), datetime(2008, 1, 1), False),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\n# --------------------------------------------------------------------\n# Quarters\n\n\nclass TestQuarterBegin(Base):\n def test_repr(self):\n expected = \"<QuarterBegin: 
startingMonth=3>\"\n assert repr(QuarterBegin()) == expected\n expected = \"<QuarterBegin: startingMonth=3>\"\n assert repr(QuarterBegin(startingMonth=3)) == expected\n expected = \"<QuarterBegin: startingMonth=1>\"\n assert repr(QuarterBegin(startingMonth=1)) == expected\n\n def test_is_anchored(self):\n assert QuarterBegin(startingMonth=1).is_anchored()\n assert QuarterBegin().is_anchored()\n assert not QuarterBegin(2, startingMonth=1).is_anchored()\n\n def test_offset_corner_case(self):\n # corner\n offset = QuarterBegin(n=-1, startingMonth=1)\n assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)\n\n offset_cases = []\n offset_cases.append(\n (\n QuarterBegin(startingMonth=1),\n {\n datetime(2007, 12, 1): datetime(2008, 1, 1),\n datetime(2008, 1, 1): datetime(2008, 4, 1),\n datetime(2008, 2, 15): datetime(2008, 4, 1),\n datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 3, 15): datetime(2008, 4, 1),\n datetime(2008, 3, 31): datetime(2008, 4, 1),\n datetime(2008, 4, 15): datetime(2008, 7, 1),\n datetime(2008, 4, 1): datetime(2008, 7, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterBegin(startingMonth=2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 1),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2008, 1, 15): datetime(2008, 2, 1),\n datetime(2008, 2, 29): datetime(2008, 5, 1),\n datetime(2008, 3, 15): datetime(2008, 5, 1),\n datetime(2008, 3, 31): datetime(2008, 5, 1),\n datetime(2008, 4, 15): datetime(2008, 5, 1),\n datetime(2008, 4, 30): datetime(2008, 5, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterBegin(startingMonth=1, n=0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 12, 1): datetime(2009, 1, 1),\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 2, 15): datetime(2008, 4, 1),\n datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 3, 15): datetime(2008, 4, 1),\n datetime(2008, 3, 31): datetime(2008, 4, 1),\n datetime(2008, 4, 15): datetime(2008, 7, 1),\n datetime(2008, 4, 30): datetime(2008, 7, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterBegin(startingMonth=1, n=-1),\n {\n datetime(2008, 1, 1): datetime(2007, 10, 1),\n datetime(2008, 1, 31): datetime(2008, 1, 1),\n datetime(2008, 2, 15): datetime(2008, 1, 1),\n datetime(2008, 2, 29): datetime(2008, 1, 1),\n datetime(2008, 3, 15): datetime(2008, 1, 1),\n datetime(2008, 3, 31): datetime(2008, 1, 1),\n datetime(2008, 4, 15): datetime(2008, 4, 1),\n datetime(2008, 4, 30): datetime(2008, 4, 1),\n datetime(2008, 7, 1): datetime(2008, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterBegin(startingMonth=1, n=2),\n {\n datetime(2008, 1, 1): datetime(2008, 7, 1),\n datetime(2008, 2, 15): datetime(2008, 7, 1),\n datetime(2008, 2, 29): datetime(2008, 7, 1),\n datetime(2008, 3, 15): datetime(2008, 7, 1),\n datetime(2008, 3, 31): datetime(2008, 7, 1),\n datetime(2008, 4, 15): datetime(2008, 10, 1),\n datetime(2008, 4, 1): datetime(2008, 10, 1),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n\nclass TestQuarterEnd(Base):\n _offset = QuarterEnd\n\n def test_repr(self):\n expected = \"<QuarterEnd: startingMonth=3>\"\n assert repr(QuarterEnd()) == expected\n expected = \"<QuarterEnd: startingMonth=3>\"\n assert repr(QuarterEnd(startingMonth=3)) == expected\n expected = \"<QuarterEnd: startingMonth=1>\"\n assert repr(QuarterEnd(startingMonth=1)) == expected\n\n def 
test_is_anchored(self):\n assert QuarterEnd(startingMonth=1).is_anchored()\n assert QuarterEnd().is_anchored()\n assert not QuarterEnd(2, startingMonth=1).is_anchored()\n\n def test_offset_corner_case(self):\n # corner\n offset = QuarterEnd(n=-1, startingMonth=1)\n assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31)\n\n offset_cases = []\n offset_cases.append(\n (\n QuarterEnd(startingMonth=1),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 4, 30),\n datetime(2008, 2, 15): datetime(2008, 4, 30),\n datetime(2008, 2, 29): datetime(2008, 4, 30),\n datetime(2008, 3, 15): datetime(2008, 4, 30),\n datetime(2008, 3, 31): datetime(2008, 4, 30),\n datetime(2008, 4, 15): datetime(2008, 4, 30),\n datetime(2008, 4, 30): datetime(2008, 7, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterEnd(startingMonth=2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 29),\n datetime(2008, 1, 31): datetime(2008, 2, 29),\n datetime(2008, 2, 15): datetime(2008, 2, 29),\n datetime(2008, 2, 29): datetime(2008, 5, 31),\n datetime(2008, 3, 15): datetime(2008, 5, 31),\n datetime(2008, 3, 31): datetime(2008, 5, 31),\n datetime(2008, 4, 15): datetime(2008, 5, 31),\n datetime(2008, 4, 30): datetime(2008, 5, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterEnd(startingMonth=1, n=0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2008, 2, 15): datetime(2008, 4, 30),\n datetime(2008, 2, 29): datetime(2008, 4, 30),\n datetime(2008, 3, 15): datetime(2008, 4, 30),\n datetime(2008, 3, 31): datetime(2008, 4, 30),\n datetime(2008, 4, 15): datetime(2008, 4, 30),\n datetime(2008, 4, 30): datetime(2008, 4, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterEnd(startingMonth=1, n=-1),\n {\n datetime(2008, 1, 1): datetime(2007, 10, 31),\n datetime(2008, 1, 31): datetime(2007, 10, 31),\n datetime(2008, 2, 15): datetime(2008, 1, 31),\n datetime(2008, 2, 29): datetime(2008, 1, 31),\n datetime(2008, 3, 15): datetime(2008, 1, 31),\n datetime(2008, 3, 31): datetime(2008, 1, 31),\n datetime(2008, 4, 15): datetime(2008, 1, 31),\n datetime(2008, 4, 30): datetime(2008, 1, 31),\n datetime(2008, 7, 1): datetime(2008, 4, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterEnd(startingMonth=1, n=2),\n {\n datetime(2008, 1, 31): datetime(2008, 7, 31),\n datetime(2008, 2, 15): datetime(2008, 7, 31),\n datetime(2008, 2, 29): datetime(2008, 7, 31),\n datetime(2008, 3, 15): datetime(2008, 7, 31),\n datetime(2008, 3, 31): datetime(2008, 7, 31),\n datetime(2008, 4, 15): datetime(2008, 7, 31),\n datetime(2008, 4, 30): datetime(2008, 10, 31),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),\n (QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),\n (QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),\n (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),\n (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), False),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),\n 
(QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),\n (QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),\n (QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),\n (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),\n (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestBQuarterBegin(Base):\n _offset = BQuarterBegin\n\n def test_repr(self):\n expected = \"<BusinessQuarterBegin: startingMonth=3>\"\n assert repr(BQuarterBegin()) == expected\n expected = \"<BusinessQuarterBegin: startingMonth=3>\"\n assert repr(BQuarterBegin(startingMonth=3)) == expected\n expected = \"<BusinessQuarterBegin: startingMonth=1>\"\n assert repr(BQuarterBegin(startingMonth=1)) == expected\n\n def test_is_anchored(self):\n assert BQuarterBegin(startingMonth=1).is_anchored()\n assert BQuarterBegin().is_anchored()\n assert not BQuarterBegin(2, startingMonth=1).is_anchored()\n\n def test_offset_corner_case(self):\n # corner\n offset = BQuarterBegin(n=-1, startingMonth=1)\n assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2)\n\n offset_cases = []\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=1),\n {\n datetime(2008, 1, 1): datetime(2008, 4, 1),\n datetime(2008, 1, 31): datetime(2008, 4, 1),\n datetime(2008, 2, 15): datetime(2008, 4, 1),\n datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 3, 15): datetime(2008, 4, 1),\n datetime(2008, 3, 31): datetime(2008, 4, 1),\n datetime(2008, 4, 15): datetime(2008, 7, 1),\n datetime(2007, 3, 15): datetime(2007, 4, 2),\n datetime(2007, 2, 28): datetime(2007, 4, 2),\n datetime(2007, 1, 1): datetime(2007, 4, 2),\n datetime(2007, 4, 15): datetime(2007, 7, 2),\n datetime(2007, 7, 1): datetime(2007, 7, 2),\n datetime(2007, 4, 1): datetime(2007, 4, 2),\n datetime(2007, 4, 2): datetime(2007, 7, 2),\n datetime(2008, 4, 30): datetime(2008, 7, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 1),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2008, 1, 15): datetime(2008, 2, 1),\n datetime(2008, 2, 29): datetime(2008, 5, 1),\n datetime(2008, 3, 15): datetime(2008, 5, 1),\n datetime(2008, 3, 31): datetime(2008, 5, 1),\n datetime(2008, 4, 15): datetime(2008, 5, 1),\n datetime(2008, 8, 15): datetime(2008, 11, 3),\n datetime(2008, 9, 15): datetime(2008, 11, 3),\n 
datetime(2008, 11, 1): datetime(2008, 11, 3),\n datetime(2008, 4, 30): datetime(2008, 5, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=1, n=0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2007, 12, 31): datetime(2008, 1, 1),\n datetime(2008, 2, 15): datetime(2008, 4, 1),\n datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 1, 15): datetime(2008, 4, 1),\n datetime(2008, 2, 27): datetime(2008, 4, 1),\n datetime(2008, 3, 15): datetime(2008, 4, 1),\n datetime(2007, 4, 1): datetime(2007, 4, 2),\n datetime(2007, 4, 2): datetime(2007, 4, 2),\n datetime(2007, 7, 1): datetime(2007, 7, 2),\n datetime(2007, 4, 15): datetime(2007, 7, 2),\n datetime(2007, 7, 2): datetime(2007, 7, 2),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=1, n=-1),\n {\n datetime(2008, 1, 1): datetime(2007, 10, 1),\n datetime(2008, 1, 31): datetime(2008, 1, 1),\n datetime(2008, 2, 15): datetime(2008, 1, 1),\n datetime(2008, 2, 29): datetime(2008, 1, 1),\n datetime(2008, 3, 15): datetime(2008, 1, 1),\n datetime(2008, 3, 31): datetime(2008, 1, 1),\n datetime(2008, 4, 15): datetime(2008, 4, 1),\n datetime(2007, 7, 3): datetime(2007, 7, 2),\n datetime(2007, 4, 3): datetime(2007, 4, 2),\n datetime(2007, 7, 2): datetime(2007, 4, 2),\n datetime(2008, 4, 1): datetime(2008, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=1, n=2),\n {\n datetime(2008, 1, 1): datetime(2008, 7, 1),\n datetime(2008, 1, 15): datetime(2008, 7, 1),\n datetime(2008, 2, 29): datetime(2008, 7, 1),\n datetime(2008, 3, 15): datetime(2008, 7, 1),\n datetime(2007, 3, 31): datetime(2007, 7, 2),\n datetime(2007, 4, 15): datetime(2007, 10, 1),\n datetime(2008, 4, 30): datetime(2008, 10, 1),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n\nclass TestBQuarterEnd(Base):\n _offset = BQuarterEnd\n\n def test_repr(self):\n expected = \"<BusinessQuarterEnd: startingMonth=3>\"\n assert repr(BQuarterEnd()) == expected\n expected = \"<BusinessQuarterEnd: startingMonth=3>\"\n assert repr(BQuarterEnd(startingMonth=3)) == expected\n expected = \"<BusinessQuarterEnd: startingMonth=1>\"\n assert repr(BQuarterEnd(startingMonth=1)) == expected\n\n def test_is_anchored(self):\n assert BQuarterEnd(startingMonth=1).is_anchored()\n assert BQuarterEnd().is_anchored()\n assert not BQuarterEnd(2, startingMonth=1).is_anchored()\n\n def test_offset_corner_case(self):\n # corner\n offset = BQuarterEnd(n=-1, startingMonth=1)\n assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29)\n\n offset_cases = []\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=1),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 4, 30),\n datetime(2008, 2, 15): datetime(2008, 4, 30),\n datetime(2008, 2, 29): datetime(2008, 4, 30),\n datetime(2008, 3, 15): datetime(2008, 4, 30),\n datetime(2008, 3, 31): datetime(2008, 4, 30),\n datetime(2008, 4, 15): datetime(2008, 4, 30),\n datetime(2008, 4, 30): datetime(2008, 7, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 29),\n datetime(2008, 1, 31): datetime(2008, 2, 29),\n datetime(2008, 2, 15): datetime(2008, 2, 29),\n datetime(2008, 2, 29): datetime(2008, 5, 30),\n datetime(2008, 3, 15): datetime(2008, 5, 30),\n datetime(2008, 3, 31): datetime(2008, 5, 30),\n datetime(2008, 4, 
15): datetime(2008, 5, 30),\n datetime(2008, 4, 30): datetime(2008, 5, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=1, n=0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2008, 2, 15): datetime(2008, 4, 30),\n datetime(2008, 2, 29): datetime(2008, 4, 30),\n datetime(2008, 3, 15): datetime(2008, 4, 30),\n datetime(2008, 3, 31): datetime(2008, 4, 30),\n datetime(2008, 4, 15): datetime(2008, 4, 30),\n datetime(2008, 4, 30): datetime(2008, 4, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=1, n=-1),\n {\n datetime(2008, 1, 1): datetime(2007, 10, 31),\n datetime(2008, 1, 31): datetime(2007, 10, 31),\n datetime(2008, 2, 15): datetime(2008, 1, 31),\n datetime(2008, 2, 29): datetime(2008, 1, 31),\n datetime(2008, 3, 15): datetime(2008, 1, 31),\n datetime(2008, 3, 31): datetime(2008, 1, 31),\n datetime(2008, 4, 15): datetime(2008, 1, 31),\n datetime(2008, 4, 30): datetime(2008, 1, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=1, n=2),\n {\n datetime(2008, 1, 31): datetime(2008, 7, 31),\n datetime(2008, 2, 15): datetime(2008, 7, 31),\n datetime(2008, 2, 29): datetime(2008, 7, 31),\n datetime(2008, 3, 15): datetime(2008, 7, 31),\n datetime(2008, 3, 31): datetime(2008, 7, 31),\n datetime(2008, 4, 15): datetime(2008, 7, 31),\n datetime(2008, 4, 30): datetime(2008, 10, 31),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),\n (BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),\n (BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, 
case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\n# --------------------------------------------------------------------\n# Years\n\n\nclass TestYearBegin(Base):\n _offset = YearBegin\n\n def test_misspecified(self):\n with pytest.raises(ValueError, match=\"Month must go from 1 to 12\"):\n YearBegin(month=13)\n\n offset_cases = []\n offset_cases.append(\n (\n YearBegin(),\n {\n datetime(2008, 1, 1): datetime(2009, 1, 1),\n datetime(2008, 6, 30): datetime(2009, 1, 1),\n datetime(2008, 12, 31): datetime(2009, 1, 1),\n datetime(2005, 12, 30): datetime(2006, 1, 1),\n datetime(2005, 12, 31): datetime(2006, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 6, 30): datetime(2009, 1, 1),\n datetime(2008, 12, 31): datetime(2009, 1, 1),\n datetime(2005, 12, 30): datetime(2006, 1, 1),\n datetime(2005, 12, 31): datetime(2006, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(3),\n {\n datetime(2008, 1, 1): datetime(2011, 1, 1),\n datetime(2008, 6, 30): datetime(2011, 1, 1),\n datetime(2008, 12, 31): datetime(2011, 1, 1),\n datetime(2005, 12, 30): datetime(2008, 1, 1),\n datetime(2005, 12, 31): datetime(2008, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 1, 1),\n datetime(2007, 1, 15): datetime(2007, 1, 1),\n datetime(2008, 6, 30): datetime(2008, 1, 1),\n datetime(2008, 12, 31): datetime(2008, 1, 1),\n datetime(2006, 12, 29): datetime(2006, 1, 1),\n datetime(2006, 12, 30): datetime(2006, 1, 1),\n datetime(2007, 1, 1): datetime(2006, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(-2),\n {\n datetime(2007, 1, 1): datetime(2005, 1, 1),\n datetime(2008, 6, 30): datetime(2007, 1, 1),\n datetime(2008, 12, 31): datetime(2007, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(month=4),\n {\n datetime(2007, 4, 1): datetime(2008, 4, 1),\n datetime(2007, 4, 15): datetime(2008, 4, 1),\n datetime(2007, 3, 1): datetime(2007, 4, 1),\n datetime(2007, 12, 15): datetime(2008, 4, 1),\n datetime(2012, 1, 31): datetime(2012, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(0, month=4),\n {\n datetime(2007, 4, 1): datetime(2007, 4, 1),\n datetime(2007, 3, 1): datetime(2007, 4, 1),\n datetime(2007, 12, 15): datetime(2008, 4, 1),\n datetime(2012, 1, 31): datetime(2012, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(4, month=4),\n {\n datetime(2007, 4, 1): datetime(2011, 4, 1),\n datetime(2007, 4, 15): datetime(2011, 4, 1),\n datetime(2007, 3, 1): datetime(2010, 4, 1),\n datetime(2007, 12, 15): datetime(2011, 4, 1),\n datetime(2012, 1, 31): datetime(2015, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(-1, month=4),\n {\n datetime(2007, 4, 1): datetime(2006, 4, 1),\n datetime(2007, 3, 1): datetime(2006, 4, 1),\n datetime(2007, 12, 15): datetime(2007, 4, 1),\n datetime(2012, 1, 31): datetime(2011, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(-3, month=4),\n {\n datetime(2007, 4, 1): datetime(2004, 4, 1),\n datetime(2007, 3, 1): datetime(2004, 4, 1),\n datetime(2007, 12, 15): datetime(2005, 4, 1),\n datetime(2012, 1, 31): datetime(2009, 4, 1),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (YearBegin(), datetime(2007, 1, 3), False),\n (YearBegin(), datetime(2008, 1, 1), True),\n 
(YearBegin(), datetime(2006, 12, 31), False),\n (YearBegin(), datetime(2006, 1, 2), False),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestYearEnd(Base):\n _offset = YearEnd\n\n def test_misspecified(self):\n with pytest.raises(ValueError, match=\"Month must go from 1 to 12\"):\n YearEnd(month=13)\n\n offset_cases = []\n offset_cases.append(\n (\n YearEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 12, 31),\n datetime(2008, 12, 31): datetime(2009, 12, 31),\n datetime(2005, 12, 30): datetime(2005, 12, 31),\n datetime(2005, 12, 31): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 12, 31),\n datetime(2008, 12, 31): datetime(2008, 12, 31),\n datetime(2005, 12, 30): datetime(2005, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n datetime(2008, 6, 30): datetime(2007, 12, 31),\n datetime(2008, 12, 31): datetime(2007, 12, 31),\n datetime(2006, 12, 29): datetime(2005, 12, 31),\n datetime(2006, 12, 30): datetime(2005, 12, 31),\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(-2),\n {\n datetime(2007, 1, 1): datetime(2005, 12, 31),\n datetime(2008, 6, 30): datetime(2006, 12, 31),\n datetime(2008, 12, 31): datetime(2006, 12, 31),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (YearEnd(), datetime(2007, 12, 31), True),\n (YearEnd(), datetime(2008, 1, 1), False),\n (YearEnd(), datetime(2006, 12, 31), True),\n (YearEnd(), datetime(2006, 12, 29), False),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestYearEndDiffMonth(Base):\n offset_cases = []\n offset_cases.append(\n (\n YearEnd(month=3),\n {\n datetime(2008, 1, 1): datetime(2008, 3, 31),\n datetime(2008, 2, 15): datetime(2008, 3, 31),\n datetime(2008, 3, 31): datetime(2009, 3, 31),\n datetime(2008, 3, 30): datetime(2008, 3, 31),\n datetime(2005, 3, 31): datetime(2006, 3, 31),\n datetime(2006, 7, 30): datetime(2007, 3, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(0, month=3),\n {\n datetime(2008, 1, 1): datetime(2008, 3, 31),\n datetime(2008, 2, 28): datetime(2008, 3, 31),\n datetime(2008, 3, 31): datetime(2008, 3, 31),\n datetime(2005, 3, 30): datetime(2005, 3, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(-1, month=3),\n {\n datetime(2007, 1, 1): datetime(2006, 3, 31),\n datetime(2008, 2, 28): datetime(2007, 3, 31),\n datetime(2008, 3, 31): datetime(2007, 3, 31),\n datetime(2006, 3, 29): datetime(2005, 3, 31),\n datetime(2006, 3, 30): datetime(2005, 3, 31),\n datetime(2007, 3, 1): datetime(2006, 3, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(-2, month=3),\n {\n datetime(2007, 1, 1): datetime(2005, 3, 31),\n datetime(2008, 6, 30): datetime(2007, 3, 31),\n datetime(2008, 3, 31): datetime(2006, 3, 31),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n 
assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (YearEnd(month=3), datetime(2007, 3, 31), True),\n (YearEnd(month=3), datetime(2008, 1, 1), False),\n (YearEnd(month=3), datetime(2006, 3, 31), True),\n (YearEnd(month=3), datetime(2006, 3, 29), False),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestBYearBegin(Base):\n _offset = BYearBegin\n\n def test_misspecified(self):\n msg = \"Month must go from 1 to 12\"\n with pytest.raises(ValueError, match=msg):\n BYearBegin(month=13)\n with pytest.raises(ValueError, match=msg):\n BYearEnd(month=13)\n\n offset_cases = []\n offset_cases.append(\n (\n BYearBegin(),\n {\n datetime(2008, 1, 1): datetime(2009, 1, 1),\n datetime(2008, 6, 30): datetime(2009, 1, 1),\n datetime(2008, 12, 31): datetime(2009, 1, 1),\n datetime(2011, 1, 1): datetime(2011, 1, 3),\n datetime(2011, 1, 3): datetime(2012, 1, 2),\n datetime(2005, 12, 30): datetime(2006, 1, 2),\n datetime(2005, 12, 31): datetime(2006, 1, 2),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearBegin(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 6, 30): datetime(2009, 1, 1),\n datetime(2008, 12, 31): datetime(2009, 1, 1),\n datetime(2005, 12, 30): datetime(2006, 1, 2),\n datetime(2005, 12, 31): datetime(2006, 1, 2),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearBegin(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 1, 2),\n datetime(2009, 1, 4): datetime(2009, 1, 1),\n datetime(2009, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 6, 30): datetime(2008, 1, 1),\n datetime(2008, 12, 31): datetime(2008, 1, 1),\n datetime(2006, 12, 29): datetime(2006, 1, 2),\n datetime(2006, 12, 30): datetime(2006, 1, 2),\n datetime(2006, 1, 1): datetime(2005, 1, 3),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearBegin(-2),\n {\n datetime(2007, 1, 1): datetime(2005, 1, 3),\n datetime(2007, 6, 30): datetime(2006, 1, 2),\n datetime(2008, 12, 31): datetime(2007, 1, 1),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n\nclass TestBYearEnd(Base):\n _offset = BYearEnd\n\n offset_cases = []\n offset_cases.append(\n (\n BYearEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 12, 31),\n datetime(2008, 12, 31): datetime(2009, 12, 31),\n datetime(2005, 12, 30): datetime(2006, 12, 29),\n datetime(2005, 12, 31): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 12, 31),\n datetime(2008, 12, 31): datetime(2008, 12, 31),\n datetime(2005, 12, 31): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearEnd(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 29),\n datetime(2008, 6, 30): datetime(2007, 12, 31),\n datetime(2008, 12, 31): datetime(2007, 12, 31),\n datetime(2006, 12, 29): datetime(2005, 12, 30),\n datetime(2006, 12, 30): datetime(2006, 12, 29),\n datetime(2007, 1, 1): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearEnd(-2),\n {\n datetime(2007, 1, 1): datetime(2005, 12, 30),\n datetime(2008, 6, 30): datetime(2006, 12, 29),\n datetime(2008, 12, 31): datetime(2006, 12, 29),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, 
cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (BYearEnd(), datetime(2007, 12, 31), True),\n (BYearEnd(), datetime(2008, 1, 1), False),\n (BYearEnd(), datetime(2006, 12, 31), False),\n (BYearEnd(), datetime(2006, 12, 29), True),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestBYearEndLagged(Base):\n _offset = BYearEnd\n\n def test_bad_month_fail(self):\n msg = \"Month must go from 1 to 12\"\n with pytest.raises(ValueError, match=msg):\n BYearEnd(month=13)\n with pytest.raises(ValueError, match=msg):\n BYearEnd(month=0)\n\n offset_cases = []\n offset_cases.append(\n (\n BYearEnd(month=6),\n {\n datetime(2008, 1, 1): datetime(2008, 6, 30),\n datetime(2007, 6, 30): datetime(2008, 6, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearEnd(n=-1, month=6),\n {\n datetime(2008, 1, 1): datetime(2007, 6, 29),\n datetime(2007, 6, 30): datetime(2007, 6, 29),\n },\n )\n )\n\n @pytest.mark.parametrize(\"case\", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n def test_roll(self):\n offset = BYearEnd(month=6)\n date = datetime(2009, 11, 30)\n\n assert offset.rollforward(date) == datetime(2010, 6, 30)\n assert offset.rollback(date) == datetime(2009, 6, 30)\n\n on_offset_cases = [\n (BYearEnd(month=2), datetime(2007, 2, 28), True),\n (BYearEnd(month=6), datetime(2007, 6, 30), False),\n ]\n\n @pytest.mark.parametrize(\"case\", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nThis example demonstrates the use of RemoteGraphicsView to improve performance in\napplications with heavy load. It works by starting a second process to handle \nall graphics rendering, thus freeing up the main process to do its work.\n\nIn this example, the update() function is very expensive and is called frequently.\nAfter update() generates a new set of data, it can either plot directly to a local\nplot (bottom) or remotely via a RemoteGraphicsView (top), allowing speed comparison\nbetween the two cases. IF you have a multi-core CPU, it should be obvious that the \nremote case is much faster.\n\"\"\"\n\nimport initExample ## Add path to library (just for examples; you do not need this)\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport pyqtgraph as pg\nimport pyqtgraph.widgets.RemoteGraphicsView\nimport numpy as np\n\napp = pg.mkQApp()\n\nview = pg.widgets.RemoteGraphicsView.RemoteGraphicsView()\npg.setConfigOptions(antialias=True) ## this will be expensive for the local plot\nview.pg.setConfigOptions(antialias=True) ## prettier plots at no cost to the main process! \nview.setWindowTitle('pyqtgraph example: RemoteSpeedTest')\n\nlabel = QtGui.QLabel()\nrcheck = QtGui.QCheckBox('plot remote')\nrcheck.setChecked(True)\nlcheck = QtGui.QCheckBox('plot local')\nlplt = pg.PlotWidget()\nlayout = pg.LayoutWidget()\nlayout.addWidget(rcheck)\nlayout.addWidget(lcheck)\nlayout.addWidget(label)\nlayout.addWidget(view, row=1, col=0, colspan=3)\nlayout.addWidget(lplt, row=2, col=0, colspan=3)\nlayout.resize(800,800)\nlayout.show()\n\n## Create a PlotItem in the remote process that will be displayed locally\nrplt = view.pg.PlotItem()\nrplt._setProxyOptions(deferGetattr=True) ## speeds up access to rplt.plot\nview.setCentralItem(rplt)\n\nlastUpdate = pg.ptime.time()\navgFps = 0.0\n\ndef update():\n global check, label, plt, lastUpdate, avgFps, rpltfunc\n data = np.random.normal(size=(10000,50)).sum(axis=1)\n data += 5 * np.sin(np.linspace(0, 10, data.shape[0]))\n \n if rcheck.isChecked():\n rplt.plot(data, clear=True, _callSync='off') ## We do not expect a return value.\n ## By turning off callSync, we tell\n ## the proxy that it does not need to \n ## wait for a reply from the remote\n ## process.\n if lcheck.isChecked():\n lplt.plot(data, clear=True)\n \n now = pg.ptime.time()\n fps = 1.0 / (now - lastUpdate)\n lastUpdate = now\n avgFps = avgFps * 0.8 + fps * 0.2\n label.setText(\"Generating %0.2f fps\" % avgFps)\n \ntimer = QtCore.QTimer()\ntimer.timeout.connect(update)\ntimer.start(0)\n\n\n\n## Start Qt event loop unless running in interactive mode or using pyside.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n",
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nUpdate a simple plot as rapidly as possible to measure speed.\n\"\"\"\n\n## Add path to library (just for examples; you do not need this)\nimport initExample\n\n\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport pyqtgraph as pg\nfrom pyqtgraph.ptime import time\napp = QtGui.QApplication([])\n\np = pg.plot()\np.setWindowTitle('pyqtgraph example: PlotSpeedTest')\np.setRange(QtCore.QRectF(0, -10, 5000, 20)) \np.setLabel('bottom', 'Index', units='B')\ncurve = p.plot()\n\n#curve.setFillBrush((0, 0, 100, 100))\n#curve.setFillLevel(0)\n\n#lr = pg.LinearRegionItem([100, 4900])\n#p.addItem(lr)\n\ndata = np.random.normal(size=(50,5000))\nptr = 0\nlastTime = time()\nfps = None\ndef update():\n global curve, data, ptr, p, lastTime, fps\n curve.setData(data[ptr%10])\n ptr += 1\n now = time()\n dt = now - lastTime\n lastTime = now\n if fps is None:\n fps = 1.0/dt\n else:\n s = np.clip(dt*3., 0, 1)\n fps = fps * (1-s) + (1.0/dt) * s\n p.setTitle('%0.2f fps' % fps)\n app.processEvents() ## force complete redraw for every plot\ntimer = QtCore.QTimer()\ntimer.timeout.connect(update)\ntimer.start(0)\n \n\n\n## Start Qt event loop unless running in interactive mode.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n",
"\"\"\"\nThe axes_divider module provides helper classes to adjust the positions of\nmultiple axes at drawing time.\n\n Divider: this is the class that is used to calculate the axes\n position. It divides the given rectangular area into several sub\n rectangles. You initialize the divider by setting the horizontal\n and vertical lists of sizes that the division will be based on. You\n then use the new_locator method, whose return value is a callable\n object that can be used to set the axes_locator of the axes.\n\"\"\"\n\nfrom matplotlib import cbook\nfrom matplotlib.axes import SubplotBase\nfrom matplotlib.gridspec import SubplotSpec, GridSpec\nimport matplotlib.transforms as mtransforms\nfrom . import axes_size as Size\n\n\nclass Divider:\n \"\"\"\n This class calculates the axes position. It\n divides the given rectangular area into several\n sub-rectangles. You initialize the divider by setting the\n horizontal and vertical lists of sizes\n (:mod:`mpl_toolkits.axes_grid.axes_size`) that the division will\n be based on. You then use the new_locator method to create a\n callable object that can be used as the axes_locator of the\n axes.\n \"\"\"\n\n def __init__(self, fig, pos, horizontal, vertical,\n aspect=None, anchor=\"C\"):\n \"\"\"\n Parameters\n ----------\n fig : Figure\n pos : tuple of 4 floats\n position of the rectangle that will be divided\n horizontal : list of :mod:`~mpl_toolkits.axes_grid.axes_size`\n sizes for horizontal division\n vertical : list of :mod:`~mpl_toolkits.axes_grid.axes_size`\n sizes for vertical division\n aspect : bool\n if True, the overall rectangular area is reduced\n so that the relative part of the horizontal and\n vertical scales have the same scale.\n anchor : {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}\n placement of the reduced rectangle when *aspect* is True\n \"\"\"\n\n self._fig = fig\n self._pos = pos\n self._horizontal = horizontal\n self._vertical = vertical\n self._anchor = anchor\n self._aspect = aspect\n self._xrefindex = 0\n self._yrefindex = 0\n self._locator = None\n\n def get_horizontal_sizes(self, renderer):\n return [s.get_size(renderer) for s in self.get_horizontal()]\n\n def get_vertical_sizes(self, renderer):\n return [s.get_size(renderer) for s in self.get_vertical()]\n\n def get_vsize_hsize(self):\n\n from .axes_size import AddList\n\n vsize = AddList(self.get_vertical())\n hsize = AddList(self.get_horizontal())\n\n return vsize, hsize\n\n @staticmethod\n def _calc_k(l, total_size):\n\n rs_sum, as_sum = 0., 0.\n\n for _rs, _as in l:\n rs_sum += _rs\n as_sum += _as\n\n if rs_sum != 0.:\n k = (total_size - as_sum) / rs_sum\n return k\n else:\n return 0.\n\n @staticmethod\n def _calc_offsets(l, k):\n offsets = [0.]\n for _rs, _as in l:\n offsets.append(offsets[-1] + _rs*k + _as)\n return offsets\n\n def set_position(self, pos):\n \"\"\"\n set the position of the rectangle.\n\n Parameters\n ----------\n pos : tuple of 4 floats\n position of the rectangle that will be divided\n \"\"\"\n self._pos = pos\n\n def get_position(self):\n \"return the position of the rectangle.\"\n return self._pos\n\n def set_anchor(self, anchor):\n \"\"\"\n Parameters\n ----------\n anchor : {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}\n anchor position\n\n ===== ============\n value description\n ===== ============\n 'C' Center\n 'SW' bottom left\n 'S' bottom\n 'SE' bottom right\n 'E' right\n 'NE' top right\n 'N' top\n 'NW' top left\n 'W' left\n ===== ============\n\n \"\"\"\n if len(anchor) != 2:\n 
cbook._check_in_list(mtransforms.Bbox.coefs, anchor=anchor)\n self._anchor = anchor\n\n def get_anchor(self):\n \"return the anchor\"\n return self._anchor\n\n def set_horizontal(self, h):\n \"\"\"\n Parameters\n ----------\n h : list of :mod:`~mpl_toolkits.axes_grid.axes_size`\n sizes for horizontal division\n \"\"\"\n self._horizontal = h\n\n def get_horizontal(self):\n \"return horizontal sizes\"\n return self._horizontal\n\n def set_vertical(self, v):\n \"\"\"\n Parameters\n ----------\n v : list of :mod:`~mpl_toolkits.axes_grid.axes_size`\n sizes for vertical division\n \"\"\"\n self._vertical = v\n\n def get_vertical(self):\n \"return vertical sizes\"\n return self._vertical\n\n def set_aspect(self, aspect=False):\n \"\"\"\n Parameters\n ----------\n aspect : bool\n \"\"\"\n self._aspect = aspect\n\n def get_aspect(self):\n \"return aspect\"\n return self._aspect\n\n def set_locator(self, _locator):\n self._locator = _locator\n\n def get_locator(self):\n return self._locator\n\n def get_position_runtime(self, ax, renderer):\n if self._locator is None:\n return self.get_position()\n else:\n return self._locator(ax, renderer).bounds\n\n def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):\n \"\"\"\n Parameters\n ----------\n nx, nx1 : int\n Integers specifying the column-position of the\n cell. When *nx1* is None, a single *nx*-th column is\n specified. Otherwise location of columns spanning between *nx*\n to *nx1* (but excluding *nx1*-th column) is specified.\n ny, ny1 : int\n Same as *nx* and *nx1*, but for row positions.\n axes\n renderer\n \"\"\"\n\n figW, figH = self._fig.get_size_inches()\n x, y, w, h = self.get_position_runtime(axes, renderer)\n\n hsizes = self.get_horizontal_sizes(renderer)\n vsizes = self.get_vertical_sizes(renderer)\n k_h = self._calc_k(hsizes, figW*w)\n k_v = self._calc_k(vsizes, figH*h)\n\n if self.get_aspect():\n k = min(k_h, k_v)\n ox = self._calc_offsets(hsizes, k)\n oy = self._calc_offsets(vsizes, k)\n\n ww = (ox[-1] - ox[0])/figW\n hh = (oy[-1] - oy[0])/figH\n pb = mtransforms.Bbox.from_bounds(x, y, w, h)\n pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)\n pb1_anchored = pb1.anchored(self.get_anchor(), pb)\n x0, y0 = pb1_anchored.x0, pb1_anchored.y0\n\n else:\n ox = self._calc_offsets(hsizes, k_h)\n oy = self._calc_offsets(vsizes, k_v)\n x0, y0 = x, y\n\n if nx1 is None:\n nx1 = nx+1\n if ny1 is None:\n ny1 = ny+1\n\n x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW\n y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH\n\n return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)\n\n def new_locator(self, nx, ny, nx1=None, ny1=None):\n \"\"\"\n Returns a new locator\n (:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for\n specified cell.\n\n Parameters\n ----------\n nx, nx1 : int\n Integers specifying the column-position of the\n cell. When *nx1* is None, a single *nx*-th column is\n specified. 
Otherwise location of columns spanning between *nx*\n to *nx1* (but excluding *nx1*-th column) is specified.\n ny, ny1 : int\n Same as *nx* and *nx1*, but for row positions.\n \"\"\"\n return AxesLocator(self, nx, ny, nx1, ny1)\n\n def append_size(self, position, size):\n if position == \"left\":\n self._horizontal.insert(0, size)\n self._xrefindex += 1\n elif position == \"right\":\n self._horizontal.append(size)\n elif position == \"bottom\":\n self._vertical.insert(0, size)\n self._yrefindex += 1\n elif position == \"top\":\n self._vertical.append(size)\n else:\n cbook._check_in_list([\"left\", \"right\", \"bottom\", \"top\"],\n position=position)\n\n def add_auto_adjustable_area(self,\n use_axes, pad=0.1,\n adjust_dirs=None,\n ):\n if adjust_dirs is None:\n adjust_dirs = [\"left\", \"right\", \"bottom\", \"top\"]\n from .axes_size import Padded, SizeFromFunc, GetExtentHelper\n for d in adjust_dirs:\n helper = GetExtentHelper(use_axes, d)\n size = SizeFromFunc(helper)\n padded_size = Padded(size, pad) # pad in inch\n self.append_size(d, padded_size)\n\n\nclass AxesLocator:\n \"\"\"\n A simple callable object, initialized with AxesDivider class,\n returns the position and size of the given cell.\n \"\"\"\n def __init__(self, axes_divider, nx, ny, nx1=None, ny1=None):\n \"\"\"\n Parameters\n ----------\n axes_divider : AxesDivider\n nx, nx1 : int\n Integers specifying the column-position of the\n cell. When *nx1* is None, a single *nx*-th column is\n specified. Otherwise location of columns spanning between *nx*\n to *nx1* (but excluding *nx1*-th column) is specified.\n ny, ny1 : int\n Same as *nx* and *nx1*, but for row positions.\n \"\"\"\n self._axes_divider = axes_divider\n\n _xrefindex = axes_divider._xrefindex\n _yrefindex = axes_divider._yrefindex\n\n self._nx, self._ny = nx - _xrefindex, ny - _yrefindex\n\n if nx1 is None:\n nx1 = nx+1\n if ny1 is None:\n ny1 = ny+1\n\n self._nx1 = nx1 - _xrefindex\n self._ny1 = ny1 - _yrefindex\n\n def __call__(self, axes, renderer):\n\n _xrefindex = self._axes_divider._xrefindex\n _yrefindex = self._axes_divider._yrefindex\n\n return self._axes_divider.locate(self._nx + _xrefindex,\n self._ny + _yrefindex,\n self._nx1 + _xrefindex,\n self._ny1 + _yrefindex,\n axes,\n renderer)\n\n def get_subplotspec(self):\n if hasattr(self._axes_divider, \"get_subplotspec\"):\n return self._axes_divider.get_subplotspec()\n else:\n return None\n\n\nclass SubplotDivider(Divider):\n \"\"\"\n The Divider class whose rectangle area is specified as a subplot geometry.\n \"\"\"\n\n def __init__(self, fig, *args, horizontal=None, vertical=None,\n aspect=None, anchor='C'):\n \"\"\"\n Parameters\n ----------\n fig : `matplotlib.figure.Figure`\n\n *args : tuple (*nrows*, *ncols*, *index*) or int\n The array of subplots in the figure has dimensions ``(nrows,\n ncols)``, and *index* is the index of the subplot being created.\n *index* starts at 1 in the upper left corner and increases to the\n right.\n\n If *nrows*, *ncols*, and *index* are all single digit numbers, then\n *args* can be passed as a single 3-digit number (e.g. 
234 for\n (2, 3, 4)).\n \"\"\"\n\n self.figure = fig\n\n if len(args) == 1:\n if isinstance(args[0], SubplotSpec):\n self._subplotspec = args[0]\n else:\n try:\n s = str(int(args[0]))\n rows, cols, num = map(int, s)\n except ValueError:\n raise ValueError(\n 'Single argument to subplot must be a 3-digit integer')\n self._subplotspec = GridSpec(rows, cols)[num-1]\n # num - 1 for converting from MATLAB to python indexing\n elif len(args) == 3:\n rows, cols, num = args\n rows = int(rows)\n cols = int(cols)\n if isinstance(num, tuple) and len(num) == 2:\n num = [int(n) for n in num]\n self._subplotspec = GridSpec(rows, cols)[num[0]-1:num[1]]\n else:\n self._subplotspec = GridSpec(rows, cols)[int(num)-1]\n # num - 1 for converting from MATLAB to python indexing\n else:\n raise ValueError(f'Illegal argument(s) to subplot: {args}')\n\n # total = rows*cols\n # num -= 1 # convert from matlab to python indexing\n # # i.e., num in range(0, total)\n # if num >= total:\n # raise ValueError( 'Subplot number exceeds total subplots')\n # self._rows = rows\n # self._cols = cols\n # self._num = num\n\n # self.update_params()\n\n # sets self.fixbox\n self.update_params()\n\n pos = self.figbox.bounds\n\n Divider.__init__(self, fig, pos, horizontal or [], vertical or [],\n aspect=aspect, anchor=anchor)\n\n def get_position(self):\n \"return the bounds of the subplot box\"\n\n self.update_params() # update self.figbox\n return self.figbox.bounds\n\n # def update_params(self):\n # 'update the subplot position from fig.subplotpars'\n\n # rows = self._rows\n # cols = self._cols\n # num = self._num\n\n # pars = self.figure.subplotpars\n # left = pars.left\n # right = pars.right\n # bottom = pars.bottom\n # top = pars.top\n # wspace = pars.wspace\n # hspace = pars.hspace\n # totWidth = right-left\n # totHeight = top-bottom\n\n # figH = totHeight/(rows + hspace*(rows-1))\n # sepH = hspace*figH\n\n # figW = totWidth/(cols + wspace*(cols-1))\n # sepW = wspace*figW\n\n # rowNum, colNum = divmod(num, cols)\n\n # figBottom = top - (rowNum+1)*figH - rowNum*sepH\n # figLeft = left + colNum*(figW + sepW)\n\n # self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,\n # figW, figH)\n\n def update_params(self):\n \"\"\"Update the subplot position from fig.subplotpars.\"\"\"\n self.figbox = self.get_subplotspec().get_position(self.figure)\n\n def get_geometry(self):\n \"\"\"Get the subplot geometry, e.g., (2, 2, 3).\"\"\"\n rows, cols, num1, num2 = self.get_subplotspec().get_geometry()\n return rows, cols, num1+1 # for compatibility\n\n # COVERAGE NOTE: Never used internally or from examples\n def change_geometry(self, numrows, numcols, num):\n \"\"\"Change subplot geometry, e.g., from (1, 1, 1) to (2, 2, 3).\"\"\"\n self._subplotspec = GridSpec(numrows, numcols)[num-1]\n self.update_params()\n self.set_position(self.figbox)\n\n def get_subplotspec(self):\n \"\"\"Get the SubplotSpec instance.\"\"\"\n return self._subplotspec\n\n def set_subplotspec(self, subplotspec):\n \"\"\"Set the SubplotSpec instance.\"\"\"\n self._subplotspec = subplotspec\n\n\nclass AxesDivider(Divider):\n \"\"\"\n Divider based on the pre-existing axes.\n \"\"\"\n\n def __init__(self, axes, xref=None, yref=None):\n \"\"\"\n Parameters\n ----------\n axes : :class:`~matplotlib.axes.Axes`\n xref\n yref\n \"\"\"\n self._axes = axes\n if xref is None:\n self._xref = Size.AxesX(axes)\n else:\n self._xref = xref\n if yref is None:\n self._yref = Size.AxesY(axes)\n else:\n self._yref = yref\n\n Divider.__init__(self, fig=axes.get_figure(), pos=None,\n 
horizontal=[self._xref], vertical=[self._yref],\n aspect=None, anchor=\"C\")\n\n def _get_new_axes(self, *, axes_class=None, **kwargs):\n axes = self._axes\n if axes_class is None:\n if isinstance(axes, SubplotBase):\n axes_class = axes._axes_class\n else:\n axes_class = type(axes)\n return axes_class(axes.get_figure(), axes.get_position(original=True),\n **kwargs)\n\n def new_horizontal(self, size, pad=None, pack_start=False, **kwargs):\n \"\"\"\n Add a new axes on the right (or left) side of the main axes.\n\n Parameters\n ----------\n size : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or str\n A width of the axes. If float or string is given, *from_any*\n function is used to create the size, with *ref_size* set to AxesX\n instance of the current axes.\n pad : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or str\n Pad between the axes. It takes same argument as *size*.\n pack_start : bool\n If False, the new axes is appended at the end\n of the list, i.e., it became the right-most axes. If True, it is\n inserted at the start of the list, and becomes the left-most axes.\n **kwargs\n All extra keywords arguments are passed to the created axes.\n If *axes_class* is given, the new axes will be created as an\n instance of the given class. Otherwise, the same class of the\n main axes will be used.\n \"\"\"\n if pad is None:\n cbook.warn_deprecated(\n \"3.2\", message=\"In a future version, 'pad' will default to \"\n \"rcParams['figure.subplot.wspace']. Set pad=0 to keep the \"\n \"old behavior.\")\n if pad:\n if not isinstance(pad, Size._Base):\n pad = Size.from_any(pad, fraction_ref=self._xref)\n if pack_start:\n self._horizontal.insert(0, pad)\n self._xrefindex += 1\n else:\n self._horizontal.append(pad)\n if not isinstance(size, Size._Base):\n size = Size.from_any(size, fraction_ref=self._xref)\n if pack_start:\n self._horizontal.insert(0, size)\n self._xrefindex += 1\n locator = self.new_locator(nx=0, ny=self._yrefindex)\n else:\n self._horizontal.append(size)\n locator = self.new_locator(\n nx=len(self._horizontal) - 1, ny=self._yrefindex)\n ax = self._get_new_axes(**kwargs)\n ax.set_axes_locator(locator)\n return ax\n\n def new_vertical(self, size, pad=None, pack_start=False, **kwargs):\n \"\"\"\n Add a new axes on the top (or bottom) side of the main axes.\n\n Parameters\n ----------\n size : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or str\n A height of the axes. If float or string is given, *from_any*\n function is used to create the size, with *ref_size* set to AxesX\n instance of the current axes.\n pad : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or str\n Pad between the axes. It takes same argument as *size*.\n pack_start : bool\n If False, the new axes is appended at the end\n of the list, i.e., it became the right-most axes. If True, it is\n inserted at the start of the list, and becomes the left-most axes.\n **kwargs\n All extra keywords arguments are passed to the created axes.\n If *axes_class* is given, the new axes will be created as an\n instance of the given class. Otherwise, the same class of the\n main axes will be used.\n \"\"\"\n if pad is None:\n cbook.warn_deprecated(\n \"3.2\", message=\"In a future version, 'pad' will default to \"\n \"rcParams['figure.subplot.hspace']. 
Set pad=0 to keep the \"\n \"old behavior.\")\n if pad:\n if not isinstance(pad, Size._Base):\n pad = Size.from_any(pad, fraction_ref=self._yref)\n if pack_start:\n self._vertical.insert(0, pad)\n self._yrefindex += 1\n else:\n self._vertical.append(pad)\n if not isinstance(size, Size._Base):\n size = Size.from_any(size, fraction_ref=self._yref)\n if pack_start:\n self._vertical.insert(0, size)\n self._yrefindex += 1\n locator = self.new_locator(nx=self._xrefindex, ny=0)\n else:\n self._vertical.append(size)\n locator = self.new_locator(\n nx=self._xrefindex, ny=len(self._vertical)-1)\n ax = self._get_new_axes(**kwargs)\n ax.set_axes_locator(locator)\n return ax\n\n def append_axes(self, position, size, pad=None, add_to_figure=True,\n **kwargs):\n \"\"\"\n Create an axes at the given *position* with the same height\n (or width) of the main axes.\n\n *position*\n [\"left\"|\"right\"|\"bottom\"|\"top\"]\n\n *size* and *pad* should be axes_grid.axes_size compatible.\n \"\"\"\n if position == \"left\":\n ax = self.new_horizontal(size, pad, pack_start=True, **kwargs)\n elif position == \"right\":\n ax = self.new_horizontal(size, pad, pack_start=False, **kwargs)\n elif position == \"bottom\":\n ax = self.new_vertical(size, pad, pack_start=True, **kwargs)\n elif position == \"top\":\n ax = self.new_vertical(size, pad, pack_start=False, **kwargs)\n else:\n cbook._check_in_list([\"left\", \"right\", \"bottom\", \"top\"],\n position=position)\n if add_to_figure:\n self._fig.add_axes(ax)\n return ax\n\n def get_aspect(self):\n if self._aspect is None:\n aspect = self._axes.get_aspect()\n if aspect == \"auto\":\n return False\n else:\n return True\n else:\n return self._aspect\n\n def get_position(self):\n if self._pos is None:\n bbox = self._axes.get_position(original=True)\n return bbox.bounds\n else:\n return self._pos\n\n def get_anchor(self):\n if self._anchor is None:\n return self._axes.get_anchor()\n else:\n return self._anchor\n\n def get_subplotspec(self):\n if hasattr(self._axes, \"get_subplotspec\"):\n return self._axes.get_subplotspec()\n else:\n return None\n\n\nclass HBoxDivider(SubplotDivider):\n\n def __init__(self, fig, *args, **kwargs):\n SubplotDivider.__init__(self, fig, *args, **kwargs)\n\n @staticmethod\n def _determine_karray(equivalent_sizes, appended_sizes,\n max_equivalent_size,\n total_appended_size):\n\n n = len(equivalent_sizes)\n import numpy as np\n A = np.mat(np.zeros((n+1, n+1), dtype=\"d\"))\n B = np.zeros((n+1), dtype=\"d\")\n # AxK = B\n\n # populated A\n for i, (r, a) in enumerate(equivalent_sizes):\n A[i, i] = r\n A[i, -1] = -1\n B[i] = -a\n A[-1, :-1] = [r for r, a in appended_sizes]\n B[-1] = total_appended_size - sum([a for rs, a in appended_sizes])\n\n karray_H = (A.I*np.mat(B).T).A1\n karray = karray_H[:-1]\n H = karray_H[-1]\n\n if H > max_equivalent_size:\n karray = ((max_equivalent_size -\n np.array([a for r, a in equivalent_sizes]))\n / np.array([r for r, a in equivalent_sizes]))\n return karray\n\n @staticmethod\n def _calc_offsets(appended_sizes, karray):\n offsets = [0.]\n for (r, a), k in zip(appended_sizes, karray):\n offsets.append(offsets[-1] + r*k + a)\n return offsets\n\n def new_locator(self, nx, nx1=None):\n \"\"\"\n Create a new `~mpl_toolkits.axes_grid.axes_divider.AxesLocator` for\n the specified cell.\n\n Parameters\n ----------\n nx, nx1 : int\n Integers specifying the column-position of the\n cell. When *nx1* is None, a single *nx*-th column is\n specified. 
Otherwise location of columns spanning between *nx*\n to *nx1* (but excluding *nx1*-th column) is specified.\n ny, ny1 : int\n Same as *nx* and *nx1*, but for row positions.\n \"\"\"\n return AxesLocator(self, nx, 0, nx1, None)\n\n def _locate(self, x, y, w, h,\n y_equivalent_sizes, x_appended_sizes,\n figW, figH):\n \"\"\"\n Parameters\n ----------\n x\n y\n w\n h\n y_equivalent_sizes\n x_appended_sizes\n figW\n figH\n \"\"\"\n\n equivalent_sizes = y_equivalent_sizes\n appended_sizes = x_appended_sizes\n\n max_equivalent_size = figH*h\n total_appended_size = figW*w\n karray = self._determine_karray(equivalent_sizes, appended_sizes,\n max_equivalent_size,\n total_appended_size)\n\n ox = self._calc_offsets(appended_sizes, karray)\n\n ww = (ox[-1] - ox[0])/figW\n ref_h = equivalent_sizes[0]\n hh = (karray[0]*ref_h[0] + ref_h[1])/figH\n pb = mtransforms.Bbox.from_bounds(x, y, w, h)\n pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)\n pb1_anchored = pb1.anchored(self.get_anchor(), pb)\n x0, y0 = pb1_anchored.x0, pb1_anchored.y0\n\n return x0, y0, ox, hh\n\n def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):\n \"\"\"\n Parameters\n ----------\n axes_divider : AxesDivider\n nx, nx1 : int\n Integers specifying the column-position of the\n cell. When *nx1* is None, a single *nx*-th column is\n specified. Otherwise location of columns spanning between *nx*\n to *nx1* (but excluding *nx1*-th column) is specified.\n ny, ny1 : int\n Same as *nx* and *nx1*, but for row positions.\n axes\n renderer\n \"\"\"\n\n figW, figH = self._fig.get_size_inches()\n x, y, w, h = self.get_position_runtime(axes, renderer)\n\n y_equivalent_sizes = self.get_vertical_sizes(renderer)\n x_appended_sizes = self.get_horizontal_sizes(renderer)\n x0, y0, ox, hh = self._locate(x, y, w, h,\n y_equivalent_sizes, x_appended_sizes,\n figW, figH)\n if nx1 is None:\n nx1 = nx+1\n\n x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW\n y1, h1 = y0, hh\n\n return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)\n\n\nclass VBoxDivider(HBoxDivider):\n \"\"\"\n The Divider class whose rectangle area is specified as a subplot geometry.\n \"\"\"\n\n def new_locator(self, ny, ny1=None):\n \"\"\"\n Create a new `~mpl_toolkits.axes_grid.axes_divider.AxesLocator` for\n the specified cell.\n\n Parameters\n ----------\n ny, ny1 : int\n Integers specifying the row-position of the\n cell. When *ny1* is None, a single *ny*-th row is\n specified. Otherwise location of rows spanning between *ny*\n to *ny1* (but excluding *ny1*-th row) is specified.\n \"\"\"\n return AxesLocator(self, 0, ny, None, ny1)\n\n def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):\n \"\"\"\n Parameters\n ----------\n axes_divider : AxesDivider\n nx, nx1 : int\n Integers specifying the column-position of the\n cell. When *nx1* is None, a single *nx*-th column is\n specified. 
Otherwise location of columns spanning between *nx*\n to *nx1* (but excluding *nx1*-th column) is specified.\n ny, ny1 : int\n Same as *nx* and *nx1*, but for row positions.\n axes\n renderer\n \"\"\"\n\n figW, figH = self._fig.get_size_inches()\n x, y, w, h = self.get_position_runtime(axes, renderer)\n\n x_equivalent_sizes = self.get_horizontal_sizes(renderer)\n y_appended_sizes = self.get_vertical_sizes(renderer)\n\n y0, x0, oy, ww = self._locate(y, x, h, w,\n x_equivalent_sizes, y_appended_sizes,\n figH, figW)\n if ny1 is None:\n ny1 = ny+1\n\n x1, w1 = x0, ww\n y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH\n\n return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)\n\n\ndef make_axes_locatable(axes):\n divider = AxesDivider(axes)\n locator = divider.new_locator(nx=0, ny=0)\n axes.set_axes_locator(locator)\n\n return divider\n\n\ndef make_axes_area_auto_adjustable(ax,\n use_axes=None, pad=0.1,\n adjust_dirs=None):\n if adjust_dirs is None:\n adjust_dirs = [\"left\", \"right\", \"bottom\", \"top\"]\n divider = make_axes_locatable(ax)\n\n if use_axes is None:\n use_axes = ax\n\n divider.add_auto_adjustable_area(use_axes=use_axes, pad=pad,\n adjust_dirs=adjust_dirs)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nSimple examples demonstrating the use of GLMeshItem.\n\n\"\"\"\n\n## Add path to library (just for examples; you do not need this)\nimport initExample\n\nfrom pyqtgraph.Qt import QtCore, QtGui\nimport pyqtgraph as pg\nimport pyqtgraph.opengl as gl\n\napp = QtGui.QApplication([])\nw = gl.GLViewWidget()\nw.show()\nw.setWindowTitle('pyqtgraph example: GLMeshItem')\nw.setCameraPosition(distance=40)\n\ng = gl.GLGridItem()\ng.scale(2,2,1)\nw.addItem(g)\n\nimport numpy as np\n\n\n## Example 1:\n## Array of vertex positions and array of vertex indexes defining faces\n## Colors are specified per-face\n\nverts = np.array([\n [0, 0, 0],\n [2, 0, 0],\n [1, 2, 0],\n [1, 1, 1],\n])\nfaces = np.array([\n [0, 1, 2],\n [0, 1, 3],\n [0, 2, 3],\n [1, 2, 3]\n])\ncolors = np.array([\n [1, 0, 0, 0.3],\n [0, 1, 0, 0.3],\n [0, 0, 1, 0.3],\n [1, 1, 0, 0.3]\n])\n\n## Mesh item will automatically compute face normals.\nm1 = gl.GLMeshItem(vertexes=verts, faces=faces, faceColors=colors, smooth=False)\nm1.translate(5, 5, 0)\nm1.setGLOptions('additive')\nw.addItem(m1)\n\n\n## Example 2:\n## Array of vertex positions, three per face\nverts = np.empty((36, 3, 3), dtype=np.float32)\ntheta = np.linspace(0, 2*np.pi, 37)[:-1]\nverts[:,0] = np.vstack([2*np.cos(theta), 2*np.sin(theta), [0]*36]).T\nverts[:,1] = np.vstack([4*np.cos(theta+0.2), 4*np.sin(theta+0.2), [-1]*36]).T\nverts[:,2] = np.vstack([4*np.cos(theta-0.2), 4*np.sin(theta-0.2), [1]*36]).T\n \n## Colors are specified per-vertex\ncolors = np.random.random(size=(verts.shape[0], 3, 4))\nm2 = gl.GLMeshItem(vertexes=verts, vertexColors=colors, smooth=False, shader='balloon', \n drawEdges=True, edgeColor=(1, 1, 0, 1))\nm2.translate(-5, 5, 0)\nw.addItem(m2)\n\n\n\n## Example 3:\n## sphere\n\nmd = gl.MeshData.sphere(rows=10, cols=20)\n#colors = np.random.random(size=(md.faceCount(), 4))\n#colors[:,3] = 0.3\n#colors[100:] = 0.0\ncolors = np.ones((md.faceCount(), 4), dtype=float)\ncolors[::2,0] = 0\ncolors[:,1] = np.linspace(0, 1, colors.shape[0])\nmd.setFaceColors(colors)\nm3 = gl.GLMeshItem(meshdata=md, smooth=False)#, shader='balloon')\n\nm3.translate(5, -5, 0)\nw.addItem(m3)\n\n\n# Example 4:\n# wireframe\n\nmd = gl.MeshData.sphere(rows=4, cols=8)\nm4 = gl.GLMeshItem(meshdata=md, smooth=False, drawFaces=False, drawEdges=True, edgeColor=(1,1,1,1))\nm4.translate(0,10,0)\nw.addItem(m4)\n\n# Example 5:\n# cylinder\nmd = gl.MeshData.cylinder(rows=10, cols=20, radius=[1., 2.0], length=5.)\nmd2 = gl.MeshData.cylinder(rows=10, cols=20, radius=[2., 0.5], length=10.)\ncolors = np.ones((md.faceCount(), 4), dtype=float)\ncolors[::2,0] = 0\ncolors[:,1] = np.linspace(0, 1, colors.shape[0])\nmd.setFaceColors(colors)\nm5 = gl.GLMeshItem(meshdata=md, smooth=True, drawEdges=True, edgeColor=(1,0,0,1), shader='balloon')\ncolors = np.ones((md.faceCount(), 4), dtype=float)\ncolors[::2,0] = 0\ncolors[:,1] = np.linspace(0, 1, colors.shape[0])\nmd2.setFaceColors(colors)\nm6 = gl.GLMeshItem(meshdata=md2, smooth=True, drawEdges=False, shader='balloon')\nm6.translate(0,0,7.5)\n\nm6.rotate(0., 0, 1, 1)\n#m5.translate(-3,3,0)\nw.addItem(m5)\nw.addItem(m6)\n\n\n\n \n\n\n## Start Qt event loop unless running in interactive mode.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtGui.QApplication.instance().exec_()\n",
"import numpy as np\nfrom io import BytesIO\n\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nimport matplotlib.path as mpath\nimport matplotlib.patches as mpatches\nfrom matplotlib.ticker import FuncFormatter\n\n\n@image_comparison(['bbox_inches_tight'], remove_text=True,\n savefig_kwarg=dict(bbox_inches='tight'))\ndef test_bbox_inches_tight():\n #: Test that a figure saved using bbox_inches='tight' is clipped correctly\n data = [[66386, 174296, 75131, 577908, 32015],\n [58230, 381139, 78045, 99308, 160454],\n [89135, 80552, 152558, 497981, 603535],\n [78415, 81858, 150656, 193263, 69638],\n [139361, 331509, 343164, 781380, 52269]]\n\n colLabels = rowLabels = [''] * 5\n\n rows = len(data)\n ind = np.arange(len(colLabels)) + 0.3 # the x locations for the groups\n cellText = []\n width = 0.4 # the width of the bars\n yoff = np.zeros(len(colLabels))\n # the bottom values for stacked bar chart\n fig, ax = plt.subplots(1, 1)\n for row in range(rows):\n ax.bar(ind, data[row], width, bottom=yoff, align='edge', color='b')\n yoff = yoff + data[row]\n cellText.append([''])\n plt.xticks([])\n plt.xlim(0, 5)\n plt.legend([''] * 5, loc=(1.2, 0.2))\n # Add a table at the bottom of the axes\n cellText.reverse()\n plt.table(cellText=cellText, rowLabels=rowLabels, colLabels=colLabels,\n loc='bottom')\n\n\n@image_comparison(['bbox_inches_tight_suptile_legend'],\n remove_text=False, savefig_kwarg={'bbox_inches': 'tight'})\ndef test_bbox_inches_tight_suptile_legend():\n plt.plot(np.arange(10), label='a straight line')\n plt.legend(bbox_to_anchor=(0.9, 1), loc='upper left')\n plt.title('Axis title')\n plt.suptitle('Figure title')\n\n # put an extra long y tick on to see that the bbox is accounted for\n def y_formatter(y, pos):\n if int(y) == 4:\n return 'The number 4'\n else:\n return str(y)\n plt.gca().yaxis.set_major_formatter(FuncFormatter(y_formatter))\n\n plt.xlabel('X axis')\n\n\n@image_comparison(['bbox_inches_tight_clipping'],\n remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})\ndef test_bbox_inches_tight_clipping():\n # tests bbox clipping on scatter points, and path clipping on a patch\n # to generate an appropriately tight bbox\n plt.scatter(np.arange(10), np.arange(10))\n ax = plt.gca()\n ax.set_xlim([0, 5])\n ax.set_ylim([0, 5])\n\n # make a massive rectangle and clip it with a path\n patch = mpatches.Rectangle([-50, -50], 100, 100,\n transform=ax.transData,\n facecolor='blue', alpha=0.5)\n\n path = mpath.Path.unit_regular_star(5).deepcopy()\n path.vertices *= 0.25\n patch.set_clip_path(path, transform=ax.transAxes)\n plt.gcf().artists.append(patch)\n\n\n@image_comparison(['bbox_inches_tight_raster'],\n remove_text=True, savefig_kwarg={'bbox_inches': 'tight'})\ndef test_bbox_inches_tight_raster():\n \"\"\"Test rasterization with tight_layout\"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot([1.0, 2.0], rasterized=True)\n\n\ndef test_only_on_non_finite_bbox():\n fig, ax = plt.subplots()\n ax.annotate(\"\", xy=(0, float('nan')))\n ax.set_axis_off()\n # we only need to test that it does not error out on save\n fig.savefig(BytesIO(), bbox_inches='tight', format='png')\n\n\ndef test_tight_pcolorfast():\n fig, ax = plt.subplots()\n ax.pcolorfast(np.arange(4).reshape((2, 2)))\n ax.set(ylim=(0, .1))\n buf = BytesIO()\n fig.savefig(buf, bbox_inches=\"tight\")\n buf.seek(0)\n height, width, _ = plt.imread(buf).shape\n # Previously, the bbox would include the area of the image clipped out by\n # the axes, resulting in a very tall image 
given the y limits of (0, 0.1).\n assert width > height\n"
] | [
[
"numpy.clip"
],
[
"matplotlib.cbook._check_in_list"
],
[
"pandas.tseries.offsets.YearBegin",
"pandas.Series",
"pandas.tseries.offsets.BYearEnd",
"pandas.tseries.offsets.QuarterBegin",
"pandas.Timestamp",
"pandas.tseries.offsets.MonthBegin",
"pandas.tseries.offsets.BYearBegin",
"pandas.tseries.offsets.BQuarterBegin",
"pandas.tseries.offsets.BQuarterEnd",
"pandas.tseries.offsets.BMonthBegin",
"pandas.tseries.offsets.BMonthEnd",
"pandas.date_range",
"pandas.tseries.offsets.QuarterEnd",
"pandas.tseries.offsets.YearEnd",
"pandas.tseries.offsets.MonthEnd"
],
[
"numpy.random.normal",
"numpy.linspace"
],
[
"numpy.random.normal",
"numpy.clip"
],
[
"numpy.mat",
"matplotlib.cbook._check_in_list",
"matplotlib.gridspec.GridSpec",
"matplotlib.cbook.warn_deprecated",
"numpy.array",
"numpy.zeros",
"matplotlib.transforms.Bbox.from_bounds"
],
[
"numpy.random.random",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.array",
"numpy.empty"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.table",
"matplotlib.path.Path.unit_regular_star",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.gcf",
"matplotlib.testing.decorators.image_comparison",
"matplotlib.pyplot.xlim",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
toddharryreeb/pandas | [
"6f7ca38be192159afb5d899f41552a5181aa2794"
] | [
"pandas/tests/indexes/datetimes/test_construction.py"
] | [
"from datetime import timedelta\nfrom operator import attrgetter\nfrom functools import partial\n\nimport pytest\nimport pytz\nimport numpy as np\n\nimport pandas as pd\nfrom pandas import offsets\nimport pandas.util.testing as tm\nfrom pandas._libs.tslib import OutOfBoundsDatetime\nfrom pandas._libs.tslibs import conversion\nfrom pandas import (DatetimeIndex, Index, Timestamp, datetime, date_range,\n to_datetime)\n\n\nclass TestDatetimeIndex(object):\n\n def test_construction_caching(self):\n\n df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),\n 'dttz': pd.date_range('20130101', periods=3,\n tz='US/Eastern'),\n 'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,\n pd.Timestamp('20130103')],\n 'dtns': pd.date_range('20130101', periods=3,\n freq='ns')})\n assert df.dttz.dtype.tz.zone == 'US/Eastern'\n\n @pytest.mark.parametrize('kwargs', [\n {'tz': 'dtype.tz'},\n {'dtype': 'dtype'},\n {'dtype': 'dtype', 'tz': 'dtype.tz'}])\n def test_construction_with_alt(self, kwargs, tz_aware_fixture):\n tz = tz_aware_fixture\n i = pd.date_range('20130101', periods=5, freq='H', tz=tz)\n kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}\n result = DatetimeIndex(i, **kwargs)\n tm.assert_index_equal(i, result)\n\n @pytest.mark.parametrize('kwargs', [\n {'tz': 'dtype.tz'},\n {'dtype': 'dtype'},\n {'dtype': 'dtype', 'tz': 'dtype.tz'}])\n def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):\n tz = tz_aware_fixture\n i = pd.date_range('20130101', periods=5, freq='H', tz=tz)\n kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}\n result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)\n expected = i.tz_localize(None).tz_localize('UTC').tz_convert(tz)\n tm.assert_index_equal(result, expected)\n\n # localize into the provided tz\n i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')\n expected = i.tz_localize(None).tz_localize('UTC')\n tm.assert_index_equal(i2, expected)\n\n # incompat tz/dtype\n pytest.raises(ValueError, lambda: DatetimeIndex(\n i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))\n\n def test_construction_index_with_mixed_timezones(self):\n # gh-11488: no tz results in DatetimeIndex\n result = Index([Timestamp('2011-01-01'),\n Timestamp('2011-01-02')], name='idx')\n exp = DatetimeIndex([Timestamp('2011-01-01'),\n Timestamp('2011-01-02')], name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # same tz results in DatetimeIndex\n result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],\n name='idx')\n exp = DatetimeIndex(\n [Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')\n ], tz='Asia/Tokyo', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # same tz results in DatetimeIndex (DST)\n result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),\n Timestamp('2011-08-01 10:00', tz='US/Eastern')],\n name='idx')\n exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-08-01 10:00')],\n tz='US/Eastern', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # Different tz results in Index(dtype=object)\n result = Index([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n name='idx')\n exp = 
Index([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n dtype='object', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n name='idx')\n exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n dtype='object', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n # length = 1\n result = Index([Timestamp('2011-01-01')], name='idx')\n exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # length = 1 with tz\n result = Index(\n [Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')\n exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',\n name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n def test_construction_index_with_mixed_timezones_with_NaT(self):\n # see gh-11488\n result = Index([pd.NaT, Timestamp('2011-01-01'),\n pd.NaT, Timestamp('2011-01-02')], name='idx')\n exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),\n pd.NaT, Timestamp('2011-01-02')], name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # Same tz results in DatetimeIndex\n result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n pd.NaT, Timestamp('2011-01-02 10:00',\n tz='Asia/Tokyo')],\n name='idx')\n exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),\n pd.NaT, Timestamp('2011-01-02 10:00')],\n tz='Asia/Tokyo', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # same tz results in DatetimeIndex (DST)\n result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),\n pd.NaT,\n Timestamp('2011-08-01 10:00', tz='US/Eastern')],\n name='idx')\n exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,\n Timestamp('2011-08-01 10:00')],\n tz='US/Eastern', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # different tz results in Index(dtype=object)\n result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),\n pd.NaT, Timestamp('2011-01-02 10:00',\n tz='US/Eastern')],\n name='idx')\n exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),\n pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n dtype='object', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n pd.NaT, Timestamp('2011-01-02 10:00',\n tz='US/Eastern')], name='idx')\n exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n dtype='object', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n # all NaT\n result = Index([pd.NaT, pd.NaT], name='idx')\n exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, 
DatetimeIndex)\n assert result.tz is None\n\n # all NaT with tz\n result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')\n exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')\n\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n def test_construction_dti_with_mixed_timezones(self):\n # GH 11488 (not changed, added explicit tests)\n\n # no tz results in DatetimeIndex\n result = DatetimeIndex(\n [Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')\n exp = DatetimeIndex(\n [Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # same tz results in DatetimeIndex\n result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n Timestamp('2011-01-02 10:00',\n tz='Asia/Tokyo')],\n name='idx')\n exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-02 10:00')],\n tz='Asia/Tokyo', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # same tz results in DatetimeIndex (DST)\n result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),\n Timestamp('2011-08-01 10:00',\n tz='US/Eastern')],\n name='idx')\n exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-08-01 10:00')],\n tz='US/Eastern', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # different tz coerces tz-naive to tz-awareIndex(dtype=object)\n result = DatetimeIndex([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-02 10:00',\n tz='US/Eastern')], name='idx')\n exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),\n Timestamp('2011-01-02 10:00')],\n tz='US/Eastern', name='idx')\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # tz mismatch affecting to tz-aware raises TypeError/ValueError\n\n with pytest.raises(ValueError):\n DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n name='idx')\n\n with tm.assert_raises_regex(TypeError,\n 'data is already tz-aware'):\n DatetimeIndex([Timestamp('2011-01-01 10:00'),\n Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n tz='Asia/Tokyo', name='idx')\n\n with pytest.raises(ValueError):\n DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),\n Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n tz='US/Eastern', name='idx')\n\n with tm.assert_raises_regex(TypeError,\n 'data is already tz-aware'):\n # passing tz should results in DatetimeIndex, then mismatch raises\n # TypeError\n Index([pd.NaT, Timestamp('2011-01-01 10:00'),\n pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],\n tz='Asia/Tokyo', name='idx')\n\n def test_construction_base_constructor(self):\n arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]\n tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))\n tm.assert_index_equal(pd.Index(np.array(arr)),\n pd.DatetimeIndex(np.array(arr)))\n\n arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]\n tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))\n tm.assert_index_equal(pd.Index(np.array(arr)),\n pd.DatetimeIndex(np.array(arr)))\n\n def test_construction_outofbounds(self):\n # GH 13663\n dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),\n datetime(5000, 1, 1), datetime(6000, 1, 1)]\n exp = Index(dates, dtype=object)\n # coerces to 
object\n tm.assert_index_equal(Index(dates), exp)\n\n with pytest.raises(OutOfBoundsDatetime):\n # can't create DatetimeIndex\n DatetimeIndex(dates)\n\n def test_construction_with_ndarray(self):\n # GH 5152\n dates = [datetime(2013, 10, 7),\n datetime(2013, 10, 8),\n datetime(2013, 10, 9)]\n data = DatetimeIndex(dates, freq=pd.tseries.frequencies.BDay()).values\n result = DatetimeIndex(data, freq=pd.tseries.frequencies.BDay())\n expected = DatetimeIndex(['2013-10-07',\n '2013-10-08',\n '2013-10-09'],\n freq='B')\n tm.assert_index_equal(result, expected)\n\n def test_constructor_coverage(self):\n rng = date_range('1/1/2000', periods=10.5)\n exp = date_range('1/1/2000', periods=10)\n tm.assert_index_equal(rng, exp)\n\n msg = 'periods must be a number, got foo'\n with tm.assert_raises_regex(TypeError, msg):\n DatetimeIndex(start='1/1/2000', periods='foo', freq='D')\n\n pytest.raises(ValueError, DatetimeIndex, start='1/1/2000',\n end='1/10/2000')\n\n pytest.raises(ValueError, DatetimeIndex, '1/1/2000')\n\n # generator expression\n gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))\n result = DatetimeIndex(gen)\n expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)\n for i in range(10)])\n tm.assert_index_equal(result, expected)\n\n # NumPy string array\n strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])\n result = DatetimeIndex(strings)\n expected = DatetimeIndex(strings.astype('O'))\n tm.assert_index_equal(result, expected)\n\n from_ints = DatetimeIndex(expected.asi8)\n tm.assert_index_equal(from_ints, expected)\n\n # string with NaT\n strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])\n result = DatetimeIndex(strings)\n expected = DatetimeIndex(strings.astype('O'))\n tm.assert_index_equal(result, expected)\n\n from_ints = DatetimeIndex(expected.asi8)\n tm.assert_index_equal(from_ints, expected)\n\n # non-conforming\n pytest.raises(ValueError, DatetimeIndex,\n ['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')\n\n pytest.raises(ValueError, DatetimeIndex, start='2011-01-01',\n freq='b')\n pytest.raises(ValueError, DatetimeIndex, end='2011-01-01',\n freq='B')\n pytest.raises(ValueError, DatetimeIndex, periods=10, freq='D')\n\n @pytest.mark.parametrize('freq', ['AS', 'W-SUN'])\n def test_constructor_datetime64_tzformat(self, freq):\n # see GH#6572: ISO 8601 format results in pytz.FixedOffset\n idx = date_range('2013-01-01T00:00:00-05:00',\n '2016-01-01T23:59:59-05:00', freq=freq)\n expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',\n freq=freq, tz=pytz.FixedOffset(-300))\n tm.assert_index_equal(idx, expected)\n # Unable to use `US/Eastern` because of DST\n expected_i8 = date_range('2013-01-01T00:00:00',\n '2016-01-01T23:59:59', freq=freq,\n tz='America/Lima')\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n idx = date_range('2013-01-01T00:00:00+09:00',\n '2016-01-01T23:59:59+09:00', freq=freq)\n expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',\n freq=freq, tz=pytz.FixedOffset(540))\n tm.assert_index_equal(idx, expected)\n expected_i8 = date_range('2013-01-01T00:00:00',\n '2016-01-01T23:59:59', freq=freq,\n tz='Asia/Tokyo')\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n # Non ISO 8601 format results in dateutil.tz.tzoffset\n idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',\n freq=freq)\n expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',\n freq=freq, tz=pytz.FixedOffset(-300))\n tm.assert_index_equal(idx, expected)\n # Unable to use `US/Eastern` because of 
DST\n expected_i8 = date_range('2013-01-01T00:00:00',\n '2016-01-01T23:59:59', freq=freq,\n tz='America/Lima')\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n idx = date_range('2013/1/1 0:00:00+9:00',\n '2016/1/1 23:59:59+09:00', freq=freq)\n expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',\n freq=freq, tz=pytz.FixedOffset(540))\n tm.assert_index_equal(idx, expected)\n expected_i8 = date_range('2013-01-01T00:00:00',\n '2016-01-01T23:59:59', freq=freq,\n tz='Asia/Tokyo')\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n def test_constructor_dtype(self):\n\n # passing a dtype with a tz should localize\n idx = DatetimeIndex(['2013-01-01', '2013-01-02'],\n dtype='datetime64[ns, US/Eastern]')\n expected = DatetimeIndex(['2013-01-01', '2013-01-02']\n ).tz_localize('US/Eastern')\n tm.assert_index_equal(idx, expected)\n\n idx = DatetimeIndex(['2013-01-01', '2013-01-02'],\n tz='US/Eastern')\n tm.assert_index_equal(idx, expected)\n\n # if we already have a tz and its not the same, then raise\n idx = DatetimeIndex(['2013-01-01', '2013-01-02'],\n dtype='datetime64[ns, US/Eastern]')\n\n pytest.raises(ValueError,\n lambda: DatetimeIndex(idx,\n dtype='datetime64[ns]'))\n\n # this is effectively trying to convert tz's\n pytest.raises(TypeError,\n lambda: DatetimeIndex(idx,\n dtype='datetime64[ns, CET]'))\n pytest.raises(ValueError,\n lambda: DatetimeIndex(\n idx, tz='CET',\n dtype='datetime64[ns, US/Eastern]'))\n result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]')\n tm.assert_index_equal(idx, result)\n\n def test_constructor_name(self):\n idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',\n name='TEST')\n assert idx.name == 'TEST'\n\n def test_000constructor_resolution(self):\n # 2252\n t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)\n idx = DatetimeIndex([t1])\n\n assert idx.nanosecond[0] == t1.nanosecond\n\n def test_disallow_setting_tz(self):\n # GH 3746\n dti = DatetimeIndex(['2010'], tz='UTC')\n with pytest.raises(AttributeError):\n dti.tz = pytz.timezone('US/Pacific')\n\n @pytest.mark.parametrize('tz', [\n None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'),\n Timestamp('2000', tz='America/Los_Angeles').tz])\n def test_constructor_start_end_with_tz(self, tz):\n # GH 18595\n start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles')\n end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles')\n result = DatetimeIndex(freq='D', start=start, end=end, tz=tz)\n expected = DatetimeIndex(['2013-01-01 06:00:00',\n '2013-01-02 06:00:00'],\n tz='America/Los_Angeles')\n tm.assert_index_equal(result, expected)\n # Especially assert that the timezone is consistent for pytz\n assert pytz.timezone('America/Los_Angeles') is result.tz\n\n @pytest.mark.parametrize('tz', ['US/Pacific', 'US/Eastern', 'Asia/Tokyo'])\n def test_constructor_with_non_normalized_pytz(self, tz):\n # GH 18595\n non_norm_tz = Timestamp('2010', tz=tz).tz\n result = DatetimeIndex(['2010'], tz=non_norm_tz)\n assert pytz.timezone(tz) is result.tz\n\n def test_constructor_timestamp_near_dst(self):\n # GH 20854\n ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'),\n Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')]\n result = DatetimeIndex(ts)\n expected = DatetimeIndex([ts[0].to_pydatetime(),\n ts[1].to_pydatetime()])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize('klass', [Index, DatetimeIndex])\n @pytest.mark.parametrize('box', [\n np.array, partial(np.array, dtype=object), list])\n 
@pytest.mark.parametrize('tz, dtype', [\n ['US/Pacific', 'datetime64[ns, US/Pacific]'],\n [None, 'datetime64[ns]']])\n def test_constructor_with_int_tz(self, klass, box, tz, dtype):\n # GH 20997, 20964\n ts = Timestamp('2018-01-01', tz=tz)\n result = klass(box([ts.value]), dtype=dtype)\n expected = klass([ts])\n assert result == expected\n\n\nclass TestTimeSeries(object):\n\n def test_dti_constructor_preserve_dti_freq(self):\n rng = date_range('1/1/2000', '1/2/2000', freq='5min')\n\n rng2 = DatetimeIndex(rng)\n assert rng.freq == rng2.freq\n\n def test_dti_constructor_years_only(self, tz_naive_fixture):\n tz = tz_naive_fixture\n # GH 6961\n rng1 = date_range('2014', '2015', freq='M', tz=tz)\n expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz)\n\n rng2 = date_range('2014', '2015', freq='MS', tz=tz)\n expected2 = date_range('2014-01-01', '2015-01-01', freq='MS', tz=tz)\n\n rng3 = date_range('2014', '2020', freq='A', tz=tz)\n expected3 = date_range('2014-12-31', '2019-12-31', freq='A', tz=tz)\n\n rng4 = date_range('2014', '2020', freq='AS', tz=tz)\n expected4 = date_range('2014-01-01', '2020-01-01', freq='AS', tz=tz)\n\n for rng, expected in [(rng1, expected1), (rng2, expected2),\n (rng3, expected3), (rng4, expected4)]:\n tm.assert_index_equal(rng, expected)\n\n @pytest.mark.parametrize('dtype', [np.int64, np.int32, np.int16, np.int8])\n def test_dti_constructor_small_int(self, dtype):\n # GH 13721\n exp = DatetimeIndex(['1970-01-01 00:00:00.00000000',\n '1970-01-01 00:00:00.00000001',\n '1970-01-01 00:00:00.00000002'])\n\n arr = np.array([0, 10, 20], dtype=dtype)\n tm.assert_index_equal(DatetimeIndex(arr), exp)\n\n def test_ctor_str_intraday(self):\n rng = DatetimeIndex(['1-1-2000 00:00:01'])\n assert rng[0].second == 1\n\n def test_is_(self):\n dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')\n assert dti.is_(dti)\n assert dti.is_(dti.view())\n assert not dti.is_(dti.copy())\n\n def test_index_cast_datetime64_other_units(self):\n arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')\n idx = Index(arr)\n\n assert (idx.values == conversion.ensure_datetime64ns(arr)).all()\n\n def test_constructor_int64_nocopy(self):\n # GH#1624\n arr = np.arange(1000, dtype=np.int64)\n index = DatetimeIndex(arr)\n\n arr[50:100] = -1\n assert (index.asi8[50:100] == -1).all()\n\n arr = np.arange(1000, dtype=np.int64)\n index = DatetimeIndex(arr, copy=True)\n\n arr[50:100] = -1\n assert (index.asi8[50:100] != -1).all()\n\n @pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B', 'BH',\n 'T', 'S', 'L', 'U', 'H', 'N', 'C'])\n def test_from_freq_recreate_from_data(self, freq):\n org = DatetimeIndex(start='2001/02/01 09:00', freq=freq, periods=1)\n idx = DatetimeIndex(org, freq=freq)\n tm.assert_index_equal(idx, org)\n\n org = DatetimeIndex(start='2001/02/01 09:00', freq=freq,\n tz='US/Pacific', periods=1)\n idx = DatetimeIndex(org, freq=freq, tz='US/Pacific')\n tm.assert_index_equal(idx, org)\n\n def test_datetimeindex_constructor_misc(self):\n arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']\n pytest.raises(Exception, DatetimeIndex, arr)\n\n arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']\n idx1 = DatetimeIndex(arr)\n\n arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']\n idx2 = DatetimeIndex(arr)\n\n arr = [Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',\n '2005-01-04']\n idx3 = DatetimeIndex(arr)\n\n arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',\n '2005-01-04'], dtype='O')\n idx4 = DatetimeIndex(arr)\n\n arr = 
to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])\n idx5 = DatetimeIndex(arr)\n\n arr = to_datetime(['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'\n ])\n idx6 = DatetimeIndex(arr)\n\n idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)\n idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,\n yearfirst=True)\n tm.assert_index_equal(idx7, idx8)\n\n for other in [idx2, idx3, idx4, idx5, idx6]:\n assert (idx1.values == other.values).all()\n\n sdate = datetime(1999, 12, 25)\n edate = datetime(2000, 1, 1)\n idx = DatetimeIndex(start=sdate, freq='1B', periods=20)\n assert len(idx) == 20\n assert idx[0] == sdate + 0 * offsets.BDay()\n assert idx.freq == 'B'\n\n idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)\n assert len(idx) == 20\n assert idx[-1] == edate\n assert idx.freq == '5D'\n\n idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')\n idx2 = DatetimeIndex(start=sdate, end=edate,\n freq=offsets.Week(weekday=6))\n assert len(idx1) == len(idx2)\n assert idx1.freq == idx2.freq\n\n idx1 = DatetimeIndex(start=sdate, end=edate, freq='QS')\n idx2 = DatetimeIndex(start=sdate, end=edate,\n freq=offsets.QuarterBegin(startingMonth=1))\n assert len(idx1) == len(idx2)\n assert idx1.freq == idx2.freq\n\n idx1 = DatetimeIndex(start=sdate, end=edate, freq='BQ')\n idx2 = DatetimeIndex(start=sdate, end=edate,\n freq=offsets.BQuarterEnd(startingMonth=12))\n assert len(idx1) == len(idx2)\n assert idx1.freq == idx2.freq\n"
] | [
[
"pandas.util.testing.assert_numpy_array_equal",
"pandas.to_datetime",
"pandas.Timestamp",
"pandas.offsets.BQuarterEnd",
"numpy.arange",
"pandas.util.testing.assert_raises_regex",
"pandas.offsets.Week",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.tseries.frequencies.BDay",
"pandas.offsets.BDay",
"pandas.util.testing.assert_index_equal",
"pandas.date_range",
"pandas._libs.tslibs.conversion.ensure_datetime64ns",
"numpy.array",
"pandas.offsets.QuarterBegin",
"pandas.datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Bill-Software-Engineer/trading-technical-indicators | [
"f7d3478c054595106afe34f08483218afe9c4ba1",
"fc00008a41da54f160609343e866c72306f4962c",
"f7d3478c054595106afe34f08483218afe9c4ba1",
"f7d3478c054595106afe34f08483218afe9c4ba1",
"f7d3478c054595106afe34f08483218afe9c4ba1"
] | [
"tests/test_indicator_market_facilitation_index.py",
"examples/ml/example_machine_learning_dt.py",
"tests/test_indicator_relative_volatility_index.py",
"tests/test_indicator_ultimate_oscillator.py",
"tti/indicators/_on_balance_volume.py"
] | [
"\"\"\"\nTrading-Technical-Indicators (tti) python library\n\nFile name: test_indicator_market_facilitation_index.py\n tti.indicators package, _market_facilitation_index.py module unit tests.\n\"\"\"\n\nimport unittest\nimport tti.indicators\nfrom test_indicators_common import TestIndicatorsCommon\n\nimport pandas as pd\nimport re\n\n\nclass TestMarketFacilitationIndex(unittest.TestCase, TestIndicatorsCommon):\n\n precision = 10\n\n indicator = tti.indicators.MarketFacilitationIndex\n\n ti_data_rows = [0, 1, 2]\n\n df = pd.read_csv('./data/sample_data.csv', parse_dates=True, index_col=0)\n\n indicator_input_arguments = {}\n\n indicator_other_input_arguments = []\n\n indicator_minimum_required_data = 1\n\n mandatory_arguments_missing_cases = []\n\n required_input_data_columns = [\"high\", \"low\", \"volume\"]\n\n arguments_wrong_type = [\n {'input_data': 'No_DataFrame'},\n {'input_data': df, 'fill_missing_values': 'no_boolean'}\n ]\n\n arguments_wrong_value = []\n\n graph_file_name = '_'.join(\n x.lower() for x in re.findall('[A-Z][^A-Z]*', str(\n indicator).split('.')[-1][:-2]))\n\n graph_file_name = './figures/test_' + graph_file_name + '.png'\n\n indicator_test_data_file_name = '_'.join(\n x.lower() for x in re.findall('[A-Z][^A-Z]*', str(\n indicator).split('.')[-1][:-2]))\n\n indicator_test_data_file_name = \\\n './data/test_' + indicator_test_data_file_name + '_on_sample_data.csv'\n\n assertRaises = unittest.TestCase.assertRaises\n assertEqual = unittest.TestCase.assertEqual\n assertIn = unittest.TestCase.assertIn\n subTest = unittest.TestCase.subTest\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"\nTrading-Technical-Indicators (tti) python library\n\nFile name: example_machine_learning_dt.py\n Example code for the trading technical indicators library, Machine Learning\n features (DT model).\n\"\"\"\n\nimport pandas as pd\nfrom tti.ml import MachineLearningDT\n\n# Read data from csv file. Set the index to the correct column (dates column)\ninput_data = pd.read_csv(\n './data/SCMN.SW.csv', parse_dates=True, index_col=0)\n\n# Train a Decision Tree (DT) model\nmodel = MachineLearningDT()\n\nmodel.mlTrainModel(\n input_data=input_data, pool_size=6, verbose=False)\n\n# Get trained model details\nprint('\\nTrained DT model details:', model.mlModelDetails())\n\n# Predict (use 60 last periods to predict next period)\nprint('\\nModel prediction:',\n model.mlPredict(input_data=input_data.iloc[-60:, :]))\n\n# Save model\nmodel.mlSaveModel(file_name='./data/dt_trained_model_SCMN_SW.dmp')\n",
"\"\"\"\nTrading-Technical-Indicators (tti) python library\n\nFile name: test_indicator_relative_volatility_index.py\n tti.indicators package, _relative_volatility_index.py module unit tests.\n\"\"\"\n\nimport unittest\nimport tti.indicators\nfrom test_indicators_common import TestIndicatorsCommon\n\nimport pandas as pd\nimport re\n\n\nclass TestRelativeVolatilityIndex(unittest.TestCase, TestIndicatorsCommon):\n\n indicator = tti.indicators.RelativeVolatilityIndex\n\n ti_data_rows = [0, 1, 2]\n\n df = pd.read_csv('./data/sample_data.csv', parse_dates=True, index_col=0)\n\n indicator_input_arguments = {'period': 5}\n\n indicator_other_input_arguments = [{'period': 1}, {'period': 3159}]\n\n indicator_minimum_required_data = indicator_input_arguments['period'] + 10\n\n mandatory_arguments_missing_cases = []\n\n required_input_data_columns = [\"high\", \"low\"]\n\n arguments_wrong_type = [\n {'input_data': 'No_DataFrame'},\n {'input_data': df, 'period': 'no_numeric'},\n {'input_data': df, 'fill_missing_values': 'no_boolean'}\n ]\n\n arguments_wrong_value = [\n {'input_data': df, 'period': -1},\n {'input_data': df, 'period': 0}\n ]\n\n graph_file_name = '_'.join(\n x.lower() for x in re.findall('[A-Z][^A-Z]*', str(\n indicator).split('.')[-1][:-2]))\n\n graph_file_name = './figures/test_' + graph_file_name + '.png'\n\n indicator_test_data_file_name = '_'.join(\n x.lower() for x in re.findall('[A-Z][^A-Z]*', str(\n indicator).split('.')[-1][:-2]))\n\n indicator_test_data_file_name = \\\n './data/test_' + indicator_test_data_file_name + '_on_sample_data.csv'\n\n assertRaises = unittest.TestCase.assertRaises\n assertEqual = unittest.TestCase.assertEqual\n assertIn = unittest.TestCase.assertIn\n subTest = unittest.TestCase.subTest\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"\nTrading-Technical-Indicators (tti) python library\n\nFile name: test_indicator_ultimate_oscillator.py\n tti.indicators package, _ultimate_oscillator.py module unit tests.\n\"\"\"\n\nimport unittest\nimport tti.indicators\nfrom test_indicators_common import TestIndicatorsCommon\n\nimport pandas as pd\nimport re\n\n\nclass TestUltimateOscillator(unittest.TestCase, TestIndicatorsCommon):\n\n indicator = tti.indicators.UltimateOscillator\n\n ti_data_rows = [0, 57, 58]\n\n df = pd.read_csv('./data/sample_data.csv', parse_dates=True, index_col=0)\n\n indicator_input_arguments = {}\n\n indicator_other_input_arguments = []\n\n indicator_minimum_required_data = 28\n\n mandatory_arguments_missing_cases = []\n\n required_input_data_columns = [\"high\", \"low\", \"close\"]\n\n arguments_wrong_type = [\n {'input_data': 'No_DataFrame'},\n {'input_data': df, 'fill_missing_values': 'no_boolean'}\n ]\n\n arguments_wrong_value = []\n\n graph_file_name = '_'.join(\n x.lower() for x in re.findall('[A-Z][^A-Z]*', str(\n indicator).split('.')[-1][:-2]))\n\n graph_file_name = './figures/test_' + graph_file_name + '.png'\n\n indicator_test_data_file_name = '_'.join(\n x.lower() for x in re.findall('[A-Z][^A-Z]*', str(\n indicator).split('.')[-1][:-2]))\n\n indicator_test_data_file_name = \\\n './data/test_' + indicator_test_data_file_name + '_on_sample_data.csv'\n\n assertRaises = unittest.TestCase.assertRaises\n assertEqual = unittest.TestCase.assertEqual\n assertIn = unittest.TestCase.assertIn\n subTest = unittest.TestCase.subTest\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"\nTrading-Technical-Indicators (tti) python library\n\nFile name: _on_balance_volume.py\n Implements the On Balance Volume technical indicator.\n\"\"\"\n\nimport pandas as pd\n\nfrom ._technical_indicator import TechnicalIndicator\nfrom ..utils.constants import TRADE_SIGNALS\n\n\nclass OnBalanceVolume(TechnicalIndicator):\n \"\"\"\n On Balance Volume Technical Indicator class implementation.\n\n Args:\n input_data (pandas.DataFrame): The input data. Required input columns\n are ``close``, ``volume``. The index is of type\n ``pandas.DatetimeIndex``.\n\n fill_missing_values (bool, default=True): If set to True, missing\n values in the input data are being filled.\n\n Attributes:\n _input_data (pandas.DataFrame): The ``input_data`` after preprocessing.\n\n _ti_data (pandas.DataFrame): The calculated indicator. Index is of type\n ``pandas.DatetimeIndex``. It contains one column, the ``obv``.\n\n _properties (dict): Indicator properties.\n\n _calling_instance (str): The name of the class.\n\n Raises:\n WrongTypeForInputParameter: Input argument has wrong type.\n WrongValueForInputParameter: Unsupported value for input argument.\n NotEnoughInputData: Not enough data for calculating the indicator.\n TypeError: Type error occurred when validating the ``input_data``.\n ValueError: Value error occurred when validating the ``input_data``.\n \"\"\"\n def __init__(self, input_data, fill_missing_values=True):\n\n # Control is passing to the parent class\n super().__init__(calling_instance=self.__class__.__name__,\n input_data=input_data,\n fill_missing_values=fill_missing_values)\n\n def _calculateTi(self):\n \"\"\"\n Calculates the technical indicator for the given input data. The input\n data are taken from an attribute of the parent class.\n\n Returns:\n pandas.DataFrame: The calculated indicator. Index is of type\n ``pandas.DatetimeIndex``. It contains one column, the ``obv``.\n \"\"\"\n\n obv = pd.DataFrame(index=self._input_data.index, columns=['obv'],\n data=0, dtype='int64')\n\n obv['obv'][self._input_data['close'] > self._input_data['close'].\n shift(1)] = self._input_data['volume']\n\n obv['obv'][self._input_data['close'] < self._input_data['close'].\n shift(1)] = -self._input_data['volume']\n\n obv['obv'] = obv['obv'].cumsum()\n\n return obv\n\n def getTiSignal(self):\n \"\"\"\n Calculates and returns the trading signal for the calculated technical\n indicator.\n\n Returns:\n {('hold', 0), ('buy', -1), ('sell', 1)}: The calculated trading\n signal.\n \"\"\"\n\n # Trading signals on warnings for breakout (upward or downward)\n # 3-days period is used for trend calculation\n\n # Not enough data for calculating trading signal\n if len(self._ti_data.index) < 3:\n return TRADE_SIGNALS['hold']\n\n # Warning for a downward breakout\n if self._ti_data['obv'].iat[-3] > self._ti_data['obv'].iat[-2] > \\\n self._ti_data['obv'].iat[-1]:\n return TRADE_SIGNALS['buy']\n\n # Warning for a upward breakout\n elif self._ti_data['obv'].iat[-3] < self._ti_data['obv'].iat[-2] < \\\n self._ti_data['obv'].iat[-1]:\n return TRADE_SIGNALS['sell']\n\n else:\n return TRADE_SIGNALS['hold']\n"
] | [
[
"pandas.read_csv"
],
[
"pandas.read_csv"
],
[
"pandas.read_csv"
],
[
"pandas.read_csv"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
red-tie/DRLib | [
"42398bd696b7cedc8f207385f8eac4e264bd9c4e",
"21f0e8e76b2a6571ee0b089c9431838e77d9f523"
] | [
"spinup_utils/plot.py",
"algos/tf1/sac_auto/sac_auto_class.py"
] | [
"import seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport json\nimport os\nimport os.path as osp\nimport numpy as np\n\nDIV_LINE_WIDTH = 50\n\n# Global vars for tracking and labeling data at load time.\nexp_idx = 0\nunits = dict()\n\n\ndef plot_data(data, xaxis='Epoch', value=\"TestEpRet\",\n condition=\"Condition1\", smooth=1, **kwargs):\n if smooth > 1:\n \"\"\"\n smooth data with moving window average.\n that is,\n smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k])\n where the \"smooth\" param is width of that window (2k+1)\n \"\"\"\n y = np.ones(smooth)\n for datum in data:\n x = np.asarray(datum[value])\n z = np.ones(len(x))\n smoothed_x = np.convolve(x,y,'same') / np.convolve(z,y,'same')\n datum[value] = smoothed_x\n\n if isinstance(data, list):\n data = pd.concat(data, ignore_index=True)\n sns.set(style=\"darkgrid\", font_scale=1.5)\n sns.tsplot(data=data, time=xaxis, value=value, unit=\"Unit\", condition=condition, ci='sd', **kwargs)\n \"\"\"\n If you upgrade to any version of Seaborn greater than 0.8.1, switch from \n tsplot to lineplot replacing L29 with:\n\n sns.lineplot(data=data, x=xaxis, y=value, hue=condition, ci='sd', **kwargs)\n\n Changes the colorscheme and the default legend style, though.\n \"\"\"\n # plt.legend(loc='upper center', ncol=6, handlelength=1,\n # mode=\"expand\", borderaxespad=0., prop={'size': 13})\n # plt.legend(loc='best').set_draggable(True)\n plt.legend(loc='best') # origin\n # plt.legend(bbox_to_anchor=(-0.1, 0.9), loc=0) #added by zll\n \"\"\"\n For the version of the legend used in the Spinning Up benchmarking page, \n swap L38 with:\n\n plt.legend(loc='upper center', ncol=6, handlelength=1,\n mode=\"expand\", borderaxespad=0., prop={'size': 13})\n \"\"\"\n\n xscale = np.max(np.asarray(data[xaxis])) > 5e3\n if xscale:\n # Just some formatting niceness: x-axis scale in scientific notation if max x is large\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))\n\n plt.tight_layout(pad=0.5)\n\n\ndef get_datasets(logdir, condition=None):\n \"\"\"\n Recursively look through logdir for output files produced by\n spinup.logx.Logger.\n\n Assumes that any file \"progress.txt\" is a valid hit.\n \"\"\"\n global exp_idx\n global units\n datasets = []\n for root, _, files in os.walk(logdir):\n if 'progress.txt' in files:\n exp_name = None\n try:\n config_path = open(os.path.join(root,'config.json'))\n config = json.load(config_path)\n if 'exp_name' in config:\n exp_name = config['exp_name']\n except:\n print('No file named config.json')\n condition1 = condition or exp_name or 'exp'\n condition2 = condition1 + '-' + str(exp_idx)\n exp_idx += 1\n if condition1 not in units:\n units[condition1] = 0\n unit = units[condition1]\n units[condition1] += 1\n\n try:\n exp_data = pd.read_table(os.path.join(root,'progress.txt'))\n except:\n print('Could not read from %s'%os.path.join(root,'progress.txt'))\n continue\n # performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'TestEpRet'\n # performance = 'AverageEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet'\n performance = 'TestSuccess' if 'TestSuccess' in exp_data else 'AverageEpRet'\n exp_data.insert(len(exp_data.columns),'Unit',unit)\n exp_data.insert(len(exp_data.columns),'Condition1',condition1)\n exp_data.insert(len(exp_data.columns),'Condition2',condition2)\n exp_data.insert(len(exp_data.columns),'Performance',exp_data[performance])\n datasets.append(exp_data)\n return datasets\n\n\ndef get_all_datasets(all_logdirs, legend=None, 
select=None, exclude=None):\n \"\"\"\n For every entry in all_logdirs,\n 1) check if the entry is a real directory and if it is,\n pull data from it;\n\n 2) if not, check to see if the entry is a prefix for a\n real directory, and pull data from that.\n \"\"\"\n logdirs = []\n for logdir in all_logdirs:\n if osp.isdir(logdir) and logdir[-1]==os.sep:\n logdirs += [logdir]\n else:\n basedir = osp.dirname(logdir)\n fulldir = lambda x : osp.join(basedir, x)\n prefix = logdir.split(os.sep)[-1]\n print(\"basedir:\", basedir)\n listdir= os.listdir(basedir)\n logdirs += sorted([fulldir(x) for x in listdir if prefix in x])\n\n \"\"\"\n Enforce selection rules, which check logdirs for certain substrings.\n Makes it easier to look at graphs from particular ablations, if you\n launch many jobs at once with similar names.\n \"\"\"\n if select is not None:\n logdirs = [log for log in logdirs if all(x in log for x in select)]\n if exclude is not None:\n logdirs = [log for log in logdirs if all(not(x in log) for x in exclude)]\n\n # Verify logdirs\n print('Plotting from...\\n' + '='*DIV_LINE_WIDTH + '\\n')\n for logdir in logdirs:\n print(logdir)\n print('\\n' + '='*DIV_LINE_WIDTH)\n\n # Make sure the legend is compatible with the logdirs\n assert not(legend) or (len(legend) == len(logdirs)), \\\n \"Must give a legend title for each set of experiments.\"\n\n # Load data from logdirs\n data = []\n if legend:\n for log, leg in zip(logdirs, legend):\n data += get_datasets(log, leg)\n else:\n for log in logdirs:\n data += get_datasets(log)\n return data\n\n\ndef make_plots(all_logdirs, legend=None,\n xaxis=None, values=None,\n count=False,\n font_scale=1.5, smooth=1,\n select=None, exclude=None,\n estimator='mean'):\n data = get_all_datasets(all_logdirs, legend, select, exclude)\n values = values if isinstance(values, list) else [values]\n condition = 'Condition2' if count else 'Condition1'\n estimator = getattr(np, estimator) # choose what to show on main curve: mean? max? min?\n for value in values:\n plt.figure()\n plot_data(data, xaxis=xaxis, value=value,\n condition=condition, smooth=smooth, estimator=estimator)\n plt.show()\n plt.savefig(all_logdirs[0]+'ep_reward.png')\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('logdir', nargs='*')\n parser.add_argument('--legend', '-l', nargs='*')\n parser.add_argument('--xaxis', '-x', default='TotalEnvInteracts')\n parser.add_argument('--value', '-y', default='Performance', nargs='*')\n parser.add_argument('--count', action='store_true')\n # parser.add_argument('--count', default=\"False\")\n parser.add_argument('--smooth', '-s', type=int, default=20)\n parser.add_argument('--select', nargs='*')\n parser.add_argument('--exclude', nargs='*')\n parser.add_argument('--est', default='mean')\n args = parser.parse_args()\n \"\"\"\n\n Args: \n logdir (strings): As many log directories (or prefixes to log \n directories, which the plotter will autocomplete internally) as \n you'd like to plot from.\n\n legend (strings): Optional way to specify legend for the plot. The \n plotter legend will automatically use the ``exp_name`` from the\n config.json file, unless you tell it otherwise through this flag.\n This only works if you provide a name for each directory that\n will get plotted. (Note: this may not be the same as the number\n of logdir args you provide! 
Recall that the plotter looks for\n autocompletes of the logdir args: there may be more than one \n match for a given logdir prefix, and you will need to provide a \n legend string for each one of those matches---unless you have \n removed some of them as candidates via selection or exclusion \n rules (below).)\n\n xaxis (string): Pick what column from data is used for the x-axis.\n Defaults to ``TotalEnvInteracts``.\n\n value (strings): Pick what columns from data to graph on the y-axis. \n Submitting multiple values will produce multiple graphs. Defaults\n to ``Performance``, which is not an actual output of any algorithm.\n Instead, ``Performance`` refers to either ``AverageEpRet``, the \n correct performance measure for the on-policy algorithms, or\n ``AverageTestEpRet``, the correct performance measure for the \n off-policy algorithms. The plotter will automatically figure out \n which of ``AverageEpRet`` or ``AverageTestEpRet`` to report for \n each separate logdir.\n\n count: Optional flag. By default, the plotter shows y-values which\n are averaged across all results that share an ``exp_name``, \n which is typically a set of identical experiments that only vary\n in random seed. But if you'd like to see all of those curves \n separately, use the ``--count`` flag.\n\n smooth (int): Smooth data by averaging it over a fixed window. This \n parameter says how wide the averaging window will be.\n\n select (strings): Optional selection rule: the plotter will only show\n curves from logdirs that contain all of these substrings.\n\n exclude (strings): Optional exclusion rule: plotter will only show \n curves from logdirs that do not contain these substrings.\n\n \"\"\"\n\n make_plots(args.logdir, args.legend, args.xaxis, args.value, args.count,\n smooth=args.smooth, select=args.select, exclude=args.exclude,\n estimator=args.est)\n\n\nif __name__ == \"__main__\":\n main()\n",
"import numpy as np\nimport tensorflow as tf\nimport gym\nimport os\nimport time\nimport sys\n\nsys.path.append(\"../\")\ntry:\n from rl_algorithms.sac_auto import core\n from rl_algorithms.sac_auto.core import get_vars\nexcept:\n from sac_auto import core\n from sac_auto.core import get_vars\n\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for TD3 agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(obs1=self.obs1_buf[idxs],\n obs2=self.obs2_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n done=self.done_buf[idxs])\n\n\nclass SAC:\n def __init__(self,\n a_dim, obs_dim, a_bound,\n mlp_actor_critic=core.mlp_actor_critic,\n ac_kwargs=dict(), seed=0,\n replay_size=int(1e6), gamma=0.99,\n polyak=0.995, alpha=\"auto\",\n # pi_lr=1e-4, q_lr=1e-4,\n # batch_size=100,\n # act_noise=0.1, target_noise=0.2, noise_clip=0.5, \n # policy_delay=2,\n sess_opt=0.1,\n ):\n\n self.learn_step = 0\n\n self.obs_dim = obs_dim\n self.act_dim = a_dim\n self.act_limit = a_bound\n self.policy_delay = policy_delay\n # self.action_noise = act_noise\n\n # Share information about action space with policy architecture\n ac_kwargs['action_space'] = a_bound\n\n # Inputs to computation graph\n self.x_ph, self.a_ph, self.x2_ph, self.r_ph, self.d_ph = core.placeholders(obs_dim, a_dim, obs_dim, None, None)\n self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')\n self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')\n\n # Main outputs from computation graph\n with tf.variable_scope('main'):\n self.mu, self.pi, logp_pi, logp_pi2, q1, q2, q1_pi, q2_pi, = mlp_actor_critic(self.x_ph,\n self.x2_ph,\n self.a_ph,\n **ac_kwargs)\n\n # Target value network\n with tf.variable_scope('target'):\n _, _, logp_pi_, _, _, _, q1_pi_, q2_pi_ = mlp_actor_critic(self.x2_ph,\n self.x2_ph,\n self.a_ph,\n **ac_kwargs)\n\n # Experience buffer\n self.replay_buffer = ReplayBuffer(obs_dim=obs_dim,\n act_dim=self.act_dim,\n size=replay_size)\n\n # Count variables\n var_counts = tuple(core.count_vars(scope) for scope in\n ['main/pi', 'main/q1', 'main/q2', 'main/v', 'main'])\n print(('\\nNumber of parameters: \\t pi: %d, \\t' + \\\n 'q1: %d, \\t q2: %d, \\t v: %d, \\t total: %d\\n') % var_counts)\n # 重新修改下面这段!\n target_entropy = (-np.prod(a_dim))\n\n log_alpha = tf.get_variable('log_alpha', dtype=tf.float32, initializer=0.0)\n alpha = tf.exp(log_alpha)\n\n alpha_loss = tf.reduce_mean(-log_alpha * tf.stop_gradient(logp_pi + target_entropy))\n\n alpha_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4,\n name='alpha_optimizer')\n train_alpha_op = alpha_optimizer.minimize(loss=alpha_loss, var_list=[log_alpha])\n\n # Min Double-Q:\n min_q_pi = tf.minimum(q1_pi_, q2_pi_)\n\n # Targets for Q and V regression\n v_backup = 
tf.stop_gradient(min_q_pi - alpha * logp_pi2)\n q_backup = self.r_ph + gamma * (1 - self.d_ph) * v_backup\n\n # Soft actor-critic losses\n pi_loss = tf.reduce_mean(alpha * logp_pi - q1_pi)\n q1_loss = 0.5 * tf.reduce_mean((q_backup - q1) ** 2)\n q2_loss = 0.5 * tf.reduce_mean((q_backup - q2) ** 2)\n value_loss = q1_loss + q2_loss\n\n # Policy train op\n # (has to be separate from value train op, because q1_pi appears in pi_loss)\n pi_optimizer = tf.train.AdamOptimizer(learning_rate=self.actor_lr)\n train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))\n\n # Value train op\n # (control dep of train_pi_op because sess.run otherwise evaluates in nondeterministic order)\n value_optimizer = tf.train.AdamOptimizer(learning_rate=self.critic_lr)\n value_params = get_vars('main/q')\n with tf.control_dependencies([train_pi_op]):\n train_value_op = value_optimizer.minimize(value_loss,\n var_list=value_params)\n\n # Polyak averaging for target variables\n # (control flow because sess.run otherwise evaluates in nondeterministic order)\n with tf.control_dependencies([train_value_op]):\n target_update = tf.group([tf.assign(v_targ, polyak * v_targ + (1 - polyak) * v_main)\n for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])\n\n # All ops to call during one training step\n self.step_ops = [pi_loss,\n q1_loss, q2_loss,\n q1, q2,\n logp_pi, alpha,\n train_pi_op,\n train_value_op,\n target_update,\n train_alpha_op]\n\n # Initializing targets to match main variables\n target_init = tf.group([tf.assign(v_targ, v_main)\n for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])\n\n if sess_opt:\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=sess_opt)\n self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n else:\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.sess.run(target_init)\n\n def get_action(self, s, noise_scale=0):\n if not noise_scale:\n act_op = self.mu\n else:\n act_op = self.pi\n a = self.sess.run(act_op,\n feed_dict={self.x_ph: s.reshape(1, -1)})[0]\n return np.clip(a, -self.act_limit, self.act_limit)\n\n def store_transition(self, transition):\n (s, a, r, s_, done) = transition\n self.replay_buffer.store(s, a, r, s_, done)\n\n def test_agent(self, env, max_ep_len=200, n=5, logger=None):\n ep_reward_list = []\n for j in range(n):\n s = env.reset()\n ep_reward = 0\n for i in range(max_ep_len):\n # Take deterministic actions at test time (noise_scale=0) \n a = self.get_action(s)\n s, r, d, _ = env.step(a) \n ep_reward += r\n ep_reward_list.append(ep_reward)\n mean_ep_reward = np.mean(np.array(ep_reward_list))\n if logger:\n logger.store(TestEpRet=mean_ep_reward)\n if logger:\n return mean_ep_reward, logger\n else:\n return mean_ep_reward\n\n def learn(self, batch_size=100,\n actor_lr_input=0.001,\n critic_lr_input=0.001,\n ):\n\n batch = self.replay_buffer.sample_batch(batch_size)\n feed_dict = {self.x_ph: batch['obs1'],\n self.x2_ph: batch['obs2'],\n self.a_ph: batch['acts'],\n self.r_ph: batch['rews'],\n self.d_ph: batch['done'],\n self.actor_lr: actor_lr_input,\n self.critic_lr: critic_lr_input,\n }\n outs = self.sess.run(self.step_ops,\n feed_dict)\n self.learn_step += 1\n return outs\n\n def load_step_network(self, saver, load_path):\n checkpoint = tf.train.get_checkpoint_state(load_path)\n if checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(self.sess, tf.train.latest_checkpoint(load_path))\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n 
self.learn_step = int(checkpoint.model_checkpoint_path.split('-')[-1])\n else:\n print(\"Could not find old network weights\")\n\n def save_step_network(self, time_step, saver, save_path):\n saver.save(self.sess, save_path + 'network', global_step=time_step,\n write_meta_graph=False)\n\n def load_simple_network(self, path):\n saver = tf.train.Saver()\n saver.restore(self.sess, tf.train.latest_checkpoint(path))\n print(\"restore model successful\")\n\n def save_simple_network(self, save_path):\n saver = tf.train.Saver()\n saver.save(self.sess, save_path=save_path + \"/params\", write_meta_graph=False)\n\n\nif __name__ == '__main__':\n import argparse\n\n random_seed = int(time.time() * 1000 % 1000)\n random_seed = 184\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='HalfCheetah-v2')\n parser.add_argument('--hid', type=int, default=300)\n parser.add_argument('--l', type=int, default=1)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--seed', '-s', type=int, default=random_seed)\n parser.add_argument('--epochs', type=int, default=3000)\n parser.add_argument('--max_steps', type=int, default=1000)\n parser.add_argument('--exp_name', type=str, default='sac_auto_class')\n args = parser.parse_args()\n\n env = gym.make(args.env)\n env = env.unwrapped\n env.seed(args.seed)\n\n s_dim = env.observation_space.shape[0]\n a_dim = env.action_space.shape[0]\n a_bound = env.action_space.high[0]\n\n net = SAC(a_dim, s_dim, a_bound,\n # batch_size=100,\n sess_opt=0.1\n )\n ep_reward_list = []\n test_ep_reward_list = []\n\n for i in range(args.epochs):\n s = env.reset()\n ep_reward = 0\n st = time.time()\n for j in range(args.max_steps):\n\n # Add exploration noise\n if i < 10:\n a = np.random.rand(a_dim) * a_bound\n else:\n a = net.get_action(s, 0.1)\n\n a = np.clip(a, -a_bound, a_bound)\n\n s_, r, done, info = env.step(a)\n done = False if j == args.max_steps - 1 else done\n\n net.store_transition((s, a, r, s_, done))\n\n s = s_\n ep_reward += r\n if j == args.max_steps - 1:\n ep_update_time = time.time()\n for _ in range(args.max_steps):\n net.learn()\n ep_update_time = time.time() - ep_update_time\n ep_reward_list.append(ep_reward)\n print('Episode:', i, ' Reward: %i' % int(ep_reward),\n # 'Explore: %.2f' % var,\n \"learn step:\", net.learn_step,\n \"ep_time:\", np.round(time.time()-st, 3),\n \"up_time:\", np.round(ep_update_time, 3),\n )\n # if ep_reward > -300:RENDER = True\n\n # 增加测试部分!\n if i % 20 == 0:\n test_ep_reward = net.test_agent(env=env, n=5)\n test_ep_reward_list.append(test_ep_reward)\n print(\"-\" * 20)\n print('Episode:', i, ' Reward: %i' % int(ep_reward),\n 'Test Reward: %i' % int(test_ep_reward),\n )\n print(\"-\" * 20)\n\n break\n\n import matplotlib.pyplot as plt\n\n plt.plot(ep_reward_list)\n img_name = str(args.exp_name + \"_\" + args.env + \"_epochs\" +\n str(args.epochs) +\n \"_seed\" + str(args.seed))\n plt.title(img_name + \"_train\")\n plt.savefig(img_name + \".png\")\n plt.show()\n plt.close()\n\n plt.plot(test_ep_reward_list)\n plt.title(img_name + \"_test\")\n plt.savefig(img_name + \".png\")\n plt.show()"
] | [
[
"numpy.convolve",
"matplotlib.pyplot.legend",
"pandas.concat",
"matplotlib.pyplot.tight_layout",
"numpy.asarray",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.ticklabel_format",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"tensorflow.get_variable",
"tensorflow.control_dependencies",
"tensorflow.minimum",
"matplotlib.pyplot.plot",
"numpy.round",
"tensorflow.GPUOptions",
"tensorflow.train.AdamOptimizer",
"numpy.random.randint",
"numpy.clip",
"tensorflow.stop_gradient",
"tensorflow.ConfigProto",
"matplotlib.pyplot.close",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.zeros",
"matplotlib.pyplot.title",
"tensorflow.placeholder",
"matplotlib.pyplot.savefig",
"tensorflow.exp",
"tensorflow.global_variables_initializer",
"numpy.random.rand",
"matplotlib.pyplot.show",
"numpy.array",
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.latest_checkpoint",
"tensorflow.reduce_mean",
"tensorflow.assign",
"numpy.prod",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
achilleas-k/brian-scripts | [
"4d2d8c9a53e7202b60c78716e8b1a9d521293c54"
] | [
"python_matlab_comparison/st_distance.py"
] | [
"from __future__ import print_function\nimport os\nimport sys\nimport subprocess as subp\nimport numpy as np\nimport scipy.io as scio\nimport spike_distance as sd\nimport spike_distance_mp as sdm\nimport metrics\n\n\n\ndef octave_spkd(st_one, st_two, cost):\n \"\"\"\n Creates an octave `m` file to run the spike train distance script, then\n saves the two spike trains to disk in order to be loaded into the octave\n function script and run the comparison. The result is again saved, loaded\n and returned by this function.\n \"\"\"\n octave_filename = \"run_spkd.m\"\n octave_code = (\n \"% Automatically generated script. Code exists in st_distance.py.\\n\"\n \"% There is absolutely no reason this code could not just exist here\\n\"\n \"% to begin with.\"\n \"\\n\"\n \"output_precision(10)\\n\"\n \"load(\\\"tmp_spiketrains.mat\\\")\\n\"\n \"printf(\\\"OCTAVE: Loaded tmp_spiketrains.mat\\\\n\\\");\"\n \"d=spkd(one, two, cost);\\n\"\n \"save(\\\"-mat\\\", \\\"tmp_distfile.mat\\\", \\\"d\\\")\\n \"\n \"printf(\\\"OCTAVE: Saved tmp_distfile.mat\\\\n\\\");\"\n \"\\n\"\n )\n octave_script = open(octave_filename, \"w\")\n octave_script.write(octave_code)\n octave_script.close()\n print(\"Octave function created (%s) ...\" % (octave_filename))\n data = {\"one\": st_one, \"two\": st_two, \"cost\": cost}\n scio.savemat(\"tmp_spiketrains.mat\", data)\n print(\"Data saved to tmp_spiketrains.mat ...\")\n _null = open(os.devnull, \"w\")\n subp.call([\"octave\", \"run_spkd.m\"], stdout=_null)#, stderr=_null)\n print(\"Octave subprocess finished!\")\n oct_dist = scio.loadmat(\"tmp_distfile.mat\")[\"d\"]\n try:\n oct_dist = float(oct_dist)\n except ValueError:\n print(\"Something went wrong with the output of the process.\\n\"\n \"The result was: %s\" % (oct_dist), file=sys.stderr)\n return oct_dist\n\n\nif __name__==\"__main__\":\n # generate two spike trains\n print(\"Generating random spike trains ...\")\n len_one = np.random.randint(100)\n len_two = np.random.randint(100)\n st_one = np.cumsum(np.random.random(len_one))\n st_two = np.cumsum(np.random.random(len_two))\n cost = float(np.random.randint(100))\n\n print(\"Running python script(s) ...\")\n dist_sd = sd.stdistance(st_one, st_two, cost)\n dist_sdm = sdm.stdistance(st_one, st_two, cost)\n dist_m = metrics.vp_st_distance(st_one, st_two, cost)\n\n print(\"Doing the octave ...\")\n dist_oct = octave_spkd(st_one, st_two, cost)\n\n print(\"The results were as follows:\")\n print(\"m\\t\\tsd\\t\\tsdm\\t\\toctave\")\n print(\"%0.10f\\t%0.10f\\t%0.10f\\t%0.10f\" % (dist_m, dist_sd, dist_sdm, dist_oct))\n print(\"-\"*10)\n print(\"|m - oct| = %f\" % (np.abs(dist_m-dist_oct)))\n print(\"|sd - sdm| = %f\" % (np.abs(dist_sd-dist_sdm)))\n print(\"|sd - oct| = %f\" % (np.abs(dist_sd-dist_oct)))\n print(\"|sdm - oct| = %f\" % (np.abs(dist_sdm-dist_oct)))\n\n\n"
] | [
[
"numpy.random.random",
"numpy.abs",
"scipy.io.loadmat",
"scipy.io.savemat",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
dheeraj7596/MixText-LongTail | [
"929bee28bd4536cb475efff0dffad7b3cb2d2ac1"
] | [
"code/train.py"
] | [
"import argparse\nimport os\nimport random\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as Data\nfrom transformers_dir import *\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, WeightedRandomSampler\n\nfrom read_data import *\nfrom mixtext import MixText\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport gc\nfrom scipy.special import softmax\nfrom sklearn.metrics import f1_score,classification_report\n\nimport pickle\n\n\n\nparser = argparse.ArgumentParser(description='PyTorch MixText')\n\nparser.add_argument('--epochs', default=50, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--batch-size', default=4, type=int, metavar='N',\n help='train batchsize')\nparser.add_argument('--batch-size-u', default=24, type=int, metavar='N',\n help='train batchsize')\n\nparser.add_argument('--lrmain', '--learning-rate-bert', default=0.00001, type=float,\n metavar='LR', help='initial learning rate for bert')\nparser.add_argument('--lrlast', '--learning-rate-model', default=0.001, type=float,\n metavar='LR', help='initial learning rate for models')\n\nparser.add_argument('--gpu', default='0,1,2,3', type=str,\n help='id(s) for CUDA_VISIBLE_DEVICES')\n\nparser.add_argument('--n-labeled', type=int, default=20,\n help='number of labeled data')\n\nparser.add_argument('--un-labeled', default=5000, type=int,\n help='number of unlabeled data')\n\nparser.add_argument('--val-iteration', type=int, default=200,\n help='number of labeled data')\n\n\nparser.add_argument('--mix-option', default=True, type=bool, metavar='N',\n help='mix option, whether to mix or not')\nparser.add_argument('--mix-method', default=0, type=int, metavar='N',\n help='mix method, set different mix method')\nparser.add_argument('--separate-mix', default=False, type=bool, metavar='N',\n help='mix separate from labeled data and unlabeled data')\nparser.add_argument('--co', default=False, type=bool, metavar='N',\n help='set a random choice between mix and unmix during training')\nparser.add_argument('--train_aug', default=False, type=bool, metavar='N',\n help='augment labeled training data')\n\n\nparser.add_argument('--model', type=str, default='bert-base-uncased',\n help='pretrained model')\n\nparser.add_argument('--data-path', type=str, default='/Users/pushkar_bhuse/MixText/MixText-LongTail/data/yahoo_answers_csv/',\n help='path to data folders')\n\nparser.add_argument('--mix-layers-set', nargs='+',\n default=[0, 1, 2, 3], type=int, help='define mix layer set')\n\nparser.add_argument('--alpha', default=0.75, type=float,\n help='alpha for beta distribution')\n\nparser.add_argument('--lambda-u', default=1, type=float,\n help='weight for consistency loss term of unlabeled data')\nparser.add_argument('--T', default=0.5, type=float,\n help='temperature for sharpen function')\n\nparser.add_argument('--temp-change', default=1000000, type=int)\n\nparser.add_argument('--margin', default=0.7, type=float, metavar='N',\n help='margin for hinge loss')\nparser.add_argument('--lambda-u-hinge', default=0, type=float,\n help='weight for hinge loss term of unlabeled data')\nparser.add_argument('--nll_preprocessed', default = False, type=bool,\n help='boolean to indicate if GPT-2 is used for calculating longtailedness')\nparser.add_argument('--long_tailed', default = True, type=bool,\n help='boolean to indicate if Long-Tailedness should be computed')\nparser.add_argument('--new_method', default=True, 
type=bool,\n help='New Longtailedness score generation method')\n\nargs = parser.parse_args()\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nn_gpu = torch.cuda.device_count()\nprint(\"GPU num: \", n_gpu)\n\nbest_acc = 0\ntotal_steps = 0\nflag = 0\nprint('Whether mix: ', args.mix_option)\nprint(\"Mix layers sets: \", args.mix_layers_set)\n\n\ndef main():\n\n print(\"Current Configuration:\")\n print(\"New Method: {}\".format(args.new_method))\n print(\"LongTailed: {}\".format(args.long_tailed))\n print(\"NLL Preprocessed: {}\".format(args.nll_preprocessed))\n\n\n global best_acc\n # Read dataset and build dataloaders\n print(\"In MAIN {}\".format(args.long_tailed))\n train_labeled_set, train_unlabeled_set, val_set, test_set, n_labels = get_data(\n args.data_path, args.n_labeled, (args.nll_preprocessed or args.new_method), args.un_labeled, model=args.model, train_aug=args.train_aug)\n \n \n labeled_trainloader = Data.DataLoader(\n dataset=train_labeled_set, batch_size=args.batch_size, shuffle=True)\n\n if args.new_method:\n unlabeled_trainloader = None\n\n elif args.long_tailed:\n \n if args.nll_preprocessed:\n sampler = get_nll_sampler(train_unlabeled_set)\n else:\n sampler = get_tfidf_sampler(train_labeled_set.text, train_unlabeled_set.text)\n\n print(\"sampler created\")\n unlabeled_trainloader = Data.DataLoader(\n dataset=train_unlabeled_set, batch_size=args.batch_size_u, sampler = sampler)\n\n else:\n unlabeled_trainloader = Data.DataLoader(\n dataset=train_unlabeled_set, batch_size=args.batch_size_u, shuffle = True)\n\n val_loader = Data.DataLoader(\n dataset=val_set, batch_size=8, shuffle=False)\n test_loader = Data.DataLoader(\n dataset=test_set, batch_size=8, shuffle=False)\n \n gc.collect()\n\n # Define the model, set the optimizer\n model = MixText(n_labels, args.mix_option)\n if use_cuda:\n model = model.cuda()\n model = nn.DataParallel(model)\n optimizer = AdamW(\n [\n {\"params\": model.module.bert.parameters(), \"lr\": args.lrmain},\n {\"params\": model.module.linear.parameters(), \"lr\": args.lrlast},\n ])\n logger = pd.DataFrame({\"label\": []})\n num_warmup_steps = math.floor(50)\n num_total_steps = args.val_iteration\n\n scheduler = None\n #WarmupConstantSchedule(optimizer, warmup_steps=num_warmup_steps)\n\n train_criterion = SemiLoss()\n criterion = nn.CrossEntropyLoss()\n\n test_accs = []\n\n # Start training\n for epoch in range(args.epochs):\n\n if args.new_method:\n unlabeled_trainloader = get_augmented_data(train_unlabeled_set, n_labels, model)\n\n temp_data = nll_train(labeled_trainloader, unlabeled_trainloader, model, optimizer,\n scheduler, train_criterion, epoch, n_labels, args.train_aug)\n else:\n temp_data = train(labeled_trainloader, unlabeled_trainloader, model, optimizer,\n scheduler, train_criterion, epoch, n_labels, args.train_aug)\n\n logger = logger.append(temp_data, ignore_index=True)\n\n # scheduler.step()\n\n # _, train_acc = validate(labeled_trainloader,\n # model, criterion, epoch, mode='Train Stats')\n #print(\"epoch {}, train acc {}\".format(epoch, train_acc))\n \n gc.collect()\n\n val_loss, val_acc, _, _ = validate(\n val_loader, model, criterion, epoch, mode='Valid Stats')\n\n gc.collect()\n\n print(\"epoch {}, val acc {}, val_loss {}\".format(\n epoch, val_acc, val_loss))\n\n if val_acc >= best_acc:\n best_acc = val_acc\n test_loss, test_acc, predicted, true = validate(\n test_loader, model, criterion, epoch, mode='Test Stats ')\n 
test_accs.append(test_acc)\n\n print(\"epoch {}, test acc {},test loss {}\".format(\n epoch, test_acc, test_loss))\n\n f1score = f1_score(true, predicted, average=\"micro\")\n f1score = f1_score(true, predicted, average=\"macro\")\n class_report = classification_report(true, predicted)\n\n print(\"Micro F1 Score: {}\".format(f1score))\n print(\"Macro F1 Score: {}\".format(f1score))\n print(class_report)\n\n print('Epoch: ', epoch)\n\n print('Best acc:')\n print(best_acc)\n\n print('Test acc:')\n print(test_accs)\n\n print(\"Finished training!\")\n print('Best acc:')\n print(best_acc)\n\n print('Test acc:')\n print(test_accs)\n\n logger.to_csv(\"train_dist_log_nll.csv\")\ndef get_nll_sampler(unlabelled):\n with open(args.data_path+'log_likelihood.pkl', 'rb') as f:\n nll_scores = np.array(pickle.load(f)) \n print(\"NLL Scores: {}\".format(nll_scores.shape))\n\n text_len = unlabelled.get_seq_lengths()\n normalized_nll = torch.div(nll_scores, text_len)\n sampler = WeightedRandomSampler(normalized_nll, len(unlabelled.text), replacement=True)\n return sampler\n\ndef get_nll_scores():\n with open(args.data_path+'log_likelihood.pkl', 'rb') as f:\n nll_scores = np.array(pickle.load(f))\n return nll_scores\n\ndef get_tfidf_sampler(labelled, unlabelled):\n tfidfvectorizer = TfidfVectorizer(analyzer='word',stop_words= 'english')\n tfidf_wm = tfidfvectorizer.fit_transform(np.append(labelled, unlabelled, axis=0))\n\n unlabelled_tfidf = tfidfvectorizer.transform(unlabelled)\n\n mean_vals = softmax(np.negative(np.mean(unlabelled_tfidf.toarray(), axis = 1)))\n sampler = WeightedRandomSampler(mean_vals, len(unlabelled), replacement=True)\n\n return sampler\n\ndef get_tfidf_scores(labelled, unlabelled):\n tfidfvectorizer = TfidfVectorizer(analyzer='word',stop_words= 'english')\n tfidf_wm = tfidfvectorizer.fit_transform(labelled)\n\n unlabelled_tfidf = tfidfvectorizer.transform(unlabelled)\n\n mean_vals = softmax(np.negative(np.mean(unlabelled_tfidf.toarray(), axis = 1)))\n return mean_vals\n\ndef get_augmented_data(dataset, n_labels, model):\n\n unlabeled_trainloader = Data.DataLoader(\n dataset=dataset, \n batch_size=len(dataset.text), \n shuffle = False\n )\n \n unlabelled_set = dataset\n unlabeled_train_iter = iter(unlabeled_trainloader)\n\n replicate = np.full((len(unlabelled_set)), n_labels, dtype=int)\n (inputs_u, inputs_u2, inputs_ori), _ = unlabeled_train_iter.next()\n\n outputs_u = model(inputs_u)\n outputs_u2 = model(inputs_u2)\n outputs_ori = model(inputs_ori)\n\n p = (0 * torch.softmax(outputs_u, dim=1) + 0 * torch.softmax(outputs_u2,\n dim=1) + 1 * torch.softmax(outputs_ori, dim=1)) / (1)\n\n pt = p**(1/args.T)\n targets_u = pt / pt.sum(dim=1, keepdim=True)\n targets_u = targets_u.detach()\n p_y = targets_u.numpy().flatten()\n\n p_x_y = get_nll_scores()\n p_x_y = np.repeat(p_x_y, replicate, axis=0)\n\n unlabelled_set.text = np.repeat(unlabelled_set.text, replicate, axis=0)\n unlabelled_set.ids = np.repeat(unlabelled_set.ids, replicate, axis=0)\n\n scores = np.multiply(p_y, p_x_y)\n\n sampler = WeightedRandomSampler(scores, len(scores), replacement=True)\n\n temp_unlabelled = Data.DataLoader(\n dataset=unlabelled_set, \n batch_size=args.batch_size_u, \n sampler= sampler\n )\n \n unlabeled_loader = temp_unlabelled\n\n return unlabeled_loader\n\ndef nll_train(labeled_trainloader, unlabelled_trainloader, model, optimizer, scheduler, criterion, epoch, n_labels, train_aug=False):\n labeled_train_iter = iter(labeled_trainloader)\n unlabeled_train_iter = iter(unlabelled_trainloader)\n\n 
model.train()\n\n global total_steps\n global flag\n if flag == 0 and total_steps > args.temp_change:\n print('Change T!')\n args.T = 0.9\n flag = 1\n \n temp_logger = pd.DataFrame({\"label\": []})\n\n for batch_idx in range(args.val_iteration):\n\n total_steps += 1\n\n if not train_aug:\n try:\n inputs_x, targets_x, inputs_x_length = labeled_train_iter.next()\n except:\n labeled_train_iter = iter(labeled_trainloader)\n inputs_x, targets_x, inputs_x_length = labeled_train_iter.next()\n else:\n try:\n (inputs_x, inputs_x_aug), (targets_x, _), (inputs_x_length,\n inputs_x_length_aug) = labeled_train_iter.next()\n except:\n labeled_train_iter = iter(labeled_trainloader)\n (inputs_x, inputs_x_aug), (targets_x, _), (inputs_x_length,\n inputs_x_length_aug) = labeled_train_iter.next()\n try:\n (inputs_u, inputs_u2, inputs_ori), (length_u,\n length_u2, length_ori) = unlabeled_train_iter.next()\n except:\n unlabeled_train_iter = iter(unlabelled_trainloader)\n (inputs_u, inputs_u2, inputs_ori), (length_u,\n length_u2, length_ori) = unlabeled_train_iter.next()\n\n batch_size = inputs_x.size(0)\n batch_size_2 = inputs_ori.size(0)\n targets_x = torch.zeros(batch_size, n_labels).scatter_(\n 1, targets_x.view(-1, 1), 1)\n\n if use_cuda:\n inputs_x, targets_x = inputs_x.cuda(), targets_x.cuda(non_blocking=True)\n inputs_u = inputs_u.cuda()\n inputs_u2 = inputs_u2.cuda()\n inputs_ori = inputs_ori.cuda()\n\n mask = []\n\n with torch.no_grad():\n # Predict labels for unlabeled data.\n outputs_u = model(inputs_u)\n outputs_u2 = model(inputs_u2)\n outputs_ori = model(inputs_ori)\n\n # Based on translation qualities, choose different weights here.\n # For AG News: German: 1, Russian: 0, ori: 1\n # For DBPedia: German: 1, Russian: 1, ori: 1\n # For IMDB: German: 0, Russian: 0, ori: 1\n # For Yahoo Answers: German: 1, Russian: 0, ori: 1 / German: 0, Russian: 0, ori: 1\n p = (0 * torch.softmax(outputs_u, dim=1) + 0 * torch.softmax(outputs_u2,\n dim=1) + 1 * torch.softmax(outputs_ori, dim=1)) / (1)\n # Do a sharpen here.\n pt = p**(1/args.T)\n targets_u = pt / pt.sum(dim=1, keepdim=True)\n targets_u = targets_u.detach()\n\n hard_targets = pd.DataFrame({\"label\" : torch.argmax(targets_u, dim=1).numpy()})\n temp_logger = temp_logger.append(hard_targets, ignore_index=True)\n\n mixed = 1\n\n if args.co:\n mix_ = np.random.choice([0, 1], 1)[0]\n else:\n mix_ = 1\n\n if mix_ == 1:\n l = np.random.beta(args.alpha, args.alpha)\n if args.separate_mix:\n l = l\n else:\n l = max(l, 1-l)\n else:\n l = 1\n\n mix_layer = np.random.choice(args.mix_layers_set, 1)[0]\n mix_layer = mix_layer - 1\n\n if not train_aug:\n all_inputs = torch.cat(\n [inputs_x, inputs_u, inputs_u2, inputs_ori, inputs_ori], dim=0)\n\n all_lengths = torch.cat(\n [inputs_x_length, length_u, length_u2, length_ori, length_ori], dim=0)\n\n all_targets = torch.cat(\n [targets_x, targets_u, targets_u, targets_u, targets_u], dim=0)\n\n else:\n all_inputs = torch.cat(\n [inputs_x, inputs_x_aug, inputs_u, inputs_u2, inputs_ori], dim=0)\n all_lengths = torch.cat(\n [inputs_x_length, inputs_x_length, length_u, length_u2, length_ori], dim=0)\n all_targets = torch.cat(\n [targets_x, targets_x, targets_u, targets_u, targets_u], dim=0)\n\n if args.separate_mix:\n idx1 = torch.randperm(batch_size)\n idx2 = torch.randperm(all_inputs.size(0) - batch_size) + batch_size\n idx = torch.cat([idx1, idx2], dim=0)\n\n else:\n idx1 = torch.randperm(all_inputs.size(0) - batch_size_2)\n idx2 = torch.arange(batch_size_2) + \\\n all_inputs.size(0) - batch_size_2\n idx = torch.cat([idx1, 
idx2], dim=0)\n\n input_a, input_b = all_inputs, all_inputs[idx]\n target_a, target_b = all_targets, all_targets[idx]\n length_a, length_b = all_lengths, all_lengths[idx]\n\n if args.mix_method == 0:\n # Mix sentences' hidden representations\n logits = model(input_a, input_b, l, mix_layer)\n mixed_target = l * target_a + (1 - l) * target_b\n\n elif args.mix_method == 1:\n # Concat snippet of two training sentences, the snippets are selected based on l\n # For example: \"I lova you so much\" and \"He likes NLP\" could be mixed as \"He likes NLP so much\".\n # The corresponding labels are mixed with coefficient as well\n mixed_input = []\n if l != 1:\n for i in range(input_a.size(0)):\n length1 = math.floor(int(length_a[i]) * l)\n idx1 = torch.randperm(int(length_a[i]) - length1 + 1)[0]\n length2 = math.ceil(int(length_b[i]) * (1-l))\n if length1 + length2 > 256:\n length2 = 256-length1 - 1\n idx2 = torch.randperm(int(length_b[i]) - length2 + 1)[0]\n try:\n mixed_input.append(\n torch.cat((input_a[i][idx1: idx1 + length1], torch.tensor([102]).cuda(), input_b[i][idx2:idx2 + length2], torch.tensor([0]*(256-1-length1-length2)).cuda()), dim=0).unsqueeze(0))\n except:\n print(256 - 1 - length1 - length2,\n idx2, length2, idx1, length1)\n\n mixed_input = torch.cat(mixed_input, dim=0)\n\n else:\n mixed_input = input_a\n\n logits = model(mixed_input)\n mixed_target = l * target_a + (1 - l) * target_b\n\n elif args.mix_method == 2:\n # Concat two training sentences\n # The corresponding labels are averaged\n if l == 1:\n mixed_input = []\n for i in range(input_a.size(0)):\n mixed_input.append(\n torch.cat((input_a[i][:length_a[i]], torch.tensor([102]).cuda(), input_b[i][:length_b[i]], torch.tensor([0]*(512-1-int(length_a[i])-int(length_b[i]))).cuda()), dim=0).unsqueeze(0))\n\n mixed_input = torch.cat(mixed_input, dim=0)\n logits = model(mixed_input, sent_size=512)\n\n #mixed_target = torch.clamp(target_a + target_b, max = 1)\n mixed = 0\n mixed_target = (target_a + target_b)/2\n else:\n mixed_input = input_a\n mixed_target = target_a\n logits = model(mixed_input, sent_size=256)\n mixed = 1\n\n Lx, Lu, w, Lu2, w2 = criterion(logits[:batch_size], mixed_target[:batch_size], logits[batch_size:-batch_size_2],\n mixed_target[batch_size:-batch_size_2], logits[-batch_size_2:], epoch+batch_idx/args.val_iteration, mixed)\n\n if mix_ == 1:\n loss = Lx + w * Lu\n else:\n loss = Lx + w * Lu + w2 * Lu2\n\n #max_grad_norm = 1.0\n #torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # scheduler.step()\n\n if batch_idx % 1000 == 0:\n print(\"epoch {}, step {}, loss {}, Lx {}, Lu {}, Lu2 {}\".format(\n epoch, batch_idx, loss.item(), Lx.item(), Lu.item(), Lu2.item()))\n\n return temp_logger\n\n\n\ndef train(labeled_trainloader, unlabeled_trainloader, model, optimizer, scheduler, criterion, epoch, n_labels, train_aug=False):\n labeled_train_iter = iter(labeled_trainloader)\n unlabeled_train_iter = iter(unlabeled_trainloader)\n model.train()\n\n global total_steps\n global flag\n if flag == 0 and total_steps > args.temp_change:\n print('Change T!')\n args.T = 0.9\n flag = 1\n \n temp_logger = pd.DataFrame({\"label\": []})\n\n for batch_idx in range(args.val_iteration):\n\n total_steps += 1\n if not train_aug:\n try:\n inputs_x, targets_x, inputs_x_length = labeled_train_iter.next()\n except:\n labeled_train_iter = iter(labeled_trainloader)\n inputs_x, targets_x, inputs_x_length = labeled_train_iter.next()\n else:\n try:\n (inputs_x, 
inputs_x_aug), (targets_x, _), (inputs_x_length,\n inputs_x_length_aug) = labeled_train_iter.next()\n except:\n labeled_train_iter = iter(labeled_trainloader)\n (inputs_x, inputs_x_aug), (targets_x, _), (inputs_x_length,\n inputs_x_length_aug) = labeled_train_iter.next()\n try:\n (inputs_u, inputs_u2, inputs_ori), (length_u,\n length_u2, length_ori) = unlabeled_train_iter.next()\n except:\n unlabeled_train_iter = iter(unlabeled_trainloader)\n (inputs_u, inputs_u2, inputs_ori), (length_u,\n length_u2, length_ori) = unlabeled_train_iter.next()\n\n batch_size = inputs_x.size(0)\n batch_size_2 = inputs_ori.size(0)\n targets_x = torch.zeros(batch_size, n_labels).scatter_(\n 1, targets_x.view(-1, 1), 1)\n\n if use_cuda:\n inputs_x, targets_x = inputs_x.cuda(), targets_x.cuda(non_blocking=True)\n inputs_u = inputs_u.cuda()\n inputs_u2 = inputs_u2.cuda()\n inputs_ori = inputs_ori.cuda()\n\n mask = []\n\n with torch.no_grad():\n # Predict labels for unlabeled data.\n outputs_u = model(inputs_u)\n outputs_u2 = model(inputs_u2)\n outputs_ori = model(inputs_ori)\n\n # Based on translation qualities, choose different weights here.\n # For AG News: German: 1, Russian: 0, ori: 1\n # For DBPedia: German: 1, Russian: 1, ori: 1\n # For IMDB: German: 0, Russian: 0, ori: 1\n # For Yahoo Answers: German: 1, Russian: 0, ori: 1 / German: 0, Russian: 0, ori: 1\n p = (0 * torch.softmax(outputs_u, dim=1) + 0 * torch.softmax(outputs_u2,\n dim=1) + 1 * torch.softmax(outputs_ori, dim=1)) / (1)\n # Do a sharpen here.\n pt = p**(1/args.T)\n targets_u = pt / pt.sum(dim=1, keepdim=True)\n targets_u = targets_u.detach()\n\n hard_targets = pd.DataFrame({\"label\" : torch.argmax(targets_u, dim=1).cpu().numpy()})\n temp_logger = temp_logger.append(hard_targets, ignore_index=True)\n\n mixed = 1\n\n if args.co:\n mix_ = np.random.choice([0, 1], 1)[0]\n else:\n mix_ = 1\n\n if mix_ == 1:\n l = np.random.beta(args.alpha, args.alpha)\n if args.separate_mix:\n l = l\n else:\n l = max(l, 1-l)\n else:\n l = 1\n\n mix_layer = np.random.choice(args.mix_layers_set, 1)[0]\n mix_layer = mix_layer - 1\n\n if not train_aug:\n all_inputs = torch.cat(\n [inputs_x, inputs_u, inputs_u2, inputs_ori, inputs_ori], dim=0)\n\n all_lengths = torch.cat(\n [inputs_x_length, length_u, length_u2, length_ori, length_ori], dim=0)\n\n all_targets = torch.cat(\n [targets_x, targets_u, targets_u, targets_u, targets_u], dim=0)\n\n else:\n all_inputs = torch.cat(\n [inputs_x, inputs_x_aug, inputs_u, inputs_u2, inputs_ori], dim=0)\n all_lengths = torch.cat(\n [inputs_x_length, inputs_x_length, length_u, length_u2, length_ori], dim=0)\n all_targets = torch.cat(\n [targets_x, targets_x, targets_u, targets_u, targets_u], dim=0)\n\n if args.separate_mix:\n idx1 = torch.randperm(batch_size)\n idx2 = torch.randperm(all_inputs.size(0) - batch_size) + batch_size\n idx = torch.cat([idx1, idx2], dim=0)\n\n else:\n idx1 = torch.randperm(all_inputs.size(0) - batch_size_2)\n idx2 = torch.arange(batch_size_2) + \\\n all_inputs.size(0) - batch_size_2\n idx = torch.cat([idx1, idx2], dim=0)\n\n input_a, input_b = all_inputs, all_inputs[idx]\n target_a, target_b = all_targets, all_targets[idx]\n length_a, length_b = all_lengths, all_lengths[idx]\n\n if args.mix_method == 0:\n # Mix sentences' hidden representations\n logits = model(input_a, input_b, l, mix_layer)\n mixed_target = l * target_a + (1 - l) * target_b\n\n elif args.mix_method == 1:\n # Concat snippet of two training sentences, the snippets are selected based on l\n # For example: \"I lova you so much\" and \"He likes 
NLP\" could be mixed as \"He likes NLP so much\".\n # The corresponding labels are mixed with coefficient as well\n mixed_input = []\n if l != 1:\n for i in range(input_a.size(0)):\n length1 = math.floor(int(length_a[i]) * l)\n idx1 = torch.randperm(int(length_a[i]) - length1 + 1)[0]\n length2 = math.ceil(int(length_b[i]) * (1-l))\n if length1 + length2 > 256:\n length2 = 256-length1 - 1\n idx2 = torch.randperm(int(length_b[i]) - length2 + 1)[0]\n try:\n mixed_input.append(\n torch.cat((input_a[i][idx1: idx1 + length1], torch.tensor([102]).cuda(), input_b[i][idx2:idx2 + length2], torch.tensor([0]*(256-1-length1-length2)).cuda()), dim=0).unsqueeze(0))\n except:\n print(256 - 1 - length1 - length2,\n idx2, length2, idx1, length1)\n\n mixed_input = torch.cat(mixed_input, dim=0)\n\n else:\n mixed_input = input_a\n\n logits = model(mixed_input)\n mixed_target = l * target_a + (1 - l) * target_b\n\n elif args.mix_method == 2:\n # Concat two training sentences\n # The corresponding labels are averaged\n if l == 1:\n mixed_input = []\n for i in range(input_a.size(0)):\n mixed_input.append(\n torch.cat((input_a[i][:length_a[i]], torch.tensor([102]).cuda(), input_b[i][:length_b[i]], torch.tensor([0]*(512-1-int(length_a[i])-int(length_b[i]))).cuda()), dim=0).unsqueeze(0))\n\n mixed_input = torch.cat(mixed_input, dim=0)\n logits = model(mixed_input, sent_size=512)\n\n #mixed_target = torch.clamp(target_a + target_b, max = 1)\n mixed = 0\n mixed_target = (target_a + target_b)/2\n else:\n mixed_input = input_a\n mixed_target = target_a\n logits = model(mixed_input, sent_size=256)\n mixed = 1\n\n Lx, Lu, w, Lu2, w2 = criterion(logits[:batch_size], mixed_target[:batch_size], logits[batch_size:-batch_size_2],\n mixed_target[batch_size:-batch_size_2], logits[-batch_size_2:], epoch+batch_idx/args.val_iteration, mixed)\n\n if mix_ == 1:\n loss = Lx + w * Lu\n else:\n loss = Lx + w * Lu + w2 * Lu2\n\n #max_grad_norm = 1.0\n #torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # scheduler.step()\n\n if batch_idx % 1000 == 0:\n print(\"epoch {}, step {}, loss {}, Lx {}, Lu {}, Lu2 {}\".format(\n epoch, batch_idx, loss.item(), Lx.item(), Lu.item(), Lu2.item()))\n\n return temp_logger\n\n\ndef validate(valloader, model, criterion, epoch, mode):\n model.eval()\n with torch.no_grad():\n loss_total = 0\n total_sample = 0\n acc_total = 0\n correct = 0\n\n all_predicted = np.array([])\n all_true = np.array([])\n\n for batch_idx, (inputs, targets, length) in enumerate(valloader):\n inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n _, predicted = torch.max(outputs.data, 1)\n\n all_predicted = np.append(all_predicted, np.array(predicted.cpu()))\n all_true = np.append(all_true, np.array(targets.cpu()))\n\n if batch_idx == 0:\n print(\"Sample some true labeles and predicted labels\")\n print(predicted[:20])\n print(targets[:20])\n\n correct += (np.array(predicted.cpu()) ==\n np.array(targets.cpu())).sum()\n loss_total += loss.item() * inputs.shape[0]\n total_sample += inputs.shape[0]\n\n acc_total = correct/total_sample\n loss_total = loss_total/total_sample\n\n return loss_total, acc_total, all_predicted, all_true\n\n\ndef linear_rampup(current, rampup_length=args.epochs):\n if rampup_length == 0:\n return 1.0\n else:\n current = np.clip(current / rampup_length, 0.0, 1.0)\n return float(current)\n\n\nclass SemiLoss(object):\n def __call__(self, outputs_x, targets_x, 
outputs_u, targets_u, outputs_u_2, epoch, mixed=1):\n\n if args.mix_method == 0 or args.mix_method == 1:\n\n Lx = - \\\n torch.mean(torch.sum(F.log_softmax(\n outputs_x, dim=1) * targets_x, dim=1))\n\n probs_u = torch.softmax(outputs_u, dim=1)\n\n Lu = F.kl_div(probs_u.log(), targets_u, None, None, 'batchmean')\n\n Lu2 = torch.mean(torch.clamp(torch.sum(-F.softmax(outputs_u, dim=1)\n * F.log_softmax(outputs_u, dim=1), dim=1) - args.margin, min=0))\n\n elif args.mix_method == 2:\n if mixed == 0:\n Lx = - \\\n torch.mean(torch.sum(F.logsigmoid(\n outputs_x) * targets_x, dim=1))\n\n probs_u = torch.softmax(outputs_u, dim=1)\n\n Lu = F.kl_div(probs_u.log(), targets_u,\n None, None, 'batchmean')\n\n Lu2 = torch.mean(torch.clamp(args.margin - torch.sum(\n F.softmax(outputs_u_2, dim=1) * F.softmax(outputs_u_2, dim=1), dim=1), min=0))\n else:\n Lx = - \\\n torch.mean(torch.sum(F.log_softmax(\n outputs_x, dim=1) * targets_x, dim=1))\n\n probs_u = torch.softmax(outputs_u, dim=1)\n Lu = F.kl_div(probs_u.log(), targets_u,\n None, None, 'batchmean')\n\n Lu2 = torch.mean(torch.clamp(args.margin - torch.sum(\n F.softmax(outputs_u, dim=1) * F.softmax(outputs_u, dim=1), dim=1), min=0))\n\n return Lx, Lu, args.lambda_u * linear_rampup(epoch), Lu2, args.lambda_u_hinge * linear_rampup(epoch)\n\n\nif __name__ == '__main__':\n main()\nimport argparse\nimport os\n"
] | [
[
"torch.nn.functional.softmax",
"torch.max",
"torch.cat",
"torch.randperm",
"torch.zeros",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"sklearn.metrics.f1_score",
"sklearn.metrics.classification_report",
"torch.nn.CrossEntropyLoss",
"torch.softmax",
"numpy.random.beta",
"numpy.clip",
"torch.tensor",
"torch.arange",
"numpy.repeat",
"sklearn.feature_extraction.text.TfidfVectorizer",
"torch.div",
"numpy.multiply",
"numpy.random.choice",
"torch.nn.functional.logsigmoid",
"numpy.append",
"torch.cuda.device_count",
"numpy.array",
"torch.nn.functional.log_softmax",
"torch.nn.DataParallel",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aday651/embed-asym-exeriments | [
"986f147425c3e4f42c04a25f69577bbeef6b3c23"
] | [
"custom_scripts/skipgram_modified.py"
] | [
"import tensorflow as tf\nfrom custom_scripts.node_classifier_modified import make_node_classifier\n\n\ndef make_skipgram(**kwargs):\n \"\"\" Uses the skipgram objective for relational data\n\n Returns\n -------\n A model function for skipgram edge prediction (with a nonsense vertex classifier attached for testing convenience)\n \"\"\"\n\n def make_label_logits(embeddings, features, mode, params):\n # TODO: fix this. What's going on? Basically, the size of \n # embeddings is dynamic, and so we need a way to properly \n # handle this in order to set the size of this zeros array...\n #return tf.zeros([tf.shape(embeddings)[0], params['n_labels']],\n # dtype=tf.float32)\n return tf.zeros([embeddings.get_shape().as_list()[0], \n params['n_labels']],\n dtype=tf.float32)\n\n def make_no_label_loss(logits, present_labels, split):\n return tf.constant(0, dtype=tf.float32)\n\n return make_node_classifier(make_label_logits=make_label_logits,\n make_edge_logits=_make_edge_list_logits,\n make_label_pred_loss=make_no_label_loss,\n make_edge_pred_loss=make_simple_skipgram_loss(None),\n **kwargs)\n\n\ndef make_multilabel_logistic_regression(label_task_weight=0.5, regularization=0., clip=None, **kwargs):\n \"\"\" Uses the skipgram objective for relational data, and predicts labels with logistic regression\n using the skipgram embeddings as the features.\n\n Parameters\n ----------\n label_task_weight: the weight for the label task (between 0 and 1). By default, the label and edge\n task are weighted equally.\n clip: if not None, the value to clip the edge loss at.\n kwargs: additional arguments are forwarded to the `make_node_classifier` template.\n\n Returns\n -------\n A model function for simple multilabel logistic regression.\n \"\"\"\n\n def make_label_logits(embeddings, features, mode, params):\n # actually computes 0.5 * \\sum w^2, so it should just reproduce sklearn\n regularizer = tf.keras.regularizers.l2(l=0.5 * (label_task_weight * regularization))\n\n layer = tf.compat.v1.layers.dense(\n embeddings, params['n_labels'], activation=None, use_bias=True,\n kernel_regularizer=regularizer,\n bias_regularizer=regularizer,\n name='logits_labels')\n\n return layer\n\n edge_task_weight = 1 - label_task_weight\n\n return make_node_classifier(\n make_label_logits=make_label_logits,\n make_edge_logits=_make_edge_list_logits,\n make_label_pred_loss=make_weighted_loss(_make_label_sigmoid_cross_entropy_loss, label_task_weight),\n make_edge_pred_loss=make_weighted_loss(make_simple_skipgram_loss(clip), edge_task_weight),\n **kwargs)\n\n\ndef make_multilabel_deep_logistic_regression():\n \"\"\" Uses the skipgram objective for relational data, and predicts labels with deep logistic regression\n using the skipgram embeddings as the features\n\n Returns\n -------\n a function be passed to model_fn\n \"\"\"\n\n def make_label_logits(embeddings, features, mode, params):\n for units in params['hidden_units']:\n net = tf.compat.v1.layers.dense(embeddings, units=units, activation=tf.nn.relu)\n\n return tf.compat.v1.layers.dense(net, params['n_labels'], activation=None)\n\n return make_node_classifier(make_label_logits=make_label_logits,\n make_edge_logits=_make_edge_list_logits,\n make_label_pred_loss=_make_label_sigmoid_cross_entropy_loss,\n make_edge_pred_loss=make_simple_skipgram_loss(12))\n\n\n\n#\n# helper functions follow\n#\n\n\ndef _make_label_sigmoid_cross_entropy_loss(logits, present_labels, split):\n \"\"\" Helper function to create label loss\n\n Parameters\n ----------\n logits: tensor of shape [batch_size, 
num_verts, num_labels]\n present_labels: tensor of shape [batch_size, num_verts, num_labels]; labels of labelled verts\n split: tensor of shape [batch_size, num_verts], 0 if censored, 1 if not censored\n\n Returns\n -------\n The cross-entropy loss corresponding to the label.\n \"\"\"\n if len(logits.shape) == 3:\n batch_size = tf.cast(tf.shape(input=logits)[0], dtype=tf.float32)\n else:\n batch_size = 1\n\n label_pred_losses = tf.compat.v1.losses.sigmoid_cross_entropy(\n present_labels, logits=logits, weights=tf.expand_dims(split, -1), reduction=tf.compat.v1.losses.Reduction.NONE)\n\n # sum rather than (tf default of) mean because ¯\\_(ツ)_/¯\n label_pred_loss = tf.reduce_sum(input_tensor=label_pred_losses)\n\n return label_pred_loss / batch_size\n\n\ndef make_weighted_loss(loss_fn, weight=1.0):\n \"\"\" Adapts the given loss function by multiplying by a given constant.\n\n Parameters\n ----------\n loss_fn: a function to create the loss\n weight: the value by which to weigh the loss.\n\n Returns\n -------\n fn: The adapted loss\n \"\"\"\n def fn(*args, **kwargs):\n loss = loss_fn(*args, **kwargs)\n if weight != 0:\n return weight * loss\n else:\n return tf.constant(0.0, dtype=loss.dtype)\n\n return fn\n\n\ndef _make_edge_list_logits(embeddings, features, edge_list, weights, params):\n \"\"\" Helper function to create the skipgram loss for edge structure\n\n Parameters\n ----------\n embeddings: the embeddings features for the current subgraph.\n features: features from tensorflow dataset (not used)\n edge_list: edge list of the subgraph\n weights: weights of the edges in the subgraph\n params: other parameters\n\n Returns\n -------\n a tensor representing the edge prediction loss.\n \"\"\"\n with tf.compat.v1.name_scope('edge_list_logits'):\n # Here I want to change this depending on the values of \n # params[\"indef_ip\"] and whether it is true or false\n if params[\"indef_ip\"]:\n diag = tf.ones(int(float(params[\"embedding_dim\"])/2),\n dtype=tf.float32)\n dm = tf.linalg.diag(tf.concat([diag, -1*diag], 0))\n pairwise_inner_prods = tf.matmul(embeddings, \n tf.matmul(embeddings, dm), transpose_b=True, name='all_edges_logit')\n else:\n pairwise_inner_prods = tf.matmul(embeddings, embeddings, transpose_b=True, name='all_edges_logit')\n\n if len(edge_list.shape) == 2:\n edge_list = tf.expand_dims(edge_list, axis=0)\n pairwise_inner_prods = tf.expand_dims(pairwise_inner_prods, axis=0)\n no_batch = True\n else:\n no_batch = False\n\n edge_list_shape = tf.shape(input=edge_list)\n batch_size = edge_list.shape[0] if edge_list.shape[0] is not None else edge_list_shape[0]\n num_edges = edge_list.shape[1] if edge_list.shape[1] is not None else edge_list_shape[1]\n\n batch_index = tf.tile(\n tf.expand_dims(tf.expand_dims(tf.range(batch_size), -1), -1),\n tf.stack([1, num_edges, 1]))\n\n edge_index = tf.concat([batch_index, edge_list], axis=-1)\n edge_logit = tf.gather_nd(pairwise_inner_prods, edge_index)\n\n if no_batch:\n edge_logit = tf.squeeze(edge_logit, axis=0)\n\n return edge_logit\n\n\ndef make_simple_skipgram_loss(clip=None):\n \"\"\" Makes a simple skipgram loss for edge prediction from a given edge list.\n\n This function takes a simple edge list and does not further modify it. 
In particular,\n it does not apply any transformation such as windowing or pruning.\n\n Parameters\n ----------\n clip: If not None, a value to clip the individual losses at.\n\n Returns\n -------\n loss: a function which computes the loss.\n \"\"\"\n def loss(edge_logits, num_vertex, edge_list, edge_weights, params):\n with tf.compat.v1.name_scope('skipgram_loss', values=[edge_logits, edge_list, edge_weights]):\n if len(edge_list.shape) == 3:\n batch_size = tf.cast(tf.shape(input=edge_list)[0], dtype=tf.float32)\n else:\n batch_size = 1.\n\n edge_present = tf.cast(tf.equal(edge_weights, 1), dtype=tf.float32)\n\n # values of -1 in the weights indicate padded edges which should be ignored\n # in loss computation.\n edge_censored = tf.cast(tf.not_equal(edge_weights, -1), dtype=tf.float32)\n\n edge_pred_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=edge_present, logits=edge_logits)\n\n edge_pred_loss = edge_pred_loss * edge_censored\n\n if clip:\n edge_pred_loss = tf.clip_by_value(edge_pred_loss, 0, clip)\n\n # sum instead of (tf default of) mean because mean screws up learning rates for embeddings\n loss_value = tf.divide(tf.reduce_sum(input_tensor=edge_pred_loss), batch_size,\n name='skipgram_edge_loss')\n return loss_value\n\n return loss"
] | [
[
"tensorflow.clip_by_value",
"tensorflow.not_equal",
"tensorflow.matmul",
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.gather_nd",
"tensorflow.shape",
"tensorflow.range",
"tensorflow.reduce_sum",
"tensorflow.keras.regularizers.l2",
"tensorflow.stack",
"tensorflow.equal",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.compat.v1.layers.dense",
"tensorflow.compat.v1.name_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gabrielmacedoanac/kgextension | [
"0551ca278bc3de5c39baf663467be3220ad20edd",
"0551ca278bc3de5c39baf663467be3220ad20edd"
] | [
"test/test_link_exploration.py",
"kgextension/fusion_helper.py"
] | [
"import pandas as pd\nimport pytest\nfrom kgextension.link_exploration import link_explorer\n\nclass TestLinkExplorer:\n\n def test1_default(self):\n df_input = pd.read_csv(\"test/data/link_exploration/link_exploration_test_input.csv\")\n expected_result = pd.read_csv(\"test/data/link_exploration/link_exploration_test1_expected.csv\")\n\n result = link_explorer(df_input, \"uri\")\n\n pd.testing.assert_frame_equal(result, expected_result, check_like = True)\n \n def test2_multiple_hops(self):\n df_input = pd.read_csv(\"test/data/link_exploration/link_exploration_test_input.csv\")\n expected_result = pd.read_csv(\"test/data/link_exploration/link_exploration_test2_expected.csv\")\n\n result = link_explorer(df_input, \"uri\", number_of_hops=3)\n\n pd.testing.assert_frame_equal(result, expected_result, check_like = True)\n \n def test3_multiple_links_to_follow(self):\n df_input = pd.read_csv(\"test/data/link_exploration/link_exploration_test_input.csv\")\n expected_result = pd.read_csv(\"test/data/link_exploration/link_exploration_test3_expected.csv\")\n\n result = link_explorer(df_input, \"uri\", links_to_follow=[\"owl:sameAs\",\"rdfs:seeAlso\"])\n\n pd.testing.assert_frame_equal(result, expected_result, check_like = True)\n \n def test4_lod_source(self):\n df_input = pd.read_csv(\"test/data/link_exploration/link_exploration_test_input.csv\")\n expected_result = pd.read_csv(\"test/data/link_exploration/link_exploration_test4_expected.csv\")\n\n result = link_explorer(df_input, \"uri\", lod_sources=[\"nytimes\",\"geonames\"])\n\n pd.testing.assert_frame_equal(result, expected_result, check_like = True)\n\n def test5_prefix_lookup(self):\n df_input = pd.read_csv(\"test/data/link_exploration/link_exploration_test_input.csv\")\n expected_result = pd.read_csv(\"test/data/link_exploration/link_exploration_test5_expected.csv\")\n prefixes = {\"irgendeinprefix\" : \"http://www.w3.org/2002/07/owl#\"}\n\n result = link_explorer(df_input, \"uri\", links_to_follow=[\"irgendeinprefix:sameAs\"],prefix_lookup=prefixes)\n\n pd.testing.assert_frame_equal(result, expected_result, check_like = True)\n \n def test6_exclude_source(self):\n df_input = pd.read_csv(\"test/data/link_exploration/link_exploration_test_input.csv\")\n expected_result = pd.read_csv(\"test/data/link_exploration/link_exploration_test6_expected.csv\")\n\n result = link_explorer(df_input, \"uri\", exclude_sources=[\"dbpedia\"])\n\n pd.testing.assert_frame_equal(result, expected_result, check_like = True)\n \n def test7_empty_result(self):\n df_input = pd.read_csv(\"test/data/link_exploration/link_exploration_test_input.csv\")\n expected_result = pd.read_csv(\"test/data/link_exploration/link_exploration_test7_expected.csv\")\n\n result = link_explorer(df_input, \"uri\", links_to_follow=[\"funkioniert:nicht\"])\n\n pd.testing.assert_frame_equal(result, expected_result, check_like = True)",
"import warnings\nfrom collections import Counter\nimport re\nimport random\nimport numpy as np\n\ndef first(x):\n \"\"\"Returns the first not-NA value, helper function for pd.DataFrame.apply.\n\n Args:\n x (pd.Series): columns/rows passed in pd.DataFrame.apply function\n\n Returns:\n flexible: first not-NA value of the pd.Series\n \"\"\"\n x = x.dropna()\n if x.empty:\n return np.nan\n else:\n return x[0]\n\ndef last(x):\n \"\"\"Returns the last not.na value, helper function for pd.DataFrame.apply.\n\n Args:\n x (pd.Series): columns/rows passed in pd.DataFrame.apply function\n\n Returns:\n flexible: last not-NA value of the pd.Series\n \"\"\"\n x = x.dropna()\n if x.empty:\n return np.nan\n else:\n return x[-1]\n\ndef longest(x):\n \"\"\"Returns the longest value, helper function for pd.DataFrame.apply.\n\n Args:\n x (pd.Series): columns/rows passed in pd.DataFrame.apply function\n\n Returns:\n str: longest value of the pd.Series\n \"\"\"\n x = x.dropna()\n if x.empty:\n return np.nan\n else:\n return max(x, key=len)\n\ndef shortest(x):\n \"\"\"Returns the shorest value, helper function for pd.DataFrame.apply.\n\n Args:\n x (pd.Series): columns/rows passed in pd.DataFrame.apply function\n\n Returns:\n str: longest value of the pd.Series\n \"\"\"\n x = x.dropna()\n if x.empty:\n return np.nan\n else:\n return min(x, key=len)\n\ndef voting(x):\n \"\"\"Chooses the value with the most votes (mode value in statistics). If \n there is a draw, the first value is chosen.\n\n Args:\n x (pd.Series): columns/rows passed in pd.DataFrame.apply function\n\n Returns:\n flexible: mode value of the pd.Series\n \"\"\"\n\n x = x.dropna()\n if x.empty:\n return np.nan\n else:\n # count the votes\n votes = Counter(x).values()\n items = [value for value in votes]\n\n # warn if no winning vote can be obtained\n if max(votes) == 1:\n warnings.warn(\n \"Every vote is distinct. The first value will be chosen.\")\n\n # warn if there is a draw\n elif all(y == items[0] for y in list(items)):\n warnings.warn(\n \"There is a draw in votes. The value of the first voting\\\n group/column will be chosen.\")\n\n return max(x, key=Counter(x).get)\n\n\ndef provenance(columns, regex=\"http://dbpedia.org/\"):\n \"\"\"Determines the name of the column matching the regex pattern. \n\n Args:\n columns (pd.DataFrame.columns): The columns of the schema matches to be\n fused\n regex (str, optional): The regex string identifiying the column name,\n generally the prefix of the feature. Defaults to \n \"http://dbpedia.org/\".\n\n\n Returns:\n str: The name of the column matching the regex pattern.\n\n Raises:\n AttributeError: If no column or more than one columns of the fusion\n cluster match the pattern.\n \"\"\"\n\n # identify all matches satisfying the regex pattern\n columns = columns.dropna()\n matches = [col for col in columns if re.findall(regex, col)]\n\n # if the match is unique, return it\n if len(matches) == 1:\n return matches[0]\n\n # if there are more than one or no occurences raise the respective errors\n elif len(matches) > 1:\n raise RuntimeError(\"\"\"More than one of the matches satistifies the \n provenance regex, please specify another\n regex or another fusion method.\"\"\")\n else:\n raise RuntimeError(\"\"\"\"No column satisfies the regex. 
Please specify \n another regex or another fusion method.\"\"\")\n\ndef fusion_function_lookup(\n boolean_method_single, boolean_method_multiple, numeric_method_single, \n numeric_method_multiple, string_method_single, string_method_multiple):\n \"\"\"Maps the right function to method passed as string. E.g.\n boolean_method_single = 'random' --> random.choice.\n\n Args:\n boolean_method_single (str): method to use for a cluster of size two\n and boolean values.\n boolean_method_multiple (str): method to use for a cluster of more than\n size two and boolean values.\n numeric_method_single (str): method to use for a cluster of size two\n and numeric values\n numeric_method_multiple (str): method to use for a cluster of more than\n size two and numeric values.\n string_method_single (str): method to use for a cluster of size two\n and string values.\n string_method_multiple (str): method to use for a cluster of more than \n size two and string values.\n\n Returns:\n dict: A dictionary with the mapping from method to function.\n \"\"\"\n \n # boolean functions single match: match choices and functions\n boolean_choices_single = [\n boolean_method_single == \"first\", boolean_method_single == \"last\",\n boolean_method_single == \"random\", boolean_method_single == \"provenance\",\n callable(boolean_method_single)]\n boolean_functions_single = [\n first, last, random.choice, provenance, boolean_method_single]\n boolean_function_single = np.select(\n boolean_choices_single, boolean_functions_single, default=None).item()\n\n # boolean functions multiple matches: match choices and functions\n boolean_choices_multiple = [\n boolean_method_multiple == \"first\", boolean_method_multiple == \"last\",\n boolean_method_multiple == \"random\", boolean_method_multiple == \"provenance\",\n boolean_method_multiple == \"voting\", callable(boolean_method_multiple)]\n boolean_functions_multiple = [\n first, last, random.choice, provenance, voting, boolean_method_multiple]\n boolean_function_multiple = np.select(\n boolean_choices_multiple, boolean_functions_multiple, default=None).item()\n\n # numeric functions single match: match choices and functions\n numeric_choices_single = [\n numeric_method_single == \"min\", numeric_method_single == \"max\",\n numeric_method_single == \"average\", numeric_method_single == \"random\",\n numeric_method_single == \"provenance\", callable(numeric_method_single)]\n numeric_functions_single = [\n np.min, np.max, np.mean, random.choice, provenance, numeric_method_single]\n numeric_function_single = np.select(\n numeric_choices_single, numeric_functions_single, default=None).item()\n\n # numeric functions multiple matches: match choices and functions\n numeric_choices_multiple = [\n numeric_method_multiple == \"min\", numeric_method_multiple == \"max\",\n numeric_method_multiple == \"average\", numeric_method_multiple == \"median\",\n numeric_method_multiple == \"random\", numeric_method_multiple == \"provenance\",\n numeric_method_multiple == \"voting\", callable(numeric_method_multiple)]\n numeric_functions_multiple = [\n np.min, np.max, np.mean, np.median, random.choice, provenance, voting, \n numeric_method_multiple]\n numeric_function_multiple = np.select(\n numeric_choices_multiple, numeric_functions_multiple, default=None).item()\n\n # string functions single match: match choices and functions\n string_choices_single = [\n string_method_single == \"first\", string_method_single == \"last\",\n string_method_single == \"longest\", string_method_single == \"shortest\",\n 
string_method_single == \"random\", string_method_single == \"provenance\",\n callable(string_method_single)]\n string_functions_single = [\n first, last, longest, shortest, random.choice, provenance, string_method_single]\n string_function_single = np.select(\n string_choices_single, string_functions_single, default=None).item()\n\n # string functions multiple matches: match choices and functions\n string_choices_multiple = [\n string_method_multiple == \"first\", string_method_multiple == \"last\",\n string_method_multiple == \"longest\", string_method_multiple == \"shortest\",\n string_method_multiple == \"random\", string_method_multiple == \"provenance\",\n string_method_multiple == \"voting\", callable(string_method_multiple)]\n string_functions_multiple = [\n first, last, longest, shortest, random.choice, provenance, voting, \n string_method_multiple]\n string_function_multiple = np.select(\n string_choices_multiple, string_functions_multiple, default=None).item()\n\n # create lookup to choose function from once data type and group size are known\n function_lookup = {\"boolean_single\": boolean_function_single,\n \"boolean_multiple\": boolean_function_multiple,\n \"numeric_single\": numeric_function_single,\n \"numeric_multiple\": numeric_function_multiple,\n \"string_single\": string_function_single,\n \"string_multiple\": string_function_multiple}\n \n return function_lookup\n "
] | [
[
"pandas.read_csv",
"pandas.testing.assert_frame_equal"
],
[
"numpy.select"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sevmardi/Social_network_analysis | [
"d299fc0120f12c2ad8ca871cad160a66873dcae0"
] | [
"Questions/Question2_3.py"
] | [
"import networkx as nx\nimport helpers.DataLoader as dataLoader\nimport matplotlib.pyplot as plt\nimport helpers.DataLoader as data\n\n\nclass Question2_3:\n\n def main(self):\n \"\"\"\n Caclulate the indegree and outdegree distribution of the given graph\n \"\"\"\n # Load the data\n data = dataLoader.DataLoader()\n medium = data.load_medium()\n large = data.load_large()\n\n # Send it to the opener\n med = self.opener(medium)\n lg = self.opener(large)\n\n # Call the methods\n # You can change the \"med\" by \"lg\" to get the indegree and outdegree of the large dataset\n in_degree = med.in_degree().values()\n out_degree = med.out_degree().values()\n\n # Plot the values\n # Change the below list param by out_degree or in_degree to get right results\n o = list(in_degree)\n plt.hist(o)\n # Need to change to title to either In-degree or Out-degree of chosen dataset\n plt.title(\"Out Degree Distribution Medium Network\")\n plt.xlabel(\"degrees\")\n plt.ylabel(\"frequency\")\n plt.show()\n\n def opener(self, data):\n dg = nx.DiGraph()\n\n with open(data, 'r') as file:\n for i in file:\n line = i.rstrip('\\n')\n vec = line.split(\" \")\n dg.add_edge(vec[0], vec[1])\n return dg\n\n\nif __name__ == '__main__':\n p = Question2_3()\n p.main()\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sslowikatpalantir/spark | [
"f9dddebf019da89d8611e11e1f12bc8864c17419"
] | [
"python/pyspark/sql/tests/test_pandas_udf_grouped_map.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport datetime\nimport unittest\n\nfrom collections import OrderedDict\nfrom decimal import Decimal\nfrom distutils.version import LooseVersion\n\nfrom pyspark.sql import Row\nfrom pyspark.sql.functions import array, explode, col, lit, udf, sum, pandas_udf, PandasUDFType\nfrom pyspark.sql.types import *\nfrom pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \\\n pandas_requirement_message, pyarrow_requirement_message\nfrom pyspark.testing.utils import QuietTest\n\n\[email protected](\n not have_pandas or not have_pyarrow,\n pandas_requirement_message or pyarrow_requirement_message)\nclass GroupedMapPandasUDFTests(ReusedSQLTestCase):\n\n @property\n def data(self):\n return self.spark.range(10).toDF('id') \\\n .withColumn(\"vs\", array([lit(i) for i in range(20, 30)])) \\\n .withColumn(\"v\", explode(col('vs'))).drop('vs')\n\n def test_supported_types(self):\n import pyarrow as pa\n\n values = [\n 1, 2, 3,\n 4, 5, 1.1,\n 2.2, Decimal(1.123),\n [1, 2, 2], True, 'hello'\n ]\n output_fields = [\n ('id', IntegerType()), ('byte', ByteType()), ('short', ShortType()),\n ('int', IntegerType()), ('long', LongType()), ('float', FloatType()),\n ('double', DoubleType()), ('decim', DecimalType(10, 3)),\n ('array', ArrayType(IntegerType())), ('bool', BooleanType()), ('str', StringType())\n ]\n\n # TODO: Add BinaryType to variables above once minimum pyarrow version is 0.10.0\n if LooseVersion(pa.__version__) >= LooseVersion(\"0.10.0\"):\n values.append(bytearray([0x01, 0x02]))\n output_fields.append(('bin', BinaryType()))\n\n output_schema = StructType([StructField(*x) for x in output_fields])\n df = self.spark.createDataFrame([values], schema=output_schema)\n\n # Different forms of group map pandas UDF, results of these are the same\n udf1 = pandas_udf(\n lambda pdf: pdf.assign(\n byte=pdf.byte * 2,\n short=pdf.short * 2,\n int=pdf.int * 2,\n long=pdf.long * 2,\n float=pdf.float * 2,\n double=pdf.double * 2,\n decim=pdf.decim * 2,\n bool=False if pdf.bool else True,\n str=pdf.str + 'there',\n array=pdf.array,\n ),\n output_schema,\n PandasUDFType.GROUPED_MAP\n )\n\n udf2 = pandas_udf(\n lambda _, pdf: pdf.assign(\n byte=pdf.byte * 2,\n short=pdf.short * 2,\n int=pdf.int * 2,\n long=pdf.long * 2,\n float=pdf.float * 2,\n double=pdf.double * 2,\n decim=pdf.decim * 2,\n bool=False if pdf.bool else True,\n str=pdf.str + 'there',\n array=pdf.array,\n ),\n output_schema,\n PandasUDFType.GROUPED_MAP\n )\n\n udf3 = pandas_udf(\n lambda key, pdf: pdf.assign(\n id=key[0],\n byte=pdf.byte * 2,\n short=pdf.short * 2,\n int=pdf.int * 2,\n long=pdf.long * 2,\n float=pdf.float * 2,\n double=pdf.double * 2,\n decim=pdf.decim * 2,\n bool=False if pdf.bool else True,\n str=pdf.str + 'there',\n 
array=pdf.array,\n ),\n output_schema,\n PandasUDFType.GROUPED_MAP\n )\n\n result1 = df.groupby('id').apply(udf1).sort('id').toPandas()\n expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)\n\n result2 = df.groupby('id').apply(udf2).sort('id').toPandas()\n expected2 = expected1\n\n result3 = df.groupby('id').apply(udf3).sort('id').toPandas()\n expected3 = expected1\n\n self.assertPandasEqual(expected1, result1)\n self.assertPandasEqual(expected2, result2)\n self.assertPandasEqual(expected3, result3)\n\n def test_array_type_correct(self):\n df = self.data.withColumn(\"arr\", array(col(\"id\"))).repartition(1, \"id\")\n\n output_schema = StructType(\n [StructField('id', LongType()),\n StructField('v', IntegerType()),\n StructField('arr', ArrayType(LongType()))])\n\n udf = pandas_udf(\n lambda pdf: pdf,\n output_schema,\n PandasUDFType.GROUPED_MAP\n )\n\n result = df.groupby('id').apply(udf).sort('id').toPandas()\n expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)\n self.assertPandasEqual(expected, result)\n\n def test_register_grouped_map_udf(self):\n foo_udf = pandas_udf(lambda x: x, \"id long\", PandasUDFType.GROUPED_MAP)\n with QuietTest(self.sc):\n with self.assertRaisesRegexp(\n ValueError,\n 'f.*SQL_BATCHED_UDF.*SQL_SCALAR_PANDAS_UDF.*SQL_GROUPED_AGG_PANDAS_UDF.*'):\n self.spark.catalog.registerFunction(\"foo_udf\", foo_udf)\n\n def test_decorator(self):\n df = self.data\n\n @pandas_udf(\n 'id long, v int, v1 double, v2 long',\n PandasUDFType.GROUPED_MAP\n )\n def foo(pdf):\n return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)\n\n result = df.groupby('id').apply(foo).sort('id').toPandas()\n expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)\n self.assertPandasEqual(expected, result)\n\n def test_coerce(self):\n df = self.data\n\n foo = pandas_udf(\n lambda pdf: pdf,\n 'id long, v double',\n PandasUDFType.GROUPED_MAP\n )\n\n result = df.groupby('id').apply(foo).sort('id').toPandas()\n expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)\n expected = expected.assign(v=expected.v.astype('float64'))\n self.assertPandasEqual(expected, result)\n\n def test_complex_groupby(self):\n df = self.data\n\n @pandas_udf(\n 'id long, v int, norm double',\n PandasUDFType.GROUPED_MAP\n )\n def normalize(pdf):\n v = pdf.v\n return pdf.assign(norm=(v - v.mean()) / v.std())\n\n result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()\n pdf = df.toPandas()\n expected = pdf.groupby(pdf['id'] % 2 == 0, as_index=False).apply(normalize.func)\n expected = expected.sort_values(['id', 'v']).reset_index(drop=True)\n expected = expected.assign(norm=expected.norm.astype('float64'))\n self.assertPandasEqual(expected, result)\n\n def test_empty_groupby(self):\n df = self.data\n\n @pandas_udf(\n 'id long, v int, norm double',\n PandasUDFType.GROUPED_MAP\n )\n def normalize(pdf):\n v = pdf.v\n return pdf.assign(norm=(v - v.mean()) / v.std())\n\n result = df.groupby().apply(normalize).sort('id', 'v').toPandas()\n pdf = df.toPandas()\n expected = normalize.func(pdf)\n expected = expected.sort_values(['id', 'v']).reset_index(drop=True)\n expected = expected.assign(norm=expected.norm.astype('float64'))\n self.assertPandasEqual(expected, result)\n\n def test_datatype_string(self):\n df = self.data\n\n foo_udf = pandas_udf(\n lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),\n 'id long, v int, v1 double, v2 long',\n PandasUDFType.GROUPED_MAP\n )\n\n result = 
df.groupby('id').apply(foo_udf).sort('id').toPandas()\n expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)\n self.assertPandasEqual(expected, result)\n\n def test_wrong_return_type(self):\n with QuietTest(self.sc):\n with self.assertRaisesRegexp(\n NotImplementedError,\n 'Invalid returnType.*grouped map Pandas UDF.*MapType'):\n pandas_udf(\n lambda pdf: pdf,\n 'id long, v map<int, int>',\n PandasUDFType.GROUPED_MAP)\n\n def test_wrong_args(self):\n df = self.data\n\n with QuietTest(self.sc):\n with self.assertRaisesRegexp(ValueError, 'Invalid udf'):\n df.groupby('id').apply(lambda x: x)\n with self.assertRaisesRegexp(ValueError, 'Invalid udf'):\n df.groupby('id').apply(udf(lambda x: x, DoubleType()))\n with self.assertRaisesRegexp(ValueError, 'Invalid udf'):\n df.groupby('id').apply(sum(df.v))\n with self.assertRaisesRegexp(ValueError, 'Invalid udf'):\n df.groupby('id').apply(df.v + 1)\n with self.assertRaisesRegexp(ValueError, 'Invalid function'):\n df.groupby('id').apply(\n pandas_udf(lambda: 1, StructType([StructField(\"d\", DoubleType())])))\n with self.assertRaisesRegexp(ValueError, 'Invalid udf'):\n df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))\n with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):\n df.groupby('id').apply(\n pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))\n\n def test_unsupported_types(self):\n import pyarrow as pa\n\n common_err_msg = 'Invalid returnType.*grouped map Pandas UDF.*'\n unsupported_types = [\n StructField('map', MapType(StringType(), IntegerType())),\n StructField('arr_ts', ArrayType(TimestampType())),\n StructField('null', NullType()),\n StructField('struct', StructType([StructField('l', LongType())])),\n ]\n\n # TODO: Remove this if-statement once minimum pyarrow version is 0.10.0\n if LooseVersion(pa.__version__) < LooseVersion(\"0.10.0\"):\n unsupported_types.append(StructField('bin', BinaryType()))\n\n for unsupported_type in unsupported_types:\n schema = StructType([StructField('id', LongType(), True), unsupported_type])\n with QuietTest(self.sc):\n with self.assertRaisesRegexp(NotImplementedError, common_err_msg):\n pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)\n\n # Regression test for SPARK-23314\n def test_timestamp_dst(self):\n # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am\n dt = [datetime.datetime(2015, 11, 1, 0, 30),\n datetime.datetime(2015, 11, 1, 1, 30),\n datetime.datetime(2015, 11, 1, 2, 30)]\n df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')\n foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)\n result = df.groupby('time').apply(foo_udf).sort('time')\n self.assertPandasEqual(df.toPandas(), result.toPandas())\n\n def test_udf_with_key(self):\n import numpy as np\n\n df = self.data\n pdf = df.toPandas()\n\n def foo1(key, pdf):\n assert type(key) == tuple\n assert type(key[0]) == np.int64\n\n return pdf.assign(v1=key[0],\n v2=pdf.v * key[0],\n v3=pdf.v * pdf.id,\n v4=pdf.v * pdf.id.mean())\n\n def foo2(key, pdf):\n assert type(key) == tuple\n assert type(key[0]) == np.int64\n assert type(key[1]) == np.int32\n\n return pdf.assign(v1=key[0],\n v2=key[1],\n v3=pdf.v * key[0],\n v4=pdf.v + key[1])\n\n def foo3(key, pdf):\n assert type(key) == tuple\n assert len(key) == 0\n return pdf.assign(v1=pdf.v * pdf.id)\n\n # v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>\n # v3 is long because pd.Series<int64> * pd.Series<int32> results in 
pd.Series<int64>\n udf1 = pandas_udf(\n foo1,\n 'id long, v int, v1 long, v2 int, v3 long, v4 double',\n PandasUDFType.GROUPED_MAP)\n\n udf2 = pandas_udf(\n foo2,\n 'id long, v int, v1 long, v2 int, v3 int, v4 int',\n PandasUDFType.GROUPED_MAP)\n\n udf3 = pandas_udf(\n foo3,\n 'id long, v int, v1 long',\n PandasUDFType.GROUPED_MAP)\n\n # Test groupby column\n result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()\n expected1 = pdf.groupby('id', as_index=False)\\\n .apply(lambda x: udf1.func((x.id.iloc[0],), x))\\\n .sort_values(['id', 'v']).reset_index(drop=True)\n self.assertPandasEqual(expected1, result1)\n\n # Test groupby expression\n result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()\n expected2 = pdf.groupby(pdf.id % 2, as_index=False)\\\n .apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\\\n .sort_values(['id', 'v']).reset_index(drop=True)\n self.assertPandasEqual(expected2, result2)\n\n # Test complex groupby\n result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()\n expected3 = pdf.groupby([pdf.id, pdf.v % 2], as_index=False)\\\n .apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\\\n .sort_values(['id', 'v']).reset_index(drop=True)\n self.assertPandasEqual(expected3, result3)\n\n # Test empty groupby\n result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()\n expected4 = udf3.func((), pdf)\n self.assertPandasEqual(expected4, result4)\n\n def test_column_order(self):\n import pandas as pd\n\n # Helper function to set column names from a list\n def rename_pdf(pdf, names):\n pdf.rename(columns={old: new for old, new in\n zip(pd_result.columns, names)}, inplace=True)\n\n df = self.data\n grouped_df = df.groupby('id')\n grouped_pdf = df.toPandas().groupby('id', as_index=False)\n\n # Function returns a pdf with required column names, but order could be arbitrary using dict\n def change_col_order(pdf):\n # Constructing a DataFrame from a dict should result in the same order,\n # but use from_items to ensure the pdf column order is different than schema\n return pd.DataFrame.from_items([\n ('id', pdf.id),\n ('u', pdf.v * 2),\n ('v', pdf.v)])\n\n ordered_udf = pandas_udf(\n change_col_order,\n 'id long, v int, u int',\n PandasUDFType.GROUPED_MAP\n )\n\n # The UDF result should assign columns by name from the pdf\n result = grouped_df.apply(ordered_udf).sort('id', 'v')\\\n .select('id', 'u', 'v').toPandas()\n pd_result = grouped_pdf.apply(change_col_order)\n expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)\n self.assertPandasEqual(expected, result)\n\n # Function returns a pdf with positional columns, indexed by range\n def range_col_order(pdf):\n # Create a DataFrame with positional columns, fix types to long\n return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')\n\n range_udf = pandas_udf(\n range_col_order,\n 'id long, u long, v long',\n PandasUDFType.GROUPED_MAP\n )\n\n # The UDF result uses positional columns from the pdf\n result = grouped_df.apply(range_udf).sort('id', 'v') \\\n .select('id', 'u', 'v').toPandas()\n pd_result = grouped_pdf.apply(range_col_order)\n rename_pdf(pd_result, ['id', 'u', 'v'])\n expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)\n self.assertPandasEqual(expected, result)\n\n # Function returns a pdf with columns indexed with integers\n def int_index(pdf):\n return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))\n\n int_index_udf = pandas_udf(\n int_index,\n 'id long, u int, v int',\n 
PandasUDFType.GROUPED_MAP\n )\n\n # The UDF result should assign columns by position of integer index\n result = grouped_df.apply(int_index_udf).sort('id', 'v') \\\n .select('id', 'u', 'v').toPandas()\n pd_result = grouped_pdf.apply(int_index)\n rename_pdf(pd_result, ['id', 'u', 'v'])\n expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)\n self.assertPandasEqual(expected, result)\n\n @pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)\n def column_name_typo(pdf):\n return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})\n\n @pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)\n def invalid_positional_types(pdf):\n return pd.DataFrame([(u'a', 1.2)])\n\n with QuietTest(self.sc):\n with self.assertRaisesRegexp(Exception, \"KeyError: 'id'\"):\n grouped_df.apply(column_name_typo).collect()\n import pyarrow as pa\n if LooseVersion(pa.__version__) < LooseVersion(\"0.11.0\"):\n # TODO: see ARROW-1949. Remove when the minimum PyArrow version becomes 0.11.0.\n with self.assertRaisesRegexp(Exception, \"No cast implemented\"):\n grouped_df.apply(invalid_positional_types).collect()\n else:\n with self.assertRaisesRegexp(Exception, \"an integer is required\"):\n grouped_df.apply(invalid_positional_types).collect()\n\n def test_positional_assignment_conf(self):\n import pandas as pd\n\n with self.sql_conf({\n \"spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName\": False}):\n\n @pandas_udf(\"a string, b float\", PandasUDFType.GROUPED_MAP)\n def foo(_):\n return pd.DataFrame([('hi', 1)], columns=['x', 'y'])\n\n df = self.data\n result = df.groupBy('id').apply(foo).select('a', 'b').collect()\n for r in result:\n self.assertEqual(r.a, 'hi')\n self.assertEqual(r.b, 1)\n\n def test_self_join_with_pandas(self):\n @pandas_udf('key long, col string', PandasUDFType.GROUPED_MAP)\n def dummy_pandas_udf(df):\n return df[['key', 'col']]\n\n df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),\n Row(key=2, col='C')])\n df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)\n\n # this was throwing an AnalysisException before SPARK-24208\n res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),\n col('temp0.key') == col('temp1.key'))\n self.assertEquals(res.count(), 5)\n\n def test_mixed_scalar_udfs_followed_by_grouby_apply(self):\n import pandas as pd\n\n df = self.spark.range(0, 10).toDF('v1')\n df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \\\n .withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))\n\n result = df.groupby() \\\n .apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),\n 'sum int',\n PandasUDFType.GROUPED_MAP))\n\n self.assertEquals(result.collect()[0]['sum'], 165)\n\n\nif __name__ == \"__main__\":\n from pyspark.sql.tests.test_pandas_udf_grouped_map import *\n\n try:\n import xmlrunner\n testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')\n except ImportError:\n testRunner = None\n unittest.main(testRunner=testRunner, verbosity=2)\n"
] | [
[
"pandas.DataFrame.from_items",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
DevinCheung/ColossalAI | [
"632e622de818697f9949e35117c0432d88f62c87",
"632e622de818697f9949e35117c0432d88f62c87",
"632e622de818697f9949e35117c0432d88f62c87",
"632e622de818697f9949e35117c0432d88f62c87"
] | [
"setup.py",
"colossalai/nn/layer/parallel_1d/layers.py",
"tests/test_trainer/test_trainer_with_non_pipe_schedule.py",
"tests/test_context/test_2p5d_init.py"
] | [
"import os\nimport subprocess\nimport sys\n\nimport torch\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME\n\n# ninja build does not work unless include_dirs are abs path\nthis_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef get_cuda_bare_metal_version(cuda_dir):\n raw_output = subprocess.check_output(\n [cuda_dir + \"/bin/nvcc\", \"-V\"], universal_newlines=True)\n output = raw_output.split()\n release_idx = output.index(\"release\") + 1\n release = output[release_idx].split(\".\")\n bare_metal_major = release[0]\n bare_metal_minor = release[1][0]\n\n return raw_output, bare_metal_major, bare_metal_minor\n\n\ndef check_cuda_torch_binary_vs_bare_metal(cuda_dir):\n raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(\n cuda_dir)\n torch_binary_major = torch.version.cuda.split(\".\")[0]\n torch_binary_minor = torch.version.cuda.split(\".\")[1]\n\n print(\"\\nCompiling cuda extensions with\")\n print(raw_output + \"from \" + cuda_dir + \"/bin\\n\")\n\n if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):\n raise RuntimeError(\"Cuda extensions are being compiled with a version of Cuda that does \" +\n \"not match the version used to compile Pytorch binaries. \" +\n \"Pytorch binaries were compiled with Cuda {}.\\n\".format(torch.version.cuda) +\n \"In some cases, a minor-version mismatch will not cause later errors: \" +\n \"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. \"\n \"You can try commenting out this check (at your own risk).\")\n\n\ndef fetch_requirements(path):\n with open(path, 'r') as fd:\n return [r.strip() for r in fd.readlines()]\n\n\nif not torch.cuda.is_available():\n # https://github.com/NVIDIA/apex/issues/486\n # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),\n # which will fail if you are compiling in an environment without visible GPUs (e.g. 
during an nvidia-docker build command).\n print('\\nWarning: Torch did not find available GPUs on this system.\\n',\n 'If your intention is to cross-compile, this is not an error.\\n'\n 'By default, Colossal-AI will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\\n'\n 'Volta (compute capability 7.0), Turing (compute capability 7.5),\\n'\n 'and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\\n'\n 'If you wish to cross-compile for a single specific architecture,\\n'\n 'export TORCH_CUDA_ARCH_LIST=\"compute capability\" before running setup.py.\\n')\n if os.environ.get(\"TORCH_CUDA_ARCH_LIST\", None) is None:\n _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME)\n if int(bare_metal_major) == 11:\n os.environ[\"TORCH_CUDA_ARCH_LIST\"] = \"6.0;6.1;6.2;7.0;7.5;8.0\"\n else:\n os.environ[\"TORCH_CUDA_ARCH_LIST\"] = \"6.0;6.1;6.2;7.0;7.5\"\n\nprint(\"\\n\\ntorch.__version__ = {}\\n\\n\".format(torch.__version__))\nTORCH_MAJOR = int(torch.__version__.split('.')[0])\nTORCH_MINOR = int(torch.__version__.split('.')[1])\n\nif TORCH_MAJOR == 0 and TORCH_MINOR < 4:\n raise RuntimeError(\"Colossal-AI requires Pytorch 0.4 or newer.\\n\" +\n \"The latest stable release can be obtained from https://pytorch.org/\")\n\ncmdclass = {}\next_modules = []\n\n# Set up macros for forward/backward compatibility hack around\n# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e\n# and\n# https://github.com/NVIDIA/apex/issues/456\n# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac\nversion_ge_1_1 = []\nif (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):\n version_ge_1_1 = ['-DVERSION_GE_1_1']\nversion_ge_1_3 = []\nif (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):\n version_ge_1_3 = ['-DVERSION_GE_1_3']\nversion_ge_1_5 = []\nif (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):\n version_ge_1_5 = ['-DVERSION_GE_1_5']\nversion_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5\n\nif \"--cuda_ext\" in sys.argv:\n if TORCH_MAJOR == 0:\n raise RuntimeError(\"--cuda_ext requires Pytorch 1.0 or later, \"\n \"found torch.__version__ = {}\".format(torch.__version__))\n\n sys.argv.remove(\"--cuda_ext\")\n\n if CUDA_HOME is None:\n raise RuntimeError(\n \"--cuda_ext was requested, but nvcc was not found. Are you sure your environment has nvcc available? 
If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.\")\n else:\n check_cuda_torch_binary_vs_bare_metal(CUDA_HOME)\n\n ext_modules.append(\n CUDAExtension(name='colossal_C',\n sources=['csrc/colossal_C_frontend.cpp',\n 'csrc/multi_tensor_sgd_kernel.cu',\n 'csrc/multi_tensor_scale_kernel.cu',\n 'csrc/multi_tensor_adam.cu',\n 'csrc/multi_tensor_l2norm_kernel.cu',\n 'csrc/multi_tensor_lamb.cu'],\n extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,\n 'nvcc': ['-lineinfo',\n '-O3',\n # '--resource-usage',\n '--use_fast_math'] + version_dependent_macros}))\n\n\ninstall_requires = fetch_requirements('requirements/requirements.txt')\n\nsetup(\n name='colossalai',\n version='0.0.1-beta',\n packages=find_packages(exclude=('csrc',\n 'tests',\n 'docs',\n 'tests',\n '*.egg-info',)),\n description='An integrated large-scale model training system with efficient parallelization techniques',\n ext_modules=ext_modules,\n cmdclass={'build_ext': BuildExtension} if ext_modules else {},\n install_requires=install_requires,\n)",
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport math\nimport numbers\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom torch import Tensor\nfrom torch.nn.parameter import Parameter\nfrom typing import Tuple\nimport importlib\n\nfrom colossalai.context import seed, ParallelMode\nfrom colossalai.core import global_context as gpc\nfrom colossalai.registry import LAYERS\nfrom colossalai.utils import get_current_device\nfrom ._operation import FusedLayerNormAffineFunction1D\nfrom .._common_utils import divide, set_tensor_parallel_attribute_by_partition\nfrom .._parallel_utilities import reduce_grad, reduce_input, gather_forward_split_backward, \\\n split_forward_gather_backward\nfrom ..base_layer import ParallelLayer\n\n\[email protected]_module\nclass Linear1D_Col(ParallelLayer):\n \"\"\"Linear layer with column parallelism.\n\n The linear layer is defined as :math:`Y = XA + b`. A is parallelized along\n its second dimension as :math:`A = [A_1, ..., A_p]`.\n\n :param in_features: first dimension of matrix A.\n :type in_features: int\n :param output_size: second dimension of matrix A.\n :type output_size: int\n :param bias: If true, add bias, defaults to True\n :type bias: bool, optional\n :param dtype: The dtype of parameters, defaults to None\n :type dtype: torch.dtype, optional\n :param gather_output: If true, call all-gether on output and make Y avaiable\n to all GPUs, otherwise, every GPU will have its output\n which is :math:`Y_i = XA_i`, defaults to False\n :type gather_output: bool, optional\n \"\"\"\n\n def __init__(self,\n in_features: int,\n output_size: int,\n bias: bool = True,\n dtype: torch.dtype = None,\n gather_output: bool = False,\n skip_bias_add: bool = False,\n init_weight='torch',\n init_bias='torch'\n ):\n super().__init__()\n\n # Keep input parameters\n self.in_features = in_features\n self.out_features = output_size\n self.gather_output = gather_output\n self.skip_bias_add = skip_bias_add\n\n if skip_bias_add and not bias:\n raise ValueError('cannot skip bias addition if bias is None')\n\n self.output_size_per_partition = divide(output_size, gpc.tensor_parallel_size)\n\n # Parameters.\n # Initialize weight.\n factory_kwargs = {'device': get_current_device(), 'dtype': dtype}\n self.weight = Parameter(torch.empty(\n self.output_size_per_partition, self.in_features,\n **factory_kwargs))\n\n if bias:\n self.bias = Parameter(torch.empty(\n self.output_size_per_partition,\n **factory_kwargs))\n # Always initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n else:\n self.register_parameter('bias', None)\n with seed(ParallelMode.TENSOR):\n self.reset_parameters(init_weight, init_bias)\n self._set_tensor_parallel_attributes()\n\n def reset_parameters(self, init_weight, init_bias) -> None:\n assert init_weight in ('torch', 'jax', 'zero')\n assert init_bias in ('torch', 'jax', 'zero')\n # setting\n fan_in, fan_out = self.in_features, self.out_features\n\n # init weight\n if init_weight == 'torch':\n a = math.sqrt(5)\n nonlinearity = 'leaky_relu'\n std = init.calculate_gain(nonlinearity, a) / math.sqrt(fan_in)\n bound = math.sqrt(3.0) * std\n init.uniform_(self.weight, -bound, bound)\n elif init_weight == 'jax':\n std = math.sqrt(2.0 / float(fan_in + fan_out))\n a = math.sqrt(3.0) * std\n init.uniform_(self.weight, -a, a)\n elif init_weight == 'zero':\n init.zeros_(self.weight)\n\n # init bias\n if self.bias is not None:\n if init_bias == 'torch':\n bound = 1 / 
math.sqrt(fan_in) if fan_in > 0 else 0\n init.uniform_(self.bias, -bound, bound)\n elif init_bias == 'jax':\n init.normal_(self.bias, std=1e-6)\n elif init_bias == 'zero':\n init.zeros_(self.bias)\n\n def _set_tensor_parallel_attributes(self):\n num_partition = gpc.get_world_size(ParallelMode.TENSOR)\n set_tensor_parallel_attribute_by_partition(self.weight, num_partition)\n if self.bias is not None:\n set_tensor_parallel_attribute_by_partition(self.bias, num_partition)\n\n def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]:\n # Set up backprop all-reduce.\n input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D)\n # Matrix multiply.\n\n bias = self.bias if not self.skip_bias_add else None\n output_parallel = F.linear(input_parallel, self.weight, bias)\n if self.gather_output:\n # All-gather across the partitions.\n output = gather_forward_split_backward(\n output_parallel, ParallelMode.PARALLEL_1D, dim=-1)\n else:\n output = output_parallel\n if self.skip_bias_add:\n return output, self.bias\n else:\n return output\n\n\[email protected]_module\nclass Linear1D_Row(ParallelLayer):\n \"\"\" Linear layer with row parallelism \n\n :param in_features: size of each input sample\n :type in_features: int\n :param out_features: size of each output sample\n :type out_features: int\n :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to True\n :type bias: bool, optional\n :param dtype: The dtype of parameters, defaults to None\n :type dtype: torch.dtype, optional\n :param parallel_input: If set to ``True``, it's assumed that the input is splitted, defaults to False\n :type parallel_input: bool, optional\n \"\"\"\n\n def __init__(self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n dtype: torch.dtype = None,\n parallel_input: bool = False,\n skip_bias_add: bool = False,\n init_weight='torch',\n init_bias='torch'\n ):\n super().__init__()\n\n # Keep input parameters\n self.in_features = in_features\n self.out_features = out_features\n self.parallel_input = parallel_input\n self.skip_bias_add = skip_bias_add\n\n if skip_bias_add and not bias:\n raise ValueError('cannot skip bias addition if bias is None')\n\n # Divide the weight matrix along the last dimension.\n self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size)\n\n # Parameters.\n # Initialize weight.\n factory_kwargs = {'device': get_current_device(), 'dtype': dtype}\n self.weight = Parameter(torch.empty(\n self.out_features,\n self.input_size_per_partition,\n **factory_kwargs))\n\n if bias:\n self.bias = Parameter(torch.empty(\n self.out_features,\n **factory_kwargs\n ))\n\n # Always initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n else:\n self.register_parameter('bias', None)\n with seed(ParallelMode.TENSOR):\n self.reset_parameters(init_weight, init_bias)\n self._set_tensor_parallel_attributes()\n\n def reset_parameters(self, init_weight, init_bias) -> None:\n assert init_weight in ('torch', 'jax', 'zero')\n assert init_bias in ('torch', 'jax', 'zero')\n # setting\n fan_in, fan_out = self.in_features, self.out_features\n\n # init weight\n if init_weight == 'torch':\n a = math.sqrt(5)\n nonlinearity = 'leaky_relu'\n std = init.calculate_gain(nonlinearity, a) / math.sqrt(fan_in)\n bound = math.sqrt(3.0) * std\n init.uniform_(self.weight, -bound, bound)\n elif init_weight == 'jax':\n std = math.sqrt(2.0 / float(fan_in + fan_out))\n a = math.sqrt(3.0) * std\n init.uniform_(self.weight, -a, a)\n elif init_weight == 'zero':\n 
init.zeros_(self.weight)\n\n # init bias\n if self.bias is not None:\n if init_bias == 'torch':\n bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n init.uniform_(self.bias, -bound, bound)\n elif init_bias == 'jax':\n init.normal_(self.bias, std=1e-6)\n elif init_bias == 'zero':\n init.zeros_(self.bias)\n dist.broadcast(self.bias,\n src=gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0],\n group=gpc.get_group(ParallelMode.PARALLEL_1D))\n\n def _set_tensor_parallel_attributes(self):\n num_partition = gpc.get_world_size(ParallelMode.TENSOR)\n set_tensor_parallel_attribute_by_partition(self.weight, num_partition)\n\n def forward(self, input_: Tensor) -> Tensor:\n # Set up backprop all-reduce.\n if self.parallel_input:\n input_ = input_\n else:\n input_ = split_forward_gather_backward(\n input_, ParallelMode.PARALLEL_1D, dim=-1)\n\n output_parallel = F.linear(input_, self.weight)\n output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)\n\n if not self.skip_bias_add:\n output = output + self.bias\n return output\n else:\n return output, self.bias\n\n\[email protected]_module\nclass MixedFusedLayerNorm1D(torch.nn.Module):\n\n def __init__(self, normalized_shape, eps=1e-5):\n super(MixedFusedLayerNorm1D, self).__init__()\n\n if isinstance(normalized_shape, numbers.Integral):\n normalized_shape = (normalized_shape,)\n self.normalized_shape = torch.Size(normalized_shape)\n self.eps = eps\n self.weight = Parameter(torch.Tensor(*normalized_shape))\n self.bias = Parameter(torch.Tensor(*normalized_shape))\n self.reset_parameters()\n\n def reset_parameters(self):\n init.ones_(self.weight)\n init.zeros_(self.bias)\n\n def forward(self, input):\n return FusedLayerNormAffineFunction1D.apply(\n input, self.weight, self.bias, self.normalized_shape, self.eps)\n",
"import colossalai\nimport os\nimport pytest\nimport torch\nimport torch.nn as nn\nimport torch.multiprocessing as mp\n\nfrom pathlib import Path\nfrom torchvision import transforms\nfrom torch.optim import Adam\nfrom colossalai.amp.amp_type import AMP_TYPE\nfrom colossalai.core import global_context as gpc\nfrom colossalai.logging import get_dist_logger\nfrom colossalai.trainer import Trainer\nfrom colossalai.utils import get_dataloader\nfrom torchvision.models import resnet18\nfrom torchvision.datasets import CIFAR10\nfrom functools import partial\n\nBATCH_SIZE = 16\nIMG_SIZE = 32\nNUM_EPOCHS = 200\n\nCONFIG = dict(\n # Config\n fp16=dict(\n mode=AMP_TYPE.TORCH\n )\n)\n\n\ndef run_trainer_no_pipeline(rank, world_size):\n colossalai.launch(\n config=CONFIG,\n rank=rank,\n world_size=world_size,\n host='localhost',\n port=29930,\n backend='nccl'\n )\n\n # build model\n model = resnet18(num_classes=10)\n\n # build dataloaders\n train_dataset = CIFAR10(\n root=Path(os.environ['DATA']),\n download=True,\n transform=transforms.Compose(\n [\n transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ]\n )\n )\n\n test_dataset = CIFAR10(\n root=Path(os.environ['DATA']),\n train=False,\n download=True,\n transform=transforms.Compose(\n [\n transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ]\n )\n )\n\n train_dataloader = get_dataloader(dataset=train_dataset,\n shuffle=True,\n batch_size=BATCH_SIZE,\n pin_memory=True,\n drop_last=True)\n\n test_dataloader = get_dataloader(dataset=test_dataset,\n batch_size=BATCH_SIZE,\n pin_memory=True,\n drop_last=True)\n\n # build optimizer\n optimizer = Adam(model.parameters(), lr=0.001)\n criterion = nn.CrossEntropyLoss()\n\n engine, train_dataloader, *args = colossalai.initialize(\n model=model,\n optimizer=optimizer,\n criterion=criterion,\n train_dataloader=train_dataloader\n )\n\n logger = get_dist_logger()\n logger.info(\"engine is built\", ranks=[0])\n\n trainer = Trainer(engine=engine,\n logger=logger)\n logger.info(\"trainer is built\", ranks=[0])\n\n logger.info(\"start training\", ranks=[0])\n trainer.fit(\n train_dataloader=train_dataloader,\n test_dataloader=test_dataloader,\n epochs=NUM_EPOCHS,\n max_steps=100,\n display_progress=True,\n test_interval=5\n )\n gpc.destroy()\n torch.cuda.empty_cache()\n\n\[email protected]\ndef test_trainer_no_pipeline():\n world_size = 4\n run_func = partial(run_trainer_no_pipeline, world_size=world_size)\n mp.spawn(run_func, nprocs=world_size)\n\n\nif __name__ == '__main__':\n test_trainer_no_pipeline()\n",
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom functools import partial\nfrom pathlib import Path\n\nimport pytest\nimport torch\nimport torch.multiprocessing as mp\n\nfrom colossalai.context.parallel_mode import ParallelMode\nfrom colossalai.core import global_context as gpc\nfrom colossalai.initialize import launch\n\nCONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_2p5d_init.py').absolute()\n\n\ndef check_data_parallel_rank(rank):\n dp_rank = gpc.get_local_rank(ParallelMode.DATA)\n\n if rank in list(range(16)):\n assert dp_rank == 0\n elif rank in list(range(16, 32)):\n assert dp_rank == 1\n\n\ndef check_pipeline_parallel_rank(rank):\n ppr = gpc.get_local_rank(ParallelMode.PIPELINE)\n\n if rank in list(range(8)):\n assert ppr == 0\n elif rank in list(range(8, 16)):\n assert ppr == 1\n elif rank in list(range(16, 24)):\n assert ppr == 0\n elif rank in list(range(24, 32)):\n assert ppr == 1\n\n\ndef check_tensor_parallel_rank(rank):\n tp_rank = gpc.get_local_rank(ParallelMode.TENSOR)\n\n for i in range(8):\n ranks = list(range(i, 32, 8))\n if rank in ranks:\n assert tp_rank == i, f'{rank}:{tp_rank}'\n\n\ndef check_2p5d_parallel_rank(rank):\n rp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)\n cp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)\n dp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)\n xp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_XZ)\n\n # check for row parallel group\n for i in range(2):\n ranks = list(range(i, 32, 2))\n if rank in ranks:\n assert rp_rank == i\n\n # check for col parallel group\n for i in range(2):\n ranks = list(range(i * 2, 32, 4))\n ranks_plus_ones = [val + 1 for val in ranks]\n ranks.extend(ranks_plus_ones)\n if rank in ranks:\n assert cp_rank == i\n\n # check for depth parallel group\n for i in range(2):\n ranks = []\n for j in range(i * 4, 32, 8):\n ranks.extend([j + k for k in range(4)])\n if rank in ranks:\n assert dp_rank == i\n\n # check for xz parallel group\n for i in range(2):\n ranks = list(range(i * 2, 32, 8))\n ranks_plus_one = [val + 1 for val in ranks]\n ranks.extend(ranks_plus_one)\n if rank in ranks:\n assert xp_rank == i\n\n\ndef init_2halfd(rank, world_size, backend, port, host):\n dist_args = dict(\n config=CONFIG_PATH,\n rank=rank,\n world_size=world_size,\n backend=backend,\n port=port,\n host=host,\n verbose=True\n )\n launch(**dist_args)\n check_data_parallel_rank(rank)\n check_pipeline_parallel_rank(rank)\n check_tensor_parallel_rank(rank)\n check_2p5d_parallel_rank(rank)\n gpc.destroy()\n torch.cuda.empty_cache()\n\n\[email protected]\ndef test_2halfd_init():\n \"\"\"\n As no computation or communication is done, we can run this test on CPU.\n \"\"\"\n world_size = 32\n test_fn = partial(init_2halfd,\n world_size=world_size,\n backend='gloo',\n port='29901',\n host='localhost'\n )\n mp.spawn(test_fn, nprocs=world_size)\n\n\nif __name__ == '__main__':\n test_2halfd_init()\n"
] | [
[
"torch.utils.cpp_extension.CUDAExtension",
"torch.__version__.split",
"torch.version.cuda.split",
"torch.cuda.is_available"
],
[
"torch.nn.init.calculate_gain",
"torch.Size",
"torch.nn.init.uniform_",
"torch.empty",
"torch.Tensor",
"torch.nn.init.ones_",
"torch.no_grad",
"torch.nn.init.normal_",
"torch.nn.init.zeros_",
"torch.nn.functional.linear"
],
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.empty_cache",
"torch.multiprocessing.spawn"
],
[
"torch.cuda.empty_cache",
"torch.multiprocessing.spawn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JackDanger/tensorflow | [
"5d615f2f05d6ecfc37951ef574e359829cb9e3e0"
] | [
"tensorflow/python/keras/backend.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n# pylint: disable=redefined-outer-name\n# pylint: disable=redefined-builtin\n\"\"\"Keras backend API.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport itertools\nimport json\nimport os\nimport sys\nimport threading\nimport weakref\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python import tf2\nfrom tensorflow.python.client import session as session_module\nfrom tensorflow.python.distribute import distribute_coordinator as dc\nfrom tensorflow.python.distribute import distribute_coordinator_context as dc_context\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import function as eager_function\nfrom tensorflow.python.eager import lift_to_graph\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import device as tfdev\nfrom tensorflow.python.framework import dtypes as dtypes_module\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend_config\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import ctc_ops as ctc\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import gradients as gradients_module\nfrom tensorflow.python.ops import image_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import map_fn as map_fn_lib\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variables as variables_module\nfrom tensorflow.python.ops.ragged import ragged_concat_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import moving_averages\nfrom tensorflow.python.training.tracking import util as tracking_util\nfrom 
tensorflow.python.util import nest\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.deprecation import deprecated\nfrom tensorflow.python.util.tf_export import keras_export\n\npy_all = all\npy_sum = sum\npy_any = any\n\n# INTERNAL UTILS\n\n# The internal graph maintained by Keras and used by the symbolic Keras APIs\n# while executing eagerly (such as the functional API for model-building).\n_GRAPH = None\n\n# A graph which is used for constructing functions in eager mode.\n_CURRENT_SCRATCH_GRAPH = None\n\n# This is a thread local object that will hold the default internal TF session\n# used by Keras. It can be set manually via `set_session(sess)`.\n_SESSION = threading.local()\n\n\n# _DUMMY_EAGER_GRAPH.key is used as a key in _GRAPH_LEARNING_PHASES.\n# We keep a separate reference to it to make sure it does not get removed from\n# _GRAPH_LEARNING_PHASES.\n# _DummyEagerGraph inherits from threading.local to make its `key` attribute\n# thread local. This is needed to make set_learning_phase affect only the\n# current thread during eager execution (see b/123096885 for more details).\nclass _DummyEagerGraph(threading.local):\n \"\"\"_DummyEagerGraph provides a thread local `key` attribute.\n\n We can't use threading.local directly, i.e. without subclassing, because\n gevent monkey patches threading.local and its version does not support\n weak references.\n \"\"\"\n\n class _WeakReferencableClass(object):\n \"\"\"This dummy class is needed for two reasons.\n\n - We need something that supports weak references. Basic types like string\n and ints don't.\n - We need something whose hash and equality are based on object identity\n to make sure they are treated as different keys to _GRAPH_LEARNING_PHASES.\n\n An empty Python class satisfies both of these requirements.\n \"\"\"\n pass\n\n def __init__(self):\n # Constructors for classes subclassing threading.local run once\n # per thread accessing something in the class. 
Thus, each thread will\n # get a different key.\n super(_DummyEagerGraph, self).__init__()\n self.key = _DummyEagerGraph._WeakReferencableClass()\n\n\n_DUMMY_EAGER_GRAPH = _DummyEagerGraph()\n\n# This boolean flag can be set to True to leave variable initialization\n# up to the user.\n# Change its value via `manual_variable_initialization(value)`.\n_MANUAL_VAR_INIT = False\n\n# This list holds the available devices.\n# It is populated when `_get_available_gpus()` is called for the first time.\n# We assume our devices don't change henceforth.\n_LOCAL_DEVICES = None\n\n# The below functions are kept accessible from backend for compatibility.\nepsilon = backend_config.epsilon\nfloatx = backend_config.floatx\nimage_data_format = backend_config.image_data_format\nset_epsilon = backend_config.set_epsilon\nset_floatx = backend_config.set_floatx\nset_image_data_format = backend_config.set_image_data_format\n\n\n@keras_export('keras.backend.backend')\ndef backend():\n \"\"\"Publicly accessible method for determining the current backend.\n\n Only exists for API compatibility with multi-backend Keras.\n\n Returns:\n The string \"tensorflow\".\n \"\"\"\n return 'tensorflow'\n\n\n@keras_export('keras.backend.cast_to_floatx')\ndef cast_to_floatx(x):\n \"\"\"Cast a Numpy array to the default Keras float type.\n\n Arguments:\n x: Numpy array or TensorFlow tensor.\n\n Returns:\n The same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor\n if `x` was a tensor), cast to its new type.\n\n Example:\n\n >>> tf.keras.backend.floatx()\n 'float32'\n >>> arr = np.array([1.0, 2.0], dtype='float64')\n >>> arr.dtype\n dtype('float64')\n >>> new_arr = cast_to_floatx(arr)\n >>> new_arr\n array([1., 2.], dtype=float32)\n >>> new_arr.dtype\n dtype('float32')\n\n \"\"\"\n if isinstance(x, (ops.Tensor,\n variables_module.Variable,\n sparse_tensor.SparseTensor)):\n return math_ops.cast(x, dtype=floatx())\n return np.asarray(x, dtype=floatx())\n\n\n# A global dictionary mapping graph objects to an index of counters used\n# for various layer/optimizer names in each graph.\n# Allows to give unique autogenerated names to layers, in a graph-specific way.\nPER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()\n\n\n@keras_export('keras.backend.get_uid')\ndef get_uid(prefix=''):\n \"\"\"Associates a string prefix with an integer counter in a TensorFlow graph.\n\n Arguments:\n prefix: String prefix to index.\n\n Returns:\n Unique integer ID.\n\n Example:\n\n >>> get_uid('dense')\n 1\n >>> get_uid('dense')\n 2\n\n \"\"\"\n graph = get_graph()\n if graph not in PER_GRAPH_OBJECT_NAME_UIDS:\n PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int)\n layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph]\n layer_name_uids[prefix] += 1\n return layer_name_uids[prefix]\n\n\n@keras_export('keras.backend.reset_uids')\ndef reset_uids():\n \"\"\"Resets graph identifiers.\n \"\"\"\n\n PER_GRAPH_OBJECT_NAME_UIDS.clear()\n\n\n@keras_export('keras.backend.clear_session')\ndef clear_session():\n \"\"\"Resets all state generated by Keras.\n\n Keras manages a global state, which it uses to implement the Functional\n model-building API and to uniquify autogenerated layer names.\n\n If you are creating many models in a loop, this global state will consume\n an increasing amount of memory over time, and you may want to clear it.\n Calling `clear_session()` releases the global state: this helps avoid clutter\n from old models and layers, especially when memory is limited.\n\n Example 1: calling `clear_session()` when creating 
models in a loop\n\n ```python\n for _ in range(100):\n # Without `clear_session()`, each iteration of this loop will\n # slightly increase the size of the global state managed by Keras\n model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])\n\n for _ in range(100):\n # With `clear_session()` called at the beginning,\n # Keras starts with a blank state at each iteration\n # and memory consumption is constant over time.\n tf.keras.backend.clear_session()\n model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])\n ```\n\n Example 2: resetting the layer name generation counter\n\n >>> import tensorflow as tf\n >>> layers = [tf.keras.layers.Dense(10) for _ in range(10)]\n >>> new_layer = tf.keras.layers.Dense(10)\n >>> print(new_layer.name)\n dense_10\n >>> tf.keras.backend.set_learning_phase(1)\n >>> print(tf.keras.backend.learning_phase())\n 1\n >>> tf.keras.backend.clear_session()\n >>> new_layer = tf.keras.layers.Dense(10)\n >>> print(new_layer.name)\n dense\n \"\"\"\n global _SESSION\n global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned\n global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned\n global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned\n global _GRAPH\n global _FREEZABLE_VARS\n _GRAPH = None\n ops.reset_default_graph()\n reset_uids()\n _SESSION.session = None\n graph = get_graph()\n with graph.as_default():\n _GRAPH_LEARNING_PHASES.clear()\n # Create the learning phase placeholder in graph using the default factory.\n _GRAPH_LEARNING_PHASES.setdefault(graph)\n _GRAPH_VARIABLES.pop(graph, None)\n _GRAPH_TF_OPTIMIZERS.pop(graph, None)\n _FREEZABLE_VARS.pop(graph, None)\n\n\n@keras_export('keras.backend.manual_variable_initialization')\ndef manual_variable_initialization(value):\n \"\"\"Sets the manual variable initialization flag.\n\n This boolean flag determines whether\n variables should be initialized\n as they are instantiated (default), or if\n the user should handle the initialization\n (e.g. 
via `tf.compat.v1.initialize_all_variables()`).\n\n Arguments:\n value: Python boolean.\n \"\"\"\n global _MANUAL_VAR_INIT\n _MANUAL_VAR_INIT = value\n\n\n@keras_export('keras.backend.learning_phase')\ndef learning_phase():\n \"\"\"Returns the learning phase flag.\n\n The learning phase flag is a bool tensor (0 = test, 1 = train)\n to be passed as input to any Keras function\n that uses a different behavior at train time and test time.\n\n Returns:\n Learning phase (scalar integer tensor or Python integer).\n \"\"\"\n graph = ops.get_default_graph()\n if graph is _GRAPH:\n # Don't enter an init_scope for the learning phase if eager execution\n # is enabled but we're inside the Keras workspace graph.\n learning_phase = symbolic_learning_phase()\n else:\n with ops.init_scope():\n # We always check & set the learning phase inside the init_scope,\n # otherwise the wrong default_graph will be used to look up the learning\n # phase inside of functions & defuns.\n #\n # This is because functions & defuns (both in graph & in eager mode)\n # will always execute non-eagerly using a function-specific default\n # subgraph.\n learning_phase = _GRAPH_LEARNING_PHASES[None]\n _mark_func_graph_as_unsaveable(graph, learning_phase)\n return learning_phase\n\n\ndef global_learning_phase_is_set():\n return _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES\n\n\ndef _mark_func_graph_as_unsaveable(graph, learning_phase):\n \"\"\"Mark func graph as unsaveable due to use of symbolic keras learning phase.\n\n Functions that capture the symbolic learning phase cannot be exported to\n SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised\n if it is exported.\n\n Args:\n graph: Graph or FuncGraph object.\n learning_phase: Learning phase placeholder or int defined in the graph.\n \"\"\"\n if graph.building_function and is_placeholder(learning_phase):\n graph.mark_as_unsaveable(\n 'The keras learning phase placeholder was used inside a function. '\n 'Exporting placeholders is not supported when saving out a SavedModel. 
'\n 'Please call `tf.keras.backend.set_learning_phase(0)` in the function '\n 'to set the learning phase to a constant value.')\n\n\ndef symbolic_learning_phase():\n graph = get_graph()\n with graph.as_default():\n return _GRAPH_LEARNING_PHASES[graph]\n\n\ndef _default_learning_phase():\n if context.executing_eagerly():\n return 0\n else:\n with name_scope(''):\n return array_ops.placeholder_with_default(\n False, shape=(), name='keras_learning_phase')\n\n\n@keras_export('keras.backend.set_learning_phase')\ndef set_learning_phase(value):\n \"\"\"Sets the learning phase to a fixed value.\n\n The backend learning phase affects any code that calls\n `backend.learning_phase()`\n In particular, all Keras built-in layers use the learning phase as the default\n for the `training` arg to `Layer.__call__`.\n\n User-written layers and models can achieve the same behavior with code that\n looks like:\n\n ```python\n def call(self, inputs, training=None):\n if training is None:\n training = backend.learning_phase()\n ```\n\n Arguments:\n value: Learning phase value, either 0 or 1 (integers).\n 0 = test, 1 = train\n\n Raises:\n ValueError: if `value` is neither `0` nor `1`.\n \"\"\"\n global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned\n if value not in {0, 1}:\n raise ValueError('Expected learning phase to be 0 or 1.')\n with ops.init_scope():\n if context.executing_eagerly():\n # In an eager context, the learning phase values applies to both the eager\n # context and the internal Keras graph.\n _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value\n _GRAPH_LEARNING_PHASES[get_graph()] = value\n\n\n@keras_export('keras.backend.learning_phase_scope')\n@tf_contextlib.contextmanager\ndef learning_phase_scope(value):\n \"\"\"Provides a scope within which the learning phase is equal to `value`.\n\n The learning phase gets restored to its original value upon exiting the scope.\n\n Arguments:\n value: Learning phase value, either 0 or 1 (integers).\n 0 = test, 1 = train\n\n Yields:\n None.\n\n Raises:\n ValueError: if `value` is neither `0` nor `1`.\n \"\"\"\n global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned\n if value not in {0, 1}:\n raise ValueError('Expected learning phase to be 0 or 1.')\n\n with ops.init_scope():\n if context.executing_eagerly():\n previous_eager_value = _GRAPH_LEARNING_PHASES.get(\n _DUMMY_EAGER_GRAPH.key, None)\n previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)\n\n try:\n set_learning_phase(value)\n yield\n finally:\n # Restore learning phase to initial value.\n with ops.init_scope():\n if context.executing_eagerly():\n if previous_eager_value is not None:\n _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_eager_value\n elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES:\n del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]\n\n graph = get_graph()\n if previous_graph_value is not None:\n _GRAPH_LEARNING_PHASES[graph] = previous_graph_value\n elif graph in _GRAPH_LEARNING_PHASES:\n del _GRAPH_LEARNING_PHASES[graph]\n\n\n@tf_contextlib.contextmanager\ndef eager_learning_phase_scope(value):\n \"\"\"Internal scope that sets the learning phase in eager / tf.function only.\n\n Arguments:\n value: Learning phase value, either 0 or 1 (integers).\n 0 = test, 1 = train\n\n Yields:\n None.\n\n Raises:\n ValueError: if `value` is neither `0` nor `1`.\n \"\"\"\n global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned\n assert value in {0, 1}\n assert 
ops.executing_eagerly_outside_functions()\n global_learning_phase_was_set = global_learning_phase_is_set()\n if global_learning_phase_was_set:\n previous_value = learning_phase()\n try:\n _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value\n yield\n finally:\n # Restore learning phase to initial value or unset.\n if global_learning_phase_was_set:\n _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_value\n else:\n del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]\n\n\ndef _current_graph(op_input_list):\n \"\"\"Return the graph members of `op_input_list`, or the current graph.\"\"\"\n return ops._get_graph_from_inputs(op_input_list)\n\n\ndef _get_session(op_input_list=()):\n \"\"\"Returns the session object for the current thread.\"\"\"\n global _SESSION\n default_session = ops.get_default_session()\n if default_session is not None:\n session = default_session\n else:\n if ops.inside_function():\n raise RuntimeError('Cannot get session inside Tensorflow graph function.')\n # If we don't have a session, or that session does not match the current\n # graph, create and cache a new session.\n if (getattr(_SESSION, 'session', None) is None or\n _SESSION.session.graph is not _current_graph(op_input_list)):\n # If we are creating the Session inside a tf.distribute.Strategy scope,\n # we ask the strategy for the right session options to use.\n if distribution_strategy_context.has_strategy():\n configure_and_create_distributed_session(\n distribution_strategy_context.get_strategy())\n else:\n _SESSION.session = session_module.Session(\n config=get_default_session_config())\n session = _SESSION.session\n return session\n\n\n@keras_export(v1=['keras.backend.get_session'])\ndef get_session(op_input_list=()):\n \"\"\"Returns the TF session to be used by the backend.\n\n If a default TensorFlow session is available, we will return it.\n\n Else, we will return the global Keras session assuming it matches\n the current graph.\n\n If no global Keras session exists at this point:\n we will create a new global session.\n\n Note that you can manually set the global session\n via `K.set_session(sess)`.\n\n Arguments:\n op_input_list: An option sequence of tensors or ops, which will be used\n to determine the current graph. Otherwise the default graph will be\n used.\n\n Returns:\n A TensorFlow session.\n \"\"\"\n session = _get_session(op_input_list)\n if not _MANUAL_VAR_INIT:\n with session.graph.as_default():\n _initialize_variables(session)\n return session\n\n\n# Inject the get_session function to tracking_util to avoid the backward\n# dependency from TF to Keras.\ntracking_util.register_session_provider(get_session)\n\n\ndef get_graph():\n if context.executing_eagerly():\n global _GRAPH\n if _GRAPH is None:\n _GRAPH = func_graph.FuncGraph('keras_graph')\n return _GRAPH\n else:\n return ops.get_default_graph()\n\n\n@tf_contextlib.contextmanager\ndef _scratch_graph(graph=None):\n \"\"\"Retrieve a shared and temporary func graph.\n\n The eager execution path lifts a subgraph from the keras global graph into\n a scratch graph in order to create a function. DistributionStrategies, in\n turn, constructs multiple functions as well as a final combined function. In\n order for that logic to work correctly, all of the functions need to be\n created on the same scratch FuncGraph.\n\n Args:\n graph: A graph to be used as the current scratch graph. 
If not set then\n a scratch graph will either be retrieved or created:\n\n Yields:\n The current scratch graph.\n \"\"\"\n global _CURRENT_SCRATCH_GRAPH\n if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and\n _CURRENT_SCRATCH_GRAPH is not graph):\n raise ValueError('Multiple scratch graphs specified.')\n\n if _CURRENT_SCRATCH_GRAPH:\n yield _CURRENT_SCRATCH_GRAPH\n return\n\n graph = graph or func_graph.FuncGraph('keras_scratch_graph')\n try:\n _CURRENT_SCRATCH_GRAPH = graph\n yield graph\n finally:\n _CURRENT_SCRATCH_GRAPH = None\n\n\n@keras_export(v1=['keras.backend.set_session'])\ndef set_session(session):\n \"\"\"Sets the global TensorFlow session.\n\n Arguments:\n session: A TF Session.\n \"\"\"\n global _SESSION\n _SESSION.session = session\n\n\ndef get_default_session_config():\n if os.environ.get('OMP_NUM_THREADS'):\n logging.warning(\n 'OMP_NUM_THREADS is no longer used by the default Keras config. '\n 'To configure the number of threads, use tf.config.threading APIs.')\n\n config = context.context().config\n config.allow_soft_placement = True\n\n return config\n\n\ndef get_default_graph_uid_map():\n graph = ops.get_default_graph()\n name_uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph, None)\n if name_uid_map is None:\n name_uid_map = collections.defaultdict(int)\n PER_GRAPH_OBJECT_NAME_UIDS[graph] = name_uid_map\n return name_uid_map\n\n\n# DEVICE MANIPULATION\n\n\nclass _TfDeviceCaptureOp(object):\n \"\"\"Class for capturing the TF device scope.\"\"\"\n\n def __init__(self):\n self.device = None\n\n def _set_device(self, device):\n \"\"\"This method captures TF's explicit device scope setting.\"\"\"\n if tfdev.is_device_spec(device):\n device = device.to_string()\n self.device = device\n\n def _set_device_from_string(self, device_str):\n self.device = device_str\n\n\ndef _get_current_tf_device():\n \"\"\"Return explicit device of current context, otherwise returns `None`.\n\n Returns:\n If the current device scope is explicitly set, it returns a string with\n the device (`CPU` or `GPU`). If the scope is not explicitly set, it will\n return `None`.\n \"\"\"\n graph = get_graph()\n op = _TfDeviceCaptureOp()\n graph._apply_device_functions(op)\n return tfdev.DeviceSpec.from_string(op.device)\n\n\ndef _is_current_explicit_device(device_type):\n \"\"\"Check if the current device is explicitly set on the device type specified.\n\n Arguments:\n device_type: A string containing `GPU` or `CPU` (case-insensitive).\n\n Returns:\n A boolean indicating if the current device scope is explicitly set on the\n device type.\n\n Raises:\n ValueError: If the `device_type` string indicates an unsupported device.\n \"\"\"\n device_type = device_type.upper()\n if device_type not in ['CPU', 'GPU']:\n raise ValueError('`device_type` should be either \"CPU\" or \"GPU\".')\n device = _get_current_tf_device()\n return device is not None and device.device_type == device_type.upper()\n\n\ndef _get_available_gpus():\n \"\"\"Get a list of available gpu devices (formatted as strings).\n\n Returns:\n A list of available GPU devices.\n \"\"\"\n if ops.executing_eagerly_outside_functions():\n # Returns names of devices directly.\n return [d.name for d in config.list_logical_devices('GPU')]\n\n global _LOCAL_DEVICES\n if _LOCAL_DEVICES is None:\n _LOCAL_DEVICES = get_session().list_devices()\n return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']\n\n\ndef _has_nchw_support():\n \"\"\"Check whether the current scope supports NCHW ops.\n\n TensorFlow does not support NCHW on CPU. 
Therefore we check if we are not\n explicitly put on\n CPU, and have GPUs available. In this case there will be soft-placing on the\n GPU device.\n\n Returns:\n bool: if the current scope device placement would support nchw\n \"\"\"\n explicitly_on_cpu = _is_current_explicit_device('CPU')\n gpus_available = bool(_get_available_gpus())\n return not explicitly_on_cpu and gpus_available\n\n\n# VARIABLE MANIPULATION\n\n\ndef _constant_to_tensor(x, dtype):\n \"\"\"Convert the input `x` to a tensor of type `dtype`.\n\n This is slightly faster than the _to_tensor function, at the cost of\n handling fewer cases.\n\n Arguments:\n x: An object to be converted (numpy arrays, floats, ints and lists of\n them).\n dtype: The destination type.\n\n Returns:\n A tensor.\n \"\"\"\n return constant_op.constant(x, dtype=dtype)\n\n\ndef _to_tensor(x, dtype):\n \"\"\"Convert the input `x` to a tensor of type `dtype`.\n\n Arguments:\n x: An object to be converted (numpy array, list, tensors).\n dtype: The destination type.\n\n Returns:\n A tensor.\n \"\"\"\n return ops.convert_to_tensor_v2(x, dtype=dtype)\n\n\n@keras_export('keras.backend.is_sparse')\ndef is_sparse(tensor):\n \"\"\"Returns whether a tensor is a sparse tensor.\n\n Arguments:\n tensor: A tensor instance.\n\n Returns:\n A boolean.\n\n Example:\n\n\n >>> a = tf.keras.backend.placeholder((2, 2), sparse=False)\n >>> print(tf.keras.backend.is_sparse(a))\n False\n >>> b = tf.keras.backend.placeholder((2, 2), sparse=True)\n >>> print(tf.keras.backend.is_sparse(b))\n True\n\n \"\"\"\n return isinstance(tensor, sparse_tensor.SparseTensor)\n\n\n@keras_export('keras.backend.to_dense')\ndef to_dense(tensor):\n \"\"\"Converts a sparse tensor into a dense tensor and returns it.\n\n Arguments:\n tensor: A tensor instance (potentially sparse).\n\n Returns:\n A dense tensor.\n\n Examples:\n\n\n >>> b = tf.keras.backend.placeholder((2, 2), sparse=True)\n >>> print(tf.keras.backend.is_sparse(b))\n True\n >>> c = tf.keras.backend.to_dense(b)\n >>> print(tf.keras.backend.is_sparse(c))\n False\n\n \"\"\"\n if is_sparse(tensor):\n return sparse_ops.sparse_tensor_to_dense(tensor)\n else:\n return tensor\n\n\n@keras_export('keras.backend.name_scope', v1=[])\ndef name_scope(name):\n \"\"\"A context manager for use when defining a Python op.\n\n This context manager pushes a name scope, which will make the name of all\n operations added within it have a prefix.\n\n For example, to define a new Python op called `my_op`:\n\n\n def my_op(a):\n with tf.name_scope(\"MyOp\") as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n # Define some computation that uses `a`.\n return foo_op(..., name=scope)\n\n\n When executed, the Tensor `a` will have the name `MyOp/a`.\n\n Args:\n name: The prefix to use on all names created within the name scope.\n\n Returns:\n Name scope context manager.\n \"\"\"\n return ops.name_scope_v2(name)\n\n# Export V1 version.\nkeras_export(v1=['keras.backend.name_scope'])(ops.name_scope_v1)\n\n\n@keras_export('keras.backend.variable')\ndef variable(value, dtype=None, name=None, constraint=None):\n \"\"\"Instantiates a variable and returns it.\n\n Arguments:\n value: Numpy array, initial value of the tensor.\n dtype: Tensor type.\n name: Optional name string for the tensor.\n constraint: Optional projection function to be\n applied to the variable after an optimizer update.\n\n Returns:\n A variable instance (with Keras metadata included).\n\n Examples:\n\n >>> val = np.array([[1, 2], [3, 4]])\n >>> kvar = tf.keras.backend.variable(value=val, 
dtype='float64',\n ... name='example_var')\n >>> tf.keras.backend.dtype(kvar)\n 'float64'\n >>> print(kvar)\n <tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy=\n array([[1., 2.],\n [3., 4.]])>\n\n \"\"\"\n if dtype is None:\n dtype = floatx()\n if hasattr(value, 'tocoo'):\n sparse_coo = value.tocoo()\n indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(\n sparse_coo.col, 1)), 1)\n v = sparse_tensor.SparseTensor(\n indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)\n v._keras_shape = sparse_coo.shape\n return v\n v = variables_module.Variable(\n value,\n dtype=dtypes_module.as_dtype(dtype),\n name=name,\n constraint=constraint)\n if isinstance(value, np.ndarray):\n v._keras_shape = value.shape\n elif hasattr(value, 'shape'):\n v._keras_shape = int_shape(value)\n track_variable(v)\n return v\n\n\ndef track_tf_optimizer(tf_optimizer):\n \"\"\"Tracks the given TF optimizer for initialization of its variables.\"\"\"\n if context.executing_eagerly():\n return\n optimizers = _GRAPH_TF_OPTIMIZERS[None]\n optimizers.add(tf_optimizer)\n\n\ndef track_variable(v):\n \"\"\"Tracks the given variable for initialization.\"\"\"\n if context.executing_eagerly():\n return\n graph = v.graph if hasattr(v, 'graph') else get_graph()\n _GRAPH_VARIABLES[graph].add(v)\n\n\ndef unique_object_name(name,\n name_uid_map=None,\n avoid_names=None,\n namespace='',\n zero_based=False):\n \"\"\"Makes a object name (or arbitrary string) unique within a TensorFlow graph.\n\n Arguments:\n name: String name to make unique.\n name_uid_map: An optional defaultdict(int) to use when creating unique\n names. If None (default), uses a per-Graph dictionary.\n avoid_names: An optional set or dict with names which should not be used. If\n None (default) does not avoid any names.\n namespace: Gets a name which is unique within the (graph, namespace). Layers\n which are not Networks use a blank namespace and so get graph-global\n names.\n zero_based: If True, name sequences start with no suffix (e.g. \"dense\",\n \"dense_1\"). 
If False, naming is one-based (\"dense_1\", \"dense_2\").\n\n Returns:\n Unique string name.\n\n Example:\n\n\n unique_object_name('dense') # dense_1\n unique_object_name('dense') # dense_2\n\n \"\"\"\n if name_uid_map is None:\n name_uid_map = get_default_graph_uid_map()\n if avoid_names is None:\n avoid_names = set()\n proposed_name = None\n while proposed_name is None or proposed_name in avoid_names:\n name_key = (namespace, name)\n if zero_based:\n number = name_uid_map[name_key]\n if number:\n proposed_name = name + '_' + str(number)\n else:\n proposed_name = name\n name_uid_map[name_key] += 1\n else:\n name_uid_map[name_key] += 1\n proposed_name = name + '_' + str(name_uid_map[name_key])\n return proposed_name\n\n\ndef _get_variables(graph=None):\n \"\"\"Returns variables corresponding to the given graph for initialization.\"\"\"\n assert not context.executing_eagerly()\n variables = _GRAPH_VARIABLES[graph]\n for opt in _GRAPH_TF_OPTIMIZERS[graph]:\n variables.update(opt.optimizer.variables())\n return variables\n\n\ndef _initialize_variables(session):\n \"\"\"Utility to initialize uninitialized variables on the fly.\"\"\"\n variables = _get_variables(get_graph())\n candidate_vars = []\n for v in variables:\n if not getattr(v, '_keras_initialized', False):\n candidate_vars.append(v)\n if candidate_vars:\n # This step is expensive, so we only run it on variables not already\n # marked as initialized.\n is_initialized = session.run(\n [variables_module.is_variable_initialized(v) for v in candidate_vars])\n # TODO(kathywu): Some metric variables loaded from SavedModel are never\n # actually used, and do not have an initializer.\n should_be_initialized = [\n (not is_initialized[n]) and v.initializer is not None\n for n, v in enumerate(candidate_vars)]\n uninitialized_vars = []\n for flag, v in zip(should_be_initialized, candidate_vars):\n if flag:\n uninitialized_vars.append(v)\n v._keras_initialized = True\n if uninitialized_vars:\n session.run(variables_module.variables_initializer(uninitialized_vars))\n\n\n@keras_export('keras.backend.constant')\ndef constant(value, dtype=None, shape=None, name=None):\n \"\"\"Creates a constant tensor.\n\n Arguments:\n value: A constant value (or list)\n dtype: The type of the elements of the resulting tensor.\n shape: Optional dimensions of resulting tensor.\n name: Optional name for the tensor.\n\n Returns:\n A Constant Tensor.\n \"\"\"\n if dtype is None:\n dtype = floatx()\n\n return constant_op.constant(value, dtype=dtype, shape=shape, name=name)\n\n\n@keras_export('keras.backend.is_keras_tensor')\ndef is_keras_tensor(x):\n \"\"\"Returns whether `x` is a Keras tensor.\n\n A \"Keras tensor\" is a tensor that was returned by a Keras layer,\n (`Layer` class) or by `Input`.\n\n Arguments:\n x: A candidate tensor.\n\n Returns:\n A boolean: Whether the argument is a Keras tensor.\n\n Raises:\n ValueError: In case `x` is not a symbolic tensor.\n\n Examples:\n\n >>> np_var = np.array([1, 2])\n >>> # A numpy array is not a symbolic tensor.\n >>> tf.keras.backend.is_keras_tensor(np_var)\n Traceback (most recent call last):\n ...\n ValueError: Unexpectedly found an instance of type `<class 'numpy.ndarray'>`.\n Expected a symbolic tensor instance.\n >>> keras_var = tf.keras.backend.variable(np_var)\n >>> # A variable created with the keras backend is not a Keras tensor.\n >>> tf.keras.backend.is_keras_tensor(keras_var)\n False\n >>> keras_placeholder = tf.keras.backend.placeholder(shape=(2, 4, 5))\n >>> # A placeholder is not a Keras tensor.\n >>> 
tf.keras.backend.is_keras_tensor(keras_placeholder)\n False\n >>> keras_input = tf.keras.layers.Input([10])\n >>> # An Input is a Keras tensor.\n >>> tf.keras.backend.is_keras_tensor(keras_input)\n True\n >>> keras_layer_output = tf.keras.layers.Dense(10)(keras_input)\n >>> # Any Keras layer output is a Keras tensor.\n >>> tf.keras.backend.is_keras_tensor(keras_layer_output)\n True\n\n \"\"\"\n if not isinstance(x,\n (ops.Tensor, variables_module.Variable,\n sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor)):\n raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +\n '`. Expected a symbolic tensor instance.')\n return hasattr(x, '_keras_history')\n\n\n@keras_export('keras.backend.placeholder')\ndef placeholder(shape=None,\n ndim=None,\n dtype=None,\n sparse=False,\n name=None,\n ragged=False):\n \"\"\"Instantiates a placeholder tensor and returns it.\n\n Arguments:\n shape: Shape of the placeholder\n (integer tuple, may include `None` entries).\n ndim: Number of axes of the tensor.\n At least one of {`shape`, `ndim`} must be specified.\n If both are specified, `shape` is used.\n dtype: Placeholder type.\n sparse: Boolean, whether the placeholder should have a sparse type.\n name: Optional name string for the placeholder.\n ragged: Boolean, whether the placeholder should have a ragged type.\n In this case, values of 'None' in the 'shape' argument represent\n ragged dimensions. For more information about RaggedTensors, see this\n [guide](https://www.tensorflow.org/guide/ragged_tensors).\n\n Raises:\n ValueError: If called with eager execution\n ValueError: If called with sparse = True and ragged = True.\n\n Returns:\n Tensor instance (with Keras metadata included).\n\n Examples:\n\n\n >>> input_ph = tf.keras.backend.placeholder(shape=(2, 4, 5))\n >>> input_ph\n <tf.Tensor 'Placeholder_...' 
shape=(2, 4, 5) dtype=float32>\n\n \"\"\"\n if sparse and ragged:\n raise ValueError(\n 'Cannot set both sparse and ragged to True when creating a placeholder.'\n )\n\n if dtype is None:\n dtype = floatx()\n if not shape:\n if ndim:\n shape = (None,) * ndim\n with get_graph().as_default():\n if sparse:\n x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)\n elif ragged:\n ragged_rank = 0\n for i in range(1, len(shape)):\n if shape[i] is None:\n ragged_rank = i\n type_spec = ragged_tensor.RaggedTensorSpec(\n shape=shape, dtype=dtype, ragged_rank=ragged_rank)\n def tensor_spec_to_placeholder(tensorspec):\n return array_ops.placeholder(tensorspec.dtype, tensorspec.shape)\n x = nest.map_structure(tensor_spec_to_placeholder, type_spec,\n expand_composites=True)\n else:\n x = array_ops.placeholder(dtype, shape=shape, name=name)\n return x\n\n\ndef is_placeholder(x):\n \"\"\"Returns whether `x` is a placeholder.\n\n Arguments:\n x: A candidate placeholder.\n\n Returns:\n Boolean.\n \"\"\"\n try:\n if isinstance(x, composite_tensor.CompositeTensor):\n flat_components = nest.flatten(x, expand_composites=True)\n return py_any(is_placeholder(c) for c in flat_components)\n else:\n return x.op.type == 'Placeholder'\n except AttributeError:\n return False\n\n\ndef freezable_variable(value, shape=None, name=None):\n \"\"\"A tensor-like object whose value can be updated only up until execution.\n\n After creating the freezable variable, you can update its value by calling\n `var.update_value(new_value)` (similar to a regular variable).\n Unlike an actual variable, the value used during execution is the current\n value at the time the execution function (`backend.function()`) was created.\n\n This is an internal API, expected to be temporary. It is used to implement a\n mutable `trainable` property for `BatchNormalization` layers, with a frozen\n value after model compilation.\n\n We don't use a plain variable in this case because we need the value used\n in a specific model to be frozen after `compile` has been called\n (e.g. GAN use case).\n\n Arguments:\n value: The initial value for the tensor-like object.\n shape: The shape for the tensor-like object (cannot be changed).\n name: The name for the tensor-like object.\n\n Returns:\n A tensor-like object with a static value that can be updated via\n `x.update_value(new_value)`, up until creating an execution function\n (afterwards the value is fixed).\n \"\"\"\n graph = get_graph()\n with graph.as_default():\n x = array_ops.placeholder_with_default(\n value, shape=shape, name=name)\n x._initial_value = value\n x._current_value = value\n\n def update_value(new_value):\n x._current_value = new_value\n\n def get_value():\n return x._current_value\n\n x.update_value = update_value\n x.get_value = get_value\n\n global _FREEZABLE_VARS\n _FREEZABLE_VARS[graph].add(x)\n return x\n\n\n@keras_export('keras.backend.shape')\ndef shape(x):\n \"\"\"Returns the symbolic shape of a tensor or variable.\n\n Arguments:\n x: A tensor or variable.\n\n Returns:\n A symbolic shape (which is itself a tensor).\n\n Examples:\n\n >>> val = np.array([[1, 2], [3, 4]])\n >>> kvar = tf.keras.backend.variable(value=val)\n >>> tf.keras.backend.shape(kvar)\n <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>\n >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))\n >>> tf.keras.backend.shape(input)\n <tf.Tensor 'Shape_...' 
shape=(3,) dtype=int32>\n\n \"\"\"\n return array_ops.shape(x)\n\n\n@keras_export('keras.backend.int_shape')\ndef int_shape(x):\n \"\"\"Returns the shape of tensor or variable as a tuple of int or None entries.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tuple of integers (or None entries).\n\n Examples:\n\n >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))\n >>> tf.keras.backend.int_shape(input)\n (2, 4, 5)\n >>> val = np.array([[1, 2], [3, 4]])\n >>> kvar = tf.keras.backend.variable(value=val)\n >>> tf.keras.backend.int_shape(kvar)\n (2, 2)\n\n \"\"\"\n try:\n shape = x.shape\n if not isinstance(shape, tuple):\n shape = tuple(shape.as_list())\n return shape\n except ValueError:\n return None\n\n\n@keras_export('keras.backend.ndim')\ndef ndim(x):\n \"\"\"Returns the number of axes in a tensor, as an integer.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n Integer (scalar), number of axes.\n\n Examples:\n\n\n >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))\n >>> val = np.array([[1, 2], [3, 4]])\n >>> kvar = tf.keras.backend.variable(value=val)\n >>> tf.keras.backend.ndim(input)\n 3\n >>> tf.keras.backend.ndim(kvar)\n 2\n\n \"\"\"\n dims = x.shape._dims\n if dims is not None:\n return len(dims)\n return None\n\n\n@keras_export('keras.backend.dtype')\ndef dtype(x):\n \"\"\"Returns the dtype of a Keras tensor or variable, as a string.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n String, dtype of `x`.\n\n Examples:\n\n >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5)))\n 'float32'\n >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),\n ... dtype='float32'))\n 'float32'\n >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),\n ... dtype='float64'))\n 'float64'\n >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]))\n >>> tf.keras.backend.dtype(kvar)\n 'float32'\n >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),\n ... dtype='float32')\n >>> tf.keras.backend.dtype(kvar)\n 'float32'\n\n \"\"\"\n return x.dtype.base_dtype.name\n\n\n@keras_export('keras.backend.eval')\ndef eval(x):\n \"\"\"Evaluates the value of a variable.\n\n Arguments:\n x: A variable.\n\n Returns:\n A Numpy array.\n\n Examples:\n\n >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),\n ... 
dtype='float32')\n >>> tf.keras.backend.eval(kvar)\n array([[1., 2.],\n [3., 4.]], dtype=float32)\n\n \"\"\"\n return get_value(to_dense(x))\n\n\n@keras_export('keras.backend.zeros')\ndef zeros(shape, dtype=None, name=None):\n \"\"\"Instantiates an all-zeros variable and returns it.\n\n Arguments:\n shape: Tuple or list of integers, shape of returned Keras variable\n dtype: data type of returned Keras variable\n name: name of returned Keras variable\n\n Returns:\n A variable (including Keras metadata), filled with `0.0`.\n Note that if `shape` was symbolic, we cannot return a variable,\n and will return a dynamically-shaped tensor instead.\n\n Example:\n\n >>> kvar = tf.keras.backend.zeros((3,4))\n >>> tf.keras.backend.eval(kvar)\n array([[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]], dtype=float32)\n >>> A = tf.constant([1,2,3])\n >>> kvar2 = tf.keras.backend.zeros(A.shape) # [0., 0., 0.]\n >>> tf.keras.backend.eval(kvar2)\n array([0., 0., 0.], dtype=float32)\n >>> kvar3 = tf.keras.backend.zeros(A.shape,dtype=tf.int32)\n >>> tf.keras.backend.eval(kvar3)\n array([0, 0, 0], dtype=int32)\n >>> kvar4 = tf.keras.backend.zeros([2,3])\n >>> tf.keras.backend.eval(kvar4)\n array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)\n\n \"\"\"\n with ops.init_scope():\n if dtype is None:\n dtype = floatx()\n tf_dtype = dtypes_module.as_dtype(dtype)\n v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)\n if py_all(v.shape.as_list()):\n return variable(v, dtype=dtype, name=name)\n return v\n\n\n@keras_export('keras.backend.ones')\ndef ones(shape, dtype=None, name=None):\n \"\"\"Instantiates an all-ones variable and returns it.\n\n Arguments:\n shape: Tuple of integers, shape of returned Keras variable.\n dtype: String, data type of returned Keras variable.\n name: String, name of returned Keras variable.\n\n Returns:\n A Keras variable, filled with `1.0`.\n Note that if `shape` was symbolic, we cannot return a variable,\n and will return a dynamically-shaped tensor instead.\n\n Example:\n\n\n >>> kvar = tf.keras.backend.ones((3,4))\n >>> tf.keras.backend.eval(kvar)\n array([[1., 1., 1., 1.],\n [1., 1., 1., 1.],\n [1., 1., 1., 1.]], dtype=float32)\n\n \"\"\"\n with ops.init_scope():\n if dtype is None:\n dtype = floatx()\n tf_dtype = dtypes_module.as_dtype(dtype)\n v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)\n if py_all(v.shape.as_list()):\n return variable(v, dtype=dtype, name=name)\n return v\n\n\n@keras_export('keras.backend.eye')\ndef eye(size, dtype=None, name=None):\n \"\"\"Instantiate an identity matrix and returns it.\n\n Arguments:\n size: Integer, number of rows/columns.\n dtype: String, data type of returned Keras variable.\n name: String, name of returned Keras variable.\n\n Returns:\n A Keras variable, an identity matrix.\n\n Example:\n\n\n >>> kvar = tf.keras.backend.eye(3)\n >>> tf.keras.backend.eval(kvar)\n array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]], dtype=float32)\n\n\n \"\"\"\n if dtype is None:\n dtype = floatx()\n tf_dtype = dtypes_module.as_dtype(dtype)\n return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)\n\n\n@keras_export('keras.backend.zeros_like')\ndef zeros_like(x, dtype=None, name=None):\n \"\"\"Instantiates an all-zeros variable of the same shape as another tensor.\n\n Arguments:\n x: Keras variable or Keras tensor.\n dtype: dtype of returned Keras variable.\n `None` uses the dtype of `x`.\n name: name for the variable to create.\n\n Returns:\n A Keras variable with the shape of `x` filled with zeros.\n\n Example:\n\n\n 
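A minimal doctest-style sketch that also exercises the `dtype` argument:\n\n >>> x = tf.keras.backend.constant([[1., 2., 3.], [4., 5., 6.]])\n >>> tf.keras.backend.eval(tf.keras.backend.zeros_like(x, dtype='int32'))\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int32)\n\n Using the `K` alias:\n\n 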
from tensorflow.keras import backend as K\n kvar = K.variable(np.random.random((2,3)))\n kvar_zeros = K.zeros_like(kvar)\n K.eval(kvar_zeros)\n # array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32)\n\n\n \"\"\"\n return array_ops.zeros_like(x, dtype=dtype, name=name)\n\n\n@keras_export('keras.backend.ones_like')\ndef ones_like(x, dtype=None, name=None):\n \"\"\"Instantiates an all-ones variable of the same shape as another tensor.\n\n Arguments:\n x: Keras variable or tensor.\n dtype: String, dtype of returned Keras variable.\n None uses the dtype of x.\n name: String, name for the variable to create.\n\n Returns:\n A Keras variable with the shape of x filled with ones.\n\n Example:\n\n >>> kvar = tf.keras.backend.variable(np.random.random((2,3)))\n >>> kvar_ones = tf.keras.backend.ones_like(kvar)\n >>> tf.keras.backend.eval(kvar_ones)\n array([[1., 1., 1.],\n [1., 1., 1.]], dtype=float32)\n\n \"\"\"\n return array_ops.ones_like(x, dtype=dtype, name=name)\n\n\ndef identity(x, name=None):\n \"\"\"Returns a tensor with the same content as the input tensor.\n\n Arguments:\n x: The input tensor.\n name: String, name for the variable to create.\n\n Returns:\n A tensor of the same shape, type and content.\n \"\"\"\n return array_ops.identity(x, name=name)\n\n\n@keras_export('keras.backend.random_uniform_variable')\ndef random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):\n \"\"\"Instantiates a variable with values drawn from a uniform distribution.\n\n Arguments:\n shape: Tuple of integers, shape of returned Keras variable.\n low: Float, lower boundary of the output interval.\n high: Float, upper boundary of the output interval.\n dtype: String, dtype of returned Keras variable.\n name: String, name of returned Keras variable.\n seed: Integer, random seed.\n\n Returns:\n A Keras variable, filled with drawn samples.\n\n Example:\n\n >>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3),\n ... low=0.0, high=1.0)\n >>> kvar\n <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,\n dtype=float32)>\n \"\"\"\n if dtype is None:\n dtype = floatx()\n tf_dtype = dtypes_module.as_dtype(dtype)\n if seed is None:\n # ensure that randomness is conditioned by the Numpy RNG\n seed = np.random.randint(10e8)\n value = init_ops.random_uniform_initializer(\n low, high, dtype=tf_dtype, seed=seed)(shape)\n return variable(value, dtype=dtype, name=name)\n\n\n@keras_export('keras.backend.random_normal_variable')\ndef random_normal_variable(shape, mean, scale, dtype=None, name=None,\n seed=None):\n \"\"\"Instantiates a variable with values drawn from a normal distribution.\n\n Arguments:\n shape: Tuple of integers, shape of returned Keras variable.\n mean: Float, mean of the normal distribution.\n scale: Float, standard deviation of the normal distribution.\n dtype: String, dtype of returned Keras variable.\n name: String, name of returned Keras variable.\n seed: Integer, random seed.\n\n Returns:\n A Keras variable, filled with drawn samples.\n\n Example:\n\n >>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3),\n ... 
mean=0.0, scale=1.0)\n >>> kvar\n <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,\n dtype=float32)>\n \"\"\"\n if dtype is None:\n dtype = floatx()\n tf_dtype = dtypes_module.as_dtype(dtype)\n if seed is None:\n # ensure that randomness is conditioned by the Numpy RNG\n seed = np.random.randint(10e8)\n value = init_ops.random_normal_initializer(\n mean, scale, dtype=tf_dtype, seed=seed)(shape)\n return variable(value, dtype=dtype, name=name)\n\n\n@keras_export('keras.backend.count_params')\ndef count_params(x):\n \"\"\"Returns the static number of elements in a variable or tensor.\n\n Arguments:\n x: Variable or tensor.\n\n Returns:\n Integer, the number of scalars in `x`.\n\n Example:\n\n >>> kvar = tf.keras.backend.zeros((2,3))\n >>> tf.keras.backend.count_params(kvar)\n 6\n >>> tf.keras.backend.eval(kvar)\n array([[0., 0., 0.],\n [0., 0., 0.]], dtype=float32)\n\n \"\"\"\n return np.prod(x.shape.as_list())\n\n\n@keras_export('keras.backend.cast')\ndef cast(x, dtype):\n \"\"\"Casts a tensor to a different dtype and returns it.\n\n You can cast a Keras variable but it still returns a Keras tensor.\n\n Arguments:\n x: Keras tensor (or variable).\n dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).\n\n Returns:\n Keras tensor with dtype `dtype`.\n\n Examples:\n Cast a float32 variable to a float64 tensor\n\n >>> input = tf.keras.backend.ones(shape=(1,3))\n >>> print(input)\n <tf.Variable 'Variable:0' shape=(1, 3) dtype=float32,\n numpy=array([[1., 1., 1.]], dtype=float32)>\n >>> cast_input = tf.keras.backend.cast(input, dtype='float64')\n >>> print(cast_input)\n tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)\n\n \"\"\"\n return math_ops.cast(x, dtype)\n\n\n# UPDATES OPS\n\n\n@keras_export('keras.backend.update')\ndef update(x, new_x):\n return state_ops.assign(x, new_x)\n\n\n@keras_export('keras.backend.update_add')\ndef update_add(x, increment):\n \"\"\"Update the value of `x` by adding `increment`.\n\n Arguments:\n x: A Variable.\n increment: A tensor of same shape as `x`.\n\n Returns:\n The variable `x` updated.\n \"\"\"\n return state_ops.assign_add(x, increment)\n\n\n@keras_export('keras.backend.update_sub')\ndef update_sub(x, decrement):\n \"\"\"Update the value of `x` by subtracting `decrement`.\n\n Arguments:\n x: A Variable.\n decrement: A tensor of same shape as `x`.\n\n Returns:\n The variable `x` updated.\n \"\"\"\n return state_ops.assign_sub(x, decrement)\n\n\n@keras_export('keras.backend.moving_average_update')\ndef moving_average_update(x, value, momentum):\n \"\"\"Compute the moving average of a variable.\n\n Arguments:\n x: A Variable.\n value: A tensor with the same shape as `variable`.\n momentum: The moving average momentum.\n\n Returns:\n An Operation to update the variable.\n \"\"\"\n zero_debias = not tf2.enabled()\n return moving_averages.assign_moving_average(\n x, value, momentum, zero_debias=zero_debias)\n\n\n# LINEAR ALGEBRA\n\n\n@keras_export('keras.backend.dot')\ndef dot(x, y):\n \"\"\"Multiplies 2 tensors (and/or variables) and returns a tensor.\n\n Arguments:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A tensor, dot product of `x` and `y`.\n\n Examples:\n\n >>> x = tf.keras.backend.placeholder(shape=(2, 3))\n >>> y = tf.keras.backend.placeholder(shape=(3, 4))\n >>> xy = tf.keras.backend.dot(x, y)\n >>> xy\n <tf.Tensor ... 
shape=(2, 4) dtype=float32>\n\n >>> x = tf.keras.backend.placeholder(shape=(32, 28, 3))\n >>> y = tf.keras.backend.placeholder(shape=(3, 4))\n >>> xy = tf.keras.backend.dot(x, y)\n >>> xy\n <tf.Tensor ... shape=(32, 28, 4) dtype=float32>\n\n >>> x = tf.keras.backend.random_uniform_variable(shape=(2, 3), low=0, high=1)\n >>> y = tf.keras.backend.ones((4, 3, 5))\n >>> xy = tf.keras.backend.dot(x, y)\n >>> tf.keras.backend.int_shape(xy)\n (2, 4, 5)\n \"\"\"\n if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):\n x_shape = []\n for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):\n if i is not None:\n x_shape.append(i)\n else:\n x_shape.append(s)\n x_shape = tuple(x_shape)\n y_shape = []\n for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):\n if i is not None:\n y_shape.append(i)\n else:\n y_shape.append(s)\n y_shape = tuple(y_shape)\n y_permute_dim = list(range(ndim(y)))\n y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim\n xt = array_ops.reshape(x, [-1, x_shape[-1]])\n yt = array_ops.reshape(\n array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])\n return array_ops.reshape(\n math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])\n if is_sparse(x):\n out = sparse_ops.sparse_tensor_dense_matmul(x, y)\n else:\n out = math_ops.matmul(x, y)\n return out\n\n\n@keras_export('keras.backend.batch_dot')\ndef batch_dot(x, y, axes=None):\n \"\"\"Batchwise dot product.\n\n `batch_dot` is used to compute dot product of `x` and `y` when\n `x` and `y` are data in batch, i.e. in a shape of\n `(batch_size, :)`.\n `batch_dot` results in a tensor or variable with less dimensions\n than the input. If the number of dimensions is reduced to 1,\n we use `expand_dims` to make sure that ndim is at least 2.\n\n Arguments:\n x: Keras tensor or variable with `ndim >= 2`.\n y: Keras tensor or variable with `ndim >= 2`.\n axes: Tuple or list of integers with target dimensions, or single integer.\n The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]` should be equal.\n\n Returns:\n A tensor with shape equal to the concatenation of `x`'s shape\n (less the dimension that was summed over) and `y`'s shape\n (less the batch dimension and the dimension that was summed over).\n If the final rank is 1, we reshape it to `(batch_size, 1)`.\n\n Examples:\n\n >>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1))\n >>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20))\n >>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2))\n >>> tf.keras.backend.int_shape(xy_batch_dot)\n (32, 1, 30)\n\n Shape inference:\n Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.\n If `axes` is (1, 2), to find the output shape of resultant tensor,\n loop through each dimension in `x`'s shape and `y`'s shape:\n * `x.shape[0]` : 100 : append to output shape\n * `x.shape[1]` : 20 : do not append to output shape,\n dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)\n * `y.shape[0]` : 100 : do not append to output shape,\n always ignore first dimension of `y`\n * `y.shape[1]` : 30 : append to output shape\n * `y.shape[2]` : 20 : do not append to output shape,\n dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)\n `output_shape` = `(100, 30)`\n \"\"\"\n x_shape = int_shape(x)\n y_shape = int_shape(y)\n\n x_ndim = len(x_shape)\n y_ndim = len(y_shape)\n\n if x_ndim < 2 or y_ndim < 2:\n raise ValueError('Cannot do batch_dot on inputs '\n 'with rank < 2. 
'\n 'Received inputs with shapes ' +\n str(x_shape) + ' and ' +\n str(y_shape) + '.')\n\n x_batch_size = x_shape[0]\n y_batch_size = y_shape[0]\n\n if x_batch_size is not None and y_batch_size is not None:\n if x_batch_size != y_batch_size:\n raise ValueError('Cannot do batch_dot on inputs '\n 'with different batch sizes. '\n 'Received inputs with shapes ' +\n str(x_shape) + ' and ' +\n str(y_shape) + '.')\n if isinstance(axes, int):\n axes = [axes, axes]\n\n if axes is None:\n if y_ndim == 2:\n axes = [x_ndim - 1, y_ndim - 1]\n else:\n axes = [x_ndim - 1, y_ndim - 2]\n\n if py_any(isinstance(a, (list, tuple)) for a in axes):\n raise ValueError('Multiple target dimensions are not supported. ' +\n 'Expected: None, int, (int, int), ' +\n 'Provided: ' + str(axes))\n\n # if tuple, convert to list.\n axes = list(axes)\n\n # convert negative indices.\n if axes[0] < 0:\n axes[0] += x_ndim\n if axes[1] < 0:\n axes[1] += y_ndim\n\n # sanity checks\n if 0 in axes:\n raise ValueError('Cannot perform batch_dot over axis 0. '\n 'If your inputs are not batched, '\n 'add a dummy batch dimension to your '\n 'inputs using K.expand_dims(x, 0)')\n a0, a1 = axes\n d1 = x_shape[a0]\n d2 = y_shape[a1]\n\n if d1 is not None and d2 is not None and d1 != d2:\n raise ValueError('Cannot do batch_dot on inputs with shapes ' +\n str(x_shape) + ' and ' + str(y_shape) +\n ' with axes=' + str(axes) + '. x.shape[%d] != '\n 'y.shape[%d] (%d != %d).' % (axes[0], axes[1], d1, d2))\n\n # backup ndims. Need them later.\n orig_x_ndim = x_ndim\n orig_y_ndim = y_ndim\n\n # if rank is 2, expand to 3.\n if x_ndim == 2:\n x = array_ops.expand_dims(x, 1)\n a0 += 1\n x_ndim += 1\n if y_ndim == 2:\n y = array_ops.expand_dims(y, 2)\n y_ndim += 1\n\n # bring x's dimension to be reduced to last axis.\n if a0 != x_ndim - 1:\n pattern = list(range(x_ndim))\n for i in range(a0, x_ndim - 1):\n pattern[i] = pattern[i + 1]\n pattern[-1] = a0\n x = array_ops.transpose(x, pattern)\n\n # bring y's dimension to be reduced to axis 1.\n if a1 != 1:\n pattern = list(range(y_ndim))\n for i in range(a1, 1, -1):\n pattern[i] = pattern[i - 1]\n pattern[1] = a1\n y = array_ops.transpose(y, pattern)\n\n # normalize both inputs to rank 3.\n if x_ndim > 3:\n # squash middle dimensions of x.\n x_shape = shape(x)\n x_mid_dims = x_shape[1:-1]\n x_squashed_shape = array_ops.stack(\n [x_shape[0], -1, x_shape[-1]])\n x = array_ops.reshape(x, x_squashed_shape)\n x_squashed = True\n else:\n x_squashed = False\n\n if y_ndim > 3:\n # squash trailing dimensions of y.\n y_shape = shape(y)\n y_trail_dims = y_shape[2:]\n y_squashed_shape = array_ops.stack(\n [y_shape[0], y_shape[1], -1])\n y = array_ops.reshape(y, y_squashed_shape)\n y_squashed = True\n else:\n y_squashed = False\n\n result = math_ops.matmul(x, y)\n\n # if inputs were squashed, we have to reshape the matmul output.\n output_shape = array_ops.shape(result)\n do_reshape = False\n\n if x_squashed:\n output_shape = array_ops.concat(\n [output_shape[:1],\n x_mid_dims,\n output_shape[-1:]], 0)\n do_reshape = True\n\n if y_squashed:\n output_shape = array_ops.concat([output_shape[:-1], y_trail_dims], 0)\n do_reshape = True\n\n if do_reshape:\n result = array_ops.reshape(result, output_shape)\n\n # if the inputs were originally rank 2, we remove the added 1 dim.\n if orig_x_ndim == 2:\n result = array_ops.squeeze(result, 1)\n elif orig_y_ndim == 2:\n result = array_ops.squeeze(result, -1)\n\n return result\n\n\n@keras_export('keras.backend.transpose')\ndef transpose(x):\n \"\"\"Transposes a tensor and returns 
it.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n\n Examples:\n\n >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])\n >>> tf.keras.backend.eval(var)\n array([[1., 2., 3.],\n [4., 5., 6.]], dtype=float32)\n >>> var_transposed = tf.keras.backend.transpose(var)\n >>> tf.keras.backend.eval(var_transposed)\n array([[1., 4.],\n [2., 5.],\n [3., 6.]], dtype=float32)\n >>> input = tf.keras.backend.placeholder((2, 3))\n >>> input\n <tf.Tensor 'Placeholder_...' shape=(2, 3) dtype=float32>\n >>> input_transposed = tf.keras.backend.transpose(input)\n >>> input_transposed\n <tf.Tensor 'Transpose_...' shape=(3, 2) dtype=float32>\n \"\"\"\n return array_ops.transpose(x)\n\n\n@keras_export('keras.backend.gather')\ndef gather(reference, indices):\n \"\"\"Retrieves the elements of indices `indices` in the tensor `reference`.\n\n Arguments:\n reference: A tensor.\n indices: An integer tensor of indices.\n\n Returns:\n A tensor of same type as `reference`.\n\n Examples:\n\n >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])\n >>> tf.keras.backend.eval(var)\n array([[1., 2., 3.],\n [4., 5., 6.]], dtype=float32)\n >>> var_gathered = tf.keras.backend.gather(var, [0])\n >>> tf.keras.backend.eval(var_gathered)\n array([[1., 2., 3.]], dtype=float32)\n >>> var_gathered = tf.keras.backend.gather(var, [1])\n >>> tf.keras.backend.eval(var_gathered)\n array([[4., 5., 6.]], dtype=float32)\n >>> var_gathered = tf.keras.backend.gather(var, [0,1,0])\n >>> tf.keras.backend.eval(var_gathered)\n array([[1., 2., 3.],\n [4., 5., 6.],\n [1., 2., 3.]], dtype=float32)\n \"\"\"\n return array_ops.gather(reference, indices)\n\n\n# ELEMENT-WISE OPERATIONS\n\n\n@keras_export('keras.backend.max')\ndef max(x, axis=None, keepdims=False):\n \"\"\"Maximum value in a tensor.\n\n Arguments:\n x: A tensor or variable.\n axis: An integer, the axis to find maximum values.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n Returns:\n A tensor with maximum values of `x`.\n \"\"\"\n return math_ops.reduce_max(x, axis, keepdims)\n\n\n@keras_export('keras.backend.min')\ndef min(x, axis=None, keepdims=False):\n \"\"\"Minimum value in a tensor.\n\n Arguments:\n x: A tensor or variable.\n axis: An integer, the axis to find minimum values.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n Returns:\n A tensor with minimum values of `x`.\n \"\"\"\n return math_ops.reduce_min(x, axis, keepdims)\n\n\n@keras_export('keras.backend.sum')\ndef sum(x, axis=None, keepdims=False):\n \"\"\"Sum of the values in a tensor, alongside the specified axis.\n\n Arguments:\n x: A tensor or variable.\n axis: An integer, the axis to sum over.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. 
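For instance (a minimal sketch), summing a `(2, 3)` tensor over `axis=1` yields a tensor of shape `(2,)`:\n\n >>> x = tf.keras.backend.constant([[1., 2., 3.], [4., 5., 6.]])\n >>> tf.keras.backend.eval(tf.keras.backend.sum(x, axis=1))\n array([ 6., 15.], dtype=float32)\n\n 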
If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n Returns:\n A tensor with sum of `x`.\n \"\"\"\n return math_ops.reduce_sum(x, axis, keepdims)\n\n\n@keras_export('keras.backend.prod')\ndef prod(x, axis=None, keepdims=False):\n \"\"\"Multiplies the values in a tensor, alongside the specified axis.\n\n Arguments:\n x: A tensor or variable.\n axis: An integer, the axis to compute the product.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n Returns:\n A tensor with the product of elements of `x`.\n \"\"\"\n return math_ops.reduce_prod(x, axis, keepdims)\n\n\n@keras_export('keras.backend.cumsum')\ndef cumsum(x, axis=0):\n \"\"\"Cumulative sum of the values in a tensor, alongside the specified axis.\n\n Arguments:\n x: A tensor or variable.\n axis: An integer, the axis to compute the sum.\n\n Returns:\n A tensor of the cumulative sum of values of `x` along `axis`.\n \"\"\"\n return math_ops.cumsum(x, axis=axis)\n\n\n@keras_export('keras.backend.cumprod')\ndef cumprod(x, axis=0):\n \"\"\"Cumulative product of the values in a tensor, alongside the specified axis.\n\n Arguments:\n x: A tensor or variable.\n axis: An integer, the axis to compute the product.\n\n Returns:\n A tensor of the cumulative product of values of `x` along `axis`.\n \"\"\"\n return math_ops.cumprod(x, axis=axis)\n\n\n@keras_export('keras.backend.var')\ndef var(x, axis=None, keepdims=False):\n \"\"\"Variance of a tensor, alongside the specified axis.\n\n Arguments:\n x: A tensor or variable.\n axis: An integer, the axis to compute the variance.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\n Returns:\n A tensor with the variance of elements of `x`.\n \"\"\"\n if x.dtype.base_dtype == dtypes_module.bool:\n x = math_ops.cast(x, floatx())\n return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)\n\n\n@keras_export('keras.backend.std')\ndef std(x, axis=None, keepdims=False):\n \"\"\"Standard deviation of a tensor, alongside the specified axis.\n\n It is an alias to `tf.math.reduce_std`.\n\n Arguments:\n x: A tensor or variable. It should have numerical dtypes. Boolean type\n inputs will be converted to float.\n axis: An integer, the axis to compute the standard deviation. If `None`\n (the default), reduces all dimensions. Must be in the range\n `[-rank(x), rank(x))`.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`, the reduced dimension is retained with\n length 1.\n\n Returns:\n A tensor with the standard deviation of elements of `x` with same dtype.\n Boolean type input will be converted to float.\n \"\"\"\n if x.dtype.base_dtype == dtypes_module.bool:\n x = math_ops.cast(x, floatx())\n return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)\n\n\n@keras_export('keras.backend.mean')\ndef mean(x, axis=None, keepdims=False):\n \"\"\"Mean of a tensor, alongside the specified axis.\n\n Arguments:\n x: A tensor or variable.\n axis: A list of integer. Axes to compute the mean.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1 for each entry in `axis`. 
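For example (a minimal sketch contrasting both settings):\n\n >>> x = tf.keras.backend.constant([[1., 2.], [3., 4.]])\n >>> tf.keras.backend.eval(tf.keras.backend.mean(x, axis=1))\n array([1.5, 3.5], dtype=float32)\n >>> tf.keras.backend.eval(tf.keras.backend.mean(x, axis=1, keepdims=True))\n array([[1.5],\n [3.5]], dtype=float32)\n\n 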
If `keepdims` is `True`,\n the reduced dimensions are retained with length 1.\n\n Returns:\n A tensor with the mean of elements of `x`.\n \"\"\"\n if x.dtype.base_dtype == dtypes_module.bool:\n x = math_ops.cast(x, floatx())\n return math_ops.reduce_mean(x, axis, keepdims)\n\n\n@keras_export('keras.backend.any')\ndef any(x, axis=None, keepdims=False):\n \"\"\"Bitwise reduction (logical OR).\n\n Arguments:\n x: Tensor or variable.\n axis: axis along which to perform the reduction.\n keepdims: whether the drop or broadcast the reduction axes.\n\n Returns:\n A uint8 tensor (0s and 1s).\n \"\"\"\n x = math_ops.cast(x, dtypes_module.bool)\n return math_ops.reduce_any(x, axis, keepdims)\n\n\n@keras_export('keras.backend.all')\ndef all(x, axis=None, keepdims=False):\n \"\"\"Bitwise reduction (logical AND).\n\n Arguments:\n x: Tensor or variable.\n axis: axis along which to perform the reduction.\n keepdims: whether the drop or broadcast the reduction axes.\n\n Returns:\n A uint8 tensor (0s and 1s).\n \"\"\"\n x = math_ops.cast(x, dtypes_module.bool)\n return math_ops.reduce_all(x, axis, keepdims)\n\n\n@keras_export('keras.backend.argmax')\ndef argmax(x, axis=-1):\n \"\"\"Returns the index of the maximum value along an axis.\n\n Arguments:\n x: Tensor or variable.\n axis: axis along which to perform the reduction.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.argmax(x, axis)\n\n\n@keras_export('keras.backend.argmin')\ndef argmin(x, axis=-1):\n \"\"\"Returns the index of the minimum value along an axis.\n\n Arguments:\n x: Tensor or variable.\n axis: axis along which to perform the reduction.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.argmin(x, axis)\n\n\n@keras_export('keras.backend.square')\ndef square(x):\n \"\"\"Element-wise square.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.square(x)\n\n\n@keras_export('keras.backend.abs')\ndef abs(x):\n \"\"\"Element-wise absolute value.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.abs(x)\n\n\n@keras_export('keras.backend.sqrt')\ndef sqrt(x):\n \"\"\"Element-wise square root.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n zero = _constant_to_tensor(0., x.dtype.base_dtype)\n inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)\n x = clip_ops.clip_by_value(x, zero, inf)\n return math_ops.sqrt(x)\n\n\n@keras_export('keras.backend.exp')\ndef exp(x):\n \"\"\"Element-wise exponential.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.exp(x)\n\n\n@keras_export('keras.backend.log')\ndef log(x):\n \"\"\"Element-wise log.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.log(x)\n\n\ndef logsumexp(x, axis=None, keepdims=False):\n \"\"\"Computes log(sum(exp(elements across dimensions of a tensor))).\n\n This function is more numerically stable than log(sum(exp(x))).\n It avoids overflows caused by taking the exp of large inputs and\n underflows caused by taking the log of small inputs.\n\n Arguments:\n x: A tensor or variable.\n axis: An integer, the axis to reduce over.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. 
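(Numerically, reducing `[0., 0.]` gives `log(exp(0) + exp(0)) = log(2)`, approximately `0.693`, without materializing `exp` values that could overflow for large inputs.) 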
If `keepdims` is `True`, the reduced dimension is\n retained with length 1.\n\n Returns:\n The reduced tensor.\n \"\"\"\n return math_ops.reduce_logsumexp(x, axis, keepdims)\n\n\n@keras_export('keras.backend.round')\ndef round(x):\n \"\"\"Element-wise rounding to the closest integer.\n\n In case of tie, the rounding mode used is \"half to even\".\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.round(x)\n\n\n@keras_export('keras.backend.sign')\ndef sign(x):\n \"\"\"Element-wise sign.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.sign(x)\n\n\n@keras_export('keras.backend.pow')\ndef pow(x, a):\n \"\"\"Element-wise exponentiation.\n\n Arguments:\n x: Tensor or variable.\n a: Python integer.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.pow(x, a)\n\n\n@keras_export('keras.backend.clip')\ndef clip(x, min_value, max_value):\n \"\"\"Element-wise value clipping.\n\n Arguments:\n x: Tensor or variable.\n min_value: Python float, integer, or tensor.\n max_value: Python float, integer, or tensor.\n\n Returns:\n A tensor.\n \"\"\"\n if (isinstance(min_value, (int, float)) and\n isinstance(max_value, (int, float))):\n if max_value < min_value:\n max_value = min_value\n if min_value is None:\n min_value = -np.inf\n if max_value is None:\n max_value = np.inf\n return clip_ops.clip_by_value(x, min_value, max_value)\n\n\n@keras_export('keras.backend.equal')\ndef equal(x, y):\n \"\"\"Element-wise equality between two tensors.\n\n Arguments:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A bool tensor.\n \"\"\"\n return math_ops.equal(x, y)\n\n\n@keras_export('keras.backend.not_equal')\ndef not_equal(x, y):\n \"\"\"Element-wise inequality between two tensors.\n\n Arguments:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A bool tensor.\n \"\"\"\n return math_ops.not_equal(x, y)\n\n\n@keras_export('keras.backend.greater')\ndef greater(x, y):\n \"\"\"Element-wise truth value of (x > y).\n\n Arguments:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A bool tensor.\n \"\"\"\n return math_ops.greater(x, y)\n\n\n@keras_export('keras.backend.greater_equal')\ndef greater_equal(x, y):\n \"\"\"Element-wise truth value of (x >= y).\n\n Arguments:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A bool tensor.\n \"\"\"\n return math_ops.greater_equal(x, y)\n\n\n@keras_export('keras.backend.less')\ndef less(x, y):\n \"\"\"Element-wise truth value of (x < y).\n\n Arguments:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A bool tensor.\n \"\"\"\n return math_ops.less(x, y)\n\n\n@keras_export('keras.backend.less_equal')\ndef less_equal(x, y):\n \"\"\"Element-wise truth value of (x <= y).\n\n Arguments:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A bool tensor.\n \"\"\"\n return math_ops.less_equal(x, y)\n\n\n@keras_export('keras.backend.maximum')\ndef maximum(x, y):\n \"\"\"Element-wise maximum of two tensors.\n\n Arguments:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A tensor with the element wise maximum value(s) of `x` and `y`.\n\n Examples:\n\n >>> x = tf.Variable([[1, 2], [3, 4]])\n >>> y = tf.Variable([[2, 1], [0, -1]])\n >>> m = tf.keras.backend.maximum(x, y)\n >>> m\n <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n array([[2, 2],\n [3, 4]], dtype=int32)>\n \"\"\"\n return math_ops.maximum(x, y)\n\n\n@keras_export('keras.backend.minimum')\ndef minimum(x, y):\n \"\"\"Element-wise minimum of two 
tensors.\n\n Arguments:\n x: Tensor or variable.\n y: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.minimum(x, y)\n\n\n@keras_export('keras.backend.sin')\ndef sin(x):\n \"\"\"Computes sin of x element-wise.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.sin(x)\n\n\n@keras_export('keras.backend.cos')\ndef cos(x):\n \"\"\"Computes cos of x element-wise.\n\n Arguments:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return math_ops.cos(x)\n\n\ndef _regular_normalize_batch_in_training(x,\n gamma,\n beta,\n reduction_axes,\n epsilon=1e-3):\n \"\"\"Non-fused version of `normalize_batch_in_training`.\n\n Arguments:\n x: Input tensor or variable.\n gamma: Tensor by which to scale the input.\n beta: Tensor with which to center the input.\n reduction_axes: iterable of integers,\n axes over which to normalize.\n epsilon: Fuzz factor.\n\n Returns:\n A tuple length of 3, `(normalized_tensor, mean, variance)`.\n \"\"\"\n mean, var = nn.moments(x, reduction_axes, None, None, False)\n normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)\n return normed, mean, var\n\n\ndef _broadcast_normalize_batch_in_training(x,\n gamma,\n beta,\n reduction_axes,\n epsilon=1e-3):\n \"\"\"Non-fused, broadcast version of `normalize_batch_in_training`.\n\n Arguments:\n x: Input tensor or variable.\n gamma: Tensor by which to scale the input.\n beta: Tensor with which to center the input.\n reduction_axes: iterable of integers,\n axes over which to normalize.\n epsilon: Fuzz factor.\n\n Returns:\n A tuple length of 3, `(normalized_tensor, mean, variance)`.\n \"\"\"\n mean, var = nn.moments(x, reduction_axes, None, None, False)\n target_shape = []\n for axis in range(ndim(x)):\n if axis in reduction_axes:\n target_shape.append(1)\n else:\n target_shape.append(array_ops.shape(x)[axis])\n target_shape = array_ops.stack(target_shape)\n\n broadcast_mean = array_ops.reshape(mean, target_shape)\n broadcast_var = array_ops.reshape(var, target_shape)\n if gamma is None:\n broadcast_gamma = None\n else:\n broadcast_gamma = array_ops.reshape(gamma, target_shape)\n if beta is None:\n broadcast_beta = None\n else:\n broadcast_beta = array_ops.reshape(beta, target_shape)\n\n normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,\n broadcast_beta, broadcast_gamma, epsilon)\n return normed, mean, var\n\n\ndef _fused_normalize_batch_in_training(x,\n gamma,\n beta,\n reduction_axes,\n epsilon=1e-3):\n \"\"\"Fused version of `normalize_batch_in_training`.\n\n Arguments:\n x: Input tensor or variable.\n gamma: Tensor by which to scale the input.\n beta: Tensor with which to center the input.\n reduction_axes: iterable of integers,\n axes over which to normalize.\n epsilon: Fuzz factor.\n\n Returns:\n A tuple length of 3, `(normalized_tensor, mean, variance)`.\n \"\"\"\n if list(reduction_axes) == [0, 1, 2]:\n normalization_axis = 3\n tf_data_format = 'NHWC'\n else:\n normalization_axis = 1\n tf_data_format = 'NCHW'\n\n if gamma is None:\n gamma = constant_op.constant(\n 1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])\n if beta is None:\n beta = constant_op.constant(\n 0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])\n\n return nn.fused_batch_norm(\n x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)\n\n\n@keras_export('keras.backend.normalize_batch_in_training')\ndef normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):\n \"\"\"Computes mean and std for batch then apply batch_normalization 
on batch.\n\n Arguments:\n x: Input tensor or variable.\n gamma: Tensor by which to scale the input.\n beta: Tensor with which to center the input.\n reduction_axes: iterable of integers,\n axes over which to normalize.\n epsilon: Fuzz factor.\n\n Returns:\n A tuple length of 3, `(normalized_tensor, mean, variance)`.\n \"\"\"\n if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:\n if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:\n return _broadcast_normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=epsilon)\n return _fused_normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=epsilon)\n else:\n if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:\n return _regular_normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=epsilon)\n else:\n return _broadcast_normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=epsilon)\n\n\n@keras_export('keras.backend.batch_normalization')\ndef batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):\n \"\"\"Applies batch normalization on x given mean, var, beta and gamma.\n\n I.e. returns:\n `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`\n\n Arguments:\n x: Input tensor or variable.\n mean: Mean of batch.\n var: Variance of batch.\n beta: Tensor with which to center the input.\n gamma: Tensor by which to scale the input.\n axis: Integer, the axis that should be normalized.\n (typically the features axis).\n epsilon: Fuzz factor.\n\n Returns:\n A tensor.\n \"\"\"\n if ndim(x) == 4:\n # The CPU implementation of `fused_batch_norm` only supports NHWC\n if axis == 1 or axis == -3:\n tf_data_format = 'NCHW'\n elif axis == 3 or axis == -1:\n tf_data_format = 'NHWC'\n else:\n tf_data_format = None\n\n if (tf_data_format == 'NHWC' or\n tf_data_format == 'NCHW' and _has_nchw_support()):\n # The mean / var / beta / gamma tensors may be broadcasted\n # so they may have extra axes of size 1, which should be squeezed.\n if ndim(mean) > 1:\n mean = array_ops.reshape(mean, [-1])\n if ndim(var) > 1:\n var = array_ops.reshape(var, [-1])\n if beta is None:\n beta = zeros_like(mean)\n elif ndim(beta) > 1:\n beta = array_ops.reshape(beta, [-1])\n if gamma is None:\n gamma = ones_like(mean)\n elif ndim(gamma) > 1:\n gamma = array_ops.reshape(gamma, [-1])\n y, _, _ = nn.fused_batch_norm(\n x,\n gamma,\n beta,\n epsilon=epsilon,\n mean=mean,\n variance=var,\n data_format=tf_data_format,\n is_training=False\n )\n return y\n return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)\n\n\n# SHAPE OPERATIONS\n\n\n@keras_export('keras.backend.concatenate')\ndef concatenate(tensors, axis=-1):\n \"\"\"Concatenates a list of tensors alongside the specified axis.\n\n Arguments:\n tensors: list of tensors to concatenate.\n axis: concatenation axis.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]])\n >>> tf.keras.backend.concatenate((a, b), axis=-1)\n <tf.Tensor: shape=(3, 6), dtype=int32, numpy=\n array([[ 1, 2, 3, 10, 20, 30],\n [ 4, 5, 6, 40, 50, 60],\n [ 7, 8, 9, 70, 80, 90]], dtype=int32)>\n\n \"\"\"\n if axis < 0:\n rank = ndim(tensors[0])\n if rank:\n axis %= rank\n else:\n axis = 0\n\n if py_all(is_sparse(x) for x in tensors):\n return sparse_ops.sparse_concat(axis, tensors)\n elif py_all(isinstance(x, ragged_tensor.RaggedTensor) for x in tensors):\n return ragged_concat_ops.concat(tensors, axis)\n else:\n return 
array_ops.concat([to_dense(x) for x in tensors], axis)\n\n\n@keras_export('keras.backend.reshape')\ndef reshape(x, shape):\n \"\"\"Reshapes a tensor to the specified shape.\n\n Arguments:\n x: Tensor or variable.\n shape: Target shape tuple.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n >>> a\n <tf.Tensor: shape=(4, 3), dtype=int32, numpy=\n array([[ 1, 2, 3],\n [ 4, 5, 6],\n [ 7, 8, 9],\n [10, 11, 12]], dtype=int32)>\n >>> tf.keras.backend.reshape(a, shape=(2, 6))\n <tf.Tensor: shape=(2, 6), dtype=int32, numpy=\n array([[ 1, 2, 3, 4, 5, 6],\n [ 7, 8, 9, 10, 11, 12]], dtype=int32)>\n\n \"\"\"\n return array_ops.reshape(x, shape)\n\n\n@keras_export('keras.backend.permute_dimensions')\ndef permute_dimensions(x, pattern):\n \"\"\"Permutes axes in a tensor.\n\n Arguments:\n x: Tensor or variable.\n pattern: A tuple of\n dimension indices, e.g. `(0, 2, 1)`.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n >>> a\n <tf.Tensor: shape=(4, 3), dtype=int32, numpy=\n array([[ 1, 2, 3],\n [ 4, 5, 6],\n [ 7, 8, 9],\n [10, 11, 12]], dtype=int32)>\n >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))\n <tf.Tensor: shape=(3, 4), dtype=int32, numpy=\n array([[ 1, 4, 7, 10],\n [ 2, 5, 8, 11],\n [ 3, 6, 9, 12]], dtype=int32)>\n\n \"\"\"\n return array_ops.transpose(x, perm=pattern)\n\n\n@keras_export('keras.backend.resize_images')\ndef resize_images(x, height_factor, width_factor, data_format,\n interpolation='nearest'):\n \"\"\"Resizes the images contained in a 4D tensor.\n\n Arguments:\n x: Tensor or variable to resize.\n height_factor: Positive integer.\n width_factor: Positive integer.\n data_format: One of `\"channels_first\"`, `\"channels_last\"`.\n interpolation: A string, one of `nearest` or `bilinear`.\n\n Returns:\n A tensor.\n\n Raises:\n ValueError: in case of incorrect value for\n `data_format` or `interpolation`.\n \"\"\"\n if data_format == 'channels_first':\n rows, cols = 2, 3\n elif data_format == 'channels_last':\n rows, cols = 1, 2\n else:\n raise ValueError('Invalid `data_format` argument: %s' % (data_format,))\n\n original_shape = int_shape(x)\n new_shape = array_ops.shape(x)[rows:cols + 1]\n new_shape *= constant_op.constant(\n np.array([height_factor, width_factor], dtype='int32'))\n\n if data_format == 'channels_first':\n x = permute_dimensions(x, [0, 2, 3, 1])\n if interpolation == 'nearest':\n x = image_ops.resize_images_v2(\n x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)\n elif interpolation == 'bilinear':\n x = image_ops.resize_images_v2(x, new_shape,\n method=image_ops.ResizeMethod.BILINEAR)\n else:\n raise ValueError('interpolation should be one '\n 'of \"nearest\" or \"bilinear\".')\n if data_format == 'channels_first':\n x = permute_dimensions(x, [0, 3, 1, 2])\n\n if original_shape[rows] is None:\n new_height = None\n else:\n new_height = original_shape[rows] * height_factor\n\n if original_shape[cols] is None:\n new_width = None\n else:\n new_width = original_shape[cols] * width_factor\n\n if data_format == 'channels_first':\n output_shape = (None, None, new_height, new_width)\n else:\n output_shape = (None, new_height, new_width, None)\n x.set_shape(output_shape)\n return x\n\n\n@keras_export('keras.backend.resize_volumes')\ndef resize_volumes(x, depth_factor, height_factor, width_factor, data_format):\n \"\"\"Resizes the volume contained in a 5D tensor.\n\n Arguments:\n x: Tensor or variable to resize.\n depth_factor: 
Positive integer.\n height_factor: Positive integer.\n width_factor: Positive integer.\n data_format: One of `\"channels_first\"`, `\"channels_last\"`.\n\n Returns:\n A tensor.\n\n Raises:\n ValueError: if `data_format` is neither\n `channels_last` or `channels_first`.\n \"\"\"\n if data_format == 'channels_first':\n output = repeat_elements(x, depth_factor, axis=2)\n output = repeat_elements(output, height_factor, axis=3)\n output = repeat_elements(output, width_factor, axis=4)\n return output\n elif data_format == 'channels_last':\n output = repeat_elements(x, depth_factor, axis=1)\n output = repeat_elements(output, height_factor, axis=2)\n output = repeat_elements(output, width_factor, axis=3)\n return output\n else:\n raise ValueError('Invalid data_format: ' + str(data_format))\n\n\n@keras_export('keras.backend.repeat_elements')\ndef repeat_elements(x, rep, axis):\n \"\"\"Repeats the elements of a tensor along an axis, like `np.repeat`.\n\n If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output\n will have shape `(s1, s2 * rep, s3)`.\n\n Arguments:\n x: Tensor or variable.\n rep: Python integer, number of times to repeat.\n axis: Axis along which to repeat.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> b = tf.constant([1, 2, 3])\n >>> tf.keras.backend.repeat_elements(b, rep=2, axis=0)\n <tf.Tensor: shape=(6,), dtype=int32,\n numpy=array([1, 1, 2, 2, 3, 3], dtype=int32)>\n\n \"\"\"\n x_shape = x.shape.as_list()\n # For static axis\n if x_shape[axis] is not None:\n # slices along the repeat axis\n splits = array_ops.split(value=x,\n num_or_size_splits=x_shape[axis],\n axis=axis)\n # repeat each slice the given number of reps\n x_rep = [s for s in splits for _ in range(rep)]\n return concatenate(x_rep, axis)\n\n # Here we use tf.tile to mimic behavior of np.repeat so that\n # we can handle dynamic shapes (that include None).\n # To do that, we need an auxiliary axis to repeat elements along\n # it and then merge them along the desired axis.\n\n # Repeating\n auxiliary_axis = axis + 1\n x_shape = array_ops.shape(x)\n x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)\n reps = np.ones(len(x.shape) + 1)\n reps[auxiliary_axis] = rep\n x_rep = array_ops.tile(x_rep, reps)\n\n # Merging\n reps = np.delete(reps, auxiliary_axis)\n reps[axis] = rep\n reps = array_ops.constant(reps, dtype='int32')\n x_shape *= reps\n x_rep = array_ops.reshape(x_rep, x_shape)\n\n # Fix shape representation\n x_shape = x.shape.as_list()\n x_rep.set_shape(x_shape)\n x_rep._keras_shape = tuple(x_shape)\n return x_rep\n\n\n@keras_export('keras.backend.repeat')\ndef repeat(x, n):\n \"\"\"Repeats a 2D tensor.\n\n if `x` has shape (samples, dim) and `n` is `2`,\n the output will have shape `(samples, 2, dim)`.\n\n Arguments:\n x: Tensor or variable.\n n: Python integer, number of times to repeat.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> b = tf.constant([[1, 2], [3, 4]])\n >>> b\n <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4]], dtype=int32)>\n >>> tf.keras.backend.repeat(b, n=2)\n <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=\n array([[[1, 2],\n [1, 2]],\n [[3, 4],\n [3, 4]]], dtype=int32)>\n\n \"\"\"\n assert ndim(x) == 2\n x = array_ops.expand_dims(x, 1)\n pattern = array_ops.stack([1, n, 1])\n return array_ops.tile(x, pattern)\n\n\n@keras_export('keras.backend.arange')\ndef arange(start, stop=None, step=1, dtype='int32'):\n \"\"\"Creates a 1D tensor containing a sequence of integers.\n\n The function arguments use the same convention as\n Theano's arange: if only one argument is 
provided,\n it is in fact the \"stop\" argument and \"start\" is 0.\n\n The default type of the returned tensor is `'int32'` to\n match TensorFlow's default.\n\n Arguments:\n start: Start value.\n stop: Stop value.\n step: Difference between two successive values.\n dtype: Integer dtype to use.\n\n Returns:\n An integer tensor.\n\n Example:\n\n >>> tf.keras.backend.arange(start=0, stop=10, step=1.5)\n <tf.Tensor: shape=(7,), dtype=float32,\n numpy=array([0. , 1.5, 3. , 4.5, 6. , 7.5, 9. ], dtype=float32)>\n\n\n\n \"\"\"\n # Match the behavior of numpy and Theano by returning an empty sequence.\n if stop is None and start < 0:\n start = 0\n result = math_ops.range(start, limit=stop, delta=step, name='arange')\n if dtype != 'int32':\n result = cast(result, dtype)\n return result\n\n\n@keras_export('keras.backend.tile')\ndef tile(x, n):\n \"\"\"Creates a tensor by tiling `x` by `n`.\n\n Arguments:\n x: A tensor or variable\n n: A list of integer. The length must be the same as the number of\n dimensions in `x`.\n\n Returns:\n A tiled tensor.\n \"\"\"\n if isinstance(n, int):\n n = [n]\n return array_ops.tile(x, n)\n\n\n@keras_export('keras.backend.flatten')\ndef flatten(x):\n \"\"\"Flatten a tensor.\n\n Arguments:\n x: A tensor or variable.\n\n Returns:\n A tensor, reshaped into 1-D\n\n Example:\n\n >>> b = tf.constant([[1, 2], [3, 4]])\n >>> b\n <tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n array([[1, 2],\n [3, 4]], dtype=int32)>\n >>> tf.keras.backend.flatten(b)\n <tf.Tensor: shape=(4,), dtype=int32,\n numpy=array([1, 2, 3, 4], dtype=int32)>\n\n \"\"\"\n return array_ops.reshape(x, [-1])\n\n\n@keras_export('keras.backend.batch_flatten')\ndef batch_flatten(x):\n \"\"\"Turn a nD tensor into a 2D tensor with same 0th dimension.\n\n In other words, it flattens each data samples of a batch.\n\n Arguments:\n x: A tensor or variable.\n\n Returns:\n A tensor.\n\n Examples:\n Flattening a 3D tensor to 2D by collapsing the last dimension.\n\n >>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5))\n >>> x_batch_flatten = batch_flatten(x_batch)\n >>> tf.keras.backend.int_shape(x_batch_flatten)\n (2, 60)\n\n \"\"\"\n x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))\n return x\n\n\n@keras_export('keras.backend.expand_dims')\ndef expand_dims(x, axis=-1):\n \"\"\"Adds a 1-sized dimension at index \"axis\".\n\n Arguments:\n x: A tensor or variable.\n axis: Position where to add a new axis.\n\n Returns:\n A tensor with expanded dimensions.\n \"\"\"\n return array_ops.expand_dims(x, axis)\n\n\n@keras_export('keras.backend.squeeze')\ndef squeeze(x, axis):\n \"\"\"Removes a 1-dimension from the tensor at index \"axis\".\n\n Arguments:\n x: A tensor or variable.\n axis: Axis to drop.\n\n Returns:\n A tensor with the same data as `x` but reduced dimensions.\n \"\"\"\n return array_ops.squeeze(x, [axis])\n\n\n@keras_export('keras.backend.temporal_padding')\ndef temporal_padding(x, padding=(1, 1)):\n \"\"\"Pads the middle dimension of a 3D tensor.\n\n Arguments:\n x: Tensor or variable.\n padding: Tuple of 2 integers, how many zeros to\n add at the start and end of dim 1.\n\n Returns:\n A padded 3D tensor.\n \"\"\"\n assert len(padding) == 2\n pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]\n return array_ops.pad(x, pattern)\n\n\n@keras_export('keras.backend.spatial_2d_padding')\ndef spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):\n \"\"\"Pads the 2nd and 3rd dimensions of a 4D tensor.\n\n Arguments:\n x: Tensor or variable.\n padding: Tuple of 2 tuples, padding 
pattern.\n data_format: One of `channels_last` or `channels_first`.\n\n Returns:\n A padded 4D tensor.\n\n Raises:\n ValueError: if `data_format` is neither\n `channels_last` or `channels_first`.\n \"\"\"\n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n if data_format == 'channels_first':\n pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]\n else:\n pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]\n return array_ops.pad(x, pattern)\n\n\n@keras_export('keras.backend.spatial_3d_padding')\ndef spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):\n \"\"\"Pads 5D tensor with zeros along the depth, height, width dimensions.\n\n Pads these dimensions with respectively\n \"padding[0]\", \"padding[1]\" and \"padding[2]\" zeros left and right.\n\n For 'channels_last' data_format,\n the 2nd, 3rd and 4th dimension will be padded.\n For 'channels_first' data_format,\n the 3rd, 4th and 5th dimension will be padded.\n\n Arguments:\n x: Tensor or variable.\n padding: Tuple of 3 tuples, padding pattern.\n data_format: One of `channels_last` or `channels_first`.\n\n Returns:\n A padded 5D tensor.\n\n Raises:\n ValueError: if `data_format` is neither\n `channels_last` or `channels_first`.\n\n \"\"\"\n assert len(padding) == 3\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n assert len(padding[2]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n if data_format == 'channels_first':\n pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],\n [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]\n else:\n pattern = [[0, 0], [padding[0][0], padding[0][1]],\n [padding[1][0], padding[1][1]], [padding[2][0],\n padding[2][1]], [0, 0]]\n return array_ops.pad(x, pattern)\n\n\n@keras_export('keras.backend.stack')\ndef stack(x, axis=0):\n \"\"\"Stacks a list of rank `R` tensors into a rank `R+1` tensor.\n\n Arguments:\n x: List of tensors.\n axis: Axis along which to perform stacking.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> a = tf.constant([[1, 2],[3, 4]])\n >>> b = tf.constant([[10, 20],[30, 40]])\n >>> tf.keras.backend.stack((a, b))\n <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=\n array([[[ 1, 2],\n [ 3, 4]],\n [[10, 20],\n [30, 40]]], dtype=int32)>\n\n \"\"\"\n return array_ops.stack(x, axis=axis)\n\n\n@keras_export('keras.backend.one_hot')\ndef one_hot(indices, num_classes):\n \"\"\"Computes the one-hot representation of an integer tensor.\n\n Arguments:\n indices: nD integer tensor of shape\n `(batch_size, dim1, dim2, ... dim(n-1))`\n num_classes: Integer, number of classes to consider.\n\n Returns:\n (n + 1)D one hot representation of the input\n with shape `(batch_size, dim1, dim2, ... 
dim(n-1), num_classes)`\n\n Returns:\n The one-hot tensor.\n \"\"\"\n return array_ops.one_hot(indices, depth=num_classes, axis=-1)\n\n\n@keras_export('keras.backend.reverse')\ndef reverse(x, axes):\n \"\"\"Reverse a tensor along the specified axes.\n\n Arguments:\n x: Tensor to reverse.\n axes: Integer or iterable of integers.\n Axes to reverse.\n\n Returns:\n A tensor.\n \"\"\"\n if isinstance(axes, int):\n axes = [axes]\n return array_ops.reverse(x, axes)\n\n\n# VALUE MANIPULATION\n_VALUE_SET_CODE_STRING = \"\"\"\n >>> K = tf.keras.backend # Common keras convention\n >>> v = K.variable(1.)\n\n >>> # reassign\n >>> K.set_value(v, 2.)\n >>> print(K.get_value(v))\n 2.0\n\n >>> # increment\n >>> K.set_value(v, K.get_value(v) + 1)\n >>> print(K.get_value(v))\n 3.0\n\n Variable semantics in TensorFlow 2 are eager execution friendly. The above \n code is roughly equivalent to:\n\n >>> v = tf.Variable(1.)\n\n >>> v.assign(2.)\n >>> print(v.numpy())\n 2.0\n\n >>> v.assign_add(1.)\n >>> print(v.numpy())\n 3.0\"\"\"[3:] # Prune first newline and indent to match the docstring template.\n\n\n@keras_export('keras.backend.get_value')\ndef get_value(x):\n \"\"\"Returns the value of a variable.\n\n `backend.get_value` is the compliment of `backend.set_value`, and provides\n a generic interface for reading from variables while abstracting away the\n differences between TensorFlow 1.x and 2.x semantics.\n\n {snippet}\n\n Arguments:\n x: input variable.\n\n Returns:\n A Numpy array.\n \"\"\"\n if not tensor_util.is_tensor(x):\n return x\n if context.executing_eagerly() or isinstance(x, ops.EagerTensor):\n return x.numpy()\n if not getattr(x, '_in_graph_mode', True):\n # This is a variable which was created in an eager context, but is being\n # evaluated from a Graph.\n with context.eager_mode():\n return x.numpy()\n\n if ops.executing_eagerly_outside_functions():\n # This method of evaluating works inside the Keras FuncGraph.\n return function([], x)(x)\n\n with x.graph.as_default():\n return x.eval(session=get_session((x,)))\n\n\n@keras_export('keras.backend.batch_get_value')\ndef batch_get_value(tensors):\n \"\"\"Returns the value of more than one tensor variable.\n\n Arguments:\n tensors: list of ops to run.\n\n Returns:\n A list of Numpy arrays.\n\n Raises:\n RuntimeError: If this method is called inside defun.\n \"\"\"\n if context.executing_eagerly():\n return [x.numpy() for x in tensors]\n elif ops.inside_function(): # pylint: disable=protected-access\n raise RuntimeError('Cannot get value inside Tensorflow graph function.')\n if tensors:\n return get_session(tensors).run(tensors)\n else:\n return []\n\n\n@keras_export('keras.backend.set_value')\ndef set_value(x, value):\n \"\"\"Sets the value of a variable, from a Numpy array.\n\n `backend.set_value` is the compliment of `backend.get_value`, and provides\n a generic interface for assigning to variables while abstracting away the\n differences between TensorFlow 1.x and 2.x semantics.\n\n {snippet}\n\n Arguments:\n x: Variable to set to a new value.\n value: Value to set the tensor to, as a Numpy array\n (of the same shape).\n \"\"\"\n value = np.asarray(value, dtype=dtype(x))\n if ops.executing_eagerly_outside_functions():\n x.assign(value)\n else:\n with get_graph().as_default():\n tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])\n if hasattr(x, '_assign_placeholder'):\n assign_placeholder = x._assign_placeholder\n assign_op = x._assign_op\n else:\n # In order to support assigning weights to resizable variables in\n # Keras, we make a 
placeholder with the correct number of dimensions\n # but with None in each dimension. This way, we can assign weights\n # of any size (as long as they have the correct dimensionality).\n placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)\n assign_placeholder = array_ops.placeholder(\n tf_dtype, shape=placeholder_shape)\n assign_op = x.assign(assign_placeholder)\n x._assign_placeholder = assign_placeholder\n x._assign_op = assign_op\n get_session().run(assign_op, feed_dict={assign_placeholder: value})\n\n\n@keras_export('keras.backend.batch_set_value')\ndef batch_set_value(tuples):\n \"\"\"Sets the values of many tensor variables at once.\n\n Arguments:\n tuples: a list of tuples `(tensor, value)`.\n `value` should be a Numpy array.\n \"\"\"\n if ops.executing_eagerly_outside_functions():\n for x, value in tuples:\n x.assign(np.asarray(value, dtype=dtype(x)))\n else:\n with get_graph().as_default():\n if tuples:\n assign_ops = []\n feed_dict = {}\n for x, value in tuples:\n value = np.asarray(value, dtype=dtype(x))\n tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])\n if hasattr(x, '_assign_placeholder'):\n assign_placeholder = x._assign_placeholder\n assign_op = x._assign_op\n else:\n # In order to support assigning weights to resizable variables in\n # Keras, we make a placeholder with the correct number of dimensions\n # but with None in each dimension. This way, we can assign weights\n # of any size (as long as they have the correct dimensionality).\n placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)\n assign_placeholder = array_ops.placeholder(\n tf_dtype, shape=placeholder_shape)\n assign_op = x.assign(assign_placeholder)\n x._assign_placeholder = assign_placeholder\n x._assign_op = assign_op\n assign_ops.append(assign_op)\n feed_dict[assign_placeholder] = value\n get_session().run(assign_ops, feed_dict=feed_dict)\n\n\nget_value.__doc__ = get_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)\nset_value.__doc__ = set_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)\n\n\n@keras_export('keras.backend.print_tensor')\ndef print_tensor(x, message=''):\n \"\"\"Prints `message` and the tensor value when evaluated.\n\n Note that `print_tensor` returns a new tensor identical to `x`\n which should be used in the following code. Otherwise the\n print operation is not taken into account during evaluation.\n\n Example:\n\n >>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n >>> tf.keras.backend.print_tensor(x)\n <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n array([[1., 2.],\n [3., 4.]], dtype=float32)>\n\n Arguments:\n x: Tensor to print.\n message: Message to print jointly with the tensor.\n\n Returns:\n The same tensor `x`, unchanged.\n \"\"\"\n if isinstance(x, ops.Tensor) and hasattr(x, 'graph'):\n with get_graph().as_default():\n op = logging_ops.print_v2(message, x, output_stream=sys.stdout)\n with ops.control_dependencies([op]):\n return array_ops.identity(x)\n else:\n logging_ops.print_v2(message, x, output_stream=sys.stdout)\n return x\n\n# GRAPH MANIPULATION\n\n\nclass GraphExecutionFunction(object):\n \"\"\"Runs a computation graph.\n\n It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.\n In particular additional operations via `fetches` argument and additional\n tensor substitutions via `feed_dict` arguments. Note that given\n substitutions are merged with substitutions from `inputs`. 
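  For instance (an illustrative sketch only; `model_input`, `model_output`,
  `extra_op`, `ph` and `x_batch` are assumed names, and this applies to
  TF1-style graph execution, where `K.function` returns this class):

      f = K.function([model_input], [model_output],
                     fetches=[extra_op],              # additional op to run on each call
                     feed_dict={ph: np.zeros((1,))})  # additional tensor substitution
      out = f([x_batch])  # feeds the inputs plus `ph`, and also runs `extra_op`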
Even though\n `feed_dict` is passed once in the constructor (called in `model.compile()`)\n we can modify the values in the dictionary. Through this feed_dict we can\n provide additional substitutions besides Keras inputs.\n\n Arguments:\n inputs: Feed placeholders to the computation graph.\n outputs: Output tensors to fetch.\n updates: Additional update ops to be run at function call.\n name: A name to help users identify what this function does.\n session_kwargs: Arguments to `tf.Session.run()`:\n `fetches`, `feed_dict`, `options`, `run_metadata`.\n \"\"\"\n\n def __init__(self, inputs, outputs, updates=None, name=None,\n **session_kwargs):\n updates = updates or []\n if not isinstance(updates, (list, tuple)):\n raise TypeError('`updates` in a Keras backend function '\n 'should be a list or tuple.')\n\n self._inputs_structure = inputs\n self.inputs = nest.flatten(inputs, expand_composites=True)\n self._outputs_structure = outputs\n self.outputs = cast_variables_to_tensor(\n nest.flatten(outputs, expand_composites=True))\n # TODO(b/127668432): Consider using autograph to generate these\n # dependencies in call.\n # Index 0 = total loss or model output for `predict`.\n with ops.control_dependencies([self.outputs[0]]):\n updates_ops = []\n for update in updates:\n if isinstance(update, tuple):\n p, new_p = update\n updates_ops.append(state_ops.assign(p, new_p))\n else:\n # assumed already an op\n updates_ops.append(update)\n self.updates_op = control_flow_ops.group(*updates_ops)\n self.name = name\n # additional tensor substitutions\n self.feed_dict = session_kwargs.pop('feed_dict', None)\n # additional operations\n self.fetches = session_kwargs.pop('fetches', [])\n if not isinstance(self.fetches, list):\n self.fetches = [self.fetches]\n self.run_options = session_kwargs.pop('options', None)\n self.run_metadata = session_kwargs.pop('run_metadata', None)\n # The main use case of `fetches` being passed to a model is the ability\n # to run custom updates\n # This requires us to wrap fetches in `identity` ops.\n self.fetches = [array_ops.identity(x) for x in self.fetches]\n self.session_kwargs = session_kwargs\n # This mapping keeps track of the function that should receive the\n # output from a fetch in `fetches`: { fetch: function(fetch_output) }\n # A Callback can use this to register a function with access to the\n # output values for a fetch it added.\n self.fetch_callbacks = {}\n\n if session_kwargs:\n raise ValueError('Some keys in session_kwargs are not supported at this '\n 'time: %s' % (session_kwargs.keys(),))\n\n self._callable_fn = None\n self._feed_arrays = None\n self._feed_symbols = None\n self._symbol_vals = None\n self._fetches = None\n self._session = None\n\n def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):\n \"\"\"Generates a callable that runs the graph.\n\n Arguments:\n feed_arrays: List of input tensors to be fed Numpy arrays at runtime.\n feed_symbols: List of input tensors to be fed symbolic tensors at runtime.\n symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.\n session: Session to use to generate the callable.\n\n Returns:\n Function that runs the graph according to the above options.\n \"\"\"\n # Prepare callable options.\n callable_opts = config_pb2.CallableOptions()\n # Handle external-data feed.\n for x in feed_arrays:\n callable_opts.feed.append(x.name)\n if self.feed_dict:\n for key in sorted(self.feed_dict.keys()):\n callable_opts.feed.append(key.name)\n # Handle symbolic feed.\n for x, y in zip(feed_symbols, 
symbol_vals):\n connection = callable_opts.tensor_connection.add()\n if x.dtype != y.dtype:\n y = math_ops.cast(y, dtype=x.dtype)\n from_tensor = ops._as_graph_element(y)\n if from_tensor is None:\n from_tensor = y\n connection.from_tensor = from_tensor.name # Data tensor\n connection.to_tensor = x.name # Placeholder\n # Handle fetches.\n for x in self.outputs + self.fetches:\n callable_opts.fetch.append(x.name)\n # Handle updates.\n callable_opts.target.append(self.updates_op.name)\n # Handle run_options.\n if self.run_options:\n callable_opts.run_options.CopyFrom(self.run_options)\n # Create callable.\n callable_fn = session._make_callable_from_options(callable_opts)\n # Cache parameters corresponding to the generated callable, so that\n # we can detect future mismatches and refresh the callable.\n self._callable_fn = callable_fn\n self._feed_arrays = feed_arrays\n self._feed_symbols = feed_symbols\n self._symbol_vals = symbol_vals\n self._fetches = list(self.fetches)\n self._session = session\n\n def _call_fetch_callbacks(self, fetches_output):\n for fetch, output in zip(self._fetches, fetches_output):\n if fetch in self.fetch_callbacks:\n self.fetch_callbacks[fetch](output)\n\n def _eval_if_composite(self, tensor):\n \"\"\"Helper method which evaluates any CompositeTensors passed to it.\"\"\"\n # We need to evaluate any composite tensor objects that have been\n # reconstructed in 'pack_sequence_as', since otherwise they'll be output as\n # actual CompositeTensor objects instead of the value(s) contained in the\n # CompositeTensors. E.g., if output_structure contains a SparseTensor, then\n # this ensures that we return its value as a SparseTensorValue rather than\n # a SparseTensor.\n if isinstance(tensor, composite_tensor.CompositeTensor):\n return self._session.run(tensor)\n else:\n return tensor\n\n def __call__(self, inputs):\n inputs = nest.flatten(inputs, expand_composites=True)\n\n session = get_session(inputs)\n feed_arrays = []\n array_vals = []\n feed_symbols = []\n symbol_vals = []\n for tensor, value in zip(self.inputs, inputs):\n if value is None:\n continue\n\n if tensor_util.is_tensor(value):\n # Case: feeding symbolic tensor.\n feed_symbols.append(tensor)\n symbol_vals.append(value)\n else:\n # Case: feeding Numpy array.\n feed_arrays.append(tensor)\n # We need to do array conversion and type casting at this level, since\n # `callable_fn` only supports exact matches.\n tensor_type = dtypes_module.as_dtype(tensor.dtype)\n array_vals.append(np.asarray(value,\n dtype=tensor_type.as_numpy_dtype))\n\n if self.feed_dict:\n for key in sorted(self.feed_dict.keys()):\n array_vals.append(\n np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))\n\n # Refresh callable if anything has changed.\n if (self._callable_fn is None or feed_arrays != self._feed_arrays or\n symbol_vals != self._symbol_vals or\n feed_symbols != self._feed_symbols or self.fetches != self._fetches or\n session != self._session):\n self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)\n\n fetched = self._callable_fn(*array_vals,\n run_metadata=self.run_metadata)\n self._call_fetch_callbacks(fetched[-len(self._fetches):])\n output_structure = nest.pack_sequence_as(\n self._outputs_structure,\n fetched[:len(self.outputs)],\n expand_composites=True)\n # We need to evaluate any composite tensor objects that have been\n # reconstructed in 'pack_sequence_as', since otherwise they'll be output as\n # actual CompositeTensor objects instead of the value(s) contained in the\n # CompositeTensors. 
E.g., if output_structure contains a SparseTensor, then\n # this ensures that we return its value as a SparseTensorValue rather than\n # a SparseTensor.\n return nest.map_structure(self._eval_if_composite, output_structure)\n\n\nclass EagerExecutionFunction(object):\n \"\"\"Helper class for constructing a TF graph function from the Keras graph.\n\n Arguments:\n inputs: Feed placeholders to the computation graph.\n outputs: Output tensors to fetch.\n updates: Additional update ops to be run at function call.\n name: A name to help users identify what this function does.\n session_kwargs: Unsupported.\n \"\"\"\n\n def __init__(self, inputs, outputs, updates=None, name=None):\n self.name = name\n self._inputs_structure = inputs\n inputs = nest.flatten(inputs, expand_composites=True)\n self._outputs_structure = outputs\n outputs = nest.flatten(outputs, expand_composites=True)\n\n updates = updates or []\n if not isinstance(updates, (list, tuple)):\n raise TypeError('`updates` in a Keras backend function '\n 'should be a list or tuple.')\n\n if updates and not outputs:\n # Edge case; never happens in practice\n raise ValueError('Cannot create a Keras backend function with updates'\n ' but no outputs during eager execution.')\n graphs = {\n i.graph\n for i in nest.flatten([inputs, outputs, updates])\n if hasattr(i, 'graph')\n }\n if len(graphs) > 1:\n raise ValueError('Cannot create an execution function which is comprised '\n 'of elements from multiple graphs.')\n\n source_graph = graphs.pop()\n global_graph = get_graph()\n\n updates_ops = []\n legacy_update_ops = []\n for update in updates:\n # For legacy reasons it is allowed to pass an update as a tuple\n # `(variable, new_value)` (this maps to an assign op). Otherwise it\n # is assumed to already be an op -- we cannot control its execution\n # order.\n if isinstance(update, tuple):\n legacy_update_ops.append(update)\n else:\n if hasattr(update, 'op'):\n update = update.op\n if update is not None:\n # `update.op` may have been None in certain cases.\n updates_ops.append(update)\n\n self._freezable_vars_to_feed = []\n self._freezable_vars_values = []\n freezable_vars_from_keras_graph = object_identity.ObjectIdentitySet(\n _FREEZABLE_VARS.get(global_graph, {}))\n with _scratch_graph() as exec_graph:\n global_graph = get_graph()\n if source_graph not in (exec_graph, global_graph):\n raise ValueError('Unknown graph. 
Aborting.')\n\n if source_graph is global_graph and exec_graph is not global_graph:\n init_tensors = (\n outputs + updates_ops + [p for [p, _] in legacy_update_ops] +\n [p_new for [_, p_new] in legacy_update_ops\n if isinstance(p_new, ops.Tensor)])\n lifted_map = lift_to_graph.lift_to_graph(\n tensors=init_tensors,\n graph=exec_graph,\n sources=inputs,\n add_sources=True,\n handle_captures=True,\n base_graph=source_graph)\n\n inputs = [lifted_map[i] for i in inputs]\n outputs = [lifted_map[i] for i in outputs]\n updates_ops = [lifted_map[i] for i in updates_ops]\n legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new))\n for p, p_new in legacy_update_ops]\n\n # Keep track of the value to feed to any \"freezable variables\"\n # created in this graph.\n for old_op, new_op in lifted_map.items():\n if old_op in freezable_vars_from_keras_graph:\n frozen_var = old_op\n if frozen_var._initial_value != frozen_var._current_value:\n # We only feed a frozen_variable if its value has changed;\n # otherwise it can rely on the default value of the\n # underlying placeholder_with_default.\n self._freezable_vars_to_feed.append(new_op)\n self._freezable_vars_values.append(frozen_var._current_value)\n\n # Consolidate updates\n with exec_graph.as_default():\n outputs = cast_variables_to_tensor(outputs)\n with ops.control_dependencies(outputs):\n for p, p_new in legacy_update_ops:\n updates_ops.append(state_ops.assign(p, p_new))\n\n self.inputs, self.outputs = inputs, outputs\n self._input_references = self.inputs + self._freezable_vars_to_feed\n with ops.control_dependencies(updates_ops):\n self.outputs[0] = array_ops.identity(self.outputs[0])\n\n exec_graph.inputs = self._input_references + exec_graph.internal_captures\n exec_graph.outputs = self.outputs\n graph_fn = eager_function.ConcreteFunction(exec_graph)\n\n graph_fn._num_positional_args = len(self._input_references)\n graph_fn._arg_keywords = []\n self._graph_fn = graph_fn\n\n # Handle placeholders with default\n # (treated as required placeholder by graph functions)\n self._placeholder_default_values = {}\n with exec_graph.as_default():\n for x in self.inputs:\n if x.op.type == 'PlaceholderWithDefault':\n self._placeholder_default_values[ops.tensor_id(\n x)] = tensor_util.constant_value(x.op.inputs[0])\n\n def __call__(self, inputs):\n input_values = nest.flatten(inputs, expand_composites=True)\n\n if self._freezable_vars_values:\n input_values = input_values + self._freezable_vars_values\n converted_inputs = []\n for tensor, value in zip(self._input_references, input_values):\n if value is None:\n # Assume `value` is a placeholder with default\n value = self._placeholder_default_values.get(\n ops.tensor_id(tensor), None)\n if value is None:\n raise ValueError(\n 'You must feed a value for placeholder %s' % (tensor,))\n if not isinstance(value, ops.Tensor):\n value = ops.convert_to_tensor_v2(value, dtype=tensor.dtype)\n if value.dtype != tensor.dtype:\n # Temporary workaround due to `convert_to_tensor` not casting floats.\n # See b/119637405\n value = math_ops.cast(value, tensor.dtype)\n converted_inputs.append(value)\n outputs = self._graph_fn(*converted_inputs)\n\n # EagerTensor.numpy() will often make a copy to ensure memory safety.\n # However in this case `outputs` is not directly returned, so it is always\n # safe to reuse the underlying buffer without checking. 
In such a case the\n # private numpy conversion method is preferred to guarantee performance.\n return nest.pack_sequence_as(\n self._outputs_structure,\n [x._numpy() for x in outputs], # pylint: disable=protected-access\n expand_composites=True)\n\n\n@keras_export('keras.backend.function')\ndef function(inputs, outputs, updates=None, name=None, **kwargs):\n \"\"\"Instantiates a Keras function.\n\n Arguments:\n inputs: List of placeholder tensors.\n outputs: List of output tensors.\n updates: List of update ops.\n name: String, name of function.\n **kwargs: Passed to `tf.Session.run`.\n\n Returns:\n Output values as Numpy arrays.\n\n Raises:\n ValueError: if invalid kwargs are passed in or if in eager execution.\n \"\"\"\n if ops.executing_eagerly_outside_functions():\n if kwargs:\n raise ValueError('Session keyword arguments are not support during '\n 'eager execution. You passed: %s' % (kwargs,))\n return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)\n\n if kwargs:\n for key in kwargs:\n if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0]\n and key not in ['inputs', 'outputs', 'updates', 'name']):\n msg = ('Invalid argument \"%s\" passed to K.function with TensorFlow '\n 'backend') % key\n raise ValueError(msg)\n return GraphExecutionFunction(\n inputs, outputs, updates=updates, name=name, **kwargs)\n\n\n@keras_export('keras.backend.gradients')\ndef gradients(loss, variables):\n \"\"\"Returns the gradients of `loss` w.r.t. `variables`.\n\n Arguments:\n loss: Scalar tensor to minimize.\n variables: List of variables.\n\n Returns:\n A gradients tensor.\n \"\"\"\n return gradients_module.gradients(\n loss, variables, colocate_gradients_with_ops=True)\n\n\n@keras_export('keras.backend.stop_gradient')\ndef stop_gradient(variables):\n \"\"\"Returns `variables` but with zero gradient w.r.t. every other variable.\n\n Arguments:\n variables: Tensor or list of tensors to consider constant with respect\n to any other variable.\n\n\n Returns:\n A single tensor or a list of tensors (depending on the passed argument)\n that has no gradient with respect to any other variable.\n \"\"\"\n if isinstance(variables, (list, tuple)):\n return map(array_ops.stop_gradient, variables)\n return array_ops.stop_gradient(variables)\n\n\n# CONTROL FLOW\n\n\n@keras_export('keras.backend.rnn')\ndef rnn(step_function,\n inputs,\n initial_states,\n go_backwards=False,\n mask=None,\n constants=None,\n unroll=False,\n input_length=None,\n time_major=False,\n zero_output_for_mask=False):\n \"\"\"Iterates over the time dimension of a tensor.\n\n Arguments:\n step_function: RNN step function.\n Args;\n input; Tensor with shape `(samples, ...)` (no time dimension),\n representing input for the batch of samples at a certain\n time step.\n states; List of tensors.\n Returns;\n output; Tensor with shape `(samples, output_dim)`\n (no time dimension).\n new_states; List of tensors, same length and shapes\n as 'states'. The first state in the list must be the\n output tensor at the previous timestep.\n inputs: Tensor of temporal data of shape `(samples, time, ...)`\n (at least 3D), or nested tensors, and each of which has shape\n `(samples, time, ...)`.\n initial_states: Tensor with shape `(samples, state_size)`\n (no time dimension), containing the initial values for the states used\n in the step function. In the case that state_size is in a nested\n shape, the shape of initial_states will also follow the nested\n structure.\n go_backwards: Boolean. 
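      (Defaults to `False`.)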
If True, do the iteration over the time\n dimension in reverse order and return the reversed sequence.\n mask: Binary tensor with shape `(samples, time, 1)`,\n with a zero for every element that is masked.\n constants: List of constant values passed at each step.\n unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.\n input_length: An integer or a 1-D Tensor, depending on whether\n the time dimension is fixed-length or not. In case of variable length\n input, it is used for masking in case there's no mask specified.\n time_major: Boolean. If true, the inputs and outputs will be in shape\n `(timesteps, batch, ...)`, whereas in the False case, it will be\n `(batch, timesteps, ...)`. Using `time_major = True` is a bit more\n efficient because it avoids transposes at the beginning and end of the\n RNN calculation. However, most TensorFlow data is batch-major, so by\n default this function accepts input and emits output in batch-major\n form.\n zero_output_for_mask: Boolean. If True, the output for masked timestep\n will be zeros, whereas in the False case, output from previous\n timestep is returned.\n\n Returns:\n A tuple, `(last_output, outputs, new_states)`.\n last_output: the latest output of the rnn, of shape `(samples, ...)`\n outputs: tensor with shape `(samples, time, ...)` where each\n entry `outputs[s, t]` is the output of the step function\n at time `t` for sample `s`.\n new_states: list of tensors, latest states returned by\n the step function, of shape `(samples, ...)`.\n\n Raises:\n ValueError: if input dimension is less than 3.\n ValueError: if `unroll` is `True` but input timestep is not a fixed\n number.\n ValueError: if `mask` is provided (not `None`) but states is not provided\n (`len(states)` == 0).\n \"\"\"\n\n def swap_batch_timestep(input_t):\n # Swap the batch and timestep dim for the incoming tensor.\n axes = list(range(len(input_t.shape)))\n axes[0], axes[1] = 1, 0\n return array_ops.transpose(input_t, axes)\n\n if not time_major:\n inputs = nest.map_structure(swap_batch_timestep, inputs)\n\n flatted_inputs = nest.flatten(inputs)\n time_steps = flatted_inputs[0].shape[0]\n batch = flatted_inputs[0].shape[1]\n time_steps_t = array_ops.shape(flatted_inputs[0])[0]\n\n for input_ in flatted_inputs:\n input_.shape.with_rank_at_least(3)\n\n if mask is not None:\n if mask.dtype != dtypes_module.bool:\n mask = math_ops.cast(mask, dtypes_module.bool)\n if len(mask.shape) == 2:\n mask = expand_dims(mask)\n if not time_major:\n mask = swap_batch_timestep(mask)\n\n if constants is None:\n constants = []\n\n # tf.where needs its condition tensor to be the same shape as its two\n # result tensors, but in our case the condition (mask) tensor is\n # (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.\n # So we need to broadcast the mask to match the shape of inputs.\n # That's what the tile call does, it just repeats the mask along its\n # second dimension n times.\n def _expand_mask(mask_t, input_t, fixed_dim=1):\n if nest.is_sequence(mask_t):\n raise ValueError('mask_t is expected to be tensor, but got %s' % mask_t)\n if nest.is_sequence(input_t):\n raise ValueError('input_t is expected to be tensor, but got %s' % input_t)\n rank_diff = len(input_t.shape) - len(mask_t.shape)\n for _ in range(rank_diff):\n mask_t = array_ops.expand_dims(mask_t, -1)\n multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]\n return array_ops.tile(mask_t, multiples)\n\n if unroll:\n if not time_steps:\n raise ValueError('Unrolling requires a fixed number of 
timesteps.')\n states = tuple(initial_states)\n successive_states = []\n successive_outputs = []\n\n # Process the input tensors. The input tensor need to be split on the\n # time_step dim, and reverse if go_backwards is True. In the case of nested\n # input, the input is flattened and then transformed individually.\n # The result of this will be a tuple of lists, each of the item in tuple is\n # list of the tensor with shape (batch, feature)\n def _process_single_input_t(input_t):\n input_t = array_ops.unstack(input_t) # unstack for time_step dim\n if go_backwards:\n input_t.reverse()\n return input_t\n\n if nest.is_sequence(inputs):\n processed_input = nest.map_structure(_process_single_input_t, inputs)\n else:\n processed_input = (_process_single_input_t(inputs),)\n\n def _get_input_tensor(time):\n inp = [t_[time] for t_ in processed_input]\n return nest.pack_sequence_as(inputs, inp)\n\n if mask is not None:\n mask_list = array_ops.unstack(mask)\n if go_backwards:\n mask_list.reverse()\n\n for i in range(time_steps):\n inp = _get_input_tensor(i)\n mask_t = mask_list[i]\n output, new_states = step_function(inp,\n tuple(states) + tuple(constants))\n tiled_mask_t = _expand_mask(mask_t, output)\n\n if not successive_outputs:\n prev_output = zeros_like(output)\n else:\n prev_output = successive_outputs[-1]\n\n output = array_ops.where_v2(tiled_mask_t, output, prev_output)\n\n flat_states = nest.flatten(states)\n flat_new_states = nest.flatten(new_states)\n tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_states)\n flat_final_states = tuple(\n array_ops.where_v2(m, s, ps)\n for m, s, ps in zip(tiled_mask_t, flat_new_states, flat_states))\n states = nest.pack_sequence_as(states, flat_final_states)\n\n successive_outputs.append(output)\n successive_states.append(states)\n last_output = successive_outputs[-1]\n new_states = successive_states[-1]\n outputs = array_ops.stack(successive_outputs)\n\n if zero_output_for_mask:\n last_output = array_ops.where_v2(\n _expand_mask(mask_list[-1], last_output), last_output,\n zeros_like(last_output))\n outputs = array_ops.where_v2(\n _expand_mask(mask, outputs, fixed_dim=2), outputs,\n zeros_like(outputs))\n\n else: # mask is None\n for i in range(time_steps):\n inp = _get_input_tensor(i)\n output, states = step_function(inp, tuple(states) + tuple(constants))\n successive_outputs.append(output)\n successive_states.append(states)\n last_output = successive_outputs[-1]\n new_states = successive_states[-1]\n outputs = array_ops.stack(successive_outputs)\n\n else: # Unroll == False\n states = tuple(initial_states)\n\n # Create input tensor array, if the inputs is nested tensors, then it will\n # be flattened first, and tensor array will be created one per flattened\n # tensor.\n input_ta = tuple(\n tensor_array_ops.TensorArray(\n dtype=inp.dtype,\n size=time_steps_t,\n tensor_array_name='input_ta_%s' % i)\n for i, inp in enumerate(flatted_inputs))\n input_ta = tuple(\n ta.unstack(input_) if not go_backwards else ta\n .unstack(reverse(input_, 0))\n for ta, input_ in zip(input_ta, flatted_inputs))\n\n # Get the time(0) input and compute the output for that, the output will be\n # used to determine the dtype of output tensor array. 
Don't read from\n # input_ta due to TensorArray clear_after_read default to True.\n input_time_zero = nest.pack_sequence_as(inputs,\n [inp[0] for inp in flatted_inputs])\n # output_time_zero is used to determine the cell output shape and its dtype.\n # the value is discarded.\n output_time_zero, _ = step_function(\n input_time_zero, tuple(initial_states) + tuple(constants))\n output_ta = tuple(\n tensor_array_ops.TensorArray(\n dtype=out.dtype,\n size=time_steps_t,\n element_shape=out.shape,\n tensor_array_name='output_ta_%s' % i)\n for i, out in enumerate(nest.flatten(output_time_zero)))\n\n time = constant_op.constant(0, dtype='int32', name='time')\n\n # We only specify the 'maximum_iterations' when building for XLA since that\n # causes slowdowns on GPU in TF.\n if (not context.executing_eagerly() and\n control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph())):\n max_iterations = math_ops.reduce_max(input_length)\n else:\n max_iterations = None\n\n while_loop_kwargs = {\n 'cond': lambda time, *_: time < time_steps_t,\n 'maximum_iterations': max_iterations,\n 'parallel_iterations': 32,\n 'swap_memory': True,\n }\n if mask is not None:\n if go_backwards:\n mask = reverse(mask, 0)\n\n mask_ta = tensor_array_ops.TensorArray(\n dtype=dtypes_module.bool,\n size=time_steps_t,\n tensor_array_name='mask_ta')\n mask_ta = mask_ta.unstack(mask)\n\n def masking_fn(time):\n return mask_ta.read(time)\n\n def compute_masked_output(mask_t, flat_out, flat_mask):\n tiled_mask_t = tuple(\n _expand_mask(mask_t, o, fixed_dim=len(mask_t.shape))\n for o in flat_out)\n return tuple(\n array_ops.where_v2(m, o, fm)\n for m, o, fm in zip(tiled_mask_t, flat_out, flat_mask))\n elif isinstance(input_length, ops.Tensor):\n if go_backwards:\n max_len = math_ops.reduce_max(input_length, axis=0)\n rev_input_length = math_ops.subtract(max_len - 1, input_length)\n\n def masking_fn(time):\n return math_ops.less(rev_input_length, time)\n else:\n\n def masking_fn(time):\n return math_ops.greater(input_length, time)\n\n def compute_masked_output(mask_t, flat_out, flat_mask):\n return tuple(\n array_ops.where(mask_t, o, zo)\n for (o, zo) in zip(flat_out, flat_mask))\n else:\n masking_fn = None\n\n if masking_fn is not None:\n # Mask for the T output will be base on the output of T - 1. 
In the case\n # T = 0, a zero filled tensor will be used.\n flat_zero_output = tuple(array_ops.zeros_like(o)\n for o in nest.flatten(output_time_zero))\n def _step(time, output_ta_t, prev_output, *states):\n \"\"\"RNN step function.\n\n Arguments:\n time: Current timestep value.\n output_ta_t: TensorArray.\n prev_output: tuple of outputs from time - 1.\n *states: List of states.\n\n Returns:\n Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`\n \"\"\"\n current_input = tuple(ta.read(time) for ta in input_ta)\n # maybe set shape.\n current_input = nest.pack_sequence_as(inputs, current_input)\n mask_t = masking_fn(time)\n output, new_states = step_function(current_input,\n tuple(states) + tuple(constants))\n # mask output\n flat_output = nest.flatten(output)\n flat_mask_output = (flat_zero_output if zero_output_for_mask\n else nest.flatten(prev_output))\n flat_new_output = compute_masked_output(mask_t, flat_output,\n flat_mask_output)\n\n # mask states\n flat_state = nest.flatten(states)\n flat_new_state = nest.flatten(new_states)\n for state, new_state in zip(flat_state, flat_new_state):\n if isinstance(new_state, ops.Tensor):\n new_state.set_shape(state.shape)\n flat_final_state = compute_masked_output(mask_t, flat_new_state,\n flat_state)\n new_states = nest.pack_sequence_as(new_states, flat_final_state)\n\n output_ta_t = tuple(\n ta.write(time, out)\n for ta, out in zip(output_ta_t, flat_new_output))\n return (time + 1, output_ta_t,\n tuple(flat_new_output)) + tuple(new_states)\n\n final_outputs = control_flow_ops.while_loop(\n body=_step,\n loop_vars=(time, output_ta, flat_zero_output) + states,\n **while_loop_kwargs)\n # Skip final_outputs[2] which is the output for final timestep.\n new_states = final_outputs[3:]\n else:\n def _step(time, output_ta_t, *states):\n \"\"\"RNN step function.\n\n Arguments:\n time: Current timestep value.\n output_ta_t: TensorArray.\n *states: List of states.\n\n Returns:\n Tuple: `(time + 1,output_ta_t) + tuple(new_states)`\n \"\"\"\n current_input = tuple(ta.read(time) for ta in input_ta)\n current_input = nest.pack_sequence_as(inputs, current_input)\n output, new_states = step_function(current_input,\n tuple(states) + tuple(constants))\n flat_state = nest.flatten(states)\n flat_new_state = nest.flatten(new_states)\n for state, new_state in zip(flat_state, flat_new_state):\n if isinstance(new_state, ops.Tensor):\n new_state.set_shape(state.shape)\n\n flat_output = nest.flatten(output)\n output_ta_t = tuple(\n ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))\n new_states = nest.pack_sequence_as(initial_states, flat_new_state)\n return (time + 1, output_ta_t) + tuple(new_states)\n\n final_outputs = control_flow_ops.while_loop(\n body=_step,\n loop_vars=(time, output_ta) + states,\n **while_loop_kwargs)\n new_states = final_outputs[2:]\n\n output_ta = final_outputs[1]\n\n outputs = tuple(o.stack() for o in output_ta)\n last_output = tuple(o[-1] for o in outputs)\n\n outputs = nest.pack_sequence_as(output_time_zero, outputs)\n last_output = nest.pack_sequence_as(output_time_zero, last_output)\n\n # static shape inference\n def set_shape(output_):\n if isinstance(output_, ops.Tensor):\n shape = output_.shape.as_list()\n shape[0] = time_steps\n shape[1] = batch\n output_.set_shape(shape)\n return output_\n\n outputs = nest.map_structure(set_shape, outputs)\n\n if not time_major:\n outputs = nest.map_structure(swap_batch_timestep, outputs)\n\n return last_output, outputs, new_states\n\n\n@keras_export('keras.backend.switch')\ndef 
switch(condition, then_expression, else_expression):\n \"\"\"Switches between two operations depending on a scalar value.\n\n Note that both `then_expression` and `else_expression`\n should be symbolic tensors of the *same shape*.\n\n Arguments:\n condition: tensor (`int` or `bool`).\n then_expression: either a tensor, or a callable that returns a tensor.\n else_expression: either a tensor, or a callable that returns a tensor.\n\n Returns:\n The selected tensor.\n\n Raises:\n ValueError: If rank of `condition` is greater than rank of expressions.\n \"\"\"\n if condition.dtype != dtypes_module.bool:\n condition = math_ops.cast(condition, 'bool')\n cond_ndim = ndim(condition)\n if not cond_ndim:\n if not callable(then_expression):\n\n def then_expression_fn():\n return then_expression\n else:\n then_expression_fn = then_expression\n if not callable(else_expression):\n\n def else_expression_fn():\n return else_expression\n else:\n else_expression_fn = else_expression\n x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)\n else:\n # tf.where needs its condition tensor\n # to be the same shape as its two\n # result tensors\n if callable(then_expression):\n then_expression = then_expression()\n if callable(else_expression):\n else_expression = else_expression()\n expr_ndim = ndim(then_expression)\n if cond_ndim > expr_ndim:\n raise ValueError('Rank of `condition` should be less than or'\n ' equal to rank of `then_expression` and '\n '`else_expression`. ndim(condition)=' + str(cond_ndim) +\n ', ndim(then_expression)'\n '=' + str(expr_ndim))\n if cond_ndim > 1:\n ndim_diff = expr_ndim - cond_ndim\n cond_shape = array_ops.concat(\n [array_ops.shape(condition), [1] * ndim_diff], axis=0)\n condition = array_ops.reshape(condition, cond_shape)\n expr_shape = array_ops.shape(then_expression)\n shape_diff = expr_shape - cond_shape\n tile_shape = array_ops.where_v2(shape_diff > 0, expr_shape,\n array_ops.ones_like(expr_shape))\n condition = array_ops.tile(condition, tile_shape)\n x = array_ops.where_v2(condition, then_expression, else_expression)\n return x\n\n\n@keras_export('keras.backend.in_train_phase')\ndef in_train_phase(x, alt, training=None):\n \"\"\"Selects `x` in train phase, and `alt` otherwise.\n\n Note that `alt` should have the *same shape* as `x`.\n\n Arguments:\n x: What to return in train phase\n (tensor or callable that returns a tensor).\n alt: What to return otherwise\n (tensor or callable that returns a tensor).\n training: Optional scalar tensor\n (or Python boolean, or Python integer)\n specifying the learning phase.\n\n Returns:\n Either `x` or `alt` based on the `training` flag.\n the `training` flag defaults to `K.learning_phase()`.\n \"\"\"\n from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top\n if training is None:\n training = base_layer_utils.call_context().training\n\n if training is None:\n training = learning_phase()\n\n # TODO(b/138862903): Handle the case when training is tensor.\n if not tensor_util.is_tensor(training):\n if training == 1 or training is True:\n if callable(x):\n return x()\n else:\n return x\n\n elif training == 0 or training is False:\n if callable(alt):\n return alt()\n else:\n return alt\n\n # else: assume learning phase is a placeholder tensor.\n x = switch(training, x, alt)\n return x\n\n\n@keras_export('keras.backend.in_test_phase')\ndef in_test_phase(x, alt, training=None):\n \"\"\"Selects `x` in test phase, and `alt` otherwise.\n\n Note that `alt` should have the *same shape* 
as `x`.\n\n Arguments:\n x: What to return in test phase\n (tensor or callable that returns a tensor).\n alt: What to return otherwise\n (tensor or callable that returns a tensor).\n training: Optional scalar tensor\n (or Python boolean, or Python integer)\n specifying the learning phase.\n\n Returns:\n Either `x` or `alt` based on `K.learning_phase`.\n \"\"\"\n return in_train_phase(alt, x, training=training)\n\n\n# NN OPERATIONS\n\n\n@keras_export('keras.backend.relu')\ndef relu(x, alpha=0., max_value=None, threshold=0):\n \"\"\"Rectified linear unit.\n\n With default values, it returns element-wise `max(x, 0)`.\n\n Otherwise, it follows:\n `f(x) = max_value` for `x >= max_value`,\n `f(x) = x` for `threshold <= x < max_value`,\n `f(x) = alpha * (x - threshold)` otherwise.\n\n Arguments:\n x: A tensor or variable.\n alpha: A scalar, slope of negative section (default=`0.`).\n max_value: float. Saturation threshold.\n threshold: float. Threshold value for thresholded activation.\n\n Returns:\n A tensor.\n \"\"\"\n\n if alpha != 0.:\n if max_value is None and threshold == 0:\n return nn.leaky_relu(x, alpha=alpha)\n\n if threshold != 0:\n negative_part = nn.relu(-x + threshold)\n else:\n negative_part = nn.relu(-x)\n\n clip_max = max_value is not None\n\n if threshold != 0:\n # computes x for x > threshold else 0\n x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())\n elif max_value == 6:\n # if no threshold, then can use nn.relu6 native TF op for performance\n x = nn.relu6(x)\n clip_max = False\n else:\n x = nn.relu(x)\n\n if clip_max:\n max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)\n zero = _constant_to_tensor(0, x.dtype.base_dtype)\n x = clip_ops.clip_by_value(x, zero, max_value)\n\n if alpha != 0.:\n alpha = _to_tensor(alpha, x.dtype.base_dtype)\n x -= alpha * negative_part\n return x\n\n\n@keras_export('keras.backend.elu')\ndef elu(x, alpha=1.):\n \"\"\"Exponential linear unit.\n\n Arguments:\n x: A tensor or variable to compute the activation function for.\n alpha: A scalar, slope of negative section.\n\n Returns:\n A tensor.\n \"\"\"\n res = nn.elu(x)\n if alpha == 1:\n return res\n else:\n return array_ops.where_v2(x > 0, res, alpha * res)\n\n\n@keras_export('keras.backend.softmax')\ndef softmax(x, axis=-1):\n \"\"\"Softmax of a tensor.\n\n Arguments:\n x: A tensor or variable.\n axis: The dimension softmax would be performed on.\n The default is -1 which indicates the last dimension.\n\n Returns:\n A tensor.\n \"\"\"\n return nn.softmax(x, axis=axis)\n\n\n@keras_export('keras.backend.softplus')\ndef softplus(x):\n \"\"\"Softplus of a tensor.\n\n Arguments:\n x: A tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return nn.softplus(x)\n\n\n@keras_export('keras.backend.softsign')\ndef softsign(x):\n \"\"\"Softsign of a tensor.\n\n Arguments:\n x: A tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return nn.softsign(x)\n\n\ndef _backtrack_identity(tensor):\n while tensor.op.type == 'Identity':\n tensor = tensor.op.inputs[0]\n return tensor\n\n\n@keras_export('keras.backend.categorical_crossentropy')\ndef categorical_crossentropy(target, output, from_logits=False, axis=-1):\n \"\"\"Categorical crossentropy between an output tensor and a target tensor.\n\n Arguments:\n target: A tensor of the same shape as `output`.\n output: A tensor resulting from a softmax\n (unless `from_logits` is True, in which\n case `output` is expected to be the logits).\n from_logits: Boolean, whether `output` is the\n result of a softmax, or is a tensor of logits.\n 
axis: Int specifying the channels axis. `axis=-1` corresponds to data\n format `channels_last', and `axis=1` corresponds to data format\n `channels_first`.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if `axis` is neither -1 nor one of the axes of `output`.\n\n Example:\n\n >>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])\n >>> print(a)\n tf.Tensor(\n [[1. 0. 0.]\n [0. 1. 0.]\n [0. 0. 1.]], shape=(3, 3), dtype=float32)\n >>> b = tf.constant([.9, .05, .05, .5, .89, .6, .05, .01, .94], shape=[3,3])\n >>> print(b)\n tf.Tensor(\n [[0.9 0.05 0.05]\n [0.5 0.89 0.6 ]\n [0.05 0.01 0.94]], shape=(3, 3), dtype=float32)\n >>> loss = tf.keras.backend.categorical_crossentropy(a, b)\n >>> print(np.around(loss, 5))\n [0.10536 0.80467 0.06188]\n >>> loss = tf.keras.backend.categorical_crossentropy(a, a)\n >>> print(np.around(loss, 5))\n [0. 0. 0.]\n\n \"\"\"\n target.shape.assert_is_compatible_with(output.shape)\n if from_logits:\n return nn.softmax_cross_entropy_with_logits_v2(\n labels=target, logits=output, axis=axis)\n\n if not isinstance(output, (ops.EagerTensor, variables_module.Variable)):\n output = _backtrack_identity(output)\n if output.op.type == 'Softmax':\n # When softmax activation function is used for output operation, we\n # use logits from the softmax function directly to compute loss in order\n # to prevent collapsing zero when training.\n # See b/117284466\n assert len(output.op.inputs) == 1\n output = output.op.inputs[0]\n return nn.softmax_cross_entropy_with_logits_v2(\n labels=target, logits=output, axis=axis)\n\n # scale preds so that the class probas of each sample sum to 1\n output = output / math_ops.reduce_sum(output, axis, True)\n # Compute cross entropy from probabilities.\n epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)\n output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)\n return -math_ops.reduce_sum(target * math_ops.log(output), axis)\n\n\n@keras_export('keras.backend.sparse_categorical_crossentropy')\ndef sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):\n \"\"\"Categorical crossentropy with integer targets.\n\n Arguments:\n target: An integer tensor.\n output: A tensor resulting from a softmax\n (unless `from_logits` is True, in which\n case `output` is expected to be the logits).\n from_logits: Boolean, whether `output` is the\n result of a softmax, or is a tensor of logits.\n axis: Int specifying the channels axis. 
`axis=-1` corresponds to data\n format `channels_last', and `axis=1` corresponds to data format\n `channels_first`.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if `axis` is neither -1 nor one of the axes of `output`.\n \"\"\"\n if not from_logits and not isinstance(\n output, (ops.EagerTensor, variables_module.Variable)):\n output = _backtrack_identity(output)\n if output.op.type == 'Softmax':\n # When softmax activation function is used for output operation, we\n # use logits from the softmax function directly to compute loss in order\n # to prevent collapsing zero when training.\n # See b/117284466\n assert len(output.op.inputs) == 1\n output = output.op.inputs[0]\n from_logits = True\n\n if not from_logits:\n epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)\n output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)\n output = math_ops.log(output)\n\n if isinstance(output.shape, (tuple, list)):\n output_rank = len(output.shape)\n else:\n output_rank = output.shape.ndims\n if output_rank is not None:\n axis %= output_rank\n if axis != output_rank - 1:\n permutation = list(\n itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))\n output = array_ops.transpose(output, perm=permutation)\n elif axis != -1:\n raise ValueError(\n 'Cannot compute sparse categorical crossentropy with `axis={}` on an '\n 'output tensor with unknown rank'.format(axis))\n\n target = cast(target, 'int64')\n\n # Try to adjust the shape so that rank of labels = rank of logits - 1.\n output_shape = array_ops.shape_v2(output)\n target_rank = target.shape.ndims\n\n update_shape = (\n target_rank is not None and output_rank is not None and\n target_rank != output_rank - 1)\n if update_shape:\n target = flatten(target)\n output = array_ops.reshape(output, [-1, output_shape[-1]])\n\n if py_any(_is_symbolic_tensor(v) for v in [target, output]):\n with get_graph().as_default():\n res = nn.sparse_softmax_cross_entropy_with_logits_v2(\n labels=target, logits=output)\n else:\n res = nn.sparse_softmax_cross_entropy_with_logits_v2(\n labels=target, logits=output)\n\n if update_shape and output_rank >= 3:\n # If our output includes timesteps or spatial dimensions we need to reshape\n return array_ops.reshape(res, output_shape[:-1])\n else:\n return res\n\n\n@keras_export('keras.backend.binary_crossentropy')\ndef binary_crossentropy(target, output, from_logits=False):\n \"\"\"Binary crossentropy between an output tensor and a target tensor.\n\n Arguments:\n target: A tensor with the same shape as `output`.\n output: A tensor.\n from_logits: Whether `output` is expected to be a logits tensor.\n By default, we consider that `output`\n encodes a probability distribution.\n\n Returns:\n A tensor.\n \"\"\"\n if from_logits:\n return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)\n\n if not isinstance(output, (ops.EagerTensor, variables_module.Variable)):\n output = _backtrack_identity(output)\n if output.op.type == 'Sigmoid':\n # When sigmoid activation function is used for output operation, we\n # use logits from the sigmoid function directly to compute loss in order\n # to prevent collapsing zero when training.\n assert len(output.op.inputs) == 1\n output = output.op.inputs[0]\n return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)\n\n epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)\n output = clip_ops.clip_by_value(output, epsilon_, 1. 
- epsilon_)\n\n # Compute cross entropy from probabilities.\n bce = target * math_ops.log(output + epsilon())\n bce += (1 - target) * math_ops.log(1 - output + epsilon())\n return -bce\n\n\n@keras_export('keras.backend.sigmoid')\ndef sigmoid(x):\n \"\"\"Element-wise sigmoid.\n\n Arguments:\n x: A tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return nn.sigmoid(x)\n\n\n@keras_export('keras.backend.hard_sigmoid')\ndef hard_sigmoid(x):\n \"\"\"Segment-wise linear approximation of sigmoid.\n\n Faster than sigmoid.\n Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.\n In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.\n\n Arguments:\n x: A tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)\n point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)\n x = math_ops.mul(x, point_two)\n x = math_ops.add(x, point_five)\n x = clip_ops.clip_by_value(x, 0., 1.)\n return x\n\n\n@keras_export('keras.backend.tanh')\ndef tanh(x):\n \"\"\"Element-wise tanh.\n\n Arguments:\n x: A tensor or variable.\n\n Returns:\n A tensor.\n \"\"\"\n return nn.tanh(x)\n\n\n@keras_export('keras.backend.dropout')\ndef dropout(x, level, noise_shape=None, seed=None):\n \"\"\"Sets entries in `x` to zero at random, while scaling the entire tensor.\n\n Arguments:\n x: tensor\n level: fraction of the entries in the tensor\n that will be set to 0.\n noise_shape: shape for randomly generated keep/drop flags,\n must be broadcastable to the shape of `x`\n seed: random seed to ensure determinism.\n\n Returns:\n A tensor.\n \"\"\"\n if seed is None:\n seed = np.random.randint(10e6)\n return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)\n\n\n@keras_export('keras.backend.l2_normalize')\ndef l2_normalize(x, axis=None):\n \"\"\"Normalizes a tensor wrt the L2 norm alongside the specified axis.\n\n Arguments:\n x: Tensor or variable.\n axis: axis along which to perform normalization.\n\n Returns:\n A tensor.\n \"\"\"\n return nn.l2_normalize(x, axis=axis)\n\n\n@keras_export('keras.backend.in_top_k')\ndef in_top_k(predictions, targets, k):\n \"\"\"Returns whether the `targets` are in the top `k` `predictions`.\n\n Arguments:\n predictions: A tensor of shape `(batch_size, classes)` and type `float32`.\n targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.\n k: An `int`, number of top elements to consider.\n\n Returns:\n A 1D tensor of length `batch_size` and type `bool`.\n `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`\n values of `predictions[i]`.\n \"\"\"\n return nn.in_top_k(predictions, targets, k)\n\n\n# CONVOLUTIONS\n\n\ndef _preprocess_conv1d_input(x, data_format):\n \"\"\"Transpose and cast the input before the conv1d.\n\n Arguments:\n x: input tensor.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n\n Returns:\n A tensor.\n \"\"\"\n tf_data_format = 'NWC' # to pass TF Conv2dNative operations\n if data_format == 'channels_first':\n if not _has_nchw_support():\n x = array_ops.transpose(x, (0, 2, 1)) # NCW -> NWC\n else:\n tf_data_format = 'NCW'\n return x, tf_data_format\n\n\ndef _preprocess_conv2d_input(x, data_format, force_transpose=False):\n \"\"\"Transpose and cast the input before the conv2d.\n\n Arguments:\n x: input tensor.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n force_transpose: Boolean. 
If True, the input will always be transposed\n from NCHW to NHWC if `data_format` is `\"channels_first\"`.\n If False, the transposition only occurs on CPU (GPU ops are\n assumed to support NCHW).\n\n Returns:\n A tensor.\n \"\"\"\n tf_data_format = 'NHWC'\n if data_format == 'channels_first':\n if not _has_nchw_support() or force_transpose:\n x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC\n else:\n tf_data_format = 'NCHW'\n return x, tf_data_format\n\n\ndef _preprocess_conv3d_input(x, data_format):\n \"\"\"Transpose and cast the input before the conv3d.\n\n Arguments:\n x: input tensor.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n\n Returns:\n A tensor.\n \"\"\"\n tf_data_format = 'NDHWC'\n if data_format == 'channels_first':\n if not _has_nchw_support():\n x = array_ops.transpose(x, (0, 2, 3, 4, 1))\n else:\n tf_data_format = 'NCDHW'\n return x, tf_data_format\n\n\ndef _preprocess_padding(padding):\n \"\"\"Convert keras' padding to TensorFlow's padding.\n\n Arguments:\n padding: string, one of 'same' , 'valid'\n\n Returns:\n a string, one of 'SAME', 'VALID'.\n\n Raises:\n ValueError: if invalid `padding'`\n \"\"\"\n if padding == 'same':\n padding = 'SAME'\n elif padding == 'valid':\n padding = 'VALID'\n else:\n raise ValueError('Invalid padding: ' + str(padding))\n return padding\n\n\n@keras_export('keras.backend.conv1d')\ndef conv1d(x,\n kernel,\n strides=1,\n padding='valid',\n data_format=None,\n dilation_rate=1):\n \"\"\"1D convolution.\n\n Arguments:\n x: Tensor or variable.\n kernel: kernel tensor.\n strides: stride integer.\n padding: string, `\"same\"`, `\"causal\"` or `\"valid\"`.\n data_format: string, one of \"channels_last\", \"channels_first\".\n dilation_rate: integer dilate rate.\n\n Returns:\n A tensor, result of 1D convolution.\n\n Raises:\n ValueError: if `data_format` is neither `channels_last` or\n `channels_first`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n kernel_shape = kernel.shape.as_list()\n if padding == 'causal':\n # causal (dilated) convolution:\n left_pad = dilation_rate * (kernel_shape[0] - 1)\n x = temporal_padding(x, (left_pad, 0))\n padding = 'valid'\n padding = _preprocess_padding(padding)\n\n x, tf_data_format = _preprocess_conv1d_input(x, data_format)\n x = nn.convolution(\n input=x,\n filter=kernel,\n dilation_rate=dilation_rate,\n strides=strides,\n padding=padding,\n data_format=tf_data_format)\n if data_format == 'channels_first' and tf_data_format == 'NWC':\n x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW\n return x\n\n\n@keras_export('keras.backend.conv2d')\ndef conv2d(x,\n kernel,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1)):\n \"\"\"2D convolution.\n\n Arguments:\n x: Tensor or variable.\n kernel: kernel tensor.\n strides: strides tuple.\n padding: string, `\"same\"` or `\"valid\"`.\n data_format: `\"channels_last\"` or `\"channels_first\"`.\n dilation_rate: tuple of 2 integers.\n\n Returns:\n A tensor, result of 2D convolution.\n\n Raises:\n ValueError: if `data_format` is neither `channels_last` or\n `channels_first`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n padding = _preprocess_padding(padding)\n 
x = nn.convolution(\n input=x,\n filter=kernel,\n dilation_rate=dilation_rate,\n strides=strides,\n padding=padding,\n data_format=tf_data_format)\n if data_format == 'channels_first' and tf_data_format == 'NHWC':\n x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW\n return x\n\n\n@keras_export('keras.backend.conv2d_transpose')\ndef conv2d_transpose(x,\n kernel,\n output_shape,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1)):\n \"\"\"2D deconvolution (i.e.\n\n transposed convolution).\n\n Arguments:\n x: Tensor or variable.\n kernel: kernel tensor.\n output_shape: 1D int tensor for the output shape.\n strides: strides tuple.\n padding: string, `\"same\"` or `\"valid\"`.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n dilation_rate: Tuple of 2 integers.\n\n Returns:\n A tensor, result of transposed 2D convolution.\n\n Raises:\n ValueError: if `data_format` is neither `channels_last` or\n `channels_first`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n # `atrous_conv2d_transpose` only supports NHWC format, even on GPU.\n if data_format == 'channels_first' and dilation_rate != (1, 1):\n force_transpose = True\n else:\n force_transpose = False\n\n x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)\n\n if data_format == 'channels_first' and tf_data_format == 'NHWC':\n output_shape = (output_shape[0], output_shape[2], output_shape[3],\n output_shape[1])\n if output_shape[0] is None:\n output_shape = (shape(x)[0],) + tuple(output_shape[1:])\n\n if isinstance(output_shape, (tuple, list)):\n output_shape = array_ops.stack(list(output_shape))\n\n padding = _preprocess_padding(padding)\n if tf_data_format == 'NHWC':\n strides = (1,) + strides + (1,)\n else:\n strides = (1, 1) + strides\n\n if dilation_rate == (1, 1):\n x = nn.conv2d_transpose(x, kernel, output_shape, strides,\n padding=padding,\n data_format=tf_data_format)\n else:\n assert dilation_rate[0] == dilation_rate[1]\n x = nn.atrous_conv2d_transpose(\n x,\n kernel,\n output_shape,\n rate=dilation_rate[0],\n padding=padding)\n if data_format == 'channels_first' and tf_data_format == 'NHWC':\n x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW\n return x\n\n\ndef separable_conv1d(x,\n depthwise_kernel,\n pointwise_kernel,\n strides=1,\n padding='valid',\n data_format=None,\n dilation_rate=1):\n \"\"\"1D convolution with separable filters.\n\n Arguments:\n x: input tensor\n depthwise_kernel: convolution kernel for the depthwise convolution.\n pointwise_kernel: kernel for the 1x1 convolution.\n strides: stride integer.\n padding: string, `\"same\"` or `\"valid\"`.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n dilation_rate: integer dilation rate.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if `data_format` is neither `channels_last` or\n `channels_first`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n if isinstance(strides, int):\n strides = (strides,)\n if isinstance(dilation_rate, int):\n dilation_rate = (dilation_rate,)\n\n x, tf_data_format = _preprocess_conv1d_input(x, data_format)\n padding = _preprocess_padding(padding)\n if not isinstance(strides, tuple):\n strides = tuple(strides)\n if tf_data_format == 
'NWC':\n spatial_start_dim = 1\n strides = (1,) + strides * 2 + (1,)\n else:\n spatial_start_dim = 2\n strides = (1, 1) + strides * 2\n x = array_ops.expand_dims(x, spatial_start_dim)\n depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)\n pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)\n dilation_rate = (1,) + dilation_rate\n\n x = nn.separable_conv2d(\n x,\n depthwise_kernel,\n pointwise_kernel,\n strides=strides,\n padding=padding,\n rate=dilation_rate,\n data_format=tf_data_format)\n\n x = array_ops.squeeze(x, [spatial_start_dim])\n\n if data_format == 'channels_first' and tf_data_format == 'NWC':\n x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW\n\n return x\n\n\n@keras_export('keras.backend.separable_conv2d')\ndef separable_conv2d(x,\n depthwise_kernel,\n pointwise_kernel,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1)):\n \"\"\"2D convolution with separable filters.\n\n Arguments:\n x: input tensor\n depthwise_kernel: convolution kernel for the depthwise convolution.\n pointwise_kernel: kernel for the 1x1 convolution.\n strides: strides tuple (length 2).\n padding: string, `\"same\"` or `\"valid\"`.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n dilation_rate: tuple of integers,\n dilation rates for the separable convolution.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if `data_format` is neither `channels_last` or\n `channels_first`.\n ValueError: if `strides` is not a tuple of 2 integers.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n if len(strides) != 2:\n raise ValueError('`strides` must be a tuple of 2 integers.')\n\n x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n padding = _preprocess_padding(padding)\n if not isinstance(strides, tuple):\n strides = tuple(strides)\n if tf_data_format == 'NHWC':\n strides = (1,) + strides + (1,)\n else:\n strides = (1, 1) + strides\n\n x = nn.separable_conv2d(\n x,\n depthwise_kernel,\n pointwise_kernel,\n strides=strides,\n padding=padding,\n rate=dilation_rate,\n data_format=tf_data_format)\n if data_format == 'channels_first' and tf_data_format == 'NHWC':\n x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW\n return x\n\n\n@keras_export('keras.backend.depthwise_conv2d')\ndef depthwise_conv2d(x,\n depthwise_kernel,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1)):\n \"\"\"2D convolution with separable filters.\n\n Arguments:\n x: input tensor\n depthwise_kernel: convolution kernel for the depthwise convolution.\n strides: strides tuple (length 2).\n padding: string, `\"same\"` or `\"valid\"`.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n dilation_rate: tuple of integers,\n dilation rates for the separable convolution.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: if `data_format` is neither `channels_last` or\n `channels_first`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n padding = _preprocess_padding(padding)\n if tf_data_format == 'NHWC':\n strides = (1,) + strides + (1,)\n else:\n strides = (1, 1) + strides\n\n x = nn.depthwise_conv2d(\n x,\n depthwise_kernel,\n strides=strides,\n padding=padding,\n 
rate=dilation_rate,\n data_format=tf_data_format)\n if data_format == 'channels_first' and tf_data_format == 'NHWC':\n x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW\n return x\n\n\n@keras_export('keras.backend.conv3d')\ndef conv3d(x,\n kernel,\n strides=(1, 1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1, 1)):\n \"\"\"3D convolution.\n\n Arguments:\n x: Tensor or variable.\n kernel: kernel tensor.\n strides: strides tuple.\n padding: string, `\"same\"` or `\"valid\"`.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n dilation_rate: tuple of 3 integers.\n\n Returns:\n A tensor, result of 3D convolution.\n\n Raises:\n ValueError: if `data_format` is neither `channels_last` or\n `channels_first`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n padding = _preprocess_padding(padding)\n x = nn.convolution(\n input=x,\n filter=kernel,\n dilation_rate=dilation_rate,\n strides=strides,\n padding=padding,\n data_format=tf_data_format)\n if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n return x\n\n\ndef conv3d_transpose(x,\n kernel,\n output_shape,\n strides=(1, 1, 1),\n padding='valid',\n data_format=None):\n \"\"\"3D deconvolution (i.e.\n\n transposed convolution).\n\n Arguments:\n x: input tensor.\n kernel: kernel tensor.\n output_shape: 1D int tensor for the output shape.\n strides: strides tuple.\n padding: string, \"same\" or \"valid\".\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n\n Returns:\n A tensor, result of transposed 3D convolution.\n\n Raises:\n ValueError: if `data_format` is neither `channels_last` or\n `channels_first`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n if isinstance(output_shape, (tuple, list)):\n output_shape = array_ops.stack(output_shape)\n\n x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n\n if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n output_shape = (output_shape[0], output_shape[2], output_shape[3],\n output_shape[4], output_shape[1])\n if output_shape[0] is None:\n output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])\n output_shape = array_ops.stack(list(output_shape))\n\n padding = _preprocess_padding(padding)\n if tf_data_format == 'NDHWC':\n strides = (1,) + strides + (1,)\n else:\n strides = (1, 1) + strides\n\n x = nn.conv3d_transpose(\n x,\n kernel,\n output_shape,\n strides,\n padding=padding,\n data_format=tf_data_format)\n if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n return x\n\n\n@keras_export('keras.backend.pool2d')\ndef pool2d(x,\n pool_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n pool_mode='max'):\n \"\"\"2D Pooling.\n\n Arguments:\n x: Tensor or variable.\n pool_size: tuple of 2 integers.\n strides: tuple of 2 integers.\n padding: string, `\"same\"` or `\"valid\"`.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n pool_mode: string, `\"max\"` or `\"avg\"`.\n\n Returns:\n A tensor, result of 2D pooling.\n\n Raises:\n ValueError: if `data_format` is neither `\"channels_last\"` or\n 
`\"channels_first\"`.\n ValueError: if `pool_size` is not a tuple of 2 integers.\n ValueError: if `strides` is not a tuple of 2 integers.\n ValueError: if `pool_mode` is neither `\"max\"` or `\"avg\"`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n if len(pool_size) != 2:\n raise ValueError('`pool_size` must be a tuple of 2 integers.')\n if len(strides) != 2:\n raise ValueError('`strides` must be a tuple of 2 integers.')\n\n x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n padding = _preprocess_padding(padding)\n if tf_data_format == 'NHWC':\n strides = (1,) + strides + (1,)\n pool_size = (1,) + pool_size + (1,)\n else:\n strides = (1, 1) + strides\n pool_size = (1, 1) + pool_size\n\n if pool_mode == 'max':\n x = nn.max_pool(\n x, pool_size, strides, padding=padding, data_format=tf_data_format)\n elif pool_mode == 'avg':\n x = nn.avg_pool(\n x, pool_size, strides, padding=padding, data_format=tf_data_format)\n else:\n raise ValueError('Invalid pooling mode: ' + str(pool_mode))\n\n if data_format == 'channels_first' and tf_data_format == 'NHWC':\n x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW\n return x\n\n\n@keras_export('keras.backend.pool3d')\ndef pool3d(x,\n pool_size,\n strides=(1, 1, 1),\n padding='valid',\n data_format=None,\n pool_mode='max'):\n \"\"\"3D Pooling.\n\n Arguments:\n x: Tensor or variable.\n pool_size: tuple of 3 integers.\n strides: tuple of 3 integers.\n padding: string, `\"same\"` or `\"valid\"`.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n pool_mode: string, `\"max\"` or `\"avg\"`.\n\n Returns:\n A tensor, result of 3D pooling.\n\n Raises:\n ValueError: if `data_format` is neither `\"channels_last\"` or\n `\"channels_first\"`.\n ValueError: if `pool_mode` is neither `\"max\"` or `\"avg\"`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n x, tf_data_format = _preprocess_conv3d_input(x, data_format)\n padding = _preprocess_padding(padding)\n if tf_data_format == 'NDHWC':\n strides = (1,) + strides + (1,)\n pool_size = (1,) + pool_size + (1,)\n else:\n strides = (1, 1) + strides\n pool_size = (1, 1) + pool_size\n\n if pool_mode == 'max':\n x = nn.max_pool3d(\n x, pool_size, strides, padding=padding, data_format=tf_data_format)\n elif pool_mode == 'avg':\n x = nn.avg_pool3d(\n x, pool_size, strides, padding=padding, data_format=tf_data_format)\n else:\n raise ValueError('Invalid pooling mode: ' + str(pool_mode))\n\n if data_format == 'channels_first' and tf_data_format == 'NDHWC':\n x = array_ops.transpose(x, (0, 4, 1, 2, 3))\n return x\n\n\ndef local_conv(inputs,\n kernel,\n kernel_size,\n strides,\n output_shape,\n data_format=None):\n \"\"\"Apply N-D convolution with un-shared weights.\n\n Arguments:\n inputs: (N+2)-D tensor with shape\n (batch_size, channels_in, d_in1, ..., d_inN)\n if data_format='channels_first', or\n (batch_size, d_in1, ..., d_inN, channels_in)\n if data_format='channels_last'.\n kernel: the unshared weight for N-D convolution,\n with shape (output_items, feature_dim, channels_out), where\n feature_dim = np.prod(kernel_size) * channels_in,\n output_items = np.prod(output_shape).\n kernel_size: a tuple of N integers, specifying the\n spatial dimensions of the N-D convolution window.\n strides: a tuple of 
N integers, specifying the strides\n of the convolution along the spatial dimensions.\n output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial\n dimensionality of the output.\n data_format: string, \"channels_first\" or \"channels_last\".\n\n Returns:\n An (N+2)-D tensor with shape:\n (batch_size, channels_out) + output_shape\n if data_format='channels_first', or:\n (batch_size,) + output_shape + (channels_out,)\n if data_format='channels_last'.\n\n Raises:\n ValueError: if `data_format` is neither\n `channels_last` nor `channels_first`.\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n\n kernel_shape = int_shape(kernel)\n feature_dim = kernel_shape[1]\n channels_out = kernel_shape[-1]\n ndims = len(output_shape)\n spatial_dimensions = list(range(ndims))\n\n xs = []\n output_axes_ticks = [range(axis_max) for axis_max in output_shape]\n for position in itertools.product(*output_axes_ticks):\n slices = [slice(None)]\n\n if data_format == 'channels_first':\n slices.append(slice(None))\n\n slices.extend(\n slice(position[d] * strides[d], position[d] * strides[d] +\n kernel_size[d]) for d in spatial_dimensions)\n\n if data_format == 'channels_last':\n slices.append(slice(None))\n\n xs.append(reshape(inputs[slices], (1, -1, feature_dim)))\n\n x_aggregate = concatenate(xs, axis=0)\n output = batch_dot(x_aggregate, kernel)\n output = reshape(output, output_shape + (-1, channels_out))\n\n if data_format == 'channels_first':\n permutation = [ndims, ndims + 1] + spatial_dimensions\n else:\n permutation = [ndims] + spatial_dimensions + [ndims + 1]\n\n return permute_dimensions(output, permutation)\n\n\n@keras_export('keras.backend.local_conv1d')\ndef local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):\n \"\"\"Apply 1D conv with un-shared weights.\n\n Arguments:\n inputs: 3D tensor with shape:\n (batch_size, steps, input_dim)\n if data_format is \"channels_last\" or\n (batch_size, input_dim, steps)\n if data_format is \"channels_first\".\n kernel: the unshared weight for convolution,\n with shape (output_length, feature_dim, filters).\n kernel_size: a tuple of a single integer,\n specifying the length of the 1D convolution window.\n strides: a tuple of a single integer,\n specifying the stride length of the convolution.\n data_format: the data format, channels_first or channels_last.\n\n Returns:\n A 3d tensor with shape:\n (batch_size, output_length, filters)\n if data_format='channels_first'\n or 3D tensor with shape:\n (batch_size, filters, output_length)\n if data_format='channels_last'.\n \"\"\"\n output_shape = (kernel.shape[0],)\n return local_conv(inputs,\n kernel,\n kernel_size,\n strides,\n output_shape,\n data_format)\n\n\n@keras_export('keras.backend.local_conv2d')\ndef local_conv2d(inputs,\n kernel,\n kernel_size,\n strides,\n output_shape,\n data_format=None):\n \"\"\"Apply 2D conv with un-shared weights.\n\n Arguments:\n inputs: 4D tensor with shape:\n (batch_size, filters, new_rows, new_cols)\n if data_format='channels_first'\n or 4D tensor with shape:\n (batch_size, new_rows, new_cols, filters)\n if data_format='channels_last'.\n kernel: the unshared weight for convolution,\n with shape (output_items, feature_dim, filters).\n kernel_size: a tuple of 2 integers, specifying the\n width and height of the 2D convolution window.\n strides: a tuple of 2 integers, specifying the strides\n of the convolution along 
the width and height.\n output_shape: a tuple with (output_row, output_col).\n data_format: the data format, channels_first or channels_last.\n\n Returns:\n A 4D tensor with shape:\n (batch_size, filters, new_rows, new_cols)\n if data_format='channels_first'\n or 4D tensor with shape:\n (batch_size, new_rows, new_cols, filters)\n if data_format='channels_last'.\n \"\"\"\n return local_conv(inputs,\n kernel,\n kernel_size,\n strides,\n output_shape,\n data_format)\n\n\n@keras_export('keras.backend.bias_add')\ndef bias_add(x, bias, data_format=None):\n \"\"\"Adds a bias vector to a tensor.\n\n Arguments:\n x: Tensor or variable.\n bias: Bias tensor to add.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n\n Returns:\n Output tensor.\n\n Raises:\n ValueError: In one of the two cases below:\n 1. invalid `data_format` argument.\n 2. invalid bias shape.\n the bias should be either a vector or\n a tensor with ndim(x) - 1 dimension\n \"\"\"\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n bias_shape = int_shape(bias)\n if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:\n raise ValueError(\n 'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %\n (len(bias_shape), ndim(x)))\n\n if len(bias_shape) == 1:\n if data_format == 'channels_first':\n return nn.bias_add(x, bias, data_format='NCHW')\n return nn.bias_add(x, bias, data_format='NHWC')\n if ndim(x) in (3, 4, 5):\n if data_format == 'channels_first':\n bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]\n return x + reshape(bias, bias_reshape_axis)\n return x + reshape(bias, (1,) + bias_shape)\n return nn.bias_add(x, bias)\n\n\n# RANDOMNESS\n\n\n@keras_export('keras.backend.random_normal')\ndef random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):\n \"\"\"Returns a tensor with normal distribution of values.\n\n It is an alias to `tf.random.normal`.\n\n Arguments:\n shape: A tuple of integers, the shape of tensor to create.\n mean: A float, the mean value of the normal distribution to draw samples.\n Default to 0.0.\n stddev: A float, the standard deviation of the normal distribution\n to draw samples. Default to 1.0.\n dtype: `tf.dtypes.DType`, dtype of returned tensor. Default to use Keras\n backend dtype which is float32.\n seed: Integer, random seed. Will use a random numpy integer when not\n specified.\n\n Returns:\n A tensor with normal distribution of values.\n\n Example:\n\n >>> random_normal_tensor = tf.keras.backend.random_normal(shape=(2,3),\n ... 
mean=0.0, stddev=1.0)\n >>> random_normal_tensor\n <tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,\n dtype=float32)>\n \"\"\"\n if dtype is None:\n dtype = floatx()\n if seed is None:\n seed = np.random.randint(10e6)\n return random_ops.random_normal(\n shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)\n\n\n@keras_export('keras.backend.random_uniform')\ndef random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):\n \"\"\"Returns a tensor with uniform distribution of values.\n\n Arguments:\n shape: A tuple of integers, the shape of tensor to create.\n minval: A float, lower boundary of the uniform distribution\n to draw samples.\n maxval: A float, upper boundary of the uniform distribution\n to draw samples.\n dtype: String, dtype of returned tensor.\n seed: Integer, random seed.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3),\n ... minval=0.0, maxval=1.0)\n >>> random_uniform_tensor\n <tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,\n dtype=float32)>\n \"\"\"\n if dtype is None:\n dtype = floatx()\n if seed is None:\n seed = np.random.randint(10e6)\n return random_ops.random_uniform(\n shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)\n\n\n@deprecated(None, 'Use `tf.keras.backend.random_bernoulli` instead.')\n@keras_export('keras.backend.random_binomial')\ndef random_binomial(shape, p=0.0, dtype=None, seed=None):\n \"\"\"Returns a tensor with random binomial distribution of values.\n\n DEPRECATED, use `tf.keras.backend.random_bernoulli` instead.\n\n The binomial distribution with parameters `n` and `p` is the probability\n distribution of the number of successful Bernoulli process. Only supports\n `n` = 1 for now.\n\n Arguments:\n shape: A tuple of integers, the shape of tensor to create.\n p: A float, `0. <= p <= 1`, probability of binomial distribution.\n dtype: String, dtype of returned tensor.\n seed: Integer, random seed.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> random_binomial_tensor = tf.keras.backend.random_binomial(shape=(2,3),\n ... p=0.5)\n >>> random_binomial_tensor\n <tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,\n dtype=float32)>\n \"\"\"\n if dtype is None:\n dtype = floatx()\n if seed is None:\n seed = np.random.randint(10e6)\n return array_ops.where_v2(\n random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,\n array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))\n\n\n@keras_export('keras.backend.random_bernoulli')\ndef random_bernoulli(shape, p=0.0, dtype=None, seed=None):\n \"\"\"Returns a tensor with random bernoulli distribution of values.\n\n Arguments:\n shape: A tuple of integers, the shape of tensor to create.\n p: A float, `0. 
<= p <= 1`, probability of bernoulli distribution.\n dtype: String, dtype of returned tensor.\n seed: Integer, random seed.\n\n Returns:\n A tensor.\n \"\"\"\n return random_binomial(shape, p, dtype, seed)\n\n\n@keras_export('keras.backend.truncated_normal')\ndef truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):\n \"\"\"Returns a tensor with truncated random normal distribution of values.\n\n The generated values follow a normal distribution\n with specified mean and standard deviation,\n except that values whose magnitude is more than\n two standard deviations from the mean are dropped and re-picked.\n\n Arguments:\n shape: A tuple of integers, the shape of tensor to create.\n mean: Mean of the values.\n stddev: Standard deviation of the values.\n dtype: String, dtype of returned tensor.\n seed: Integer, random seed.\n\n Returns:\n A tensor.\n \"\"\"\n if dtype is None:\n dtype = floatx()\n if seed is None:\n seed = np.random.randint(10e6)\n return random_ops.truncated_normal(\n shape, mean, stddev, dtype=dtype, seed=seed)\n\n\n# CTC\n# TensorFlow has a native implementation, but it uses sparse tensors\n# and therefore requires a wrapper for Keras. The functions below convert\n# dense to sparse tensors and also wraps up the beam search code that is\n# in TensorFlow's CTC implementation\n\n\n@keras_export('keras.backend.ctc_label_dense_to_sparse')\ndef ctc_label_dense_to_sparse(labels, label_lengths):\n \"\"\"Converts CTC labels from dense to sparse.\n\n Arguments:\n labels: dense CTC labels.\n label_lengths: length of the labels.\n\n Returns:\n A sparse tensor representation of the labels.\n \"\"\"\n label_shape = array_ops.shape(labels)\n num_batches_tns = array_ops.stack([label_shape[0]])\n max_num_labels_tns = array_ops.stack([label_shape[1]])\n\n def range_less_than(old_input, current_input):\n return array_ops.expand_dims(\n math_ops.range(array_ops.shape(old_input)[1]), 0) < array_ops.fill(\n max_num_labels_tns, current_input)\n\n init = math_ops.cast(\n array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)\n dense_mask = functional_ops.scan(\n range_less_than, label_lengths, initializer=init, parallel_iterations=1)\n dense_mask = dense_mask[:, 0, :]\n\n label_array = array_ops.reshape(\n array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),\n label_shape)\n label_ind = array_ops.boolean_mask(label_array, dense_mask)\n\n batch_array = array_ops.transpose(\n array_ops.reshape(\n array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),\n reverse(label_shape, 0)))\n batch_ind = array_ops.boolean_mask(batch_array, dense_mask)\n indices = array_ops.transpose(\n array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))\n\n vals_sparse = array_ops.gather_nd(labels, indices)\n\n return sparse_tensor.SparseTensor(\n math_ops.cast(indices, dtypes_module.int64), vals_sparse,\n math_ops.cast(label_shape, dtypes_module.int64))\n\n\n@keras_export('keras.backend.ctc_batch_cost')\ndef ctc_batch_cost(y_true, y_pred, input_length, label_length):\n \"\"\"Runs CTC loss algorithm on each batch element.\n\n Arguments:\n y_true: tensor `(samples, max_string_length)`\n containing the truth labels.\n y_pred: tensor `(samples, time_steps, num_categories)`\n containing the prediction, or output of the softmax.\n input_length: tensor `(samples, 1)` containing the sequence length for\n each batch item in `y_pred`.\n label_length: tensor `(samples, 1)` containing the sequence length for\n each batch item in `y_true`.\n\n Returns:\n Tensor with 
shape (samples,1) containing the\n CTC loss of each element.\n \"\"\"\n label_length = math_ops.cast(\n array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)\n input_length = math_ops.cast(\n array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)\n sparse_labels = math_ops.cast(\n ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)\n\n y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())\n\n return array_ops.expand_dims(\n ctc.ctc_loss(\n inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)\n\n\n@keras_export('keras.backend.ctc_decode')\ndef ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):\n \"\"\"Decodes the output of a softmax.\n\n Can use either greedy search (also known as best path)\n or a constrained dictionary search.\n\n Arguments:\n y_pred: tensor `(samples, time_steps, num_categories)`\n containing the prediction, or output of the softmax.\n input_length: tensor `(samples, )` containing the sequence length for\n each batch item in `y_pred`.\n greedy: perform much faster best-path search if `true`.\n This does not use a dictionary.\n beam_width: if `greedy` is `false`: a beam search decoder will be used\n with a beam of this width.\n top_paths: if `greedy` is `false`,\n how many of the most probable paths will be returned.\n\n Returns:\n Tuple:\n List: if `greedy` is `true`, returns a list of one element that\n contains the decoded sequence.\n If `false`, returns the `top_paths` most probable\n decoded sequences.\n Important: blank labels are returned as `-1`.\n Tensor `(top_paths, )` that contains\n the log probability of each decoded sequence.\n \"\"\"\n y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())\n input_length = math_ops.cast(input_length, dtypes_module.int32)\n\n if greedy:\n (decoded, log_prob) = ctc.ctc_greedy_decoder(\n inputs=y_pred, sequence_length=input_length)\n else:\n (decoded, log_prob) = ctc.ctc_beam_search_decoder(\n inputs=y_pred,\n sequence_length=input_length,\n beam_width=beam_width,\n top_paths=top_paths)\n decoded_dense = [\n sparse_ops.sparse_to_dense(\n st.indices, st.dense_shape, st.values, default_value=-1)\n for st in decoded\n ]\n return (decoded_dense, log_prob)\n\n\n# HIGH ORDER FUNCTIONS\n\n\n@keras_export('keras.backend.map_fn')\ndef map_fn(fn, elems, name=None, dtype=None):\n \"\"\"Map the function fn over the elements elems and return the outputs.\n\n Arguments:\n fn: Callable that will be called upon each element in elems\n elems: tensor\n name: A string name for the map node in the graph\n dtype: Output data type.\n\n Returns:\n Tensor with dtype `dtype`.\n \"\"\"\n return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)\n\n\n@keras_export('keras.backend.foldl')\ndef foldl(fn, elems, initializer=None, name=None):\n \"\"\"Reduce elems using fn to combine them from left to right.\n\n Arguments:\n fn: Callable that will be called upon each element in elems and an\n accumulator, for instance `lambda acc, x: acc + x`\n elems: tensor\n initializer: The first value used (`elems[0]` in case of None)\n name: A string name for the foldl node in the graph\n\n Returns:\n Tensor with same type and shape as `initializer`.\n \"\"\"\n return functional_ops.foldl(fn, elems, initializer=initializer, name=name)\n\n\n@keras_export('keras.backend.foldr')\ndef foldr(fn, elems, initializer=None, name=None):\n \"\"\"Reduce elems using fn to combine them from right to left.\n\n Arguments:\n fn: Callable that will be called 
upon each element in elems and an\n accumulator, for instance `lambda acc, x: acc + x`\n elems: tensor\n initializer: The first value used (`elems[-1]` in case of None)\n name: A string name for the foldr node in the graph\n\n Returns:\n Same type and shape as initializer\n \"\"\"\n return functional_ops.foldr(fn, elems, initializer=initializer, name=name)\n\n# Load Keras default configuration from config file if present.\n# Set Keras base dir path given KERAS_HOME env variable, if applicable.\n# Otherwise either ~/.keras or /tmp.\nif 'KERAS_HOME' in os.environ:\n _keras_dir = os.environ.get('KERAS_HOME')\nelse:\n _keras_base_dir = os.path.expanduser('~')\n _keras_dir = os.path.join(_keras_base_dir, '.keras')\n_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))\nif os.path.exists(_config_path):\n try:\n with open(_config_path) as fh:\n _config = json.load(fh)\n except ValueError:\n _config = {}\n _floatx = _config.get('floatx', floatx())\n assert _floatx in {'float16', 'float32', 'float64'}\n _epsilon = _config.get('epsilon', epsilon())\n assert isinstance(_epsilon, float)\n _image_data_format = _config.get('image_data_format', image_data_format())\n assert _image_data_format in {'channels_last', 'channels_first'}\n set_floatx(_floatx)\n set_epsilon(_epsilon)\n set_image_data_format(_image_data_format)\n\n# Save config file.\nif not os.path.exists(_keras_dir):\n try:\n os.makedirs(_keras_dir)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n\nif not os.path.exists(_config_path):\n _config = {\n 'floatx': floatx(),\n 'epsilon': epsilon(),\n 'backend': 'tensorflow',\n 'image_data_format': image_data_format()\n }\n try:\n with open(_config_path, 'w') as f:\n f.write(json.dumps(_config, indent=4))\n except IOError:\n # Except permission denied.\n pass\n\n\ndef configure_and_create_distributed_session(distribution_strategy):\n \"\"\"Configure session config and create a session with it.\"\"\"\n\n def _create_session(distribution_strategy):\n \"\"\"Create the Distributed Strategy session.\"\"\"\n session_config = get_default_session_config()\n\n # If a session already exists, merge in its config; in the case there is a\n # conflict, take values of the existing config.\n global _SESSION\n if getattr(_SESSION, 'session', None) and _SESSION.session._config:\n session_config.MergeFrom(_SESSION.session._config)\n\n if is_tpu_strategy(distribution_strategy):\n # TODO(priyag, yuefengz): Remove this workaround when Distribute\n # Coordinator is integrated with keras and we can create a session from\n # there.\n distribution_strategy.configure(session_config)\n master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access\n session = session_module.Session(config=session_config, target=master)\n else:\n worker_context = dc_context.get_current_worker_context()\n if worker_context:\n dc_session_config = worker_context.session_config\n # Merge the default session config to the one from distribute\n # coordinator, which is fine for now since they don't have\n # conflicting configurations.\n dc_session_config.MergeFrom(session_config)\n session = session_module.Session(\n config=dc_session_config, target=worker_context.master_target)\n else:\n distribution_strategy.configure(session_config)\n session = session_module.Session(config=session_config)\n\n set_session(session)\n\n if distribution_strategy.extended._in_multi_worker_mode():\n dc.run_distribute_coordinator(\n 
_create_session,\n distribution_strategy,\n mode=dc.CoordinatorMode.INDEPENDENT_WORKER)\n else:\n _create_session(distribution_strategy)\n\n\ndef is_tpu_strategy(strategy):\n \"\"\"We're executing TPU Strategy.\"\"\"\n return (strategy is not None and\n strategy.__class__.__name__.startswith('TPUStrategy'))\n\n\ndef cast_variables_to_tensor(tensors):\n\n def _cast_variables_to_tensor(tensor):\n if isinstance(tensor, variables_module.Variable):\n return array_ops.identity(tensor)\n return tensor\n\n return nest.map_structure(_cast_variables_to_tensor, tensors)\n\n\ndef _is_symbolic_tensor(x):\n return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)\n\n\ndef convert_inputs_if_ragged(inputs):\n \"\"\"Converts any ragged tensors to dense.\"\"\"\n\n def _convert_ragged_input(inputs):\n if isinstance(inputs, ragged_tensor.RaggedTensor):\n return inputs.to_tensor()\n return inputs\n\n flat_inputs = nest.flatten(inputs)\n contains_ragged = py_any(\n isinstance(i, ragged_tensor.RaggedTensor) for i in flat_inputs)\n\n if not contains_ragged:\n return inputs, None\n\n inputs = nest.map_structure(_convert_ragged_input, inputs)\n # Multiple mask are not yet supported, so one mask is used on all inputs.\n # We approach this similarly when using row lengths to ignore steps.\n nested_row_lengths = math_ops.cast(flat_inputs[0].nested_row_lengths()[0],\n 'int32')\n return inputs, nested_row_lengths\n\n\ndef maybe_convert_to_ragged(is_ragged_input, output, nested_row_lengths):\n \"\"\"Converts any ragged input back to its initial structure.\"\"\"\n if not is_ragged_input:\n return output\n\n return ragged_tensor.RaggedTensor.from_tensor(output, nested_row_lengths)\n\n\nclass ContextValueCache(weakref.WeakKeyDictionary):\n \"\"\"Container that caches (possibly tensor) values based on the context.\n\n This class is similar to defaultdict, where values may be produced by the\n default factory specified during initialization. This class also has a default\n value for the key (when key is `None`) -- the key is set to the the current\n graph or eager context. The default factories for key and value are only used\n in `__getitem__` and `setdefault`. The `.get()` behavior remains the same.\n\n This object will return the value of the current graph or closest parent graph\n if the current graph is a function. This is to reflect the fact that if a\n tensor is created in eager/graph, child functions may capture that tensor.\n\n The default factory method may accept keyword arguments (unlike defaultdict,\n which only accepts callables with 0 arguments). 
To pass keyword arguments to\n `default_factory`, use the `setdefault` method instead of `__getitem__`.\n\n An example of how this class can be used in different contexts:\n\n ```\n cache = ContextValueCache(int)\n\n # Eager mode\n cache[None] += 2\n cache[None] += 4\n assert cache[None] == 6\n\n # Graph mode\n with tf.Graph().as_default() as g:\n cache[None] += 5\n cache[g] += 3\n assert cache[g] == 8\n ```\n\n Example of a default factory with arguments:\n\n ```\n cache = ContextValueCache(lambda x: x + 1)\n g = tf.get_default_graph()\n\n # Example with keyword argument.\n value = cache.setdefault(key=g, kwargs={'x': 3})\n assert cache[g] == 4\n ```\n \"\"\"\n\n def __init__(self, default_factory):\n self.default_factory = default_factory\n weakref.WeakKeyDictionary.__init__(self)\n\n def _key(self):\n if context.executing_eagerly():\n return _DUMMY_EAGER_GRAPH.key\n else:\n return ops.get_default_graph()\n\n def _get_parent_graph(self, graph):\n \"\"\"Returns the parent graph or dummy eager object.\"\"\"\n # TODO(b/149317164): Currently FuncGraphs use ops.get_default_graph() as the\n # outer graph. This results in outer_graph always being a Graph,\n # even in eager mode (get_default_graph will create a new Graph if there\n # isn't a default graph). Because of this bug, we have to specially set the\n # key when eager execution is enabled.\n parent_graph = graph.outer_graph\n if (not isinstance(parent_graph, func_graph.FuncGraph) and\n ops.executing_eagerly_outside_functions()):\n return _DUMMY_EAGER_GRAPH.key\n return parent_graph\n\n def _get_recursive(self, key):\n \"\"\"Gets the value at key or the closest parent graph.\"\"\"\n value = self.get(key)\n if value is not None:\n return value\n\n # Since FuncGraphs are able to capture tensors and variables from their\n # parent graphs, recursively search to see if there is a value stored for\n # one of the parent graphs.\n if isinstance(key, func_graph.FuncGraph):\n return self._get_recursive(self._get_parent_graph(key))\n return None\n\n def __getitem__(self, key):\n \"\"\"Gets the value at key (or current context), or sets default value.\n\n Args:\n key: May be `None` or `Graph`object. When `None`, the key is set to the\n current context.\n\n Returns:\n Either the cached or default value.\n \"\"\"\n if key is None:\n key = self._key()\n\n value = self._get_recursive(key)\n if value is None:\n value = self[key] = self.default_factory() # pylint:disable=not-callable\n return value\n\n def setdefault(self, key=None, default=None, kwargs=None):\n \"\"\"Sets the default value if key is not in dict, and returns the value.\"\"\"\n if key is None:\n key = self._key()\n kwargs = kwargs or {}\n\n if default is None and key not in self:\n default = self.default_factory(**kwargs)\n return weakref.WeakKeyDictionary.setdefault(self, key, default)\n\n# This dictionary holds a mapping {graph: learning_phase}. 
In eager mode, a\n# dummy object is used.\n# A learning phase is a bool tensor used to run Keras models in\n# either train mode (learning_phase == 1) or test mode (learning_phase == 0).\n_GRAPH_LEARNING_PHASES = ContextValueCache(_default_learning_phase)\n\n# This dictionary holds a mapping {graph: set_of_freezable_variables}.\n# Each set tracks objects created via `freezable_variable` in the graph.\n_FREEZABLE_VARS = ContextValueCache(object_identity.ObjectIdentityWeakSet)\n\n# This dictionary holds a mapping between a graph and variables to initialize\n# in the graph.\n_GRAPH_VARIABLES = ContextValueCache(object_identity.ObjectIdentityWeakSet)\n\n# This dictionary holds a mapping between a graph and TF optimizers created in\n# the graph.\n_GRAPH_TF_OPTIMIZERS = ContextValueCache(object_identity.ObjectIdentityWeakSet)\n"
] | [
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.math_ops.subtract",
"tensorflow.python.ops.variables.is_variable_initialized",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.ops.array_ops.sparse_placeholder",
"tensorflow.python.ops.state_ops.assign_sub",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.ops.math_ops.reduce_any",
"tensorflow.python.ops.ragged.ragged_concat_ops.concat",
"tensorflow.python.eager.context.context",
"tensorflow.python.ops.ctc_ops.ctc_loss",
"tensorflow.python.ops.array_ops.stop_gradient",
"tensorflow.python.training.tracking.util.register_session_provider",
"tensorflow.python.ops.array_ops.fill",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.sparse_ops.sparse_concat",
"tensorflow.python.ops.nn.conv2d_transpose",
"tensorflow.python.ops.math_ops.reduce_logsumexp",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.ops.clip_ops.clip_by_value",
"tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.nn.leaky_relu",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.math_ops.cumprod",
"tensorflow.python.ops.ctc_ops.ctc_greedy_decoder",
"tensorflow.python.ops.sparse_ops.sparse_tensor_dense_matmul",
"tensorflow.python.ops.math_ops.equal",
"numpy.delete",
"tensorflow.python.ops.array_ops.shape_v2",
"tensorflow.python.ops.nn.bias_add",
"tensorflow.python.ops.nn.moments",
"numpy.array",
"tensorflow.core.protobuf.config_pb2.CallableOptions",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.reverse",
"tensorflow.python.ops.gradients.gradients",
"tensorflow.python.ops.nn.softplus",
"tensorflow.python.ops.nn.depthwise_conv2d",
"tensorflow.python.ops.nn.avg_pool3d",
"tensorflow.python.ops.array_ops.pad",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.nn.sparse_softmax_cross_entropy_with_logits_v2",
"tensorflow.python.ops.math_ops.greater_equal",
"numpy.expand_dims",
"tensorflow.python.ops.nn.in_top_k",
"numpy.asarray",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.nn.separable_conv2d",
"tensorflow.python.framework.ops.convert_to_tensor_v2",
"tensorflow.python.ops.linalg_ops.eye",
"tensorflow.python.framework.ops._as_graph_element",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.math_ops.exp",
"tensorflow.python.ops.array_ops.gather_nd",
"tensorflow.python.eager.function.ConcreteFunction",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensorSpec",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.framework.device.is_device_spec",
"tensorflow.python.framework.ops.tensor_id",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_tensor",
"tensorflow.python.ops.math_ops.reduce_variance",
"tensorflow.python.ops.ctc_ops.ctc_beam_search_decoder",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.ops.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.ops.logging_ops.print_v2",
"tensorflow.python.ops.nn.avg_pool",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.nn.max_pool",
"tensorflow.python.ops.math_ops.cos",
"tensorflow.python.ops.map_fn.map_fn",
"tensorflow.python.eager.lift_to_graph.lift_to_graph",
"tensorflow.python.ops.nn.softsign",
"tensorflow.python.ops.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.python.ops.nn.convolution",
"tensorflow.python.framework.ops.name_scope_v2",
"tensorflow.python.ops.nn.tanh",
"tensorflow.python.distribute.distribute_coordinator.run_distribute_coordinator",
"tensorflow.python.ops.math_ops.sin",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.init_ops.random_uniform_initializer",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.ops.math_ops.round",
"tensorflow.python.ops.nn.dropout_v2",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.math_ops.argmin",
"tensorflow.python.ops.image_ops.resize_images_v2",
"numpy.random.randint",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.ops.nn.sigmoid",
"tensorflow.python.ops.functional_ops.foldr",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.nn.relu6",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.math_ops.reduce_min",
"tensorflow.python.ops.sparse_ops.sparse_to_dense",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.util.nest.is_sequence",
"tensorflow.python.framework.func_graph.FuncGraph",
"tensorflow.python.ops.nn.relu",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.keras.engine.base_layer_utils.call_context",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.ops.math_ops.mul",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.math_ops.cumsum",
"tensorflow.python.ops.nn.elu",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.init_ops.random_normal_initializer",
"tensorflow.python.distribute.distribute_coordinator_context.get_current_worker_context",
"tensorflow.python.tf2.enabled",
"tensorflow.python.ops.nn.l2_normalize",
"tensorflow.python.ops.nn.atrous_conv2d_transpose",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.math_ops.reduce_std",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.ops.nn.max_pool3d",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.nn.softmax",
"tensorflow.python.ops.array_ops.where_v2",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.distribute.distribution_strategy_context.has_strategy",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.device.DeviceSpec.from_string",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.framework.ops.inside_function",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.random_ops.truncated_normal",
"tensorflow.python.ops.array_ops.one_hot",
"tensorflow.python.client.session.Session",
"tensorflow.python.ops.variables.variables_initializer",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.nn.conv3d_transpose",
"tensorflow.python.ops.math_ops.less_equal",
"tensorflow.python.ops.functional_ops.scan",
"tensorflow.python.ops.nn.fused_batch_norm",
"tensorflow.python.framework.config.list_logical_devices",
"tensorflow.python.framework.ops._get_graph_from_inputs",
"tensorflow.python.ops.math_ops.sign",
"tensorflow.python.ops.functional_ops.foldl",
"tensorflow.python.ops.random_ops.random_normal",
"tensorflow.python.ops.array_ops.boolean_mask",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.nn.batch_normalization"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
}
] |
nicoloverardo/master-degree-thesis | [
"0b36bf181ed6a00c2c0ab31acf52fba0fd5fa8f2",
"0b36bf181ed6a00c2c0ab31acf52fba0fd5fa8f2"
] | [
"src/sird.py",
"scripts/combine_istat_data.py"
] | [
"import pandas as pd\nimport numpy as np\n\nfrom scipy.integrate import odeint\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\n\n\ndef logistic_R0(t, R_0_start, k, x0, R_0_end):\n \"\"\"\n R0 moduled as logistic function\n \"\"\"\n\n return (R_0_start - R_0_end) / (1 + np.exp(-k * (-t + x0))) + R_0_end\n\n\ndef beta(t, R_0_start, k, x0, R_0_end, gamma):\n \"\"\"\n Computes beta at a given time `t`\n \"\"\"\n\n return logistic_R0(t, R_0_start, k, x0, R_0_end) * gamma\n\n\ndef sird_calc(y, t, N, gamma, alpha, R_0_start, k, x0, R_0_end, beta):\n \"\"\"\n Computes SIRD model\n \"\"\"\n\n S, I, R, D = y\n dSdt = -beta(t, R_0_start, k, x0, R_0_end, gamma) * S * I / N\n dIdt = -dSdt - (1 - alpha) * gamma * I - alpha * I\n dRdt = (1 - alpha) * gamma * I\n dDdt = alpha * I\n return dSdt, dIdt, dRdt, dDdt\n\n\ndef sird(\n province,\n pop_prov_df,\n gamma=1 / 7,\n alpha=0.01,\n days=101,\n R_0_start=2,\n k=0.2,\n x0=40,\n R_0_end=0.3,\n prov_list_df=None,\n):\n \"\"\"\n Create and compute a SIRD model\n\n Parameters\n ----------\n\n province : str\n The province name.\n\n pop_prov_df : pandas DataFrame\n The DataFrame with demographic data.\n\n gamma : float (default=1/7)\n Inverse of how many days the infection lasts.\n\n alpha : float (default=0.01)\n Death rate.\n\n days : int (default=101)\n Total number of days to predict + 1.\n\n R_0_start : float (default=2)\n Starting value of RO\n\n k : float (default=0.2)\n How quickly R0 declines. Lower values of k will\n let R0 need more time to become lower.\n\n x0 : int (default=40)\n Value on the x-axis of the inflection point of R0.\n This can be interpreted as the day in which lockdown\n comes into effect.\n\n R_0_end : float (default=0.3)\n Final value of RO\n\n Returns\n -------\n A numpy array of shape (4, days).\n \"\"\"\n\n # Population\n if prov_list_df is not None:\n prov_list = prov_list_df[prov_list_df.Region == province][\"Province\"].values\n\n N = 0\n for prov in prov_list:\n N += pop_prov_df.loc[\n (pop_prov_df.Territorio == prov) & (pop_prov_df.Eta == \"Total\")\n ][\"Value\"].values[0]\n else:\n N = pop_prov_df.loc[\n (pop_prov_df.Territorio == province) & (pop_prov_df.Eta == \"Total\")\n ][\"Value\"].values[0]\n\n times = range(days)\n\n # S0, I0, R0, D0: initial conditions vector\n init = N - 1, 1, 0, 0\n\n # Solve the model\n sirsol = odeint(\n sird_calc, init, times, args=(N, gamma, alpha, R_0_start, k, x0, R_0_end, beta)\n )\n\n return sirsol.T\n\n\ndef Model(days, N, R_0_start, k, x0, R_0_end, alpha, gamma):\n y0 = (\n N - 1.0,\n 1.0,\n 0.0,\n 0.0,\n )\n times = range(0, days)\n\n sirsol = odeint(\n sird_calc, y0, times, args=(N, gamma, alpha, R_0_start, k, x0, R_0_end, beta)\n )\n\n S, I, R, D = sirsol.T\n R0_over_time = [\n beta(i, R_0_start, k, x0, R_0_end, gamma) / gamma for i in range(len(times))\n ]\n\n return times, S, I, R, D, R0_over_time\n\n\nclass DeterministicSird:\n def __init__(\n self,\n data_df,\n pop_prov_df,\n prov_list_df,\n area,\n group_column,\n data_column,\n data_filter,\n lag,\n days_to_predict,\n is_regional=True,\n pcm_data=None,\n ):\n\n self.data_df = data_df\n self.pop_prov_df = pop_prov_df\n self.prov_list_df = prov_list_df\n self.area = area\n self.group_column = group_column\n self.data_column = data_column\n self.data_filter = data_filter\n self.lag = lag\n self.days_to_predict = days_to_predict\n self.is_regional = is_regional\n self.pcm_data = pcm_data\n\n def get_region_pop(self, region, pop_df, prov_df):\n \"\"\"\n 
Computes the total population for a region\n starting from the provinces' popuplation\n\n Parameters\n ----------\n region : str\n The region whose population we need\n\n pop_df : pandas DataFrame\n Data for provinces population\n\n prov_df : pandas DataFrame\n Data that associates each province with\n its region\n\n Returns\n -------\n N : int\n The population of the region\n \"\"\"\n\n prov_list = prov_df[prov_df.Region == region][\"Province\"].values\n\n N = 0\n for prov in prov_list:\n N += pop_df.loc[(pop_df.Territorio == prov) & (pop_df.Eta == \"Total\")][\n \"Value\"\n ].values[0]\n\n return N\n\n def get_prov_pop(self):\n return self.pop_prov_df.loc[\n (self.pop_prov_df.Territorio == self.area)\n & (self.pop_prov_df.Eta == \"Total\")\n ][\"Value\"].values[0]\n\n def fix_arr(self, arr):\n arr[arr < 0] = 0\n arr[np.isinf(arr)] = 0\n return np.nan_to_num(arr)\n\n def lag_data(self, data, lag=7, return_all=False):\n if isinstance(data, np.ndarray):\n N = data.shape[0]\n else:\n N = len(data)\n\n X = np.empty(shape=(N - lag, lag + 1))\n\n for i in range(lag, N):\n X[\n i - lag,\n ] = [data[i - j] for j in range(lag + 1)]\n\n if not return_all:\n return X[-1, 1:]\n\n return X[:, 1:], X[:, 0]\n\n def _prepare_data_regional(self):\n data_df = self.data_df.loc[\n (self.data_df[self.group_column] == self.area),\n [\n \"data\",\n \"totale_positivi\",\n \"dimessi_guariti\",\n \"deceduti\",\n \"totale_casi\",\n \"nuovi_positivi\",\n ],\n ]\n\n data_df = data_df.query(self.data_filter + \" > \" + self.data_column)\n\n data_df[\"suscettibili\"] = self.pop - data_df[\"totale_casi\"]\n\n data_df = data_df.loc[\n :,\n [\n \"data\",\n \"totale_positivi\",\n \"dimessi_guariti\",\n \"deceduti\",\n \"suscettibili\",\n \"nuovi_positivi\",\n ],\n ]\n\n return data_df\n\n def _prepare_data_provincial(self):\n regione = self.data_df[self.data_df[self.group_column] == self.area][\n \"Region\"\n ].values[0]\n\n pop = self.get_prov_pop()\n\n pcm_data = self.pcm_data.loc[\n (self.pcm_data[\"denominazione_regione\"] == regione),\n [\n \"data\",\n \"totale_positivi\",\n \"dimessi_guariti\",\n \"deceduti\",\n \"totale_casi\",\n \"nuovi_positivi\",\n ],\n ].reset_index(drop=True)\n\n data_df = self.data_df.loc[\n (self.data_df[self.group_column] == self.area),\n [\"Date\", \"New_cases\", \"Curr_pos_cases\", \"Tot_deaths\"],\n ].reset_index(drop=True)\n\n recov_rate = (pcm_data[\"dimessi_guariti\"] / pcm_data[\"totale_casi\"])[\n : data_df.shape[0]\n ]\n\n recov_rate = self.fix_arr(recov_rate)\n\n recov = recov_rate * data_df[\"Curr_pos_cases\"].values\n data_df[\"dimessi_guariti\"] = recov\n\n infected = (\n data_df[\"Curr_pos_cases\"].values\n - data_df[\"Tot_deaths\"].values\n - data_df[\"dimessi_guariti\"]\n )\n\n data_df[\"totale_positivi\"] = infected\n\n data_df[\"suscettibili\"] = pop - data_df[\"Curr_pos_cases\"]\n\n query = (\n (\n pd.Timestamp(self.data_filter) + pd.DateOffset(self.days_to_predict)\n ).strftime(\"%Y%m%d\")\n + \" > \"\n + self.data_column\n )\n\n real_df = data_df.query(query)\n\n real_df.rename(\n columns={\n \"New_cases\": \"nuovi_positivi\",\n \"Curr_pos_cases\": \"totale_casi\",\n \"Tot_deaths\": \"deceduti\",\n \"Date\": \"data\",\n },\n inplace=True,\n )\n\n real_df = real_df.loc[\n :,\n [\n \"data\",\n \"totale_positivi\",\n \"dimessi_guariti\",\n \"deceduti\",\n \"suscettibili\",\n \"nuovi_positivi\",\n ],\n ]\n\n self._realdf = real_df\n\n data_df = data_df.query(self.data_filter + \" > \" + self.data_column)\n\n data_df.rename(\n columns={\n \"New_cases\": \"nuovi_positivi\",\n 
\"Curr_pos_cases\": \"totale_casi\",\n \"Tot_deaths\": \"deceduti\",\n \"Date\": \"data\",\n },\n inplace=True,\n )\n\n data_df = data_df.astype(\n {\n \"totale_positivi\": \"int32\",\n \"dimessi_guariti\": \"int32\",\n \"deceduti\": \"int32\",\n \"suscettibili\": \"int32\",\n \"nuovi_positivi\": \"int32\",\n }\n )\n\n data_df = data_df.loc[\n :,\n [\n \"data\",\n \"totale_positivi\",\n \"dimessi_guariti\",\n \"deceduti\",\n \"suscettibili\",\n \"nuovi_positivi\",\n ],\n ]\n\n return data_df\n\n def fit(self):\n if self.is_regional:\n self.pop = self.get_region_pop(\n region=self.area, pop_df=self.pop_prov_df, prov_df=self.prov_list_df\n )\n\n data_df = self._prepare_data_regional()\n else:\n self.pop = self.get_prov_pop()\n data_df = self._prepare_data_provincial()\n\n n = data_df.shape[0]\n\n gamma = (\n np.diff(data_df[\"dimessi_guariti\"].values)\n / data_df.iloc[: n - 1][\"totale_positivi\"].values\n )\n\n alpha = (\n np.diff(data_df[\"deceduti\"].values)\n / data_df.iloc[: n - 1][\"totale_positivi\"].values\n )\n\n beta = (\n (self.pop / data_df.iloc[: n - 1][\"suscettibili\"].values)\n * (\n np.diff(data_df[\"totale_positivi\"].values)\n + np.diff(data_df[\"dimessi_guariti\"].values)\n + np.diff(data_df[\"deceduti\"].values)\n )\n / data_df.iloc[: n - 1][\"totale_positivi\"].values\n )\n R0 = beta / (gamma + alpha)\n\n gamma = self.fix_arr(gamma)\n alpha = self.fix_arr(alpha)\n beta = self.fix_arr(beta)\n R0 = self.fix_arr(R0)\n\n reg_beta = LinearRegression().fit(*self.lag_data(beta, self.lag, True))\n reg_gamma = LinearRegression().fit(*self.lag_data(gamma, self.lag, True))\n reg_alpha = LinearRegression().fit(*self.lag_data(alpha, self.lag, True))\n\n S = np.zeros(self.days_to_predict + 2)\n I = np.zeros(self.days_to_predict + 2)\n R = np.zeros(self.days_to_predict + 2)\n D = np.zeros(self.days_to_predict + 2)\n S[0] = data_df.iloc[-1][\"suscettibili\"]\n I[0] = data_df.iloc[-1][\"totale_positivi\"]\n R[0] = data_df.iloc[-1][\"dimessi_guariti\"]\n D[0] = data_df.iloc[-1][\"deceduti\"]\n\n for i in range(self.days_to_predict + 1):\n _beta = self.fix_arr(\n reg_beta.predict(self.lag_data(beta, self.lag).reshape(1, -1))\n )\n _gamma = self.fix_arr(\n reg_gamma.predict(self.lag_data(gamma, self.lag).reshape(1, -1))\n )\n _alpha = self.fix_arr(\n reg_alpha.predict(self.lag_data(alpha, self.lag).reshape(1, -1))\n )\n\n beta = np.append(beta, _beta, axis=0)\n gamma = np.append(gamma, _gamma, axis=0)\n alpha = np.append(alpha, _alpha, axis=0)\n\n dIdt = np.round((1 + _beta * (S[i] / self.pop) - _gamma - _alpha) * I[i])\n dRdt = np.round(R[i] + _gamma * I[i])\n dDdt = np.round(D[i] + _alpha * I[i])\n dSdt = self.pop - dIdt[0] - dRdt[0] - dDdt[0]\n\n S[i + 1] = dSdt\n I[i + 1] = dIdt\n R[i + 1] = dRdt\n D[i + 1] = dDdt\n\n S = S[1:]\n I = I[1:]\n R = R[1:]\n D = D[1:]\n\n dates = pd.date_range(\n start=(data_df.iloc[-1][\"data\"] + pd.DateOffset(1)).strftime(\"%Y-%m-%d\"),\n periods=self.days_to_predict + 1,\n )\n\n tmp_df = pd.DataFrame(\n np.column_stack([np.zeros(self.days_to_predict + 1), I, R, D, S]),\n columns=[\n \"data\",\n \"totale_positivi\",\n \"dimessi_guariti\",\n \"deceduti\",\n \"suscettibili\",\n ],\n )\n\n tmp_df[\"data\"] = dates\n\n data_df = pd.concat([data_df, tmp_df], ignore_index=True)\n\n data_df[\"nuovi_positivi\"] = [0] + list(\n np.diff(data_df[\"totale_positivi\"].values)\n + np.diff(data_df[\"dimessi_guariti\"].values)\n + np.diff(data_df[\"deceduti\"].values)\n )\n\n data_df[\"nuovi_positivi\"] = data_df[\"nuovi_positivi\"].apply(\n lambda x: 0 if x < 0 else 
x\n )\n\n beta = np.append(beta, np.zeros((1,)), axis=0)\n gamma = np.append(gamma, np.zeros((1,)), axis=0)\n alpha = np.append(alpha, np.zeros((1,)), axis=0)\n\n data_df[\"beta\"] = beta\n data_df[\"gamma\"] = gamma\n data_df[\"alpha\"] = alpha\n data_df[\"R0\"] = self.fix_arr(beta / (gamma + alpha))\n data_df = data_df[:-1]\n\n data_df = data_df.astype(\n {\n \"totale_positivi\": \"int32\",\n \"dimessi_guariti\": \"int32\",\n \"deceduti\": \"int32\",\n \"suscettibili\": \"int32\",\n \"nuovi_positivi\": \"int32\",\n }\n )\n\n self._fdf = data_df\n self._realdf = self._get_real_data()\n\n return data_df\n\n @property\n def fitted_df(self):\n return self._fdf\n\n @property\n def real_df(self):\n return self._realdf\n\n def _get_real_data(self):\n if not self.is_regional:\n return self._realdf\n\n real_df = self.data_df[self.data_df[self.group_column] == self.area][\n [\n \"data\",\n \"totale_positivi\",\n \"dimessi_guariti\",\n \"deceduti\",\n \"totale_casi\",\n \"nuovi_positivi\",\n ]\n ]\n\n query = (\n pd.Timestamp(self.data_filter) + pd.DateOffset(self.days_to_predict)\n ).strftime(\"%Y%m%d\") + \" > data\"\n\n real_df = real_df.query(query)\n real_df[\"suscettibili\"] = self.pop - real_df[\"totale_casi\"]\n real_df = real_df[\n [\n \"data\",\n \"totale_positivi\",\n \"dimessi_guariti\",\n \"deceduti\",\n \"suscettibili\",\n \"nuovi_positivi\",\n ]\n ]\n\n self._realdf = real_df\n\n return real_df\n\n def extract_ys(self, y_true, y_pred, compart):\n if y_true is None or y_pred is None:\n y_true = self.real_df[compart].values\n y_pred = self.fitted_df[compart].values\n\n return y_true, y_pred\n\n def mae(self, y_true=None, y_pred=None, compart=None):\n return mean_absolute_error(*self.extract_ys(y_true, y_pred, compart))\n\n def mse(self, y_true=None, y_pred=None, compart=None):\n return mean_squared_error(*self.extract_ys(y_true, y_pred, compart))\n\n def rmse(self, y_true=None, y_pred=None, compart=None):\n return mean_squared_error(\n *self.extract_ys(y_true, y_pred, compart), squared=False\n )\n",
"import pandas as pd\n\nif __name__ == \"__main__\":\n data = pd.read_csv(\"DCIS_POPRES1_27072020130534278.csv\")\n\n df = data.loc[\n (data.SEXISTAT1 == 9)\n & (data.STATCIV2 == 99)\n & (data.ITTER107.str.len() >= 5)\n & (data.ETA1 != \"TOTAL\")\n ][[\"Territorio\", \"ETA1\", \"Value\"]]\n\n df.ETA1 = df.ETA1.str.replace(\"Y\", \"\")\n df.ETA1 = df.ETA1.str.replace(\"_GE\", \"\")\n df.Territorio = df.Territorio.str.replace(\n \"Valle d'Aosta / Vallée d'Aoste\", \"Valle d'Aosta\"\n )\n df.Territorio = df.Territorio.str.replace(\"Bolzano / Bozen\", \"Bolzano\")\n df = df.astype({\"ETA1\": int})\n\n df_final = pd.DataFrame()\n\n # Three groups: 0-25, 25-65, 65+\n for provincia in df.Territorio.unique():\n df1 = df.loc[df[\"Territorio\"] == provincia]\n\n a = df1[df1[\"ETA1\"] <= 25].groupby([\"Territorio\"]).agg({\"Value\": sum})\n\n b = (\n df1[(df1[\"ETA1\"] > 25) & (df1[\"ETA1\"] <= 65)]\n .groupby([\"Territorio\"])\n .agg({\"Value\": sum})\n )\n\n c = df1[df1[\"ETA1\"] > 65].groupby([\"Territorio\"]).agg({\"Value\": sum})\n\n tmp = pd.concat([a, b, c])\n tmp.reset_index(level=0, inplace=True)\n tmp = tmp.append(\n pd.DataFrame([{\"Territorio\": provincia, \"Value\": tmp.Value.sum()}]),\n ignore_index=True,\n )\n tmp[\"Eta\"] = [\"0-25\", \"25-65\", \"65-100\", \"Total\"]\n tmp[\"Percentage\"] = tmp.Value.apply(lambda x: x / tmp.Value.values[-1])\n\n df_final = pd.concat([df_final, tmp])\n\n df_final.to_csv(\"pop_prov_age_3_groups_2020.csv\", index=False)\n"
] | [
[
"pandas.concat",
"pandas.DateOffset",
"pandas.Timestamp",
"numpy.nan_to_num",
"scipy.integrate.odeint",
"numpy.round",
"numpy.append",
"numpy.diff",
"sklearn.linear_model.LinearRegression",
"numpy.exp",
"numpy.zeros",
"numpy.isinf",
"numpy.empty"
],
[
"pandas.concat",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ParikhKadam/NeMo | [
"ee11f7c4666d410d91f9da33c61f4819ea625013",
"ee11f7c4666d410d91f9da33c61f4819ea625013",
"ee11f7c4666d410d91f9da33c61f4819ea625013",
"ee11f7c4666d410d91f9da33c61f4819ea625013",
"ee11f7c4666d410d91f9da33c61f4819ea625013",
"d2120a40bf23d3e38ff5677c2685c712f297e6b1"
] | [
"nemo/collections/asr/parts/numba_utils.py",
"nemo/collections/asr/audio_preprocessing.py",
"nemo/collections/nlp/nm/trainables/dialogue_state_tracking/trade_generator_nm.py",
"scripts/get_ljspeech_data.py",
"nemo/collections/tts/parts/talknet.py",
"nemo/collections/tts/parts/fastspeech.py"
] | [
"# Copyright 2020 NVIDIA. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nfrom numba import jit\n\n\ndef phase_vocoder(D: np.ndarray, rate: float, phi_advance: np.ndarray, scale_buffer: np.ndarray):\n \"\"\"\n Optimized implementation of phase vocoder from Librosa.\n\n Reference implementation:\n - https://librosa.github.io/librosa/generated/librosa.core.phase_vocoder.html\n\n Args:\n D: Complex spectograms of shape [d, t, complex=2].\n rate: Speed rate, must be float greater than 0.\n phi_advance: Precomputed phase advance buffer array of length [n_fft + 1]\n scale_buffer: Precomputed numpy buffer array of length [n_fft + 1]\n\n Returns:\n Complex64 ndarray of shape [d, t / rate, complex=2]\n \"\"\"\n time_steps = np.arange(0, D.shape[1], rate, dtype=np.float)\n\n # Create an empty output array\n d_stretch = np.zeros((D.shape[0], len(time_steps)), D.dtype, order='F')\n\n # Phase accumulator; initialize to the first sample\n phase_acc = np.angle(D[:, 0])\n\n # Pad 0 columns to simplify boundary logic\n D = np.pad(D, [(0, 0), (0, 2)], mode='constant')\n\n d_stretch = _phase_vocoder_kernel(D, time_steps, phi_advance, d_stretch, phase_acc, scale_buffer)\n\n return d_stretch\n\n\n@jit(nopython=True, nogil=True)\ndef _phase_vocoder_kernel(D, time_steps, phi_advance, d_stretch, phase_acc, scale_buffer):\n \"\"\"\n Numba optimized kernel to compute the phase vocoder step.\n\n Args:\n D: Complex spectograms of shape [d, t, complex=2].\n rate: Speed rate, must be float greater than 0.\n time_steps: Numpy ndarray of linearly spaced time steps, shape = [t]\n phi_advance: Precomputed phase advance buffer array of length [n_fft + 1]\n d_stretch: Output complex matrix of shape [d, t / rate, complex=2]\n phase_acc: Phase accumulator initialized to first sample of shape [d, complex=2]\n scale_buffer: Precomputed numpy buffer array of length [n_fft + 1]\n\n Returns:\n Complex64 ndarray of shape [d, t / rate, complex=2]\n \"\"\"\n two_pi = 2.0 * np.pi\n\n for (t, step) in enumerate(time_steps):\n columns = D[:, int(step) : int(step + 2)]\n columns_0 = columns[:, 0]\n columns_1 = columns[:, 1]\n\n # Weighting for linear magnitude interpolation\n alpha = np.mod(step, 1.0)\n mag = (1.0 - alpha) * np.abs(columns_0) + alpha * np.abs(columns_1)\n\n # Store to output array\n d_stretch[:, t] = mag * np.exp(1.0j * phase_acc)\n\n # Compute phase advance\n dphase = np.angle(columns_1) - np.angle(columns_0) - phi_advance\n\n # Wrap to -pi:pi range\n scale = dphase / two_pi\n np.round(scale, 0, scale_buffer)\n\n dphase = dphase - two_pi * scale_buffer\n\n # Accumulate phase\n phase_acc += phi_advance + dphase\n\n return d_stretch\n",
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"\nThis file contains neural modules responsible for preprocessing audio data.\n\"\"\"\n__all__ = [\n 'AudioPreprocessing',\n 'AudioPreprocessor',\n 'AudioToMFCCPreprocessor',\n 'AudioToMelSpectrogramPreprocessor',\n 'AudioToSpectrogramPreprocessor',\n 'CropOrPadSpectrogramAugmentation',\n 'MultiplyBatch',\n 'SpectrogramAugmentation',\n 'TimeStretchAugmentation',\n]\n\nimport math\nfrom abc import abstractmethod\n\nimport numpy as np\nimport torch\nfrom packaging import version\n\nfrom .parts.features import FilterbankFeatures\nfrom .parts.spectr_augment import SpecAugment, SpecCutout\nfrom nemo.backends.pytorch import NonTrainableNM\nfrom nemo.core import Optimization\nfrom nemo.core.neural_types import *\nfrom nemo.utils import logging\nfrom nemo.utils.decorators import add_port_docs\n\ntry:\n import torchaudio\n import torchaudio.transforms\n import torchaudio.functional\n\n TORCHAUDIO_VERSION = version.parse(torchaudio.__version__)\n TORCHAUDIO_VERSION_MIN = version.parse('0.5')\n\n HAVE_TORCHAUDIO = True\nexcept ModuleNotFoundError:\n HAVE_TORCHAUDIO = False\n logging.warning('Could not import torchaudio. Some features might not work.')\n\ntry:\n from apex import amp\nexcept (AttributeError, ModuleNotFoundError) as e:\n logging.warning(\"Unable to import APEX. Mixed precision and distributed training will not work.\")\n\n\nclass AudioPreprocessor(NonTrainableNM):\n \"\"\"\n A base class for Neural Modules that performs audio preprocessing,\n transforming the wav files to features.\n \"\"\"\n\n def __init__(self, win_length, hop_length):\n super().__init__()\n\n self.win_length = win_length\n self.hop_length = hop_length\n\n self.disable_casts = self._opt_level == Optimization.mxprO1\n\n self.torch_windows = {\n 'hann': torch.hann_window,\n 'hamming': torch.hamming_window,\n 'blackman': torch.blackman_window,\n 'bartlett': torch.bartlett_window,\n 'ones': torch.ones,\n None: torch.ones,\n }\n\n @torch.no_grad()\n def forward(self, input_signal, length):\n if self.disable_casts:\n with amp.disable_casts():\n processed_signal = self.get_features(input_signal.to(torch.float), length)\n else:\n processed_signal = self.get_features(input_signal, length)\n\n processed_length = self.get_seq_len(length.float())\n\n return processed_signal, processed_length\n\n @abstractmethod\n def get_features(self, input_signal, length):\n # Called by forward(). 
Subclasses should implement this.\n pass\n\n def get_seq_len(self, length):\n # Called by forward()\n return torch.ceil(length / self.hop_length).to(dtype=torch.long)\n\n\nclass AudioToSpectrogramPreprocessor(AudioPreprocessor):\n \"\"\"Preprocessor that converts wavs to spectrograms.\n Uses torchaudio's Spectrogram class as a featurizer.\n\n Args:\n sample_rate (int): Sample rate of the input audio data.\n Defaults to 16000\n window_size (float): Size of window for fft in seconds\n Defaults to 0.02\n window_stride (float): Stride of window for fft in seconds\n Defaults to 0.01\n n_window_size (int): Size of window for fft in samples\n Defaults to None. Use one of window_size or n_window_size.\n n_window_stride (int): Stride of window for fft in samples\n Defaults to None. Use one of window_stride or n_window_stride.\n n_fft (int): Length of FT window. If None, it uses the smallest power\n of 2 that is larger than n_window_size.\n Defaults to None\n window (str): Windowing function for fft. can be one of ['hann',\n 'hamming', 'blackman', 'bartlett', 'none', 'null']\n Defaults to \"hann\"\n normalized (bool): Whether to normalize by magnitude after stft\n \"\"\"\n\n @property\n @add_port_docs()\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n # \"input_signal\": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)}),\n # \"length\": NeuralType({0: AxisType(BatchTag)}),\n \"input_signal\": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n @add_port_docs()\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n # \"processed_signal\": NeuralType(\n # {0: AxisType(BatchTag), 1: AxisType(SpectrogramSignalTag), 2: AxisType(ProcessedTimeTag),}\n # ),\n # \"processed_length\": NeuralType({0: AxisType(BatchTag)}),\n \"processed_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"processed_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n def __init__(\n self,\n sample_rate=16000,\n window_size=0.02,\n window_stride=0.01,\n n_window_size=None,\n n_window_stride=None,\n n_fft=None,\n window=\"hann\",\n normalized=True,\n ):\n self._sample_rate = sample_rate\n if not HAVE_TORCHAUDIO:\n raise ModuleNotFoundError(\n \"torchaudio is not installed but is necessary for \"\n \"AudioToSpectrogramPreprocessor. We recommend you try \"\n \"building it from source for the PyTorch version you have.\"\n )\n if window_size and n_window_size:\n raise ValueError(f\"{self} received both window_size and \" f\"n_window_size. Only one should be specified.\")\n if window_stride and n_window_stride:\n raise ValueError(\n f\"{self} received both window_stride and \" f\"n_window_stride. Only one should be specified.\"\n )\n if window_size:\n n_window_size = int(window_size * self._sample_rate)\n if window_stride:\n n_window_stride = int(window_stride * self._sample_rate)\n\n super().__init__(n_window_size, n_window_stride)\n\n self.win_length = n_window_size\n self.hop_length = n_window_stride\n\n self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))\n\n # Set window_fn. 
None defaults to torch.ones.\n window_fn = self.torch_windows.get(window, None)\n if window_fn is None:\n raise ValueError(\n f\"Window argument for AudioProcessor is invalid: {window}.\"\n f\"For no window function, use 'ones' or None.\"\n )\n\n # Create featurizer.\n # Calls torch.stft under the hood, and is hard-coded to use center=True\n self.featurizer = torchaudio.transforms.Spectrogram(\n n_fft=self.n_fft,\n win_length=self.win_length,\n hop_length=self.hop_length,\n window_fn=window_fn,\n normalized=normalized,\n )\n self.featurizer.to(self._device)\n\n def get_features(self, input_signal, length):\n return self.featurizer(input_signal)\n\n @property\n def sample_rate(self):\n return self._sample_rate\n\n\nclass AudioToMelSpectrogramPreprocessor(AudioPreprocessor):\n \"\"\"Featurizer that converts wavs to mel spectrograms.\n We don't use torchaudio's implementation here because the original\n implementation is not the same, so for the sake of backwards-compatibility\n this will use the old FilterbankFeatures for now.\n\n Args:\n sample_rate (int): Sample rate of the input audio data.\n Defaults to 16000\n window_size (float): Size of window for fft in seconds\n Defaults to 0.02\n window_stride (float): Stride of window for fft in seconds\n Defaults to 0.01\n n_window_size (int): Size of window for fft in samples\n Defaults to None. Use one of window_size or n_window_size.\n n_window_stride (int): Stride of window for fft in samples\n Defaults to None. Use one of window_stride or n_window_stride.\n window (str): Windowing function for fft. can be one of ['hann',\n 'hamming', 'blackman', 'bartlett']\n Defaults to \"hann\"\n normalize (str): Can be one of ['per_feature', 'all_features']; all\n other options disable feature normalization. 'all_features'\n normalizes the entire spectrogram to be mean 0 with std 1.\n 'pre_features' normalizes per channel / freq instead.\n Defaults to \"per_feature\"\n n_fft (int): Length of FT window. If None, it uses the smallest power\n of 2 that is larger than n_window_size.\n Defaults to None\n preemph (float): Amount of pre emphasis to add to audio. Can be\n disabled by passing None.\n Defaults to 0.97\n features (int): Number of mel spectrogram freq bins to output.\n Defaults to 64\n lowfreq (int): Lower bound on mel basis in Hz.\n Defaults to 0\n highfreq (int): Lower bound on mel basis in Hz.\n Defaults to None\n log (bool): Log features.\n Defaults to True\n log_zero_guard_type(str): Need to avoid taking the log of zero. There\n are two options: \"add\" or \"clamp\".\n Defaults to \"add\".\n log_zero_guard_value(float, or str): Add or clamp requires the number\n to add with or clamp to. log_zero_guard_value can either be a float\n or \"tiny\" or \"eps\". torch.finfo is used if \"tiny\" or \"eps\" is\n passed.\n Defaults to 2**-24.\n dither (float): Amount of white-noise dithering.\n Defaults to 1e-5\n pad_to (int): Ensures that the output size of the time dimension is\n a multiple of pad_to.\n Defaults to 16\n frame_splicing (int): Defaults to 1\n stft_conv (bool): If True, uses pytorch_stft and convolutions. 
If\n False, uses torch.stft.\n Defaults to False\n pad_value (float): The value that shorter mels are padded with.\n Defaults to 0\n mag_power (float): The power that the linear spectrogram is raised to\n prior to multiplication with mel basis.\n Defaults to 2 for a power spec\n \"\"\"\n\n @property\n @add_port_docs()\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n # \"input_signal\": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)}),\n # \"length\": NeuralType({0: AxisType(BatchTag)}),\n \"input_signal\": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n @add_port_docs()\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n\n processed_signal:\n\n 0: AxisType(BatchTag)\n\n 1: AxisType(MelSpectrogramSignalTag)\n\n 2: AxisType(ProcessedTimeTag)\n\n processed_length:\n\n 0: AxisType(BatchTag)\n\n \"\"\"\n return {\n # \"processed_signal\": NeuralType(\n # {0: AxisType(BatchTag), 1: AxisType(MelSpectrogramSignalTag), 2: AxisType(ProcessedTimeTag),}\n # ),\n # \"processed_length\": NeuralType({0: AxisType(BatchTag)}),\n \"processed_signal\": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),\n \"processed_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n def __init__(\n self,\n sample_rate=16000,\n window_size=0.02,\n window_stride=0.01,\n n_window_size=None,\n n_window_stride=None,\n window=\"hann\",\n normalize=\"per_feature\",\n n_fft=None,\n preemph=0.97,\n features=64,\n lowfreq=0,\n highfreq=None,\n log=True,\n log_zero_guard_type=\"add\",\n log_zero_guard_value=2 ** -24,\n dither=1e-5,\n pad_to=16,\n frame_splicing=1,\n stft_conv=False,\n pad_value=0,\n mag_power=2.0,\n ):\n self._sample_rate = sample_rate\n if window_size and n_window_size:\n raise ValueError(f\"{self} received both window_size and \" f\"n_window_size. Only one should be specified.\")\n if window_stride and n_window_stride:\n raise ValueError(\n f\"{self} received both window_stride and \" f\"n_window_stride. Only one should be specified.\"\n )\n if window_size:\n n_window_size = int(window_size * self._sample_rate)\n if window_stride:\n n_window_stride = int(window_stride * self._sample_rate)\n\n super().__init__(n_window_size, n_window_stride)\n\n self.featurizer = FilterbankFeatures(\n sample_rate=self._sample_rate,\n n_window_size=n_window_size,\n n_window_stride=n_window_stride,\n window=window,\n normalize=normalize,\n n_fft=n_fft,\n preemph=preemph,\n nfilt=features,\n lowfreq=lowfreq,\n highfreq=highfreq,\n log=log,\n log_zero_guard_type=log_zero_guard_type,\n log_zero_guard_value=log_zero_guard_value,\n dither=dither,\n pad_to=pad_to,\n frame_splicing=frame_splicing,\n stft_conv=stft_conv,\n pad_value=pad_value,\n mag_power=mag_power,\n )\n self.featurizer.to(self._device)\n\n def get_features(self, input_signal, length):\n return self.featurizer(input_signal, length)\n\n def get_seq_len(self, seq_len):\n return self.featurizer.get_seq_len(seq_len)\n\n @property\n def filter_banks(self):\n return self.featurizer.filter_banks\n\n\nclass AudioToMFCCPreprocessor(AudioPreprocessor):\n \"\"\"Preprocessor that converts wavs to MFCCs.\n Uses torchaudio.transforms.MFCC.\n\n Args:\n sample_rate: The sample rate of the audio.\n Defaults to 16000.\n window_size: Size of window for fft in seconds. Used to calculate the\n win_length arg for mel spectrogram.\n Defaults to 0.02\n window_stride: Stride of window for fft in seconds. 
Used to caculate\n the hop_length arg for mel spect.\n Defaults to 0.01\n n_window_size: Size of window for fft in samples\n Defaults to None. Use one of window_size or n_window_size.\n n_window_stride: Stride of window for fft in samples\n Defaults to None. Use one of window_stride or n_window_stride.\n window: Windowing function for fft. can be one of ['hann',\n 'hamming', 'blackman', 'bartlett', 'none', 'null'].\n Defaults to 'hann'\n n_fft: Length of FT window. If None, it uses the smallest power of 2\n that is larger than n_window_size.\n Defaults to None\n lowfreq (int): Lower bound on mel basis in Hz.\n Defaults to 0\n highfreq (int): Lower bound on mel basis in Hz.\n Defaults to None\n n_mels: Number of mel filterbanks.\n Defaults to 64\n n_mfcc: Number of coefficients to retain\n Defaults to 64\n dct_type: Type of discrete cosine transform to use\n norm: Type of norm to use\n log: Whether to use log-mel spectrograms instead of db-scaled.\n Defaults to True.\n \"\"\"\n\n @property\n @add_port_docs()\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n # \"input_signal\": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)}),\n # \"length\": NeuralType({0: AxisType(BatchTag)}),\n \"input_signal\": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n @add_port_docs()\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n # \"processed_signal\": NeuralType(\n # {0: AxisType(BatchTag), 1: AxisType(MFCCSignalTag), 2: AxisType(ProcessedTimeTag),}\n # ),\n # \"processed_length\": NeuralType({0: AxisType(BatchTag)}),\n \"processed_signal\": NeuralType(('B', 'D', 'T'), MFCCSpectrogramType()),\n \"processed_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n def __init__(\n self,\n sample_rate=16000,\n window_size=0.02,\n window_stride=0.01,\n n_window_size=None,\n n_window_stride=None,\n window='hann',\n n_fft=None,\n lowfreq=0.0,\n highfreq=None,\n n_mels=64,\n n_mfcc=64,\n dct_type=2,\n norm='ortho',\n log=True,\n ):\n self._sample_rate = sample_rate\n if not HAVE_TORCHAUDIO:\n raise ModuleNotFoundError(\n \"torchaudio is not installed but is necessary for \"\n \"AudioToMFCCPreprocessor. We recommend you try \"\n \"building it from source for the PyTorch version you have.\"\n )\n if window_size and n_window_size:\n raise ValueError(f\"{self} received both window_size and \" f\"n_window_size. Only one should be specified.\")\n if window_stride and n_window_stride:\n raise ValueError(\n f\"{self} received both window_stride and \" f\"n_window_stride. Only one should be specified.\"\n )\n # Get win_length (n_window_size) and hop_length (n_window_stride)\n if window_size:\n n_window_size = int(window_size * self._sample_rate)\n if window_stride:\n n_window_stride = int(window_stride * self._sample_rate)\n\n super().__init__(n_window_size, n_window_stride)\n\n mel_kwargs = {}\n\n mel_kwargs['f_min'] = lowfreq\n mel_kwargs['f_max'] = highfreq\n mel_kwargs['n_mels'] = n_mels\n\n mel_kwargs['n_fft'] = n_fft or 2 ** math.ceil(math.log2(n_window_size))\n\n mel_kwargs['win_length'] = n_window_size\n mel_kwargs['hop_length'] = n_window_stride\n\n # Set window_fn. 
None defaults to torch.ones.\n window_fn = self.torch_windows.get(window, None)\n if window_fn is None:\n raise ValueError(\n f\"Window argument for AudioProcessor is invalid: {window}.\"\n f\"For no window function, use 'ones' or None.\"\n )\n mel_kwargs['window_fn'] = window_fn\n\n # Use torchaudio's implementation of MFCCs as featurizer\n self.featurizer = torchaudio.transforms.MFCC(\n sample_rate=self._sample_rate,\n n_mfcc=n_mfcc,\n dct_type=dct_type,\n norm=norm,\n log_mels=log,\n melkwargs=mel_kwargs,\n )\n self.featurizer.to(self._device)\n\n def get_features(self, input_signal, length):\n return self.featurizer(input_signal)\n\n\nclass SpectrogramAugmentation(NonTrainableNM):\n \"\"\"\n Performs time and freq cuts in one of two ways.\n\n SpecAugment zeroes out vertical and horizontal sections as described in\n SpecAugment (https://arxiv.org/abs/1904.08779). Arguments for use with\n SpecAugment are `freq_masks`, `time_masks`, `freq_width`, and `time_width`.\n\n SpecCutout zeroes out rectangulars as described in Cutout\n (https://arxiv.org/abs/1708.04552). Arguments for use with Cutout are\n `rect_masks`, `rect_freq`, and `rect_time`.\n\n Args:\n freq_masks (int): how many frequency segments should be cut.\n Defaults to 0.\n time_masks (int): how many time segments should be cut\n Defaults to 0.\n freq_width (int): maximum number of frequencies to be cut in one\n segment.\n Defaults to 10.\n time_width (int): maximum number of time steps to be cut in one\n segment\n Defaults to 10.\n rect_masks (int): how many rectangular masks should be cut\n Defaults to 0.\n rect_freq (int): maximum size of cut rectangles along the frequency\n dimension\n Defaults to 5.\n rect_time (int): maximum size of cut rectangles along the time\n dimension\n Defaults to 25.\n \"\"\"\n\n @property\n @add_port_docs()\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n # \"input_spec\": NeuralType({0: AxisType(BatchTag), 1: AxisType(SpectrogramSignalTag), 2: AxisType(\n # TimeTag),})\n \"input_spec\": NeuralType(('B', 'D', 'T'), SpectrogramType())\n }\n\n @property\n @add_port_docs()\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n # \"augmented_spec\": NeuralType(\n # {0: AxisType(BatchTag), 1: AxisType(SpectrogramSignalTag), 2: AxisType(ProcessedTimeTag),}\n # )\n \"augmented_spec\": NeuralType(('B', 'D', 'T'), SpectrogramType())\n }\n\n def __init__(\n self,\n freq_masks=0,\n time_masks=0,\n freq_width=10,\n time_width=10,\n rect_masks=0,\n rect_time=5,\n rect_freq=20,\n rng=None,\n ):\n super().__init__()\n\n if rect_masks > 0:\n self.spec_cutout = SpecCutout(rect_masks=rect_masks, rect_time=rect_time, rect_freq=rect_freq, rng=rng,)\n self.spec_cutout.to(self._device)\n else:\n self.spec_cutout = lambda x: x\n\n if freq_masks + time_masks > 0:\n self.spec_augment = SpecAugment(\n freq_masks=freq_masks, time_masks=time_masks, freq_width=freq_width, time_width=time_width, rng=rng,\n )\n self.spec_augment.to(self._device)\n else:\n self.spec_augment = lambda x: x\n\n def forward(self, input_spec):\n augmented_spec = self.spec_cutout(input_spec)\n augmented_spec = self.spec_augment(augmented_spec)\n return augmented_spec\n\n\nclass MultiplyBatch(NonTrainableNM):\n \"\"\"\n Augmentation that repeats each element in a batch.\n Other augmentations can be applied afterwards.\n\n Args:\n mult_batch (int): number of repeats\n \"\"\"\n\n @property\n @add_port_docs()\n def input_ports(self):\n \"\"\"Returns definitions of 
module input ports.\n \"\"\"\n return {\n # \"in_x\": NeuralType({0: AxisType(BatchTag), 1: AxisType(SpectrogramSignalTag), 2: AxisType(TimeTag),}),\n # \"in_x_len\": NeuralType({0: AxisType(BatchTag)}),\n # \"in_y\": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)}),\n # \"in_y_len\": NeuralType({0: AxisType(BatchTag)}),\n \"in_x\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"in_x_len\": NeuralType(tuple('B'), LengthsType()),\n \"in_y\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"in_y_len\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n @add_port_docs()\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n # \"out_x\": NeuralType({0: AxisType(BatchTag), 1: AxisType(SpectrogramSignalTag), 2: AxisType(TimeTag),}),\n # \"out_x_len\": NeuralType({0: AxisType(BatchTag)}),\n # \"out_y\": NeuralType({0: AxisType(BatchTag), 1: AxisType(TimeTag)}),\n # \"out_y_len\": NeuralType({0: AxisType(BatchTag)}),\n \"out_x\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"out_x_len\": NeuralType(tuple('B'), LengthsType()),\n \"out_y\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"out_y_len\": NeuralType(tuple('B'), LengthsType()),\n }\n\n def __init__(self, mult_batch=1):\n super().__init__()\n self.mult = mult_batch\n\n @torch.no_grad()\n def forward(self, in_x, in_x_len, in_y, in_y_len):\n out_x = in_x.repeat(self.mult, 1, 1)\n out_y = in_y.repeat(self.mult, 1)\n out_x_len = in_x_len.repeat(self.mult)\n out_y_len = in_y_len.repeat(self.mult)\n\n return out_x, out_x_len, out_y, out_y_len\n\n\nclass CropOrPadSpectrogramAugmentation(NonTrainableNM):\n \"\"\"\n Pad or Crop the incoming Spectrogram to a certain shape.\n\n Args:\n audio_length (int): the final number of timesteps that is required.\n The signal will be either padded or cropped temporally to this\n size.\n \"\"\"\n\n def __init__(self, audio_length, **kwargs):\n super(CropOrPadSpectrogramAugmentation, self).__init__()\n self.audio_length = audio_length\n\n @torch.no_grad()\n def forward(self, input_signal, length):\n image = input_signal\n num_images = image.shape[0]\n\n audio_length = self.audio_length\n image_len = image.shape[-1]\n\n # Crop long signal\n if image_len > audio_length: # randomly slice\n cutout_images = []\n offset = torch.randint(low=0, high=image_len - audio_length + 1, size=[num_images])\n\n # TODO: Look into advanced broadcasting to speed up section\n for idx, offset in enumerate(offset):\n cutout_images.append(image[idx : idx + 1, :, offset : offset + audio_length])\n\n image = torch.cat(cutout_images, dim=0)\n del cutout_images\n\n else: # symmetrically pad short signal with zeros\n pad_left = (audio_length - image_len) // 2\n pad_right = (audio_length - image_len) // 2\n\n if (audio_length - image_len) % 2 == 1:\n pad_right += 1\n\n image = torch.nn.functional.pad(image, [pad_left, pad_right], mode=\"constant\", value=0)\n\n # Replace dynamic length sequences with static number of timesteps\n length = (length * 0) + audio_length\n\n return image, length\n\n @property\n def input_ports(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n # \"input_signal\": NeuralType(\n # {0: AxisType(BatchTag), 1: AxisType(SpectrogramSignalTag), 2: AxisType(ProcessedTimeTag), }\n # ),\n # \"length\": NeuralType({0: AxisType(BatchTag)}),\n \"input_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n def output_ports(self):\n \"\"\"Returns 
definitions of module output ports.\n \"\"\"\n return {\n # \"processed_signal\": NeuralType(\n # {0: AxisType(BatchTag), 1: AxisType(SpectrogramSignalTag), 2: AxisType(ProcessedTimeTag), }\n # ),\n # \"processed_length\": NeuralType({0: AxisType(BatchTag)}),\n \"processed_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"processed_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n\nclass TimeStretchAugmentation(NonTrainableNM):\n def __init__(\n self,\n sample_rate: int,\n probability: float,\n min_speed_rate: float = 0.9,\n max_speed_rate: float = 1.1,\n num_rates: int = 5,\n n_fft: int = 512,\n ):\n \"\"\"\n Time-stretch a batch of audio series by a fixed rate while preserving pitch.\n\n Note that while the speed rate is sampled independently for every batch,\n all samples of that batch will be augmented by the same speed rate.\n\n Note:\n This is a simplified implementation, intended primarily for reference and pedagogical purposes.\n It makes no attempt to handle transients, and is likely to produce audible artifacts.\n\n Args:\n sample_rate: Sampling rate.\n probability: Float value declaring chance of the input being augmented.\n Must be a float value in the range [0, 1].\n min_speed_rate: Minimum sampling rate modifier.\n max_speed_rate: Maximum sampling rate modifier.\n num_rates: Number of discrete rates to allow. Can be a positive or negative\n integer.\n If a positive integer greater than 0 is provided, the range of\n speed rates will be discretized into `num_rates` values.\n If a negative integer or 0 is provided, the full range of speed rates\n will be sampled uniformly.\n Note: If a positive integer is provided and the resultant discretized\n range of rates contains the value '1.0', then those samples with rate=1.0,\n will not be augmented at all and simply skipped. This is to avoid unnecessary\n augmentation and increase computation time. Effective augmentation chance\n in such a case is = `prob * (num_rates - 1 / num_rates) * 100`% chance\n where `prob` is the global probability of a sample being augmented.\n n_fft: Number of fft filters to be computed.\n \"\"\"\n super(TimeStretchAugmentation, self).__init__()\n\n if probability > 1.0 or probability < 0.0:\n raise ValueError(\"`probability` must be between 0 and 1\")\n\n if not HAVE_TORCHAUDIO:\n raise ModuleNotFoundError(\n \"torchaudio is not installed but is necessary for \"\n \"TimeStretchAugmentation. We recommend you try \"\n \"installing it from conda for the PyTorch version you have.\"\n )\n\n # Check torchaudio version; inform user of potential issue\n if TORCHAUDIO_VERSION < TORCHAUDIO_VERSION_MIN:\n logging.error(\n \"Current installed version of `torchaudio` %s is less than the recommended minimum \"\n \"version of %s. Please note that this may cause deadlocks when using distributed \"\n \"data parallel training. 
Please follow the instructions at https://github.com/pytorch/audio \"\n \"to update torchaudio.\",\n str(TORCHAUDIO_VERSION),\n str(TORCHAUDIO_VERSION_MIN),\n )\n\n min_rate = min(min_speed_rate, max_speed_rate)\n if min_rate < 0.0:\n raise ValueError(\"Minimum sampling rate modifier must be > 0.\")\n\n self._sample_rate = sample_rate\n self.probability = float(probability)\n self.min_rate = float(min_speed_rate)\n self.max_rate = float(max_speed_rate)\n self.num_rates = num_rates\n if num_rates > 0:\n self._rates = np.linspace(min_speed_rate, max_speed_rate, num_rates)\n self._rng = np.random.RandomState()\n\n self._n_fft = n_fft\n self._hop_length = n_fft // 2\n self._stft_window = torch.hann_window(self._n_fft, periodic=True, device=self._device)\n self._phi_advance = torch.linspace(0, np.pi * self._hop_length, self._hop_length + 1, device=self._device)\n self._phi_advance = self._phi_advance.view(-1, 1)\n\n @torch.no_grad()\n def forward(self, input_signal, length):\n proba = self._rng.uniform(0.0, 1.0)\n\n if proba > self.probability:\n return input_signal, length\n\n # Select speed rate either from choice or random sample\n if self.num_rates < 0:\n speed_rate = self._rng.uniform(self.min_rate, self.max_rate)\n else:\n speed_rate = np.random.choice(self._rates)\n\n # Skip perturbation in case of identity speed rate\n if speed_rate == 1.0:\n return input_signal, length\n\n features = self._stft(input_signal, self._n_fft, self._hop_length)\n features = self._phase_vocoder(features, speed_rate)\n\n # Predict the length of y_stretch\n len_stretch = int(round(input_signal.shape[1] / speed_rate))\n\n audio = self._istft(features, len_stretch)\n\n length = (length * speed_rate).type(torch.long)\n\n return audio, length\n\n def _stft(self, data: torch.Tensor, n_fft: int, hop_length: int):\n win_length = n_fft\n window = self._stft_window\n\n stft = torch.stft(\n data,\n n_fft=n_fft,\n hop_length=hop_length,\n win_length=win_length,\n window=window,\n center=True,\n pad_mode='reflect',\n normalized=False,\n )\n return stft\n\n def _phase_vocoder(self, data: torch.Tensor, rate: float):\n data_stretch = torchaudio.functional.phase_vocoder(data, rate, self._phi_advance)\n return data_stretch\n\n def _istft(self, data: torch.Tensor, len_stretch: int):\n n_fft = 2 * (data.shape[1] - 1)\n hop_length = self._hop_length\n win_length = n_fft\n window = self._stft_window\n\n audio = torchaudio.functional.istft(\n data,\n n_fft,\n hop_length,\n win_length,\n window=window,\n center=True,\n pad_mode='reflect',\n normalized=False,\n length=len_stretch,\n )\n\n return audio\n\n @property\n @add_port_docs()\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n \"input_signal\": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n @add_port_docs()\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n \"processed_signal\": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),\n \"processed_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n\ndef AudioPreprocessing(*args, **kwargs):\n raise NotImplementedError(\n \"AudioPreprocessing has been deprecated and replaced by: \"\n \"AudioToMFCCPreprocessor, AudioToMelSpectrogramPreprocessor, and \"\n \"AudioToSpectrogramPreprocessor. For most ASR purposes \"\n \"AudioToMelSpectrogramPreprocessor does the same as the old \"\n \"AudioPreprocessing.\"\n )\n",
"# =============================================================================\n# Copyright 2020 NVIDIA. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n# =============================================================================\n# Copyright 2019 Salesforce Research.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom\n# the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR\n# THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n# =============================================================================\n\n\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn as nn\n\nfrom nemo.backends.pytorch.nm import TrainableNM\nfrom nemo.core.neural_types import *\nfrom nemo.utils.decorators import add_port_docs\n\n__all__ = ['TRADEGenerator']\n\n\nclass TRADEGenerator(TrainableNM):\n \"\"\"\n The generator module for state tracking model TRADE\n Args:\n vocab (Vocab): an instance of Vocab containing the vocabularey\n embeddings (Tensor): word embedding matrix\n hid_size (int): hidden size of the GRU decoder\n dropout (float): dropout of the GRU\n slots (list): list of slots\n nb_gate (int): number of gates\n teacher_forcing (float): 0.5\n \"\"\"\n\n @property\n @add_port_docs()\n def input_ports(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n 'encoder_hidden': NeuralType(('B', 'T', 'D'), ChannelType()),\n 'encoder_outputs': NeuralType(('B', 'T', 'D'), ChannelType()),\n 'dialog_ids': NeuralType(('B', 'T'), elements_type=TokenIndex()),\n 'dialog_lens': NeuralType(tuple('B'), elements_type=Length()),\n 'targets': NeuralType(('B', 'D', 'T'), LabelsType(), optional=True),\n }\n\n @property\n @add_port_docs()\n def output_ports(self):\n \"\"\"Returns definitions of module output ports.\n\n point_outputs: outputs of the generator\n gate_outputs: outputs of gating heads\n \"\"\"\n return {\n 'point_outputs': NeuralType(('B', 'T', 'D', 'D'), LogitsType()),\n 'gate_outputs': NeuralType(('B', 'D', 'D'), 
LogitsType()),\n }\n\n def __init__(self, vocab, embeddings, hid_size, dropout, slots, nb_gate, teacher_forcing=0.5, max_res_len=10):\n super().__init__()\n self.vocab_size = len(vocab)\n self.vocab = vocab\n self.embedding = embeddings\n self.dropout = nn.Dropout(dropout)\n self.rnn = nn.GRU(hid_size, hid_size, dropout=dropout, batch_first=True)\n self.nb_gate = nb_gate\n self.hidden_size = hid_size\n self.w_ratio = nn.Linear(3 * hid_size, 1)\n self.w_gate = nn.Linear(hid_size, nb_gate)\n self.softmax = nn.Softmax(dim=1)\n self.sigmoid = nn.Sigmoid()\n self.slots = slots\n self.teacher_forcing = teacher_forcing\n # max_res_len is used in evaluation mode or when targets are not provided\n self.max_res_len = max_res_len\n\n self._slots_split_to_index()\n self.slot_emb = nn.Embedding(len(self.slot_w2i), hid_size)\n self.slot_emb.weight.data.normal_(0, 0.1)\n self.to(self._device)\n\n def _slots_split_to_index(self):\n split_slots = [slot.split('-') for slot in self.slots]\n domains = [split_slot[0] for split_slot in split_slots]\n slots = [split_slot[1] for split_slot in split_slots]\n split_slots = list({s: 0 for s in sum(split_slots, [])})\n self.slot_w2i = {split_slots[i]: i for i in range(len(split_slots))}\n self.domain_idx = torch.tensor([self.slot_w2i[domain] for domain in domains], device=self._device)\n self.subslot_idx = torch.tensor([self.slot_w2i[slot] for slot in slots], device=self._device)\n\n def forward(self, encoder_hidden, encoder_outputs, dialog_ids, dialog_lens, targets=None):\n if (not self.training) or (random.random() > self.teacher_forcing):\n use_teacher_forcing = False\n else:\n use_teacher_forcing = True\n\n batch_size = encoder_hidden.shape[0]\n\n if isinstance(targets, torch.Tensor):\n max_res_len = targets.shape[2]\n targets = targets.transpose(0, 1)\n else:\n max_res_len = self.max_res_len\n\n all_point_outputs = torch.zeros(len(self.slots), batch_size, max_res_len, self.vocab_size, device=self._device)\n all_gate_outputs = torch.zeros(len(self.slots), batch_size, self.nb_gate, device=self._device)\n\n domain_emb = self.slot_emb(self.domain_idx).to(self._device)\n subslot_emb = self.slot_emb(self.subslot_idx).to(self._device)\n slot_emb = domain_emb + subslot_emb\n slot_emb = slot_emb.unsqueeze(1)\n slot_emb = slot_emb.repeat(1, batch_size, 1)\n decoder_input = self.dropout(slot_emb).view(-1, self.hidden_size)\n hidden = encoder_hidden[:, 0:1, :].transpose(0, 1).repeat(len(self.slots), 1, 1)\n\n hidden = hidden.view(-1, self.hidden_size).unsqueeze(0)\n\n enc_len = dialog_lens.repeat(len(self.slots))\n\n maxlen = encoder_outputs.size(1)\n padding_mask_bool = ~(torch.arange(maxlen, device=self._device)[None, :] <= enc_len[:, None])\n padding_mask = torch.zeros_like(padding_mask_bool, dtype=encoder_outputs.dtype, device=self._device)\n padding_mask.masked_fill_(mask=padding_mask_bool, value=-np.inf)\n\n for wi in range(max_res_len):\n dec_state, hidden = self.rnn(decoder_input.unsqueeze(1), hidden)\n\n enc_out = encoder_outputs.repeat(len(self.slots), 1, 1)\n context_vec, logits, prob = TRADEGenerator.attend(enc_out, hidden.squeeze(0), padding_mask)\n\n if wi == 0:\n all_gate_outputs = torch.reshape(self.w_gate(context_vec), all_gate_outputs.size())\n\n p_vocab = TRADEGenerator.attend_vocab(self.embedding.weight, hidden.squeeze(0))\n p_gen_vec = torch.cat([dec_state.squeeze(1), context_vec, decoder_input], -1)\n vocab_pointer_switches = self.sigmoid(self.w_ratio(p_gen_vec))\n p_context_ptr = torch.zeros(p_vocab.size(), device=self._device)\n\n 
p_context_ptr.scatter_add_(1, dialog_ids.repeat(len(self.slots), 1), prob)\n\n final_p_vocab = (1 - vocab_pointer_switches).expand_as(\n p_context_ptr\n ) * p_context_ptr + vocab_pointer_switches.expand_as(p_context_ptr) * p_vocab\n pred_word = torch.argmax(final_p_vocab, dim=1)\n\n all_point_outputs[:, :, wi, :] = torch.reshape(\n final_p_vocab, (len(self.slots), batch_size, self.vocab_size)\n )\n\n if use_teacher_forcing and isinstance(targets, torch.Tensor):\n decoder_input = self.embedding(torch.flatten(targets[:, :, wi]))\n else:\n decoder_input = self.embedding(pred_word)\n\n decoder_input = decoder_input.to(self._device)\n all_point_outputs = all_point_outputs.transpose(0, 1).contiguous()\n all_gate_outputs = all_gate_outputs.transpose(0, 1).contiguous()\n return all_point_outputs, all_gate_outputs\n\n @staticmethod\n def attend(seq, cond, padding_mask):\n scores_ = cond.unsqueeze(1).expand_as(seq).mul(seq).sum(2)\n scores_ = scores_ + padding_mask\n scores = F.softmax(scores_, dim=1)\n context = scores.unsqueeze(2).expand_as(seq).mul(seq).sum(1)\n return context, scores_, scores\n\n @staticmethod\n def attend_vocab(seq, cond):\n scores_ = cond.matmul(seq.transpose(1, 0))\n scores = F.softmax(scores_, dim=1)\n return scores\n",
"# Copyright (c) 2019 NVIDIA Corporation\n#\n# USAGE: python get_ljspeech_data.py --data_root=<where to put data>\n\nimport argparse\nimport json\nimport logging\nimport os\nimport random\nimport tarfile\nimport urllib.request\n\nfrom scipy.io.wavfile import read\n\nURL = \"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2\"\n\n\ndef __maybe_download_file(destination: str, source: str):\n \"\"\"\n Downloads source to destination if it doesn't exist.\n If exists, skips download\n Args:\n destination: local filepath\n source: url of resource\n\n Returns:\n\n \"\"\"\n source = URL\n if not os.path.exists(destination):\n logging.info(f\"{destination} does not exist. Downloading ...\")\n urllib.request.urlretrieve(source, filename=destination + '.tmp')\n os.rename(destination + '.tmp', destination)\n logging.info(f\"Downloaded {destination}.\")\n else:\n logging.info(f\"Destination {destination} exists. Skipping.\")\n return destination\n\n\ndef __extract_all_files(filepath: str, data_root: str, data_dir: str):\n if not os.path.exists(data_dir):\n extract_file(filepath, data_root)\n audio_dir = os.path.join(data_dir, 'wav')\n for subfolder, _, filelist in os.walk(audio_dir):\n for ftar in filelist:\n extract_file(os.path.join(subfolder, ftar), subfolder)\n else:\n logging.info(f'Skipping extracting. Data already there {data_dir}')\n\n\ndef extract_file(filepath: str, data_dir: str):\n try:\n tar = tarfile.open(filepath)\n tar.extractall(data_dir)\n tar.close()\n except Exception:\n logging.info('Not extracting. Maybe already there?')\n\n\ndef __process_data(data_folder: str, dst_folder: str):\n \"\"\"\n To generate manifest\n Args:\n data_folder: source with wav files\n dst_folder: where manifest files will be stored\n Returns:\n\n \"\"\"\n\n if not os.path.exists(dst_folder):\n os.makedirs(dst_folder)\n\n metadata_csv_path = os.path.join(data_folder, \"metadata.csv\")\n wav_folder = os.path.join(data_folder, \"wavs\")\n entries = []\n\n with open(metadata_csv_path) as f:\n line = f.readline()\n while line:\n file, _, transcript = line.split(\"|\")\n wav_file = os.path.join(wav_folder, file + \".wav\")\n sr, y = read(wav_file)\n assert sr == 22050\n duration = len(y) / sr\n\n entry = {}\n entry['audio_filepath'] = os.path.abspath(wav_file)\n entry['duration'] = float(duration)\n entry['text'] = transcript\n entries.append(entry)\n line = f.readline()\n\n # Randomly split 64 samples from the entire dataset to create the\n # validation set\n random.shuffle(entries)\n training_set = entries[:-64]\n val_set = entries[-64:]\n with open(os.path.join(dst_folder, \"ljspeech_train.json\"), 'w') as fout:\n for m in training_set:\n fout.write(json.dumps(m) + '\\n')\n with open(os.path.join(dst_folder, \"ljspeech_eval.json\"), 'w') as fout:\n for m in val_set:\n fout.write(json.dumps(m) + '\\n')\n\n\ndef main():\n parser = argparse.ArgumentParser(description='LJSpeech Data download')\n parser.add_argument(\"--data_root\", required=True, default=None, type=str)\n args = parser.parse_args()\n\n data_root = args.data_root\n data_set = \"LJSpeech-1.1\"\n data_folder = os.path.join(data_root, data_set)\n\n logging.info(f\"Working on: {data_set}\")\n\n # Download and extract\n if not os.path.exists(data_folder):\n file_path = os.path.join(data_root, data_set + \".tar.bz2\")\n logging.info(f\"Getting {data_set}\")\n __maybe_download_file(file_path, data_set)\n logging.info(f\"Extracting {data_set}\")\n __extract_all_files(file_path, data_root, data_folder)\n\n logging.info(f\"Processing {data_set}\")\n 
__process_data(data_folder, data_folder)\n logging.info('Done!')\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright 2020 NVIDIA. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Inspired by: https://github.com/r9y9/wavenet_vocoder\n# Copyright (c) 2017: Ryuichi Yamamoto.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of\n# the Software.\n\nimport numpy as np\nimport torch\nfrom torch.nn import functional as F\n\n\ndef dmld_loss(y_pred, y_true, num_classes):\n \"\"\"Discretized mixture of logistic distributions loss\n\n https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py\n https://arxiv.org/pdf/1701.05517.pdf\n\n Args:\n y_pred (Tensor): Predicted output (B x T x C)\n y_true (Tensor): Target (B x T).\n num_classes (int): Number of classes\n\n Returns\n Tensor: loss\n\n \"\"\"\n\n def log_sum_exp(x):\n \"\"\" numerically stable log_sum_exp implementation that prevents overflow \"\"\"\n axis = len(x.size()) - 1\n m, _ = torch.max(x, dim=axis)\n m2, _ = torch.max(x, dim=axis, keepdim=True)\n return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))\n\n z_shape = y_pred.size(-1)\n assert z_shape % 3 == 0\n nr_mix = z_shape // 3\n\n # unpack parameters. 
(B, T, num_mixtures) x 3\n logit_probs = y_pred[:, :, :nr_mix]\n means = y_pred[:, :, nr_mix : 2 * nr_mix]\n log_scales = torch.clamp(y_pred[:, :, 2 * nr_mix : 3 * nr_mix], min=-7.0)\n\n # B x T -> B x T x num_mixtures\n y_true = y_true.unsqueeze(-1).expand_as(means)\n\n centered_y = y_true - means\n inv_stdv = torch.exp(-log_scales)\n plus_in = inv_stdv * (centered_y + 1.0 / (num_classes - 1))\n cdf_plus = torch.sigmoid(plus_in)\n min_in = inv_stdv * (centered_y - 1.0 / (num_classes - 1))\n cdf_min = torch.sigmoid(min_in)\n\n # log probability for edge case of 0 (before scaling)\n # equivalent: torch.log(torch.sigmoid(plus_in))\n log_cdf_plus = plus_in - F.softplus(plus_in)\n\n # log probability for edge case of 255 (before scaling)\n # equivalent: (1 - torch.sigmoid(min_in)).log()\n log_one_minus_cdf_min = -F.softplus(min_in)\n\n # probability for all other cases\n cdf_delta = cdf_plus - cdf_min\n\n mid_in = inv_stdv * centered_y\n # log probability in the center of the bin, to be used in extreme cases\n # (not actually used in our code)\n log_pdf_mid = mid_in - log_scales - 2.0 * F.softplus(mid_in)\n\n inner_inner_cond = (cdf_delta > 1e-5).float()\n # noinspection PyTypeChecker\n inner_inner_out = inner_inner_cond * torch.log(torch.clamp(cdf_delta, min=1e-12)) + (1.0 - inner_inner_cond) * (\n log_pdf_mid - np.log((num_classes - 1) / 2)\n )\n inner_cond = (y_true > 0.999).float()\n inner_out = inner_cond * log_one_minus_cdf_min + (1.0 - inner_cond) * inner_inner_out\n cond = (y_true < -0.999).float()\n log_probs = cond * log_cdf_plus + (1.0 - cond) * inner_out\n\n log_probs = log_probs + F.log_softmax(logit_probs, -1)\n\n return -log_sum_exp(log_probs)\n\n\ndef dmld_sample(y):\n \"\"\"Sample from discretized mixture of logistic distributions.\n\n Args:\n y (Tensor): B x T x C\n\n Returns:\n Tensor: sample in range of [-1.0, 1.0].\n\n \"\"\"\n\n z_shape = y.size(-1)\n assert z_shape % 3 == 0\n nr_mix = z_shape // 3\n\n # B x T x C\n logit_probs = y[:, :, :nr_mix]\n\n # sample mixture indicator from softmax\n temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)\n temp = logit_probs.data - torch.log(-torch.log(temp))\n _, argmax = temp.max(dim=-1)\n\n # (B, T) -> (B, T, nr_mix)\n one_hot = torch.zeros(argmax.size() + (nr_mix,), dtype=torch.float, device=argmax.device)\n one_hot.scatter_(len(argmax.size()), argmax.unsqueeze(-1), 1.0)\n\n # select logistic parameters\n means = torch.sum(y[:, :, nr_mix : 2 * nr_mix] * one_hot, dim=-1)\n log_scales = torch.sum(y[:, :, 2 * nr_mix : 3 * nr_mix] * one_hot, dim=-1)\n log_scales = torch.clamp(log_scales, min=-7.0)\n # sample from logistic & clip to interval\n # we don't actually round to the nearest 8bit value when sampling\n u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5)\n x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1.0 - u))\n\n x = torch.clamp(torch.clamp(x, min=-1.0), max=1.0)\n\n return x\n",
"# Copyright 2020 NVIDIA. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport os\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\n\nclass FastSpeechDataset:\n def __init__(self, audio_dataset, durs_dir):\n self._audio_dataset = audio_dataset\n self._durs_dir = durs_dir\n\n def __getitem__(self, index):\n audio, audio_len, text, text_len = self._audio_dataset[index]\n dur_true = torch.tensor(np.load(os.path.join(self._durs_dir, f'{index}.npy'))).long()\n return dict(audio=audio, audio_len=audio_len, text=text, text_len=text_len, dur_true=dur_true)\n\n def __len__(self):\n return len(self._audio_dataset)\n\n\nclass LengthRegulator(nn.Module):\n \"\"\"Length Regulator.\"\"\"\n\n def __init__(self, encoder_output_size, duration_predictor_filter_size, duration_predictor_kernel_size, dropout):\n super(LengthRegulator, self).__init__()\n\n self.duration_predictor = DurationPredictor(\n input_size=encoder_output_size,\n filter_size=duration_predictor_filter_size,\n kernel=duration_predictor_kernel_size,\n conv_output_size=duration_predictor_filter_size,\n dropout=dropout,\n )\n\n def forward(self, encoder_output, encoder_output_mask, target=None, alpha=1.0, mel_max_length=None):\n duration_predictor_output = self.duration_predictor(encoder_output, encoder_output_mask)\n\n if self.training:\n output, dec_pos = self.get_output(encoder_output, target, alpha, mel_max_length)\n else:\n duration_predictor_output = torch.clamp_min(torch.exp(duration_predictor_output) - 1, 0)\n\n output, dec_pos = self.get_output(encoder_output, duration_predictor_output, alpha)\n\n return output, dec_pos, duration_predictor_output\n\n @staticmethod\n def get_output(encoder_output, duration_predictor_output, alpha, mel_max_length=None):\n output = list()\n dec_pos = list()\n\n for i in range(encoder_output.size(0)):\n repeats = duration_predictor_output[i].float() * alpha\n repeats = torch.round(repeats).long()\n output.append(torch.repeat_interleave(encoder_output[i], repeats, dim=0))\n dec_pos.append(torch.from_numpy(np.indices((output[i].shape[0],))[0] + 1))\n\n output = torch.nn.utils.rnn.pad_sequence(output, batch_first=True)\n dec_pos = torch.nn.utils.rnn.pad_sequence(dec_pos, batch_first=True)\n\n dec_pos = dec_pos.to(output.device, non_blocking=True)\n\n if mel_max_length:\n output = output[:, :mel_max_length]\n dec_pos = dec_pos[:, :mel_max_length]\n\n return output, dec_pos\n\n\nclass ConvTranspose(nn.Module):\n \"\"\"Convolution Module with transposes of last two dimensions.\"\"\"\n\n def __init__(\n self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=True, w_init='relu'\n ):\n super(ConvTranspose, self).__init__()\n\n self.conv = nn.Conv1d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n bias=bias,\n )\n\n nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.calculate_gain(w_init))\n\n def forward(self, x):\n x = x.contiguous().transpose(1, 2)\n x = 
self.conv(x)\n x = x.contiguous().transpose(1, 2)\n\n return x\n\n\nclass DurationPredictor(nn.Module):\n \"\"\"Duration Predictor.\"\"\"\n\n def __init__(self, input_size, filter_size, kernel, conv_output_size, dropout):\n super(DurationPredictor, self).__init__()\n\n self.input_size = input_size\n self.filter_size = filter_size\n self.kernel = kernel\n self.conv_output_size = conv_output_size\n self.dropout = dropout\n\n self.conv_layer = nn.Sequential(\n collections.OrderedDict(\n [\n (\n \"conv1d_1\",\n ConvTranspose(self.input_size, self.filter_size, kernel_size=self.kernel, padding=1),\n ),\n (\"relu_1\", nn.ReLU()),\n (\"layer_norm_1\", nn.LayerNorm(self.filter_size)),\n (\"dropout_1\", nn.Dropout(self.dropout)),\n (\n \"conv1d_2\",\n ConvTranspose(self.filter_size, self.filter_size, kernel_size=self.kernel, padding=1),\n ),\n (\"relu_2\", nn.ReLU()),\n (\"layer_norm_2\", nn.LayerNorm(self.filter_size)),\n (\"dropout_2\", nn.Dropout(self.dropout)),\n ]\n )\n )\n\n self.linear_layer = nn.Linear(self.conv_output_size, 1, bias=True)\n\n def forward(self, encoder_output, encoder_output_mask):\n encoder_output = encoder_output * encoder_output_mask\n\n out = self.conv_layer(encoder_output)\n out = self.linear_layer(out)\n out = out * encoder_output_mask\n out = out.squeeze(-1)\n\n return out\n"
] | [
[
"numpy.pad",
"numpy.abs",
"numpy.arange",
"numpy.round",
"numpy.mod",
"numpy.angle",
"numpy.exp"
],
[
"torch.linspace",
"torch.ceil",
"torch.randint",
"numpy.linspace",
"torch.cat",
"numpy.random.choice",
"torch.no_grad",
"numpy.random.RandomState",
"torch.hann_window",
"torch.nn.functional.pad",
"torch.stft"
],
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.nn.GRU",
"torch.zeros_like",
"torch.nn.Sigmoid",
"torch.tensor",
"torch.nn.Linear",
"torch.flatten",
"torch.arange",
"torch.argmax"
],
[
"scipy.io.wavfile.read"
],
[
"torch.sigmoid",
"numpy.log",
"torch.max",
"torch.nn.functional.log_softmax",
"torch.sum",
"torch.exp",
"torch.log",
"torch.clamp",
"torch.nn.functional.softplus"
],
[
"torch.nn.init.calculate_gain",
"torch.nn.Dropout",
"torch.round",
"torch.nn.utils.rnn.pad_sequence",
"numpy.indices",
"torch.nn.LayerNorm",
"torch.exp",
"torch.nn.Linear",
"torch.repeat_interleave",
"torch.nn.Conv1d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
guillermo-carrasco/bcbio-nextgen-vm | [
"74c53e72ea722c01b8c12b7dcf1703f79e377612"
] | [
"bcbiovm/docker/devel.py"
] | [
"\"\"\"Utilities to help with developing using bcbio inside of docker.\n\"\"\"\nimport copy\nimport datetime\nimport glob\nimport math\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport boto\nimport numpy\nimport yaml\n\nfrom bcbio import utils\nfrom bcbio.distributed import objectstore\nfrom bcbio.pipeline import genome\nfrom bcbio.provenance import do\n\nfrom bcbiovm.aws import common\nfrom bcbiovm.docker import defaults, install, manage, mounts\n\n# default information about docker container\nDOCKER = {\"port\": 8085,\n \"biodata_dir\": \"/usr/local/share/bcbio-nextgen\",\n \"work_dir\": \"/mnt/work\",\n \"image_url\": \"https://s3.amazonaws.com/bcbio_nextgen/bcbio-nextgen-docker-image.gz\"}\n\n# Available genomes and indexes\nSUPPORTED_GENOMES = [\"GRCh37\", \"hg19\", \"hg38\", \"hg38-noalt\", \"mm10\", \"mm9\",\n \"rn6\", \"rn5\", \"canFam3\", \"dm3\", \"galGal4\", \"phix\",\n \"pseudomonas_aeruginosa_ucbpp_pa14\", \"sacCer3\", \"TAIR10\",\n \"WBcel235\", \"xenTro3\", \"Zv9\", \"GRCz10\"]\nSUPPORTED_INDEXES = [\"bowtie\", \"bowtie2\", \"bwa\", \"novoalign\", \"rtg\", \"snap\",\n \"star\", \"ucsc\", \"seq\", \"hisat2\"]\n\ndef add_biodata_args(parser):\n \"\"\"Add standard arguments for preparing biological data to a command line arg parser.\n \"\"\"\n parser.add_argument(\"--genomes\", help=\"Genomes to download\",\n action=\"append\", default=[],\n choices=SUPPORTED_GENOMES)\n parser.add_argument(\"--aligners\", help=\"Aligner indexes to download\",\n action=\"append\", default=[],\n choices=SUPPORTED_INDEXES)\n return parser\n\ndef setup_cmd(subparsers):\n parser = subparsers.add_parser(\"devel\", help=\"Utilities to help with develping using bcbion inside of docker\")\n psub = parser.add_subparsers(title=\"[devel commands]\")\n\n iparser = psub.add_parser(\"setup_install\", help=\"Run a python setup.py install inside of the current directory\")\n iparser.add_argument(\"-i\", \"--image\", help=\"Image name to write updates to\",\n default=install.DEFAULT_IMAGE)\n iparser.set_defaults(func=_run_setup_install)\n\n sparser = psub.add_parser(\"system\", help=\"Update bcbio system file with a given core and memory/core target\")\n sparser.add_argument(\"cores\", help=\"Target cores to use for multi-core processes\")\n sparser.add_argument(\"memory\", help=\"Target memory per core, in Mb (1000 = 1Gb)\")\n sparser.set_defaults(func=_run_system_update)\n\n dparser = psub.add_parser(\"biodata\", help=\"Upload pre-prepared biological data to cache\")\n dparser.add_argument(\"--prepped\", help=\"Start with an existing set of cached data to output directory.\")\n dparser = add_biodata_args(dparser)\n dparser.set_defaults(func=_run_biodata_upload)\n\n dbparser = psub.add_parser(\"dockerbuild\", help=\"Build docker image and export to S3\")\n dbparser.add_argument(\"-b\", \"--bucket\", default=\"bcbio_nextgen\",\n help=\"S3 bucket to upload the gzipped docker image to\")\n dbparser.add_argument(\"-t\", \"--buildtype\", default=\"full\", choices=[\"full\", \"code\"],\n help=(\"Type of docker build to do. full is all code and third party tools. 
\"\n \"code is only bcbio-nextgen code.\"))\n dbparser.add_argument(\"-d\", \"--rundir\", default=\"/tmp/bcbio-docker-build\",\n help=\"Directory to run docker build in\")\n parser.add_argument(\"-q\", \"--quiet\", dest=\"verbose\", action=\"store_false\", default=True,\n help=\"Quiet output when running Ansible playbooks\")\n dbparser.set_defaults(func=_run_docker_build)\n\n# ## Install code to docker image\n\ndef _run_setup_install(args):\n \"\"\"Install python code from a bcbio-nextgen development tree inside of docker.\n \"\"\"\n bmounts = [\"-v\", \"%s:%s\" % (os.getcwd(), \"/tmp/bcbio-nextgen\")]\n cmd = [\"docker\", \"run\", \"-i\", \"-d\", \"--net=host\"] + bmounts + [args.image] + \\\n [\"bash\", \"-l\", \"-c\",\n (\"rm -rf /usr/local/share/bcbio-nextgen/anaconda/lib/python2.7/site-packages/bcbio && \"\n \"cd /tmp/bcbio-nextgen && \"\n \"/usr/local/share/bcbio-nextgen/anaconda/bin/python setup.py install\")]\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n cid = process.communicate()[0].strip()\n do.run([\"docker\", \"attach\", \"--no-stdin\", cid], \"Running in docker container: %s\" % cid,\n log_stdout=True)\n subprocess.check_call([\"docker\", \"commit\", cid, args.image])\n subprocess.check_call([\"docker\", \"rm\", cid], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n print(\"Updated bcbio-nextgen install in docker container: %s\" % args.image)\n\n# ## Update bcbio_system.yaml\n\ndef _run_system_update(args):\n \"\"\"Update bcbio_system.yaml file with a given target of cores and memory.\n \"\"\"\n mem_types = set([\"memory\", \"jvm_opts\"])\n args = defaults.update_check_args(args, \"Could not do upgrade of bcbio_system.yaml\")\n system_file = os.path.join(args.datadir, \"galaxy\", \"bcbio_system.yaml\")\n with open(system_file) as in_handle:\n config = yaml.safe_load(in_handle)\n out = copy.deepcopy(config)\n mems = []\n for attrs in config.get(\"resources\", {}).itervalues():\n for key, value in attrs.iteritems():\n if key in mem_types:\n mems.append((key, value))\n common_mem = _calculate_common_memory(mems)\n for prog, attrs in config.get(\"resources\", {}).iteritems():\n for key, value in attrs.iteritems():\n if key == \"cores\":\n out['resources'][prog][key] = int(args.cores)\n elif key in mem_types:\n out[\"resources\"][prog][key] = _update_memory(key, value, args.memory,\n common_mem)\n bak_file = system_file + \".bak%s\" % datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n shutil.move(system_file, bak_file)\n with open(system_file, \"w\") as out_handle:\n yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)\n\ndef _get_cur_mem(key, val):\n if key == \"memory\":\n cur_mem = val\n elif key == \"jvm_opts\":\n cur_mem = val[1].replace(\"-Xmx\", \"\")\n cur_val = int(cur_mem[:-1])\n cur_mod = cur_mem[-1:]\n if cur_mod.lower() == \"g\":\n cur_val = cur_val * 1000\n else:\n assert cur_mod.lower() == \"m\"\n return cur_val, cur_mod\n\ndef _calculate_common_memory(kvs):\n \"\"\"Get the median memory specification, in megabytes.\n \"\"\"\n mems = []\n for key, val in kvs:\n cur_val, _ = _get_cur_mem(key, val)\n mems.append(cur_val)\n return numpy.median(mems)\n\ndef _update_memory(key, cur, target, common_mem):\n \"\"\"Update memory specifications to match target.\n\n Handles JVM options and both megabyte and gigabyte specifications.\n `target` is in megabytes. 
Does not adjust down memory that is more\n than 1.5x the current common memory setting, assuming these are pre-set for\n higher memory requirements.\n \"\"\"\n mod_swap = {\"G\": \"M\", \"g\": \"m\"}\n cur_mem, orig_mod = _get_cur_mem(key, cur)\n if cur_mem >= common_mem * 1.5:\n return cur\n else:\n new_val = \"%s%s\" % (target, mod_swap.get(orig_mod, orig_mod))\n if key == \"jvm_opts\":\n out = cur\n out[-1] = \"-Xmx%s\" % new_val\n else:\n out = new_val\n return out\n\n# ## Build docker images\n\ndef _run_docker_build(args):\n playbook = os.path.join(common.ANSIBLE_BASE, \"bcbio_vm_docker_local.yml\")\n inventory_path = os.path.join(common.ANSIBLE_BASE, \"standard_hosts.txt\")\n def _setup_args(args, cluster_config):\n return {\"bcbio_bucket\": args.bucket, \"docker_buildtype\": args.buildtype,\n \"bcbio_dir\": args.rundir}\n common.run_ansible_pb(inventory_path, playbook, args, _setup_args)\n\n# ## Upload pre-build biological data\n\ndef _run_biodata_upload(args):\n \"\"\"Manage preparation of biodata on a local machine, uploading to S3 in pieces.\n \"\"\"\n args = defaults.update_check_args(args, \"biodata not uploaded\")\n args = install.docker_image_arg(args)\n for gbuild in args.genomes:\n print(\"Preparing %s\" % gbuild)\n if args.prepped:\n for target in [\"samtools\"] + args.aligners:\n genome.download_prepped_genome(gbuild, {}, target, False, args.prepped)\n print(\"Downloaded prepped %s to %s. Edit and re-run without --prepped to upload\"\n % (gbuild, args.prepped))\n return\n cl = [\"upgrade\", \"--genomes\", gbuild]\n for a in args.aligners:\n cl += [\"--aligners\", a]\n dmounts = mounts.prepare_system(args.datadir, DOCKER[\"biodata_dir\"])\n manage.run_bcbio_cmd(args.image, dmounts, cl)\n print(\"Uploading %s\" % gbuild)\n gdir = _get_basedir(args.datadir, gbuild)\n basedir, genomedir = os.path.split(gdir)\n assert genomedir == gbuild\n with utils.chdir(basedir):\n all_dirs = sorted(os.listdir(gbuild))\n _upload_biodata(gbuild, \"seq\", all_dirs)\n for aligner in args.aligners:\n _upload_biodata(gbuild, genome.REMAP_NAMES.get(aligner, aligner), all_dirs)\n\ndef _upload_biodata(gbuild, target, all_dirs):\n \"\"\"Upload biodata for a specific genome build and target to S3.\n \"\"\"\n if target == \"seq\":\n want_dirs = set([\"rnaseq\", \"seq\", \"variation\", \"vep\", \"snpeff\"])\n target_dirs = [x for x in all_dirs if (x.startswith(\"rnaseq-\") or x in want_dirs)]\n else:\n target_dirs = [x for x in all_dirs if x == target]\n target_dirs = [os.path.join(gbuild, x) for x in target_dirs]\n fname = objectstore.BIODATA_INFO[\"s3\"].format(build=gbuild, target=target)\n remotef = objectstore.parse_remote(fname)\n conn = objectstore.connect(fname)\n bucket = conn.get_bucket(remotef.bucket)\n key = bucket.get_key(remotef.key)\n if not key:\n keyname = remotef.key\n bucketname = remotef.bucket\n target_dirs = \" \".join(target_dirs)\n cmd = (\"tar -cvpf - {target_dirs} | pigz -c | \"\n \"gof3r put --no-md5 -k {keyname} -b {bucketname} \"\n \"-m x-amz-storage-class:REDUCED_REDUNDANCY -m x-amz-acl:public-read\")\n do.run(cmd.format(**locals()), \"Upload pre-prepared genome data: %s %s\" % (gbuild, target))\n\ndef _get_basedir(datadir, target_genome):\n \"\"\"Retrieve base directory for uploading.\n \"\"\"\n genome_dir = os.path.join(datadir, \"genomes\")\n for dirname in glob.glob(os.path.join(genome_dir, \"*\", \"*\")):\n if dirname.endswith(\"/%s\" % target_genome):\n return dirname\n"
] | [
[
"numpy.median"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
windwiki/featuretools | [
"372f513c8915e602d465069184128b9654a12d97"
] | [
"featuretools/tests/primitive_tests/test_transform_features.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pytest\n\nimport featuretools as ft\nfrom featuretools.computational_backends.feature_set import FeatureSet\nfrom featuretools.computational_backends.feature_set_calculator import (\n FeatureSetCalculator\n)\nfrom featuretools.primitives import (\n Absolute,\n AddNumeric,\n AddNumericScalar,\n Count,\n Day,\n Diff,\n DivideByFeature,\n DivideNumeric,\n DivideNumericScalar,\n Equal,\n EqualScalar,\n GreaterThanEqualToScalar,\n GreaterThanScalar,\n Haversine,\n Hour,\n IsIn,\n IsNull,\n Latitude,\n LessThanEqualToScalar,\n LessThanScalar,\n Longitude,\n Minute,\n Mode,\n Month,\n MultiplyNumeric,\n MultiplyNumericScalar,\n Not,\n NotEqual,\n NotEqualScalar,\n NumCharacters,\n NumWords,\n Percentile,\n ScalarSubtractNumericFeature,\n Second,\n SubtractNumeric,\n SubtractNumericScalar,\n Sum,\n TransformPrimitive,\n Year,\n get_transform_primitives\n)\nfrom featuretools.primitives.base import make_trans_primitive\nfrom featuretools.primitives.utils import (\n PrimitivesDeserializer,\n serialize_primitive\n)\nfrom featuretools.synthesis.deep_feature_synthesis import match\nfrom featuretools.tests.testing_utils import feature_with_name\nfrom featuretools.variable_types import Boolean, Datetime, Numeric, Variable\n\n\ndef test_init_and_name(es):\n log = es['log']\n rating = ft.Feature(es[\"products\"][\"rating\"], es[\"log\"])\n features = [ft.Feature(v) for v in log.variables] +\\\n [ft.Feature(rating, primitive=GreaterThanScalar(2.5))]\n # Add Timedelta feature\n # features.append(pd.Timestamp.now() - ft.Feature(log['datetime']))\n for transform_prim in get_transform_primitives().values():\n\n # skip automated testing if a few special cases\n if transform_prim in [NotEqual, Equal]:\n continue\n\n # use the input_types matching function from DFS\n input_types = transform_prim.input_types\n if type(input_types[0]) == list:\n matching_inputs = match(input_types[0], features)\n else:\n matching_inputs = match(input_types, features)\n if len(matching_inputs) == 0:\n raise Exception(\n \"Transform Primitive %s not tested\" % transform_prim.name)\n for s in matching_inputs:\n instance = ft.Feature(s, primitive=transform_prim)\n\n # try to get name and calculate\n instance.get_name()\n ft.calculate_feature_matrix([instance], entityset=es).head(5)\n\n\ndef test_relationship_path(es):\n f = ft.TransformFeature(es['log']['datetime'], Hour)\n\n assert len(f.relationship_path) == 0\n\n\ndef test_serialization(es):\n value = ft.IdentityFeature(es['log']['value'])\n primitive = ft.primitives.MultiplyNumericScalar(value=2)\n value_x2 = ft.TransformFeature(value, primitive)\n\n dictionary = {\n 'name': None,\n 'base_features': [value.unique_name()],\n 'primitive': serialize_primitive(primitive),\n }\n\n assert dictionary == value_x2.get_arguments()\n assert value_x2 == \\\n ft.TransformFeature.from_dictionary(dictionary, es,\n {value.unique_name(): value},\n PrimitivesDeserializer())\n\n\ndef test_make_trans_feat(es):\n f = ft.Feature(es['log']['datetime'], primitive=Hour)\n\n feature_set = FeatureSet([f])\n calculator = FeatureSetCalculator(es, feature_set=feature_set)\n df = calculator.run(np.array([0]))\n v = df[f.get_name()][0]\n assert v == 10\n\n\ndef test_diff(es):\n value = ft.Feature(es['log']['value'])\n customer_id_feat = ft.Feature(es['sessions']['customer_id'], entity=es['log'])\n diff1 = ft.Feature(value, groupby=es['log']['session_id'], primitive=Diff)\n diff2 = ft.Feature(value, groupby=customer_id_feat, primitive=Diff)\n\n feature_set = 
FeatureSet([diff1, diff2])\n calculator = FeatureSetCalculator(es, feature_set=feature_set)\n df = calculator.run(np.array(range(15)))\n\n val1 = df[diff1.get_name()].values.tolist()\n val2 = df[diff2.get_name()].values.tolist()\n correct_vals1 = [\n np.nan, 5, 5, 5, 5, np.nan, 1, 1, 1, np.nan, np.nan, 5, np.nan, 7, 7\n ]\n correct_vals2 = [np.nan, 5, 5, 5, 5, -20, 1, 1, 1, -3, np.nan, 5, -5, 7, 7]\n for i, v in enumerate(val1):\n v1 = val1[i]\n if np.isnan(v1):\n assert (np.isnan(correct_vals1[i]))\n else:\n assert v1 == correct_vals1[i]\n v2 = val2[i]\n if np.isnan(v2):\n assert (np.isnan(correct_vals2[i]))\n else:\n assert v2 == correct_vals2[i]\n\n\ndef test_diff_single_value(es):\n diff = ft.Feature(es['stores']['num_square_feet'], groupby=es['stores'][u'région_id'], primitive=Diff)\n feature_set = FeatureSet([diff])\n calculator = FeatureSetCalculator(es, feature_set=feature_set)\n df = calculator.run(np.array([5]))\n assert df.shape[0] == 1\n assert df[diff.get_name()].dropna().shape[0] == 0\n\n\ndef test_compare_of_identity(es):\n to_test = [(EqualScalar, [False, False, True, False]),\n (NotEqualScalar, [True, True, False, True]),\n (LessThanScalar, [True, True, False, False]),\n (LessThanEqualToScalar, [True, True, True, False]),\n (GreaterThanScalar, [False, False, False, True]),\n (GreaterThanEqualToScalar, [False, False, True, True])]\n\n features = []\n for test in to_test:\n features.append(ft.Feature(es['log']['value'], primitive=test[0](10)))\n\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3])\n\n for i, test in enumerate(to_test):\n v = df[features[i].get_name()].values.tolist()\n assert v == test[1]\n\n\ndef test_compare_of_direct(es):\n log_rating = ft.Feature(es['products']['rating'], entity=es['log'])\n to_test = [(EqualScalar, [False, False, False, False]),\n (NotEqualScalar, [True, True, True, True]),\n (LessThanScalar, [False, False, False, True]),\n (LessThanEqualToScalar, [False, False, False, True]),\n (GreaterThanScalar, [True, True, True, False]),\n (GreaterThanEqualToScalar, [True, True, True, False])]\n\n features = []\n for test in to_test:\n features.append(ft.Feature(log_rating, primitive=test[0](4.5)))\n\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3])\n\n for i, test in enumerate(to_test):\n v = df[features[i].get_name()].values.tolist()\n assert v == test[1]\n\n\ndef test_compare_of_transform(es):\n day = ft.Feature(es['log']['datetime'], primitive=Day)\n to_test = [(EqualScalar, [False, True]),\n (NotEqualScalar, [True, False]),\n (LessThanScalar, [True, False]),\n (LessThanEqualToScalar, [True, True]),\n (GreaterThanScalar, [False, False]),\n (GreaterThanEqualToScalar, [False, True])]\n\n features = []\n for test in to_test:\n features.append(ft.Feature(day, primitive=test[0](10)))\n\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 14])\n\n for i, test in enumerate(to_test):\n v = df[features[i].get_name()].values.tolist()\n assert v == test[1]\n\n\ndef test_compare_of_agg(es):\n count_logs = ft.Feature(es['log']['id'], parent_entity=es['sessions'], primitive=Count)\n\n to_test = [(EqualScalar, [False, False, False, True]),\n (NotEqualScalar, [True, True, True, False]),\n (LessThanScalar, [False, False, True, False]),\n (LessThanEqualToScalar, [False, False, True, True]),\n (GreaterThanScalar, [True, True, False, False]),\n (GreaterThanEqualToScalar, [True, True, False, True])]\n\n features = []\n for test in to_test:\n 
features.append(ft.Feature(count_logs, primitive=test[0](2)))\n\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3])\n\n for i, test in enumerate(to_test):\n v = df[features[i].get_name()].values.tolist()\n assert v == test[1]\n\n\ndef test_compare_all_nans(es):\n nan_feat = ft.Feature(es['log']['product_id'], parent_entity=es['sessions'], primitive=Mode)\n compare = nan_feat == 'brown bag'\n # before all data\n time_last = pd.Timestamp('1/1/1993')\n\n df = ft.calculate_feature_matrix(entityset=es, features=[nan_feat, compare], instance_ids=[0, 1, 2], cutoff_time=time_last)\n assert df[nan_feat.get_name()].dropna().shape[0] == 0\n assert not df[compare.get_name()].any()\n\n\ndef test_arithmetic_of_val(es):\n to_test = [(AddNumericScalar, [2.0, 7.0, 12.0, 17.0]),\n (SubtractNumericScalar, [-2.0, 3.0, 8.0, 13.0]),\n (ScalarSubtractNumericFeature, [2.0, -3.0, -8.0, -13.0]),\n (MultiplyNumericScalar, [0, 10, 20, 30]),\n (DivideNumericScalar, [0, 2.5, 5, 7.5]),\n (DivideByFeature, [np.inf, 0.4, 0.2, 2 / 15.0])]\n\n features = []\n for test in to_test:\n features.append(ft.Feature(es['log']['value'], primitive=test[0](2)))\n\n features.append(ft.Feature(es['log']['value']) / 0)\n\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3])\n\n for f, test in zip(features, to_test):\n v = df[f.get_name()].values.tolist()\n assert v == test[1]\n\n test = [np.nan, np.inf, np.inf, np.inf]\n v = df[features[-1].get_name()].values.tolist()\n assert (np.isnan(v[0]))\n assert v[1:] == test[1:]\n\n\ndef test_arithmetic_two_vals_fails(es):\n error_text = \"Not a feature\"\n with pytest.raises(Exception, match=error_text):\n ft.Feature([2, 2], primitive=AddNumeric)\n\n\ndef test_arithmetic_of_identity(es):\n logs = es['log']\n\n to_test = [(AddNumeric, [0., 7., 14., 21.]),\n (SubtractNumeric, [0, 3, 6, 9]),\n (MultiplyNumeric, [0, 10, 40, 90]),\n (DivideNumeric, [np.nan, 2.5, 2.5, 2.5])]\n\n features = []\n for test in to_test:\n features.append(ft.Feature([logs['value'], logs['value_2']], primitive=test[0]))\n\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1, 2, 3])\n\n for i, test in enumerate(to_test[:-1]):\n v = df[features[i].get_name()].values.tolist()\n assert v == test[1]\n i, test = 3, to_test[-1]\n v = df[features[i].get_name()].values.tolist()\n assert (np.isnan(v[0]))\n assert v[1:] == test[1][1:]\n\n\ndef test_arithmetic_of_direct(es):\n rating = es['products']['rating']\n log_rating = ft.Feature(rating, entity=es['log'])\n customer_age = es['customers']['age']\n session_age = ft.Feature(customer_age, entity=es['sessions'])\n log_age = ft.Feature(session_age, entity=es['log'])\n\n to_test = [(AddNumeric, [38, 37, 37.5, 37.5]),\n (SubtractNumeric, [28, 29, 28.5, 28.5]),\n (MultiplyNumeric, [165, 132, 148.5, 148.5]),\n (DivideNumeric, [6.6, 8.25, 22. / 3, 22. 
/ 3])]\n\n features = []\n for test in to_test:\n features.append(ft.Feature([log_age, log_rating], primitive=test[0]))\n\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 3, 5, 7])\n for i, test in enumerate(to_test):\n v = df[features[i].get_name()].values.tolist()\n assert v == test[1]\n\n\ndef test_boolean_multiply():\n es = ft.EntitySet()\n df = pd.DataFrame({\"index\": [0, 1, 2],\n \"bool\": [True, False, True],\n \"numeric\": [2, 3, np.nan]})\n\n es.entity_from_dataframe(entity_id=\"test\",\n dataframe=df,\n index=\"index\")\n\n to_test = [\n ('numeric', 'numeric'),\n ('numeric', 'bool'),\n ('bool', 'numeric'),\n ('bool', 'bool')\n ]\n features = []\n for row in to_test:\n features.append(ft.Feature(es[\"test\"][row[0]]) * ft.Feature(es[\"test\"][row[1]]))\n\n fm = ft.calculate_feature_matrix(entityset=es, features=features)\n\n for row in to_test:\n col_name = '{} * {}'.format(row[0], row[1])\n if row[0] == 'bool' and row[1] == 'bool':\n assert fm[col_name].equals(df[row[0]] & df[row[1]])\n else:\n assert fm[col_name].equals(df[row[0]] * df[row[1]])\n\n\n# P TODO: rewrite this test\ndef test_arithmetic_of_transform(es):\n diff1 = ft.Feature([es['log']['value']], primitive=Diff)\n diff2 = ft.Feature([es['log']['value_2']], primitive=Diff)\n\n to_test = [(AddNumeric, [np.nan, 14., -7., 3.]),\n (SubtractNumeric, [np.nan, 6., -3., 1.]),\n (MultiplyNumeric, [np.nan, 40., 10., 2.]),\n (DivideNumeric, [np.nan, 2.5, 2.5, 2.])]\n\n features = []\n for test in to_test:\n features.append(ft.Feature([diff1, diff2], primitive=test[0]()))\n\n feature_set = FeatureSet(features)\n calculator = FeatureSetCalculator(es, feature_set=feature_set)\n df = calculator.run(np.array([0, 2, 11, 13]))\n for i, test in enumerate(to_test):\n v = df[features[i].get_name()].values.tolist()\n assert np.isnan(v.pop(0))\n assert np.isnan(test[1].pop(0))\n assert v == test[1]\n\n\ndef test_not_feature(es):\n not_feat = ft.Feature(es['customers']['loves_ice_cream'], primitive=Not)\n features = [not_feat]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 1])\n v = df[not_feat.get_name()].values\n assert not v[0]\n assert v[1]\n\n\ndef test_arithmetic_of_agg(es):\n customer_id_feat = es['customers']['id']\n store_id_feat = es['stores']['id']\n count_customer = ft.Feature(customer_id_feat, parent_entity=es[u'régions'], primitive=Count)\n count_stores = ft.Feature(store_id_feat, parent_entity=es[u'régions'], primitive=Count)\n to_test = [(AddNumeric, [6, 2]),\n (SubtractNumeric, [0, -2]),\n (MultiplyNumeric, [9, 0]),\n (DivideNumeric, [1, 0])]\n\n features = []\n for test in to_test:\n features.append(ft.Feature([count_customer, count_stores], primitive=test[0]()))\n\n ids = ['United States', 'Mexico']\n df = ft.calculate_feature_matrix(entityset=es, features=features,\n instance_ids=ids)\n df = df.loc[ids]\n\n for i, test in enumerate(to_test):\n v = df[features[i].get_name()].values.tolist()\n assert v == test[1]\n\n\n# TODO latlong is a string in entityset. 
Asserts in test_latlong fail\n# def latlong_unstringify(latlong):\n# lat = float(latlong.split(\", \")[0].replace(\"(\", \"\"))\n# lon = float(latlong.split(\", \")[1].replace(\")\", \"\"))\n# return (lat, lon)\n\n\ndef test_latlong(es):\n log_latlong_feat = es['log']['latlong']\n latitude = ft.Feature(log_latlong_feat, primitive=Latitude)\n longitude = ft.Feature(log_latlong_feat, primitive=Longitude)\n features = [latitude, longitude]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n latvalues = df[latitude.get_name()].values\n lonvalues = df[longitude.get_name()].values\n assert len(latvalues) == 15\n assert len(lonvalues) == 15\n real_lats = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]\n real_lons = [0, 2, 4, 6, 8, 0, 1, 2, 3, 0, 0, 2, 0, 3, 6]\n for i, v, in enumerate(real_lats):\n assert v == latvalues[i]\n for i, v, in enumerate(real_lons):\n assert v == lonvalues[i]\n\n\ndef test_haversine(es):\n log_latlong_feat = es['log']['latlong']\n log_latlong_feat2 = es['log']['latlong2']\n haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],\n primitive=Haversine)\n features = [haversine]\n\n df = ft.calculate_feature_matrix(entityset=es, features=features,\n instance_ids=range(15))\n values = df[haversine.get_name()].values\n real = [0, 525.318462, 1045.32190304, 1554.56176802, 2047.3294327, 0,\n 138.16578931, 276.20524822, 413.99185444, 0, 0, 525.318462, 0,\n 741.57941183, 1467.52760175]\n assert len(values) == 15\n assert np.allclose(values, real, atol=0.0001)\n\n haversine = ft.Feature([log_latlong_feat, log_latlong_feat2],\n primitive=Haversine(unit='kilometers'))\n features = [haversine]\n df = ft.calculate_feature_matrix(entityset=es, features=features,\n instance_ids=range(15))\n values = df[haversine.get_name()].values\n real_km = [0, 845.41812212, 1682.2825471, 2501.82467535, 3294.85736668,\n 0, 222.35628593, 444.50926278, 666.25531268, 0, 0,\n 845.41812212, 0, 1193.45638714, 2361.75676089]\n assert len(values) == 15\n assert np.allclose(values, real_km, atol=0.0001)\n error_text = \"Invalid unit inches provided. 
Must be one of\"\n with pytest.raises(ValueError, match=error_text):\n Haversine(unit='inches')\n\n\ndef test_text_primitives(es):\n words = ft.Feature(es['log']['comments'], primitive=NumWords)\n chars = ft.Feature(es['log']['comments'], primitive=NumCharacters)\n\n features = [words, chars]\n\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n\n word_counts = [514, 3, 3, 644, 1268, 1269, 177, 172, 79,\n 240, 1239, 3, 3, 3, 3]\n char_counts = [3392, 10, 10, 4116, 7961, 7580, 992, 957,\n 437, 1325, 6322, 10, 10, 10, 10]\n word_values = df[words.get_name()].values\n char_values = df[chars.get_name()].values\n assert len(word_values) == 15\n for i, v in enumerate(word_values):\n assert v == word_counts[i]\n for i, v in enumerate(char_values):\n assert v == char_counts[i]\n\n\ndef test_isin_feat(es):\n isin = ft.Feature(es['log']['product_id'], primitive=IsIn(list_of_outputs=[\"toothpaste\", \"coke zero\"]))\n features = [isin]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8))\n true = [True, True, True, False, False, True, True, True]\n v = df[isin.get_name()].values.tolist()\n assert true == v\n\n\ndef test_isin_feat_other_syntax(es):\n isin = ft.Feature(es['log']['product_id']).isin([\"toothpaste\", \"coke zero\"])\n features = [isin]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8))\n true = [True, True, True, False, False, True, True, True]\n v = df[isin.get_name()].values.tolist()\n assert true == v\n\n\ndef test_isin_feat_other_syntax_int(es):\n isin = ft.Feature(es['log']['value']).isin([5, 10])\n features = [isin]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8))\n true = [False, True, True, False, False, False, False, False]\n v = df[isin.get_name()].values.tolist()\n assert true == v\n\n\ndef test_isin_feat_custom(es):\n def pd_is_in(array, list_of_outputs=None):\n if list_of_outputs is None:\n list_of_outputs = []\n return pd.Series(array).isin(list_of_outputs)\n\n def isin_generate_name(self, base_feature_names):\n return u\"%s.isin(%s)\" % (base_feature_names[0],\n str(self.kwargs['list_of_outputs']))\n\n IsIn = make_trans_primitive(\n pd_is_in,\n [Variable],\n Boolean,\n name=\"is_in\",\n description=\"For each value of the base feature, checks whether it is \"\n \"in a list that is provided.\",\n cls_attributes={\"generate_name\": isin_generate_name})\n\n isin = ft.Feature(es['log']['product_id'], primitive=IsIn(list_of_outputs=[\"toothpaste\", \"coke zero\"]))\n features = [isin]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8))\n true = [True, True, True, False, False, True, True, True]\n v = df[isin.get_name()].values.tolist()\n assert true == v\n\n isin = ft.Feature(es['log']['product_id']).isin([\"toothpaste\", \"coke zero\"])\n features = [isin]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8))\n true = [True, True, True, False, False, True, True, True]\n v = df[isin.get_name()].values.tolist()\n assert true == v\n\n isin = ft.Feature(es['log']['value']).isin([5, 10])\n features = [isin]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(8))\n true = [False, True, True, False, False, False, False, False]\n v = df[isin.get_name()].values.tolist()\n assert true == v\n\n\ndef test_isnull_feat(es):\n value = ft.Feature(es['log']['value'])\n diff = ft.Feature(value, groupby=es['log']['session_id'], 
primitive=Diff)\n isnull = ft.Feature(diff, primitive=IsNull)\n features = [isnull]\n df = ft.calculate_feature_matrix(entityset=es, features=features, instance_ids=range(15))\n # correct_vals_diff = [\n # np.nan, 5, 5, 5, 5, np.nan, 1, 1, 1, np.nan, np.nan, 5, np.nan, 7, 7]\n correct_vals = [True, False, False, False, False, True, False, False,\n False, True, True, False, True, False, False]\n values = df[isnull.get_name()].values.tolist()\n assert correct_vals == values\n\n\ndef test_percentile(es):\n v = ft.Feature(es['log']['value'])\n p = ft.Feature(v, primitive=Percentile)\n feature_set = FeatureSet([p])\n calculator = FeatureSetCalculator(es, feature_set)\n df = calculator.run(np.array(range(10, 17)))\n true = es['log'].df[v.get_name()].rank(pct=True)\n true = true.loc[range(10, 17)]\n for t, a in zip(true.values, df[p.get_name()].values):\n assert (pd.isnull(t) and pd.isnull(a)) or t == a\n\n\ndef test_dependent_percentile(es):\n v = ft.Feature(es['log']['value'])\n p = ft.Feature(v, primitive=Percentile)\n p2 = ft.Feature(p - 1, primitive=Percentile)\n feature_set = FeatureSet([p, p2])\n calculator = FeatureSetCalculator(es, feature_set)\n df = calculator.run(np.array(range(10, 17)))\n true = es['log'].df[v.get_name()].rank(pct=True)\n true = true.loc[range(10, 17)]\n for t, a in zip(true.values, df[p.get_name()].values):\n assert (pd.isnull(t) and pd.isnull(a)) or t == a\n\n\ndef test_agg_percentile(es):\n v = ft.Feature(es['log']['value'])\n p = ft.Feature(v, primitive=Percentile)\n agg = ft.Feature(p, parent_entity=es['sessions'], primitive=Sum)\n feature_set = FeatureSet([agg])\n calculator = FeatureSetCalculator(es, feature_set)\n df = calculator.run(np.array([0, 1]))\n log_vals = es['log'].df[[v.get_name(), 'session_id']]\n log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)\n true_p = log_vals.groupby('session_id')['percentile'].sum()[[0, 1]]\n for t, a in zip(true_p.values, df[agg.get_name()].values):\n assert (pd.isnull(t) and pd.isnull(a)) or t == a\n\n\ndef test_percentile_agg_percentile(es):\n v = ft.Feature(es['log']['value'])\n p = ft.Feature(v, primitive=Percentile)\n agg = ft.Feature(p, parent_entity=es['sessions'], primitive=Sum)\n pagg = ft.Feature(agg, primitive=Percentile)\n feature_set = FeatureSet([pagg])\n calculator = FeatureSetCalculator(es, feature_set)\n df = calculator.run(np.array([0, 1]))\n\n log_vals = es['log'].df[[v.get_name(), 'session_id']]\n log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)\n true_p = log_vals.groupby('session_id')['percentile'].sum().fillna(0)\n true_p = true_p.rank(pct=True)[[0, 1]]\n\n for t, a in zip(true_p.values, df[pagg.get_name()].values):\n assert (pd.isnull(t) and pd.isnull(a)) or t == a\n\n\ndef test_percentile_agg(es):\n v = ft.Feature(es['log']['value'])\n agg = ft.Feature(v, parent_entity=es['sessions'], primitive=Sum)\n pagg = ft.Feature(agg, primitive=Percentile)\n feature_set = FeatureSet([pagg])\n calculator = FeatureSetCalculator(es, feature_set)\n df = calculator.run(np.array([0, 1]))\n\n log_vals = es['log'].df[[v.get_name(), 'session_id']]\n true_p = log_vals.groupby('session_id')[v.get_name()].sum().fillna(0)\n true_p = true_p.rank(pct=True)[[0, 1]]\n\n for t, a in zip(true_p.values, df[pagg.get_name()].values):\n assert (pd.isnull(t) and pd.isnull(a)) or t == a\n\n\ndef test_direct_percentile(es):\n v = ft.Feature(es['customers']['age'])\n p = ft.Feature(v, primitive=Percentile)\n d = ft.Feature(p, es['sessions'])\n feature_set = FeatureSet([d])\n calculator = 
FeatureSetCalculator(es, feature_set)\n df = calculator.run(np.array([0, 1]))\n\n cust_vals = es['customers'].df[[v.get_name()]]\n cust_vals['percentile'] = cust_vals[v.get_name()].rank(pct=True)\n true_p = cust_vals['percentile'].loc[[0, 0]]\n for t, a in zip(true_p.values, df[d.get_name()].values):\n assert (pd.isnull(t) and pd.isnull(a)) or t == a\n\n\ndef test_direct_agg_percentile(es):\n v = ft.Feature(es['log']['value'])\n p = ft.Feature(v, primitive=Percentile)\n agg = ft.Feature(p, parent_entity=es['customers'], primitive=Sum)\n d = ft.Feature(agg, es['sessions'])\n feature_set = FeatureSet([d])\n calculator = FeatureSetCalculator(es, feature_set)\n df = calculator.run(np.array([0, 1]))\n\n log_vals = es['log'].df[[v.get_name(), 'session_id']]\n log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)\n log_vals['customer_id'] = [0] * 10 + [1] * 5 + [2] * 2\n true_p = log_vals.groupby('customer_id')['percentile'].sum().fillna(0)\n true_p = true_p[[0, 0]]\n for t, a in zip(true_p.values, df[d.get_name()].values):\n assert (pd.isnull(t) and pd.isnull(a)) or round(t, 3) == round(a, 3)\n\n\ndef test_percentile_with_cutoff(es):\n v = ft.Feature(es['log']['value'])\n p = ft.Feature(v, primitive=Percentile)\n feature_set = FeatureSet([p])\n calculator = FeatureSetCalculator(es, feature_set, pd.Timestamp('2011/04/09 10:30:13'))\n df = calculator.run(np.array([2]))\n assert df[p.get_name()].tolist()[0] == 1.0\n\n\ndef test_two_kinds_of_dependents(es):\n v = ft.Feature(es['log']['value'])\n product = ft.Feature(es['log']['product_id'])\n agg = ft.Feature(v, parent_entity=es['customers'], where=product == 'coke zero', primitive=Sum)\n p = ft.Feature(agg, primitive=Percentile)\n g = ft.Feature(agg, primitive=Absolute)\n agg2 = ft.Feature(v, parent_entity=es['sessions'], where=product == 'coke zero', primitive=Sum)\n agg3 = ft.Feature(agg2, parent_entity=es['customers'], primitive=Sum)\n feature_set = FeatureSet([p, g, agg3])\n calculator = FeatureSetCalculator(es, feature_set)\n df = calculator.run(np.array([0, 1]))\n assert df[p.get_name()].tolist() == [2. / 3, 1.0]\n assert df[g.get_name()].tolist() == [15, 26]\n\n\ndef test_make_transform_restricts_time_keyword():\n make_trans_primitive(\n lambda x, time=False: x,\n [Datetime],\n Numeric,\n name=\"AllowedPrimitive\",\n description=\"This primitive should be accepted\",\n uses_calc_time=True)\n\n error_text = \"'time' is a restricted keyword. Please use a different keyword.\"\n with pytest.raises(ValueError, match=error_text):\n make_trans_primitive(\n lambda x, time=False: x,\n [Datetime],\n Numeric,\n name=\"BadPrimitive\",\n description=\"This primitive should error\")\n\n\ndef test_make_transform_restricts_time_arg():\n make_trans_primitive(\n lambda time: time,\n [Datetime],\n Numeric,\n name=\"AllowedPrimitive\",\n description=\"This primitive should be accepted\",\n uses_calc_time=True)\n\n error_text = \"'time' is a restricted keyword. 
Please use a different keyword.\"\n with pytest.raises(ValueError, match=error_text):\n make_trans_primitive(\n lambda time: time,\n [Datetime],\n Numeric,\n name=\"BadPrimitive\",\n description=\"This primitive should erorr\")\n\n\ndef test_make_transform_sets_kwargs_correctly(es):\n def pd_is_in(array, list_of_outputs=None):\n if list_of_outputs is None:\n list_of_outputs = []\n return pd.Series(array).isin(list_of_outputs)\n\n def isin_generate_name(self, base_feature_names):\n return u\"%s.isin(%s)\" % (base_feature_names[0],\n str(self.kwargs['list_of_outputs']))\n\n IsIn = make_trans_primitive(\n pd_is_in,\n [Variable],\n Boolean,\n name=\"is_in\",\n description=\"For each value of the base feature, checks whether it is \"\n \"in a list that is provided.\",\n cls_attributes={\"generate_name\": isin_generate_name})\n\n isin_1_list = [\"toothpaste\", \"coke_zero\"]\n isin_1_base_f = ft.Feature(es['log']['product_id'])\n isin_1 = ft.Feature(isin_1_base_f, primitive=IsIn(list_of_outputs=isin_1_list))\n isin_2_list = [\"coke_zero\"]\n isin_2_base_f = ft.Feature(es['log']['session_id'])\n isin_2 = ft.Feature(isin_2_base_f, primitive=IsIn(list_of_outputs=isin_2_list))\n assert isin_1_base_f == isin_1.base_features[0]\n assert isin_1_list == isin_1.primitive.kwargs['list_of_outputs']\n assert isin_2_base_f == isin_2.base_features[0]\n assert isin_2_list == isin_2.primitive.kwargs['list_of_outputs']\n\n\ndef test_make_transform_multiple_output_features(es):\n def test_time(x):\n times = pd.Series(x)\n units = [\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\"]\n return [times.apply(lambda x: getattr(x, unit)) for unit in units]\n\n def gen_feat_names(self):\n subnames = [\"Year\", \"Month\", \"Day\", \"Hour\", \"Minute\", \"Second\"]\n return [\"Now.%s(%s)\" % (subname, self.base_features[0].get_name())\n for subname in subnames]\n\n TestTime = make_trans_primitive(\n function=test_time,\n input_types=[Datetime],\n return_type=Numeric,\n number_output_features=6,\n cls_attributes={\"get_feature_names\": gen_feat_names},\n )\n\n join_time_split = ft.Feature(es[\"log\"][\"datetime\"], primitive=TestTime)\n alt_features = [ft.Feature(es[\"log\"][\"datetime\"], primitive=Year),\n ft.Feature(es[\"log\"][\"datetime\"], primitive=Month),\n ft.Feature(es[\"log\"][\"datetime\"], primitive=Day),\n ft.Feature(es[\"log\"][\"datetime\"], primitive=Hour),\n ft.Feature(es[\"log\"][\"datetime\"], primitive=Minute),\n ft.Feature(es[\"log\"][\"datetime\"], primitive=Second)]\n fm, fl = ft.dfs(\n entityset=es,\n target_entity=\"log\",\n agg_primitives=[],\n trans_primitives=[TestTime, Year, Month, Day, Hour, Minute, Second, Diff],\n max_depth=5)\n\n subnames = join_time_split.get_feature_names()\n altnames = [f.get_name() for f in alt_features]\n for col1, col2 in zip(subnames, altnames):\n assert (fm[col1] == fm[col2]).all()\n\n for i in range(6):\n f = 'sessions.customers.DIFF(TEST_TIME(date_of_birth)[%d])' % i\n assert feature_with_name(fl, f)\n assert ('DIFF(TEST_TIME(datetime)[%d])' % i) in fl\n\n\ndef test_feature_names_inherit_from_make_trans_primitive():\n # R TODO\n pass\n\n\ndef test_get_filepath(es):\n class Mod4(TransformPrimitive):\n '''Return base feature modulo 4'''\n name = \"mod4\"\n input_types = [Numeric]\n return_type = Numeric\n\n def get_function(self):\n filepath = self.get_filepath(\"featuretools_unit_test_example.csv\")\n reference = pd.read_csv(filepath, header=None, squeeze=True)\n\n def map_to_word(x):\n def _map(x):\n if pd.isnull(x):\n return x\n return 
reference[int(x) % 4]\n return pd.Series(x).apply(_map)\n return map_to_word\n\n feat = ft.Feature(es['log']['value'], primitive=Mod4)\n df = ft.calculate_feature_matrix(features=[feat],\n entityset=es,\n instance_ids=range(17))\n\n assert pd.isnull(df[\"MOD4(value)\"][15])\n assert df[\"MOD4(value)\"][0] == 0\n assert df[\"MOD4(value)\"][14] == 2\n\n fm, fl = ft.dfs(entityset=es,\n target_entity=\"log\",\n agg_primitives=[],\n trans_primitives=[Mod4])\n\n assert fm[\"MOD4(value)\"][0] == 0\n assert fm[\"MOD4(value)\"][14] == 2\n assert pd.isnull(fm[\"MOD4(value)\"][15])\n\n\ndef test_override_multi_feature_names(es):\n def gen_custom_names(primitive, base_feature_names):\n return ['Above18(%s)' % base_feature_names,\n 'Above21(%s)' % base_feature_names,\n 'Above65(%s)' % base_feature_names]\n\n def is_greater(x):\n return x > 18, x > 21, x > 65\n\n num_features = 3\n IsGreater = make_trans_primitive(function=is_greater,\n input_types=[Numeric],\n return_type=Numeric,\n number_output_features=num_features,\n cls_attributes={\"generate_names\": gen_custom_names})\n\n fm, features = ft.dfs(entityset=es,\n target_entity=\"customers\",\n instance_ids=[0, 1, 2],\n agg_primitives=[],\n trans_primitives=[IsGreater])\n\n expected_names = gen_custom_names(IsGreater, ['age'])\n\n for name in expected_names:\n assert name in fm.columns\n"
] | [
[
"pandas.read_csv",
"numpy.allclose",
"pandas.isnull",
"pandas.Series",
"numpy.isnan",
"pandas.DataFrame",
"numpy.array",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
robertopreste/prestools | [
"5c3a66ce981f374de2e6bdc2d7c927ee5b51b9bb"
] | [
"tests/test_clustering.py"
] | [
"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# Created by Roberto Preste\nimport pytest\nimport prestools.clustering as pc\nimport numpy as np\n\n\n# pc.hierarchical_clustering\n\ndef test_hierarchical_clustering_empty_df(sample_empty_df):\n expect = None\n result = pc.hierarchical_clustering(sample_empty_df)\n assert result == expect\n\n\ndef test_hierarchical_clustering_one_entry_df(sample_one_entry_df):\n expect = None\n result = pc.hierarchical_clustering(sample_one_entry_df)\n assert result == expect\n\n\ndef test_hierarchical_clustering_sample_df(sample_corr_df):\n expect_linkage = np.array([[0.0, 4.0, 1.28467895, 2.0],\n [1.0, 3.0, 1.5330362, 2.0],\n [2.0, 6.0, 1.58692575, 3.0],\n [5.0, 7.0, 2.20363941, 5.0]])\n expect_pair_dist = np.array([1.72710741, 1.66240789, 1.60464949,\n 1.28467895, 1.53450318, 1.5330362,\n 1.75119959, 1.61180024, 2.22166604,\n 1.77772326])\n expect_coph_dist = 0.7027486505845463\n expect_coph_matr = np.array([2.20363941, 2.20363941, 2.20363941,\n 1.28467895, 1.58692575, 1.5330362,\n 2.20363941, 1.58692575, 2.20363941,\n 2.20363941])\n result = pc.hierarchical_clustering(sample_corr_df)\n assert np.allclose(result.linkage, expect_linkage)\n assert np.allclose(result.pair_dist, expect_pair_dist)\n assert np.allclose(result.coph_dist, expect_coph_dist)\n assert np.allclose(result.coph_matr, expect_coph_matr)\n\n\n# pc.find_n_clusters_elbow\n\ndef test_find_n_clusters_elbow_empty_df(sample_empty_df):\n expect = None\n result = pc.find_n_clusters_elbow(sample_empty_df)\n assert result == expect\n\n\ndef test_find_n_clusters_elbow_one_entry_df(sample_one_entry_df):\n expect = None\n result = pc.find_n_clusters_elbow(sample_one_entry_df)\n assert result == expect\n\n\ndef test_find_n_clusters_elbow_sample_df(sample_corr_df):\n expect = 2\n result = pc.find_n_clusters_elbow(sample_corr_df)\n assert result == expect\n\n\n"
] | [
[
"numpy.array",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Aliang-CN/GATNE | [
"b31cb7e3cf0ea4edaf5c2d848bc02d7d47db4f24"
] | [
"src/utils.py"
] | [
"import argparse\nfrom collections import defaultdict\n\nimport networkx as nx\nimport numpy as np\nfrom gensim.models.keyedvectors import Vocab\nfrom six import iteritems\nfrom sklearn.metrics import (auc, f1_score, precision_recall_curve,\n roc_auc_score)\n\nfrom walk import RWGraph\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--input', type=str, default='../data/amazon',\n help='Input dataset path')\n\n parser.add_argument('--features', type=str, default='../data/amazon/feature.txt',\n help='Input node features')\n\n parser.add_argument('--epoch', type=int, default=100,\n help='Number of epoch. Default is 100.')\n\n parser.add_argument('--batch-size', type=int, default=64,\n help='Number of batch_size. Default is 64.')\n\n parser.add_argument('--eval-type', type=str, default='all',\n help='The edge type(s) for evaluation.')\n\n parser.add_argument('--schema', type=str, default=None,\n help='The metapath schema (e.g., U-I-U,I-U-I).')\n\n parser.add_argument('--dimensions', type=int, default=200,\n help='Number of dimensions. Default is 200.')\n\n parser.add_argument('--edge-dim', type=int, default=10,\n help='Number of edge embedding dimensions. Default is 10.')\n\n parser.add_argument('--att-dim', type=int, default=20,\n help='Number of attention dimensions. Default is 20.')\n\n parser.add_argument('--walk-length', type=int, default=10,\n help='Length of walk per source. Default is 10.')\n\n parser.add_argument('--num-walks', type=int, default=20,\n help='Number of walks per source. Default is 20.')\n\n parser.add_argument('--window-size', type=int, default=5,\n help='Context size for optimization. Default is 5.')\n\n parser.add_argument('--negative-samples', type=int, default=5,\n help='Negative samples for optimization. Default is 5.')\n\n parser.add_argument('--neighbor-samples', type=int, default=10,\n help='Neighbor samples for aggregation. Default is 10.')\n\n parser.add_argument('--patience', type=int, default=5,\n help='Early stopping patience. 
Default is 5.')\n\n return parser.parse_args()\n\n\ndef get_G_from_edges(edges):\n edge_dict = dict()\n for edge in edges:\n edge_key = str(edge[0]) + '_' + str(edge[1]) # the edge between the two nodes\n if edge_key not in edge_dict:\n edge_dict[edge_key] = 1\n else:\n edge_dict[edge_key] += 1\n tmp_G = nx.Graph()\n for edge_key in edge_dict:\n weight = edge_dict[edge_key] # edge weight\n x = edge_key.split('_')[0]\n y = edge_key.split('_')[1]\n tmp_G.add_edge(x, y)\n tmp_G[x][y]['weight'] = weight\n return tmp_G\n\n\ndef load_training_data(f_name):\n print('We are loading data from:', f_name)\n edge_data_by_type = dict()\n all_nodes = list()\n with open(f_name, 'r') as f:\n for line in f:\n words = line[:-1].split(' ')\n if words[0] not in edge_data_by_type:\n edge_data_by_type[words[0]] = list()\n x, y = words[1], words[2]\n edge_data_by_type[words[0]].append((x, y))\n all_nodes.append(x)\n all_nodes.append(y)\n all_nodes = list(set(all_nodes)) # collect all nodes\n print('Total training nodes: ' + str(len(all_nodes)))\n return edge_data_by_type # {“r”:[(h,t)]}\n\n\ndef load_testing_data(f_name):\n print('We are loading data from:', f_name)\n true_edge_data_by_type = dict()\n false_edge_data_by_type = dict()\n all_edges = list()\n all_nodes = list()\n with open(f_name, 'r') as f:\n for line in f:\n words = line[:-1].split(' ')\n x, y = words[1], words[2]\n if int(words[3]) == 1:\n if words[0] not in true_edge_data_by_type: # build positive samples\n true_edge_data_by_type[words[0]] = list()\n true_edge_data_by_type[words[0]].append((x, y))\n else:\n if words[0] not in false_edge_data_by_type: # build negative samples\n false_edge_data_by_type[words[0]] = list()\n false_edge_data_by_type[words[0]].append((x, y))\n all_nodes.append(x)\n all_nodes.append(y)\n all_nodes = list(set(all_nodes))\n return true_edge_data_by_type, false_edge_data_by_type # positive/negative samples have a 1:1 ratio\n\n\ndef load_node_type(f_name):\n print('We are loading node type from:', f_name)\n node_type = {}\n with open(f_name, 'r') as f:\n for line in f:\n items = line.strip().split()\n node_type[items[0]] = items[1]\n return node_type\n\n\ndef generate_walks(network_data, num_walks, walk_length, schema, file_name):\n if schema is not None:\n node_type = load_node_type(file_name + '/node_type.txt')\n else:\n node_type = None\n\n all_walks = []\n for layer_id in network_data:\n tmp_data = network_data[layer_id]\n # start to do the random walk on a layer\n\n layer_walker = RWGraph(get_G_from_edges(tmp_data))\n layer_walks = layer_walker.simulate_walks(num_walks, walk_length, schema=schema)\n\n all_walks.append(layer_walks)\n\n print('Finish generating the walks')\n\n return all_walks\n\n\ndef generate_pairs(all_walks, vocab, window_size):\n pairs = []\n skip_window = window_size // 2\n for layer_id, walks in enumerate(all_walks):\n for walk in walks: # equivalent to taking the relations between the current position and the others within a window\n for i in range(len(walk)): # iterate over the walk\n for j in range(1, skip_window + 1):\n if i - j >= 0: # take the skip_window positions before i\n pairs.append((vocab[walk[i]].index, vocab[walk[i - j]].index, layer_id)) # vocab indices\n if i + j < len(walk): # take the skip_window positions after i\n pairs.append((vocab[walk[i]].index, vocab[walk[i + j]].index, layer_id))\n return pairs # vocab indices of the current node and its window neighbors: (walk, nei_walk, layer)\n\n\ndef generate_vocab(all_walks):\n index2word = []\n raw_vocab = defaultdict(int)\n\n for walks in all_walks: # iterate over the walks of each layer\n for walk in walks:\n for word in walk:\n raw_vocab[word] += 1 # count how many times each word (node) appears\n\n vocab = {}\n for word, v in iteritems(raw_vocab): # build the vocabulary: assign each word an index, sorted by occurrence count\n vocab[word] = Vocab(count=v, index=len(index2word))\n 
index2word.append(word)\n\n index2word.sort(key=lambda word: vocab[word].count, reverse=True) # sort by word count from largest to smallest\n for i, word in enumerate(index2word):\n vocab[word].index = i # re-assign the vocab index after sorting\n\n return vocab, index2word\n\n\ndef get_score(local_model, node1, node2):\n try:\n vector1 = local_model[node1]\n vector2 = local_model[node2]\n return np.dot(vector1, vector2) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))\n except Exception as e:\n pass\n\n\ndef evaluate(model, true_edges, false_edges):\n true_list = list()\n prediction_list = list()\n true_num = 0\n for edge in true_edges:\n tmp_score = get_score(model, str(edge[0]), str(edge[1]))\n if tmp_score is not None:\n true_list.append(1)\n prediction_list.append(tmp_score)\n true_num += 1\n\n for edge in false_edges:\n tmp_score = get_score(model, str(edge[0]), str(edge[1]))\n if tmp_score is not None:\n true_list.append(0)\n prediction_list.append(tmp_score)\n\n sorted_pred = prediction_list[:]\n sorted_pred.sort()\n threshold = sorted_pred[-true_num]\n\n y_pred = np.zeros(len(prediction_list), dtype=np.int32)\n for i in range(len(prediction_list)):\n if prediction_list[i] >= threshold:\n y_pred[i] = 1\n\n y_true = np.array(true_list)\n y_scores = np.array(prediction_list)\n ps, rs, _ = precision_recall_curve(y_true, y_scores)\n return roc_auc_score(y_true, y_scores), f1_score(y_true, y_pred), auc(rs, ps)\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"numpy.dot",
"numpy.linalg.norm",
"sklearn.metrics.precision_recall_curve",
"sklearn.metrics.auc",
"sklearn.metrics.f1_score",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kajal-puri/torchgeometry | [
"36c4992d5f741a65a1f558266c588e37c24462da"
] | [
"test/common.py"
] | [
"import torch\n\n\ndef get_test_devices():\n \"\"\"Creates a string list with the devices type to test the source code.\n CUDA devices will be test only in case the current hardware supports it.\n\n Return:\n list(str): list with devices names.\n \"\"\"\n devices = [\"cpu\"]\n if torch.cuda.is_available():\n devices.append(\"cuda\")\n return devices\n\n\n# setup the devices to test the source code\n\nTEST_DEVICES = get_test_devices()\n"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BogiHsu/Tacotron2-PyTorch | [
"aafde3845828fc3b9df796e60607e0de67873409"
] | [
"train.py"
] | [
"import os\nimport time\nimport torch\nimport argparse\nimport numpy as np\nfrom inference import infer\nfrom utils.util import mode\nfrom hparams import hparams as hps\nfrom torch.utils.data import DataLoader\nfrom utils.logger import Tacotron2Logger\nfrom utils.dataset import ljdataset, ljcollate\nfrom model.model import Tacotron2, Tacotron2Loss\nnp.random.seed(hps.seed)\ntorch.manual_seed(hps.seed)\ntorch.cuda.manual_seed(hps.seed)\n\ndef prepare_dataloaders(fdir):\n\ttrainset = ljdataset(fdir)\n\tcollate_fn = ljcollate(hps.n_frames_per_step)\n\ttrain_loader = DataLoader(trainset, num_workers = hps.n_workers, shuffle = True,\n\t\t\t\t\t\t\t batch_size = hps.batch_size, pin_memory = hps.pin_mem,\n\t\t\t\t\t\t\t drop_last = True, collate_fn = collate_fn)\n\treturn train_loader\n\n\ndef load_checkpoint(ckpt_pth, model, optimizer):\n\tckpt_dict = torch.load(ckpt_pth)\n\tmodel.load_state_dict(ckpt_dict['model'])\n\toptimizer.load_state_dict(ckpt_dict['optimizer'])\n\titeration = ckpt_dict['iteration']\n\treturn model, optimizer, iteration\n\n\ndef save_checkpoint(model, optimizer, iteration, ckpt_pth):\n\ttorch.save({'model': model.state_dict(),\n\t\t\t\t'optimizer': optimizer.state_dict(),\n\t\t\t\t'iteration': iteration}, ckpt_pth)\n\n\ndef train(args):\n\t# build model\n\tmodel = Tacotron2()\n\tmode(model, True)\n\toptimizer = torch.optim.Adam(model.parameters(), lr = hps.lr,\n\t\t\t\t\t\t\t\tbetas = hps.betas, eps = hps.eps,\n\t\t\t\t\t\t\t\tweight_decay = hps.weight_decay)\n\tcriterion = Tacotron2Loss()\n\t\n\t# load checkpoint\n\titeration = 1\n\tif args.ckpt_pth != '':\n\t\tmodel, optimizer, iteration = load_checkpoint(args.ckpt_pth, model, optimizer)\n\t\titeration += 1 # next iteration is iteration+1\n\t\n\t# get scheduler\n\tif hps.sch:\n\t\tlr_lambda = lambda step: hps.sch_step**0.5*min((step+1)*hps.sch_step**-1.5, (step+1)**-0.5)\n\t\tif args.ckpt_pth != '':\n\t\t\tscheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch = iteration)\n\t\telse:\n\t\t\tscheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)\n\t\n\t# make dataset\n\ttrain_loader = prepare_dataloaders(args.data_dir)\n\t\n\t# get logger ready\n\tif args.log_dir != '':\n\t\tif not os.path.isdir(args.log_dir):\n\t\t\tos.makedirs(args.log_dir)\n\t\t\tos.chmod(args.log_dir, 0o775)\n\t\tlogger = Tacotron2Logger(args.log_dir)\n\n\t# get ckpt_dir ready\n\tif args.ckpt_dir != '' and not os.path.isdir(args.ckpt_dir):\n\t\tos.makedirs(args.ckpt_dir)\n\t\tos.chmod(args.ckpt_dir, 0o775)\n\t\n\tmodel.train()\n\t# ================ MAIN TRAINNIG LOOP! 
===================\n\twhile iteration <= hps.max_iter:\n\t\tfor batch in train_loader:\n\t\t\tif iteration > hps.max_iter:\n\t\t\t\tbreak\n\t\t\tstart = time.perf_counter()\n\t\t\tx, y = model.parse_batch(batch)\n\t\t\ty_pred = model(x)\n\n\t\t\t# loss\n\t\t\tloss, item = criterion(y_pred, y, iteration)\n\t\t\t\n\t\t\t# zero grad\n\t\t\tmodel.zero_grad()\n\t\t\t\n\t\t\t# backward, grad_norm, and update\n\t\t\tloss.backward()\n\t\t\tgrad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hps.grad_clip_thresh)\n\t\t\toptimizer.step()\n\t\t\tif hps.sch:\n\t\t\t\tscheduler.step()\n\t\t\t\n\t\t\t# info\n\t\t\tdur = time.perf_counter()-start\n\t\t\tprint('Iter: {} Loss: {:.2e} Grad Norm: {:.2e} {:.1f}s/it'.format(\n\t\t\t\titeration, item, grad_norm, dur))\n\t\t\t\n\t\t\t# log\n\t\t\tif args.log_dir != '' and (iteration % hps.iters_per_log == 0):\n\t\t\t\tlearning_rate = optimizer.param_groups[0]['lr']\n\t\t\t\tlogger.log_training(item, grad_norm, learning_rate, iteration)\n\t\t\t\n\t\t\t# sample\n\t\t\tif args.log_dir != '' and (iteration % hps.iters_per_sample == 0):\n\t\t\t\tmodel.eval()\n\t\t\t\toutput = infer(hps.eg_text, model)\n\t\t\t\tmodel.train()\n\t\t\t\tlogger.sample_training(output, iteration)\n\t\t\t\n\t\t\t# save ckpt\n\t\t\tif args.ckpt_dir != '' and (iteration % hps.iters_per_ckpt == 0):\n\t\t\t\tckpt_pth = os.path.join(args.ckpt_dir, 'ckpt_{}'.format(iteration))\n\t\t\t\tsave_checkpoint(model, optimizer, iteration, ckpt_pth)\n\n\t\t\titeration += 1\n\tif args.log_dir != '':\n\t\tlogger.close()\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\t# path\n\tparser.add_argument('-d', '--data_dir', type = str, default = 'data',\n\t\t\t\t\t\thelp = 'directory to load data')\n\tparser.add_argument('-l', '--log_dir', type = str, default = 'log',\n\t\t\t\t\t\thelp = 'directory to save tensorboard logs')\n\tparser.add_argument('-cd', '--ckpt_dir', type = str, default = 'ckpt',\n\t\t\t\t\t\thelp = 'directory to save checkpoints')\n\tparser.add_argument('-cp', '--ckpt_pth', type = str, default = '',\n\t\t\t\t\t\thelp = 'path to load checkpoints')\n\n\targs = parser.parse_args()\n\t\n\ttorch.backends.cudnn.enabled = True\n\ttorch.backends.cudnn.benchmark = False # faster due to dynamic input shape\n\ttrain(args)\n"
] | [
[
"torch.optim.lr_scheduler.LambdaLR",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cors/hls4ml | [
"d75e350b788a1114f4b50377f89bacf98dd99138"
] | [
"hls4ml/writer/vivado_writer.py"
] | [
"from __future__ import print_function\nimport tarfile\nimport yaml\nfrom shutil import copyfile, copytree, rmtree\nimport numpy as np\nimport os\nimport re\nimport glob\nfrom collections import OrderedDict\n\nfrom hls4ml.writer.writers import Writer\nfrom hls4ml.model.hls_layers import XnorPrecisionType\n\nconfig_filename = 'hls4ml_config.yml'\n\nclass VivadoWriter(Writer):\n\n def type_definition_cpp(self, model, atype):\n type_class = atype.__class__.__name__\n if type_class == 'HLSType':\n return 'typedef {precision} {name};\\n'.format(name=atype.name, precision=atype.precision)\n elif type_class == 'CompressedType':\n cpp_fmt = ('typedef struct {name} {{ '\n '{index} row_index;'\n '{index} col_index;'\n '{precision} weight; }} {name};\\n')\n return cpp_fmt.format(name=atype.name, index=atype.index_precision, precision=atype.precision)\n elif type_class == 'PackedType':\n n_elem_expr = '/' if atype.unpack else '*'\n return 'typedef nnet::array<{precision}, {n_elem}> {name};\\n'.format(name=atype.name, precision=atype.precision, n_elem=str(atype.n_elem) + n_elem_expr + str(atype.n_pack))\n elif type_class == 'ExponentType':\n cpp_fmt = ('typedef struct {name} {{ '\n '{sign} sign; '\n '{precision} weight; }} {name};\\n')\n return cpp_fmt.format(name=atype.name, precision=atype.precision, sign=str(XnorPrecisionType()))\n else:\n raise Exception('Unknown data type class \"{}\"'.format(type_class))\n\n def variable_definition_cpp(self, model, var, name_suffix='', as_reference=False):\n var_class = var.__class__.__name__\n if var_class == 'ArrayVariable':\n return '{type} {name}{suffix}[{shape}]'.format(type=var.type.name, name=var.cppname, suffix=name_suffix, shape=var.size_cpp())\n elif var_class == 'StreamVariable':\n if as_reference: # Function parameter\n return 'hls::stream<{type}> &{name}{suffix}'.format(type=var.type.name, name=var.cppname, suffix=name_suffix)\n else: # Declaration\n return 'hls::stream<{type}> {name}{suffix}(\"{name}\")'.format(type=var.type.name, name=var.cppname, suffix=name_suffix)\n elif var_class == 'WeightVariable':\n return '{type} {name}{suffix}[{size}]'.format(type=var.type.name, name=var.cppname, suffix=name_suffix, size=var.data_length)\n elif var_class == 'InplaceVariable':\n return None\n else:\n raise Exception('Unknown variable class \"{}\"'.format(var_class))\n\n def print_array_to_cpp(self, var, odir, write_txt_file=True):\n #######################################\n ## Print weight array to C++\n #######################################\n\n h_file = open(\"{}/firmware/weights/{}.h\".format(odir,var.name),\"w\")\n if write_txt_file:\n txt_file = open(\"{}/firmware/weights/{}.txt\".format(odir,var.name),\"w\")\n\n #meta data\n h_file.write(\"//Numpy array shape {}\\n\".format(var.shape))\n h_file.write(\"//Min {:.12f}\\n\".format(np.min(var.min)))\n h_file.write(\"//Max {:.12f}\\n\".format(np.max(var.max)))\n h_file.write(\"//Number of zeros {}\\n\".format(var.nzeros))\n h_file.write(\"\\n\")\n\n h_file.write(\"#ifndef {}_H_\\n\".format(var.name.upper()))\n h_file.write(\"#define {}_H_\\n\".format(var.name.upper()))\n h_file.write(\"\\n\")\n\n if write_txt_file:\n h_file.write(\"#ifndef __SYNTHESIS__\\n\")\n h_file.write(var.definition_cpp() + \";\\n\")\n h_file.write(\"#else\\n\")\n\n h_file.write(var.definition_cpp() + \" = {\")\n\n #fill c++ array.\n #not including internal brackets for multidimensional case\n sep = ''\n for x in var:\n h_file.write(sep + x)\n if write_txt_file:\n txt_file.write(sep + x)\n sep = \", \"\n h_file.write(\"};\\n\")\n if 
write_txt_file:\n h_file.write(\"#endif\\n\")\n txt_file.close()\n h_file.write(\"\\n#endif\\n\")\n h_file.close()\n\n def write_project_dir(self, model):\n if not os.path.isdir(\"{}/firmware/weights\".format(model.config.get_output_dir())):\n os.makedirs(\"{}/firmware/weights\".format(model.config.get_output_dir()))\n\n @staticmethod\n def _make_array_pragma(variable):\n \"\"\"\n Layers in hls_model.py can specify output array partitioning through the `pragma` attribute.\n If `pragma` is a string: options are 'partition', 'reshape', or 'stream'.\n If `pragma` is a tuple: (mode, type, factor) where mode is 'partition' or 'reshape', type is\n 'complete', 'cyclic', or 'block', and factor is an integer only used when the type is not 'complete'.\n \"\"\"\n \n config = variable.pragma\n if type(config) is tuple:\n mode = config[0]\n if mode in ['partition', 'reshape']:\n typ = config[1]\n if typ != 'complete':\n factor = config[2]\n elif mode == 'stream':\n depth = config[1]\n else:\n mode = config\n typ = 'complete'\n factor = 0\n\n if mode in ['partition', 'reshape']:\n if typ == 'complete':\n template = '#pragma HLS ARRAY_{mode} variable={name} {type} dim={dim}'\n else:\n template = '#pragma HLS ARRAY_{mode} variable={name} {type} factor={factor} dim={dim}'\n\n return template.format(mode=mode.upper(), name=variable.name, type=typ, factor=factor, dim=0)\n\n elif mode == 'stream':\n return '#pragma HLS STREAM variable={name} depth={depth}'.format(name=variable.name, depth=depth)\n\n @staticmethod\n def _make_stable_pragma(variable):\n template = '#pragma HLS STABLE variable={name}'\n return template.format(name=variable.name)\n\n def write_project_cpp(self, model):\n ###################\n ## myproject.cpp\n ###################\n\n filedir = os.path.dirname(os.path.abspath(__file__))\n f = open(os.path.join(filedir,'../templates/vivado/firmware/myproject.cpp'),'r')\n fout = open('{}/firmware/{}.cpp'.format(model.config.get_output_dir(), model.config.get_project_name()),'w')\n\n model_inputs = model.get_input_variables()\n model_outputs = model.get_output_variables()\n\n indent = ' '\n\n for line in f.readlines():\n #Add headers to weights and biases\n if 'myproject' in line:\n newline = line.replace('myproject', model.config.get_project_name())\n elif '//hls-fpga-machine-learning insert header' in line:\n inputs_str = ', '.join([self.variable_definition_cpp(model, i, as_reference=True) for i in model_inputs])\n outputs_str = ', '.join([self.variable_definition_cpp(model, o, as_reference=True) for o in model_outputs])\n insize_str = ', '.join(['unsigned short &const_size_in_{}'.format(i) for i in range(1, len(model_inputs) + 1)])\n outsize_str = ', '.join(['unsigned short &const_size_out_{}'.format(i) for i in range(1, len(model_outputs) + 1)])\n\n newline = ''\n newline += indent + inputs_str + ',\\n'\n newline += indent + outputs_str + ',\\n'\n newline += indent + insize_str + ',\\n'\n newline += indent + outsize_str + '\\n'\n\n elif '//hls-fpga-machine-learning insert load weights' in line:\n newline = line\n for layer in model.get_layers():\n for w in layer.get_weights():\n if w.__class__.__name__ == 'CompressedWeightVariable':\n newline += indent + ' nnet::load_compressed_weights_from_txt<{}, {}>({}, \"{}.txt\");\\n'.format(w.type.name, w.nonzeros, w.name, w.name)\n elif w.__class__.__name__ == 'ExponentWeightVariable':\n newline += indent + ' nnet::load_exponent_weights_from_txt<{}, {}>({}, \"{}.txt\");\\n'.format(w.type.name, w.data_length, w.name, w.name)\n else:\n newline += indent + ' 
nnet::load_weights_from_txt<{}, {}>({}, \"{}.txt\");\\n'.format(w.type.name, w.data_length, w.name, w.name)\n\n #Add input/output type\n elif '//hls-fpga-machine-learning insert IO' in line:\n newline = line\n all_inputs = [i.cppname for i in model_inputs]\n all_outputs = [o.cppname for o in model_outputs]\n io_type = model.config.get_config_value(\"IOType\")\n\n if io_type == 'io_parallel':\n for i in model_inputs: newline += indent + self._make_array_pragma(i) + '\\n'\n for o in model_outputs: newline += indent + self._make_array_pragma(o) + '\\n'\n # TODO discussed adding a handle for setting the interface mode for individual input and output arrays (16.03.2020)\n # Probably the handle doesn't need to be exposed to the user but should be just set in hls_model.py\n newline += indent + '#pragma HLS INTERFACE ap_vld port={},{} \\n'.format(','.join(all_inputs), ','.join(all_outputs))\n if model.config.model_strategy.lower() == 'resource':\n newline += indent + '#pragma HLS DATAFLOW \\n'\n else:\n newline += indent + '#pragma HLS PIPELINE \\n'\n if io_type == 'io_serial' or io_type == 'io_stream':\n newline += indent + '#pragma HLS INTERFACE axis port={},{} \\n'.format(','.join(all_inputs), ','.join(all_outputs))\n newline += indent + '#pragma HLS DATAFLOW \\n'\n\n inval_str = '\\n '.join(['const_size_in_{} = {};'.format(i, inp.size_cpp()) for i, inp in enumerate(model_inputs, 1)])\n outval_str = '\\n '.join(['const_size_out_{} = {};'.format(i, out.size_cpp()) for i, out in enumerate(model_outputs, 1)])\n newline += '\\n' + indent + inval_str\n newline += '\\n' + indent + outval_str\n newline += '\\n'\n\n elif '//hls-fpga-machine-learning insert layers' in line:\n newline = line + '\\n'\n inputs = model.get_input_variables()\n outputs = model.get_output_variables()\n for layer in model.get_layers():\n vars = layer.get_variables()\n for var in vars:\n if var not in inputs and var not in outputs:\n def_cpp = self.variable_definition_cpp(model, var)\n if def_cpp is not None:\n newline += ' ' + def_cpp + ';\\n'\n if var.pragma:\n newline += ' ' + self._make_array_pragma(var) + '\\n'\n if model.config.model_strategy.lower() == 'resource':\n newline += ' ' + self._make_stable_pragma(var) + '\\n'\n func = layer.function_cpp()\n if func:\n if len(func) == 1:\n newline += ' ' + func[0] + ' // ' + layer.name + '\\n'\n else:\n newline += '// ' + layer.name + '\\n'\n for line in func:\n newline += ' ' + line + '\\n'\n if model.config.trace_output and layer.get_attr('Trace', False):\n newline += '#ifndef __SYNTHESIS__\\n'\n for var in vars:\n newline += ' nnet::save_layer_output<{}>({}, \"{}\", {});\\n'.format(var.type.name, var.name, layer.name, var.size_cpp())\n newline += '#endif\\n'\n newline += '\\n'\n\n #Just copy line\n else:\n newline = line\n\n fout.write(newline)\n\n f.close()\n fout.close()\n\n def write_project_header(self, model):\n #######################\n ## myproject.h\n #######################\n\n filedir = os.path.dirname(os.path.abspath(__file__))\n f = open(os.path.join(filedir,'../templates/vivado/firmware/myproject.h'),'r')\n fout = open('{}/firmware/{}.h'.format(model.config.get_output_dir(), model.config.get_project_name()),'w')\n\n model_inputs = model.get_input_variables()\n model_outputs = model.get_output_variables()\n\n indent = ' '\n\n for line in f.readlines():\n\n if 'MYPROJECT' in line:\n newline = line.replace('MYPROJECT',format(model.config.get_project_name().upper()))\n elif 'void myproject(' in line:\n newline = 'void {}(\\n'.format(model.config.get_project_name())\n 
elif '//hls-fpga-machine-learning insert header' in line:\n inputs_str = ', '.join([self.variable_definition_cpp(model, i, as_reference=True) for i in model_inputs])\n outputs_str = ', '.join([self.variable_definition_cpp(model, o, as_reference=True) for o in model_outputs])\n insize_str = ', '.join(['unsigned short &const_size_in_{}'.format(i) for i in range(1, len(model_inputs) + 1)])\n outsize_str = ', '.join(['unsigned short &const_size_out_{}'.format(o) for o in range(1, len(model_outputs) + 1)])\n\n newline = ''\n newline += indent + inputs_str + ',\\n'\n newline += indent + outputs_str + ',\\n'\n newline += indent + insize_str + ',\\n'\n newline += indent + outsize_str + '\\n'\n else:\n newline = line\n fout.write(newline)\n\n f.close()\n fout.close()\n\n def write_defines(self, model):\n filedir = os.path.dirname(os.path.abspath(__file__))\n f = open(os.path.join(filedir,'../templates/vivado/firmware/defines.h'),'r')\n fout = open('{}/firmware/defines.h'.format(model.config.get_output_dir()),'w')\n\n for line in f.readlines():\n\n #Insert numbers\n if '//hls-fpga-machine-learning insert numbers' in line:\n newline = line\n numbers = OrderedDict.fromkeys([layer.get_numbers_cpp() for layer in model.get_layers()])\n newline += ''.join(numbers)\n\n elif '//hls-fpga-machine-learning insert layer-precision' in line:\n newline = line\n all_precision = OrderedDict()\n for layer in model.get_layers():\n layer_precision = layer.get_layer_precision()\n all_precision.update(layer_precision)\n for used_type in all_precision.values():\n newline += self.type_definition_cpp(model, used_type)\n\n else:\n newline = line\n fout.write(newline)\n f.close()\n fout.close()\n\n def write_parameters(self, model):\n filedir = os.path.dirname(os.path.abspath(__file__))\n f = open(os.path.join(filedir,'../templates/vivado/firmware/parameters.h'),'r')\n fout = open('{}/firmware/parameters.h'.format(model.config.get_output_dir()),'w')\n\n for line in f.readlines():\n\n if '//hls-fpga-machine-learning insert includes' in line:\n newline = line\n for include in sorted(set(sum((layer.include_list for layer in model.get_layers()), []))):\n newline += '#include \"%s\"\\n' % include\n\n elif '//hls-fpga-machine-learning insert weights' in line:\n newline = line\n for layer in model.get_layers():\n for w in layer.get_weights():\n newline += '#include \"weights/{}.h\"\\n'.format(w.name)\n\n elif \"//hls-fpga-machine-learning insert layer-config\" in line:\n newline = line\n for layer in model.get_layers():\n config = layer.config_cpp()\n if config:\n newline += '// ' + layer.name + '\\n'\n newline += config + '\\n'\n else:\n newline = line\n fout.write(newline)\n f.close()\n fout.close()\n\n def write_weights(self, model):\n for layer in model.get_layers():\n for weights in layer.get_weights():\n self.print_array_to_cpp(weights, model.config.get_output_dir())\n \n def __make_dat_file(self, original_path, project_path): \n \"\"\"\n Convert other input/output data types into a dat file, which is\n a text file with the falttened matrix printed out. Note that ' ' is\n assumed to be the delimiter. 
\n \"\"\"\n\n #Take in data from current supported data files\n if original_path[-3:] == \"npy\":\n data = np.load(original_path)\n else:\n raise Exception(\"Unsupported input/output data files.\")\n\n #Faltten data, just keep first dimension\n data = data.reshape(data.shape[0], -1)\n\n def print_data(f):\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n f.write(str(data[i][j]) + \" \")\n f.write(\"\\n\")\n\n #Print out in dat file\n with open(project_path, \"w\" ) as f:\n print_data(f)\n\n def write_test_bench(self, model):\n ###################\n ## test bench\n ###################\n\n filedir = os.path.dirname(os.path.abspath(__file__))\n\n if not os.path.exists('{}/tb_data/'.format(model.config.get_output_dir())):\n os.mkdir('{}/tb_data/'.format(model.config.get_output_dir()))\n \n input_data = model.config.get_config_value('InputData')\n output_predictions = model.config.get_config_value('OutputPredictions')\n \n if input_data:\n if input_data[-3:] == \"dat\":\n copyfile(input_data, '{}/tb_data/tb_input_features.dat'.format(model.config.get_output_dir()))\n else:\n self.__make_dat_file(input_data,'{}/tb_data/tb_input_features.dat'.format(model.config.get_output_dir()))\n \n if output_predictions:\n if output_predictions[-3:] == \"dat\":\n copyfile(output_predictions, '{}/tb_data/tb_output_predictions.dat'.format(model.config.get_output_dir()))\n else:\n self.__make_dat_file(output_predictions,'{}/tb_data/tb_output_predictions.dat'.format(model.config.get_output_dir()))\n\n f = open(os.path.join(filedir,'../templates/vivado/myproject_test.cpp'),'r')\n fout = open('{}/{}_test.cpp'.format(model.config.get_output_dir(), model.config.get_project_name()),'w')\n\n for line in f.readlines():\n indent = ' ' * (len(line) - len(line.lstrip(' ')))\n\n #Insert numbers\n if 'myproject' in line:\n newline = line.replace('myproject', model.config.get_project_name())\n elif '//hls-fpga-machine-learning insert data' in line:\n newline = line\n offset = 0\n for inp in model.get_input_variables():\n newline += ' ' + self.variable_definition_cpp(model, inp) + ';\\n'\n newline += ' nnet::copy_data<float, {}, {}, {}>(in, {});\\n'.format(inp.type.name, offset, inp.size_cpp(), inp.cppname)\n offset += inp.size()\n for out in model.get_output_variables():\n newline += ' ' + self.variable_definition_cpp(model, out) + ';\\n'\n elif '//hls-fpga-machine-learning insert zero' in line:\n newline = line\n for inp in model.get_input_variables():\n newline += ' ' + self.variable_definition_cpp(model, inp) + ';\\n'\n newline += ' nnet::fill_zero<{}, {}>({});\\n'.format(inp.type.name, inp.size_cpp(), inp.cppname)\n for out in model.get_output_variables():\n newline += ' ' + self.variable_definition_cpp(model, out) + ';\\n'\n elif '//hls-fpga-machine-learning insert top-level-function' in line:\n newline = line\n\n size_str = indent + 'unsigned short {},{};\\n'\n input_size_vars = ','.join(['size_in{}'.format(i) for i in range(1, len(model.get_input_variables()) + 1)])\n output_size_vars = ','.join(['size_out{}'.format(o) for o in range(1, len(model.get_output_variables()) + 1)])\n newline += size_str.format(input_size_vars, output_size_vars)\n\n input_vars = ','.join([i.cppname for i in model.get_input_variables()])\n output_vars = ','.join([o.cppname for o in model.get_output_variables()])\n top_level = indent + '{}({},{},{},{});\\n'.format(model.config.get_project_name(), input_vars, output_vars, input_size_vars, output_size_vars)\n newline += top_level\n elif '//hls-fpga-machine-learning insert 
predictions' in line:\n newline = line\n for out in model.get_output_variables():\n newline += indent + 'for(int i = 0; i < {}; i++) {{\\n'.format(out.size_cpp())\n newline += indent + ' std::cout << pr[i] << \" \";\\n'\n newline += indent + '}\\n'\n newline += indent + 'std::cout << std::endl;\\n'\n elif '//hls-fpga-machine-learning insert tb-output' in line:\n newline = line\n for out in model.get_output_variables():\n newline += indent + 'nnet::print_result<{}, {}>({}, fout);\\n'.format(out.type.name, out.size_cpp(), out.cppname) #TODO enable this\n elif '//hls-fpga-machine-learning insert output' in line or '//hls-fpga-machine-learning insert quantized' in line:\n newline = line\n for out in model.get_output_variables():\n newline += indent + 'nnet::print_result<{}, {}>({}, std::cout, true);\\n'.format(out.type.name, out.size_cpp(), out.cppname)\n else:\n newline = line\n fout.write(newline)\n f.close()\n fout.close()\n\n def write_bridge(self, model):\n ###################\n # c++-python bridge\n ###################\n\n filedir = os.path.dirname(os.path.abspath(__file__))\n f = open(os.path.join(filedir,'../templates/vivado/myproject_bridge.cpp'),'r')\n fout = open('{}/{}_bridge.cpp'.format(model.config.get_output_dir(), model.config.get_project_name()),'w')\n\n model_inputs = model.get_input_variables()\n model_outputs = model.get_output_variables()\n\n indent = ' '\n\n for line in f.readlines():\n\n if 'MYPROJECT' in line:\n newline = line.replace('MYPROJECT', format(model.config.get_project_name().upper()))\n elif 'myproject' in line:\n newline = line.replace('myproject', format(model.config.get_project_name()))\n elif '//hls-fpga-machine-learning insert header' in line:\n dtype = line.split('#', 1)[1].strip()\n inputs_str = ', '.join(['{type} {name}[{shape}]'.format(type=dtype, name=i.cppname, shape=i.size_cpp()) for i in model_inputs])\n outputs_str = ', '.join(['{type} {name}[{shape}]'.format(type=dtype, name=o.cppname, shape=o.size_cpp()) for o in model_outputs])\n insize_str = ', '.join(['unsigned short &const_size_in_{}'.format(i) for i in range(1, len(model_inputs) + 1)])\n outsize_str = ', '.join(['unsigned short &const_size_out_{}'.format(o) for o in range(1, len(model_outputs) + 1)])\n\n newline = ''\n newline += indent + inputs_str + ',\\n'\n newline += indent + outputs_str + ',\\n'\n newline += indent + insize_str + ',\\n'\n newline += indent + outsize_str + '\\n'\n elif '//hls-fpga-machine-learning insert wrapper' in line:\n dtype = line.split('#', 1)[1].strip()\n newline = ''\n for i in model_inputs:\n newline += indent + '{var};\\n'.format(var=self.variable_definition_cpp(model, i, name_suffix='_ap'))\n newline += indent + 'nnet::convert_data<{}, {}, {}>({}, {}_ap);\\n'.format(dtype, i.type.name, i.size_cpp(), i.cppname, i.cppname)\n newline += '\\n'\n \n for o in model_outputs:\n newline += indent + '{var};\\n'.format(var=self.variable_definition_cpp(model, o, name_suffix='_ap'))\n \n newline += '\\n'\n\n input_size_vars = ','.join(['const_size_in_{}'.format(i) for i in range(1, len(model.get_input_variables()) + 1)])\n output_size_vars = ','.join(['const_size_out_{}'.format(o) for o in range(1, len(model.get_output_variables()) + 1)])\n input_vars = ','.join([i.cppname + '_ap' for i in model.get_input_variables()])\n output_vars = ','.join([o.cppname + '_ap' for o in model.get_output_variables()])\n top_level = indent + '{}({}, {}, {}, {});\\n'.format(model.config.get_project_name(), input_vars, output_vars, input_size_vars, output_size_vars)\n newline += 
top_level\n\n newline += '\\n'\n\n for o in model_outputs:\n newline += indent + 'nnet::convert_data<{}, {}, {}>({}_ap, {});\\n'.format(o.type.name, dtype, o.size_cpp(), o.cppname, o.cppname)\n elif '//hls-fpga-machine-learning insert trace_outputs' in line:\n newline = ''\n for layer in model.get_layers():\n if layer.function_cpp() and model.config.trace_output and layer.get_attr('Trace', False):\n vars = layer.get_variables()\n for var in vars:\n newline += indent + 'nnet::trace_outputs->insert(std::pair<std::string, void *>(\"{}\", (void *) malloc({} * element_size)));\\n'.format(layer.name, var.size_cpp())\n \n else:\n newline = line\n fout.write(newline)\n\n f.close()\n fout.close()\n\n def write_build_script(self, model):\n ###################\n # build_prj.tcl\n ###################\n\n filedir = os.path.dirname(os.path.abspath(__file__))\n\n f = open(os.path.join(filedir,'../templates/vivado/build_prj.tcl'),'r')\n fout = open('{}/build_prj.tcl'.format(model.config.get_output_dir()),'w')\n\n for line in f.readlines():\n\n line = line.replace('myproject',model.config.get_project_name())\n\n if 'set_part {xcku115-flvb2104-2-i}' in line:\n line = 'set_part {{{}}}\\n'.format(model.config.get_config_value('XilinxPart'))\n elif 'create_clock -period 5 -name default' in line:\n line = 'create_clock -period {} -name default\\n'.format(model.config.get_config_value('ClockPeriod'))\n\n fout.write(line)\n f.close()\n fout.close()\n\n\n ###################\n # vivado_synth.tcl\n ###################\n\n f = open(os.path.join(filedir,'../templates/vivado/vivado_synth.tcl'),'r')\n fout = open('{}/vivado_synth.tcl'.format(model.config.get_output_dir()),'w')\n for line in f.readlines():\n line = line.replace('myproject', model.config.get_project_name())\n if '-part' in line:\n line = 'synth_design -top {} -part {}\\n'.format(model.config.get_project_name(), model.config.get_config_value('XilinxPart'))\n\n fout.write(line)\n f.close()\n fout.close()\n\n ###################\n # build_lib.sh\n ###################\n\n f = open(os.path.join(filedir,'../templates/vivado/build_lib.sh'),'r')\n fout = open('{}/build_lib.sh'.format(model.config.get_output_dir()),'w')\n\n for line in f.readlines():\n line = line.replace('myproject', model.config.get_project_name())\n line = line.replace('mystamp', model.config.get_config_value('Stamp'))\n\n fout.write(line)\n f.close()\n fout.close()\n\n def write_nnet_utils(self, model):\n ###################\n ## nnet_utils\n ###################\n\n filedir = os.path.dirname(os.path.abspath(__file__))\n\n srcpath = os.path.join(filedir,'../templates/vivado/nnet_utils/')\n dstpath = '{}/firmware/nnet_utils/'.format(model.config.get_output_dir())\n\n if not os.path.exists(dstpath):\n os.mkdir(dstpath)\n\n headers = [os.path.basename(h) for h in glob.glob(srcpath + '*.h')]\n\n for h in headers:\n copyfile(srcpath + h, dstpath + h)\n\n ###################\n ## ap_types\n ###################\n\n filedir = os.path.dirname(os.path.abspath(__file__))\n\n srcpath = os.path.join(filedir,'../templates/vivado/ap_types/')\n dstpath = '{}/firmware/ap_types/'.format(model.config.get_output_dir())\n\n if os.path.exists(dstpath):\n rmtree(dstpath)\n\n copytree(srcpath, dstpath)\n\n def write_yml(self, model):\n ###################\n # YAML config file\n ###################\n\n def keras_model_representer(dumper, keras_model):\n model_path = model.config.get_output_dir() + '/keras_model.h5'\n keras_model.save(model_path)\n return dumper.represent_scalar(u'!keras_model', model_path)\n\n try:\n 
from tensorflow.keras import Model as KerasModel\n yaml.add_multi_representer(KerasModel, keras_model_representer)\n except:\n pass\n\n with open(model.config.get_output_dir() + '/' + config_filename, 'w') as file:\n yaml.dump(model.config.config, file)\n\n def write_tar(self, model):\n ###################\n # Tarball output\n ###################\n\n with tarfile.open(model.config.get_output_dir() + '.tar.gz', mode='w:gz') as archive:\n archive.add(model.config.get_output_dir(), recursive=True)\n\n def write_hls(self, model):\n print('Writing HLS project')\n self.write_project_dir(model)\n self.write_project_cpp(model)\n self.write_project_header(model)\n self.write_weights(model)\n self.write_defines(model)\n self.write_parameters(model)\n self.write_test_bench(model)\n self.write_bridge(model)\n self.write_build_script(model)\n self.write_nnet_utils(model)\n self.write_yml(model)\n self.write_tar(model)\n print('Done')\n"
] | [
[
"numpy.load",
"numpy.max",
"numpy.min"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SincereJoy/SSAH_CVPR2018 | [
"82ada6db1048e51e14cd1c98469b837c78b7584e"
] | [
"load_data.py"
] | [
"import h5py\nimport numpy as np\n\ndef loading_data(path):\n\tprint('******************************************************')\n\tprint('dataset:{0}'.format(path))\n\tprint('******************************************************')\n\n\tfile = h5py.File(path,'r')\n\timages = file['images'][:].transpose(0,3,2,1)\n\tlabels = file['LAll'][:].transpose(1,0)\n\ttags = file['YAll'][:].transpose(1,0)\n\tfile.close()\n\n\treturn images, tags, labels\n\n\ndef split_data(images, tags, labels, QUERY_SIZE, TRAINING_SIZE, DATABASE_SIZE):\n\n\tX = {}\n\tindex_all = np.random.permutation(QUERY_SIZE+DATABASE_SIZE)\n\tind_Q = index_all[0:QUERY_SIZE]\n\tind_T = index_all[QUERY_SIZE:TRAINING_SIZE + QUERY_SIZE]\n\tind_R = index_all[QUERY_SIZE:DATABASE_SIZE + QUERY_SIZE]\n\n\tX['query'] = images[ind_Q, :, :, :]\n\tX['train'] = images[ind_T, :, :, :]\n\tX['retrieval'] = images[ind_R, :, :, :]\n\n\tY = {}\n\tY['query'] = tags[ind_Q, :]\n\tY['train'] = tags[ind_T, :]\n\tY['retrieval'] = tags[ind_R, :]\n\n\tL = {}\n\tL['query'] = labels[ind_Q, :]\n\tL['train'] = labels[ind_T, :]\n\tL['retrieval'] = labels[ind_R, :]\n\treturn X, Y, L\n"
] | [
[
"numpy.random.permutation"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kaching-out-of-ammo/Excel_Weighbridge | [
"3d5ccd52fa659df313bed0624e05d7fda353c1a9"
] | [
"test.py"
] | [
"from picamera.array import PiRGBArray\r\nfrom picamera import PiCamera\r\nimport cv2\r\nimport time\r\nfrom threading import Thread\r\nimport imutils\r\nimport numpy as np\r\nfrom pyzbar.pyzbar import decode\r\n\r\n\r\ncamera = PiCamera()\r\ncamera.resolution = (800, 608)\r\ncamera.framerate = 32\r\nrawCapture1 = PiRGBArray(camera, size=(800, 608))\r\nrawCapture2 = PiRGBArray(camera, size=(800, 608))\r\nBackSub = cv2.createBackgroundSubtractorMOG2(500, 21, True)\r\nFirstFrame=None\r\nDetCount = 0\r\nFrameCount = 0\r\n\r\ndef Motion_Camera(rawCaptureGrey, QRDetect):\r\n for f in camera.capture_continuous(rawCaptureGrey, format=\"bgr\", use_video_port=True):\r\n frame = f.array\r\n rawCapture1.truncate(0)\r\n if frame is not None:\r\n if QRDetect:\r\n return frame\r\n else:\r\n frame = BackSub.apply(frame)\r\n return frame\r\n \r\ndef QR_Camera(rawCaptureGrey, QRDetect):\r\n for f in camera.capture_continuous(rawCaptureGrey, format=\"bgr\", use_video_port=True):\r\n frame = f.array\r\n rawCapture2.truncate(0)\r\n if frame is not None:\r\n return frame\r\n\r\ndef Motion_Detection(Grey_Frame):\r\n Det_Index = False\r\n thresh = cv2.threshold(Grey_Frame, 230, 255, cv2.THRESH_BINARY)[1]\r\n# thresh = cv2.dilate(thresh, None, iterations=1)\r\n thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, (5,5))\r\n '''cv2.imshow(\"frame\", thresh)\r\n if cv2.waitKey(1) == ord('q'):\r\n exit()\r\n return thresh'''\r\n cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n cnts = imutils.grab_contours(cnts)\r\n for c in cnts:\r\n if cv2.contourArea(c) > 10000:\r\n Det_Index=True\r\n# return thresh\r\n return Det_Index\r\n \r\ndef get_qr_data(input_frame):\r\n try:\r\n return decode(input_frame)\r\n except:\r\n return []\r\n\r\ndef draw_polygon(f_in, qro):\r\n if len(qro) == 0:\r\n return f_in\r\n else:\r\n for obj in qro:\r\n text = obj.data.decode('utf-8')\r\n pts = np.array([obj.polygon],np.int32)\r\n pts = pts.reshape((4,1,2))\r\n cv2.polylines(f_in, [pts], True, (255, 100, 5), 2)\r\n cv2.putText(f_in, text, (50,50), cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 100, 5), 2)\r\n return f_in\r\n\r\nwhile True:\r\n f = Motion_Camera(rawCapture1, False)\r\n DetVar = False\r\n# (DetVar, frame) = Motion_Detection(f)\r\n DetVar = Motion_Detection(f)\r\n if DetVar:\r\n DetCount += 1\r\n print(DetCount)\r\n print(DetVar)\r\n if DetCount == 1:\r\n continue\r\n else:\r\n while DetVar:\r\n QR_f = QR_Camera(rawCapture2, True)\r\n qr_obj = get_qr_data(QR_f)\r\n frame = draw_polygon(f, qr_obj)\r\n cv2.imshow(\"frame\", frame)\r\n if cv2.waitKey(3000):\r\n DetVar = False\r\n break\r\n\r\ncv2.destroyAllWindows()\r\n \r\n\r\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
obsmax/seispy | [
"1ed5064dbcfeb4ceebf20b59b97e588609c5b8ab"
] | [
"seispy/stream.py"
] | [
"from copy import deepcopy\nimport numpy as np\nfrom numpy.lib.npyio import _savez\nfrom matplotlib.collections import LineCollection\nimport matplotlib.pyplot as plt\nfrom seispy.trace import Trace, FourierDomainTrace\nfrom seispy.errors import EmptyStreamError, DataTypeError, \\\n SamplingError, SamplingRateError, NptsError, StarttimeError\nfrom timetools.timetick import timetick\n# from seispy.time.timetick import timetick\n\n\ndef readseispystream(npzfilename):\n st = Stream()\n st.from_npz(npzfilename=npzfilename)\n return st\n\n\nclass Stream(list):\n\n def __init__(self, traces: list = None):\n \"\"\"\n initiate the instance with the stream (obspy or obsmax4)\n or nothing : see self.from_obspy or self.from_npz\"\"\"\n\n if traces is None:\n super().__init__([])\n\n else:\n for trace in traces:\n if not isinstance(trace, Trace):\n raise TypeError(type(traces))\n super().__init__(traces)\n\n def copy(self):\n return deepcopy(self)\n\n def __str__(self):\n return \"\\n\".join([str(tr) for tr in self])\n\n def __repr__(self):\n return self.__str__()\n\n # ============ convertion from or to obspy\n def from_obspy(self, stream):\n \"\"\"populate the objects with an obspy stream\n use it to convert obspy into a seispy object\n \"\"\"\n\n for obspy_trace in stream:\n trace = Trace()\n trace.from_obspy(obspy_trace)\n self.append(trace)\n\n def to_obspy(self):\n # warning this module must keep independant from obspy, I just assume here that the user is\n # trying to convert this object to obspy, so obspy is installed\n try:\n from obspy.core.stream import Stream as ObspyStream\n except ImportError as e:\n e.args = ('obspy not installed', )\n raise e\n\n obspy_traces = []\n for seispy_trace in self:\n obspy_trace = seispy_trace.to_obspy()\n obspy_traces.append(obspy_trace)\n\n return ObspyStream(obspy_traces)\n\n # ============\n def check_data_types(self):\n\n if not len(self):\n raise EmptyStreamError()\n\n dtype = self[0].data.dtype\n for trace in self[1:]:\n if dtype != trace.data.dtype:\n raise DataTypeError\n return dtype\n\n def check_stream_sampling_regularization(self):\n \"\"\"\n verifies that all traces have the same time vector\n :return:\n \"\"\"\n\n if not len(self):\n raise EmptyStreamError()\n\n msg = 'the stream is not regularized, please resample {}, ({}, {})'\n nptss = np.asarray([tr.npts for tr in self], int)\n deltas = np.asarray([tr.delta for tr in self], float)\n starttimes = np.asarray([tr.starttime for tr in self], float)\n\n npts = self[0].npts\n delta = self[0].delta\n starttime = self[0].starttime\n\n is_npts = nptss == npts\n is_delta = deltas == delta\n is_start = starttimes == starttime\n\n if not is_npts.all():\n raise NptsError(msg.format(\"npts\", npts, nptss[~is_npts][0]))\n\n elif not is_delta.all():\n raise SamplingRateError(msg.format(\"delta\", delta, deltas[~is_delta][0]))\n\n elif not is_start.all():\n raise StarttimeError(msg.format(\"starttime\", starttime, starttimes[~is_start][0]))\n\n return npts, delta, starttime\n\n def regularize(self, fill_value: float = 0.0, qc: bool = True):\n\n if not len(self):\n raise EmptyStreamError()\n\n starttimes = np.asarray([tr.starttime for tr in self], float)\n endtimes = np.asarray([tr.endtime for tr in self], float)\n deltas = np.asarray([tr.delta for tr in self], float)\n\n delta = np.min(deltas)\n start = np.min(starttimes)\n end = np.max(endtimes)\n\n new_npts = int(np.floor((end - start) / delta))\n new_time = np.arange(new_npts) * delta + start\n\n for n, tr in enumerate(self):\n tr: Trace\n\n if (tr.delta 
== delta) and \\\n (tr.starttime == start) and \\\n (tr.npts == new_npts):\n # no need to interpolate the signal\n continue\n\n old_time = tr.atime()\n old_data = tr.data\n\n tr.data = np.interp(\n new_time, xp=old_time, fp=old_data,\n left=fill_value, right=fill_value)\n\n tr.starttime = start\n tr.delta = delta\n\n if qc:\n try:\n self.check_stream_sampling_regularization()\n except (EmptyStreamError, SamplingError) as e:\n e.args = (\"the regularization failed, {}\".format(str(e)))\n\n def mean(self):\n nptss = np.asarray([tr.npts for tr in self], float)\n sum = np.sum([tr.data.sum() for tr in self])\n mean = sum / nptss.sum()\n return mean\n\n def pseudo_std(self):\n \"\"\"\n std is evaluated by means of deviations relative to the mean of each trace\n and not relative to the ensemble mean as in self.std\n \"\"\"\n nptss = np.asarray([tr.npts for tr in self], float)\n covariances = np.asarray([tr.data.std() ** 2.0 for tr in self], float) # E((Xi - E(Xi))^2)\n return ((nptss * covariances).sum() / nptss.sum()) ** 0.5\n\n def std(self):\n # return np.concatenate([tr.data for tr in self]).std()\n\n # same as above without concatenating arrays\n nptss = np.asarray([tr.npts for tr in self], float)\n means = np.asarray([tr.data.mean() for tr in self], float)\n mean = (nptss * means).sum() / nptss.sum()\n deviations = np.array([((tr.data - mean) ** 2.0).sum() for tr in self])\n return (deviations.sum() / nptss.sum()) ** 0.5\n\n def clip(self, nstd=10.0):\n \"\"\"\n remove outliers above a certain threshold given in number of times the pseudo_std\n :param nstd:\n :return:\n \"\"\"\n means = np.asarray([tr.data.mean() for tr in self], float)\n pseudo_std = self.pseudo_std()\n for tr, m in zip(self, means):\n tr.data = tr.data.clip(m - nstd * pseudo_std, m + nstd * pseudo_std)\n\n def show(self, ax, gain=0.1, color=\"k\", alpha=0.4,\n seedticks=False, linewidth=2, linestyle=\"-\",\n obspy_decim=False, obspy_decim_nwin=1000):\n \"\"\"\n show many traces on same plot with vertical offset 1 per trace\n\n :param ax:\n :param gain:\n :param color:\n :param alpha:\n :param seedticks:\n :param linewidth:\n :param linestyle:\n :param obspy_decim:\n :return:\n \"\"\"\n\n if len(self) <= 1:\n raise ValueError('too few items for st.show, ')\n\n fourier_domain = np.all([isinstance(tr, FourierDomainTrace) for tr in self])\n\n xmin, xmax = np.inf, -np.inf\n edge_segments = []\n assert 0 < alpha <= 1.0\n i = 0\n\n if fourier_domain:\n fs, dats = [], []\n for i, tr in enumerate(self):\n f, dat = tr.side(sign=1, zero=False, copy=False)\n fs.append(f)\n dats.append(np.abs(dat))\n\n k = gain / np.std(np.hstack(dats))\n xmin = np.hstack(fs).min()\n xmax = np.hstack(fs).max()\n\n for i, (f, dat) in enumerate(zip(fs, dats)):\n edge_segments.append(np.column_stack((f, k * dat + i)))\n\n else:\n k = gain / self.std()\n for i, tr in enumerate(self):\n if obspy_decim:\n t, dat = tr.obspy_like_decim(nwin=obspy_decim_nwin)\n dat = np.column_stack((t, k * dat + i))\n else:\n dat = np.column_stack((tr.atime(), k * tr.data + i))\n\n edge_segments.append(dat)\n\n if tr.starttime < xmin:\n xmin = tr.starttime\n if tr.endtime > xmax:\n xmax = tr.endtime\n\n coll = LineCollection(\n edge_segments, colors=color, alpha=alpha,\n linewidths=linewidth, linestyles=linestyle)\n ax.add_collection(coll)\n\n if seedticks:\n yticks = np.arange(len(self))\n yticklabels = [_.seedid for _ in self]\n ax.set_yticks(yticks)\n ax.set_yticklabels(yticklabels)\n\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(-1., i + 1.)\n\n if fourier_domain:\n pass\n 
else:\n timetick(ax=ax, axis=\"x\", major=True, minor=True)\n return coll\n\n def shade(self, ax, cmap=None, vmin=None, vmax=None, powergain=1., seedticks=False, **kwargs):\n \"\"\"\n\n :param ax: obsmax4.graphictools.gutils.myax object, use obsmax4.graphictools.gca\n :param cmap: colormap\n :param vmin: float, lowest value, or None\n :param vmax: float, highest value, or None\n :param powergain: float, > 0, apply power gain to the plotted amplitudes\n :param cticks:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n\n assert len(self)\n kwargs.setdefault('rasterized', True)\n\n fourier_domain = np.all([isinstance(tr, FourierDomainTrace) for tr in self])\n\n if cmap is None:\n if fourier_domain:\n cmap = plt.get_cmap('nipy_spectral')\n else:\n cmap = plt.get_cmap('gray')\n\n nmax = np.max([len(tr.data) for tr in self])\n\n T, I, D = [], [], []\n dmin, dmax = np.inf, -np.inf\n for n, tr in enumerate(self):\n if fourier_domain:\n f, d = tr.side(sign=1, zero=False, copy=False)\n d = np.abs(d)\n else:\n d = tr.data[:]\n\n if powergain != 1.:\n d = np.sign(d) * np.abs(d) ** powergain\n\n # all items in D must be the same length\n d = np.concatenate((d, np.nan * np.zeros(nmax - len(d))))\n d = np.ma.masked_where(np.isnan(d) | np.isinf(d), d)\n dmin = np.min([dmin, d.min()])\n dmax = np.max([dmax, d.max()])\n # -----\n D.append(d)\n if n <= len(self) - 2:\n D.append(d * 0.)\n\n # -----\n if fourier_domain:\n df = f[1] - f[0]\n f = -.5 * df + np.hstack((f, (f[-1] + df) * np.ones(nmax + 1 - len(f))))\n T.append(f)\n T.append(f)\n else:\n dt = tr.delta\n t = -.5 * dt + tr.starttime + np.arange(nmax+1) * dt\n T.append(t)\n T.append(t)\n\n # -----\n I.append(n - .5 * np.ones(len(d) + 1))\n I.append(n + .5 * np.ones(len(d) + 1))\n\n T, I, D = [np.asarray(_) for _ in [T, I, D]]\n if vmin is None and vmax is None:\n vmax = np.max([abs(dmin), abs(dmax)])\n vmin = -vmax\n if vmax is None:\n vmax = dmax\n if vmin is None:\n vmin = dmin\n\n if fourier_domain:\n vmin=0.\n vmax=vmax\n\n # print(T.shape, I.shape, D.shape)\n coll = ax.pcolormesh(\n T, I, D,\n cmap=cmap,\n vmin=vmin, vmax=vmax,\n **kwargs)\n\n if seedticks:\n yticks = np.arange(len(self))\n yticklabels = [_.seedid for _ in self]\n ax.set_yticks(yticks)\n ax.set_yticklabels(yticklabels)\n\n ax.set_xlim((T.min(), T.max()))\n ax.set_ylim((0, I.max()))\n\n cbarwidth = 0.008\n cbarheight = 0.5\n cbardist = 0.012\n p = ax.get_position()\n cax = ax.figure.add_axes((p.x1 + cbardist * p.width,\n p.y0 + 0.5 * (1. 
- cbarheight) * p.height,\n cbarwidth, cbarheight * p.height))\n\n ax.figure.colorbar(coll, cax=cax, ticks=[vmin, 0, vmax])\n cax.set_yticklabels([\"-\", \"0\", \"+\"])\n\n if not fourier_domain:\n timetick(ax=ax, axis=\"x\", major=True, minor=True)\n\n return coll, cax\n\n def savez(self, npzfilename):\n \"\"\"\n write the stream under npz format\n the filename must end with .seispystream.npz\n\n :param npzfilename:\n :return:\n \"\"\"\n if not len(self):\n raise EmptyStreamError\n\n if not npzfilename.endswith('.seispystream.npz'):\n raise ValueError('npzfilename does not end with .seispystream.npz')\n\n # == put the metadata into lists, one per item\n kwargs = {\n \"npts\": np.array([trace.npts for trace in self], np.dtype('uint32')),\n \"delta\": np.array([trace.delta for trace in self], np.dtype('float64')),\n \"starttime\": np.array([trace.starttime for trace in self], np.dtype('float64')),\n \"seedid\": np.array([trace.seedid for trace in self], np.dtype('str')),\n \"longitude\": np.array([trace.longitude for trace in self], np.dtype('float64')),\n \"latitude\": np.array([trace.latitude for trace in self], np.dtype('float64')),\n \"elevation\": np.array([trace.elevation for trace in self], np.dtype('float64')),\n \"distance\": np.array([trace.distance for trace in self], np.dtype('float64'))}\n\n # == store the data arrays as individual items named\n # data_network_station_location_channel_idnumber\n for array_id, trace in enumerate(self):\n key = \"data_{seedid}_{array_id}\".format(\n seedid=trace.seedid.replace('.', '_'),\n array_id=array_id)\n kwargs[key] = trace.data\n\n _savez(npzfilename, args=(), compress=True, allow_pickle=False,\n kwds=kwargs)\n\n def from_npz(self, npzfilename):\n \"\"\"\n populate the object with a .seispystream.npz file\n :param npzfilename:\n :return:\n \"\"\"\n assert npzfilename.endswith('.seispystream.npz')\n\n with np.load(npzfilename) as loader:\n delta = loader[\"delta\"]\n starttime = loader[\"starttime\"]\n seedid = loader[\"seedid\"]\n longitude = loader[\"longitude\"]\n latitude = loader[\"latitude\"]\n elevation = loader[\"elevation\"]\n distance = loader[\"distance\"]\n\n for array_id in range(len(delta)):\n data_key_old = 'data_{seedid}_{array_id}'.format(\n seedid=seedid[array_id],\n array_id=array_id)\n\n data_key_new = 'data_{seedid}_{array_id}'.format(\n seedid=seedid[array_id].replace('.', '_'),\n array_id=array_id)\n\n if data_key_new in loader.files:\n data_key = data_key_new\n elif data_key_old in loader.files:\n data_key = data_key_old\n else:\n raise KeyError(npzfilename, data_key_new, data_key_old)\n\n trace = Trace(\n seedid=seedid[array_id],\n delta=delta[array_id],\n starttime=starttime[array_id],\n longitude=longitude[array_id],\n latitude=latitude[array_id],\n elevation=elevation[array_id],\n distance=distance[array_id],\n data=loader[data_key])\n\n self.append(trace)\n\n def get(self, key):\n\n if not len(self):\n raise EmptyStreamError\n\n try:\n values = np.asarray([trace.__getattribute__(key) for trace in self])\n\n except (AttributeError, KeyError) as e:\n message = \"key {} was not found in \" \\\n \"the attributes of class {}\".format(\n key, type(self[0]))\n e.args = (message, )\n raise e\n\n return values\n\n def sort_by(self, key, order=1):\n if not order in [1, -1]:\n raise ValueError\n\n # == extract sorting value\n values = self.get(key)\n\n # == order by sorting value\n i_sort = np.argsort(values)\n if order == -1:\n i_sort = i_sort[::-1]\n\n # == update the object\n self.__init__([self[i] for i in i_sort])\n\n 
def reject_seedids(self, seedids):\n if not len(self):\n raise EmptyStreamError\n\n trace_seedids = np.array([trace.seedid for trace in self], str)\n bad_traces = np.in1d(trace_seedids, seedids)\n\n self.__init__([self[i] for i in\n range(len(self))\n if not bad_traces[i]])\n\n def reject_nulls(self):\n seedids = self.get('seedid')\n bad_traces = np.array([(tr.data == 0.).all() for tr in self], bool)\n null_seedids = seedids[bad_traces]\n self.reject_seedids(null_seedids)\n return null_seedids\n\n def lowpass(self, *args, **kwargs):\n for trace in self:\n trace.lowpass(*args, **kwargs)\n\n def highpass(self, *args, **kwargs):\n for trace in self:\n trace.highpass(*args, **kwargs)\n\n def bandpass(self, *args, **kwargs):\n for trace in self:\n trace.bandpass(*args, **kwargs)\n\n def gaussbandpass(self, *args, **kwargs):\n for trace in self:\n trace.gaussbandpass(*args, **kwargs)\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n stream = Stream([])\n for _ in range(10):\n tr = Trace(\n seedid=str(int(np.random.rand() * 1.e4)),\n delta=0.4 + 0.1 * np.random.rand(),\n starttime=1000. + 10 * np.random.randn(),\n data=np.random.randn(int(2500 + np.random.randn() * 10)))\n stream.append(tr)\n\n print(stream)\n\n stream.show(plt.gca(), gain=0.1, color=\"k\")\n\n # plt.show()\n\n dtype = stream.check_data_types()\n\n oldstd = stream.std()\n stream.regularize(qc=True)\n newstd = stream.std()\n\n stream.show(plt.gca(), gain=0.1 * newstd / oldstd, color=\"r\", obspy_decim=True)\n stream.savez('toto.seispystream.npz')\n del stream\n stream = Stream([])\n stream.from_npz('toto.seispystream.npz')\n\n # plt.show()\n stream.show(plt.gca(), gain=0.1 * newstd / oldstd, color=\"g\", linestyle=\"--\")\n print(stream)\n\n plt.show()\n"
] | [
[
"numpy.asarray",
"numpy.in1d",
"matplotlib.pyplot.get_cmap",
"numpy.dtype",
"numpy.max",
"numpy.random.randn",
"matplotlib.pyplot.gca",
"numpy.hstack",
"numpy.arange",
"numpy.interp",
"numpy.column_stack",
"numpy.load",
"numpy.min",
"matplotlib.collections.LineCollection",
"numpy.isnan",
"numpy.random.rand",
"numpy.floor",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.sign",
"numpy.lib.npyio._savez",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mintar/mseg-api | [
"df7b899b47b33ad82dcbf17c289856a1f1abea22"
] | [
"tests/test_conn_comp.py"
] | [
"#!/usr/bin/python3\n\nimport numpy as np\n\nfrom mseg.utils.conn_comp import scipy_conn_comp\n\n\ndef test_scipy_conn_comp():\n\t\"\"\" Make sure we can recover a dictionary of binary masks for each conn. component\"\"\"\n\t\n\t# toy semantic label map / label image\n\timg = np.array(\n\t\t[\n\t\t\t[1,1,2,3],\n\t\t\t[1,4,5,3],\n\t\t\t[0,0,1,1]\n\t\t])\n\tclass_to_conncomps_dict = scipy_conn_comp(img)\n\tprint(class_to_conncomps_dict)\n\n\tgt_class_to_conncomps_dict = {\n\t\t0: [\n\t\t\tnp.array([[0, 0, 0, 0],\n\t\t\t\t\t[0, 0, 0, 0],\n\t\t\t\t\t[1, 1, 0, 0]], dtype=np.uint8)\n\t\t], \n\t\t1: [\n\t\t\tnp.array([[1, 1, 0, 0],\n\t\t\t\t\t[1, 0, 0, 0],\n\t\t\t\t\t[0, 0, 0, 0]], dtype=np.uint8), \n\t\t\tnp.array([[0, 0, 0, 0],\n\t\t\t\t\t[0, 0, 0, 0],\n\t\t\t\t\t[0, 0, 1, 1]], dtype=np.uint8)\n\t\t], \n\t\t2: [\n\t\t\tnp.array([[0, 0, 1, 0],\n\t\t\t\t\t[0, 0, 0, 0],\n\t\t\t\t\t[0, 0, 0, 0]], dtype=np.uint8)\n\t\t], \n\t\t3: [\n\t\t\tnp.array([[0, 0, 0, 1],\n\t\t\t\t\t[0, 0, 0, 1],\n\t\t\t\t\t[0, 0, 0, 0]], dtype=np.uint8)\n\t\t], \n\t\t4: [\n\t\t\tnp.array([[0, 0, 0, 0],\n\t\t\t\t\t[0, 1, 0, 0],\n\t\t\t\t\t[0, 0, 0, 0]], dtype=np.uint8)\n\t\t], \n\t\t5: [\n\t\t\tnp.array([\t[0, 0, 0, 0],\n\t\t\t\t\t[0, 0, 1, 0],\n\t\t\t\t\t[0, 0, 0, 0]], dtype=np.uint8)\n\t\t]\n\t}\n\n\tfor class_idx, conncomps_list in class_to_conncomps_dict.items():\n\t\tgt_conncomps_list = gt_class_to_conncomps_dict[class_idx]\n\t\tfor conncomp, gtconncomp in zip(conncomps_list, gt_conncomps_list):\n\t\t\tassert np.allclose(conncomp, gtconncomp)\n\n\n\n\n"
] | [
[
"numpy.array",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mihaichris/search_with_machine_learning_course | [
"17134e63862dd65906012acb2b326f54d06de761"
] | [
"week4/create_labeled_queries.py"
] | [
"import os\nimport argparse\nimport xml.etree.ElementTree as ET\nimport pandas as pd\nimport numpy as np\nimport csv\nimport string\n\nfrom nltk.stem.snowball import SnowballStemmer\n\n# Useful if you want to perform stemming.\nimport nltk\nstemmer = nltk.stem.PorterStemmer()\n\ndef prepare_word(word):\n word = word.lower()\n translator = str.maketrans(string.punctuation, ' '*len(string.punctuation))\n word = word.translate(word)\n word = ' '.join(word.split())\n return stemmer.stem(word)\n\ndef update_to_parent_category(cat, cats_to_be_updated, possible_parent_cats):\n if cat in cats_to_be_updated and not cat in possible_parent_cats:\n parent_of_cat_df = parents_df[parents_df['category'] == cat]\n if len(parent_of_cat_df) == 0:\n return cat\n parent_df_row = parent_of_cat_df.iloc[0]\n return parent_df_row['parent']\n else:\n return cat\n\ncategories_file_name = r'/workspace/datasets/product_data/categories/categories_0001_abcat0010000_to_pcmcat99300050000.xml'\n\nqueries_file_name = r'/workspace/datasets/train.csv'\noutput_file_name = r'/workspace/datasets/labeled_query_data_test.txt'\n\nparser = argparse.ArgumentParser(description='Process arguments.')\ngeneral = parser.add_argument_group(\"general\")\ngeneral.add_argument(\"--min_queries\", default=1, help=\"The minimum number of queries per category label (default is 1)\")\ngeneral.add_argument(\"--output\", default=output_file_name, help=\"the file to output to\")\n\nargs = parser.parse_args()\noutput_file_name = args.output\n\nif args.min_queries:\n min_queries = int(args.min_queries)\n\n# The root category, named Best Buy with id cat00000, doesn't have a parent.\nroot_category_id = 'cat00000'\n\ntree = ET.parse(categories_file_name)\nroot = tree.getroot()\n\n# Parse the category XML file to map each category id to its parent category id in a dataframe.\ncategories = []\nparents = []\nfor child in root:\n id = child.find('id').text\n cat_path = child.find('path')\n cat_path_ids = [cat.find('id').text for cat in cat_path]\n leaf_id = cat_path_ids[-1]\n if leaf_id != root_category_id:\n categories.append(leaf_id)\n parents.append(cat_path_ids[-2])\nparents_df = pd.DataFrame(list(zip(categories, parents)), columns =['category', 'parent'])\n\n# Read the training data into pandas, only keeping queries with non-root categories in our category tree.\ndf = pd.read_csv(queries_file_name)[['category', 'query']]\ndf = df[df['category'].isin(categories)]\n\ndf['query'] = df['query'].transform(prepare_word)\ncategories_counts = df.groupby(['category']).size().reset_index(name='counts')\nprint(categories_counts)\n\nwhile len(categories_counts[categories_counts[\"counts\"] < min_queries].index) != 0:\n categories_df = categories_counts[categories_counts[\"counts\"] < min_queries]\n categories_queries = categories_df['category'].values\n possible_parent_cats = parents_df[parents_df['category'].isin(categories_queries)]['parent'].values\n df['category'] = df['category'].transform(lambda x: update_to_parent_category(x, categories_queries, possible_parent_cats))\n categories_counts = df.groupby(['category']).size().reset_index(name='counts')\n print(len(df['category'].unique()))\n\ndef get_parent_code(category_code):\n if category_code == root_category_id:\n return category_code\n else:\n return parents_df[parents_df.category == category_code]['parent'].values[0]\n \ndf['parent_code'] = df['category'].apply(get_parent_code)\n\ndf['n_queries_per_category'] = df.groupby('category')['query'].transform(len)\nMIN_COUNT = 100\nconditions = [\n 
(df['n_queries_per_category'] <= MIN_COUNT),\n (df['n_queries_per_category'] > MIN_COUNT)\n ]\nvalues = [df['parent_code'], df['category']]\n\ndf['category'] = np.select(conditions, values)\n\nprint(df.category.nunique())\n# Create labels in fastText format.\ndf['label'] = '__label__' + df['category']\n\n# Output labeled query data as a space-separated file, making sure that every category is in the taxonomy.\ndf = df[df['category'].isin(categories)]\ndf['output'] = df['label'] + ' ' + df['query']\ndf[['output']].to_csv(output_file_name, header=False, sep='|', escapechar='\\\\', quoting=csv.QUOTE_NONE, index=False)\n\n"
] | [
[
"pandas.read_csv",
"numpy.select"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
zhyhan/spine-reports-gene | [
"a07b842594f33c5bf0b9095f82664857fe7be2ae"
] | [
"nets/preprocessing.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\n\ndef data_augmentation(image, is_training=None):\n \n if is_training:\n return preprocess_for_train(image)\n \n else:\n return preprocess_for_eval(image)\n \n \n \ndef preprocess_for_train(image):\n #distorted_image = tf.image.random_brightness(image, max_delta=63)!!!Is nothing after random_brightness\n distorted_image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n return distorted_image\ndef preprocess_for_eval(image):\n #distorted_image = tf.image.random_brightness(image, max_delta=63)!!!Is nothing after random_brightness\n distorted_image = tf.image.random_contrast(image, lower=0.2, upper=1.8)\n return distorted_image\n "
] | [
[
"tensorflow.image.random_contrast"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
shoyer/bottleneck | [
"aa71a26fe5ef4127df86276c1ee6b5ba8f6dcc5b"
] | [
"setup.py"
] | [
"#!/usr/bin/env python\n\nimport os\nimport sys\n\ntry:\n import setuptools # noqa\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n\nfrom setuptools import setup, find_packages\nfrom setuptools.extension import Extension\nfrom setuptools.command.build_ext import build_ext as _build_ext\n\n\n# workaround for installing bottleneck when numpy is not present\nclass build_ext(_build_ext):\n # taken from: stackoverflow.com/questions/19919905/\n # how-to-bootstrap-numpy-installation-in-setup-py#21621689\n def finalize_options(self):\n _build_ext.finalize_options(self)\n # prevent numpy from thinking it is still in its setup process\n __builtins__.__NUMPY_SETUP__ = False\n import numpy\n # place numpy includes first, see gh #156\n self.include_dirs.insert(0, numpy.get_include())\n\n\ndef prepare_modules():\n from bottleneck.src.template import make_c_files\n make_c_files()\n ext = [Extension(\"bottleneck.reduce\",\n sources=[\"bottleneck/src/reduce.c\"],\n extra_compile_args=['-O2'])]\n ext += [Extension(\"bottleneck.move\",\n sources=[\"bottleneck/src/move.c\",\n \"bottleneck/src/move_median/move_median.c\"],\n extra_compile_args=['-O2'])]\n ext += [Extension(\"bottleneck.nonreduce\",\n sources=[\"bottleneck/src/nonreduce.c\"],\n extra_compile_args=['-O2'])]\n ext += [Extension(\"bottleneck.nonreduce_axis\",\n sources=[\"bottleneck/src/nonreduce_axis.c\"],\n extra_compile_args=['-O2'])]\n return ext\n\n\ndef get_long_description():\n with open('README.rst', 'r') as fid:\n long_description = fid.read()\n idx = max(0, long_description.find(\"Bottleneck is a collection\"))\n long_description = long_description[idx:]\n return long_description\n\n\ndef get_version_str():\n ver_file = os.path.join('bottleneck', 'version.py')\n with open(ver_file, 'r') as fid:\n version = fid.read()\n version = version.split(\"= \")\n version = version[1].strip()\n version = version.strip(\"\\\"\")\n return version\n\n\nCLASSIFIERS = [\"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: C\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering\"]\n\n\nmetadata = dict(name='Bottleneck',\n maintainer=\"Keith Goodman\",\n maintainer_email=\"[email protected]\",\n description=\"Fast NumPy array functions written in C\",\n long_description=get_long_description(),\n url=\"https://github.com/kwgoodman/bottleneck\",\n download_url=\"http://pypi.python.org/pypi/Bottleneck\",\n license=\"Simplified BSD\",\n classifiers=CLASSIFIERS,\n platforms=\"OS Independent\",\n version=get_version_str(),\n packages=find_packages(),\n package_data={'bottleneck': ['LICENSE']},\n requires=['numpy'],\n install_requires=['numpy'],\n cmdclass={'build_ext': build_ext},\n setup_requires=['numpy'])\n\n\nif not(len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands', 'egg_info', '--version', 'clean',\n 'build_sphinx'))):\n # build bottleneck\n metadata['ext_modules'] = prepare_modules()\nelif sys.argv[1] == 'build_sphinx':\n # create intro.rst (from readme file) for sphinx manual\n readme = 'README.rst'\n intro = os.path.join('doc', 'source', 'intro.rst')\n with open(readme, 'r') as infile, open(intro, 'w') as outfile:\n txt = infile.readlines()[4:] # skip travis, appveyor build status\n outfile.write(''.join(txt))\n\nsetup(**metadata)\n"
] | [
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pgtinsley/deepface | [
"156d8a2a1a38fe7234cfcf3b09b517b9b062e8da"
] | [
"api/api.py"
] | [
"from flask import Flask, jsonify, request, make_response\n\nimport argparse\nimport uuid\nimport json\nimport time\nfrom tqdm import tqdm\n\nimport tensorflow as tf\n\nfrom deepface import DeepFace\nfrom deepface.basemodels import VGGFace, OpenFace, Facenet, FbDeepFace, DeepID\nfrom deepface.basemodels.DlibResNet import DlibResNet\nfrom deepface.extendedmodels import Age, Gender, Race, Emotion\n\n#import DeepFace\n#from basemodels import VGGFace, OpenFace, Facenet, FbDeepFace\n#from extendedmodels import Age, Gender, Race, Emotion\n\n#------------------------------\n\napp = Flask(__name__)\n\n#------------------------------\n\ntic = time.time()\n\nprint(\"Loading Face Recognition Models...\")\n\npbar = tqdm(range(0,6), desc='Loading Face Recognition Models...')\n\nfor index in pbar:\n\tif index == 0:\n\t\tpbar.set_description(\"Loading VGG-Face\")\n\t\tvggface_model = VGGFace.loadModel()\n\telif index == 1:\n\t\tpbar.set_description(\"Loading OpenFace\")\n\t\topenface_model = OpenFace.loadModel()\n\telif index == 2:\n\t\tpbar.set_description(\"Loading Google FaceNet\")\n\t\tfacenet_model = Facenet.loadModel()\n\telif index == 3:\n\t\tpbar.set_description(\"Loading Facebook DeepFace\")\n\t\tdeepface_model = FbDeepFace.loadModel()\n\telif index == 4:\n\t\tpbar.set_description(\"Loading DeepID DeepFace\")\n\t\tdeepid_model = DeepID.loadModel()\n\telif index == 5:\n\t\tpbar.set_description(\"Loading Dlib ResNet DeepFace\")\n\t\tdlib_model = DlibResNet()\n\ntoc = time.time()\n\nprint(\"Face recognition models are built in \", toc-tic,\" seconds\")\n\n#------------------------------\n\ntic = time.time()\n\nprint(\"Loading Facial Attribute Analysis Models...\")\n\npbar = tqdm(range(0,4), desc='Loading Facial Attribute Analysis Models...')\n\nfor index in pbar:\n\tif index == 0:\n\t\tpbar.set_description(\"Loading emotion analysis model\")\n\t\temotion_model = Emotion.loadModel()\n\telif index == 1:\n\t\tpbar.set_description(\"Loading age prediction model\")\n\t\tage_model = Age.loadModel()\n\telif index == 2:\n\t\tpbar.set_description(\"Loading gender prediction model\")\n\t\tgender_model = Gender.loadModel()\n\telif index == 3:\n\t\tpbar.set_description(\"Loading race prediction model\")\n\t\trace_model = Race.loadModel()\n\ntoc = time.time()\n\nfacial_attribute_models = {}\nfacial_attribute_models[\"emotion\"] = emotion_model\nfacial_attribute_models[\"age\"] = age_model\nfacial_attribute_models[\"gender\"] = gender_model\nfacial_attribute_models[\"race\"] = race_model\n\nprint(\"Facial attribute analysis models are built in \", toc-tic,\" seconds\")\n\n#------------------------------\n\ngraph = tf.get_default_graph()\n\n#------------------------------\n#Service API Interface\n\[email protected]('/')\ndef index():\n\treturn '<h1>Hello, world!</h1>'\n\[email protected]('/analyze', methods=['POST'])\ndef analyze():\n\t\n\tglobal graph\n\t\n\ttic = time.time()\n\treq = request.get_json()\n\ttrx_id = uuid.uuid4()\n\n\t#---------------------------\n\t\n\tresp_obj = jsonify({'success': False})\n\twith graph.as_default():\n\t\tinstances = []\n\t\tif \"img\" in list(req.keys()):\n\t\t\traw_content = req[\"img\"] #list\n\n\t\t\tfor item in raw_content: #item is in type of dict\n\t\t\t\tinstances.append(item)\n\t\t\n\t\tif len(instances) == 0:\n\t\t\treturn jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205\n\t\t\n\t\tprint(\"Analyzing \", len(instances),\" instances\")\n\n\t\t#---------------------------\n\n\t\tactions= ['emotion', 'age', 'gender', 
'race']\n\t\tif \"actions\" in list(req.keys()):\n\t\t\tactions = req[\"actions\"]\n\t\t\n\t\t#---------------------------\n\n\t\t#resp_obj = DeepFace.analyze(instances, actions=actions)\n\t\tresp_obj = DeepFace.analyze(instances, actions=actions, models=facial_attribute_models)\n\t\t\n\t\t#---------------------------\n\n\ttoc = time.time()\n\n\tresp_obj[\"trx_id\"] = trx_id\n\tresp_obj[\"seconds\"] = toc-tic\n\n\treturn resp_obj, 200\n\[email protected]('/verify', methods=['POST'])\n\ndef verify():\n\t\n\tglobal graph\n\t\n\ttic = time.time()\n\treq = request.get_json()\n\ttrx_id = uuid.uuid4()\n\t\n\tresp_obj = jsonify({'success': False})\n\t\n\twith graph.as_default():\n\t\t\n\t\tmodel_name = \"VGG-Face\"; distance_metric = \"cosine\"\n\t\tif \"model_name\" in list(req.keys()):\n\t\t\tmodel_name = req[\"model_name\"]\n\t\tif \"distance_metric\" in list(req.keys()):\n\t\t\tdistance_metric = req[\"distance_metric\"]\n\t\t\n\t\t#----------------------\n\t\t\n\t\tinstances = []\n\t\tif \"img\" in list(req.keys()):\n\t\t\traw_content = req[\"img\"] #list\n\n\t\t\tfor item in raw_content: #item is in type of dict\n\t\t\t\tinstance = []\n\t\t\t\timg1 = item[\"img1\"]; img2 = item[\"img2\"]\n\n\t\t\t\tvalidate_img1 = False\n\t\t\t\tif len(img1) > 11 and img1[0:11] == \"data:image/\":\n\t\t\t\t\tvalidate_img1 = True\n\t\t\t\t\n\t\t\t\tvalidate_img2 = False\n\t\t\t\tif len(img2) > 11 and img2[0:11] == \"data:image/\":\n\t\t\t\t\tvalidate_img2 = True\n\n\t\t\t\tif validate_img1 != True or validate_img2 != True:\n\t\t\t\t\treturn jsonify({'success': False, 'error': 'you must pass both img1 and img2 as base64 encoded string'}), 205\n\n\t\t\t\tinstance.append(img1); instance.append(img2)\n\t\t\t\tinstances.append(instance)\n\t\t\t\n\t\t#--------------------------\n\n\t\tif len(instances) == 0:\n\t\t\treturn jsonify({'success': False, 'error': 'you must pass at least one img object in your request'}), 205\n\t\t\n\t\tprint(\"Input request of \", trx_id, \" has \",len(instances),\" pairs to verify\")\n\t\t\n\t\t#--------------------------\n\t\t\n\t\tif model_name == \"VGG-Face\":\n\t\t\tresp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = vggface_model)\n\t\telif model_name == \"Facenet\":\n\t\t\tresp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = facenet_model)\n\t\telif model_name == \"OpenFace\":\n\t\t\tresp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = openface_model)\n\t\telif model_name == \"DeepFace\":\n\t\t\tresp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = deepface_model)\n\t\telif model_name == \"DeepID\":\n\t\t\tresp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = deepid_model)\n\t\telif model_name == \"Dlib\":\n\t\t\tresp_obj = DeepFace.verify(instances, model_name = model_name, distance_metric = distance_metric, model = dlib_model)\n\t\telif model_name == \"Ensemble\":\n\t\t\tmodels = {}\n\t\t\tmodels[\"VGG-Face\"] = vggface_model\n\t\t\tmodels[\"Facenet\"] = facenet_model\n\t\t\tmodels[\"OpenFace\"] = openface_model\n\t\t\tmodels[\"DeepFace\"] = deepface_model\n\t\t\t\n\t\t\tresp_obj = DeepFace.verify(instances, model_name = model_name, model = models)\n\t\t\t\n\t\telse:\n\t\t\treturn jsonify({'success': False, 'error': 'You must pass a valid model name. 
Available models are VGG-Face, Facenet, OpenFace, DeepFace but you passed %s' % (model_name)}), 205\n\t\t\n\t#--------------------------\n\t\n\ttoc = time.time()\n\t\n\tresp_obj[\"trx_id\"] = trx_id\n\tresp_obj[\"seconds\"] = toc-tic\n\t\n\treturn resp_obj, 200\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\n\t\t'-p', '--port',\n\t\ttype=int,\n\t\tdefault=5000,\n\t\thelp='Port of serving api')\n\targs = parser.parse_args()\n\tapp.run(host='0.0.0.0', port=args.port)\n"
] | [
[
"tensorflow.get_default_graph"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
KimuraTian/lkpy | [
"d5b1b86ba73eb0b2b2eb90682aa872917813b20e"
] | [
"tests/test_batch_recommend.py"
] | [
"import pytest\n\nimport os\nimport os.path\nfrom collections import namedtuple\nimport logging\nimport pandas as pd\nimport numpy as np\n\nimport lk_test_utils as lktu\n\nfrom lenskit.algorithms.basic import Bias, TopN\nimport lenskit.batch as lkb\n\nMLB = namedtuple('MLB', ['ratings', 'algo'])\n_log = logging.getLogger(__name__)\n\n\[email protected]\ndef mlb():\n ratings = lktu.ml_pandas.renamed.ratings\n algo = TopN(Bias())\n algo.fit(ratings)\n return MLB(ratings, algo)\n\n\ndef test_recommend_single(mlb):\n res = lkb.recommend(mlb.algo, [1], None, {1: [31]})\n\n assert len(res) == 1\n assert all(res['user'] == 1)\n assert all(res['rank'] == 1)\n assert set(res.columns) == set(['user', 'rank', 'item', 'score'])\n\n algo = mlb.algo.predictor\n expected = algo.mean_ + algo.item_offsets_.loc[31] + algo.user_offsets_.loc[1]\n assert res.score.iloc[0] == pytest.approx(expected)\n\n\ndef test_recommend_user(mlb):\n uid = 5\n items = mlb.ratings.item.unique()\n\n def candidates(user):\n urs = mlb.ratings[mlb.ratings.user == user]\n return np.setdiff1d(items, urs.item.unique())\n\n res = lkb.recommend(mlb.algo, [5], 10, candidates)\n\n assert len(res) == 10\n assert set(res.columns) == set(['user', 'rank', 'item', 'score'])\n assert all(res['user'] == uid)\n assert all(res['rank'] == np.arange(10) + 1)\n # they should be in decreasing order\n assert all(np.diff(res.score) <= 0)\n\n\ndef test_recommend_two_users(mlb):\n items = mlb.ratings.item.unique()\n\n def candidates(user):\n urs = mlb.ratings[mlb.ratings.user == user]\n return np.setdiff1d(items, urs.item.unique())\n\n res = lkb.recommend(mlb.algo, [5, 10], 10, candidates)\n\n assert len(res) == 20\n assert set(res.user) == set([5, 10])\n assert all(res.groupby('user').item.count() == 10)\n assert all(res.groupby('user')['rank'].max() == 10)\n assert all(np.diff(res[res.user == 5].score) <= 0)\n assert all(np.diff(res[res.user == 5]['rank']) == 1)\n assert all(np.diff(res[res.user == 10].score) <= 0)\n assert all(np.diff(res[res.user == 10]['rank']) == 1)\n\n\ndef test_recommend_no_cands(mlb):\n res = lkb.recommend(mlb.algo, [5, 10], 10)\n\n assert len(res) == 20\n assert set(res.user) == set([5, 10])\n assert all(res.groupby('user').item.count() == 10)\n assert all(res.groupby('user')['rank'].max() == 10)\n assert all(np.diff(res[res.user == 5].score) <= 0)\n assert all(np.diff(res[res.user == 5]['rank']) == 1)\n assert all(np.diff(res[res.user == 10].score) <= 0)\n assert all(np.diff(res[res.user == 10]['rank']) == 1)\n\n idx_rates = mlb.ratings.set_index(['user', 'item'])\n merged = res.join(idx_rates, on=['user', 'item'], how='inner')\n assert len(merged) == 0\n\n\[email protected]\ndef test_bias_batch_recommend():\n from lenskit.algorithms import basic\n import lenskit.crossfold as xf\n from lenskit import batch, topn\n\n if not os.path.exists('ml-100k/u.data'):\n raise pytest.skip()\n\n ratings = pd.read_csv('ml-100k/u.data', sep='\\t', names=['user', 'item', 'rating', 'timestamp'])\n\n algo = basic.Bias(damping=5)\n algo = TopN(algo)\n\n def eval(train, test):\n _log.info('running training')\n algo.fit(train)\n _log.info('testing %d users', test.user.nunique())\n recs = batch.recommend(algo, test.user.unique(), 100)\n return recs\n\n folds = list(xf.partition_users(ratings, 5, xf.SampleFrac(0.2)))\n test = pd.concat(y for (x, y) in folds)\n\n recs = pd.concat(eval(train, test) for (train, test) in folds)\n\n _log.info('analyzing recommendations')\n rla = topn.RecListAnalysis()\n rla.add_metric(topn.ndcg)\n results = 
rla.compute(recs, test)\n dcg = results.ndcg\n _log.info('nDCG for %d users is %f (max=%f)', len(dcg), dcg.mean(), dcg.max())\n assert dcg.mean() > 0\n\n\[email protected]('ncpus', [None, 2])\[email protected]\ndef test_pop_batch_recommend(ncpus):\n from lenskit.algorithms import basic\n import lenskit.crossfold as xf\n from lenskit import batch, topn\n\n if not os.path.exists('ml-100k/u.data'):\n raise pytest.skip()\n\n ratings = pd.read_csv('ml-100k/u.data', sep='\\t', names=['user', 'item', 'rating', 'timestamp'])\n\n algo = basic.Popular()\n\n def eval(train, test):\n _log.info('running training')\n algo.fit(train)\n _log.info('testing %d users', test.user.nunique())\n recs = batch.recommend(algo, test.user.unique(), 100,\n nprocs=ncpus)\n return recs\n\n folds = list(xf.partition_users(ratings, 5, xf.SampleFrac(0.2)))\n test = pd.concat(f.test for f in folds)\n\n recs = pd.concat(eval(train, test) for (train, test) in folds)\n\n _log.info('analyzing recommendations')\n rla = topn.RecListAnalysis()\n rla.add_metric(topn.ndcg)\n results = rla.compute(recs, test)\n dcg = results.ndcg\n _log.info('NDCG for %d users is %f (max=%f)', len(dcg), dcg.mean(), dcg.max())\n assert dcg.mean() > 0\n"
] | [
[
"numpy.arange",
"pandas.concat",
"pandas.read_csv",
"numpy.diff"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Rishabh-Choudhry/datasets | [
"2bad427bba6cdcab717698a70c96339733c5d42c"
] | [
"tensorflow_datasets/core/dataset_info_test.py"
] | [
"# coding=utf-8\n# Copyright 2022 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensorflow_datasets.core.dataset_info.\"\"\"\n\nimport json\nimport os\nimport pathlib\nimport tempfile\nimport numpy as np\nimport pytest\n\nimport tensorflow as tf\nfrom tensorflow_datasets import testing\nfrom tensorflow_datasets.core import dataset_info\nfrom tensorflow_datasets.core import features\nfrom tensorflow_datasets.core import file_adapters\nfrom tensorflow_datasets.core import naming\nfrom tensorflow_datasets.core import read_only_builder\nfrom tensorflow_datasets.core import splits as splits_lib\nfrom tensorflow_datasets.core import utils\nfrom tensorflow_datasets.core.proto import dataset_info_pb2\nfrom tensorflow_datasets.core.proto import feature_pb2\nfrom tensorflow_datasets.image_classification import mnist\n\nfrom google.protobuf import text_format\n\n_TFDS_DIR = utils.tfds_path()\n_INFO_DIR = os.path.join(_TFDS_DIR, \"testing\", \"test_data\", \"dataset_info\",\n \"mnist\", \"3.0.1\")\n_INFO_DIR_UNLABELED = os.path.join(_TFDS_DIR, \"testing\", \"test_data\",\n \"dataset_info\", \"mnist_unlabeled\", \"3.0.1\")\n_NON_EXISTENT_DIR = os.path.join(_TFDS_DIR, \"non_existent_dir\")\n\nDummyDatasetSharedGenerator = testing.DummyDatasetSharedGenerator\n\n\nclass RandomShapedImageGenerator(DummyDatasetSharedGenerator):\n\n def _info(self):\n return dataset_info.DatasetInfo(\n builder=self,\n features=features.FeaturesDict({\"im\": features.Image()}),\n supervised_keys=(\"im\", \"im\"),\n metadata=dataset_info.MetadataDict(),\n )\n\n def _generate_examples(self, range_):\n self.info.metadata[\"some_key\"] = 123\n\n for i in range_:\n height = np.random.randint(5, high=10)\n width = np.random.randint(5, high=10)\n yield i, {\n \"im\":\n np.random.randint(\n 0, 255, size=(height, width, 3), dtype=np.uint8)\n }\n\n\nclass DatasetInfoTest(testing.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(DatasetInfoTest, cls).setUpClass()\n cls._tfds_tmp_dir = testing.make_tmp_dir()\n cls._builder = DummyDatasetSharedGenerator(data_dir=cls._tfds_tmp_dir)\n\n @classmethod\n def tearDownClass(cls):\n super(DatasetInfoTest, cls).tearDownClass()\n testing.rm_tmp_dir(cls._tfds_tmp_dir)\n\n def test_non_existent_dir(self):\n info = dataset_info.DatasetInfo(builder=self._builder)\n with self.assertRaisesWithPredicateMatch(\n FileNotFoundError, \"from a directory which does not exist\"):\n info.read_from_directory(_NON_EXISTENT_DIR)\n\n def test_reading(self):\n info = dataset_info.DatasetInfo(builder=self._builder)\n info.read_from_directory(_INFO_DIR)\n\n # Assert that we read the file and initialized DatasetInfo.\n self.assertTrue(info.initialized)\n self.assertEqual(\"dummy_dataset_shared_generator\", info.name)\n self.assertEqual(\"dummy_dataset_shared_generator/1.0.0\", info.full_name)\n\n # Test splits are initialized properly.\n split_dict = info.splits\n\n # Assert they are the correct number.\n self.assertTrue(len(split_dict), 2)\n\n # Assert on 
what they are\n self.assertIn(\"train\", split_dict)\n self.assertIn(\"test\", split_dict)\n\n # Assert that this is computed correctly.\n self.assertEqual(40, info.splits.total_num_examples)\n self.assertEqual(11594722, info.dataset_size)\n\n self.assertEqual(\"image\", info.supervised_keys[0])\n self.assertEqual(\"label\", info.supervised_keys[1])\n self.assertEqual(info.module_name, \"tensorflow_datasets.testing.test_utils\")\n self.assertEqual(False, info.disable_shuffling)\n\n self.assertEqual(info.version, utils.Version(\"1.0.0\"))\n self.assertEqual(info.release_notes, {\n \"1.0.0\": \"Release notes 1.0.0\",\n \"2.0.0\": \"Release notes 2.0.0\"\n })\n\n def test_disable_shuffling(self):\n info = dataset_info.DatasetInfo(\n builder=self._builder, disable_shuffling=True)\n info.read_from_directory(_INFO_DIR)\n\n self.assertEqual(True, info.disable_shuffling)\n\n def test_reading_empty_properties(self):\n info = dataset_info.DatasetInfo(builder=self._builder)\n info.read_from_directory(_INFO_DIR_UNLABELED)\n\n # Assert supervised_keys has not been set\n self.assertIsNone(None, info.supervised_keys)\n\n def test_writing(self):\n # First read in stuff.\n mnist_builder = mnist.MNIST(\n data_dir=tempfile.mkdtemp(dir=self.get_temp_dir()))\n\n info = dataset_info.DatasetInfo(\n builder=mnist_builder, features=mnist_builder.info.features)\n info.read_from_directory(_INFO_DIR)\n\n # Read the json file into a string.\n with tf.io.gfile.GFile(info._dataset_info_path(_INFO_DIR)) as f:\n existing_json = json.load(f)\n\n # Now write to a temp directory.\n with testing.tmp_dir(self.get_temp_dir()) as tmp_dir:\n info.write_to_directory(tmp_dir)\n\n # Read the newly written json file into a string.\n with tf.io.gfile.GFile(info._dataset_info_path(tmp_dir)) as f:\n new_json = json.load(f)\n\n # Read the newly written LICENSE file into a string.\n with tf.io.gfile.GFile(info._license_path(tmp_dir)) as f:\n license_ = f.read()\n\n # Assert what was read and then written and read again is the same.\n self.assertEqual(existing_json, new_json)\n\n # Assert correct license was written.\n self.assertEqual(existing_json[\"redistributionInfo\"][\"license\"], license_)\n\n # Do not check the full string as it display the generated path.\n self.assertEqual(_INFO_STR % mnist_builder.data_dir, repr(info))\n self.assertIn(\"'test': <SplitInfo num_examples=\", repr(info))\n\n def test_restore_after_modification(self):\n # Create a DatasetInfo\n info = dataset_info.DatasetInfo(\n builder=self._builder,\n description=\"A description\",\n supervised_keys=(\"input\", \"output\"),\n homepage=\"http://some-location\",\n citation=\"some citation\",\n license=\"some license\",\n )\n info.download_size = 456\n filepath_template = \"{DATASET}-{SPLIT}.{FILEFORMAT}-{SHARD_X_OF_Y}\"\n info.as_proto.splits.add(\n name=\"train\", num_bytes=512, filepath_template=filepath_template)\n info.as_proto.splits.add(\n name=\"validation\", num_bytes=64, filepath_template=filepath_template)\n info.as_proto.schema.feature.add()\n info.as_proto.schema.feature.add() # Add dynamic statistics\n info.download_checksums = {\n \"url1\": \"some checksum\",\n \"url2\": \"some other checksum\",\n }\n\n with testing.tmp_dir(self.get_temp_dir()) as tmp_dir:\n # Save it\n info.write_to_directory(tmp_dir)\n\n # If fields are not defined, then everything is restored from disk\n restored_info = dataset_info.DatasetInfo(builder=self._builder)\n restored_info.read_from_directory(tmp_dir)\n self.assertEqual(info.as_proto, restored_info.as_proto)\n\n with 
testing.tmp_dir(self.get_temp_dir()) as tmp_dir:\n # Save it\n info.write_to_directory(tmp_dir)\n\n # If fields are defined, then the code version is kept\n restored_info = dataset_info.DatasetInfo(\n builder=self._builder,\n supervised_keys=(\"input (new)\", \"output (new)\"),\n homepage=\"http://some-location-new\",\n citation=\"some citation (new)\",\n redistribution_info={\"license\": \"some license (new)\"})\n restored_info.download_size = 789\n restored_info.as_proto.splits.add(name=\"validation\", num_bytes=288)\n restored_info.as_proto.schema.feature.add()\n restored_info.as_proto.schema.feature.add()\n restored_info.as_proto.schema.feature.add()\n restored_info.as_proto.schema.feature.add() # Add dynamic statistics\n restored_info.download_checksums = {\n \"url2\": \"some other checksum (new)\",\n \"url3\": \"some checksum (new)\",\n }\n\n restored_info.read_from_directory(tmp_dir)\n\n # Even though restored_info has been restored, informations defined in\n # the code overwrite informations from the json file.\n self.assertEqual(restored_info.description, \"A description\")\n self.assertEqual(restored_info.version, utils.Version(\"1.0.0\"))\n self.assertEqual(restored_info.release_notes, {\n \"1.0.0\": \"Release notes 1.0.0\",\n \"2.0.0\": \"Release notes 2.0.0\"\n })\n self.assertEqual(restored_info.supervised_keys,\n (\"input (new)\", \"output (new)\"))\n self.assertEqual(restored_info.homepage, \"http://some-location-new\")\n self.assertEqual(restored_info.citation, \"some citation (new)\")\n self.assertEqual(restored_info.redistribution_info.license,\n \"some license (new)\")\n self.assertEqual(restored_info.download_size, 789)\n self.assertEqual(restored_info.dataset_size, 576)\n self.assertEqual(len(restored_info.as_proto.schema.feature), 4)\n self.assertEqual(restored_info.download_checksums, {\n \"url2\": \"some other checksum (new)\",\n \"url3\": \"some checksum (new)\",\n })\n\n def test_reading_from_gcs_bucket(self):\n # The base TestCase prevents GCS access, so we explicitly ask it to restore\n # access here.\n with self.gcs_access():\n mnist_builder = mnist.MNIST(\n data_dir=tempfile.mkdtemp(dir=self.get_temp_dir()))\n info = dataset_info.DatasetInfo(builder=mnist_builder)\n info = mnist_builder.info\n\n # A nominal check to see if we read it.\n self.assertTrue(info.initialized)\n self.assertEqual(10000, info.splits[\"test\"].num_examples)\n\n def test_str_smoke(self):\n info = mnist.MNIST(data_dir=\"/tmp/some_dummy_dir\").info\n _ = str(info)\n\n def test_metadata(self):\n with testing.tmp_dir(self.get_temp_dir()) as tmp_dir:\n builder = RandomShapedImageGenerator(data_dir=tmp_dir)\n builder.download_and_prepare()\n # Metadata should have been created\n self.assertEqual(builder.info.metadata, {\"some_key\": 123})\n\n # Metadata should have been restored\n builder2 = RandomShapedImageGenerator(data_dir=tmp_dir)\n self.assertEqual(builder2.info.metadata, {\"some_key\": 123})\n\n # Metadata should have been restored even if the builder code was not\n # available and we restored from files.\n builder3 = read_only_builder.builder_from_files(\n builder.name,\n data_dir=tmp_dir,\n )\n self.assertEqual(builder3.info.metadata, {\"some_key\": 123})\n\n def test_updates_on_bucket_info(self):\n\n info = dataset_info.DatasetInfo(\n builder=self._builder, description=\"won't be updated\")\n # No statistics in the above.\n self.assertEqual(0, info.splits.total_num_examples)\n self.assertEqual(0, len(info.as_proto.schema.feature))\n\n # Partial update will happen here.\n 
info.read_from_directory(_INFO_DIR)\n\n # Assert that description (things specified in the code) didn't change\n # but statistics are updated.\n self.assertEqual(\"won't be updated\", info.description)\n\n # These are dynamically computed, so will be updated.\n self.assertEqual(40, info.splits.total_num_examples)\n self.assertEqual(2, len(info.as_proto.schema.feature))\n\n def test_set_splits_normal(self):\n info = dataset_info.DatasetInfo(builder=self._builder)\n split_info1 = splits_lib.SplitInfo(\n name=\"train\", shard_lengths=[1, 2], num_bytes=0)\n split_info2 = splits_lib.SplitInfo(\n name=\"test\", shard_lengths=[1], num_bytes=0)\n split_dict = splits_lib.SplitDict(split_infos=[split_info1, split_info2])\n info.set_splits(split_dict)\n self.assertEqual(str(info.splits), str(split_dict))\n self.assertEqual(\n str(info.as_proto.splits),\n str([split_info1.to_proto(),\n split_info2.to_proto()]))\n\n def test_set_splits_incorrect_dataset_name(self):\n info = dataset_info.DatasetInfo(builder=self._builder)\n split_info1 = splits_lib.SplitInfo(\n name=\"train\",\n shard_lengths=[1, 2],\n num_bytes=0,\n filename_template=naming.ShardedFileTemplate(\n dataset_name=\"some_other_dataset\",\n split=\"train\",\n data_dir=info.data_dir,\n filetype_suffix=\"tfrecord\"))\n split_dict = splits_lib.SplitDict(split_infos=[split_info1])\n with pytest.raises(\n AssertionError, match=\"SplitDict contains SplitInfo for split\"):\n info.set_splits(split_dict)\n\n def test_set_splits_multi_split_info(self):\n info = dataset_info.DatasetInfo(builder=self._builder)\n split_info1 = splits_lib.SplitInfo(\n name=\"train\", shard_lengths=[1, 2], num_bytes=0)\n split_info2 = splits_lib.SplitInfo(\n name=\"test\", shard_lengths=[1], num_bytes=0)\n multi_split_info1 = splits_lib.MultiSplitInfo(\n name=\"train\", split_infos=[split_info1])\n multi_split_info2 = splits_lib.MultiSplitInfo(\n name=\"test\", split_infos=[split_info2])\n split_dict = splits_lib.SplitDict(\n split_infos=[multi_split_info1, multi_split_info2])\n info.set_splits(split_dict)\n self.assertEqual(str(info.splits), str(split_dict))\n self.assertEqual(\n str(info.as_proto.splits),\n str([split_info1.to_proto(),\n split_info2.to_proto()]))\n\n\[email protected](\n \"file_format\",\n [\n file_adapters.FileFormat.TFRECORD,\n ])\ndef test_file_format_save_restore(\n tmp_path: pathlib.Path,\n file_format: file_adapters.FileFormat,\n):\n builder = testing.DummyDataset(data_dir=tmp_path, file_format=file_format)\n\n assert isinstance(builder.info.file_format, file_adapters.FileFormat)\n assert builder.info.file_format is file_format\n\n builder.download_and_prepare()\n\n # When restoring the builder, we do not provide the `file_format=`\n # yet it is correctly restored\n builder2 = testing.DummyDataset(data_dir=tmp_path)\n assert builder2.info.file_format is file_format\n\n # Explicitly passing the correct format is accepted.\n builder3 = testing.DummyDataset(data_dir=tmp_path, file_format=file_format)\n assert builder3.info.file_format is file_format\n\n # Providing an inconsistent format is rejected.\n with pytest.raises(ValueError, match=\"File format is already set to\"):\n different_file_format = {\n file_adapters.FileFormat.TFRECORD: file_adapters.FileFormat.RIEGELI,\n file_adapters.FileFormat.RIEGELI: file_adapters.FileFormat.TFRECORD,\n }[file_format]\n testing.DummyDataset(data_dir=tmp_path, file_format=different_file_format)\n\n\ndef test_file_format_values(tmp_path: pathlib.Path):\n # Default file format\n builder = 
testing.DummyDataset(data_dir=tmp_path, file_format=None)\n assert builder.info.file_format == file_adapters.FileFormat.TFRECORD\n\n # str accepted\n builder = testing.DummyDataset(data_dir=tmp_path, file_format=\"riegeli\")\n assert builder.info.file_format == file_adapters.FileFormat.RIEGELI\n\n # file_adapters.FileFormat accepted\n builder = testing.DummyDataset(\n data_dir=tmp_path, file_format=file_adapters.FileFormat.RIEGELI)\n assert builder.info.file_format == file_adapters.FileFormat.RIEGELI\n\n # Unknown value\n with pytest.raises(ValueError, match=\"is not a valid FileFormat\"):\n testing.DummyDataset(data_dir=tmp_path, file_format=\"arrow\")\n\n\ndef test_dataset_info_from_proto():\n builder = RandomShapedImageGenerator(data_dir=testing.make_tmp_dir())\n train = dataset_info_pb2.SplitInfo(\n name=\"train\", num_shards=2, shard_lengths=[4, 5])\n test = dataset_info_pb2.SplitInfo(\n name=\"test\", num_shards=3, shard_lengths=[1, 2, 3])\n text_feature = feature_pb2.Feature(\n python_class_name=\"tensorflow_datasets.core.features.text_feature.Text\",\n text=feature_pb2.TextFeature())\n proto = dataset_info_pb2.DatasetInfo(\n name=\"random_shaped_image_generator\",\n version=str(builder.version),\n features=feature_pb2.Feature(\n python_class_name=\"tensorflow_datasets.core.features.features_dict.FeaturesDict\",\n features_dict=feature_pb2.FeaturesDict(\n features={\"text\": text_feature})),\n splits=[train, test])\n result = dataset_info.DatasetInfo.from_proto(builder=builder, proto=proto)\n assert result.splits[\"test\"].shard_lengths == test.shard_lengths\n assert result.splits[\"train\"].shard_lengths == train.shard_lengths\n assert set(result.features.keys()) == {\"text\"}\n assert result.version == builder.version\n\n\ndef test_supervised_keys_from_proto():\n proto = text_format.Parse(\n text=\"\"\"\n tuple: {\n items: [\n {\n dict: {\n dict: {\n key: \"f2\"\n value: { feature_key: \"f2\" }\n },\n dict: {\n key: \"f1\"\n value: { feature_key: \"f1\" }\n },\n }\n },\n {\n feature_key: \"target\"\n }\n ]\n }\n \"\"\",\n message=dataset_info_pb2.SupervisedKeys())\n supervised_keys = dataset_info._supervised_keys_from_proto(proto=proto)\n assert str(supervised_keys) == \"({'f1': 'f1', 'f2': 'f2'}, 'target')\"\n\n\ndef test_supervised_keys_from_proto_different_ordering():\n proto1 = text_format.Parse(\n text=\"\"\"\n tuple: {\n items: [\n {\n dict: {\n dict: {\n key: \"f1\"\n value: { feature_key: \"f1\" }\n },\n dict: {\n key: \"f2\"\n value: { feature_key: \"f2\" }\n },\n dict: {\n key: \"f3\"\n value: { feature_key: \"f3\" }\n },\n }\n },\n {\n feature_key: \"target\"\n }\n ]\n }\n \"\"\",\n message=dataset_info_pb2.SupervisedKeys())\n proto2 = text_format.Parse(\n text=\"\"\"\n tuple: {\n items: [\n {\n dict: {\n dict: {\n key: \"f3\"\n value: { feature_key: \"f3\" }\n },\n dict: {\n key: \"f2\"\n value: { feature_key: \"f2\" }\n },\n dict: {\n key: \"f1\"\n value: { feature_key: \"f1\" }\n },\n }\n },\n {\n feature_key: \"target\"\n }\n ]\n }\n \"\"\",\n message=dataset_info_pb2.SupervisedKeys())\n supervised_keys1 = dataset_info._supervised_keys_from_proto(proto=proto1)\n supervised_keys2 = dataset_info._supervised_keys_from_proto(proto=proto2)\n assert str(supervised_keys1) == str(supervised_keys2)\n\n\n# pylint: disable=g-inconsistent-quotes\n_INFO_STR = '''tfds.core.DatasetInfo(\n name='mnist',\n full_name='mnist/3.0.1',\n description=\"\"\"\n The MNIST database of handwritten digits.\n \"\"\",\n homepage='https://storage.googleapis.com/cvdf-datasets/mnist/',\n 
data_path='%s',\n download_size=1.95 KiB,\n dataset_size=11.06 MiB,\n features=FeaturesDict({\n 'image': Image(shape=(28, 28, 1), dtype=tf.uint8),\n 'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=10),\n }),\n supervised_keys=('image', 'label'),\n disable_shuffling=False,\n splits={\n 'test': <SplitInfo num_examples=20, num_shards=1>,\n 'train': <SplitInfo num_examples=20, num_shards=1>,\n },\n citation=\"\"\"@article{lecun2010mnist,\n title={MNIST handwritten digit database},\n author={LeCun, Yann and Cortes, Corinna and Burges, CJ},\n journal={ATT Labs [Online]. Available: http://yann. lecun. com/exdb/mnist},\n volume={2},\n year={2010}\n }\n \"\"\",\n redistribution_info=license: \"test license\",\n)'''\n# pylint: enable=g-inconsistent-quotes\n\nif __name__ == \"__main__\":\n testing.test_main()\n"
] | [
[
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
VoiceZen/OpenSeq2Seq | [
"e18641a761c6ec572df3faf0d808a708ddebfcbb",
"e18641a761c6ec572df3faf0d808a708ddebfcbb",
"e18641a761c6ec572df3faf0d808a708ddebfcbb"
] | [
"scripts/nsr_create_syn_train_csv.py",
"open_seq2seq/data/lm/lmutils.py",
"scripts/tacotron_gst_combine_csv.py"
] | [
"# Copyright (c) 2018 NVIDIA Corporation\nfrom __future__ import absolute_import, division, print_function\nfrom __future__ import unicode_literals\n\nimport string\nimport os\nimport pandas as pd\n\nif __name__ == '__main__':\n synthetic_data_root = \"/data/speech/librispeech-syn/\"\n synthetic_data_sample = synthetic_data_root + \"{{}}/sample_step0_{}_syn.wav\"\n\n in_char = \"\\\"'’“”àâèéêü\"\n out_char = \"'''''aaeeeu\"\n punctuation = string.punctuation.replace(\"'\", \"\")\n table = str.maketrans(in_char, out_char, punctuation)\n\n def _normalize_transcript(text):\n \"\"\"Parses the transcript to remove punctation, lowercase all characters, and\n all non-ascii characters\n\n Args:\n text: the string to parse\n\n Returns:\n text: the normalized text\n \"\"\"\n text = text.translate(table)\n text = text.lower()\n text = text.strip()\n return text\n\n names = [\"wav_filename\", \"wav_filesize\", \"transcript\"]\n\n generated_files = pd.read_csv(\n \"generate.csv\", encoding='utf-8', sep='\\x7c',\n header=None, quoting=3, names=names)\n num_files = len(generated_files)\n for i, row in enumerate(generated_files.itertuples()):\n generated_files.iat[i, 0] = synthetic_data_sample.format(i)\n line = _normalize_transcript(generated_files.iat[i, 2])\n generated_files.iat[i, 1] = -1\n generated_files.iat[i, 2] = line\n if i % int(num_files/10) == 0:\n print(\"Processed {} out of {}\".format(i, num_files))\n generated_files.to_csv(\n os.path.join(synthetic_data_root, \"synthetic_data.csv\"), encoding='utf-8',\n sep=',', quoting=3, index=False)\n",
"# -*- coding: utf-8 -*-\nfrom collections import Counter\nimport glob\nimport os\nimport pathlib\nimport random\nimport re\nimport shutil\n\nfrom nltk.tokenize import word_tokenize\nimport numpy as np\nimport pandas as pd\n\nclass Dictionary(object):\n '''\n Adapted from salesforce's repo:\n https://github.com/salesforce/awd-lstm-lm/blob/master/data.py\n '''\n def __init__(self, limit=3, vocab_link=None): # do we need limit?\n self.word2idx = {}\n self.idx2word = []\n self.counter = Counter()\n self.UNK = '<unk>'\n self.EOS = '<eos>'\n if vocab_link and os.path.isfile(vocab_link):\n self.load_vocab(vocab_link)\n\n def add_word(self, word):\n if word not in self.word2idx:\n self.idx2word.append(word)\n self.word2idx[word] = len(self.idx2word) - 1\n token_id = self.word2idx[word]\n self.counter[token_id] += 1\n return self.word2idx[word]\n\n def load_vocab(self, vocab_link):\n vocab_file = open(vocab_link, 'r')\n lines = vocab_file.readlines()\n n = int(lines[-1].strip())\n self.idx2word = [0 for _ in range(n)]\n for line in lines[:-1]:\n parts = line.strip().split('\\t')\n token_id, word, count = int(parts[0]), parts[1], int(parts[2]) \n self.word2idx[word] = token_id\n self.idx2word[token_id] = word\n self.counter[token_id] = count\n if not self.UNK in self.word2idx:\n self.add_word(self.UNK)\n if not self.EOS in self.word2idx:\n self.add_word(self.EOS)\n\n\n def __len__(self):\n return len(self.idx2word)\n\ndef check_exist(proc_path):\n filenames = ['train.ids', 'valid.ids', 'test.ids']\n paths = [os.path.join(proc_path, name) for name in filenames]\n paths.append(proc_path)\n for name in paths:\n if not os.path.exists(name):\n return False\n return True\n\ndef list2str(list):\n return '\\t'.join([str(num) for num in list])\n\ndef unzip(data):\n tmp = [list(t) for t in zip(*data)]\n return (tmp[0], tmp[1])\n\nclass Corpus(object):\n def __init__(self, raw_path, proc_path, change_contraction=True, limit=3):\n pathlib.Path(proc_path).mkdir(exist_ok=True)\n self.limit = limit\n self.dictionary = Dictionary(limit)\n self.vocab_link = 'vocab.txt'\n exists = check_exist(proc_path)\n self.change_contraction = change_contraction\n\n if not exists:\n print('Creating corpus from raw data ...')\n if raw_path and 'raw' in raw_path:\n self._change_names(raw_path)\n if not raw_path:\n raise ValueError(\"data_root [directory to the original data] must be specified\")\n self.preprocess(raw_path, proc_path)\n self.create_dictionary(proc_path, os.path.join(proc_path, 'train.txt'))\n self.dictionary = Dictionary(limit)\n self.dictionary.load_vocab(os.path.join(proc_path, self.vocab_link))\n self.train = self.tokenize(proc_path, proc_path, 'train.txt')\n self.valid = self.tokenize(proc_path, proc_path, 'valid.txt')\n self.test = self.tokenize(proc_path, proc_path, 'test.txt')\n else:\n self.load_corpus(proc_path)\n\n def _change_names(self, raw_path):\n if os.path.isfile(os.path.join(raw_path, 'wiki.train.raw')):\n os.rename(os.path.join(raw_path, 'wiki.train.raw'), os.path.join(raw_path, 'train.txt'))\n os.rename(os.path.join(raw_path, 'wiki.valid.raw'), os.path.join(raw_path, 'valid.txt'))\n os.rename(os.path.join(raw_path, 'wiki.test.raw'), os.path.join(raw_path, 'test.txt'))\n\n def preprocess(self, raw_path, proc_path):\n for filename in ['train.txt', 'valid.txt', 'test.txt']:\n in_ = open(os.path.join(raw_path, filename), 'r')\n out = open(os.path.join(proc_path, filename), 'w')\n for line in in_:\n line = re.sub('@-@', '-', line)\n line = re.sub('-', ' - ', line)\n line = re.sub('etc .', 'etc.', 
line)\n if self.change_contraction:\n line = re.sub(\"n 't\", \" n't\", line)\n tokens = []\n for token in line.split():\n tokens.append(token.strip())\n out.write(' '.join(tokens) + '\\n')\n\n def create_dictionary(self, proc_path, filename):\n '''\n Add words to the dictionary only if it's in the train file\n '''\n self.dictionary.add_word(self.dictionary.UNK)\n with open(filename, 'r') as f:\n f.readline()\n for line in f:\n words = line.split() + [self.dictionary.EOS]\n for word in words:\n self.dictionary.add_word(word)\n\n with open(os.path.join(proc_path, self.vocab_link), 'w') as f:\n f.write('\\t'.join(['0', self.dictionary.UNK, '0']) + '\\n')\n idx = 1\n for token_id, count in self.dictionary.counter.most_common():\n if count < self.limit:\n f.write(str(idx) + '\\n')\n return\n f.write('\\t'.join([str(idx), \n self.dictionary.idx2word[token_id], \n str(count)]) + '\\n')\n idx += 1\n \n def tokenize(self, raw_path, proc_path, filename):\n unk_id = self.dictionary.word2idx[self.dictionary.UNK]\n out = open(os.path.join(proc_path, filename[:-3] + 'ids'), 'w')\n with open(os.path.join(raw_path, filename), 'r') as f:\n ids = []\n for line in f:\n words = line.split() + [self.dictionary.EOS]\n for word in words:\n ids.append(self.dictionary.word2idx.get(word, unk_id))\n out.write(list2str(ids))\n out.close()\n\n return np.asarray(ids)\n\n def load_ids(self, filename):\n ids = open(filename, 'r').read().strip().split('\\t')\n return np.asarray([int(i) for i in ids])\n\n def list2str(self, list):\n return '\\t'.join([str(num) for num in list])\n\n def load_corpus(self, proc_path):\n print('Loading corpus from processed data ...')\n self.dictionary.load_vocab(os.path.join(proc_path, self.vocab_link))\n self.train = self.load_ids(os.path.join(proc_path, 'train.ids'))\n self.valid = self.load_ids(os.path.join(proc_path, 'valid.ids'))\n self.test = self.load_ids(os.path.join(proc_path, 'test.ids'))\n\nclass IMDBCorpus(object):\n def __init__(self, raw_path, proc_path, lm_vocab_link, binary=True, get_stats=False):\n exists = check_exist(proc_path)\n pathlib.Path(proc_path).mkdir(exist_ok=True)\n self.dictionary = Dictionary(vocab_link=lm_vocab_link)\n self.binary = binary\n self.raw_path = raw_path\n self.proc_path = proc_path\n self._get_stats = get_stats\n\n if not exists:\n print('Creating corpus from raw data ...')\n if not raw_path:\n raise ValueError(\"data_root [directory to the original data] must be specified\")\n self.preprocess()\n else:\n self.load_corpus(proc_path)\n\n def check_oov(self, txt):\n txt = txt.lower()\n txt = re.sub('thats', \"that's\", txt)\n txt = re.sub('wouldnt', \"wounldn't\", txt)\n txt = re.sub('couldnt', \"couldn't\", txt)\n txt = re.sub('cant', \"can't\", txt)\n txt = re.sub('dont', \"don't\", txt)\n txt = re.sub(\"didnt\", \"didn't\", txt)\n txt = re.sub(\"isnt\", \"isn't\", txt)\n txt = re.sub(\"wasnt\", \"wasn't\", txt)\n return word_tokenize(txt)\n\n def tokenize(self, txt):\n txt = re.sub('<br />', ' ', txt)\n txt = re.sub('', ' ', txt)\n txt = re.sub('', ' ', txt)\n txt = re.sub('-', ' - ', txt)\n txt = re.sub('\\.', ' . 
', txt)\n txt = re.sub('\\+', ' + ', txt)\n txt = re.sub('\\*', ' * ', txt)\n txt = re.sub('/', ' / ', txt)\n txt = re.sub('`', \"'\", txt)\n txt = re.sub(' ms \\.', \" ms.\", txt)\n txt = re.sub('Ms \\.', \"Ms.\", txt)\n \n words = []\n for token in word_tokenize(txt):\n if not token in self.dictionary.word2idx:\n if token.startswith(\"'\"):\n words.append(\"'\")\n token = token[1:]\n if not token in self.dictionary.word2idx:\n tokens = self.check_oov(token)\n words.extend(tokens)\n else:\n words.append(token)\n else:\n words.append(token) \n \n txt = ' '.join(words)\n txt = re.sub(\"''\", '\"', txt)\n txt = re.sub(\"' '\", '\"', txt)\n txt = re.sub(\"``\", '\"', txt)\n txt = re.sub('etc \\.', 'etc. ', txt)\n txt = re.sub(' etc ', ' etc. ', txt)\n return txt\n\n def tokenize_folder(self, mode, token_file, rating_file):\n review_outfile = open(token_file, 'w')\n rating_outfile = open(rating_file, 'w')\n for sent in ['pos', 'neg']:\n files = glob.glob(os.path.join(self.raw_path, mode, sent, '*.txt'))\n for file in files:\n in_file = open(file, 'r')\n txt = self.tokenize(in_file.read())\n review_outfile.write(txt + \"\\n\")\n if self.binary:\n if sent == 'pos':\n rating = \"1\"\n else:\n rating = \"0\"\n else:\n idx = file.rfind(\"_\")\n rating = str(int(file[idx + 1:-4]) - 1)\n rating_outfile.write(rating + '\\n')\n in_file.close()\n\n def txt2ids(self, mode, token_file, rating_file):\n if self._get_stats:\n import matplotlib\n matplotlib.use(\"TkAgg\")\n from matplotlib import pyplot as plt\n rating_lines = open(rating_file, 'r').readlines()\n ratings = [int(line.strip()) for line in rating_lines]\n reviews = []\n unk_id = self.dictionary.word2idx[self.dictionary.UNK]\n unseen = []\n all_tokens = 0\n all_unseen = 0\n for line in open(token_file, 'r'):\n tokens = line.strip().split()\n reviews.append([self.dictionary.word2idx.get(token, unk_id) for token in tokens])\n if self._get_stats:\n for token in tokens:\n all_tokens += 1\n if not token in self.dictionary.word2idx:\n unseen.append(token)\n all_unseen += 1\n\n if self._get_stats:\n counter = Counter(unseen)\n\n out = open(os.path.join(self.proc_path, mode + '_unseen.txt'), 'w')\n for key, count in counter.most_common():\n out.write(key + '\\t' + str(count) + '\\n')\n\n lengths = np.asarray([len(review) for review in reviews])\n stat_file = open(os.path.join(self.proc_path, 'statistics.txt'), 'w')\n stat_file.write(mode + '\\n')\n short_lengths = [l for l in lengths if l <= 256]\n stat_file.write('\\t'.join(['Min', 'Max', 'Mean', 'Median', 'STD', 'Total', '<=256']) + '\\n')\n stats = [np.min(lengths), np.max(lengths), np.mean(lengths), np.median(lengths), np.std(lengths), len(lengths), len(short_lengths)]\n stat_file.write('\\t'.join([str(t) for t in stats]) + '\\n')\n stat_file.write('Total {} unseen out of {} all tokens. 
Probability {}.\\n'.\n format(all_unseen, all_tokens, all_unseen / all_tokens))\n plt.hist(lengths, bins=20)\n plt.savefig(os.path.join(self.proc_path, mode + '_hist.png'))\n plt.hist(short_lengths, bins=20)\n plt.savefig(os.path.join(self.proc_path, mode + '_short_hist.png'))\n\n return list(zip(reviews, ratings))\n\n def preprocess_folder(self, mode):\n token_file = os.path.join(self.proc_path, mode + '.tok')\n rating_file = os.path.join(self.proc_path, mode + '.inter.rat')\n self.tokenize_folder(mode, token_file, rating_file)\n return self.txt2ids(mode, token_file, rating_file)\n\n def partition(self, data, val_count=1000):\n random.shuffle(data)\n return data[val_count:], data[:val_count]\n\n def ids2file(self):\n for mode in ['train', 'valid', 'test']:\n data = getattr(self, mode)\n review_out = open(os.path.join(self.proc_path, mode + '.ids'), 'w')\n rating_out = open(os.path.join(self.proc_path, mode + '.rat'), 'w')\n for review, rating in data:\n review_out.write(list2str(review) + '\\n')\n rating_out.write(str(rating) + '\\n')\n\n def preprocess(self):\n os.makedirs(self.proc_path, exist_ok=True)\n train = self.preprocess_folder('train')\n self.train, self.valid = self.partition(train)\n self.test = self.preprocess_folder('test')\n self.ids2file()\n\n def load_ids(self, mode):\n review_lines = open(os.path.join(self.proc_path, mode + '.ids')).readlines()\n rating_lines = open(os.path.join(self.proc_path, mode + '.rat')).readlines()\n ratings = [int(line.strip()) for line in rating_lines]\n reviews = [[int(i) for i in line.strip().split('\\t')] for line in review_lines]\n return list(zip(reviews, ratings))\n\n def load_corpus(self, proc_path):\n print('Loading corpus from processed data ...')\n self.train = self.load_ids('train')\n self.valid = self.load_ids('valid')\n self.test = self.load_ids('test')\n\nclass SSTCorpus(object):\n def __init__(self, raw_path, proc_path, lm_vocab_link, get_stats=False):\n exists = check_exist(proc_path)\n pathlib.Path(proc_path).mkdir(exist_ok=True)\n self.dictionary = Dictionary(vocab_link=lm_vocab_link)\n self.raw_path = raw_path\n self.proc_path = proc_path\n self._get_stats = get_stats\n\n if not exists:\n print('Creating corpus from raw data ...')\n if not raw_path:\n raise ValueError(\"data_root [directory to the original data] must be specified\")\n self.preprocess()\n else:\n self.load_corpus(proc_path)\n\n def check_oov(self, txt):\n txt = txt.lower()\n txt = re.sub('thats', \"that's\", txt)\n txt = re.sub('wouldnt', \"wounldn't\", txt)\n txt = re.sub('couldnt', \"couldn't\", txt)\n txt = re.sub('cant', \"can't\", txt)\n txt = re.sub('dont', \"don't\", txt)\n txt = re.sub(\"didnt\", \"didn't\", txt)\n txt = re.sub(\"isnt\", \"isn't\", txt)\n txt = re.sub(\"wasnt\", \"wasn't\", txt)\n return word_tokenize(txt)\n\n def tokenize(self, txt):\n txt = re.sub('-', ' - ', txt)\n txt = re.sub('\\+', ' + ', txt)\n txt = re.sub('\\*', ' * ', txt)\n txt = re.sub('/', ' / ', txt)\n txt = re.sub('`', \"'\", txt)\n \n words = []\n for token in word_tokenize(txt):\n if not token in self.dictionary.word2idx:\n if token.startswith(\"'\"):\n words.append(\"'\")\n token = token[1:]\n if not token in self.dictionary.word2idx:\n tokens = self.check_oov(token)\n words.extend(tokens)\n else:\n words.append(token)\n else:\n words.append(token) \n \n txt = ' '.join(words)\n txt = re.sub(\"''\", '\"', txt)\n txt = re.sub(\"' '\", '\"', txt)\n txt = re.sub(\"``\", '\"', txt)\n txt = re.sub('etc \\.', 'etc. ', txt)\n txt = re.sub(' etc ', ' etc. 
', txt)\n return txt\n\n def tokenize_file(self, mode):\n data = pd.read_csv(os.path.join(self.raw_path, mode + '.csv'))\n\n if mode == 'val':\n mode = 'valid'\n review_file = open(os.path.join(self.proc_path, mode + '.tok'), 'w')\n rating_file = open(os.path.join(self.proc_path, mode + '.rat'), 'w')\n for _, row in data.iterrows():\n review = self.tokenize(row['sentence'])\n review_file.write(review + '\\n')\n rating_file.write(str(row['label']) + '\\n')\n\n def txt2ids(self, mode):\n if self._get_stats:\n import matplotlib\n matplotlib.use(\"TkAgg\")\n from matplotlib import pyplot as plt\n\n reviews = []\n unk_id = self.dictionary.word2idx[self.dictionary.UNK]\n unseen = []\n all_tokens = 0\n all_unseen = 0\n\n rating_lines = open(os.path.join(self.proc_path, mode + '.rat'), 'r').readlines()\n ratings = [int(line.strip()) for line in rating_lines]\n\n for line in open(os.path.join(self.proc_path, mode + '.tok'), 'r'):\n tokens = line.strip().split()\n reviews.append([self.dictionary.word2idx.get(token, unk_id) for token in tokens])\n if self._get_stats:\n for token in tokens:\n all_tokens += 1\n if not token in self.dictionary.word2idx:\n unseen.append(token)\n all_unseen += 1\n\n if self._get_stats:\n counter = Counter(unseen)\n\n out = open(os.path.join(self.proc_path, mode + '_unseen.txt'), 'w')\n for key, count in counter.most_common():\n out.write(key + '\\t' + str(count) + '\\n')\n\n lengths = np.asarray([len(review) for review in reviews])\n stat_file = open(os.path.join(self.proc_path, 'statistics.txt'), 'a')\n stat_file.write(mode + '\\n')\n short_lengths = [l for l in lengths if l <= 96]\n stat_file.write('\\t'.join(['Min', 'Max', 'Mean', 'Median', 'STD', 'Total', '<=96']) + '\\n')\n stats = [np.min(lengths), np.max(lengths), np.mean(lengths), np.median(lengths), np.std(lengths), len(lengths), len(short_lengths)]\n stat_file.write('\\t'.join([str(t) for t in stats]) + '\\n')\n stat_file.write('Total {} unseen out of {} all tokens. 
Probability {}.\\n'.\n format(all_unseen, all_tokens, all_unseen / all_tokens))\n plt.hist(lengths, bins=20)\n plt.savefig(os.path.join(self.proc_path, mode + '_hist.png'))\n plt.hist(short_lengths, bins=20)\n plt.savefig(os.path.join(self.proc_path, mode + '_short_hist.png'))\n\n return list(zip(reviews, ratings))\n\n def preprocess_file(self, mode):\n self.tokenize_file(mode)\n if mode == 'val':\n mode = 'valid'\n return self.txt2ids(mode)\n\n def ids2file(self):\n for mode in ['train', 'valid', 'test']:\n data = getattr(self, mode)\n review_out = open(os.path.join(self.proc_path, mode + '.ids'), 'w')\n rating_out = open(os.path.join(self.proc_path, mode + '.rat'), 'w')\n for review, rating in data:\n review_out.write(list2str(review) + '\\n')\n rating_out.write(str(rating) + '\\n')\n\n def preprocess(self):\n os.makedirs(self.proc_path, exist_ok=True)\n self.train = self.preprocess_file('train')\n self.valid = self.preprocess_file('val')\n self.test = self.preprocess_file('test')\n self.ids2file()\n\n def load_ids(self, mode):\n review_lines = open(os.path.join(self.proc_path, mode + '.ids')).readlines()\n rating_lines = open(os.path.join(self.proc_path, mode + '.rat')).readlines()\n ratings = [int(line.strip()) for line in rating_lines]\n reviews = [[int(i) for i in line.strip().split('\\t')] for line in review_lines]\n return list(zip(reviews, ratings))\n\n def load_corpus(self, proc_path):\n print('Loading corpus from processed data ...')\n self.train = self.load_ids('train')\n self.valid = self.load_ids('valid')\n self.test = self.load_ids('test')\n\n# SSTCorpus('/home/chipn/data/binary_sst', 'sst-processed-data-wkt2' , '/home/chipn/dev/OpenSeq2Seq/wkt2-processed-data/vocab.txt')\n# SSTCorpus('/home/chipn/data/binary_sst', 'sst-processed-data-wkt103' , '/home/chipn/dev/OpenSeq2Seq/wkt103-processed-data/vocab.txt')\n# IMDBCorpus('/home/chipn/data/aclImdb', 'imdb-processed-data-wkt103' , '/home/chipn/dev/OpenSeq2Seq/wkt103-processed-data/vocab.txt')\n# IMDBCorpus('/home/chipn/data/aclImdb', 'imdb-processed-data-wkt2' , '/home/chipn/dev/OpenSeq2Seq/wkt2-processed-data/vocab.txt')",
"# Copyright (c) 2018 NVIDIA Corporation\nfrom __future__ import absolute_import, division, print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport numpy as np\nimport pandas as pd\n\nif __name__ == '__main__':\n data_root = \"/data/speech/MAILABS\"\n sub_dirs = [\"en_US/by_book/male/elliot_miller/hunters_space\",\n \"en_US/by_book/male/elliot_miller/pink_fairy_book\",\n \"en_US/by_book/male/elliot_miller/pirates_of_ersatz\",\n \"en_US/by_book/male/elliot_miller/poisoned_pen\",\n \"en_US/by_book/male/elliot_miller/silent_bullet\",\n \"en_US/by_book/female/mary_ann/northandsouth\",\n \"en_US/by_book/female/mary_ann/midnight_passenger\",\n \"en_US/by_book/female/judy_bieber/dorothy_and_wizard_oz\",\n \"en_US/by_book/female/judy_bieber/emerald_city_of_oz\",\n \"en_US/by_book/female/judy_bieber/ozma_of_oz\",\n \"en_US/by_book/female/judy_bieber/rinkitink_in_oz\",\n \"en_US/by_book/female/judy_bieber/sky_island\",\n \"en_US/by_book/female/judy_bieber/the_master_key\",\n \"en_US/by_book/female/judy_bieber/the_sea_fairies\"]\n\n # Check to make sure all the csvs can be found\n while True:\n check = 0\n for sub_dir in sub_dirs:\n csv = os.path.join(data_root, sub_dir, \"metadata.csv\")\n if not os.path.isfile(csv):\n print((\"{} cannot be found. Please ensure that you have\"\n \"entered the correct directory where you extracted the MAILABS\"\n \"dataset\").format(csv))\n break\n else:\n check += 1\n if check == len(sub_dirs):\n break\n data_root = input(\"Please input where you extracted the MAILABS US dataset: \")\n\n\n # Load all csvs\n names = [\"1\", \"2\", \"3\"]\n _files = None\n for sub_dir in sub_dirs:\n csv = os.path.join(data_root, sub_dir, \"metadata.csv\")\n files = pd.read_csv(\n csv, encoding='utf-8', sep='\\x7c', header=None, quoting=3, names=names)\n files['1'] = sub_dir + '/wavs/' + files['1'].astype(str)\n if _files is None:\n _files = files\n else:\n _files = _files.append(files)\n\n # Optionally split data into train and validation sets\n num_files = _files.shape[0]\n np.random.shuffle(_files.values)\n\n # Option 1: Take x% for train and 100-x % for val\n # x = 0.8\n # train, val = np.split(_files, [int(num_files/10.*x)])\n\n # Option 2: Take x files for val, and rest for train\n # x = 32\n # train = _files[:-x]\n # val = _files[-x:]\n\n # Option 3: Don't have a validation set\n train = _files\n val = None\n\n # Save new csvs\n train_csv = os.path.join(data_root, \"train.csv\")\n val_csv = os.path.join(data_root, \"val.csv\")\n\n train.to_csv(\n train_csv, encoding='utf-8', sep='\\x7c',\n header=None, quoting=3, index=False)\n if val:\n val.to_csv(\n val_csv, encoding='utf-8', sep='\\x7c',\n header=None, quoting=3, index=False)\n\n print(\"Change dataset_location in tacotron_gst.py to {}\".format(data_root))\n"
] | [
[
"pandas.read_csv"
],
[
"numpy.min",
"numpy.asarray",
"matplotlib.use",
"numpy.median",
"numpy.max",
"numpy.std",
"numpy.mean",
"matplotlib.pyplot.hist"
],
[
"pandas.read_csv",
"numpy.random.shuffle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
aberki1234/cogent3 | [
"af98b248a999bfeefd4cfed6bd59b4f30442e2d4",
"af98b248a999bfeefd4cfed6bd59b4f30442e2d4",
"af98b248a999bfeefd4cfed6bd59b4f30442e2d4"
] | [
"tests/test_parse/test_pamlmatrix.py",
"src/cogent3/util/misc.py",
"tests/test_maths/test_measure.py"
] | [
"#!/usr/bin/env python\nfrom io import StringIO\nfrom unittest import TestCase, main\n\nfrom cogent3.evolve.models import DSO78_freqs, DSO78_matrix\nfrom cogent3.parse.paml_matrix import PamlMatrixParser\n\n\n__author__ = \"Matthew Wakefield\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\"Matthew Wakefield\"]\n__license__ = \"BSD-3\"\n__version__ = \"2020.7.2a\"\n__maintainer__ = \"Matthew Wakefield\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\nfrom numpy.testing import assert_equal\n\n\ndata = \"\"\"\n 27\t\t\t\t\t\t\t\t\t \n 98 32\t\t\t\t\t\t\t\t\t \n 120 0 905\t\t\t\t\t\t\t\t \n 36 23 0 0\t\t\t\t\t\t\t\t \n 89 246 103 134 0\t\t\t\t\t\t\t \n 198 1 148 1153 0 716\t\t\t\t\t\t\t \n 240 9 139 125 11 28 81\t\t\t\t\t\t \n 23 240 535 86 28 606 43 10\t\t\t\t\t\t \n 65 64 77 24 44 18 61 0 7\t\t\t\t\t \n 41 15 34 0 0 73 11 7 44 257\t\t\t\t\t \n 26 464 318 71 0 153 83 27 26 46 18\t\t\t\t \n 72 90 1 0 0 114 30 17 0 336 527 243\t\t\t\t \n 18 14 14 0 0 0 0 15 48 196 157 0 92\t\t\t \n 250 103 42 13 19 153 51 34 94 12 32 33 17 11\t\t\t \n 409 154 495 95 161 56 79 234 35 24 17 96 62 46 245\t\t \n 371 26 229 66 16 53 34 30 22 192 33 136 104 13 78 550\t\t \n 0 201 23 0 0 0 0 0 27 0 46 0 0 76 0 75 0\t \n 24 8 95 0 96 0 22 0 127 37 28 13 0 698 0 34 42 61\t \n 208 24 15 18 49 35 37 54 44 889 175 10 258 12 48 30 157 0 28 \n\n 0.087127 0.040904 0.040432 0.046872 0.033474 0.038255 0.049530\n 0.088612 0.033618 0.036886 0.085357 0.080482 0.014753 0.039772\n 0.050680 0.069577 0.058542 0.010494 0.029916 0.064718\n\n Ala Arg Asn Asp Cys Gln Glu Gly His Ile Leu Lys Met Phe Pro Ser Thr Trp Tyr Val\n\n S_ij = S_ji and PI_i for the Dayhoff model, with the rate Q_ij=S_ij*PI_j\n The rest of the file is not used.\n Prepared by Z. Yang, March 1995.\n\n\n See the following reference for notation used here:\n\n Yang, Z., R. Nielsen and M. Hasegawa. 1998. Models of amino acid substitution and\n applications to mitochondrial protein evolution. Mol. Biol. Evol. 15:1600-1611.\n\"\"\"\n\n\nclass TestParsePamlMatrix(TestCase):\n def test_parse(self):\n matrix, freqs = PamlMatrixParser(StringIO(data))\n assert_equal(DSO78_matrix, matrix)\n assert_equal(DSO78_freqs, freqs)\n\n\nif __name__ == \"__main__\":\n main()\n",
"#!/usr/bin/env python\n\"\"\"Generally useful utility classes and methods.\n\"\"\"\nimport os\nimport pathlib\nimport re\nimport warnings\nimport zipfile\n\nfrom bz2 import open as bzip_open\nfrom gzip import open as gzip_open\nfrom io import TextIOWrapper\nfrom os import path as os_path\nfrom os import remove\nfrom pathlib import Path\nfrom random import choice, randint\nfrom tempfile import NamedTemporaryFile, gettempdir\nfrom warnings import warn\nfrom zipfile import ZipFile\n\nimport numpy\n\nfrom numpy import array, ceil, finfo, float64, floor, log10, logical_not, sum\n\n\n__author__ = \"Rob Knight\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\n \"Rob Knight\",\n \"Peter Maxwell\",\n \"Amanda Birmingham\",\n \"Sandra Smit\",\n \"Zongzhi Liu\",\n \"Daniel McDonald\",\n \"Kyle Bittinger\",\n \"Marcin Cieslik\",\n]\n__license__ = \"BSD-3\"\n__version__ = \"2020.7.2a\"\n__maintainer__ = \"Gavin Huttley\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n\ndef _adjusted_gt_minprob_vector(probs, minprob):\n # operates on a 1D numpy vector\n total = probs.sum()\n smallest = probs.min()\n if smallest > minprob:\n # nothing to do\n return probs\n\n dim = probs.shape[0]\n # we need an adjustment that (small_val + adj) / (n * adj + total) > minprob\n # the following solves for this, then adds machine precision\n adj = -(smallest + minprob * total) / (minprob * dim - 1)\n adj += finfo(float64).eps\n\n probs += adj\n probs /= probs.sum()\n return probs\n\n\ndef adjusted_gt_minprob(probs, minprob=1e-6):\n \"\"\"returns numpy array of probs scaled such that minimum is > minval\n\n result sums to 1 within machine precision\n\n if 2D array, assumes row-order\"\"\"\n assert 0 <= minprob < 1, \"invalid minval %s\" % minprob\n probs = array(probs, dtype=float64)\n if (probs > minprob).all():\n return probs\n\n if probs.ndim == 1:\n probs = _adjusted_gt_minprob_vector(probs, minprob)\n else:\n for i in range(probs.shape[0]):\n probs[i] = _adjusted_gt_minprob_vector(probs[i], minprob)\n\n return probs\n\n\ndef adjusted_within_bounds(value, lower, upper, eps=1e-7, action=\"warn\"):\n \"\"\"returns value such that lower <= value <= upper\n\n Parameters\n ----------\n value\n number, converted to float64\n lower\n lower bound\n upper\n upper bound\n eps : float\n if value lies within eps of either lower/upper, it's returned inside\n this interval by machine precision\n action : str\n 'warn', 'raise' (ValueError), 'ignore'. 
What happens if value lies further than eps\n from either bound\n \"\"\"\n if lower <= value <= upper:\n return value\n\n assert action in (\"warn\", \"raise\", \"ignore\"), \"Unknown action %s\" % repr(action)\n\n value = float64(value)\n eps = float64(eps) + finfo(float64).eps\n err_msg = \"value[%s] not within lower[%s]/upper[%s] bounds\" % (value, lower, upper)\n wrn_msg = \"value[%s] forced within lower[%s]/upper[%s] bounds\" % (\n value,\n lower,\n upper,\n )\n\n if value < lower and (lower - value) <= eps:\n value = lower\n elif value > upper and (value - upper) <= eps:\n value = upper\n elif (lower > value or value > upper) and action == \"raise\":\n raise ValueError(err_msg)\n else:\n warn(wrn_msg, category=UserWarning)\n value = upper if value > upper else lower\n\n return value\n\n\ndef bytes_to_string(data):\n \"\"\"returns a string if data is bytes, otherwise returns original\"\"\"\n if isinstance(data, bytes):\n data = data.decode(\"utf_8\")\n return data\n\n\ndef open_zip(filename, mode=\"r\", **kwargs):\n \"\"\"open a single member zip-compressed file\n\n Note\n ----\n If mode=\"r\". The function raises ValueError if zip has > 1 record.\n The returned object is wrapped by TextIOWrapper with latin encoding\n (so it's not a bytes string).\n\n If mode=\"w\", returns an atomic_write() instance.\n \"\"\"\n if mode.startswith(\"w\"):\n return atomic_write(filename, mode=mode, in_zip=True)\n\n mode = mode.strip(\"t\")\n with ZipFile(filename) as zf:\n if len(zf.namelist()) != 1:\n raise ValueError(\"Archive is supposed to have only one record.\")\n opened = zf.open(zf.namelist()[0], mode=mode, **kwargs)\n return TextIOWrapper(opened, encoding=\"latin-1\")\n\n\ndef open_(filename, mode=\"rt\", **kwargs):\n \"\"\"open that handles different compression\"\"\"\n filename = Path(filename).expanduser().absolute()\n op = {\".gz\": gzip_open, \".bz2\": bzip_open, \".zip\": open_zip}.get(\n filename.suffix, open\n )\n return op(filename, mode, **kwargs)\n\n\nclass atomic_write:\n \"\"\"performs atomic write operations, cleans up if fails\"\"\"\n\n def __init__(self, path, tmpdir=None, in_zip=None, mode=\"w\"):\n path = pathlib.Path(path).expanduser()\n _, cmp = get_format_suffixes(path)\n if in_zip and cmp == \"zip\":\n in_zip = path if isinstance(in_zip, bool) else in_zip\n path = pathlib.Path(str(path)[: str(path).rfind(\".zip\")])\n\n self._path = path\n self._mode = mode\n self._file = None\n self._in_zip = in_zip\n self.succeeded = None\n self._close_func = (\n self._close_rename_zip if in_zip else self._close_rename_standard\n )\n if tmpdir is None:\n tmpdir = self._get_tmp_dir()\n self._tmpdir = tmpdir\n\n def _get_tmp_dir(self):\n \"\"\"returns parent of destination file\"\"\"\n parent = Path(self._in_zip).parent if self._in_zip else Path(self._path).parent\n if not parent.exists():\n raise FileNotFoundError(f\"{parent} directory does not exist\")\n return parent\n\n def _get_fileobj(self):\n \"\"\"returns file to be written to\"\"\"\n if self._file is None:\n self._file = NamedTemporaryFile(self._mode, delete=False, dir=self._tmpdir)\n\n return self._file\n\n def __enter__(self):\n return self._get_fileobj()\n\n def _close_rename_standard(self, p):\n try:\n f = Path(self._path)\n f.unlink()\n except FileNotFoundError:\n pass\n finally:\n p.rename(self._path)\n\n def _close_rename_zip(self, p):\n with zipfile.ZipFile(self._in_zip, \"a\") as out:\n out.write(str(p), arcname=self._path)\n\n p.unlink()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._file.close()\n p = 
Path(self._file.name)\n if exc_type is None:\n self._close_func(p)\n self.succeeded = True\n else:\n self.succeeded = False\n p.unlink()\n\n def write(self, text):\n \"\"\"writes text to file\"\"\"\n fileobj = self._get_fileobj()\n fileobj.write(text)\n\n def close(self):\n \"\"\"closes file\"\"\"\n self.__exit__(None, None, None)\n\n\n_wout_period = re.compile(r\"^\\.\")\n\n\ndef get_format_suffixes(filename):\n \"\"\"returns file, compression suffixes\"\"\"\n filename = Path(filename)\n if not filename.suffix:\n return None, None\n\n compression_suffixes = (\"bz2\", \"gz\", \"zip\")\n suffixes = [_wout_period.sub(\"\", sfx).lower() for sfx in filename.suffixes[-2:]]\n if suffixes[-1] in compression_suffixes:\n cmp_suffix = suffixes[-1]\n else:\n cmp_suffix = None\n\n if len(suffixes) == 2 and cmp_suffix is not None:\n suffix = suffixes[0]\n elif cmp_suffix is None:\n suffix = suffixes[-1]\n else:\n suffix = None\n return suffix, cmp_suffix\n\n\ndef iterable(item):\n \"\"\"If item is iterable, returns item. Otherwise, returns [item].\n\n Useful for guaranteeing a result that can be iterated over.\n \"\"\"\n try:\n iter(item)\n return item\n except TypeError:\n return [item]\n\n\ndef curry(f, *a, **kw):\n \"\"\"curry(f,x)(y) = f(x,y) or = lambda y: f(x,y)\n\n modified from python cookbook\"\"\"\n\n def curried(*more_a, **more_kw):\n return f(*(a + more_a), **dict(kw, **more_kw))\n\n # make docstring for curried funtion\n curry_params = []\n if a:\n curry_params.extend([e for e in a])\n if kw:\n curry_params.extend([\"%s=%s\" % (k, v) for k, v in list(kw.items())])\n # str it to prevent error in join()\n curry_params = list(map(str, curry_params))\n\n try:\n f_name = f.__name__\n except: # e.g. itertools.groupby failed .func_name\n f_name = \"?\"\n\n curried.__doc__ = \" curry(%s,%s)\\n\" \"== curried from %s ==\\n %s\" % (\n f_name,\n \", \".join(curry_params),\n f_name,\n f.__doc__,\n )\n\n return curried\n\n\n# end curry\n\n\ndef is_iterable(obj):\n \"\"\"return True if obj is iterable\"\"\"\n try:\n iter(obj)\n except TypeError as e:\n return False\n else:\n return True\n\n\ndef is_char(obj):\n \"\"\"return True if obj is a char (str with lenth<=1)\"\"\"\n return isinstance(obj, str) and len(obj) <= 1\n\n\ndef is_char_or_noniterable(x):\n return is_char(x) or not is_iterable(x)\n\n\ndef recursive_flatten(\n items, max_depth=None, curr_depth=1, is_leaf=is_char_or_noniterable\n):\n \"\"\"Removes all nesting from items, recursively.\n\n Note: Default max_depth is None, which removes all nesting (including\n unpacking strings). Setting max_depth unpacks a maximum of max_depth levels\n of nesting, but will not raise exception if the structure is not really\n that deep (instead, will just remove the nesting that exists). If max_depth\n is 0, will not remove any nesting (note difference from setting max_depth\n to None).\n\n is_leaf: a predicate for 'leaf node'. The default is_char_or_noniterable\n removes all nesting. is_str_or_noniterable removes all nesting sequences\n except strings. 
is_leaf=not_list_tuple removes only nesting list or tuple\n , which is considerably faster and recommended for general use.\n \"\"\"\n result = []\n for i in items:\n if max_depth is not None and curr_depth > max_depth or is_leaf(i):\n result.append(i)\n else:\n result.extend(recursive_flatten(i, max_depth, curr_depth + 1, is_leaf))\n return result\n\n\ndef not_list_tuple(obj):\n \"\"\"return False if obj is a list or a tuple\"\"\"\n return not isinstance(obj, (list, tuple))\n\n\nlist_flatten = curry(recursive_flatten, is_leaf=not_list_tuple)\n\n\ndef add_lowercase(d):\n \"\"\"Adds lowercase version of keys in d to itself. Converts vals as well.\n\n Should work on sequences of strings as well as strings.\n\n Now also works on strings and sets.\n \"\"\"\n if hasattr(d, \"lower\"): # behaves like a string\n return d + d.lower()\n elif not hasattr(d, \"items\"): # not a dict\n items = list(d)\n return d.__class__(items + [i.lower() for i in items])\n\n # otherwise, assume dict-like behavior\n for key, val in list(d.items()):\n try:\n new_key = key.lower()\n except: # try to make tuple out of arbitrary sequence\n try:\n new_key = []\n for k in key:\n try:\n new_key.append(k.lower())\n except:\n new_key.append(k)\n new_key = tuple(new_key)\n except:\n new_key = key\n try:\n new_val = val.lower()\n except:\n new_val = val # don't care if we couldn't convert it\n if new_key not in d: # don't overwrite existing lcase keys\n d[new_key] = new_val\n return d\n\n\ndef DistanceFromMatrix(matrix):\n \"\"\"Returns function(i,j) that looks up matrix[i][j].\n\n Useful for maintaining flexibility about whether a function is computed\n or looked up.\n\n Matrix can be a 2D dict (arbitrary keys) or list (integer keys).\n \"\"\"\n\n def result(i, j):\n return matrix[i][j]\n\n return result\n\n\nclass ClassChecker(object):\n \"\"\"Container for classes: 'if t in x == True' if t is the right class.\"\"\"\n\n def __init__(self, *Classes):\n \"\"\"Returns a new ClassChecker that accepts specified classes.\"\"\"\n type_type = type(str)\n for c in Classes:\n if type(c) != type_type:\n raise TypeError(\n \"ClassChecker found non-type object '%s' in parameter list.\" % c\n )\n self.Classes = list(Classes)\n\n def __contains__(self, item):\n \"\"\"Returns True if item is a subclass of one of the classes in self.\"\"\"\n for c in self.Classes:\n if isinstance(item, c):\n return True\n return False\n\n def __str__(self):\n \"\"\"Informal string representation: returns list\"\"\"\n return str(self.Classes)\n\n\nclass Delegator(object):\n \"\"\"Mixin class that forwards unknown attributes to a specified object.\n\n Handles properties correctly (this was somewhat subtle).\n\n WARNING: If you are delegating to an object that pretends to have every\n attribute (e.g. a MappedRecord), you _must_ bypass normal attribute access\n in __init__ of your subclasses to ensure that the properties are set in\n the object itself, not in the object to which it delegates. Alternatively,\n you can initialize with None so that unhandled attributes are set in self,\n and then replace self._handler with your object right at the end of\n __init__. The first option is probably safer and more general.\n\n Warning: will not work on classes that use __slots__ instead of __dict__.\n \"\"\"\n\n def __init__(self, obj):\n \"\"\"Returns a new Delegator that uses methods of obj.\n\n NOTE: It's important that this bypasses the normal attribute setting\n mechanism, or there's an infinite loop between __init__ and\n __setattr__. 
However, subclasses should be able to use the normal\n mechanism with impunity.\n \"\"\"\n self.__dict__[\"_handler\"] = obj\n\n def __getattr__(self, attr):\n \"\"\"Forwards unhandled attributes to self._handler.\n\n Sets _handler to None on first use if not already set.\n \"\"\"\n handler = self.__dict__.setdefault(\"_handler\", None)\n return getattr(handler, attr)\n\n def __setattr__(self, attr, value):\n \"\"\"Forwards requests to change unhandled attributes to self._handler.\n\n This logic is rather complicated because of GenericRecord objects, which\n masquerade as having every attribute, which can be used as handlers for\n Delegators, which forward all requests to their handlers.\n\n Consequently, we need to check the following:\n\n 1. Is attr in the object's __dict__? If so, set it in self.\n 2. Does the handler have attr? If so, try to set it in handler.\n 3. Does self lack the attr? If so, try to set it in handler.\n 4. Did setting attr in the handler fail? If so, set it in self.\n \"\"\"\n # if we're setting _handler, set it in dict directly (but complain if\n # it's self).\n if attr == \"_handler\":\n if value is self:\n raise ValueError(\"Can't set object to be its own handler.\")\n self.__dict__[\"_handler\"] = value\n return\n # check if the attribute is in this object's dict\n elif attr in self.__dict__:\n return object.__setattr__(self, attr, value)\n # then check if the class knows about it\n elif hasattr(self.__class__, attr):\n return object.__setattr__(self, attr, value)\n # then try to set it in the handler\n if hasattr(self._handler, attr) or not hasattr(self, attr):\n try:\n return setattr(self._handler, attr, value)\n except AttributeError:\n pass # will try to create the attribute on self\n return object.__setattr__(self, attr, value)\n\n\nclass FunctionWrapper(object):\n \"\"\"Wraps a function to hide it from a class so that it isn't a method.\"\"\"\n\n def __init__(self, Function):\n self.Function = Function\n\n def __call__(self, *args, **kwargs):\n return self.Function(*args, **kwargs)\n\n\nclass ConstraintError(Exception):\n \"\"\"Raised when constraint on a container is violated.\"\"\"\n\n pass\n\n\ndef identity(x):\n \"\"\"Identity function: useful for avoiding special handling for None.\"\"\"\n return x\n\n\nclass ConstrainedContainer(object):\n \"\"\"Mixin class providing constraint checking to a container.\n\n Container should have a constraint property that __contains__ the items\n that will be allowed in the container. Can also have a mask property that\n contains a function that will be applied to each item (a) on checking the\n item for validity, and (b) on inserting the item in the container.\n\n WARNING: Because the mask is evaluated both when the item is checked and\n when it is inserted, any side-effects it has are applied _twice_. This\n means that any mask that mutates the object or changes global variables\n is unlikely to do what you want!\n \"\"\"\n\n _constraint = None\n mask = FunctionWrapper(identity)\n\n def _mask_for_new(self):\n \"\"\"Returns self.mask only if different from class data.\"\"\"\n if self.mask is not self.__class__.mask:\n return self.mask\n else:\n return None\n\n def __init__(self, constraint=None, mask=None):\n \"\"\"Returns new ConstrainedContainer, incorporating constraint.\n\n WARNING: Does not perform validation. 
It is the subclass's\n responsibility to perform validation during __init__ or __new__!\n \"\"\"\n if constraint is not None:\n self._constraint = constraint\n if mask is not None:\n self.mask = mask\n\n def matches_constraint(self, constraint):\n \"\"\"Returns True if all items in self are allowed.\"\"\"\n # First checks if constraints are compatible. If not, or if the current\n # sequence has no constraint, does item by item search.\n\n # bail out if self or constraint is empty\n if not constraint or not self:\n return True\n # try checking constraints for compatibility\n if self.constraint:\n try:\n constraint_ok = True\n for c in self.constraint:\n if c not in constraint:\n constraint_ok = False\n break\n if constraint_ok:\n return True\n except TypeError:\n pass # e.g. tried to check wrong type item in string alphabet\n\n # get here if either self.constraint is empty, or if we found an item\n # in self.constraint that wasn't in the other constraint. In either case,\n # we need to check self item by item.\n if self:\n try:\n for i in self:\n if i not in constraint:\n return False\n except TypeError: # e.g. tried to check int in string alphabet\n return False\n return True\n\n def other_is_valid(self, other):\n \"\"\"Returns True if other has only items allowed in self.constraint.\"\"\"\n # First, checks other.Constrant for compatibility.\n # If other.constraint is incompatible, checks items in other.\n mask = self.mask\n constraint = self.constraint\n if not constraint or not other:\n return True # bail out if empty\n try:\n # if other has a constraint, check whether it's compatible\n other_constraint = other.constraint\n if other_constraint:\n for c in map(mask, other_constraint):\n if c not in constraint:\n raise ConstraintError\n return True\n except (ConstraintError, AttributeError, TypeError):\n pass\n # get here if other doesn't have a constraint or if other's constraint\n # isn't valid on self's constraint.\n try:\n for item in map(mask, other):\n if item not in constraint:\n return False\n except TypeError:\n return False # e.g. 
tried to check int in str alphabet\n return True\n\n def item_is_valid(self, item):\n \"\"\"Returns True if single item is in self.constraint.\"\"\"\n try:\n if (not self.constraint) or self.mask(item) in self.constraint:\n return True\n else:\n return False\n except (TypeError, ConstraintError): # wrong type or not allowed\n return False\n\n def sequence_is_valid(self, sequence):\n \"\"\"Returns True if all items in sequence are in self.constraint.\"\"\"\n is_valid = self.item_is_valid\n for i in map(self.mask, sequence):\n if not is_valid(i):\n return False\n return True\n\n def _get_constraint(self):\n \"\"\"Accessor for constraint.\"\"\"\n return self._constraint\n\n def _set_constraint(self, constraint):\n \"\"\"Mutator for constraint.\"\"\"\n if self.matches_constraint(constraint):\n self._constraint = constraint\n else:\n raise ConstraintError(\n \"Sequence '%s' incompatible with constraint '%s'\" % (self, constraint)\n )\n\n constraint = property(_get_constraint, _set_constraint)\n\n\nclass ConstrainedList(ConstrainedContainer, list):\n \"\"\"List that is always valid on a specified constraint.\"\"\"\n\n def __init__(self, data=None, constraint=None, mask=None):\n \"\"\"Constructor for validated ConstrainedList.\"\"\"\n ConstrainedContainer.__init__(self, constraint, mask)\n if data:\n self.extend(data)\n\n def __add__(self, other):\n \"\"\"Returns copy of self added to copy of other if constraint correct.\"\"\"\n result = self.__class__(\n list(self) + list(map(self.mask, other)), constraint=self.constraint\n )\n mask = self._mask_for_new()\n if mask:\n result.mask = mask\n return result\n\n def __iadd__(self, other):\n \"\"\"Adds other to self if constraint correct.\"\"\"\n other = list(map(self.mask, other))\n if self.other_is_valid(other):\n return list.__iadd__(self, other)\n else:\n raise ConstraintError(\n \"Sequence '%s' has items not in constraint '%s'\"\n % (other, self.constraint)\n )\n\n def __mul__(self, multiplier):\n \"\"\"Returns copy of self multiplied by multiplier.\"\"\"\n result = self.__class__(list(self) * multiplier, constraint=self.constraint)\n mask = self._mask_for_new()\n if mask:\n result.mask = mask\n return result\n\n def __rmul__(self, multiplier):\n \"\"\"Returns copy of self multiplied by multiplier.\"\"\"\n result = self.__class__(list(self) * multiplier, constraint=self.constraint)\n mask = self._mask_for_new()\n if mask:\n result.mask = mask\n return result\n\n def __setitem__(self, index, item):\n \"\"\"Sets self[index] to item if item in constraint. 
Handles slices\"\"\"\n if isinstance(index, slice):\n if not self.other_is_valid(item):\n raise ConstraintError(\n \"Sequence '%s' contains items not in constraint '%s'.\"\n % (item, self.constraint)\n )\n item = list(map(self.mask, item))\n else:\n if not self.item_is_valid(item):\n raise ConstraintError(\n \"Item '%s' not in constraint '%s'\" % (item, self.constraint)\n )\n item = self.mask(item)\n list.__setitem__(self, index, item)\n\n def __setslice__(self, start, end, sequence):\n \"\"\"Make sure invalid data can't get into slice.\"\"\"\n if self.other_is_valid(sequence):\n list.__setslice__(self, start, end, list(map(self.mask, sequence)))\n else:\n raise ConstraintError(\n \"Sequence '%s' has items not in constraint '%s'\"\n % (sequence, self.constraint)\n )\n\n def append(self, item):\n \"\"\"Appends item to self.\"\"\"\n if not self.item_is_valid(item):\n raise ConstraintError(\n \"Item '%s' not in constraint '%s'\" % (item, self.constraint)\n )\n list.append(self, self.mask(item))\n\n def extend(self, sequence):\n \"\"\"Appends sequence to self.\"\"\"\n if self.other_is_valid(sequence):\n list.extend(self, list(map(self.mask, sequence)))\n else:\n raise ConstraintError(\n \"Some items in '%s' not in constraint '%s'\"\n % (sequence, self.constraint)\n )\n\n def insert(self, position, item):\n \"\"\"Inserts item at position in self.\"\"\"\n if not self.item_is_valid(item):\n raise ConstraintError(\n \"Item '%s' not in constraint '%s'\" % (item, self.constraint)\n )\n list.insert(self, position, self.mask(item))\n\n def __getslice__(self, *args, **kwargs):\n \"\"\"Make sure slice remembers the constraint.\"\"\"\n # to be deleted in py3\n val = list.__getslice__(self, *args, **kwargs)\n result = self.__class__(val, constraint=self.constraint)\n mask = self._mask_for_new()\n if mask:\n result.mask = mask\n return result\n\n def __getitem__(self, *args, **kwargs):\n \"\"\"Make sure slice remembers the constraint.\"\"\"\n if len(args) == 1 and type(args[0]) == int and not kwargs:\n val = list.__getitem__(self, args[0])\n return val\n\n val = list.__getitem__(self, *args, **kwargs)\n result = self.__class__(val, constraint=self.constraint)\n mask = self._mask_for_new()\n if mask:\n result.mask = mask\n return result\n\n\nclass MappedList(ConstrainedList):\n \"\"\"As for ConstrainedList, but maps items on contains and getitem.\"\"\"\n\n def __contains__(self, item):\n \"\"\"Ensure that contains applies the mask.\"\"\"\n try:\n return super(MappedList, self).__contains__(self.mask(item))\n except (TypeError, ValueError):\n return False\n\n\nclass ConstrainedDict(ConstrainedContainer, dict):\n \"\"\"Dict containing only keys that are valid on a specified constraint.\n\n Default behavior when fed a sequence is to store counts of the items in\n that sequence, which is not the standard dict interface (should raise a\n ValueError instead) but which is surprisingly useful in practice.\n \"\"\"\n\n value_mask = FunctionWrapper(identity)\n\n def _get_mask_and_valmask(self):\n \"\"\"Helper method to check whether mask and value_mask were set.\"\"\"\n if self.mask is self.__class__.mask:\n mask = None\n else:\n mask = self.mask\n\n if self.value_mask is self.__class__.value_mask:\n valmask = None\n else:\n valmask = self.value_mask\n return mask, valmask\n\n def __init__(self, data=None, constraint=None, mask=None, value_mask=None):\n \"\"\"Constructor for validated ConstrainedDict.\"\"\"\n ConstrainedContainer.__init__(self, constraint, mask)\n if value_mask is not None:\n self.value_mask = 
value_mask\n if data:\n try:\n self.update(data)\n except (ValueError, TypeError):\n for d in map(self.mask, iterable(data)):\n curr = self.get(d, 0)\n self[d] = curr + 1\n\n def __setitem__(self, key, value):\n \"\"\"Sets self[key] to value if value in constraint.\"\"\"\n if not self.item_is_valid(key):\n raise ConstraintError(\n \"Item '%s' not in constraint '%s'\" % (key, self.constraint)\n )\n key, value = self.mask(key), self.value_mask(value)\n dict.__setitem__(self, key, value)\n\n def copy(self):\n \"\"\"Should return copy of self, including constraint.\"\"\"\n mask, valmask = self._get_mask_and_valmask()\n return self.__class__(\n self, constraint=self.constraint, mask=mask, value_mask=valmask\n )\n\n def fromkeys(self, keys, value=None):\n \"\"\"Returns new dictionary with same constraint as self.\"\"\"\n mask, valmask = self._get_mask_and_valmask()\n return self.__class__(\n dict.fromkeys(keys, value),\n constraint=self.constraint,\n mask=mask,\n value_mask=valmask,\n )\n\n def setdefault(self, key, default=None):\n \"\"\"Returns self[key], setting self[key]=default if absent.\"\"\"\n key, default = self.mask(key), self.value_mask(default)\n if key not in self:\n self[key] = default\n return self[key]\n\n def update(self, other):\n \"\"\"Updates self with items in other.\n\n Implementation note: currently uses __setitem__, so no need to apply\n masks in this method.\n \"\"\"\n if not hasattr(other, \"keys\"):\n other = dict(other)\n for key in other:\n self[key] = other[key]\n\n\nclass MappedDict(ConstrainedDict):\n \"\"\"As for ConstrainedDict, but maps keys on contains and getitem.\"\"\"\n\n def __contains__(self, item):\n \"\"\"Ensure that contains applies the mask.\"\"\"\n try:\n return super(MappedDict, self).__contains__(self.mask(item))\n except (TypeError, ValueError):\n return False\n\n def __getitem__(self, item):\n \"\"\"Ensure that getitem applies the mask.\"\"\"\n return super(MappedDict, self).__getitem__(self.mask(item))\n\n def get(self, item, default=None):\n \"\"\"Ensure that get applies the mask.\"\"\"\n return super(MappedDict, self).get(self.mask(item), default)\n\n def has_key(self, item):\n \"\"\"Ensure that has_key applies the mask.\"\"\"\n return self.mask(item) in super(MappedDict, self)\n\n\ndef NestedSplitter(\n delimiters=None, same_level=False, constructor=str.strip, filter_=False\n):\n \"\"\"return a splitter which return a list (maybe nested) from a str using\n delimiters nestedly\n\n same_level -- if true, all the leaf items will be split whether there is\n delimiters in it or not\n\n constructor: modify each splited fields.\n filter_: filter the splits if not False(default)\n\n Note: the line input in parser is expected to be a str, but without check\n \"\"\"\n\n delimiters = delimiters or [None]\n\n def parser(line, index=0):\n # split line with curr delimiter\n curr = delimiters[index]\n if isinstance(curr, (list, tuple)):\n try:\n delim, maxsplits = curr\n except ValueError:\n raise ValueError(\n \"delimiter tuple/list should be \\\n [delimiter_str, maxsplits]\"\n )\n if maxsplits < 0:\n result = line.rsplit(delim, -maxsplits)\n else:\n result = line.split(delim, maxsplits)\n else:\n result = line.split(curr)\n\n # modify splits if required\n if constructor:\n result = list(map(constructor, result))\n # allow filter(None,..) 
to rip off the empty items\n if filter_ is not False:\n result = list(filter(filter_, result))\n\n # repeat recursively for next delimiter\n if index != len(delimiters) - 1: # not last delimiter\n result = [parser(f, index + 1) for f in result]\n\n # undo split if curr not in line and same_level==False\n # ignore the first delimiter\n if (\n not same_level\n and index > 0\n and len(result) == 1\n and isinstance(result[0], str)\n ):\n result = result[0]\n\n return result\n\n # parser.__doc__ = make_innerdoc(NestedSplitter, parser, locals())\n return parser\n\n\ndef remove_files(list_of_filepaths, error_on_missing=True):\n \"\"\"Remove list of filepaths, optionally raising an error if any are missing\"\"\"\n missing = []\n for fp in list_of_filepaths:\n try:\n remove(fp)\n except OSError:\n missing.append(fp)\n\n if error_on_missing and missing:\n raise OSError(\"Some filepaths were not accessible: %s\" % \"\\t\".join(missing))\n\n\ndef get_independent_coords(spans, random_tie_breaker=False):\n \"\"\"returns non-overlapping spans. spans must have structure\n [(start, end, ..), (..)]. spans can be decorated with arbitrary data\n after the end entry.\n\n Parameters\n ----------\n random_tie_breaker\n break overlaps by randomly choosing the first\n or second span. Defaults to the first span.\n\n \"\"\"\n\n if len(spans) <= 1:\n return spans\n\n last = spans[0]\n result = [last]\n for i in range(1, len(spans)):\n curr = spans[i]\n if curr[0] < last[1]:\n if random_tie_breaker:\n result[-1] = [last, curr][randint(0, 1)]\n else:\n result[-1] = last\n continue\n\n result.append(curr)\n last = curr\n\n return result\n\n\ndef get_merged_overlapping_coords(start_end):\n \"\"\"merges overlapping spans, assumes sorted by start\"\"\"\n result = [list(start_end[0])]\n prev_end = result[0][-1]\n for i in range(1, len(start_end)):\n curr_start, curr_end = start_end[i]\n # if we're beyond previous, add and continue\n if curr_start > prev_end:\n prev_end = curr_end\n result.append([curr_start, curr_end])\n elif curr_end > prev_end:\n prev_end = curr_end\n result[-1][-1] = prev_end\n else:\n pass # we lie completely within previous span\n\n return result\n\n\ndef get_run_start_indices(values, digits=None, converter_func=None):\n \"\"\"returns starting index, value for all distinct values\"\"\"\n assert not (digits and converter_func), \"Cannot set both digits and converter_func\"\n\n if digits is not None:\n\n def converter_func(x):\n return round(x, digits)\n\n elif converter_func is None:\n\n def converter_func(x):\n return x\n\n last_val = None\n for index, val in enumerate(values):\n val = converter_func(val)\n if val != last_val:\n yield [index, val]\n\n last_val = val\n\n return\n\n\ndef get_merged_by_value_coords(spans_value, digits=None):\n \"\"\"returns adjacent spans merged if they have the same value. Assumes\n [(start, end, val), ..] structure and that spans_value is sorted in\n ascending order.\n\n Parameters\n ----------\n digits\n if None, any data can be handled and exact values are\n compared. 
Otherwise values are rounded to that many digits.\n\n \"\"\"\n assert len(spans_value[0]) == 3, \"spans_value must have 3 records per row\"\n\n starts, ends, vals = list(zip(*spans_value))\n indices_distinct_vals = get_run_start_indices(vals, digits=digits)\n data = []\n i = 0\n for index, val in indices_distinct_vals:\n start = starts[index]\n end = ends[index]\n prev_index = max(index - 1, 0)\n try:\n data[-1][1] = ends[prev_index]\n except IndexError:\n pass\n\n data.append([start, end, val])\n\n if index < len(ends):\n data[-1][1] = ends[-1]\n\n return data\n\n\ndef get_object_provenance(obj):\n \"\"\"returns string of complete object provenance\"\"\"\n # algorithm inspired by Greg Baacon's answer to\n # https://stackoverflow.com/questions/2020014/get-fully-qualified-class\n # -name-of-an-object-in-python\n if isinstance(obj, type):\n mod = obj.__module__\n name = obj.__name__\n else:\n mod = obj.__class__.__module__\n name = obj.__class__.__name__\n\n if mod is None or mod == \"builtins\":\n result = name\n else:\n result = \".\".join([mod, name])\n return result\n\n\ndef path_exists(path):\n \"\"\"whether path is a valid path and it exists\"\"\"\n if not (isinstance(path, str) or isinstance(path, Path)):\n return False\n try:\n is_path = os_path.exists(str(path))\n except (ValueError, TypeError):\n is_path = False\n return is_path\n\n\ndef extend_docstring_from(source, pre=False):\n def docstring_inheriting_decorator(dest):\n parts = [source.__doc__, dest.__doc__ or \"\"]\n # trim leading/trailing blank lines from parts\n for i, part in enumerate(parts):\n part = part.split(\"\\n\")\n if not part[0].strip():\n part.pop(0)\n if part and not part[-1].strip():\n part.pop(-1)\n\n parts[i] = \"\\n\".join(part)\n\n if pre:\n parts.reverse()\n dest.__doc__ = \"\\n\".join(parts)\n return dest\n\n return docstring_inheriting_decorator\n\n\ndef ascontiguousarray(source_array, dtype=None):\n if source_array is not None:\n return numpy.ascontiguousarray(source_array, dtype=dtype)\n return source_array\n\n\ndef get_setting_from_environ(environ_var, params_types):\n \"\"\"extract settings from environment variable\n\n Parameters\n ----------\n environ_var : str\n name of an environment variable\n params_types : dict\n {param name: type}, values will be cast to type\n\n Returns\n -------\n dict\n\n Notes\n -----\n settings must of form 'param_name1=param_val,param_name2=param_val2'\n \"\"\"\n var = os.environ.get(environ_var, None)\n if var is None:\n return {}\n\n var = var.split(\",\")\n result = {}\n for item in var:\n item = item.split(\"=\")\n if len(item) != 2 or item[0] not in params_types:\n continue\n\n name, val = item\n try:\n val = params_types[name](val)\n result[name] = val\n except Exception:\n warnings.warn(\n f\"could not cast {name}={val} to type {params_types[name]}, skipping\"\n )\n\n return result\n",
"from unittest import TestCase, main\n\nfrom numpy import diag_indices, dot, finfo, float64\nfrom numpy.random import random\nfrom numpy.testing import assert_allclose\n\nfrom cogent3.maths.matrix_exponentiation import PadeExponentiator\nfrom cogent3.maths.matrix_logarithm import logm\nfrom cogent3.maths.measure import (\n jsd,\n jsm,\n paralinear_continuous_time,\n paralinear_discrete_time,\n)\n\n\n__author__ = \"Gavin Huttley\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\"Gavin Huttley\", \"Stephen Ka-Wah Ma\"]\n__license__ = \"BSD-3\"\n__version__ = \"2020.7.2a\"\n__maintainer__ = \"Gavin Huttley\"\n__email__ = \"[email protected]\"\n__status__ = \"Alpha\"\n\n\ndef gen_q_p():\n q1 = random((4, 4))\n indices = diag_indices(4)\n q1[indices] = 0\n q1[indices] = -q1.sum(axis=1)\n p1 = PadeExponentiator(q1)()\n return q1, p1\n\n\ndef gen_qs_ps():\n q1, p1 = gen_q_p()\n q2, p2 = gen_q_p()\n p3 = dot(p1, p2)\n q3 = logm(p3)\n return (q1, p1), (q2, p2), (q3, p3)\n\n\ndef next_pi(pi, p):\n return dot(pi, p)\n\n\nclass ParalinearTest(TestCase):\n def test_paralinear_discrete_time(self):\n \"\"\"tests paralinear_discrete_time to compare it with the output of paralinear_continuous_time\"\"\"\n qp1, qp2, qp3 = gen_qs_ps()\n pi1 = random(4)\n pi1 /= pi1.sum()\n pi2 = next_pi(pi1, qp1[1])\n pi3 = next_pi(pi2, qp2[1])\n\n con_time_pl1 = paralinear_continuous_time(qp1[1], pi1, qp1[0])\n dis_time_pl1 = paralinear_discrete_time(qp1[1], pi1)\n assert_allclose(con_time_pl1, dis_time_pl1)\n\n con_time_pl2 = paralinear_continuous_time(qp2[1], pi2, qp2[0])\n dis_time_pl2 = paralinear_discrete_time(qp2[1], pi2)\n assert_allclose(con_time_pl2, dis_time_pl2)\n\n con_time_pl3 = paralinear_continuous_time(qp3[1], pi3, qp3[0])\n dis_time_pl3 = paralinear_discrete_time(qp3[1], pi3)\n assert_allclose(con_time_pl3, dis_time_pl3)\n\n def test_paralinear_continuous_time(self):\n \"\"\"paralinear_continuous_time is additive from random matrices\"\"\"\n qp1, qp2, qp3 = gen_qs_ps()\n pi1 = random(4)\n pi1 /= pi1.sum()\n pi2 = next_pi(pi1, qp1[1])\n\n pl1 = paralinear_continuous_time(qp1[1], pi1, qp1[0])\n pl2 = paralinear_continuous_time(qp2[1], pi2, qp2[0])\n pl3 = paralinear_continuous_time(qp3[1], pi1, qp3[0])\n\n assert_allclose(pl1 + pl2, pl3)\n\n def test_paralinear_continuous_time_validate(self):\n \"\"\"paralinear_continuous_time validate check consistency\"\"\"\n qp1, qp2, qp3 = gen_qs_ps()\n pi1 = random(4)\n\n with self.assertRaises(AssertionError):\n paralinear_continuous_time(\n qp1[1], qp1[0], qp1[0], validate=True\n ) # pi invalid shape\n\n with self.assertRaises(AssertionError):\n paralinear_continuous_time(\n qp1[1], pi1, qp1[0], validate=True\n ) # pi invalid values\n\n pi1 /= pi1.sum()\n with self.assertRaises(AssertionError):\n paralinear_continuous_time(qp1[1], pi1, qp1[1], validate=True) # invalid Q\n\n with self.assertRaises(AssertionError):\n paralinear_continuous_time(qp1[0], pi1, qp1[0], validate=True) # invalid P\n\n qp2[0][0, 0] = 9\n with self.assertRaises(AssertionError):\n paralinear_continuous_time(qp1[1], pi1, qp2[0], validate=True) # invalid Q\n\n qp2[1][0, 3] = 9\n with self.assertRaises(AssertionError):\n paralinear_continuous_time(qp2[1], pi1, qp1[0], validate=True) # invalid P\n\n\nclass TestJensenShannon(TestCase):\n # the following value is 4x machine precision, used to handle\n # architectures that have lower precision and do not produce 0.0 from\n # numerical calcs involved in jsd/jsm\n atol = 4 * finfo(float64).eps\n\n def 
test_jsd_validation(self):\n \"\"\"jsd fails with malformed data\"\"\"\n freqs1 = random(5)\n normalised_freqs1 = freqs1 / freqs1.sum()\n two_dimensional_freqs1 = [freqs1, freqs1]\n shorter_freqs1 = freqs1[:4]\n\n freqs2 = random(5)\n normalised_freqs2 = freqs2 / freqs2.sum()\n two_dimensional_freqs2 = [freqs2, freqs2]\n shorter_freqs2 = freqs2[:4]\n\n with self.assertRaises(AssertionError):\n jsd(\n freqs1, two_dimensional_freqs2, validate=True\n ) # freqs1/freqs2 mismatched shape\n\n with self.assertRaises(AssertionError):\n jsd(\n two_dimensional_freqs1, freqs2, validate=True\n ) # freqs1/freqs2 mismatched shape\n\n with self.assertRaises(AssertionError):\n jsd(freqs1, shorter_freqs2, validate=True) # freqs1/freqs2 mismatched shape\n\n with self.assertRaises(AssertionError):\n jsd(shorter_freqs1, freqs2, validate=True) # freqs1/freqs2 mismatched shape\n\n with self.assertRaises(AssertionError):\n jsd(\n two_dimensional_freqs1, freqs2, validate=True\n ) # freqs1 has incorrect dimension\n\n with self.assertRaises(AssertionError):\n jsd(\n two_dimensional_freqs1, two_dimensional_freqs2, validate=True\n ) # freqs1 has incorrect dimension\n\n with self.assertRaises(AssertionError):\n jsd(\n freqs1, two_dimensional_freqs2, validate=True\n ) # freqs2 has incorrect dimension\n\n with self.assertRaises(AssertionError):\n jsd(freqs1, freqs2, validate=True) # invalid freqs1\n\n with self.assertRaises(AssertionError):\n jsd(freqs1, normalised_freqs2, validate=True) # invalid freqs1\n\n with self.assertRaises(AssertionError):\n jsd(normalised_freqs1, freqs2, validate=True) # invalid freqs2\n\n def test_jsd(self):\n \"\"\"evaluate jsd between identical, and non-identical distributions\"\"\"\n # case1 is testing if the jsd between two identical distributions is 0.0\n case1 = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n for index in range(len(case1[0])):\n case1[0][index] = 1.0\n case1[1][index] = 1.0\n assert_allclose(\n jsd(case1[0], case1[1], validate=True),\n 0.0,\n err_msg=\"Testing case1 for jsd failed\",\n atol=self.atol,\n )\n case1[0][index] = 0.0\n case1[1][index] = 0.0\n # case2 is testing the numerical output of jsd between two distant distributions\n case2 = [[1 / 10, 9 / 10, 0], [0, 1 / 10, 9 / 10]]\n assert_allclose(\n jsd(case2[0], case2[1], validate=True),\n 0.7655022032053593,\n err_msg=\"Testing case2 for jsd failed\",\n atol=self.atol,\n )\n # case3 is testing the numerical output of jsd between two distant distributions\n case3 = [[1.0, 0.0], [1 / 2, 1 / 2]]\n assert_allclose(\n jsd(case3[0], case3[1], validate=True),\n 0.3112781244591328,\n err_msg=\"Testing case3 for jsd failed\",\n atol=self.atol,\n )\n # case4 - the jsd between two identical uniform distributions is 0.0\n case4 = [\n [1 / 10] * 10,\n [1 / 10] * 10,\n ]\n assert_allclose(\n jsd(case4[0], case4[1], validate=True),\n 0.0,\n err_msg=\"Testing case4 for jsd failed\",\n atol=self.atol,\n )\n\n def test_jsm(self):\n \"\"\"evaluate jsm between identical, and non-identical distributions\"\"\"\n case1 = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n for index in range(len(case1[0])):\n case1[0][index] = 1.0\n case1[1][index] = 1.0\n assert_allclose(\n jsm(case1[0], case1[1], validate=True),\n 0.0,\n err_msg=\"Testing case1 for jsm failed\",\n atol=self.atol,\n )\n case1[0][index] = 0.0\n case1[1][index] = 0.0\n # case2 is testing the numerical output of jsm between two random distributions\n case2 = [[1 / 10, 9 / 10, 0], [0, 1 / 10, 9 / 10]]\n assert_allclose(\n jsm(case2[0], case2[1], validate=True),\n 
0.8749298275892526,\n err_msg=\"Testing case2 for jsm failed\",\n atol=self.atol,\n )\n # case3 is testing the numerical output of jsm between two random distributions\n case3 = [[1.0, 0.0], [1 / 2, 1 / 2]]\n assert_allclose(\n jsm(case3[0], case3[1], validate=True),\n 0.5579230452841438,\n err_msg=\"Testing case3 for jsm failed\",\n atol=self.atol,\n )\n # case4 is testing if the jsm between two identical uniform distributions is 0.0\n case4 = [\n [1 / 10] * 10,\n [1 / 10] * 10,\n ]\n assert_allclose(\n jsm(case4[0], case4[1], validate=True),\n 0.0,\n err_msg=\"Testing case4 for jsm failed\",\n atol=self.atol,\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.testing.assert_equal"
],
[
"numpy.ascontiguousarray",
"numpy.array",
"numpy.float64",
"numpy.finfo"
],
[
"numpy.dot",
"numpy.random.random",
"numpy.finfo",
"numpy.testing.assert_allclose",
"numpy.diag_indices"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
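The jsd/jsm tests in the entry above pin the expected values to specific numbers (0.7655..., 0.3112..., 0.8749...). As a cross-check, here is a minimal, self-contained sketch of the base-2 Jensen-Shannon divergence and metric (the textbook formula, not the library implementation under test) that reproduces those reference values:

import numpy as np

def entropy2(freqs):
    # base-2 Shannon entropy; zero-probability terms contribute nothing
    freqs = np.asarray(freqs, dtype=float)
    nz = freqs > 0
    return -np.sum(freqs[nz] * np.log2(freqs[nz]))

def jsd_sketch(p, q):
    # Jensen-Shannon divergence: entropy of the mixture minus the mean entropy
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    m = (p + q) / 2.0
    return entropy2(m) - (entropy2(p) + entropy2(q)) / 2.0

def jsm_sketch(p, q):
    # Jensen-Shannon metric: square root of the divergence
    return np.sqrt(jsd_sketch(p, q))

print(jsd_sketch([1/10, 9/10, 0], [0, 1/10, 9/10]))  # ~0.7655022 (case2 of test_jsd)
print(jsd_sketch([1.0, 0.0], [1/2, 1/2]))            # ~0.3112781 (case3 of test_jsd)
print(jsm_sketch([1/10, 9/10, 0], [0, 1/10, 9/10]))  # ~0.8749298 (case2 of test_jsm)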
liyingben/ts-raster | [
"cfec56f4aced7054dd0db3c4106194f14d2f00b9"
] | [
"tsraster/calculate.py"
] | [
"'''\ncalculate.py: a module for extracting, evaluating and saving features\n'''\n\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport gdal\nimport glob\nfrom pathlib import Path\nfrom tsfresh import extract_features\nfrom tsfresh.utilities.distribution import MultiprocessingDistributor, LocalDaskDistributor\nfrom tsfresh.feature_selection.relevance import calculate_relevance_table as crt\nfrom tsraster.prep import image_to_series, image_to_array, read_images\nimport tsraster.prep as tr\n#from tsfresh.utilities.distribution import LocalDaskDistributor\n\n\ndef CreateTiff(Name, Array, driver, NDV, GeoT, Proj, DataType, path):\n '''\n Converts array to a single or multi band raster file\n\n :param Name: name of the output tiff file\n :param Array: numpy array to be converted to\n :param driver: output image (data) format\n :param NDV: no Data Value (-9999)\n :param GeoT: geographic transformation\n :param Proj: projection\n :param DataType: array data format\n :return: GeoTiff\n '''\n\n Array[np.isnan(Array)] = NDV\n\n rows = Array.shape[1]\n cols = Array.shape[0]\n band = Array.shape[2]\n noData = -9999\n driver = gdal.GetDriverByName('GTiff')\n Name_out = os.path.join(path,Name)\n print('tif:'+ Name_out)\n DataSet = driver.Create(Name_out, rows, cols, band, gdal.GDT_Float32)\n DataSet.SetGeoTransform(GeoT)\n DataSet.SetProjection(Proj)\n\n for i in range(band):\n DataSet.GetRasterBand(i + 1).WriteArray(Array[:, :, i])\n DataSet.GetRasterBand(i + 1).SetNoDataValue(noData)\n\n DataSet.FlushCache()\n return Name\n\n\ndef calculateFeatures(path, parameters, reset_df,raster_mask=None ,tiff_output=True, workers = None):\n '''\n Calculates features or the statistical characteristics of time-series raster data.\n It can also save features as a csv file (dataframe) and/or tiff file.\n \n :param path: directory path to the raster files\n :param parameters: a dictionary of features to be extracted\n :param reset_df: boolean option for existing raster inputs as dataframe\n :param raster_mask: path to binary raster mask\n :param tiff_output: boolean option for exporting tiff file\n :return: extracted features as a dataframe and tiff file\n '''\n \n if reset_df == False:\n #if reset_df =F read in csv file holding saved version of my_df\n my_df = tr.read_my_df(path)\n \n else:\n #if reset_df =T calculate ts_series and save csv\n my_df = image_to_series(path)\n print('df: '+os.path.join(path,'my_df.csv'))\n my_df.to_csv(os.path.join(path,'my_df.csv'), chunksize=10000, index=False)\n \n # mask \n if raster_mask is not None:\n my_df = tr.mask_df(raster_mask = raster_mask, \n original_df = my_df)\n \n \n if workers is not None:\n Distributor = MultiprocessingDistributor(n_workers=workers,\n disable_progressbar=False,\n progressbar_title=\"Feature Extraction\")\n #Distributor = LocalDaskDistributor(n_workers=workers)\n else:\n Distributor = None\n \n extracted_features = extract_features(my_df, \n default_fc_parameters = parameters,\n column_sort = \"time\",\n column_value = \"value\",\n column_id = \"pixel_id\",\n column_kind=\"kind\", \n #chunksize = 1000,\n distributor=Distributor\n )\n \n # change index name to match pixel and time period\n extracted_features.index.rename('pixel_id',inplace=True)\n extracted_features.reset_index(inplace=True, level=['pixel_id'])\n \n extracted_features['time'] = str(my_df.time.min())+\"_\"+str(my_df.time.max())\n extracted_features.set_index(['pixel_id', 'time'], inplace=True) \n \n # unmask extracted features\n extracted_features = 
tr.unmask_from_mask(mask_df_output = extracted_features, \n missing_value = -9999,\n raster_mask = raster_mask)\n \n # deal with output location \n out_path = Path(path).parent.joinpath(Path(path).stem+\"_features\")\n out_path.mkdir(parents=True, exist_ok=True)\n \n # write out features to csv file\n print(\"features:\"+os.path.join(out_path,'extracted_features.csv'))\n extracted_features.to_csv(os.path.join(out_path,'extracted_features.csv'), chunksize=10000)\n \n # write out feature names \n kr = pd.DataFrame(list(extracted_features.columns))\n kr.index += 1\n kr.index.names = ['band']\n kr.columns = ['feature_name']\n kr.to_csv(os.path.join(out_path,\"features_names.csv\"))\n \n # write out features to tiff file\n if tiff_output == False:\n return extracted_features\n else:\n # get image dimension from raw data\n rows, cols, num = image_to_array(path).shape\n # get the total number of features extracted\n matrix_features = extracted_features.values\n num_of_layers = matrix_features.shape[1]\n \n #reshape the dimension of features extracted\n f2Array = matrix_features.reshape(rows, cols, num_of_layers)\n output_file = 'extracted_features.tiff' \n \n #Get Meta Data from raw data\n raw_data = read_images(path)\n GeoTransform = raw_data[0].GetGeoTransform()\n driver = gdal.GetDriverByName('GTiff')\n \n noData = -9999\n \n Projection = raw_data[0].GetProjectionRef()\n DataType = gdal.GDT_Float32\n \n #export tiff\n CreateTiff(output_file, f2Array, driver, noData, GeoTransform, Projection, DataType, path=out_path)\n return extracted_features\n\n\n#def calculateFeatures2(path, parameters, mask=None, reset_df=True, tiff_output=True, \n# missing_value =-9999,workers=2):\n# '''\n# Calculates features or the statistical characteristics of time-series raster data.\n# It can also save features as a csv file (dataframe) and/or tiff file.\n# \n# :param path: directory path to the raster files\n# :param parameters: a dictionary of features to be extracted\n# :param reset_df: boolean option for existing raster inputs as dataframe\n# :param tiff_output: boolean option for exporting tiff file\n# :return: extracted features as a dataframe and tiff file\n# '''\n# \n# if reset_df == False:\n# #if reset_df =F read in csv file holding saved version of my_df\n# df_long = pd.read_csv(os.path.join(path,'df_long.csv'))\n# \n# # create example of original df to help unmask \n# df_original = pd.read_csv(os.path.join(path,'df_original.csv') )\n# df_original = pd.DataFrame(index = pd.RangeIndex(start=0,\n# stop=len(df_original),\n# step=1), \n# dtype=np.float32)\n# \n# # set index name to pixel id \n# df_original.index.names = ['pixel_id']\n# \n# else:\n# #if reset_df =T calculate ts_series and save csv\n# df_long, df_original = image_to_series2(path, \n# mask)\n# \n# print('df: '+os.path.join(path,'df_long.csv'))\n# df_long.to_csv(os.path.join(path,'df_long.csv'), \n# chunksize=10000, \n# index=False)\n# \n# df_original.to_csv(os.path.join(path,'df_original.csv'), \n# chunksize=10000, \n# index=True)\n# \n# # remove missing values from df_long\n# df_long = df_long[df_long['value'] != missing_value]\n# \n# # check if the number of observation per pixel are not identical\n# if ~df_long.groupby(['pixel_id','kind']).kind.count().all():\n# print('ERROR: the number of observation per pixel are not identical')\n# print(' fix missing values to have a uniform time series')\n# print(df_long.groupby(['time']).time.unique())\n# \n# return(df_long.groupby(['pixel_id','kind']).kind.count().all())\n# \n# \n# Distributor = 
MultiprocessingDistributor(n_workers=workers,\n# disable_progressbar=False,\n# progressbar_title=\"Feature Extraction\")\n# #Distributor = LocalDaskDistributor(n_workers=2)\n# \n# extracted_features = extract_features(df_long,\n# #chunksize=10e6,\n# default_fc_parameters=parameters,\n# column_id=\"pixel_id\", \n# column_sort=\"time\", \n# column_kind=\"kind\", \n# column_value=\"value\",\n# distributor=Distributor\n# )\n# \n# # extracted_features.index is == df_long.pixel_id\n# extracted_features.index.name= 'pixel_id'\n# \n# \n# #unmask extracted features to match df_original index \n# extracted_features = pd.concat( [df_original, extracted_features], \n# axis=1 )\n# \n# # fill missing values with correct \n# extracted_features.fillna(missing_value, inplace=True)\n# \n# \n# # deal with output location \n# out_path = Path(path).parent.joinpath(Path(path).stem+\"_features\")\n# out_path.mkdir(parents=True, exist_ok=True)\n# \n# # write out features to csv file\n# print(\"features:\"+os.path.join(out_path,'extracted_features.csv'))\n# extracted_features.to_csv(os.path.join(out_path,'extracted_features.csv'), chunksize=10000)\n# \n# # write data frame\n# kr = pd.DataFrame(list(extracted_features.columns))\n# kr.index += 1\n# kr.index.names = ['band']\n# kr.columns = ['feature_name']\n# kr.to_csv(os.path.join(out_path,\"features_names.csv\"))\n# \n# \n# # write out features to tiff file\n# if tiff_output == False:\n# \n# '''tiff_output is true and by default exports tiff '''\n# \n# return extracted_features \n# \n# else:\n# print('use export_features instead')\n# # get image dimension from raw data\n# rows, cols, num = image_to_array(path).shape\n# # get the total number of features extracted\n# matrix_features = extracted_features.values\n# num_of_layers = matrix_features.shape[1]\n# \n# #reshape the dimension of features extracted\n# f2Array = matrix_features.reshape(rows, cols, num_of_layers)\n# output_file = 'extracted_features.tiff' \n# \n# #Get Meta Data from raw data\n# raw_data = read_images(path)\n# GeoTransform = raw_data[0].GetGeoTransform()\n# driver = gdal.GetDriverByName('GTiff')\n# \n# noData = -9999\n# \n# Projection = raw_data[0].GetProjectionRef()\n# DataType = gdal.GDT_Float32\n# \n# #export tiff\n# CreateTiff(output_file, f2Array, driver, noData, GeoTransform, Projection, DataType, path=out_path)\n# return extracted_features\n\n\ndef features_to_array(path, input_file):\n '''\n Converts a dataframe to array\n\n :param path: directory path to the raster files\n :param input_file: features in dataframe\n :return: array with height and width similar to the input rasters\n '''\n\n rows, cols, num = image_to_array(path).shape\n my_df = pd.read_csv(input_file)\n\n\n #df_features = my_df.drop(my_df.columns[0], axis=1)\n matrix_features = my_df.values\n num_of_layers = matrix_features.shape[1]\n\n f2Array = matrix_features.reshape(rows, cols, num_of_layers)\n\n return f2Array\n\n\ndef exportFeatures(path, input_file, output_file,\n driver = gdal.GetDriverByName('GTiff'),\n noData = -9999, DataType = gdal.GDT_Float32):\n\n '''\n Saves features stored in a data frame as a mulit-band tiff file\n\n :param path: directory path to the raster files\n :param input_file: the features stored in pandas data frame\n :param output_file: the name of the output_file\n :param driver: data format of the output file\n :param noData: no data value\n :param DataType: array data format\n :return: tiff file of the exported features\n '''\n output_file = output_file\n raw_data = read_images(path)\n 
geoTransform = raw_data[0].GetGeoTransform()\n projection = raw_data[0].GetProjectionRef()\n f2Array = features_to_array(path, input_file)\n export_features = CreateTiff(output_file, f2Array, driver, noData, geoTransform, projection, DataType)\n\n return export_features\n\n\n\ndef checkRelevance(x, y, ml_task=\"auto\", fdr_level=0.05):\n '''\n Checks the statistical relevance of features to the target data\n \n :param x: pandas dataframe containing the features extracted\n :param y: pandas series\n :return: dataframe\n '''\n \n # remove non-matching indexes\n features, target = set_common_index(a=x, b=y)\n \n #if features.index.names==['pixel_id', 'time']:\n # features.index = features.index.droplevel(level='time')\n \n features = features.drop(labels=[\"id\",'index', 'pixel_id','time'], axis=1, errors ='ignore')\n \n # calculate relevance\n relevance_test = crt(features,\n target.squeeze(), # convert back from df to series\n ml_task=ml_task,\n fdr_level=fdr_level)\n \n return relevance_test\n\ndef checkRelevance2(x, y, ml_task=\"auto\", fdr_level=0.05):\n '''\n Checks the statistical significance of features selects only significant ones\n\n :param x: pandas dataframe containing the features extracted\n :param y: pandas series\n :return: 2 dataframes relevance_test, relevant_features\n '''\n\n # remove non-matching indexes\n features, target = tr.set_common_index(a=x, b=y)\n #if features.index.names==['pixel_id', 'time']:\n # features.index = features.index.droplevel(level='time')\n \n features = features.drop(labels=[\"id\",'index', 'pixel_id','time'], \n axis=1, \n errors ='ignore')\n\n # calculate relevance\n relevance_test = crt(features,\n target.squeeze(),\n ml_task=ml_task,\n fdr_level=fdr_level)\n\n # gather subset of relevant features\n relevant_feature_names = relevance_test.feature[relevance_test.relevant==True]\n X_relevant_features = features[relevant_feature_names]\n \n return relevance_test, X_relevant_features "
] | [
[
"numpy.isnan",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
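calculateFeatures in the ts-raster entry above takes a directory of rasters and a tsfresh-style feature dictionary. A minimal usage sketch follows; the folder name and the choice of feature calculators are assumptions for illustration, not taken from the repository:

from tsraster.calculate import calculateFeatures

# hypothetical folder of single-band rasters; the dict maps tsfresh calculator
# names to their parameters (None for parameter-free calculators)
parameters = {"mean": None, "maximum": None, "minimum": None}

features = calculateFeatures(path="temperature/",   # hypothetical raster folder
                             parameters=parameters,
                             reset_df=True,          # rebuild my_df.csv from the rasters
                             tiff_output=False)      # return only the dataframe
print(features.head())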
rishusiva/InsurancePrediction_ANN | [
"4d73ad08b52b581feae4d6b18276cf47701f95ad",
"4d73ad08b52b581feae4d6b18276cf47701f95ad"
] | [
"ANN_From_Scratch/gradient_descent.py",
"Preprocessing_Data/cleaning_dataset.py"
] | [
"from NN_model import *\nimport math\nimport numpy as np\n\ncoef,intercept = model.get_weights()\n\ndef sigmoid(X):\n return 1/ (1+math.exp(-X))\n\n\ndef prediction_function(age,affordibility):\n weighted_sum = coef[0]*age + coef[1]*affordibility + intercept\n return sigmoid(weighted_sum)\n\n\n#print(prediction_function(.28,1))\n\ndef loss_function(y_true,y_predicted):\n epsilon=1e-15\n y_predicted_new = [max(i,epsilon) for i in y_predicted]\n y_predicted_new = [min(i,1-epsilon) for i in y_predicted_new]\n y_predicted_new = np.array(y_predicted_new)\n return -np.mean(y_true*np.log(y_predicted_new)+(1-y_true)*np.log(1-y_predicted_new))\n\n\ndef sigmoid_numpy(X):\n return 1/(1+np.exp(-X))\n\n\ndef gradient_descent(age,affordibility,y_true,epochs,loss_threshold):\n w1 = w2 = 1\n b = 0\n learning_rate = 0.5\n m = len(age)\n\n for i in range(epochs):\n weighted_sum = w1*age + w2*affordibility + b\n y_predicted = sigmoid_numpy(weighted_sum)\n\n loss = loss_function(y_true,y_predicted)\n\n dw1 = (1/m)*np.dot(np.transpose(age),(y_predicted-y_true))\n dw2 = (1/m)*np.dot(np.transpose(affordibility),(y_predicted-y_true))\n db = np.mean(y_predicted-y_true)\n\n\n w1 = w1 - learning_rate*dw1\n w2 = w2 - learning_rate*dw2\n b = b - learning_rate*db\n\n print(f'Epoch:{i},w1:{w1},w2:{w2},bias:{b},loss:{loss}')\n\n if loss<=loss_threshold:\n break\n return w1, w2 , b\n\nprint(gradient_descent(X_train_scaled['age'],X_train_scaled['affordibility'],y_train,1000,0.4631))",
"import numpy as numpy\nimport tensorflow as tf\nfrom tensorflow import keras\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndf=pd.read_csv(\"insurance_data.csv\")\n#print(df.head())\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test , y_train , y_test = train_test_split(df[['age','affordibility']],df.bought_insurance,test_size=0.2,random_state=25)\n\n#print(len(X_train)) \n#print(len(X_test))\n\nX_train_scaled = X_train.copy()\nX_train_scaled['age'] = X_train_scaled['age']/100\n\nX_test_scaled = X_test.copy()\nX_test_scaled['age'] = X_test_scaled['age']/100\n\n"
] | [
[
"numpy.log",
"numpy.mean",
"numpy.transpose",
"numpy.array",
"numpy.exp"
],
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
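gradient_descent.py in the entry above reads its inputs from NN_model and the insurance CSV prepared in cleaning_dataset.py. The fragment below is a self-contained sketch of the same sigmoid/log-loss update rule on synthetic data (the data, seed and stopping threshold are made up), which makes it easy to sanity-check the gradient expressions in isolation:

import numpy as np

rng = np.random.default_rng(0)
age = rng.uniform(0.18, 0.65, 200)                  # synthetic, already scaled ages
afford = rng.integers(0, 2, 200).astype(float)      # synthetic affordability flag
y_true = (0.8 * age + 0.5 * afford + rng.normal(0, 0.1, 200) > 0.6).astype(float)

w1 = w2 = 1.0
b, lr, m = 0.0, 0.5, len(age)
for epoch in range(1000):
    y_pred = 1.0 / (1.0 + np.exp(-(w1 * age + w2 * afford + b)))   # sigmoid
    y_clip = np.clip(y_pred, 1e-15, 1 - 1e-15)
    loss = -np.mean(y_true * np.log(y_clip) + (1 - y_true) * np.log(1 - y_clip))
    # same gradients as in gradient_descent(): (1/m) * X^T (y_pred - y_true)
    w1 -= lr * np.dot(age, y_pred - y_true) / m
    w2 -= lr * np.dot(afford, y_pred - y_true) / m
    b -= lr * np.mean(y_pred - y_true)
    if loss <= 0.45:                                 # arbitrary stopping threshold
        break
print(w1, w2, b, loss)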
AicyDC/ai-safety-gridworlds | [
"b574b3e42880e32245a6c69502af3e9782ae2879"
] | [
"ai_safety_gridworlds/environments/shared/rl/array_spec.py"
] | [
"# Copyright 2018 The AI Safety Gridworlds Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"A class to describe the shape and dtype of numpy arrays.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\n\nclass ArraySpec(object):\n \"\"\"Describes a numpy array or scalar shape and dtype.\n\n An `ArraySpec` allows an API to describe the arrays that it accepts or\n returns, before that array exists.\n \"\"\"\n __slots__ = ('_shape', '_dtype', '_name')\n\n def __init__(self, shape, dtype, name=None):\n \"\"\"Initializes a new `ArraySpec`.\n\n Args:\n shape: An iterable specifying the array shape.\n dtype: numpy dtype or string specifying the array dtype.\n name: Optional string containing a semantic name for the corresponding\n array. Defaults to `None`.\n\n Raises:\n TypeError: If the shape is not an iterable or if the `dtype` is an invalid\n numpy dtype.\n \"\"\"\n self._shape = tuple(shape)\n self._dtype = np.dtype(dtype)\n self._name = name\n\n @property\n def shape(self):\n \"\"\"Returns a `tuple` specifying the array shape.\"\"\"\n return self._shape\n\n @property\n def dtype(self):\n \"\"\"Returns a numpy dtype specifying the array dtype.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n \"\"\"Returns the name of the ArraySpec.\"\"\"\n return self._name\n\n def __repr__(self):\n return 'ArraySpec(shape={}, dtype={}, name={})'.format(self.shape,\n repr(self.dtype),\n repr(self.name))\n\n def __eq__(self, other):\n \"\"\"Checks if the shape and dtype of two specs are equal.\"\"\"\n if not isinstance(other, ArraySpec):\n return False\n return self.shape == other.shape and self.dtype == other.dtype\n\n def __ne__(self, other):\n return not self == other\n\n def _fail_validation(self, message, *args):\n message %= args\n if self.name:\n message += ' for spec %s' % self.name\n raise ValueError(message)\n\n def validate(self, value):\n \"\"\"Checks if value conforms to this spec.\n\n Args:\n value: a numpy array or value convertible to one via `np.asarray`.\n\n Returns:\n value, converted if necessary to a numpy array.\n\n Raises:\n ValueError: if value doesn't conform to this spec.\n \"\"\"\n value = np.asarray(value)\n if value.shape != self.shape:\n self._fail_validation(\n 'Expected shape %r but found %r', self.shape, value.shape)\n if value.dtype != self.dtype:\n self._fail_validation(\n 'Expected dtype %s but found %s', self.dtype, value.dtype)\n\n def generate_value(self):\n \"\"\"Generate a test value which conforms to this spec.\"\"\"\n return np.zeros(shape=self.shape, dtype=self.dtype)\n\n\nclass BoundedArraySpec(ArraySpec):\n \"\"\"An `ArraySpec` that specifies minimum and maximum values.\n\n Example usage:\n ```python\n # Specifying the same minimum and maximum for every element.\n spec = BoundedArraySpec((3, 4), np.float64, minimum=0.0, 
maximum=1.0)\n\n # Specifying a different minimum and maximum for each element.\n spec = BoundedArraySpec(\n (2,), np.float64, minimum=[0.1, 0.2], maximum=[0.9, 0.9])\n\n # Specifying the same minimum and a different maximum for each element.\n spec = BoundedArraySpec(\n (3,), np.float64, minimum=-10.0, maximum=[4.0, 5.0, 3.0])\n ```\n\n Bounds are meant to be inclusive. This is especially important for\n integer types. The following spec will be satisfied by arrays\n with values in the set {0, 1, 2}:\n ```python\n spec = BoundedArraySpec((3, 4), np.int, minimum=0, maximum=2)\n ```\n \"\"\"\n\n __slots__ = ('_minimum', '_maximum')\n\n def __init__(self, shape, dtype, minimum, maximum, name=None):\n \"\"\"Initializes a new `BoundedArraySpec`.\n\n Args:\n shape: An iterable specifying the array shape.\n dtype: numpy dtype or string specifying the array dtype.\n minimum: Number or sequence specifying the maximum element bounds\n (inclusive). Must be broadcastable to `shape`.\n maximum: Number or sequence specifying the maximum element bounds\n (inclusive). Must be broadcastable to `shape`.\n name: Optional string containing a semantic name for the corresponding\n array. Defaults to `None`.\n\n Raises:\n ValueError: If `minimum` or `maximum` are not broadcastable to `shape`.\n TypeError: If the shape is not an iterable or if the `dtype` is an invalid\n numpy dtype.\n \"\"\"\n super(BoundedArraySpec, self).__init__(shape, dtype, name)\n\n try:\n np.broadcast_to(minimum, shape=shape)\n except ValueError as numpy_exception:\n raise ValueError('minimum is not compatible with shape. '\n 'Message: {!r}.'.format(numpy_exception))\n\n try:\n np.broadcast_to(maximum, shape=shape)\n except ValueError as numpy_exception:\n raise ValueError('maximum is not compatible with shape. '\n 'Message: {!r}.'.format(numpy_exception))\n\n self._minimum = np.array(minimum)\n self._minimum.setflags(write=False)\n\n self._maximum = np.array(maximum)\n self._maximum.setflags(write=False)\n\n @property\n def minimum(self):\n \"\"\"Returns a NumPy array specifying the minimum bounds (inclusive).\"\"\"\n return self._minimum\n\n @property\n def maximum(self):\n \"\"\"Returns a NumPy array specifying the maximum bounds (inclusive).\"\"\"\n return self._maximum\n\n def __repr__(self):\n template = ('BoundedArraySpec(shape={}, dtype={}, name={}, '\n 'minimum={}, maximum={})')\n return template.format(self.shape, repr(self.dtype), repr(self.name),\n self._minimum, self._maximum)\n\n def __eq__(self, other):\n if not isinstance(other, BoundedArraySpec):\n return False\n return (super(BoundedArraySpec, self).__eq__(other) and\n (self.minimum == other.minimum).all() and\n (self.maximum == other.maximum).all())\n\n def validate(self, value):\n value = np.asarray(value)\n super(BoundedArraySpec, self).validate(value)\n if (value < self.minimum).any() or (value > self.maximum).any():\n self._fail_validation(\n 'Values were not all within bounds %s <= value <= %s',\n self.minimum, self.maximum)\n\n def generate_value(self):\n return (np.ones(shape=self.shape, dtype=self.dtype) *\n self.dtype.type(self.minimum))\n"
] | [
[
"numpy.asarray",
"numpy.dtype",
"numpy.ones",
"numpy.broadcast_to",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
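ArraySpec and BoundedArraySpec in the entry above are normally exercised indirectly by the environments; a short usage sketch, grounded only in the class definitions shown (the package import assumes the repo is on the Python path), illustrates shape/dtype validation and inclusive bounds checking:

import numpy as np
from ai_safety_gridworlds.environments.shared.rl import array_spec

spec = array_spec.BoundedArraySpec((3,), np.int32, minimum=0, maximum=2, name='action')
print(spec.generate_value())                         # filled with the minimum: [0 0 0]
spec.validate(np.array([0, 1, 2], dtype=np.int32))   # within bounds, passes silently
try:
    spec.validate(np.array([0, 1, 5], dtype=np.int32))
except ValueError as err:
    print(err)    # 'Values were not all within bounds 0 <= value <= 2 for spec action'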
WEYAI/NCRFpp | [
"c205e742a674bbacea9f5047cfb03b396c50264b"
] | [
"utils/data.py"
] | [
"# -*- coding: utf-8 -*-\n# @Author: Jie\n# @Date: 2017-06-14 17:34:32\n# @Last Modified by: Jie Yang, Contact: [email protected]\n# @Last Modified time: 2019-01-25 20:25:59\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport argparse\nimport sys\nimport os\nimport torch\nos.chdir(sys.path[0])\nsys.path.append(\"../../\")\nsys.path.append(\"../../../\")\nsys.path.append(\"../\")\nfrom utils.alphabet import Alphabet\nfrom utils.functions import *\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle as pickle\n\n\nSTART = \"</s>\"\nUNKNOWN = \"</unk>\"\nPADDING = \"</pad>\"\n\nclass Data:\n def __init__(self):\n self.sentence_classification = False\n self.MAX_SENTENCE_LENGTH = 250\n self.MAX_WORD_LENGTH = -1\n self.number_normalized = True\n self.norm_word_emb = False\n self.norm_char_emb = False\n \n self.word_alphabet = Alphabet('word')\n self.char_alphabet = Alphabet('character')\n \n # self.word_alphabet_pos = Alphabet('word')\n # self.char_alphabet_pos = Alphabet('character')\n \n self.feature_name = []\n self.feature_alphabets = []\n self.feature_num = len(self.feature_alphabets)\n self.feat_config = None\n\n\n self.ner_label_alphabet = Alphabet('ner_label',True)\n self.pos_label_alphabet = Alphabet('pos_label',True)\n self.chunk_label_alphabet = Alphabet('chunk_label',True)\n self.tagScheme = \"NoSeg\" ## BMES/BIO\n self.split_token = ' ||| '\n self.seg = True\n\n ### I/O\n self.train_dir = None\n self.dev_dir = None\n self.test_dir = None\n self.pos_train_dir = None\n self.pos_dev_dir = None\n self.pos_test_dir = None\n self.raw_dir = None\n\n self.decode_dir = None\n self.dset_dir = None ## data vocabulary related file\n self.model_dir = None ## model save file\n self.load_model_dir = None ## model load file\n\n self.word_emb_dir = None\n self.char_emb_dir = None\n self.feature_emb_dirs = []\n\n self.train_texts = []\n self.dev_texts = []\n self.test_texts = []\n self.raw_texts = []\n\n self.train_Ids = []\n self.dev_Ids = []\n self.test_Ids = []\n self.raw_Ids = []\n\n self.pretrain_word_embedding = None\n self.pretrain_char_embedding = None\n self.pretrain_feature_embeddings = []\n\n self.ner_label_size = 0\n self.pos_label_size = 0\n self.chunk_label_size = 0\n self.word_alphabet_size = 0\n self.char_alphabet_size = 0\n self.ner_label_alphabet_size = 0\n self.pos_label_alphabet_size = 0\n self.chunk_label_alphabet_size = 0\n self.feature_alphabet_sizes = []\n self.feature_emb_dims = []\n self.norm_feature_embs = []\n self.word_emb_dim = 50\n self.char_emb_dim = 30\n\n ###Networks\n self.word_feature_extractor = \"LSTM\" ## \"LSTM\"/\"CNN\"/\"GRU\"/\n self.use_char = True\n self.char_feature_extractor = \"CNN\" ## \"LSTM\"/\"CNN\"/\"GRU\"/None\n self.use_crf = True\n self.nbest = None\n\n ## Training\n self.average_batch_loss = False\n self.optimizer = \"SGD\" ## \"SGD\"/\"AdaGrad\"/\"AdaDelta\"/\"RMSProp\"/\"Adam\"\n self.status = \"train\"\n ### Hyperparameters\n self.HP_cnn_layer = 4\n self.HP_iteration = 100\n self.HP_batch_size = 10\n self.HP_char_hidden_dim = 50\n self.HP_hidden_dim = 200\n self.HP_dropout = 0.5\n self.HP_lstm_layer = 1\n self.HP_bilstm = True\n\n self.HP_gpu = False\n self.HP_lr = 0.015\n self.HP_lr_decay = 0.05\n self.HP_clip = None\n self.HP_momentum = 0\n self.HP_l2 = 1e-8\n\n def show_data_summary(self):\n \n print(\"++\"*50)\n print(\"DATA SUMMARY START:\")\n print(\" I/O:\")\n if self.sentence_classification:\n print(\" Start Sentence Classification task...\")\n else:\n print(\" Start Sequence 
Laebling task...\")\n print(\" Tag scheme: %s\"%(self.tagScheme))\n print(\" Split token: %s\"%(self.split_token))\n print(\" MAX SENTENCE LENGTH: %s\"%(self.MAX_SENTENCE_LENGTH))\n print(\" MAX WORD LENGTH: %s\"%(self.MAX_WORD_LENGTH))\n print(\" Number normalized: %s\"%(self.number_normalized))\n print(\" Word alphabet size: %s\"%(self.word_alphabet_size))\n print(\" Char alphabet size: %s\"%(self.char_alphabet_size))\n print(\" Label alphabet size: %s\"%(self.ner_label_alphabet_size))\n print(\" Label alphabet size: %s\"%(self.pos_label_alphabet_size))\n print(\" Label alphabet size: %s\"%(self.chunk_label_alphabet_size))\n print(\" Word embedding dir: %s\"%(self.word_emb_dir))\n print(\" Char embedding dir: %s\"%(self.char_emb_dir))\n print(\" Word embedding size: %s\"%(self.word_emb_dim))\n print(\" Char embedding size: %s\"%(self.char_emb_dim))\n print(\" Norm word emb: %s\"%(self.norm_word_emb))\n print(\" Norm char emb: %s\"%(self.norm_char_emb))\n print(\" Train file directory: %s\"%(self.train_dir))\n print(\" Dev file directory: %s\"%(self.dev_dir))\n print(\" Test file directory: %s\"%(self.test_dir))\n print(\" Raw file directory: %s\"%(self.raw_dir))\n print(\" Dset file directory: %s\"%(self.dset_dir))\n print(\" Model file directory: %s\"%(self.model_dir))\n print(\" Loadmodel directory: %s\"%(self.load_model_dir))\n print(\" Decode file directory: %s\"%(self.decode_dir))\n print(\" Train instance number: %s\"%(len(self.train_texts)))\n print(\" Dev instance number: %s\"%(len(self.dev_texts)))\n print(\" Test instance number: %s\"%(len(self.test_texts)))\n print(\" Raw instance number: %s\"%(len(self.raw_texts)))\n print(\" FEATURE num: %s\"%(self.feature_num))\n for idx in range(self.feature_num):\n print(\" Fe: %s alphabet size: %s\"%(self.feature_alphabets[idx].name, self.feature_alphabet_sizes[idx]))\n print(\" Fe: %s embedding dir: %s\"%(self.feature_alphabets[idx].name, self.feature_emb_dirs[idx]))\n print(\" Fe: %s embedding size: %s\"%(self.feature_alphabets[idx].name, self.feature_emb_dims[idx]))\n print(\" Fe: %s norm emb: %s\"%(self.feature_alphabets[idx].name, self.norm_feature_embs[idx]))\n print(\" \"+\"++\"*20)\n print(\" Model Network:\")\n print(\" Model use_crf: %s\"%(self.use_crf))\n print(\" Model word extractor: %s\"%(self.word_feature_extractor))\n print(\" Model use_char: %s\"%(self.use_char))\n if self.use_char:\n print(\" Model char extractor: %s\"%(self.char_feature_extractor))\n print(\" Model char_hidden_dim: %s\"%(self.HP_char_hidden_dim))\n print(\" \"+\"++\"*20)\n print(\" Training:\")\n print(\" Optimizer: %s\"%(self.optimizer))\n print(\" Iteration: %s\"%(self.HP_iteration))\n print(\" BatchSize: %s\"%(self.HP_batch_size))\n print(\" Average batch loss: %s\"%(self.average_batch_loss))\n\n print(\" \"+\"++\"*20)\n print(\" Hyperparameters:\")\n\n print(\" Hyper lr: %s\"%(self.HP_lr))\n print(\" Hyper lr_decay: %s\"%(self.HP_lr_decay))\n print(\" Hyper HP_clip: %s\"%(self.HP_clip))\n print(\" Hyper momentum: %s\"%(self.HP_momentum))\n print(\" Hyper l2: %s\"%(self.HP_l2))\n print(\" Hyper hidden_dim: %s\"%(self.HP_hidden_dim))\n print(\" Hyper dropout: %s\"%(self.HP_dropout))\n print(\" Hyper lstm_layer: %s\"%(self.HP_lstm_layer))\n print(\" Hyper bilstm: %s\"%(self.HP_bilstm))\n print(\" Hyper GPU: %s\"%(self.HP_gpu))\n print(\"DATA SUMMARY END.\")\n print(\"++\"*50)\n sys.stdout.flush()\n\n\n def initial_feature_alphabets(self):\n # \n if self.sentence_classification:\n ## if sentence classification data format, splited by '\\t'\n items = 
open(self.train_dir,'r').readline().strip('\\n').split('\\t')\n else:\n ## if sequence labeling data format i.e. CoNLL 2003, split by ' '\n items = open(self.train_dir,'r').readline().strip('\\n').split()\n total_column = len(items)\n if total_column > 2:\n for idx in range(1, total_column-1):\n feature_prefix = items[idx].split(']',1)[0]+\"]\"\n self.feature_alphabets.append(Alphabet(feature_prefix))\n self.feature_name.append(feature_prefix)\n print(\"Find feature: \", feature_prefix)\n self.feature_num = len(self.feature_alphabets)\n self.pretrain_feature_embeddings = [None]*self.feature_num\n self.feature_emb_dims = [20]*self.feature_num\n self.feature_emb_dirs = [None]*self.feature_num\n self.norm_feature_embs = [False]*self.feature_num\n self.feature_alphabet_sizes = [0]*self.feature_num\n if self.feat_config:\n for idx in range(self.feature_num):\n if self.feature_name[idx] in self.feat_config:\n self.feature_emb_dims[idx] = self.feat_config[self.feature_name[idx]]['emb_size']\n self.feature_emb_dirs[idx] = self.feat_config[self.feature_name[idx]]['emb_dir']\n self.norm_feature_embs[idx] = self.feat_config[self.feature_name[idx]]['emb_norm']\n # exit(0)\n\n\n def build_alphabet(self, input_file):\n in_lines = open(input_file,'r').readlines()\n for line in in_lines:\n if len(line) > 2:\n ## if sequence labeling data format i.e. CoNLL 2003\n pairs = line.strip().split()\n word = pairs[0]\n if word == '-DOCSTART-': \n # print(\"clear dirty data\") # clear dirty data\n continue\n if sys.version_info[0] < 3:\n word = word.decode('utf-8')\n if self.number_normalized:\n word = normalize_word(word)\n ner_label = pairs[3]\n pos_label = pairs[1]\n chunk_label = pairs[2]\n self.ner_label_alphabet.add(ner_label)\n self.pos_label_alphabet.add(pos_label)\n self.chunk_label_alphabet.add(chunk_label)\n\n ## build feature alphabet\n for idx in range(self.feature_num):\n feat_idx = pairs[idx+1].split(']',1)[-1]\n self.feature_alphabets[idx].add(feat_idx)\n for char in word:\n self.char_alphabet.add(char)\n \n self.word_alphabet_size = self.word_alphabet.size()\n self.char_alphabet_size = self.char_alphabet.size()\n self.pos_label_alphabet_size = self.pos_label_alphabet.size()\n self.chunk_label_alphabet_size = self.chunk_label_alphabet.size()\n self.ner_label_alphabet_size = self.ner_label_alphabet.size()\n for idx in range(self.feature_num):\n self.feature_alphabet_sizes[idx] = self.feature_alphabets[idx].size()\n startS = False\n startB = False\n for label,_ in self.ner_label_alphabet.iteritems():\n if \"S-\" in label.upper():\n startS = True\n elif \"B-\" in label.upper():\n startB = True\n for label,_ in self.chunk_label_alphabet.iteritems():\n if \"S-\" in label.upper():\n startS = True\n elif \"B-\" in label.upper():\n startB = True\n if startB:\n if startS:\n self.tagScheme = \"BMES\"\n else:\n self.tagScheme = \"BIO\"\n if self.sentence_classification:\n self.tagScheme = \"Not sequence labeling task\"\n\n\n def fix_alphabet(self):\n self.word_alphabet.close()\n self.char_alphabet.close()\n self.ner_label_alphabet.close()\n self.chunk_label_alphabet.close()\n self.pos_label_alphabet.close()\n for idx in range(self.feature_num):\n self.feature_alphabets[idx].close()\n\n\n def build_pretrain_emb(self):\n if self.word_emb_dir:\n print(\"Load pretrained word embedding, norm: %s, dir: %s\"%(self.norm_word_emb, self.word_emb_dir))\n self.pretrain_word_embedding, self.word_emb_dim = build_pretrain_embedding(self.word_emb_dir, self.word_alphabet, self.word_emb_dim, self.norm_word_emb)\n pass\n if 
self.char_emb_dir:\n print(\"Load pretrained char embedding, norm: %s, dir: %s\"%(self.norm_char_emb, self.char_emb_dir))\n self.pretrain_char_embedding, self.char_emb_dim = build_pretrain_embedding(self.char_emb_dir, self.char_alphabet, self.char_emb_dim, self.norm_char_emb)\n for idx in range(self.feature_num):\n if self.feature_emb_dirs[idx]:\n print(\"Load pretrained feature %s embedding:, norm: %s, dir: %s\"%(self.feature_name[idx], self.norm_feature_embs[idx], self.feature_emb_dirs[idx]))\n self.pretrain_feature_embeddings[idx], self.feature_emb_dims[idx] = build_pretrain_embedding(self.feature_emb_dirs[idx], self.feature_alphabets[idx], self.feature_emb_dims[idx], self.norm_feature_embs[idx])\n\n\n def generate_instance(self, name):\n self.fix_alphabet()\n if name == \"train\":\n self.train_texts, self.train_Ids = read_instance(self.train_dir, self.word_alphabet, self.char_alphabet, self.feature_alphabets, self.ner_label_alphabet,self.pos_label_alphabet,self.chunk_label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH, self.sentence_classification, self.split_token)\n elif name == \"dev\":\n self.dev_texts, self.dev_Ids = read_instance(self.dev_dir, self.word_alphabet, self.char_alphabet, self.feature_alphabets, self.ner_label_alphabet,self.pos_label_alphabet,self.chunk_label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH, self.sentence_classification, self.split_token)\n elif name == \"test\":\n self.test_texts, self.test_Ids = read_instance(self.test_dir, self.word_alphabet, self.char_alphabet, self.feature_alphabets, self.ner_label_alphabet,self.pos_label_alphabet,self.chunk_label_alphabet, self.number_normalized, self.MAX_SENTENCE_LENGTH, self.sentence_classification, self.split_token)\n elif name == \"raw\":\n self.raw_texts, self.raw_Ids = read_instance(self.raw_dir, self.word_alphabet, self.char_alphabet, self.feature_alphabets, self.ner_label_alphabet, self.pos_label_alphabet,self.chunk_label_alphabet,self.number_normalized, self.MAX_SENTENCE_LENGTH, self.sentence_classification, self.split_token)\n else:\n print(\"Error: you can only generate train/dev/test instance! Illegal input:%s\"%(name))\n\n\n def write_decoded_results(self, predict_results, name):\n \n sent_num = len(predict_results)\n content_list = []\n if name == 'raw':\n content_list = self.raw_texts\n elif name == 'test':\n content_list = self.test_texts\n elif name == 'dev':\n content_list = self.dev_texts\n elif name == 'train':\n content_list = self.train_texts\n else:\n print(\"Error: illegal name during writing predict result, name should be within train/dev/test/raw !\")\n assert(sent_num == len(content_list))\n fout = open(self.decode_dir,'w')\n for idx in range(sent_num):\n if self.sentence_classification:\n fout.write(\" \".join(content_list[idx][0])+\"\\t\"+predict_results[idx]+ '\\n')\n else:\n sent_length = len(predict_results[idx])\n for idy in range(sent_length):\n ## content_list[idx] is a list with [word, char, label]\n fout.write(content_list[idx][0][idy].encode('utf-8') + \" \" + predict_results[idx][idy] + '\\n')\n fout.write('\\n')\n fout.close()\n print(\"Predict %s result has been written into file. 
%s\"%(name, self.decode_dir))\n\n\n def load(self,data_file):\n f = open(data_file, 'rb')\n tmp_dict = pickle.load(f)\n f.close()\n self.__dict__.update(tmp_dict)\n\n def save(self,save_file):\n f = open(save_file, 'wb')\n pickle.dump(self.__dict__, f, 2)\n f.close()\n\n\n\n def write_nbest_decoded_results(self, predict_results, pred_scores, name):\n ## predict_results : [whole_sent_num, nbest, each_sent_length]\n ## pred_scores: [whole_sent_num, nbest]\n fout = open(self.decode_dir,'w')\n sent_num = len(predict_results)\n content_list = []\n if name == 'raw':\n content_list = self.raw_texts\n elif name == 'test':\n content_list = self.test_texts\n elif name == 'dev':\n content_list = self.dev_texts\n elif name == 'train':\n content_list = self.train_texts\n else:\n print(\"Error: illegal name during writing predict result, name should be within train/dev/test/raw !\")\n assert(sent_num == len(content_list))\n assert(sent_num == len(pred_scores))\n for idx in range(sent_num):\n sent_length = len(predict_results[idx][0])\n nbest = len(predict_results[idx])\n score_string = \"#\"\n for idz in range(nbest):\n score_string += format(pred_scores[idx][idz], '.4f')+\" \"\n fout.write(score_string.strip() + \"\\n\")\n\n for idy in range(sent_length):\n try: # Will fail with python3\n label_string = content_list[idx][0][idy].encode('utf-8') + \" \"\n except:\n label_string = content_list[idx][0][idy] + \" \"\n for idz in range(nbest):\n label_string += predict_results[idx][idz][idy]+\" \"\n label_string = label_string.strip() + \"\\n\"\n fout.write(label_string)\n fout.write('\\n')\n fout.close()\n print(\"Predict %s %s-best result has been written into file. %s\"%(name,nbest, self.decode_dir))\n\n\n def read_config(self,config_file):\n config = config_file_to_dict(config_file)\n ## read data:\n the_item = 'train_dir'\n if the_item in config:\n self.train_dir = config[the_item]\n the_item = 'dev_dir'\n if the_item in config:\n self.dev_dir = config[the_item]\n the_item = 'test_dir'\n if the_item in config:\n self.test_dir = config[the_item]\n the_item = 'raw_dir'\n if the_item in config:\n self.raw_dir = config[the_item]\n the_item = 'decode_dir'\n if the_item in config:\n self.decode_dir = config[the_item]\n the_item = 'dset_dir'\n if the_item in config:\n self.dset_dir = config[the_item]\n the_item = 'model_dir'\n if the_item in config:\n self.model_dir = config[the_item]\n the_item = 'load_model_dir'\n if the_item in config:\n self.load_model_dir = config[the_item]\n\n the_item = 'word_emb_dir'\n if the_item in config:\n self.word_emb_dir = config[the_item]\n the_item = 'char_emb_dir'\n if the_item in config:\n self.char_emb_dir = config[the_item]\n\n\n the_item = 'MAX_SENTENCE_LENGTH'\n if the_item in config:\n self.MAX_SENTENCE_LENGTH = int(config[the_item])\n the_item = 'MAX_WORD_LENGTH'\n if the_item in config:\n self.MAX_WORD_LENGTH = int(config[the_item])\n\n the_item = 'norm_word_emb'\n if the_item in config:\n self.norm_word_emb = str2bool(config[the_item])\n the_item = 'norm_char_emb'\n if the_item in config:\n self.norm_char_emb = str2bool(config[the_item])\n the_item = 'number_normalized'\n if the_item in config:\n self.number_normalized = str2bool(config[the_item])\n\n the_item = 'sentence_classification'\n if the_item in config:\n self.sentence_classification = str2bool(config[the_item])\n the_item = 'seg'\n if the_item in config:\n self.seg = str2bool(config[the_item])\n the_item = 'word_emb_dim'\n if the_item in config:\n self.word_emb_dim = int(config[the_item])\n the_item = 
'char_emb_dim'\n if the_item in config:\n self.char_emb_dim = int(config[the_item])\n\n ## read network:\n the_item = 'use_crf'\n if the_item in config:\n self.use_crf = str2bool(config[the_item])\n the_item = 'use_char'\n if the_item in config:\n self.use_char = str2bool(config[the_item])\n the_item = 'word_seq_feature'\n if the_item in config:\n self.word_feature_extractor = config[the_item]\n the_item = 'char_seq_feature'\n if the_item in config:\n self.char_feature_extractor = config[the_item]\n the_item = 'nbest'\n if the_item in config:\n self.nbest = int(config[the_item])\n\n the_item = 'feature'\n if the_item in config:\n self.feat_config = config[the_item] ## feat_config is a dict\n\n\n ## read training setting:\n the_item = 'optimizer'\n if the_item in config:\n self.optimizer = config[the_item]\n the_item = 'ave_batch_loss'\n if the_item in config:\n self.average_batch_loss = str2bool(config[the_item])\n the_item = 'status'\n if the_item in config:\n self.status = config[the_item]\n\n ## read Hyperparameters:\n the_item = 'cnn_layer'\n if the_item in config:\n self.HP_cnn_layer = int(config[the_item])\n the_item = 'iteration'\n if the_item in config:\n self.HP_iteration = int(config[the_item])\n the_item = 'batch_size'\n if the_item in config:\n self.HP_batch_size = int(config[the_item])\n\n the_item = 'char_hidden_dim'\n if the_item in config:\n self.HP_char_hidden_dim = int(config[the_item])\n the_item = 'hidden_dim'\n if the_item in config:\n self.HP_hidden_dim = int(config[the_item])\n the_item = 'dropout'\n if the_item in config:\n self.HP_dropout = float(config[the_item])\n the_item = 'lstm_layer'\n if the_item in config:\n self.HP_lstm_layer = int(config[the_item])\n the_item = 'bilstm'\n if the_item in config:\n self.HP_bilstm = str2bool(config[the_item])\n\n the_item = 'gpu'\n if the_item in config:\n self.HP_gpu = str2bool(config[the_item])\n the_item = 'learning_rate'\n if the_item in config:\n self.HP_lr = float(config[the_item])\n the_item = 'lr_decay'\n if the_item in config:\n self.HP_lr_decay = float(config[the_item])\n the_item = 'clip'\n if the_item in config:\n self.HP_clip = float(config[the_item])\n the_item = 'momentum'\n if the_item in config:\n self.HP_momentum = float(config[the_item])\n the_item = 'l2'\n if the_item in config:\n self.HP_l2 = float(config[the_item])\n ## no seg for sentence classification\n if self.sentence_classification:\n self.seg = False\n self.use_crf = False\n\n\ndef config_file_to_dict(input_file):\n config = {}\n fins = open(input_file,'r').readlines()\n for line in fins:\n if len(line) > 0 and line[0] == \"#\":\n continue\n if \"=\" in line:\n pair = line.strip().split('#',1)[0].split('=',1)\n item = pair[0]\n if item==\"feature\":\n if item not in config:\n feat_dict = {}\n config[item]= feat_dict\n feat_dict = config[item]\n new_pair = pair[-1].split()\n feat_name = new_pair[0]\n one_dict = {}\n one_dict[\"emb_dir\"] = None\n one_dict[\"emb_size\"] = 10\n one_dict[\"emb_norm\"] = False\n if len(new_pair) > 1:\n for idx in range(1,len(new_pair)):\n conf_pair = new_pair[idx].split('=')\n if conf_pair[0] == \"emb_dir\":\n one_dict[\"emb_dir\"]=conf_pair[-1]\n elif conf_pair[0] == \"emb_size\":\n one_dict[\"emb_size\"]=int(conf_pair[-1])\n elif conf_pair[0] == \"emb_norm\":\n one_dict[\"emb_norm\"]=str2bool(conf_pair[-1])\n feat_dict[feat_name] = one_dict\n # print \"feat\",feat_dict\n else:\n if item in config:\n print(\"Warning: duplicated config item found: %s, updated.\"%(pair[0]))\n config[item] = pair[-1]\n\n\n return 
config\n\n\ndef str2bool(string):\n if string == \"True\" or string == \"true\" or string == \"TRUE\":\n return True\n else:\n return False\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Tuning with NCRF++')\n # parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')\n parser.add_argument('--config', help='Configuration File', default='None')\n parser.add_argument('--wordemb', help='Embedding for words', default='None')\n parser.add_argument('--charemb', help='Embedding for chars', default='None')\n parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')\n parser.add_argument('--savemodel', default=\"data/model/saved_model.lstmcrf.\")\n parser.add_argument('--savedset', help='Dir of saved data setting')\n parser.add_argument('--train', default=\"data/conll03/train.bmes\") \n parser.add_argument('--dev', default=\"data/conll03/dev.bmes\" ) \n parser.add_argument('--test', default=\"data/conll03/test.bmes\") \n parser.add_argument('--seg', default=\"True\") \n parser.add_argument('--raw') \n parser.add_argument('--loadmodel')\n parser.add_argument('--output') \n\n args = parser.parse_args()\n args.config = '../demo.train.config'\n data = Data()\n data.HP_gpu = torch.cuda.is_available()\n if args.config == 'None':\n data.train_dir = args.train \n data.dev_dir = args.dev \n data.test_dir = args.test\n data.model_dir = args.savemodel\n data.dset_dir = args.savedset\n print(\"Save dset directory:\",data.dset_dir)\n save_model_dir = args.savemodel\n data.word_emb_dir = args.wordemb\n data.char_emb_dir = args.charemb\n if args.seg.lower() == 'true':\n data.seg = True\n else:\n data.seg = False\n else:\n data.read_config(args.config)\n # data.show_data_summary()\n status = data.status.lower()\n\n if status == 'train':\n print(\"MODEL: train\")\n # data_initialization(data)\n data.initial_feature_alphabets()\n data.build_alphabet(data.train_dir)\n data.build_alphabet(data.dev_dir)\n data.build_alphabet(data.test_dir)\n data.fix_alphabet()\n data.generate_instance('train')\n data.generate_instance('dev')\n data.generate_instance('test')\n print(data.chunk_label_alphabet.instance2index)\n print(data.pos_label_alphabet.instance2index)\n print(data.ner_label_alphabet.instance2index)\n # data.build_pretrain_emb()\n train(data)\n elif status == 'decode':\n print(\"MODEL: decode\")\n data.load(data.dset_dir)\n data.read_config(args.config)\n print(data.raw_dir)\n # exit(0)\n data.show_data_summary()\n # data.generate_instance('raw')\n print(\"nbest: %s\"%(data.nbest))\n decode_results, pred_scores = load_model_decode(data, 'test')\n if data.nbest and not data.sentence_classification:\n data.write_nbest_decoded_results(decode_results, pred_scores, 'test')\n else:\n data.write_decoded_results(decode_results, 'test')\n else:\n print(\"Invalid argument! Please use valid arguments! (train/test/decode)\")\n\n "
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
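read_config in the NCRFpp entry above consumes a plain key=value file (lines starting with '#' are skipped and inline '#' comments are stripped). The sketch below writes such a file and loads it; every path and hyperparameter value is illustrative, echoing the argparse defaults shown in the file rather than a real dataset:

# write a minimal key=value config of the kind config_file_to_dict() parses
config_text = (
    "train_dir=data/conll03/train.bmes\n"
    "dev_dir=data/conll03/dev.bmes\n"
    "test_dir=data/conll03/test.bmes\n"
    "model_dir=data/model/demo\n"
    "word_seq_feature=LSTM\n"
    "char_seq_feature=CNN\n"
    "use_crf=True\n"
    "use_char=True\n"
    "status=train\n"
    "iteration=100\n"
    "batch_size=10\n"
    "learning_rate=0.015\n"
)
with open("demo.train.config", "w") as f:
    f.write(config_text)

from utils.data import Data   # assumes utils/data.py above is importable
data = Data()
data.read_config("demo.train.config")
print(data.train_dir, data.HP_iteration, data.HP_lr, data.use_crf)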
stiebels/MPHYG001_assignment-1 | [
"4c786f2cd18839cb33ca1ea0ba8ac9fd19a7bf33"
] | [
"greengraph/cmd.py"
] | [
"from argparse import ArgumentParser\nfrom matplotlib import pyplot as plt\nfrom greengraph.Graph import Graph\n\n'''\nThis class implements the command line interface.\n'''\n\ndef runModule():\n parser = ArgumentParser(description='Generates a graph that displays the number of green pixels per step between two geographical locations.')\n parser.add_argument(dest='begin', help='Enter start location, e.g. \\'London\\'.')\n parser.add_argument(dest='end', help='Enter location of target destination, e.g. \\'Cambridge\\'.')\n parser.add_argument('-s', default=25, dest='steps', help='Steps between begin and end, e.g. \\'10\\'.', required=False)\n parser.add_argument('-p', default=None, dest='path', help='If specified, graph is saved to location specified here, e.g. \\'/home/user/graph.png\\'. Otherwise, graph is only displayed but not auto-saved.', required=False)\n\n\n args = parser.parse_args()\n plotGraph(args.begin, args.end, args.steps, args.path)\n\n\ndef plotGraph(begin, end, steps, path):\n mygraph = Graph(begin, end)\n data = mygraph.green_between(steps)\n plt.plot(data)\n if path:\n plt.savefig(path)\n else:\n plt.show()\n\n\nif __name__ == '__main__':\n runModule()"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
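cmd.py in the greengraph entry above is a thin argparse wrapper around plotGraph; a typical call, with example locations and an example output path (equivalent to running: python greengraph/cmd.py London Cambridge -s 10 -p graph.png), looks like this:

from greengraph.cmd import plotGraph

# locations, step count and output path are illustrative only
plotGraph(begin='London', end='Cambridge', steps=10, path='graph.png')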
marcodalessandro76/MPPI | [
"ad60b73270b1f376ac501d47285146f1c3af457a"
] | [
"mppi/Parsers/YamboOutputParser.py"
] | [
"\"\"\"\nModule that manages the parsing of a Yambo o- file(s).\n\"\"\"\n\nimport numpy as np\n\n# Specifies the name of the columns of the o- files for various type of runs. There are\n# two distint dictionaries depending if the ExtendOut option has been activated or not.\n\n# The rt outputs are not modified by the extendOut option\nrt_column_names = {\n 'carriers' : ['time','dnhmne','dnh','dne'],\n 'currents' : ['time','j_x','j_y','j_z'],\n 'polarization' : ['time','Pol_x','Pol_y','Pol_z'],\n 'spin_magnetization' :\n ['time','Ms_x','Ms_y','Ms_z','Mv_x','Mv_y','Mv_z','Mc_x','Mc_y','Mc_z'],\n 'orb_magnetization' :\n ['time','Ml_x','Ml_y','Ml_z','Mi_x','Mi_y','Mi_z'],\n 'external_field' :\n ['time','Ex_Re','Ey_Re','Ez_Re','Ex_Im','Ey_Im','Ez_Im','Profile','Intensity','Fluence']\n}\n\nreference_column_names_extendOut = {\n 'hf' : ['kpoint','band','e0','ehf','dft','hf'],\n 'qp' : ['kpoint','band','e0','e','eme0','dft','hf','sce0','sce','dsc_dwe0','z_Re','z_Im','width_mev','width_fs'],\n}\nreference_column_names_extendOut.update(rt_column_names)\n\nreference_column_names = {\n 'hf' : ['kpoint','band','e0','ehf','dft','hf'],\n 'qp' : ['kpoint','band','e0','eme0','sce0'],\n}\nreference_column_names.update(rt_column_names)\n\ndef file_to_list(filename,skip='#'):\n \"\"\"\n Read the filename and append all the lines that do not start\n with the skip string, to a list.\n\n Args:\n filename (str): name of the file\n skip (str): first elements of the skipped lines\n \"\"\"\n lines = []\n with open(filename) as f:\n for l in f:\n if not l.startswith(skip): lines.append(l)\n return lines\n\ndef _floats_from_string(line):\n \"\"\"\n Split a string using blank spaces and convert the elements to float. If an element\n cannot be converted it is skipped.\n \"\"\"\n line_float = []\n for value in line.split():\n try: line_float.append(float(value))\n except ValueError: pass\n return line_float\n\ndef build_columns(lines):\n \"\"\"\n Split each line of the output of file_to_list into a list and convert\n its elements to float. The procedure deletes the values that cannot be converted\n to float, for istance the string that specifies the high-symmetry points in the\n ypp bands_interpolated post-processing.\n Then transpose the array so that each element is a column of the data of the file.\n \"\"\"\n splitted = []\n for line in lines:\n splitted.append(_floats_from_string(line))\n\n columns = np.array(splitted).transpose()\n return columns\n\ndef make_dict(columns,suffix,extendOut):\n \"\"\"\n Create a dictionary from the columns array. If the suffix is found in the\n ref dictionary attribute to the keys the associated names, otherwise\n associate string value 'col'+str(ind), where ind is the column index starting\n from zero. 
The choice of the ref dictionary depends on the value of extendOut.\n\n Args:\n columns (:py:class:`array`) : array with the data sorted in columns\n suffix (string) : specifies the run level\n extendOut (bool) : specifies which dictionary has to be used as reference\n values of the columns names\n \"\"\"\n if extendOut:\n ref = reference_column_names_extendOut\n else:\n ref = reference_column_names\n data = {}\n for ind,col in enumerate(columns):\n if suffix in ref:\n key = ref[suffix][ind]\n else:\n key = 'col'+str(ind)\n data[key] = col\n return data\n\ndef files_from_folder(path):\n \"\"\"\n Scan the files in the folder and build a list with the names of all the files\n that contain the 'o-' term in their name.\n\n Args:\n path (string) : name of the folder\n \"\"\"\n import os\n listdir= os.listdir(path)\n ofiles = []\n for file in listdir:\n if 'o-' in file:\n ofiles.append(os.path.join(path,file))\n return ofiles\n\nclass YamboOutputParser(dict):\n \"\"\"\n Class that performs the parsing of a Yambo o- file(s). The class ineriths from :py:class:`dict`\n and the instance of the class is a dictionary with the data. The keys correspond to the extension\n of the parsed files\n\n Args:\n files (:py:class:`list`): The list of strings with the names of the file to be parsed\n verbose (:py:class:`boolean`) : Determine the amount of information provided on terminal\n extendOut (:py:class:`boolean`) : Determine which dictionary is used as reference for the\n names of the variables\n\n \"\"\"\n\n def __init__(self,files,verbose=True,extendOut=True):\n \"\"\"\n Initialize the data member of the class.\n \"\"\"\n dict.__init__(self)\n for file in files:\n suffix = file.rsplit('.')[-1]\n if verbose: print('Parse file',file)\n self.parseYamboOutput(file,suffix,extendOut)\n self[suffix] = self.parseYamboOutput(file,suffix,extendOut)\n\n @classmethod\n def from_path(cls,path,verbose = False, extendOut = True):\n \"\"\"\n Init the a :class:`YamboOutputParser` instance using all the 'o-' files found inside the path.\n\n Args:\n path (:py:class:`string`): name of the folder that contains the 'o-' files\n verbose (:py:class:`boolean`) : Determine the amount of information provided on terminal\n extendOut (:py:class:`boolean`) : Determine which dictionary is used as reference for the\n names of the variables\n \"\"\"\n files = files_from_folder(path)\n return cls(files,verbose=verbose,extendOut=extendOut)\n\n def parseYamboOutput(self,file,suffix,extendOut):\n \"\"\"\n Read the data from the o- file. Data of the file are stored as key : values\n in the self[suffix] dictionary. The names of the keys are taken from the\n reference_column_names or from the reference_column_names_extendOut (depending on\n the value of the boolean extendOut), if the suffix is recognized.\n \"\"\"\n lines = file_to_list(file)\n columns = build_columns(lines)\n return make_dict(columns,suffix,extendOut)\n\n def get_info(self):\n \"\"\"\n Provide information on the keys structure of the instance of the class\n \"\"\"\n print('YamboOutputParser variables structure')\n for key,value in self.items():\n print('suffix',key,'with',value.keys())\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
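YamboOutputParser in the entry above inherits from dict and keys its results by the o- file suffix; a short usage sketch (the run folder name is hypothetical) shows how the parsed columns are reached:

from mppi.Parsers.YamboOutputParser import YamboOutputParser

results = YamboOutputParser.from_path('QP_run', verbose=True)   # hypothetical folder of o- files
results.get_info()                  # prints each parsed suffix and its column names
if 'qp' in results:
    print(results['qp']['e0'])      # bare energies, per reference_column_names(_extendOut)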
gregjauvion/stylegan2-ada | [
"f6ef72305212b42f42d179b2a4b6d4629b14a4fd"
] | [
"torch_utils/ops/upfirdn2d.py"
] | [
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n\"\"\"Custom PyTorch ops for efficient resampling of 2D images.\"\"\"\n\nimport os\nimport sys\nimport warnings\nimport numpy as np\nimport torch\n\nfrom .. import custom_ops\nfrom .. import misc\nfrom . import conv2d_gradfix\n\n#----------------------------------------------------------------------------\n\n_inited = False\n_plugin = None\n\ndef _init():\n global _inited, _plugin\n if not _inited:\n sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']\n sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]\n try:\n _plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])\n except:\n warnings.warn('Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:\\n\\n' + str(sys.exc_info()[1]))\n return _plugin is not None\n\ndef _parse_scaling(scaling):\n if isinstance(scaling, int):\n scaling = [scaling, scaling]\n assert isinstance(scaling, (list, tuple))\n assert all(isinstance(x, int) for x in scaling)\n sx, sy = scaling\n assert sx >= 1 and sy >= 1\n return sx, sy\n\ndef _parse_padding(padding):\n if isinstance(padding, int):\n padding = [padding, padding]\n assert isinstance(padding, (list, tuple))\n assert all(isinstance(x, int) for x in padding)\n if len(padding) == 2:\n padx, pady = padding\n padding = [padx, padx, pady, pady]\n padx0, padx1, pady0, pady1 = padding\n return padx0, padx1, pady0, pady1\n\ndef _get_filter_size(f):\n if f is None:\n return 1, 1\n assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]\n fw = f.shape[-1]\n fh = f.shape[0]\n with misc.suppress_tracer_warnings():\n fw = int(fw)\n fh = int(fh)\n misc.assert_shape(f, [fh, fw][:f.ndim])\n assert fw >= 1 and fh >= 1\n return fw, fh\n\n#----------------------------------------------------------------------------\n\ndef setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):\n r\"\"\"Convenience function to setup 2D FIR filter for `upfirdn2d()`.\n\n Args:\n f: Torch tensor, numpy array, or python list of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable),\n `[]` (impulse), or\n `None` (identity).\n device: Result device (default: cpu).\n normalize: Normalize the filter so that it retains the magnitude\n for constant input signal (DC)? (default: True).\n flip_filter: Flip the filter? (default: False).\n gain: Overall scaling factor for signal magnitude (default: 1).\n separable: Return a separable filter? 
(default: select automatically).\n\n Returns:\n Float32 tensor of the shape\n `[filter_height, filter_width]` (non-separable) or\n `[filter_taps]` (separable).\n \"\"\"\n # Validate.\n if f is None:\n f = 1\n f = torch.as_tensor(f, dtype=torch.float32)\n assert f.ndim in [0, 1, 2]\n assert f.numel() > 0\n if f.ndim == 0:\n f = f[np.newaxis]\n\n # Separable?\n if separable is None:\n separable = (f.ndim == 1 and f.numel() >= 8)\n if f.ndim == 1 and not separable:\n f = f.ger(f)\n assert f.ndim == (1 if separable else 2)\n\n # Apply normalize, flip, gain, and device.\n if normalize:\n f /= f.sum()\n if flip_filter:\n f = f.flip(list(range(f.ndim)))\n f = f * (gain ** (f.ndim / 2))\n f = f.to(device=device)\n return f\n\n#----------------------------------------------------------------------------\n\ndef upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):\n r\"\"\"Pad, upsample, filter, and downsample a batch of 2D images.\n\n Performs the following sequence of operations for each channel:\n\n 1. Upsample the image by inserting N-1 zeros after each pixel (`up`).\n\n 2. Pad the image with the specified number of zeros on each side (`padding`).\n Negative padding corresponds to cropping the image.\n\n 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it\n so that the footprint of all output pixels lies within the input image.\n\n 4. Downsample the image by keeping every Nth pixel (`down`).\n\n This sequence of operations bears close resemblance to scipy.signal.upfirdn().\n The fused op is considerably more efficient than performing the same calculation\n using standard PyTorch ops. It supports gradients of arbitrary order.\n\n Args:\n x: Float32/float64/float16 input tensor of the shape\n `[batch_size, num_channels, in_height, in_width]`.\n f: Float32 FIR filter of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable), or\n `None` (identity).\n up: Integer upsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n down: Integer downsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n padding: Padding with respect to the upsampled image. Can be a single number\n or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n flip_filter: False = convolution, True = correlation (default: False).\n gain: Overall scaling factor for signal magnitude (default: 1).\n impl: Implementation to use. 
Can be `'ref'` or `'cuda'` (default: `'cuda'`).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n assert isinstance(x, torch.Tensor)\n assert impl in ['ref', 'cuda']\n if impl == 'cuda' and x.device.type == 'cuda' and _init():\n return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)\n return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)\n\n#----------------------------------------------------------------------------\n\[email protected]_function\ndef _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):\n \"\"\"Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.\n \"\"\"\n # Validate arguments.\n assert isinstance(x, torch.Tensor) and x.ndim == 4\n if f is None:\n f = torch.ones([1, 1], dtype=torch.float32, device=x.device)\n assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]\n assert f.dtype == torch.float32 and not f.requires_grad\n batch_size, num_channels, in_height, in_width = x.shape\n upx, upy = _parse_scaling(up)\n downx, downy = _parse_scaling(down)\n padx0, padx1, pady0, pady1 = _parse_padding(padding)\n\n # Upsample by inserting zeros.\n x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])\n x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])\n x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])\n\n # Pad or crop.\n x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])\n x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)]\n\n # Setup filter.\n f = f * (gain ** (f.ndim / 2))\n f = f.to(x.dtype)\n if not flip_filter:\n f = f.flip(list(range(f.ndim)))\n\n # Convolve with the filter.\n f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)\n if f.ndim == 4:\n x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)\n else:\n x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)\n x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)\n\n # Downsample by throwing away pixels.\n x = x[:, :, ::downy, ::downx]\n return x\n\n#----------------------------------------------------------------------------\n\n_upfirdn2d_cuda_cache = dict()\n\ndef _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):\n \"\"\"Fast CUDA implementation of `upfirdn2d()` using custom ops.\n \"\"\"\n # Parse arguments.\n upx, upy = _parse_scaling(up)\n downx, downy = _parse_scaling(down)\n padx0, padx1, pady0, pady1 = _parse_padding(padding)\n\n # Lookup from cache.\n key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)\n if key in _upfirdn2d_cuda_cache:\n return _upfirdn2d_cuda_cache[key]\n\n # Forward op.\n class Upfirdn2dCuda(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x, f): # pylint: disable=arguments-differ\n assert isinstance(x, torch.Tensor) and x.ndim == 4\n if f is None:\n f = torch.ones([1, 1], dtype=torch.float32, device=x.device)\n assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]\n y = x\n if f.ndim == 2:\n y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)\n else:\n y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))\n y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, np.sqrt(gain))\n 
ctx.save_for_backward(f)\n ctx.x_shape = x.shape\n return y\n\n @staticmethod\n def backward(ctx, dy): # pylint: disable=arguments-differ\n f, = ctx.saved_tensors\n _, _, ih, iw = ctx.x_shape\n _, _, oh, ow = dy.shape\n fw, fh = _get_filter_size(f)\n p = [\n fw - padx0 - 1,\n iw * upx - ow * downx + padx0 - upx + 1,\n fh - pady0 - 1,\n ih * upy - oh * downy + pady0 - upy + 1,\n ]\n dx = None\n df = None\n\n if ctx.needs_input_grad[0]:\n dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f)\n\n assert not ctx.needs_input_grad[1]\n return dx, df\n\n # Add to cache.\n _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda\n return Upfirdn2dCuda\n\n#----------------------------------------------------------------------------\n\ndef filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):\n r\"\"\"Filter a batch of 2D images using the given 2D FIR filter.\n\n By default, the result is padded so that its shape matches the input.\n User-specified padding is applied on top of that, with negative values\n indicating cropping. Pixels outside the image are assumed to be zero.\n\n Args:\n x: Float32/float64/float16 input tensor of the shape\n `[batch_size, num_channels, in_height, in_width]`.\n f: Float32 FIR filter of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable), or\n `None` (identity).\n padding: Padding with respect to the output. Can be a single number or a\n list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n flip_filter: False = convolution, True = correlation (default: False).\n gain: Overall scaling factor for signal magnitude (default: 1).\n impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n padx0, padx1, pady0, pady1 = _parse_padding(padding)\n fw, fh = _get_filter_size(f)\n p = [\n padx0 + fw // 2,\n padx1 + (fw - 1) // 2,\n pady0 + fh // 2,\n pady1 + (fh - 1) // 2,\n ]\n return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)\n\n#----------------------------------------------------------------------------\n\ndef upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):\n r\"\"\"Upsample a batch of 2D images using the given 2D FIR filter.\n\n By default, the result is padded so that its shape is a multiple of the input.\n User-specified padding is applied on top of that, with negative values\n indicating cropping. Pixels outside the image are assumed to be zero.\n\n Args:\n x: Float32/float64/float16 input tensor of the shape\n `[batch_size, num_channels, in_height, in_width]`.\n f: Float32 FIR filter of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable), or\n `None` (identity).\n up: Integer upsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n padding: Padding with respect to the output. Can be a single number or a\n list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n flip_filter: False = convolution, True = correlation (default: False).\n gain: Overall scaling factor for signal magnitude (default: 1).\n impl: Implementation to use. 
Can be `'ref'` or `'cuda'` (default: `'cuda'`).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n upx, upy = _parse_scaling(up)\n padx0, padx1, pady0, pady1 = _parse_padding(padding)\n fw, fh = _get_filter_size(f)\n p = [\n padx0 + (fw + upx - 1) // 2,\n padx1 + (fw - upx) // 2,\n pady0 + (fh + upy - 1) // 2,\n pady1 + (fh - upy) // 2,\n ]\n return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl)\n\n#----------------------------------------------------------------------------\n\ndef downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):\n r\"\"\"Downsample a batch of 2D images using the given 2D FIR filter.\n\n By default, the result is padded so that its shape is a fraction of the input.\n User-specified padding is applied on top of that, with negative values\n indicating cropping. Pixels outside the image are assumed to be zero.\n\n Args:\n x: Float32/float64/float16 input tensor of the shape\n `[batch_size, num_channels, in_height, in_width]`.\n f: Float32 FIR filter of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable), or\n `None` (identity).\n down: Integer downsampling factor. Can be a single int or a list/tuple\n `[x, y]` (default: 1).\n padding: Padding with respect to the input. Can be a single number or a\n list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n flip_filter: False = convolution, True = correlation (default: False).\n gain: Overall scaling factor for signal magnitude (default: 1).\n impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n downx, downy = _parse_scaling(down)\n padx0, padx1, pady0, pady1 = _parse_padding(padding)\n fw, fh = _get_filter_size(f)\n p = [\n padx0 + (fw - downx + 1) // 2,\n padx1 + (fw - downx) // 2,\n pady0 + (fh - downy + 1) // 2,\n pady1 + (fh - downy) // 2,\n ]\n return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)\n\n#----------------------------------------------------------------------------\n"
] | [
[
"torch.ones",
"numpy.sqrt",
"torch.device",
"torch.nn.functional.pad",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JustinSGray/OpenMDAO-CADRE | [
"d8378a8a571179990531d8a409efe727cbdf2bb7",
"d8378a8a571179990531d8a409efe727cbdf2bb7"
] | [
"src/CADRE/attitude.py",
"src/CADRE/rk4.py"
] | [
"''' Attitude discipline for CADRE '''\n\nimport numpy as np\n\nfrom openmdao.lib.datatypes.api import Float, Array\nfrom openmdao.main.api import Component\n\nfrom CADRE.kinematics import computepositionrotd, computepositionrotdjacobian\n\n# Allow non-standard variable names for scientific calc\n# pylint: disable-msg=C0103\n\n\nclass Attitude_Angular(Component):\n\n \"\"\" Calculates angular velocity vector from the satellite's orientation\n matrix and its derivative.\n \"\"\"\n\n def __init__(self, n=2):\n super(Attitude_Angular, self).__init__()\n\n self.n = n\n\n # Inputs\n self.add('O_BI', Array(np.zeros((3, 3, n)),\n iotype='in',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"Rotation matrix from body-fixed frame to Earth-centered inertial frame over time\"))\n\n self.add('Odot_BI', Array(np.zeros((3, 3, n)),\n iotype='in',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"First derivative of O_BI over time\"))\n\n # Outputs\n self.add('w_B', Array(np.zeros((3, n)),\n iotype='out',\n shape=(3, n),\n units=\"1/s\",\n desc=\"Angular velocity vector in body-fixed frame over time\"))\n\n self.dw_dOdot = np.zeros((n, 3, 3, 3))\n self.dw_dO = np.zeros((n, 3, 3, 3))\n\n\n def provideJ(self):\n \"\"\" Calculate and save derivatives. (i.e., Jacobian) \"\"\"\n\n for i in range(0, self.n):\n self.dw_dOdot[i, 0, 2, :] = self.O_BI[1,:, i]\n self.dw_dO[i, 0, 1, :] = self.Odot_BI[2,:, i]\n\n self.dw_dOdot[i, 1, 0, :] = self.O_BI[2,:, i]\n self.dw_dO[i, 1, 2, :] = self.Odot_BI[0,:, i]\n\n self.dw_dOdot[i, 2, 1, :] = self.O_BI[0,:, i]\n self.dw_dO[i, 2, 0, :] = self.Odot_BI[1,:, i]\n\n def execute(self):\n \"\"\" Calculate output. \"\"\"\n\n for i in range(0, self.n):\n self.w_B[0, i] = np.dot(self.Odot_BI[2, :, i], self.O_BI[1,:, i])\n self.w_B[1, i] = np.dot(self.Odot_BI[0, :, i], self.O_BI[2,:, i])\n self.w_B[2, i] = np.dot(self.Odot_BI[1, :, i], self.O_BI[0,:, i])\n\n def list_deriv_vars(self):\n input_keys = ('O_BI', 'Odot_BI',)\n output_keys = ('w_B',)\n return input_keys, output_keys\n\n def apply_deriv(self, arg, result):\n \"\"\" Matrix-vector product with the Jacobian. \"\"\"\n\n if 'w_B' in result:\n for k in xrange(3):\n for i in xrange(3):\n for j in xrange(3):\n if 'O_BI' in arg:\n result['w_B'][k, :] += self.dw_dO[:, k, i, j] * \\\n arg['O_BI'][i, j, :]\n if 'Odot_BI' in arg:\n result['w_B'][k, :] += self.dw_dOdot[:, k, i, j] * \\\n arg['Odot_BI'][i, j, :]\n\n def apply_derivT(self, arg, result):\n \"\"\" Matrix-vector product with the transpose of the Jacobian. \"\"\"\n\n if 'w_B' in arg:\n for k in xrange(3):\n for i in xrange(3):\n for j in xrange(3):\n if 'O_BI' in result:\n result['O_BI'][i, j, :] += self.dw_dO[:, k, i, j] * \\\n arg['w_B'][k, :]\n if 'Odot_BI' in result:\n result['Odot_BI'][i, j, :] += self.dw_dOdot[:, k, i, j] * \\\n arg['w_B'][k, :]\n\n\nclass Attitude_AngularRates(Component):\n\n \"\"\" Calculates time derivative of angular velocity vector.\n \"\"\"\n\n def __init__(self, n=2):\n super(Attitude_AngularRates, self).__init__()\n\n self.n = n\n\n # Inputs\n self.add('w_B', Array(np.zeros((3, n)),\n iotype='in',\n shape=(3, n),\n units=\"1/s\",\n desc=\"Angular velocity vector in body-fixed frame over time\"\n )\n )\n\n self.add('h', Float(28.8,\n iotype='in',\n units=\"s\",\n desc=\"Time step for RK4 integration\"\n )\n )\n\n # Outputs\n self.add(\n 'wdot_B',\n Array(\n np.zeros((3, n)),\n iotype='out',\n shape=(3, n),\n units=\"1/s**2\",\n desc=\"Time derivative of w_B over time\"\n )\n )\n\n def provideJ(self):\n \"\"\" Calculate and save derivatives. 
(i.e., Jacobian) \"\"\"\n\n # Calculation is fairly simple, so not cached.\n return\n\n def list_deriv_vars(self):\n input_keys = ('w_B', 'h',)\n output_keys = ('wdot_B',)\n return input_keys, output_keys\n\n def execute(self):\n \"\"\" Calculate output. \"\"\"\n\n for k in xrange(3):\n self.wdot_B[k, 0] = self.w_B[k, 1] / self.h\n self.wdot_B[k, 0] -= self.w_B[k, 0] / self.h\n self.wdot_B[k, 1:-1] = self.w_B[k, 2:] / 2.0 / self.h\n self.wdot_B[k, 1:-1] -= self.w_B[k, :-2] / 2.0 / self.h\n self.wdot_B[k, -1] = self.w_B[k, -1] / self.h\n self.wdot_B[k, -1] -= self.w_B[k, -2] / self.h\n\n def apply_deriv(self, arg, result):\n \"\"\" Matrix-vector product with the Jacobian. \"\"\"\n\n if 'w_B' in arg and 'wdot_B' in result:\n for k in xrange(3):\n result['wdot_B'][k, 0] += arg['w_B'][k, 1] / self.h\n result['wdot_B'][k, 0] -= arg['w_B'][k, 0] / self.h\n result['wdot_B'][k, 1:-1] += arg['w_B'][k, 2:] / 2.0 / self.h\n result['wdot_B'][k, 1:-1] -= arg['w_B'][k, :-2] / 2.0 / self.h\n result['wdot_B'][k, -1] += arg['w_B'][k, -1] / self.h\n result['wdot_B'][k, -1] -= arg['w_B'][k, -2] / self.h\n\n def apply_derivT(self, arg, result):\n \"\"\" Matrix-vector product with the transpose of the Jacobian. \"\"\"\n\n if 'wdot_B' in arg and 'w_B' in result:\n for k in xrange(3):\n result['w_B'][k, 1] += arg['wdot_B'][k, 0] / self.h\n result['w_B'][k, 0] -= arg['wdot_B'][k, 0] / self.h\n result['w_B'][k, 2:] += arg['wdot_B'][k, 1:-1] / 2.0 / self.h\n result['w_B'][k, :-2] -= arg['wdot_B'][k, 1:-1] / 2.0 / self.h\n result['w_B'][k, -1] += arg['wdot_B'][k, -1] / self.h\n result['w_B'][k, -2] -= arg['wdot_B'][k, -1] / self.h\n\n\nclass Attitude_Attitude(Component):\n\n \"\"\" Coordinate transformation from the interial plane to the rolled\n (forward facing) plane.\n \"\"\"\n dvx_dv = np.zeros((3, 3, 3))\n dvx_dv[0, :, 0] = (0., 0., 0.)\n dvx_dv[1, :, 0] = (0., 0., -1.)\n dvx_dv[2, :, 0] = (0., 1., 0.)\n\n dvx_dv[0, :, 1] = (0., 0., 1.)\n dvx_dv[1, :, 1] = (0., 0., 0.)\n dvx_dv[2, :, 1] = (-1., 0., 0.)\n\n dvx_dv[0, :, 2] = (0., -1., 0.)\n dvx_dv[1, :, 2] = (1., 0., 0.)\n dvx_dv[2, :, 2] = (0., 0., 0.)\n\n def __init__(self, n=2):\n super(Attitude_Attitude, self).__init__()\n\n self.n = n\n\n # Inputs\n self.add('r_e2b_I', Array(np.zeros((6, n)),\n iotype='in',\n shape=(6, n),\n units=\"unitless\",\n desc=\"Position and velocity vector from earth to satellite in Earth-centered inertial frame over time\"))\n\n # Outputs\n self.add('O_RI', Array(np.zeros((3, 3, n)),\n iotype='out',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"Rotation matrix from rolled body-fixed frame to Earth-centerd inertial frame over time\"))\n\n self.dO_dr = np.zeros((n, 3, 3, 6))\n\n def list_deriv_vars(self):\n input_keys = ('r_e2b_I',)\n output_keys = ('O_RI',)\n return input_keys, output_keys\n\n def provideJ(self):\n \"\"\" Calculate and save derivatives. 
(i.e., Jacobian) \"\"\"\n\n diB_dv = np.zeros((3, 3))\n djB_dv = np.zeros((3, 3))\n\n for i in range(0, self.n):\n\n r = self.r_e2b_I[0:3, i]\n v = self.r_e2b_I[3:, i]\n\n normr = np.sqrt(np.dot(r, r))\n normv = np.sqrt(np.dot(v, v))\n\n # Prevent overflow\n if normr < 1e-10:\n normr = 1e-10\n if normv < 1e-10:\n normv = 1e-10\n\n r = r / normr\n v = v / normv\n\n dr_dr = np.zeros((3, 3))\n dv_dv = np.zeros((3, 3))\n\n for k in range(0, 3):\n dr_dr[k, k] += 1.0 / normr\n dv_dv[k, k] += 1.0 / normv\n dr_dr[:, k] -= self.r_e2b_I[\n 0:3, i] * self.r_e2b_I[k, i] / normr ** 3\n dv_dv[:, k] -= self.r_e2b_I[\n 3:, i] * self.r_e2b_I[3 + k, i] / normv ** 3\n\n vx = np.zeros((3, 3))\n vx[0, :] = (0., -v[2], v[1])\n vx[1, :] = (v[2], 0., -v[0])\n vx[2, :] = (-v[1], v[0], 0.)\n\n iB = np.dot(vx, r)\n\n diB_dr = vx\n diB_dv[:, 0] = np.dot(self.dvx_dv[:, :, 0], r)\n diB_dv[:, 1] = np.dot(self.dvx_dv[:, :, 1], r)\n diB_dv[:, 2] = np.dot(self.dvx_dv[:, :, 2], r)\n\n djB_diB = -vx\n djB_dv[:, 0] = -np.dot(self.dvx_dv[:, :, 0], iB)\n djB_dv[:, 1] = -np.dot(self.dvx_dv[:, :, 1], iB)\n djB_dv[:, 2] = -np.dot(self.dvx_dv[:, :, 2], iB)\n\n self.dO_dr[i, 0, :, 0:3] = np.dot(diB_dr, dr_dr)\n self.dO_dr[i, 0, :, 3:] = np.dot(diB_dv, dv_dv)\n\n self.dO_dr[i, 1, :, 0:3] = np.dot(np.dot(djB_diB, diB_dr), dr_dr)\n self.dO_dr[i, 1, :, 3:] = np.dot(np.dot(djB_diB, diB_dv) + djB_dv,\n dv_dv)\n\n self.dO_dr[i, 2, :, 3:] = -dv_dv\n\n def execute(self):\n \"\"\" Calculate output. \"\"\"\n\n self.O_RI = np.zeros(self.O_RI.shape)\n for i in range(0, self.n):\n\n r = self.r_e2b_I[0:3, i]\n v = self.r_e2b_I[3:, i]\n\n normr = np.sqrt(np.dot(r, r))\n normv = np.sqrt(np.dot(v, v))\n\n # Prevent overflow\n if normr < 1e-10:\n normr = 1e-10\n if normv < 1e-10:\n normv = 1e-10\n\n r = r / normr\n v = v / normv\n\n vx = np.zeros((3, 3))\n vx[0, :] = (0., -v[2], v[1])\n vx[1, :] = (v[2], 0., -v[0])\n vx[2, :] = (-v[1], v[0], 0.)\n\n iB = np.dot(vx, r)\n jB = -np.dot(vx, iB)\n\n self.O_RI[0, :, i] = iB\n self.O_RI[1, :, i] = jB\n self.O_RI[2, :, i] = -v\n\n def apply_deriv(self, arg, result):\n \"\"\" Matrix-vector product with the Jacobian. \"\"\"\n\n if 'r_e2b_I' in arg and 'O_RI' in result:\n for k in xrange(3):\n for j in xrange(3):\n for i in xrange(6):\n result['O_RI'][k, j, :] += self.dO_dr[:, k, j, i] * \\\n arg['r_e2b_I'][i, :]\n\n def apply_derivT(self, arg, result):\n \"\"\" Matrix-vector product with the transpose of the Jacobian. \"\"\"\n\n if 'O_RI' in arg and 'r_e2b_I' in result:\n for k in xrange(3):\n for j in xrange(3):\n for i in xrange(6):\n result['r_e2b_I'][i, :] += self.dO_dr[:, k, j, i] * \\\n arg['O_RI'][k, j, :]\n\n\nclass Attitude_Roll(Component):\n\n \"\"\" Calculates the body-fixed orientation matrix.\n \"\"\"\n\n def __init__(self, n=2):\n super(Attitude_Roll, self).__init__()\n\n self.n = n\n\n # Inputs\n self.add('Gamma', Array(np.zeros(n),\n iotype='in',\n shape=(n,),\n units=\"rad\",\n desc=\"Satellite roll angle over time\"))\n\n # Outputs\n self.add('O_BR', Array(np.zeros((3, 3, n)),\n iotype='out',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"Rotation matrix from body-fixed frame to rolled body-fixed frame over time\"))\n\n self.dO_dg = np.zeros((n, 3, 3))\n\n\n def list_deriv_vars(self):\n input_keys = ('Gamma',)\n output_keys = ('O_BR',)\n return input_keys, output_keys\n\n def provideJ(self):\n \"\"\" Calculate and save derivatives. 
(i.e., Jacobian) \"\"\"\n\n self.dO_dg = np.zeros((self.n, 3, 3))\n self.dO_dg[:, 0, 0] = -np.sin(self.Gamma)\n self.dO_dg[:, 0, 1] = np.cos(self.Gamma)\n self.dO_dg[:, 1, 0] = -self.dO_dg[:, 0, 1]\n self.dO_dg[:, 1, 1] = self.dO_dg[:, 0, 0]\n\n def execute(self):\n \"\"\" Calculate output. \"\"\"\n\n self.O_BR = np.zeros((3, 3, self.n))\n self.O_BR[0, 0, :] = np.cos(self.Gamma)\n self.O_BR[0, 1, :] = np.sin(self.Gamma)\n self.O_BR[1, 0, :] = -self.O_BR[0, 1,:]\n self.O_BR[1, 1, :] = self.O_BR[0, 0,:]\n self.O_BR[2, 2, :] = np.ones(self.n)\n\n def apply_deriv(self, arg, result):\n \"\"\" Matrix-vector product with the Jacobian. \"\"\"\n\n if 'Gamma' in arg and 'O_BR' in result:\n for k in xrange(3):\n for j in xrange(3):\n result['O_BR'][k, j, :] += self.dO_dg[:, k, j] * \\\n arg['Gamma']\n\n def apply_derivT(self, arg, result):\n \"\"\" Matrix-vector product with the transpose of the Jacobian. \"\"\"\n\n if 'O_BR' in arg and 'Gamma' in result:\n for k in xrange(3):\n for j in xrange(3):\n result['Gamma'] += self.dO_dg[:, k, j] * \\\n arg['O_BR'][k, j, :]\n\n\nclass Attitude_RotationMtx(Component):\n\n \"\"\" Multiplies transformations to produce the orientation matrix of the\n body frame with respect to inertial.\n \"\"\"\n\n def __init__(self, n=2):\n super(Attitude_RotationMtx, self).__init__()\n\n self.n = n\n\n # Inputs\n self.add('O_BR', Array(np.zeros((3, 3, n)),\n iotype='in',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"Rotation matrix from body-fixed frame to rolled body-fixed frame over time\"\n ))\n\n self.add('O_RI', Array(np.zeros((3, 3, n)),\n iotype='in',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"Rotation matrix from rolled body-fixed frame to Earth-centered inertial frame over time\"\n ))\n\n # Outputs\n self.add('O_BI', Array(np.zeros((3, 3, n)),\n iotype='out',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"Rotation matrix from body-fixed frame to Earth-centered inertial frame over time\"\n ))\n\n def list_deriv_vars(self):\n input_keys = ('O_BR', 'O_RI',)\n output_keys = ('O_BI',)\n return input_keys, output_keys\n\n def provideJ(self):\n \"\"\" Calculate and save derivatives. (i.e., Jacobian) \"\"\"\n\n # Calculation is fairly simple, so not cached.\n return\n\n def execute(self):\n \"\"\" Calculate output. \"\"\"\n\n for i in range(0, self.n):\n self.O_BI[:, :, i] = np.dot(self.O_BR[:,:, i], self.O_RI[:,:, i])\n\n def apply_deriv(self, arg, result):\n \"\"\" Matrix-vector product with the Jacobian. \"\"\"\n\n if 'O_BI' in result:\n for u in xrange(3):\n for v in xrange(3):\n for k in xrange(3):\n if 'O_RI' in arg:\n result['O_BI'][u, v, :] += self.O_BR[u, k,:] * \\\n arg['O_RI'][k, v, :]\n if 'O_BR' in arg:\n result['O_BI'][u, v, :] += arg['O_BR'][u, k,:] * \\\n self.O_RI[k, v, :]\n\n def apply_derivT(self, arg, result):\n \"\"\" Matrix-vector product with the transpose of the Jacobian. 
\"\"\"\n\n if 'O_BI' in arg:\n for u in xrange(3):\n for v in xrange(3):\n for k in xrange(3):\n if 'O_RI' in result:\n result['O_RI'][k, v, :] += self.O_BR[u, k,:] * \\\n arg['O_BI'][u, v, :]\n if 'O_BR' in result:\n result['O_BR'][u, k, :] += arg['O_BI'][u, v,:] * \\\n self.O_RI[k, v, :]\n\n\nclass Attitude_RotationMtxRates(Component):\n\n \"\"\" Calculates time derivative of body frame orientation matrix.\n \"\"\"\n\n def __init__(self, n=2):\n super(Attitude_RotationMtxRates, self).__init__()\n\n self.n = n\n\n # Inputs\n self.add('h', Float(28.8,\n iotype='in',\n units=\"s\",\n desc=\"Time step for RK4 integration\"))\n\n self.add('O_BI', Array(np.zeros((3, 3, n)),\n iotype='in',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"Rotation matrix from body-fixed frame to Earth-centered inertial frame over time\"))\n\n # Outputs\n self.add('Odot_BI', Array(np.zeros((3, 3, n)),\n iotype='out',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"First derivative of O_BI over time\"))\n\n\n def list_deriv_vars(self):\n input_keys = ('h', 'O_BI',)\n output_keys = ('Odot_BI',)\n return input_keys, output_keys\n\n def provideJ(self):\n \"\"\" Calculate and save derivatives. (i.e., Jacobian) \"\"\"\n\n # Calculation is fairly simple, so not cached.\n return\n\n def execute(self):\n \"\"\" Calculate output. \"\"\"\n\n for k in range(3):\n for j in range(3):\n self.Odot_BI[k, j, 0] = self.O_BI[k, j, 1] / self.h\n self.Odot_BI[k, j, 0] -= self.O_BI[k, j, 0] / self.h\n self.Odot_BI[k, j, 1:-1] = self.O_BI[k, j, 2:] / 2.0 / self.h\n self.Odot_BI[k, j, 1:-1] -= self.O_BI[k, j, :-2] / 2.0 / self.h\n self.Odot_BI[k, j, -1] = self.O_BI[k, j, -1] / self.h\n self.Odot_BI[k, j, -1] -= self.O_BI[k, j, -2] / self.h\n\n def apply_deriv(self, arg, result):\n \"\"\" Matrix-vector product with the Jacobian. \"\"\"\n\n if 'O_BI' in arg and 'Odot_BI' in result:\n for k in xrange(3):\n for j in xrange(3):\n result['Odot_BI'][k, j, 0] += arg['O_BI'][k, j, 1] / self.h\n result['Odot_BI'][k, j, 0] -= arg['O_BI'][k, j, 0] / self.h\n result['Odot_BI'][k, j, 1:-1] += arg['O_BI'][k, j, 2:] / \\\n 2.0 / self.h\n result['Odot_BI'][k, j, 1:-1] -= arg['O_BI'][k, j, :-2] / \\\n 2.0 / self.h\n result['Odot_BI'][k, j, -1] += arg['O_BI'][k, j, -1] / \\\n self.h\n result['Odot_BI'][k, j, -1] -= arg['O_BI'][k, j, -2] / \\\n self.h\n\n def apply_derivT(self, arg, result):\n \"\"\" Matrix-vector product with the transpose of the Jacobian. 
\"\"\"\n\n if 'Odot_BI' in arg and 'O_BI' in result:\n for k in xrange(3):\n for j in xrange(3):\n result['O_BI'][k, j, 1] += arg['Odot_BI'][k, j, 0] / self.h\n result['O_BI'][k, j, 0] -= arg['Odot_BI'][k, j, 0] / self.h\n result['O_BI'][k, j, 2:] += arg['Odot_BI'][k, j, 1:-1] / \\\n 2.0 / self.h\n result['O_BI'][k, j, :-2] -= arg['Odot_BI'][k, j, 1:-1] / \\\n 2.0 / self.h\n result['O_BI'][k, j, -1] += arg['Odot_BI'][k, j, -1] / \\\n self.h\n result['O_BI'][k, j, -2] -= arg['Odot_BI'][k, j, -1] / \\\n self.h\n\n\nclass Attitude_Sideslip(Component):\n\n \"\"\" Determine velocity in the body frame.\"\"\"\n\n def __init__(self, n=2):\n super(Attitude_Sideslip, self).__init__()\n self.n = n\n\n # Inputs\n self.add('r_e2b_I', Array(np.zeros((6, n)),\n iotype='in',\n shape=(6, n),\n units=\"unitless\",\n desc=\"Position and velocity vector from earth to satellite in Earth-centered inertial frame over time\"\n ))\n\n self.add('O_BI', Array(np.zeros((3, 3, n)),\n iotype='in',\n shape=(3, 3, n),\n units=\"unitless\",\n desc=\"Rotation matrix from body-fixed frame to Earth-centered inertial frame over time\"))\n\n # Outputs\n self.add('v_e2b_B', Array(np.zeros((3, n)),\n iotype='out',\n shape=(3, n),\n units=\"m/s\",\n desc=\"Velocity vector from earth to satellite in body-fixed frame over time\"))\n\n\n def list_deriv_vars(self):\n input_keys = ('r_e2b_I', 'O_BI',)\n output_keys = ('v_e2b_B',)\n return input_keys, output_keys\n\n def provideJ(self):\n \"\"\" Calculate and save derivatives. (i.e., Jacobian) \"\"\"\n\n self.J1, self.J2 = computepositionrotdjacobian(self.n,\n self.r_e2b_I[3:, :],\n self.O_BI)\n\n def execute(self):\n \"\"\" Calculate output. \"\"\"\n\n self.v_e2b_B = computepositionrotd(self.n, self.r_e2b_I[3:, :],\n self.O_BI)\n\n def apply_deriv(self, arg, result):\n \"\"\" Matrix-vector product with the Jacobian. \"\"\"\n\n if 'v_e2b_B' in result:\n for k in xrange(3):\n if 'O_BI' in arg:\n for u in xrange(3):\n for v in xrange(3):\n result['v_e2b_B'][k, :] += self.J1[:, k, u, v] * \\\n arg['O_BI'][u, v, :]\n if 'r_e2b_I' in arg:\n for j in xrange(3):\n result['v_e2b_B'][k, :] += self.J2[:, k, j] * \\\n arg['r_e2b_I'][3+j, :]\n\n def apply_derivT(self, arg, result):\n \"\"\" Matrix-vector product with the transpose of the Jacobian. 
\"\"\"\n\n if 'v_e2b_B' in arg:\n for k in xrange(3):\n if 'O_BI' in result:\n for u in xrange(3):\n for v in xrange(3):\n result['O_BI'][u, v, :] += self.J1[:, k, u, v] * \\\n arg['v_e2b_B'][k, :]\n if 'r_e2b_I' in result:\n for j in xrange(3):\n result['r_e2b_I'][3+j, :] += self.J2[:, k, j] * \\\n arg['v_e2b_B'][k, :]\n\n\nclass Attitude_Torque(Component):\n\n \"\"\" Compute the required reaction wheel tourque.\"\"\"\n\n J = np.zeros((3, 3))\n J[0, :] = (0.018, 0., 0.)\n J[1, :] = (0., 0.018, 0.)\n J[2, :] = (0., 0., 0.006)\n\n def __init__(self, n=2):\n super(Attitude_Torque, self).__init__()\n\n self.n = n\n\n # Inputs\n self.add('w_B', Array(np.zeros((3, n)),\n iotype='in',\n shape=(3, n),\n units=\"1/s\",\n desc=\"Angular velocity in body-fixed frame over time\"))\n\n self.add('wdot_B', Array(np.zeros((3, n)),\n iotype='in',\n shape=(3, n),\n units=\"1/s**2\",\n desc=\"Time derivative of w_B over time\"))\n\n # Outputs\n self.add('T_tot', Array(np.zeros((3, n)),\n iotype='out',\n shape=(3, n),\n units=\"N*m\",\n desc=\"Total reaction wheel torque over time\"))\n\n self.dT_dwdot = np.zeros((n, 3, 3))\n self.dwx_dw = np.zeros((3, 3, 3))\n\n self.dwx_dw[0, :, 0] = (0., 0., 0.)\n self.dwx_dw[1, :, 0] = (0., 0., -1.)\n self.dwx_dw[2, :, 0] = (0., 1., 0.)\n\n self.dwx_dw[0, :, 1] = (0., 0., 1.)\n self.dwx_dw[1, :, 1] = (0., 0., 0.)\n self.dwx_dw[2, :, 1] = (-1., 0, 0.)\n\n self.dwx_dw[0, :, 2] = (0., -1., 0)\n self.dwx_dw[1, :, 2] = (1., 0., 0.)\n self.dwx_dw[2, :, 2] = (0., 0., 0.)\n\n def list_deriv_vars(self):\n input_keys = ('w_B', 'wdot_B',)\n output_keys = ('T_tot',)\n return input_keys, output_keys\n\n def provideJ(self):\n \"\"\" Calculate and save derivatives. (i.e., Jacobian) \"\"\"\n\n self.dT_dw = np.zeros((self.n, 3, 3))\n wx = np.zeros((3, 3))\n\n for i in range(0, self.n):\n wx[0, :] = (0., -self.w_B[2, i], self.w_B[1, i])\n wx[1, :] = (self.w_B[2, i], 0., -self.w_B[0, i])\n wx[2, :] = (-self.w_B[1, i], self.w_B[0, i], 0.)\n\n self.dT_dwdot[i, :,:] = self.J\n self.dT_dw[i, :,:] = np.dot(wx, self.J)\n\n for k in range(0, 3):\n self.dT_dw[i, :, k] += np.dot(self.dwx_dw[:,:, k],\n np.dot(self.J, self.w_B[:, i]))\n\n def execute(self):\n \"\"\" Calculate output. \"\"\"\n\n wx = np.zeros((3, 3))\n for i in range(0, self.n):\n wx[0, :] = (0., -self.w_B[2, i], self.w_B[1, i])\n wx[1, :] = (self.w_B[2, i], 0., -self.w_B[0, i])\n wx[2, :] = (-self.w_B[1, i], self.w_B[0, i], 0.)\n self.T_tot[:, i] = np.dot(self.J, self.wdot_B[:, i]) + \\\n np.dot(wx, np.dot(self.J, self.w_B[:, i]))\n\n def apply_deriv(self, arg, result):\n \"\"\" Matrix-vector product with the Jacobian. \"\"\"\n\n if 'T_tot' in result:\n for k in xrange(3):\n for j in xrange(3):\n if 'w_B' in arg:\n result['T_tot'][k, :] += self.dT_dw[:, k, j] * \\\n arg['w_B'][j, :]\n if 'wdot_B' in arg:\n result['T_tot'][k, :] += self.dT_dwdot[:, k, j] * \\\n arg['wdot_B'][j, :]\n\n def apply_derivT(self, arg, result):\n \"\"\" Matrix-vector product with the transpose of the Jacobian. \"\"\"\n\n if 'T_tot' in arg:\n for k in xrange(3):\n for j in xrange(3):\n if 'w_B' in result:\n result['w_B'][j, :] += self.dT_dw[:, k, j] * \\\n arg['T_tot'][k, :]\n if 'wdot_B' in result:\n result['wdot_B'][j,:] += self.dT_dwdot[:, k, j] * \\\n arg['T_tot'][k,:]\n",
"\"\"\" RK4 time integration component \"\"\"\n\nimport numpy as np\nimport scipy.sparse\nimport scipy.sparse.linalg\n\nfrom openmdao.main.api import Component\nfrom openmdao.lib.datatypes.api import Float, Array, Str\n\n# Allow non-standard variable names for scientific calc\n# pylint: disable-msg=C0103\n\n\nclass RK4(Component):\n \"\"\"Inherit from this component to use.\n\n State variable dimension: (num_states, num_time_points)\n\n External input dimension: (input width, num_time_points)\n \"\"\"\n\n h = Float(.01, units=\"s\", iotype=\"in\",\n desc=\"Time step used for RK4 integration\")\n\n state_var = Str(\"\", iotype=\"in\",\n desc=\"Name of the variable to be used for time \"\n \"integration\")\n\n init_state_var = Str(\"\", iotype=\"in\",\n desc=\"Name of the variable to be used for initial \"\n \"conditions\")\n\n external_vars = Array([], iotype=\"in\", dtype=str,\n desc=\"List of names of variables that are external \"\n \"to the system but DO vary with time.\")\n\n fixed_external_vars = Array([], iotype=\"in\", dtype=str,\n desc=\"List of names of variables that are \"\n \"external to the system but DO NOT \"\n \"vary with time.\")\n\n def initialize(self):\n \"\"\"Set up dimensions and other data structures.\"\"\"\n\n self.y = self.get(self.state_var)\n self.y0 = self.get(self.init_state_var)\n\n self.n_states, self.n = self.y.shape\n self.ny = self.n_states*self.n\n self.nJ = self.n_states*(self.n + self.n_states*(self.n-1))\n\n ext = []\n self.ext_index_map = {}\n for e in self.external_vars:\n var = self.get(e)\n self.ext_index_map[e] = len(ext)\n\n #TODO: Check that shape[-1]==self.n\n ext.extend(var.reshape(-1, self.n))\n\n\n for e in self.fixed_external_vars:\n var = self.get(e)\n self.ext_index_map[e] = len(ext)\n\n flat_var = var.flatten()\n #create n copies of the var\n ext.extend(np.tile(flat_var,(self.n, 1)).T)\n\n self.external = np.array(ext)\n\n #TODO\n #check that len(y0) = self.n_states\n\n self.n_external = len(ext)\n self.reverse_name_map = {\n self.state_var:'y',\n self.init_state_var:'y0'\n }\n e_vars = np.hstack((self.external_vars, self.fixed_external_vars))\n for i, var in enumerate(e_vars):\n self.reverse_name_map[var] = i\n\n self.name_map = dict([(v, k) for k, v in\n self.reverse_name_map.iteritems()])\n\n\n #TODO\n # check that all ext arrays of of shape (self.n, )\n\n #TODO\n #check that length of state var and external\n # vars are the same length\n\n def f_dot(self, external, state):\n \"\"\"Time rate of change of state variables.\n\n external: array or external variables for a single time step\n\n\t state: array of state variables for a single time step.\n\n This must be overridden in derived classes.\n \"\"\"\n raise NotImplementedError\n\n def df_dy(self, external, state):\n \"\"\"Derivatives of states with respect to states.\n\n external: array or external variables for a single time step\n\n state: array of state variables for a single time step.\n\n This must be overridden in derived classes.\n \"\"\"\n\n raise NotImplementedError\n\n def df_dx(self, external, state):\n \"\"\"derivatives of states with respect to external vars\n external: array or external variables for a single time step\n state: array of state variables for a single time step.\n\n This must be overridden in derived classes.\n \"\"\"\n raise NotImplementedError\n\n def execute(self):\n \"\"\"Solve for the states at all time integration points.\"\"\"\n\n self.initialize()\n\n n_state = self.n_states\n n_time = self.n\n h = self.h\n\n # Copy initial state into state array 
for t=0\n self.y = self.y.reshape((self.ny, ))\n self.y[0:n_state] = self.y0\n\n # Cache f_dot for use in linearize()\n size = (n_state, self.n)\n self.a = np.zeros(size)\n self.b = np.zeros(size)\n self.c = np.zeros(size)\n self.d = np.zeros(size)\n\n for k in xrange(0, n_time-1):\n k1 = (k)*n_state\n k2 = (k+1)*n_state\n\n # Next state a function of current input\n ex = self.external[:, k] if self.external.shape[0] \\\n else np.array([])\n\n # Next state a function of previous state\n y = self.y[k1:k2]\n\n self.a[:, k] = a = self.f_dot(ex, y)\n self.b[:, k] = b = self.f_dot(ex, y + h/2.*a)\n self.c[:, k] = c = self.f_dot(ex, y + h/2.*b)\n self.d[:, k] = d = self.f_dot(ex, y + h*c)\n\n self.y[n_state+k1:n_state+k2] = \\\n y + h/6.*(a + 2*(b + c) + d)\n\n state_var_name = self.name_map['y']\n setattr(self, state_var_name,\n self.y.T.reshape((n_time, n_state)).T)\n\n #print \"executed\", self.name\n\n def provideJ(self):\n \"\"\"Linearize about current point.\"\"\"\n\n n_state = self.n_states\n n_time = self.n\n h = self.h\n I = np.eye(n_state)\n\n # Sparse Jacobian with respect to states\n #self.Ja = np.zeros((self.nJ, ))\n #self.Ji = np.zeros((self.nJ, ))\n #self.Jj = np.zeros((self.nJ, ))\n\n # Full Jacobian with respect to states\n self.Jy = np.zeros((self.n, self.n_states, self.n_states))\n\n # Full Jacobian with respect to inputs\n self.Jx = np.zeros((self.n, self.n_external, self.n_states))\n\n #self.Ja[:self.ny] = np.ones((self.ny, ))\n #self.Ji[:self.ny] = np.arange(self.ny)\n #self.Jj[:self.ny] = np.arange(self.ny)\n\n for k in xrange(0, n_time-1):\n\n k1 = k*n_state\n k2 = k1 + n_state\n\n ex = self.external[:, k] if self.external.shape[0] \\\n else np.array([])\n y = self.y[k1:k2]\n\n a = self.a[:, k]\n b = self.b[:, k]\n c = self.c[:, k]\n\n # State vars\n df_dy = self.df_dy(ex, y)\n dg_dy = self.df_dy(ex, y + h/2.*a)\n dh_dy = self.df_dy(ex, y + h/2.*b)\n di_dy = self.df_dy(ex, y + h*c)\n\n da_dy = df_dy\n db_dy = dg_dy + dg_dy.dot(h/2.*da_dy)\n dc_dy = dh_dy + dh_dy.dot(h/2.*db_dy)\n dd_dy = di_dy + di_dy.dot(h*dc_dy)\n\n dR_dy = -I - self.h/6.*(da_dy + 2*(db_dy + dc_dy) + dd_dy)\n self.Jy[k, :, :] = dR_dy\n\n #for i in xrange(n_state):\n #for j in xrange(n_state):\n #iJ = self.ny + i + n_state*(j + k1)\n #self.Ja[iJ] = dR_dy[i, j]\n ##self.Ji[iJ] = k2 + i\n ##self.Jj[iJ] = k1 + j\n #self.Ji[iJ] = i*n_time + k + 1\n #self.Jj[iJ] = j*n_time + k\n\n ##print self.Ji[iJ], self.Jj[iJ], self.Ja[iJ]\n\n # External vars (Inputs)\n df_dx = self.df_dx(ex, y)\n dg_dx = self.df_dx(ex, y + h/2.*a)\n dh_dx = self.df_dx(ex, y + h/2.*b)\n di_dx = self.df_dx(ex, y + h*c)\n\n da_dx = df_dx\n db_dx = dg_dx + dg_dy.dot(h/2*da_dx)\n dc_dx = dh_dx + dh_dy.dot(h/2*db_dx)\n dd_dx = di_dx + di_dy.dot(h*dc_dx)\n\n # Input-State Jacobian at each time point.\n # No Jacobian with respect to previous time points.\n self.Jx[k+1, :, :] = h/6*(da_dx + 2*(db_dx + dc_dx) + dd_dx).T\n\n #self.J = scipy.sparse.csc_matrix((self.Ja, (self.Ji, self.Jj)),\n #shape=(self.ny, self.ny))\n #self.JT = self.J.transpose()\n #self.Minv = scipy.sparse.linalg.splu(self.J).solve\n\n\n def apply_deriv(self, arg, result):\n \"\"\" Matrix-vector product with the Jacobian. 
\"\"\"\n\n #result = self._applyJint(arg, result)\n result_ext = self._applyJext(arg)\n\n svar = self.state_var\n if svar in result:\n result[svar] += result_ext\n else:\n result[svar] = result_ext\n\n # TODO - Uncommment this when it is supported in OpenMDAO.\n #def applyMinv(self, arg, result):\n #\"\"\"Apply derivatives with respect to state variables.\"\"\"\n\n #state = self.state_var\n\n #if self.state_var in arg:\n #flat_y = arg[state].flatten()\n #result[state] = self.Minv(flat_y).reshape((self.n_states, self.n))\n\n #return result\n\n\n #def _applyMinvT(self, arg, result):\n #\"\"\"Apply derivatives with respect to state variables.\"\"\"\n\n #state = self.state_var\n #z = result.copy()\n #if self.state_var in arg:\n #flat_y = arg[state].flatten()\n #result[state] = self.Minv(flat_y, 'T').reshape((self.n_states, self.n))\n\n #return result\n\n\n def _applyJint(self, arg, result):\n \"\"\"Apply derivatives with respect to state variables.\"\"\"\n\n res1 = dict([(self.reverse_name_map[k], v)\n for k, v in result.iteritems()])\n\n state = self.state_var\n if state in arg:\n flat_y = arg[state].reshape((self.n_states*self.n))\n result[\"y\"] = self.J.dot(flat_y).reshape((self.n_states, self.n))\n\n res1 = dict([(self.name_map[k],v) for k, v in res1.iteritems()])\n return res1\n\n def _applyJext(self, arg):\n \"\"\"Apply derivatives with respect to inputs\"\"\"\n\n #Jx --> (n_times, n_external, n_states)\n n_state = self.n_states\n n_time = self.n\n result = np.zeros((n_state, n_time))\n\n # Time-varying inputs\n for name in self.external_vars:\n\n if name not in arg:\n continue\n\n # take advantage of fact that arg is often pretty sparse\n if len(np.nonzero(arg[name])[0]) == 0:\n continue\n\n # Collapse incoming a*b*...*c*n down to (ab...c)*n\n var = self.get(name)\n shape = var.shape\n arg[name] = arg[name].reshape((np.prod(shape[:-1]),\n shape[-1]))\n\n i_ext = self.ext_index_map[name]\n ext_length = np.prod(arg[name][:, 0].shape)\n for j in xrange(n_time-1):\n Jsub = self.Jx[j+1, i_ext:i_ext+ext_length, :]\n J_arg = Jsub.T.dot(arg[name][:, j])\n result[:, j+1:n_time] += np.tile(J_arg, (n_time-j-1, 1)).T\n\n # Time-invariant inputs\n for name in self.fixed_external_vars:\n\n if name not in arg:\n continue\n\n # take advantage of fact that arg is often pretty sparse\n if len(np.nonzero(arg[name])[0]) == 0:\n continue\n\n ext_var = getattr(self, name)\n if len(ext_var) > 1:\n arg[name] = arg[name].flatten()\n i_ext = self.ext_index_map[name]\n ext_length = np.prod(ext_var.shape)\n for j in xrange(n_time-1):\n Jsub = self.Jx[j+1, i_ext:i_ext+ext_length, :]\n J_arg = Jsub.T.dot(arg[name])\n result[:, j+1:n_time] += np.tile(J_arg, (n_time-j-1, 1)).T\n\n # Initial State\n name = self.init_state_var\n if name in arg:\n\n # take advantage of fact that arg is often pretty sparse\n if len(np.nonzero(arg[name])[0]) > 0:\n fact = np.eye(self.n_states)\n result[:, 0] = arg[name]\n for j in xrange(1, n_time):\n fact = fact.dot(-self.Jy[j-1, :, :])\n result[:, j] += fact.dot(arg[name])\n\n return result\n\n def apply_derivT(self, arg, result):\n \"\"\" Matrix-vector product with the transpose of the Jacobian. 
\"\"\"\n\n mode = 'Ken'\n\n if mode == 'Ken':\n\n r2 = self._applyJextT_limited(arg, result)\n\n for k, v in r2.iteritems():\n if k in result and result[k] is not None:\n result[k] += v\n else:\n result[k] = v\n\n elif mode == 'John':\n\n r2 = self._applyJextT(arg, result)\n r1 = self.applyJintT(arg, result)\n\n for k, v in r2.iteritems():\n if k in result and result[k] is not None:\n result[k] += v\n else:\n result[k] = v\n\n if self.state_var in r1:\n result[self.state_var] = r1[self.state_var]\n\n if self.init_state_var in r1:\n result[self.init_state_var] = r1[self.init_state_var]\n\n else:\n raise RuntimeError('Pick Ken or John')\n\n\n def applyJintT(self, arg, required_results):\n \"\"\"Apply derivatives with respect to state variables.\"\"\"\n\n result = {}\n state = self.state_var\n init_state = self.init_state_var\n\n if state in arg:\n if state in required_results:\n flat_y = arg[state].flatten()\n result[state] = -self.JT.dot(flat_y).reshape((self.n_states, self.n))\n\n if init_state in required_results:\n result[init_state] = -result[state][:, 0]\n for j in xrange(1, self.n):\n result[init_state] -= result[state][:, j]\n\n #print self.J\n #print 'arg', arg, 'result', result\n return result\n\n def _applyJextT(self, arg, required_results):\n \"\"\"Apply derivatives with respect to inputs. Ignore all contributions\n from past time points and let them come in via previous states\n instead.\"\"\"\n\n #Jx --> (n_times, n_external, n_states)\n n_time = self.n\n result = {}\n\n if self.state_var in arg:\n\n argsv = arg[self.state_var].T\n\n # Use this when we incorporate state deriv\n # Time-varying inputs\n for name in self.external_vars:\n\n if name not in required_results:\n continue\n\n ext_var = getattr(self, name)\n i_ext = self.ext_index_map[name]\n ext_length = np.prod(ext_var.shape)/n_time\n result[name] = np.zeros((ext_length, n_time))\n for k in xrange(n_time-1):\n\n # argsum is often sparse, so check it first\n if len(np.nonzero(argsv[k+1, :])[0]) > 0:\n Jsub = self.Jx[k+1, i_ext:i_ext+ext_length, :]\n result[name][:, k] += Jsub.dot(argsv[k+1, :])\n\n # Use this when we incorporate state deriv\n # Time-invariant inputs\n for name in self.fixed_external_vars:\n\n if name not in required_results:\n continue\n\n ext_var = getattr(self, name)\n i_ext = self.ext_index_map[name]\n ext_length = np.prod(ext_var.shape)\n result[name] = np.zeros((ext_length))\n for k in xrange(n_time-1):\n\n # argsum is often sparse, so check it first\n if len(np.nonzero(argsv[k+1, :])[0]) > 0:\n Jsub = self.Jx[k+1, i_ext:i_ext+ext_length, :]\n result[name] += Jsub.dot(argsv[k+1, :])\n\n for k, v in result.iteritems():\n ext_var = getattr(self, k)\n result[k] = v.reshape(ext_var.shape)\n\n return result\n\n def _applyJextT_limited(self, arg, required_results):\n \"\"\"Apply derivatives with respect to inputs\"\"\"\n\n # Jx --> (n_times, n_external, n_states)\n n_time = self.n\n result = {}\n\n if self.state_var in arg:\n\n argsv = arg[self.state_var].T\n argsum = np.zeros(argsv.shape)\n\n # Calculate these once, and use for every output\n for k in xrange(n_time - 1):\n argsum[k, :] = np.sum(argsv[k + 1:, :], 0)\n\n # argsum is often sparse, so save indices.\n nonzero_k = np.unique(argsum.nonzero()[0])\n\n # Time-varying inputs\n for name in self.external_vars:\n\n if name not in required_results:\n continue\n\n ext_var = getattr(self, name)\n i_ext = self.ext_index_map[name]\n ext_length = np.prod(ext_var.shape) / n_time\n result[name] = np.zeros((ext_length, n_time))\n\n i_ext_end = i_ext + 
ext_length\n for k in nonzero_k:\n Jsub = self.Jx[k + 1, i_ext:i_ext_end, :]\n result[name][:, k] += Jsub.dot(argsum[k, :])\n\n # Time-invariant inputs\n for name in self.fixed_external_vars:\n\n if name not in required_results:\n continue\n\n ext_var = getattr(self, name)\n i_ext = self.ext_index_map[name]\n ext_length = np.prod(ext_var.shape)\n result[name] = np.zeros((ext_length))\n\n i_ext_end = i_ext + ext_length\n for k in nonzero_k:\n Jsub = self.Jx[k + 1, i_ext:i_ext_end, :]\n result[name] += Jsub.dot(argsum[k, :])\n\n # Initial State\n name = self.init_state_var\n if name in required_results:\n fact = -self.Jy[0, :, :].T\n result[name] = argsv[0, :] + fact.dot(argsv[1, :])\n for k in xrange(1, n_time-1):\n fact = fact.dot(-self.Jy[k, :, :].T)\n result[name] += fact.dot(argsv[k+1, :])\n\n for k, v in result.iteritems():\n ext_var = getattr(self, k)\n result[k] = v.reshape(ext_var.shape)\n\n return result\n\n def _applyJextT_limited_old(self, arg, required_results):\n \"\"\"Apply derivatives with respect to inputs\"\"\"\n\n # Jx --> (n_times, n_external, n_states)\n n_time = self.n\n result = {}\n\n if self.state_var in arg:\n\n argsv = arg[self.state_var].T\n argsum = np.zeros(argsv.shape)\n\n # Calculate these once, and use for every output\n for k in xrange(n_time - 1):\n argsum[k, :] = np.sum(argsv[k + 1:, :], 0)\n\n # argsum is often sparse, so save indices.\n nonzero_k = np.unique(argsum.nonzero()[0])\n\n # Time-varying inputs\n for name in self.external_vars:\n\n if name not in required_results:\n continue\n\n ext_var = getattr(self, name)\n i_ext = self.ext_index_map[name]\n ext_length = np.prod(ext_var.shape) / n_time\n result[name] = np.zeros((ext_length, n_time))\n\n i_ext_end = i_ext + ext_length\n for k in nonzero_k:\n Jsub = self.Jx[k + 1, i_ext:i_ext_end, :]\n result[name][:, k] += Jsub.dot(argsum[k, :])\n\n # Time-invariant inputs\n for name in self.fixed_external_vars:\n\n if name not in required_results:\n continue\n\n ext_var = getattr(self, name)\n i_ext = self.ext_index_map[name]\n ext_length = np.prod(ext_var.shape)\n result[name] = np.zeros((ext_length))\n\n i_ext_end = i_ext + ext_length\n for k in nonzero_k:\n Jsub = self.Jx[k + 1, i_ext:i_ext_end, :]\n result[name] += Jsub.dot(argsum[k, :])\n\n # Initial State\n name = self.init_state_var\n if name in required_results:\n result[name] = argsv[0, :] + argsum[0, :]\n\n for k, v in result.iteritems():\n ext_var = getattr(self, k)\n result[k] = v.reshape(ext_var.shape)\n\n return result\n\n"
] | [
[
"numpy.dot",
"numpy.cos",
"numpy.ones",
"numpy.sin",
"numpy.zeros"
],
[
"numpy.hstack",
"numpy.nonzero",
"numpy.eye",
"numpy.tile",
"numpy.prod",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
watson-developer-cloud/assistant-dialog-flow-analysis | [
"0c7bcd9527636dce77c74b80f60dbe23e6682e13"
] | [
"src/conversation_analytics_toolkit/keyword_analysis.py"
] | [
"# (C) Copyright IBM Corp. 2019, 2020.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nltk.corpus import stopwords \n#####################################################################\n\n##############################################################\nimport re \nimport pandas as pd\n\n#######################################################################\nfrom sklearn.feature_extraction.text import CountVectorizer \n \n\ndef order_ngram(ngram): \n words = ngram.split()\n # sort the list.\n words.sort()\n return \" \".join(words)\n\n##############################################################\n# returns clean string\ndef clean_text(text, custom_stop_words = []):\n \n REPLACE_BY_SPACE_RE = re.compile('[/(){}\\[\\]\\|@,;]')\n BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_:]') \n text = text.lower() # lowercase text\n text = REPLACE_BY_SPACE_RE.sub(' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub('', text) # delete symbols which are in BAD_SYMBOLS_RE from text\n \n STOPWORDS = set(stopwords.words('english'))\n STOPWORDS = STOPWORDS | set(custom_stop_words)\n \n text = ' '.join(word for word in text.split() if word not in STOPWORDS) \n \n return text\n \ndef get_frequent_words_bigrams(utterances, num_unigrams,num_bigrams,custom_stop_words):\n \n utterances = [clean_text(x,custom_stop_words) for x in utterances]\n \n if len(utterances)==0:\n print(\"Warning! List of utterances is empty\")\n return {\"name\": \"\",\"children\": []}\n \n unigrams_found = False\n bigrams_found = False\n \n for i in range(0, len(utterances)):\n current_length=len(utterances[i].split())\n if current_length>1:\n unigrams_found = True\n bigrams_found = True\n break\n elif current_length>0:\n unigrams_found = True\n \n if not unigrams_found:\n print(\"Warning! List of utterances is empty. 
Perhaps the documents only contain stop words\")\n return {\"name\": \"\",\"children\": []}\n \n cv_words = CountVectorizer(ngram_range=(1,1))\n \n cv_fit_words=cv_words.fit_transform(utterances) \n word_list = cv_words.get_feature_names(); \n word_count_list = cv_fit_words.toarray().sum(axis=0) \n words_sorted = sorted(zip(word_count_list, word_list),reverse=True)\n words_sorted=words_sorted[0:min(len(words_sorted), num_unigrams)]\n \n if bigrams_found:\n cv_bigrams = CountVectorizer(ngram_range=(2,2))\n \n cv_fit_bigrams=cv_bigrams.fit_transform(utterances) \n bigram_list = cv_bigrams.get_feature_names()\n \n num_overall_bigrams = len(bigram_list)\n \n bigram_count_list = cv_fit_bigrams.toarray().sum(axis=0) \n bigram_zip = sorted(zip(bigram_count_list, bigram_list),reverse=True)\n \n counter_bigrams=0\n counter_merged_bigrams=0\n bigram_sorted=[]\n bigram_final_list=[]\n \n # merge same order\n while counter_bigrams < num_overall_bigrams and counter_merged_bigrams < num_bigrams:\n current_bigram = bigram_zip[counter_bigrams][1] \n current_count = bigram_zip[counter_bigrams][0] \n \n S = order_ngram(current_bigram)\n if S in bigram_sorted:\n ind = bigram_sorted.index(S) \n bigram_final_list[ind][0] += current_count\n \n else:\n bigram_final_list.append([current_count, current_bigram])\n bigram_sorted.append(S)\n\n counter_merged_bigrams += 1\n \n counter_bigrams += 1 \n \n else:\n print(\"Warning! List of bigrams is empty.\")\n\n children = []\n for i in range(0, min(len(words_sorted), num_unigrams)):\n children.append({\"name\": words_sorted[i][1], \"value\": int(words_sorted[i][0])})\n if bigrams_found:\n for i in range(0, min(len(bigram_final_list), num_bigrams)):\n children.append({\"name\": bigram_final_list[i][1], \"value\": int(bigram_final_list[i][0])})\n data = {\"name\": \"\",\"children\": children}\n\n return data\n \n##########################################################################################################\ndef get_df_frequencies(neg_dict, pos_dict, neg_list, pos_list, table_title1, table_title2):\n neg_list_neg_freq=[]\n neg_list_pos_freq=[]\n pos_list_neg_freq=[]\n pos_list_pos_freq=[]\n \n num_neg_features = len(neg_list)\n if num_neg_features==0:\n print(\"Warning! 
No significant keywords were found.\")\n \n num_pos_features = len(pos_list)\n \n for i in range(0, num_neg_features):\n neg_list_neg_freq.append(neg_dict[neg_list[i]][0]) \n neg_list_pos_freq.append(neg_dict[neg_list[i]][2]) \n \n for i in range(0, num_pos_features): \n pos_list_pos_freq.append(pos_dict[pos_list[i]][0]) \n pos_list_neg_freq.append(pos_dict[pos_list[i]][2])\n \n if num_neg_features>num_pos_features:\n D = [None]*(num_neg_features-num_pos_features)\n pos_list.extend(D)\n pos_list_pos_freq.extend(D)\n pos_list_neg_freq.extend(D)\n \n if num_pos_features>num_neg_features:\n D = [None]*(num_pos_features-num_neg_features)\n neg_list.extend(D)\n neg_list_neg_freq.extend(D)\n neg_list_pos_freq.extend(D)\n \n df_frequencies = pd.DataFrame() \n df_frequencies[table_title1] = neg_list \n df_frequencies[\"Failure keywords: negative corpus frequencies, %\"] = neg_list_neg_freq\n df_frequencies[\"Failure keywords: positive corpus frequencies, %\"] = neg_list_pos_freq\n df_frequencies[table_title2] = pos_list\n df_frequencies[\"Success keywords: positive corpus frequencies, %\"] = pos_list_pos_freq\n df_frequencies[\"Success keywords: negative corpus frequencies, %\"] = pos_list_neg_freq\n\n return df_frequencies \n\n\n########################################################################\ndef get_features(text_negative, text_positive, num_features, n_range=(1,2)): \n \n PRINT_MODE = False\n \n #no_shows=[\"account\",\"number\"]\n no_shows=[]\n \n ratio_parameter=2.0\n num_negative=len(text_negative)\n num_positive=len(text_positive)\n \n cv_negative = CountVectorizer(ngram_range=n_range)\n \n cv_fit=cv_negative.fit_transform(text_negative) \n word_list_negative = cv_negative.get_feature_names(); \n count_list_negative = cv_fit.toarray().sum(axis=0) \n negative_count_unigrams = sorted(zip(count_list_negative, word_list_negative),reverse=True) \n dict_negative = {word_list_negative[i]: count_list_negative[i] for i in range(len(word_list_negative))} \n \n cv_positive = CountVectorizer(ngram_range=n_range)\n \n cv_fit=cv_positive.fit_transform(text_positive) \n word_list_positive = cv_positive.get_feature_names(); \n count_list_positive = cv_fit.toarray().sum(axis=0) \n positive_count_unigrams = sorted(zip(count_list_positive, word_list_positive),reverse=True) \n dict_positive = {word_list_positive[i]: count_list_positive[i] for i in range(len(word_list_positive))} \n \n ##############################\n # perform frequence check, print warnings\n counter_neg_features=0\n counter=0\n neg_list_sorted=[]\n neg_list=[]\n neg_dict={}\n while counter_neg_features<num_features:\n current_neg_feature=negative_count_unigrams[counter][1] \n current_neg_count=negative_count_unigrams[counter][0]\n \n get_neg_freq=current_neg_count / num_negative \n \n if current_neg_feature in dict_positive.keys():\n current_pos_count = dict_positive[current_neg_feature]\n get_pos_freq = (current_pos_count + 0.0) / num_positive \n else:\n current_pos_count = 0\n get_pos_freq = 1.0 / num_positive \n \n if ratio_parameter*get_pos_freq>=get_neg_freq:\n if PRINT_MODE:\n print(\"Frequencies for negative feature \"+current_neg_feature+\" are not appropriate. 
Feature is omitted\")\n print(\"Negative frequency: \"+str(get_neg_freq))\n print(\"Positive frequency: \"+str(get_pos_freq))\n \n elif current_neg_feature not in no_shows:\n S = order_ngram(current_neg_feature)\n if S in neg_list_sorted:\n ind = neg_list_sorted.index(S)\n prev = neg_list[ind] \n neg_dict[prev][0] += current_neg_count\n neg_dict[prev][1] += current_pos_count\n neg_dict[prev][2] += ((current_neg_count + neg_dict[prev][0]) / max(current_pos_count + neg_dict[prev][1],1)) *\\\n (num_positive/num_negative)\n else:\n neg_list.append(current_neg_feature)\n neg_list_sorted.append(S)\n\n neg_dict[current_neg_feature] = [current_neg_count, current_pos_count, get_neg_freq / get_pos_freq]\n counter_neg_features+=1\n \n counter+=1 \n \n if counter==len(negative_count_unigrams):\n break\n \n counter_pos_features=0\n counter=0\n pos_list_sorted=[]\n pos_list=[]\n pos_dict={}\n while counter_pos_features<num_features:\n current_pos_feature = positive_count_unigrams[counter][1] \n current_pos_count = positive_count_unigrams[counter][0]\n \n get_pos_freq=current_pos_count / num_positive\n \n if current_pos_feature in dict_negative.keys():\n current_neg_count = dict_negative[current_pos_feature]\n get_neg_freq = dict_negative[current_pos_feature]\n else:\n current_neg_count = 0\n get_neg_freq = 1.0 / num_negative \n \n if get_pos_freq<=ratio_parameter*get_neg_freq:\n if PRINT_MODE:\n print(\"Frequencies for positive feature '\"+current_pos_feature+\"' are not appropriate. Feature is omitted.\")\n print(\"Negative frequency: \"+str(get_neg_freq))\n print(\"Positive frequency: \"+str(get_pos_freq))\n elif current_pos_feature not in no_shows:\n S = order_ngram(current_pos_feature)\n if S in pos_list_sorted:\n ind = pos_list_sorted.index(S)\n prev = pos_list[ind] \n pos_dict[prev][0] += current_pos_count\n pos_dict[prev][1] += current_neg_count\n pos_dict[prev][2] += ((current_pos_count + pos_dict[prev][0]) / max(current_neg_count + pos_dict[prev][1],1)) *\\\n (num_negative/num_positive )\n else:\n pos_list.append(current_pos_feature)\n pos_list_sorted.append(S)\n \n pos_dict[current_pos_feature] = [current_pos_count, current_neg_count, get_pos_freq / get_neg_freq]\n counter_pos_features+=1\n \n counter+=1 \n \n if counter==len(positive_count_unigrams):\n break \n \n return neg_dict, pos_dict, neg_list, pos_list \n\n\n\n########################################################################\ndef keyword_table_detailed(text_negative, text_positive, num_keywords, custom_stop_words, title1=\"Title 1\", title2=\"Title 2\"):\n \n COEFF = [0.4, 0.4, 0.2]\n \n text_positive = [clean_text(x,custom_stop_words) for x in text_positive]\n text_negative = [clean_text(x,custom_stop_words) for x in text_negative]\n \n if len(text_positive)==0 or len(text_negative)==0:\n print(\"Warning! 
At least one of List of utterances is empty\")\n neg_dict = {}\n pos_dict = {}\n neg_list = [] \n pos_list = []\n \n else: \n \n unigrams_found_negative = False\n bigrams_found_negative = False\n trigrams_found_negative = False\n \n for i in range(0, len(text_negative)):\n current_length=len(text_negative[i].split())\n if current_length>2:\n unigrams_found_negative = True\n bigrams_found_negative = True\n trigrams_found_negative = True\n break\n elif current_length>1:\n unigrams_found_negative = True\n bigrams_found_negative = True\n elif current_length>0:\n unigrams_found_negative = True\n \n ################################################################ \n unigrams_found_positive = False\n bigrams_found_positive = False\n trigrams_found_positive = False\n \n for i in range(0, len(text_positive)):\n current_length=len(text_positive[i].split())\n if current_length>2:\n unigrams_found_positive = True\n bigrams_found_positive = True\n trigrams_found_positive = True\n break\n elif current_length>1:\n unigrams_found_positive = True\n bigrams_found_positive = True\n elif current_length>0:\n unigrams_found_positive = True \n \n ####################################################################\n unigrams_found = unigrams_found_negative and unigrams_found_positive\n bigrams_found = bigrams_found_negative and bigrams_found_positive\n trigrams_found = trigrams_found_negative and trigrams_found_positive\n \n if not unigrams_found:\n print(\"Warning! At least one of utterance lists is empty. Perhaps the documents only contain stop words\")\n neg_dict = {}\n pos_dict = {}\n neg_list = [] \n pos_list = []\n else:\n num_keywords1 = round(num_keywords*COEFF[0])\n \n neg_dict, pos_dict, neg_list, pos_list = get_features(text_negative,text_positive,num_keywords1,n_range=(1,1))\n \n if not bigrams_found:\n print(\"Warning! No bigrams for at least one of utterance lists.\") \n else:\n \n num_keywords2 = round(num_keywords*COEFF[1])\n \n neg_dict2, pos_dict2, neg_list2, pos_list2 = get_features(text_negative,text_positive,num_keywords2,n_range=(2,2))\n \n neg_list.extend(neg_list2) \n pos_list.extend(pos_list2) \n \n neg_dict.update(neg_dict2) \n pos_dict.update(pos_dict2) \n \n if not trigrams_found:\n print(\"Warning! 
No trigrams for at least one of utterance lists.\") \n else: \n num_keywords3 = num_keywords - num_keywords1 - num_keywords2 \n \n neg_dict3, pos_dict3, neg_list3, pos_list3 = get_features(text_negative,text_positive,num_keywords3,n_range=(3,3))\n \n neg_list.extend(neg_list3) \n pos_list.extend(pos_list3) \n \n neg_dict.update(neg_dict3) \n pos_dict.update(pos_dict3)\n \n #################################################################################################### \n df_frequencies = get_df_frequencies(neg_dict, pos_dict, neg_list, pos_list, title1, title2) \n \n df_frequencies.rename(columns={\"Failure keywords: negative corpus frequencies, %\": title1+\": Frequency\",\\\n \"Failure keywords: positive corpus frequencies, %\": title1+\": Power\",\\\n \"Success keywords: positive corpus frequencies, %\": title2+\": Frequency\",\\\n \"Success keywords: negative corpus frequencies, %\": title2+\": Power\"}, inplace=True)\n \n return df_frequencies\n\n################################################################################################\ndef get_data_for_comparison_visual(user_input_abandoned, user_input_completed, num_keywords, custom_stop_words=[]):\n \n df_keywords_detailed = keyword_table_detailed(user_input_abandoned, user_input_completed, num_keywords, custom_stop_words)\n \n #print(df_keywords_detailed.head(n=25))\n \n data={}\n data[\"name\"]=\"\"\n data[\"children\"]=[]\n for i in range(0,len(df_keywords_detailed)):\n if df_keywords_detailed.iloc[i,1] is None:\n data[\"children\"].append({\"name\": df_keywords_detailed.iloc[i,0], \"value\": df_keywords_detailed.iloc[i,1]})\n else:\n data[\"children\"].append({\"name\": df_keywords_detailed.iloc[i,0], \"value\": int(df_keywords_detailed.iloc[i,1])})\n \n return data "
] | [
[
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
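The `code` field of the row above builds its keyword tables by fitting sklearn's CountVectorizer separately on the negative and positive corpora and keeping only n-grams whose per-document frequency in one corpus exceeds a fixed multiple (`ratio_parameter`, 2.0 in the source) of its frequency in the other. The following is a minimal, self-contained sketch of that frequency-ratio idea only; `frequency_ratio_keywords` and its zero-count smoothing are illustrative assumptions rather than the row's exact helpers, and `get_feature_names_out` requires scikit-learn >= 1.0 (older releases, like the one used in the row, expose `get_feature_names`).

# Hedged sketch of the frequency-ratio keyword selection used in get_features()
from sklearn.feature_extraction.text import CountVectorizer

def frequency_ratio_keywords(corpus_a, corpus_b, ratio=2.0, ngram_range=(1, 1)):
    """Return n-grams over-represented in corpus_a relative to corpus_b."""
    cv_a = CountVectorizer(ngram_range=ngram_range)
    counts_a = cv_a.fit_transform(corpus_a).toarray().sum(axis=0)
    vocab = cv_a.get_feature_names_out()  # get_feature_names() in older sklearn

    # count the same vocabulary in the second corpus
    cv_b = CountVectorizer(ngram_range=ngram_range, vocabulary=vocab)
    counts_b = cv_b.fit_transform(corpus_b).toarray().sum(axis=0)

    # per-document frequencies; unseen terms are smoothed to one occurrence,
    # mirroring the 1.0 / num_documents fallback in the source
    freq_a = {w: c / len(corpus_a) for w, c in zip(vocab, counts_a)}
    freq_b = {w: max(int(c), 1) / len(corpus_b) for w, c in zip(vocab, counts_b)}

    # keep n-grams whose frequency in A is more than `ratio` times that in B,
    # ranked by how strongly they are over-represented
    return sorted(
        (w for w in vocab if freq_a[w] > ratio * freq_b[w]),
        key=lambda w: freq_a[w] / freq_b[w],
        reverse=True,
    )

# usage with hypothetical corpora:
# failure_keywords = frequency_ratio_keywords(failure_texts, success_texts)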
open2c/cooltools | [
"704f259c4df5bc1c61be915bf56c80b00a0298ed"
] | [
"cooltools/expected.py"
] | [
"from itertools import chain, combinations\nfrom collections import defaultdict\nfrom functools import partial\n\nimport warnings\nimport multiprocess as mp\n\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.signal import fftconvolve\nfrom scipy.interpolate import interp1d\n\nfrom cooler.tools import partition\nimport cooler\nimport bioframe\nfrom .lib import assign_supports, numutils\n\nwhere = np.flatnonzero\nconcat = chain.from_iterable\n\n\ndef _contact_areas(distbins, scaffold_length):\n distbins = distbins.astype(float)\n scaffold_length = float(scaffold_length)\n outer_areas = np.maximum(scaffold_length - distbins[:-1], 0) ** 2\n inner_areas = np.maximum(scaffold_length - distbins[1:], 0) ** 2\n return 0.5 * (outer_areas - inner_areas)\n\n\ndef contact_areas(distbins, region1, region2):\n if region1 == region2:\n start, end = region1\n areas = _contact_areas(distbins, end - start)\n else:\n start1, end1 = region1\n start2, end2 = region2\n if start2 <= start1:\n start1, start2 = start2, start1\n end1, end2 = end2, end1\n areas = (\n _contact_areas(distbins, end2 - start1)\n - _contact_areas(distbins, start2 - start1)\n - _contact_areas(distbins, end2 - end1)\n )\n if end1 < start2:\n areas += _contact_areas(distbins, start2 - end1)\n\n return areas\n\n\ndef compute_scaling(df, region1, region2=None, dmin=int(1e1), dmax=int(1e7), n_bins=50):\n\n import dask.array as da\n\n if region2 is None:\n region2 = region1\n\n distbins = numutils.logbins(dmin, dmax, N=n_bins)\n areas = contact_areas(distbins, region1, region2)\n\n df = df[\n (df[\"pos1\"] >= region1[0])\n & (df[\"pos1\"] < region1[1])\n & (df[\"pos2\"] >= region2[0])\n & (df[\"pos2\"] < region2[1])\n ]\n dists = (df[\"pos2\"] - df[\"pos1\"]).values\n\n if isinstance(dists, da.Array):\n obs, _ = da.histogram(dists[(dists >= dmin) & (dists < dmax)], bins=distbins)\n else:\n obs, _ = np.histogram(dists[(dists >= dmin) & (dists < dmax)], bins=distbins)\n\n return distbins, obs, areas\n\n\ndef lattice_pdist_frequencies(n, points):\n \"\"\"\n Distribution of pairwise 1D distances among a collection of distinct\n integers ranging from 0 to n-1.\n\n Parameters\n ----------\n n : int\n Size of the lattice on which the integer points reside.\n points : sequence of int\n Arbitrary integers between 0 and n-1, inclusive, in any order but\n with no duplicates.\n\n Returns\n -------\n h : 1D array of length n\n h[d] counts the number of integer pairs that are exactly d units apart\n\n Notes\n -----\n This is done using a convolution via FFT. Thanks to Peter de Rivaz; see\n `<http://stackoverflow.com/questions/42423823/distribution-of-pairwise-distances-between-many-integers>`_.\n\n \"\"\"\n if len(np.unique(points)) != len(points):\n raise ValueError(\"Integers must be distinct.\")\n x = np.zeros(n)\n x[points] = 1\n return np.round(fftconvolve(x, x[::-1], mode=\"full\")).astype(int)[-n:]\n\n\ndef count_bad_pixels_per_diag(n, bad_bins):\n \"\"\"\n Efficiently count the number of bad pixels on each upper diagonal of a\n matrix assuming a sequence of bad bins forms a \"grid\" of invalid pixels.\n\n Each bad bin bifurcates into two a row and column of bad pixels, so an\n upper bound on number of bad pixels per diagonal is 2*k, where k is the\n number of bad bins. 
For a given diagonal, we need to subtract from this\n upper estimate the contribution from rows/columns reaching \"out-of-bounds\"\n and the contribution of the intersection points of bad rows with bad\n columns that get double counted.\n\n ::\n\n o : bad bin\n * : bad pixel\n x : intersection bad pixel\n $ : out of bounds bad pixel\n $ $ $\n *--------------------------+\n * * * * |\n * * * * |\n ** * * |\n o****x*****x***********|$\n * * * |\n * * * |\n * * * |\n o******x***********|$\n * * |\n * * |\n * * |\n * * |\n * * |\n ** |\n o***********|$\n * |\n * |\n\n Parameters\n ----------\n n : int\n total number of bins\n bad_bins : 1D array of int\n sorted array of bad bin indexes\n\n Returns\n -------\n dcount : 1D array of length n\n dcount[d] == number of bad pixels on diagonal d\n\n \"\"\"\n k = len(bad_bins)\n dcount = np.zeros(n, dtype=int)\n\n # Store all intersection pixels in a separate array\n # ~O(n log n) with fft\n ixn = lattice_pdist_frequencies(n, bad_bins)\n dcount[0] = ixn[0]\n\n # Keep track of out-of-bounds pixels by squeezing left and right bounds\n # ~O(n)\n pl = 0\n pr = k\n for diag in range(1, n):\n if pl < k:\n while (bad_bins[pl] - diag) < 0:\n pl += 1\n if pl == k:\n break\n if pr > 0:\n while (bad_bins[pr - 1] + diag) >= n:\n pr -= 1\n if pr == 0:\n break\n dcount[diag] = 2 * k - ixn[diag] - pl - (k - pr)\n return dcount\n\n\ndef count_all_pixels_per_diag(n):\n \"\"\"\n Total number of pixels on each upper diagonal of a square matrix.\n\n Parameters\n ----------\n n : int\n total number of bins (dimension of square matrix)\n\n Returns\n -------\n dcount : 1D array of length n\n dcount[d] == total number of pixels on diagonal d\n\n \"\"\"\n return np.arange(n, 0, -1)\n\n\ndef count_all_pixels_per_block(x, y):\n \"\"\"\n Calculate total number of pixels in a rectangular block\n\n Parameters\n ----------\n x : int\n block width in pixels\n y : int\n block height in pixels\n\n Returns\n -------\n number_of_pixels : int\n total number of pixels in a block\n \"\"\"\n return x * y\n\n\ndef count_bad_pixels_per_block(x, y, bad_bins_x, bad_bins_y):\n \"\"\"\n Calculate number of \"bad\" pixels per rectangular block of a contact map\n\n \"Bad\" pixels are inferred from the balancing weight column `weight_name` or\n provided directly in the form of an array `bad_bins`.\n\n Setting `weight_name` and `bad_bins` to `None` yields 0 bad pixels in a block.\n\n Parameters\n ----------\n x : int\n block width in pixels\n y : int\n block height in pixels\n bad_bins_x : int\n number of bad bins on x-side\n bad_bins_y : int\n number of bad bins on y-side\n\n Returns\n -------\n number_of_pixes : int\n number of \"bad\" pixels in a block\n \"\"\"\n\n # Calculate the resulting bad pixels in a rectangular block:\n return (x * bad_bins_y) + (y * bad_bins_x) - (bad_bins_x * bad_bins_y)\n\n\ndef make_diag_table(bad_mask, span1, span2):\n \"\"\"\n Compute the total number of elements ``n_elem`` and the number of bad\n elements ``n_bad`` per diagonal for a single contact area encompassing\n ``span1`` and ``span2`` on the same genomic scaffold (cis matrix).\n\n Follows the same principle as the algorithm for finding contact areas for\n computing scalings.\n\n Parameters\n ----------\n bad_mask : 1D array of bool\n Mask of bad bins for the whole genomic scaffold containing the regions\n of interest.\n span1, span2 : pair of ints\n The bin spans (not genomic coordinates) of the two regions of interest.\n\n Returns\n -------\n diags : pandas.DataFrame\n Table indexed by 'diag' with columns 
['n_elem', 'n_bad'].\n\n \"\"\"\n\n def _make_diag_table(n_bins, bad_locs):\n diags = pd.DataFrame(index=pd.Series(np.arange(n_bins), name=\"diag\"))\n diags[\"n_elem\"] = count_all_pixels_per_diag(n_bins)\n diags[\"n_valid\"] = diags[\"n_elem\"] - count_bad_pixels_per_diag(n_bins, bad_locs)\n return diags\n\n if span1 == span2:\n lo, hi = span1\n diags = _make_diag_table(hi - lo, where(bad_mask[lo:hi]))\n else:\n lo1, hi1 = span1\n lo2, hi2 = span2\n if lo2 <= lo1:\n lo1, lo2 = lo2, lo1\n hi1, hi2 = hi2, hi1\n diags = (\n _make_diag_table(hi2 - lo1, where(bad_mask[lo1:hi2]))\n .subtract(\n _make_diag_table(lo2 - lo1, where(bad_mask[lo1:lo2])), fill_value=0\n )\n .subtract(\n _make_diag_table(hi2 - hi1, where(bad_mask[hi1:hi2])), fill_value=0\n )\n )\n if hi1 < lo2:\n diags.add(\n _make_diag_table(lo2 - hi1, where(bad_mask[hi1:lo2])), fill_value=0\n )\n diags = diags[diags[\"n_elem\"] > 0]\n\n diags = diags.drop(\"n_elem\", axis=1)\n return diags.astype(int)\n\n\ndef make_diag_tables(clr, regions, regions2=None, weight_name=\"weight\", bad_bins=None):\n \"\"\"\n For every support region infer diagonals that intersect this region\n and calculate the size of these intersections in pixels, both \"total\" and\n \"n_valid\", where \"n_valid\" does not include \"bad\" bins into counting.\n\n \"Bad\" pixels are inferred from the balancing weight column `weight_name` or\n provided directly in the form of an array `bad_bins`.\n\n Setting `weight_name` and `bad_bins` to `None` yields 0 \"bad\" pixels per\n diagonal per support region.\n\n When `regions2` are provided, all intersecting diagonals are reported for\n each rectangular and asymmetric block defined by combinations of matching\n elements of `regions` and `regions2`.\n Otherwise only `regions`-based symmetric square blocks are considered.\n Only intra-chromosomal regions are supported.\n\n Parameters\n ----------\n clr : cooler.Cooler\n Input cooler\n regions : viewframe or viewframe-like dataframe\n viewframe without repeated entries or viewframe-like dataframe with repeated entries\n regions2 : viewframe or viewframe-like dataframe\n viewframe without repeated entries or viewframe-like dataframe with repeated entries\n weight_name : str\n name of the weight vector in the \"bins\" table,\n if weight_name is None returns 0 for each block.\n Balancing weight are used to infer bad bins.\n bad_bins : array-like\n a list of bins to ignore. 
Indexes of bins must\n be absolute, as in clr.bins()[:], as opposed to\n being offset by chromosome start.\n \"bad_bins\" will be combined with the bad bins\n masked by balancing if there are any.\n\n Returns\n -------\n diag_tables : dict\n dictionary with DataFrames of relevant diagonals for every support.\n \"\"\"\n\n try: # Run regular viewframe conversion:\n regions = bioframe.make_viewframe(regions, check_bounds=clr.chromsizes).values\n if regions2 is not None:\n regions2 = bioframe.make_viewframe(\n regions2, check_bounds=clr.chromsizes\n ).values\n except ValueError: # If there are non-unique entries in regions1/2, possible only for asymmetric expected:\n regions = pd.concat(\n [\n bioframe.make_viewframe([region], check_bounds=clr.chromsizes)\n for i, region in regions.iterrows()\n ]\n ).values\n regions2 = pd.concat(\n [\n bioframe.make_viewframe([region], check_bounds=clr.chromsizes)\n for i, region in regions2.iterrows()\n ]\n ).values\n\n bins = clr.bins()[:]\n if weight_name is None:\n # ignore bad bins\n sizes = dict(bins.groupby(\"chrom\").size())\n bad_bin_dict = {\n chrom: np.zeros(sizes[chrom], dtype=bool) for chrom in sizes.keys()\n }\n elif isinstance(weight_name, str):\n # using balacning weight to infer bad bins\n if weight_name not in clr.bins().columns:\n raise KeyError(f\"Balancing weight {weight_name} not found!\")\n groups = dict(iter(bins.groupby(\"chrom\")[weight_name]))\n bad_bin_dict = {\n chrom: np.array(groups[chrom].isnull()) for chrom in groups.keys()\n }\n else:\n raise ValueError(\"`weight_name` can be `str` or `None`\")\n\n # combine custom \"bad_bins\" with \"bad_bin_dict\":\n if bad_bins is not None:\n # check if \"bad_bins\" are legit:\n try:\n bad_bins_chrom = bins.iloc[bad_bins].reset_index(drop=False)\n except IndexError:\n raise ValueError(\"Provided `bad_bins` are incorrect or out-of-bound\")\n # group them by observed chromosomes only\n bad_bins_grp = bad_bins_chrom[[\"index\", \"chrom\"]].groupby(\n \"chrom\", observed=True\n )\n # update \"bad_bin_dict\" with \"bad_bins\" for each chrom:\n for chrom, bin_ids in bad_bins_grp[\"index\"]:\n co = clr.offset(chrom)\n # adjust by chromosome offset\n bad_bin_dict[chrom][bin_ids.values - co] = True\n\n diag_tables = {}\n for i, region in enumerate(regions):\n chrom, start1, end1, name1 = region\n if regions2 is not None:\n chrom2, start2, end2, name2 = regions2[i]\n # cis-only for now:\n if not (chrom2 == chrom):\n raise ValueError(\"regions/2 have to be on the same chrom to generate diag_tables\")\n else:\n start2, end2 = start1, end1\n\n # translate regions into relative bin id-s:\n lo1, hi1 = clr.extent((chrom, start1, end1))\n lo2, hi2 = clr.extent((chrom, start2, end2))\n co = clr.offset(chrom)\n lo1 -= co\n lo2 -= co\n hi1 -= co\n hi2 -= co\n\n bad_mask = bad_bin_dict[chrom]\n newname = name1\n if regions2 is not None:\n newname = (name1, name2)\n diag_tables[newname] = make_diag_table(bad_mask, [lo1, hi1], [lo2, hi2])\n\n return diag_tables\n\n\ndef make_block_table(clr, regions1, regions2, weight_name=\"weight\", bad_bins=None):\n \"\"\"\n Creates a table that characterizes a set of rectangular genomic blocks\n formed by combining regions from regions1 and regions2.\n For every block calculate its \"area\" in pixels (\"n_total\"), and calculate\n number of \"valid\" pixels in each block (\"n_valid\").\n \"Valid\" pixels exclude \"bad\" pixels, which in turn inferred from the balancing\n weight column `weight_name` or provided directly in the form of an array of\n `bad_bins`.\n\n Setting 
`weight_name` and `bad_bins` to `None` yields 0 \"bad\" pixels per\n block.\n\n Parameters\n ----------\n clr : cooler.Cooler\n Input cooler\n regions1 : viewframe or viewframe-like dataframe\n a viewframe without repeated entries or viewframe-like dataframe with repeated entries\n regions2 : viewframe or viewframe-like dataframe\n a viewframe without repeated entries or viewframe-like dataframe with repeated entries\n weight_name : str\n name of the weight vector in the \"bins\" table,\n if weight_name is None returns 0 for each block.\n Balancing weight are used to infer bad bins.\n bad_bins : array-like\n a list of bins to ignore. Indexes of bins must\n be absolute, as in clr.bins()[:], as opposed to\n being offset by chromosome start.\n \"bad_bins\" will be combined with the bad bins\n masked by balancing if there are any.\n\n Returns\n -------\n block_table : dict\n dictionary for blocks that are 0-indexed\n \"\"\"\n if bad_bins is None:\n bad_bins = np.asarray([]).astype(int)\n else:\n bad_bins = np.asarray(bad_bins).astype(int)\n\n try: # Run regular viewframe conversion:\n regions1 = bioframe.make_viewframe(regions1, check_bounds=clr.chromsizes).values\n regions2 = bioframe.make_viewframe(regions2, check_bounds=clr.chromsizes).values\n except ValueError: # Might be non-unique entries in regions:\n regions1 = pd.concat(\n [\n bioframe.make_viewframe([region], check_bounds=clr.chromsizes)\n for i, region in regions1.iterrows()\n ]\n ).values\n regions2 = pd.concat(\n [\n bioframe.make_viewframe([region], check_bounds=clr.chromsizes)\n for i, region in regions2.iterrows()\n ]\n ).values\n\n # should we check for nestedness here, or that each region1 is < region2 ?\n\n block_table = {}\n for r1, r2 in zip(regions1, regions2):\n chrom1, start1, end1, name1 = r1\n chrom2, start2, end2, name2 = r2\n # translate regions into relative bin id-s:\n lo1, hi1 = clr.extent((chrom1, start1, end1))\n lo2, hi2 = clr.extent((chrom2, start2, end2))\n # width and height of a block:\n x = hi1 - lo1\n y = hi2 - lo2\n # get \"regional\" bad_bins for each of the regions\n bx = bad_bins[(bad_bins >= lo1) & (bad_bins < hi1)] - lo1\n by = bad_bins[(bad_bins >= lo2) & (bad_bins < hi2)] - lo2\n\n # now we need to combine it with the balancing weights\n if weight_name is None:\n bad_bins_x = len(bx)\n bad_bins_y = len(by)\n elif isinstance(weight_name, str):\n if weight_name not in clr.bins().columns:\n raise KeyError(f\"Balancing weight {weight_name} not found!\")\n else:\n # extract \"bad\" bins filtered by balancing:\n cb_bins_x = clr.bins()[weight_name][lo1:hi1].isnull().values\n cb_bins_y = clr.bins()[weight_name][lo2:hi2].isnull().values\n # combine with \"bad_bins\" using assignment:\n cb_bins_x[bx] = True\n cb_bins_y[by] = True\n # count and yield final list of bad bins:\n bad_bins_x = np.count_nonzero(cb_bins_x)\n bad_bins_y = np.count_nonzero(cb_bins_y)\n else:\n raise ValueError(\"`weight_name` can be `str` or `None`\")\n\n # calculate total and bad pixels per block:\n n_tot = count_all_pixels_per_block(x, y)\n n_bad = count_bad_pixels_per_block(x, y, bad_bins_x, bad_bins_y)\n\n # fill in \"block_table\" with number of valid pixels:\n block_table[name1, name2] = defaultdict(int)\n block_table[name1, name2][\"n_valid\"] = n_tot - n_bad\n\n return block_table\n\n\ndef _diagsum_symm(clr, fields, transforms, regions, span):\n \"\"\"\n calculates diagonal/distance summary for a collection of\n square symmetric blocks defined by the \"regions\".\n\n Return:\n dictionary of DataFrames with 
diagonal/distance\n sums for the \"fields\", and 0-based indexes of square\n genomic regions as keys.\n \"\"\"\n lo, hi = span\n bins = clr.bins()[:]\n pixels = clr.pixels()[lo:hi]\n pixels = cooler.annotate(pixels, bins, replace=False)\n # pre-filter cis-only pixels to speed up calculations\n pixels = pixels[ pixels[\"chrom1\"] == pixels[\"chrom2\"] ].copy()\n\n # annotate pixels with regions at once\n # book-ended regions still get reannotated\n pixels[\"r1\"] = assign_supports(pixels, regions, suffix=\"1\")\n pixels[\"r2\"] = assign_supports(pixels, regions, suffix=\"2\")\n # select symmetric pixels and region annotations only\n pixels = pixels[ pixels[\"r1\"] == pixels[\"r2\"] ]\n\n # this could further expanded to allow for custom groupings:\n pixels[\"dist\"] = pixels[\"bin2_id\"] - pixels[\"bin1_id\"]\n for field, t in transforms.items():\n pixels[field] = t(pixels)\n\n symm_blocks = pixels.groupby(\"r1\")\n return {int(i): block.groupby(\"dist\")[fields].sum() for i, block in symm_blocks}\n\n\ndef diagsum_symm(\n clr,\n view_df,\n transforms={},\n weight_name=\"weight\",\n bad_bins=None,\n ignore_diags=2,\n chunksize=10000000,\n map=map,\n):\n \"\"\"\n\n Intra-chromosomal diagonal summary statistics.\n\n Parameters\n ----------\n clr : cooler.Cooler\n Cooler object\n view_df : viewframe (or depreated: sequence of genomic range tuples)\n Support view_df for intra-chromosomal diagonal summation\n transforms : dict of str -> callable, optional\n Transformations to apply to pixels. The result will be assigned to\n a temporary column with the name given by the key. Callables take\n one argument: the current chunk of the (annotated) pixel dataframe.\n weight_name : str\n name of the balancing weight vector used to count\n \"bad\"(masked) pixels per diagonal.\n Use `None` to avoid masking \"bad\" pixels.\n bad_bins : array-like\n a list of bins to ignore per support region.\n Combines with the list of bad bins from balacning\n weight.\n chunksize : int, optional\n Size of pixel table chunks to process\n ignore_diags : int, optional\n Number of intial diagonals to exclude from statistics\n map : callable, optional\n Map functor implementation.\n\n Returns\n -------\n Dataframe of diagonal statistics for all regions in the view\n\n \"\"\"\n spans = partition(0, len(clr.pixels()), chunksize)\n fields = [\"count\"] + list(transforms.keys())\n\n # appropriate viewframe checks\n try:\n if not bioframe.is_viewframe(view_df, raise_errors=True):\n raise ValueError(\"view_df is not a valid viewframe.\")\n if not bioframe.is_contained(view_df, bioframe.make_viewframe(clr.chromsizes)):\n raise ValueError(\n \"View table is out of the bounds of chromosomes in cooler.\"\n )\n except Exception as e: # AssertionError or ValueError, see https://github.com/gfudenberg/bioframe/blob/main/bioframe/core/checks.py#L177\n warnings.warn(\n \"view_df has to be a proper viewframe from next release\",\n DeprecationWarning,\n stacklevel=2,\n )\n view_df = bioframe.make_viewframe(view_df)\n\n dtables = make_diag_tables(clr, view_df, weight_name=weight_name, bad_bins=bad_bins)\n\n # combine masking with existing transforms and add a \"count\" transform:\n if bad_bins is not None:\n # turn bad_bins into a mask of size clr.bins:\n mask_size = len(clr.bins())\n bad_bins_mask = np.ones(mask_size, dtype=int)\n bad_bins_mask[bad_bins] = 0\n #\n masked_transforms = {}\n bin1 = \"bin1_id\"\n bin2 = \"bin2_id\"\n for field in fields:\n if field in transforms:\n # combine masking and transform, minding the scope:\n t = 
transforms[field]\n masked_transforms[field] = (\n lambda p, t=t, m=bad_bins_mask: t(p) * m[p[bin1]] * m[p[bin2]]\n )\n else:\n # presumably field == \"count\", mind the scope as well:\n masked_transforms[field] = (\n lambda p, f=field, m=bad_bins_mask: p[f] * m[p[bin1]] * m[p[bin2]]\n )\n # substitute transforms to the masked_transforms:\n transforms = masked_transforms\n\n for dt in dtables.values():\n for field in fields:\n agg_name = f\"{field}.sum\"\n dt[agg_name] = 0\n\n job = partial(_diagsum_symm, clr, fields, transforms, view_df.values)\n results = map(job, spans)\n for result in results:\n for i, agg in result.items():\n region = view_df.loc[i, \"name\"]\n for field in fields:\n agg_name = f\"{field}.sum\"\n dtables[region][agg_name] = dtables[region][agg_name].add(\n agg[field], fill_value=0\n )\n\n # returning dataframe for API consistency\n result = []\n for i, dtable in dtables.items():\n dtable = dtable.reset_index()\n # conform with the new expected format, treat regions as 2D\n dtable.insert(0, \"region1\", i)\n dtable.insert(1, \"region2\", i)\n if ignore_diags:\n # fill out summary fields of ignored diagonals with NaN:\n summary_fields = [f\"{field}.sum\" for field in fields]\n dtable.loc[dtable[\"diag\"] < ignore_diags, summary_fields] = np.nan\n result.append(dtable)\n\n return pd.concat(result).reset_index(drop=True)\n\n\ndef _diagsum_pairwise(clr, fields, transforms, regions, span):\n \"\"\"\n calculates diagonal/distance summary for a collection of\n rectangular blocks defined by all pairwise combinations\n of \"regions\" for intra-chromosomal interactions.\n\n Return:\n dictionary of DataFrames with diagonal/distance\n sums for the \"fields\", and (i,j)-like indexes of rectangular\n genomic regions as keys.\n \"\"\"\n lo, hi = span\n bins = clr.bins()[:]\n pixels = clr.pixels()[lo:hi]\n pixels = cooler.annotate(pixels, bins, replace=False)\n # pre-filter cis-only pixels to speed up calculations\n pixels = pixels[ pixels[\"chrom1\"] == pixels[\"chrom2\"] ].copy()\n\n # annotate pixels with regions at once\n # book-ended regions still get reannotated\n pixels[\"r1\"] = assign_supports(pixels, regions, suffix=\"1\")\n pixels[\"r2\"] = assign_supports(pixels, regions, suffix=\"2\")\n # select asymmetric pixels and region annotations only\n pixels = pixels.dropna(subset=[\"r1\",\"r2\"])\n pixels = pixels[ pixels[\"r1\"] != pixels[\"r2\"] ]\n\n # this could further expanded to allow for custom groupings:\n pixels[\"dist\"] = pixels[\"bin2_id\"] - pixels[\"bin1_id\"]\n for field, t in transforms.items():\n pixels[field] = t(pixels)\n\n asymm_blocks = pixels.groupby([\"r1\",\"r2\"])\n return {(int(i), int(j)): block.groupby(\"dist\")[fields].sum() for (i, j), block in asymm_blocks}\n\n\ndef diagsum_pairwise(\n clr,\n view_df,\n transforms={},\n weight_name=\"weight\",\n bad_bins=None,\n ignore_diags=2,\n chunksize=10_000_000,\n map=map,\n):\n \"\"\"\n\n Intra-chromosomal diagonal summary statistics for asymmetric blocks of\n contact matrix defined as pairwise combinations of regions in \"view_df.\n\n Note\n ----\n This is a special case of asymmetric diagonal summary statistic that is\n efficient and covers the most important practical case of inter-chromosomal\n arms \"expected\" calculation.\n\n Parameters\n ----------\n clr : cooler.Cooler\n Cooler object\n view_df : viewframe (or depreated: sequence of genomic range tuples)\n Support view_df for intra-chromosomal diagonal summation, has to\n be sorted according to the order of chromosomes in cooler.\n transforms : dict 
of str -> callable, optional\n Transformations to apply to pixels. The result will be assigned to\n a temporary column with the name given by the key. Callables take\n one argument: the current chunk of the (annotated) pixel dataframe.\n weight_name : str\n name of the balancing weight vector used to count\n \"bad\"(masked) pixels per diagonal.\n Use `None` to avoid masking \"bad\" pixels.\n bad_bins : array-like\n a list of bins to ignore per support region.\n Combines with the list of bad bins from balacning\n weight.\n chunksize : int, optional\n Size of pixel table chunks to process\n map : callable, optional\n Map functor implementation.\n\n Returns\n -------\n Dataframe of diagonal statistics for all intra-chromosomal blocks defined as\n pairwise combinations of regions in the view\n\n \"\"\"\n spans = partition(0, len(clr.pixels()), chunksize)\n fields = [\"count\"] + list(transforms.keys())\n\n # appropriate viewframe checks\n try:\n if not bioframe.is_viewframe(view_df, raise_errors=True):\n raise ValueError(\"view_df is not a valid viewframe.\")\n if not bioframe.is_contained(view_df, bioframe.make_viewframe(clr.chromsizes)):\n raise ValueError(\n \"View table is out of the bounds of chromosomes in cooler.\"\n )\n except Exception as e: # AssertionError or ValueError, see https://github.com/gfudenberg/bioframe/blob/main/bioframe/core/checks.py#L177\n warnings.warn(\n \"view_df has to be a proper viewframe from next release\",\n DeprecationWarning,\n stacklevel=2,\n )\n view_df = bioframe.make_viewframe(view_df)\n\n # view_df must be sorted, so that blocks resulting from pairwise combinations\n # are all in the upper part of the contact matrix, otherwise conflicts with pixels\n if not bioframe.is_sorted(view_df, clr.chromsizes, df_view_col = None):\n raise ValueError(\"\"\"regions in the view_df must be sorted by coordinate\n and chromosomes, order of chromosomes as in cooler\"\"\")\n\n # create pairwise combinations of regions from view_df\n all_combinations = combinations(view_df.itertuples(index=False),2)\n # keep only intra-chromosomal combinations\n cis_combinations = ((r1, r2) for r1, r2 in all_combinations if (r1[0] == r2[0]))\n # unzip regions1 regions2 defining the blocks for summary collection\n regions1, regions2 = zip(*cis_combinations)\n regions1 = pd.DataFrame( regions1 )\n regions2 = pd.DataFrame( regions2 )\n # create a table with the counts of valid pixels on each diagonal in each region:\n dtables = make_diag_tables(\n clr, regions1, regions2, weight_name=weight_name, bad_bins=bad_bins\n )\n\n # combine masking with existing transforms and add a \"count\" transform:\n if bad_bins is not None:\n # turn bad_bins into a mask of size clr.bins:\n mask_size = len(clr.bins())\n bad_bins_mask = np.ones(mask_size, dtype=int)\n bad_bins_mask[bad_bins] = 0\n #\n masked_transforms = {}\n bin1 = \"bin1_id\"\n bin2 = \"bin2_id\"\n for field in fields:\n if field in transforms:\n # combine masking and transform, minding the scope:\n t = transforms[field]\n masked_transforms[field] = (\n lambda p, t=t, m=bad_bins_mask: t(p) * m[p[bin1]] * m[p[bin2]]\n )\n else:\n # presumably field == \"count\", mind the scope as well:\n masked_transforms[field] = (\n lambda p, f=field, m=bad_bins_mask: p[f] * m[p[bin1]] * m[p[bin2]]\n )\n # substitute transforms to the masked_transforms:\n transforms = masked_transforms\n\n for dt in dtables.values():\n for field in fields:\n agg_name = f\"{field}.sum\"\n dt[agg_name] = 0\n\n job = partial(\n _diagsum_pairwise, clr, fields, transforms, 
view_df.values\n )\n results = map(job, spans)\n for result in results:\n for (i, j), agg in result.items():\n ni = view_df.loc[i, \"name\"]\n nj = view_df.loc[j, \"name\"]\n for field in fields:\n agg_name = f\"{field}.sum\"\n dtables[ni, nj][agg_name] = dtables[ni, nj][agg_name].add(\n agg[field], fill_value=0\n )\n\n # returning a dataframe for API consistency:\n result = []\n for (i, j), dtable in dtables.items():\n dtable = dtable.reset_index()\n dtable.insert(0, \"region1\", i)\n dtable.insert(1, \"region2\", j)\n if ignore_diags:\n # fill out summary fields of ignored diagonals with NaN:\n summary_fields = [f\"{field}.sum\" for field in fields]\n dtable.loc[dtable[\"diag\"] < ignore_diags, summary_fields] = np.nan\n result.append(dtable)\n return pd.concat(result).reset_index(drop=True)\n\n\ndef _diagsum_asymm(clr, fields, transforms, regions1, regions2, span):\n \"\"\"\n calculates diagonal summary for a collection of\n rectangular regions defined as combinations of\n regions1 and regions2.\n returns a dictionary of DataFrames with diagonal\n sums as values, and 0-based indexes of rectangular\n genomic regions as keys.\n \"\"\"\n lo, hi = span\n bins = clr.bins()[:]\n pixels = clr.pixels()[lo:hi]\n pixels = cooler.annotate(pixels, bins, replace=False)\n\n # this could further expanded to allow for custom groupings:\n pixels[\"dist\"] = pixels[\"bin2_id\"] - pixels[\"bin1_id\"]\n for field, t in transforms.items():\n pixels[field] = t(pixels)\n\n diag_sums = {}\n # r1 and r2 define rectangular block i:\n for i, (r1, r2) in enumerate(zip(regions1, regions2)):\n r1 = assign_supports(pixels, [r1], suffix=\"1\")\n r2 = assign_supports(pixels, [r2], suffix=\"2\")\n # calculate diag_sums on the spot to allow for overlapping blocks:\n diag_sums[i] = pixels[(r1 == r2)].groupby(\"dist\")[fields].sum()\n\n return diag_sums\n\n\ndef diagsum_asymm(\n clr,\n regions1,\n regions2,\n transforms={},\n weight_name=\"weight\",\n bad_bins=None,\n chunksize=10000000,\n map=map,\n):\n \"\"\"\n\n Diagonal summary statistics.\n\n Matchings elements of `regions1` and `regions2` define\n asymmetric rectangular blocks for calculating diagonal\n summary statistics.\n Only intra-chromosomal blocks that reside in the upper\n part of the contact matrix are supported.\n\n Note\n ----\n This functions is flexible with respect to regions, but\n is very inefficient, slow.\n\n Parameters\n ----------\n clr : cooler.Cooler\n Cooler object\n regions1 : sequence of genomic range tuples, with repeated entries or not\n \"left\"-side support regions for diagonal summation\n regions2 : sequence of genomic range tuples, with repeated entries or not\n \"right\"-side support regions for diagonal summation\n transforms : dict of str -> callable, optional\n Transformations to apply to pixels. The result will be assigned to\n a temporary column with the name given by the key. 
Callables take\n one argument: the current chunk of the (annotated) pixel dataframe.\n weight_name : str\n name of the balancing weight vector used to count\n \"bad\"(masked) pixels per diagonal.\n Use `None` to avoid masking \"bad\" pixels.\n bad_bins : array-like\n a list of bins to ignore per support region.\n Combines with the list of bad bins from balacning\n weight.\n chunksize : int, optional\n Size of pixel table chunks to process\n map : callable, optional\n Map functor implementation.\n\n Returns\n -------\n DataFrame with summary statistic of every diagonal of every block:\n region1, region2, diag, n_valid, count.sum\n\n \"\"\"\n spans = partition(0, len(clr.pixels()), chunksize)\n fields = [\"count\"] + list(transforms.keys())\n\n # Because regions1/2 may contain repeated entries, convert them to viewframes line-by-line:\n regions1 = pd.concat(\n [\n bioframe.make_viewframe([region], check_bounds=clr.chromsizes)\n for region in regions1\n ]\n ).reset_index(drop=True)\n regions2 = pd.concat(\n [\n bioframe.make_viewframe([region], check_bounds=clr.chromsizes)\n for region in regions2\n ]\n ).reset_index(drop=True)\n # Now regions1/2 contain viewframe-like dataframes that might contain repeated entries.\n\n # blocks defined by regions1/2 are not very restrictive, but they have to be\n # in the upper triangle of the contact matrix, i.e. do not cross diagonal and\n # be \"sorted\" regions1[i] < regions2[i] (according to the cooler's order):\n for region1, region2 in zip(regions1.itertuples(), regions2.itertuples()):\n block12 = pd.DataFrame([region1, region2])\n _block_cross_diagonal = bioframe.is_overlapping(block12)\n _block_in_upper = bioframe.is_sorted(block12, clr.chromsizes, df_view_col=None)\n if _block_cross_diagonal or not _block_in_upper:\n raise ValueError(\"assymetric blocks should reside in the upper triangle of the contact matrix\")\n\n\n dtables = make_diag_tables(\n clr, regions1, regions2, weight_name=weight_name, bad_bins=bad_bins\n )\n\n # combine masking with existing transforms and add a \"count\" transform:\n if bad_bins is not None:\n # turn bad_bins into a mask of size clr.bins:\n mask_size = len(clr.bins())\n bad_bins_mask = np.ones(mask_size, dtype=int)\n bad_bins_mask[bad_bins] = 0\n #\n masked_transforms = {}\n bin1 = \"bin1_id\"\n bin2 = \"bin2_id\"\n for field in fields:\n if field in transforms:\n # combine masking and transform, minding the scope:\n t = transforms[field]\n masked_transforms[field] = (\n lambda p, t=t, m=bad_bins_mask: t(p) * m[p[bin1]] * m[p[bin2]]\n )\n else:\n # presumably field == \"count\", mind the scope as well:\n masked_transforms[field] = (\n lambda p, f=field, m=bad_bins_mask: p[f] * m[p[bin1]] * m[p[bin2]]\n )\n # substitute transforms to the masked_transforms:\n transforms = masked_transforms\n\n for dt in dtables.values():\n for field in fields:\n agg_name = \"{}.sum\".format(field)\n dt[agg_name] = 0\n\n job = partial(\n _diagsum_asymm, clr, fields, transforms, regions1.values, regions2.values\n )\n results = map(job, spans)\n for result in results:\n for i, agg in result.items():\n region1 = regions1.loc[i, \"name\"]\n region2 = regions2.loc[i, \"name\"]\n for field in fields:\n agg_name = \"{}.sum\".format(field)\n dtables[region1, region2][agg_name] = dtables[region1, region2][\n agg_name\n ].add(agg[field], fill_value=0)\n\n # returning a dataframe for API consistency:\n result = []\n for (i, j), dtable in dtables.items():\n dtable = dtable.reset_index()\n dtable.insert(0, \"region1\", i)\n dtable.insert(1, 
\"region2\", j)\n result.append(dtable)\n result = pd.concat(result).reset_index(drop=True)\n return result\n\n\n\ndef _blocksum_pairwise(clr, fields, transforms, regions, span):\n \"\"\"\n calculates block summary for a collection of\n rectangular regions defined as pairwise combinations\n of all regions.\n\n Return:\n a dictionary of block-wide sums for all \"fields\":\n keys are (i,j)-like, where i and j are 0-based indexes of\n \"regions\", and a combination of (i,j) defines rectangular block.\n\n Note:\n Input pixels are assumed to be \"symmetric-upper\", and \"regions\"\n to be sorted according to the order of chromosomes in \"clr\", thus\n i < j.\n\n \"\"\"\n lo, hi = span\n bins = clr.bins()[:]\n pixels = clr.pixels()[lo:hi]\n pixels = cooler.annotate(pixels, bins, replace=False)\n\n pixels[\"r1\"] = assign_supports(pixels, regions, suffix=\"1\")\n pixels[\"r2\"] = assign_supports(pixels, regions, suffix=\"2\")\n # pre-filter asymetric pixels only\n pixels = pixels.dropna(subset=[\"r1\",\"r2\"])\n pixels = pixels[ pixels[\"r1\"] != pixels[\"r2\"] ]\n\n # apply transforms, e.g. balancing etc\n for field, t in transforms.items():\n pixels[field] = t(pixels)\n\n # pairwise-combinations of regions define asymetric pixels-blocks\n pixel_groups = pixels.groupby([\"r1\",\"r2\"])\n return {(int(i), int(j)): group[fields].sum() for (i,j), group in pixel_groups}\n\n\ndef blocksum_pairwise(\n clr,\n view_df,\n transforms={},\n weight_name=\"weight\",\n bad_bins=None,\n chunksize=1000000,\n map=map,\n):\n \"\"\"\n Summary statistics on rectangular blocks of all (trans-)pairwise combinations\n of genomic regions in the view_df (aka trans-expected).\n\n Note\n ----\n This is a special case of asymmetric block-level summary stats, that can be\n calculated very efficiently. Regions in view_df are assigned to pixels only\n once and pixels falling into a given asymmetric block i != j are summed up.\n\n Parameters\n ----------\n clr : cooler.Cooler\n Cooler object\n view_df : viewframe (or depreated: sequence of genomic range tuples)\n Support view_df defining blocks for summary calculations,\n has to be sorted according to the order of chromosomes in clr.\n transforms : dict of str -> callable, optional\n Transformations to apply to pixels. The result will be assigned to\n a temporary column with the name given by the key. 
Callables take\n one argument: the current chunk of the (annotated) pixel dataframe.\n weight_name : str\n name of the balancing weight vector used to count\n \"bad\"(masked) pixels per block.\n Use `None` to avoid masking \"bad\" pixels.\n bad_bins : array-like\n a list of bins to ignore per support region.\n Combines with the list of bad bins from balacning\n weight.\n chunksize : int, optional\n Size of pixel table chunks to process\n map : callable, optional\n Map functor implementation.\n\n Returns\n -------\n DataFrame with entries for each blocks: region1, region2, n_valid, count.sum\n\n \"\"\"\n\n # appropriate viewframe checks\n try:\n if not bioframe.is_viewframe(view_df, raise_errors=True):\n raise ValueError(\"view_df is not a valid viewframe.\")\n if not bioframe.is_contained(view_df, bioframe.make_viewframe(clr.chromsizes)):\n raise ValueError(\n \"View table is out of the bounds of chromosomes in cooler.\"\n )\n except Exception as e: # AssertionError or ValueError, see https://github.com/gfudenberg/bioframe/blob/main/bioframe/core/checks.py#L177\n warnings.warn(\n \"view_df has to be a proper viewframe from next release\",\n DeprecationWarning,\n stacklevel=2,\n )\n view_df = bioframe.make_viewframe(view_df)\n\n spans = partition(0, len(clr.pixels()), chunksize)\n fields = [\"count\"] + list(transforms.keys())\n\n # view_df must be sorted, so that blocks resulting from pairwise combinations\n # are all in the upper part of the contact matrix, otherwise conflicts with pixels\n if not bioframe.is_sorted(view_df, clr.chromsizes, df_view_col = None):\n raise ValueError(\"\"\"regions in the view_df must be sorted by coordinate\n and chromosomes, order of chromosomes as in cooler\"\"\")\n\n # create pairwise combinations of regions from view_df using\n # the standard zip(*bunch_of_tuples) unzipping procedure:\n regions1, regions2 = zip(*combinations(view_df.itertuples(index=False),2))\n regions1 = pd.DataFrame( regions1 )\n regions2 = pd.DataFrame( regions2 )\n # similar with diagonal summations, pre-generate a block_table listing\n # all of the rectangular blocks and \"n_valid\" number of pixels per each block:\n records = make_block_table(\n clr, regions1, regions2, weight_name=weight_name, bad_bins=bad_bins\n )\n\n # combine masking with existing transforms and add a \"count\" transform:\n if bad_bins is not None:\n # turn bad_bins into a mask of size clr.bins:\n mask_size = len(clr.bins())\n bad_bins_mask = np.ones(mask_size, dtype=int)\n bad_bins_mask[bad_bins] = 0\n #\n masked_transforms = {}\n bin1 = \"bin1_id\"\n bin2 = \"bin2_id\"\n for field in fields:\n if field in transforms:\n # combine masking and transform, minding the scope:\n t = transforms[field]\n masked_transforms[field] = (\n lambda p, t=t, m=bad_bins_mask: t(p) * m[p[bin1]] * m[p[bin2]]\n )\n else:\n # presumably field == \"count\", mind the scope as well:\n masked_transforms[field] = (\n lambda p, f=field, m=bad_bins_mask: p[f] * m[p[bin1]] * m[p[bin2]]\n )\n # substitute transforms to the masked_transforms:\n transforms = masked_transforms\n\n job = partial(\n _blocksum_pairwise, clr, fields, transforms, view_df.values\n )\n results = map(job, spans)\n for result in results:\n for (i,j), agg in result.items():\n for field in fields:\n agg_name = \"{}.sum\".format(field)\n s = agg[field].item()\n if not np.isnan(s):\n ni = view_df.loc[i, \"name\"]\n nj = view_df.loc[j, \"name\"]\n records[ni, nj][agg_name] += s\n\n # returning a dataframe for API consistency:\n return pd.DataFrame(\n [{\"region1\": n1, 
\"region2\": n2, **rec} for (n1, n2), rec in records.items()],\n columns=[\"region1\", \"region2\", \"n_valid\", \"count.sum\"]\n + [k + \".sum\" for k in transforms.keys()],\n )\n\n\ndef _blocksum_asymm(clr, fields, transforms, regions1, regions2, span):\n \"\"\"\n calculates block summary for a collection of\n rectangular regions defined as combinations of\n regions1 and regions2.\n returns a dictionary of with block sums as values,\n and 0-based indexes of rectangular genomic regions\n as keys.\n \"\"\"\n lo, hi = span\n bins = clr.bins()[:]\n pixels = clr.pixels()[lo:hi]\n pixels = cooler.annotate(pixels, bins, replace=False)\n\n for field, t in transforms.items():\n pixels[field] = t(pixels)\n\n block_sums = {}\n # r1 and r2 define rectangular block i:\n for i, (r1, r2) in enumerate(zip(regions1, regions2)):\n r1 = assign_supports(pixels, [r1], suffix=\"1\")\n r2 = assign_supports(pixels, [r2], suffix=\"2\")\n # calculate sum on the spot to allow for overlapping blocks:\n block_sums[i] = pixels[(r1 == r2)][fields].sum()\n\n return block_sums\n\n\ndef blocksum_asymm(\n clr,\n regions1,\n regions2,\n transforms={},\n weight_name=\"weight\",\n bad_bins=None,\n chunksize=1000000,\n map=map,\n):\n \"\"\"\n Summary statistics on rectangular blocks of genomic regions.\n Blocks defined by regions1/regions2 must reside in the upper\n part of the contact matrix.\n\n Note\n ----\n This functions is flexible with respect to regions, but\n is very inefficient, slow.\n\n Parameters\n ----------\n clr : cooler.Cooler\n Cooler object\n regions1 : sequence of genomic range tuples\n \"left\"-side support regions for diagonal summation\n regions2 : sequence of genomic range tuples\n \"right\"-side support regions for diagonal summation\n transforms : dict of str -> callable, optional\n Transformations to apply to pixels. The result will be assigned to\n a temporary column with the name given by the key. Callables take\n one argument: the current chunk of the (annotated) pixel dataframe.\n weight_name : str\n name of the balancing weight vector used to count\n \"bad\"(masked) pixels per block.\n Use `None` to avoid masking \"bad\" pixels.\n bad_bins : array-like\n a list of bins to ignore per support region.\n Combines with the list of bad bins from balacning\n weight.\n chunksize : int, optional\n Size of pixel table chunks to process\n map : callable, optional\n Map functor implementation.\n\n Returns\n -------\n DataFrame with entries for each blocks: region1, region2, n_valid, count.sum\n\n \"\"\"\n\n regions1 = pd.concat(\n [\n bioframe.make_viewframe([region], check_bounds=clr.chromsizes)\n for region in regions1\n ]\n ).reset_index(drop=True)\n regions2 = pd.concat(\n [\n bioframe.make_viewframe([region], check_bounds=clr.chromsizes)\n for region in regions2\n ]\n ).reset_index(drop=True)\n\n # blocks defined by regions1/2 are not very restrictive, but they have to be\n # in the upper triangle of the contact matrix, i.e. 
do not cross diagonal and\n # be \"sorted\" regions1[i] < regions2[i] (according to the cooler's order):\n for region1, region2 in zip(regions1.itertuples(), regions2.itertuples()):\n block12 = pd.DataFrame([region1, region2])\n _block_cross_diagonal = bioframe.is_overlapping(block12)\n _block_in_upper = bioframe.is_sorted(block12, clr.chromsizes, df_view_col=None)\n if _block_cross_diagonal or not _block_in_upper:\n raise ValueError(\"assymetric blocks should reside in the upper triangle of the contact matrix\")\n\n spans = partition(0, len(clr.pixels()), chunksize)\n fields = [\"count\"] + list(transforms.keys())\n\n # similar with diagonal summations, pre-generate a block_table listing\n # all of the rectangular blocks and \"n_valid\" number of pixels per each block:\n records = make_block_table(\n clr, regions1, regions2, weight_name=weight_name, bad_bins=bad_bins\n )\n\n # combine masking with existing transforms and add a \"count\" transform:\n if bad_bins is not None:\n # turn bad_bins into a mask of size clr.bins:\n mask_size = len(clr.bins())\n bad_bins_mask = np.ones(mask_size, dtype=int)\n bad_bins_mask[bad_bins] = 0\n #\n masked_transforms = {}\n bin1 = \"bin1_id\"\n bin2 = \"bin2_id\"\n for field in fields:\n if field in transforms:\n # combine masking and transform, minding the scope:\n t = transforms[field]\n masked_transforms[field] = (\n lambda p, t=t, m=bad_bins_mask: t(p) * m[p[bin1]] * m[p[bin2]]\n )\n else:\n # presumably field == \"count\", mind the scope as well:\n masked_transforms[field] = (\n lambda p, f=field, m=bad_bins_mask: p[f] * m[p[bin1]] * m[p[bin2]]\n )\n # substitute transforms to the masked_transforms:\n transforms = masked_transforms\n\n job = partial(\n _blocksum_asymm, clr, fields, transforms, regions1.values, regions2.values\n )\n results = map(job, spans)\n for result in results:\n for i, agg in result.items():\n for field in fields:\n agg_name = \"{}.sum\".format(field)\n s = agg[field].item()\n if not np.isnan(s):\n n1 = regions1.loc[i, \"name\"]\n n2 = regions2.loc[i, \"name\"]\n records[n1, n2][agg_name] += s\n\n # returning a dataframe for API consistency:\n return pd.DataFrame(\n [{\"region1\": n1, \"region2\": n2, **rec} for (n1, n2), rec in records.items()],\n columns=[\"region1\", \"region2\", \"n_valid\", \"count.sum\"]\n + [k + \".sum\" for k in transforms.keys()],\n )\n\n\n# user-friendly wrapper for diagsum_symm and diagsum_pairwise - part of new \"public\" API\ndef get_cis_expected(\n clr,\n view_df=None,\n intra_only=True,\n clr_weight_name=\"weight\",\n ignore_diags=2, # should default to cooler info\n chunksize=10_000_000,\n nproc=1,\n):\n \"\"\"\n Calculate average interaction frequencies as a function of genomic\n separation between pixels i.e. interaction decay with distance.\n Genomic separation aka \"dist\" is measured in the number of bins,\n and defined as an index of a diagonal on which pixels reside (bin1_id - bin2_id).\n\n Average values are reported in the columns with names {}.avg, and they\n are calculated as a ratio between a corresponding sum {}.sum and the\n total number of \"valid\" pixels on the diagonal \"n_valid\".\n\n\n Parameters\n ----------\n clr : cooler.Cooler\n Cooler object\n view_df : viewframe\n a collection of genomic intervals where expected is calculated\n otherwise expected is calculated for full chromosomes.\n intra_only: bool\n Return expected only for symmetric intra-regions defined by view_df,\n i.e. 
chromosomes, chromosomal-arms, intra-domains, etc.\n When False returns expected both for symmetric intra-regions and\n assymetric inter-regions.\n clr_weight_name : str or None\n Name of balancing weight column from the cooler to use.\n Use raw unbalanced data, when None.\n ignore_diags : int, optional\n Number of intial diagonals to exclude results\n chunksize : int, optional\n Size of pixel table chunks to process\n nproc : int, optional\n How many processes to use for calculation\n\n Returns\n -------\n DataFrame with summary statistic of every diagonal of every symmetric\n or asymmetric block:\n region1, region2, diag, n_valid, count.sum count.avg, etc\n\n \"\"\"\n\n if view_df is None:\n if not intra_only:\n raise ValueError(\"asymmetric regions has to be smaller then full chromosomes, use view_df\")\n # Generate viewframe from clr.chromsizes:\n view_df = bioframe.make_viewframe(\n [(chrom, 0, clr.chromsizes[chrom]) for chrom in clr.chromnames]\n )\n else:\n # Make sure view_df is a proper viewframe\n try:\n if not bioframe.is_viewframe(view_df, raise_errors=True):\n raise ValueError(\"view_df is not a valid viewframe.\")\n if not bioframe.is_contained(view_df, bioframe.make_viewframe(clr.chromsizes)):\n raise ValueError(\n \"View table is out of the bounds of chromosomes in cooler.\"\n )\n # add is_sorted check in the asymmetric case ...\n except Exception as e: # AssertionError or ValueError, see https://github.com/gfudenberg/bioframe/blob/main/bioframe/core/checks.py#L177\n warnings.warn(\n \"view_df has to be a proper viewframe from next release\",\n DeprecationWarning,\n stacklevel=2,\n )\n view_df = bioframe.make_viewframe(view_df)\n\n # define transforms - balanced and raw ('count') for now\n if clr_weight_name is None:\n # no transforms\n transforms = {}\n else:\n if not isinstance(clr_weight_name, str):\n raise TypeError(\"clr_weight_name has to be str that specifies name of balancing weight in clr\")\n # define balanced data transform:\n weight1 = clr_weight_name + \"1\"\n weight2 = clr_weight_name + \"2\"\n transforms = {\"balanced\": lambda p: p[\"count\"] * p[weight1] * p[weight2]}\n\n # check if clr_weight_name is in cooler\n if clr_weight_name not in clr.bins().columns:\n raise ValueError(f\"specified balancing weight {clr_weight_name} is not available in cooler\")\n\n # execution details\n if nproc > 1:\n pool = mp.Pool(nproc)\n map_ = pool.map\n else:\n map_ = map\n\n # using try-clause to close mp.Pool properly\n try:\n if intra_only:\n result = diagsum_symm(\n clr,\n view_df,\n transforms=transforms,\n weight_name=clr_weight_name,\n bad_bins=None,\n ignore_diags=ignore_diags,\n chunksize=chunksize,\n map=map_,\n )\n else:\n result = diagsum_pairwise(\n clr,\n view_df,\n transforms=transforms,\n weight_name=clr_weight_name,\n bad_bins=None,\n ignore_diags=ignore_diags,\n chunksize=chunksize,\n map=map_,\n )\n finally:\n if nproc > 1:\n pool.close()\n\n # calculate actual averages by dividing sum by n_valid:\n result[\"count.avg\"] = result[\"count.sum\"] / result[\"n_valid\"]\n for key in transforms.keys():\n result[key + \".avg\"] = result[key + \".sum\"] / result[\"n_valid\"]\n\n return result\n\n\n# user-friendly wrapper for diagsum_symm and diagsum_pairwise - part of new \"public\" API\ndef get_trans_expected(\n clr,\n view_df=None,\n clr_weight_name=\"weight\",\n chunksize=10_000_000,\n nproc=1,\n):\n \"\"\"\n Calculate average interaction frequencies for inter-chromosomal\n blocks defined as pairwise combinations of regions in view_df.\n\n An expected level of 
interactions between disjoint chromosomes\n is calculated as a simple average, as there is no notion of genomic\n separation for a pair of chromosomes and contact matrix for these\n regions looks \"flat\".\n\n Average values are reported in the columns with names {}.avg, and they\n are calculated as a ratio between a corresponding sum {}.sum and the\n total number of \"valid\" pixels on the diagonal \"n_valid\".\n\n\n Parameters\n ----------\n clr : cooler.Cooler\n Cooler object\n view_df : viewframe\n a collection of genomic intervals where expected is calculated\n otherwise expected is calculated for full chromosomes.\n clr_weight_name : str or None\n Name of balancing weight column from the cooler to use.\n Use raw unbalanced data, when None.\n chunksize : int, optional\n Size of pixel table chunks to process\n nproc : int, optional\n How many processes to use for calculation\n\n Returns\n -------\n DataFrame with summary statistic for every trans-blocks:\n region1, region2, n_valid, count.sum count.avg, etc\n\n \"\"\"\n\n if view_df is None:\n if not symmetric:\n raise ValueError(\"asymmetric regions has to be smaller then full chromosomes, use view_df\")\n # Generate viewframe from clr.chromsizes:\n view_df = bioframe.make_viewframe(\n [(chrom, 0, clr.chromsizes[chrom]) for chrom in clr.chromnames]\n )\n else:\n # Make sure view_df is a proper viewframe\n try:\n if not bioframe.is_viewframe(view_df, raise_errors=True):\n raise ValueError(\"view_df is not a valid viewframe.\")\n if not bioframe.is_contained(view_df, bioframe.make_viewframe(clr.chromsizes)):\n raise ValueError(\n \"View table is out of the bounds of chromosomes in cooler.\"\n )\n # add is_sorted check in the asymmetric case ...\n except Exception as e: # AssertionError or ValueError, see https://github.com/gfudenberg/bioframe/blob/main/bioframe/core/checks.py#L177\n warnings.warn(\n \"view_df has to be a proper viewframe from next release\",\n DeprecationWarning,\n stacklevel=2,\n )\n view_df = bioframe.make_viewframe(view_df)\n\n # view_df must be sorted, so that blocks resulting from pairwise combinations\n # are all in the upper part of the contact matrix, otherwise conflicts with pixels\n if not bioframe.is_sorted(view_df, clr.chromsizes, df_view_col = None):\n raise ValueError(\"\"\"regions in the view_df must be sorted by coordinate\n and chromosomes, order of chromosomes as in cooler\"\"\")\n\n # define transforms - balanced and raw ('count') for now\n if clr_weight_name is None:\n # no transforms\n transforms = {}\n else:\n if not isinstance(clr_weight_name, str):\n raise TypeError(\"clr_weight_name has to be str that specifies name of balancing weight in clr\")\n # define balanced data transform:\n weight1 = clr_weight_name + \"1\"\n weight2 = clr_weight_name + \"2\"\n transforms = {\"balanced\": lambda p: p[\"count\"] * p[weight1] * p[weight2]}\n\n # check if clr_weight_name is in cooler\n if clr_weight_name not in clr.bins().columns:\n raise ValueError(f\"specified balancing weight {clr_weight_name} is not available in cooler\")\n\n # execution details\n if nproc > 1:\n pool = mp.Pool(nproc)\n map_ = pool.map\n else:\n map_ = map\n\n # using try-clause to close mp.Pool properly\n try:\n result = blocksum_pairwise(\n clr,\n view_df,\n transforms=transforms,\n weight_name=clr_weight_name,\n bad_bins=None,\n chunksize=chunksize,\n map=map_,\n )\n finally:\n if nproc > 1:\n pool.close()\n\n # keep only trans interactions for the user-friendly function:\n _name_to_region = view_df.set_index(\"name\")\n _r1_chroms 
= _name_to_region.loc[result[\"region1\"]][\"chrom\"].values\n _r2_chroms = _name_to_region.loc[result[\"region2\"]][\"chrom\"].values\n # trans-data only:\n result = result.loc[_r1_chroms != _r2_chroms].reset_index(drop=True)\n\n # calculate actual averages by dividing sum by n_valid:\n result[\"count.avg\"] = result[\"count.sum\"] / result[\"n_valid\"]\n for key in transforms.keys():\n result[key + \".avg\"] = result[key + \".sum\"] / result[\"n_valid\"]\n\n return result\n\n\ndef diagsum_from_array(\n A, counts=None, *, offset=0, ignore_diags=2, filter_counts=False, region_name=None\n):\n \"\"\"\n Calculates Open2C-formatted expected for a dense submatrix of a whole\n genome contact map.\n\n Parameters\n ----------\n A : 2D array\n Normalized submatrix to calculate expected (``balanced.sum``).\n counts : 2D array or None, optional\n Corresponding raw contacts to populate ``count.sum``.\n offset : int or (int, int)\n i- and j- bin offsets of A relative to the parent matrix. If a single\n offset is provided it is applied to both axes.\n ignore_diags : int, optional\n Number of initial diagonals to ignore.\n filter_counts : bool, optional\n Apply the validity mask from balanced matrix to the raw one. Ignored\n when counts is None.\n region_name : str or (str, str), optional\n A custom region name or pair of region names. If provided, region\n columns will be included in the output.\n\n Notes\n -----\n For regions that cross the main diagonal of the whole-genome contact map,\n the lower triangle \"overhang\" is ignored.\n\n Examples\n --------\n >>> A = clr.matrix()[:, :] # whole genome balanced\n >>> C = clr.matrix(balance=False)[:, :] # whole genome raw\n\n Using only balanced data:\n >>> exp = diagsum_from_array(A)\n\n Using balanced and raw counts:\n >>> exp1 = diagsum_from_array(A, C)\n\n Using an off-diagonal submatrix\n >>> exp2 = diagsum_from_array(A[:50, 50:], offset=(0, 50))\n\n \"\"\"\n if isinstance(offset, (list, tuple)):\n offset1, offset2 = offset\n else:\n offset1, offset2 = offset, offset\n if isinstance(region_name, (list, tuple)):\n region1, region2 = region_name\n elif region_name is not None:\n region1, region2 = region_name, region_name\n A = np.asarray(A, dtype=float)\n if counts is not None:\n counts = np.asarray(counts)\n if counts.shape != A.shape:\n raise ValueError(\"`counts` must have the same shape as `A`.\")\n\n # Compute validity mask for bins on each axis\n invalid_mask1 = np.sum(np.isnan(A), axis=1) == A.shape[0]\n invalid_mask2 = np.sum(np.isnan(A), axis=0) == A.shape[1]\n\n A[~np.isfinite(A)] = 0\n\n # Prepare an indicator matrix of \"diagonals\" (toeplitz) where the lower\n # triangle diagonals wrt the parent matrix are negative.\n # The \"outer difference\" operation below produces a toeplitz matrix.\n lo1, hi1 = offset1, offset1 + A.shape[0]\n lo2, hi2 = offset2, offset2 + A.shape[1]\n ar1 = np.arange(lo1, hi1, dtype=np.int32)\n ar2 = np.arange(lo2, hi2, dtype=np.int32)\n diag_indicator = ar2[np.newaxis, :] - ar1[:, np.newaxis]\n diag_lo = max(lo2 - hi1 + 1, 0)\n diag_hi = hi2 - lo1\n\n # Apply the validity mask to the indicator matrix.\n # Both invalid and lower triangle pixels will now have negative indicator values.\n D = diag_indicator.copy()\n D[invalid_mask1, :] = -1\n D[:, invalid_mask2] = -1\n # Drop invalid and lower triangle pixels and flatten.\n mask_per_pixel = D >= 0\n A_flat = A[mask_per_pixel]\n D_flat = D[mask_per_pixel]\n\n # Group by diagonal and aggregate the number of valid pixels and pixel values.\n diagonals = np.arange(diag_lo, 
diag_hi, dtype=int)\n n_valid = np.bincount(D_flat, minlength=diag_hi - diag_lo)[diag_lo:]\n balanced_sum = np.bincount(D_flat, weights=A_flat, minlength=diag_hi - diag_lo)[\n diag_lo:\n ]\n # Mask to ignore initial diagonals.\n mask_per_diag = diagonals >= ignore_diags\n\n # Populate the output dataframe.\n # Include region columns if region names are provided.\n # Include raw pixel counts for each diag if counts is provided.\n df = pd.DataFrame({\"diag\": diagonals, \"n_valid\": n_valid})\n\n if region_name is not None:\n df.insert(0, \"region1\", region1)\n df.insert(1, \"region2\", region2)\n\n if counts is not None:\n # Either count everything or apply the same filtering as A.\n if filter_counts:\n C_flat = counts[mask_per_pixel]\n count_sum = np.bincount(\n D_flat, weights=C_flat, minlength=diag_hi - diag_lo\n )[diag_lo:]\n else:\n mask_per_pixel = diag_indicator >= 0\n D_flat = diag_indicator[mask_per_pixel]\n C_flat = counts[mask_per_pixel]\n count_sum = np.bincount(\n D_flat, weights=C_flat, minlength=diag_hi - diag_lo\n )[diag_lo:]\n count_sum[~mask_per_diag] = np.nan\n df[\"count.sum\"] = count_sum\n\n balanced_sum[~mask_per_diag] = np.nan\n df[\"balanced.sum\"] = balanced_sum\n\n return df\n\n\ndef logbin_expected(\n exp,\n summary_name=\"balanced.sum\",\n bins_per_order_magnitude=10,\n bin_layout=\"fixed\",\n smooth=lambda x: numutils.robust_gauss_filter(x, 2),\n min_nvalid=200,\n min_count=50,\n):\n \"\"\"\n Logarithmically bins expected as produced by diagsum_symm method.\n\n Parameters\n ----------\n exp : DataFrame\n DataFrame produced by diagsum_symm\n\n summary_name : str, optional\n Name of the column of exp-DataFrame to use as a diagonal summary.\n Default is \"balanced.sum\".\n\n bins_per_order_magnitude : int, optional\n How many bins per order of magnitude. Default of 10 has a ratio of\n neighboring bins of about 1.25\n\n bin_layout : \"fixed\", \"longest_region\", or array\n \"fixed\" means that bins are exactly the same for different datasets,\n and only depend on bins_per_order_magnitude\n\n \"longest_region\" means that the last bin will end at size of the\n longest region.\n GOOD: the last bin will have as much data as possible.\n BAD: bin edges will end up different for different datasets, you\n can't divide them by each other\n\n array: provide your own bin edges. Can be of any size, and end at any\n value. Bins exceeding the size of the largest region will be simply\n ignored.\n\n smooth : callable\n A smoothing function to be applied to log(P(s)) and log(x)\n before calculating P(s) slopes for by-region data\n\n min_nvalid : int\n For each region, throw out bins (log-spaced) that have less than\n min_nvalid valid pixels\n This will ensure that each entree in Pc_by_region has at least n_valid\n valid pixels\n Don't set it to zero, or it will introduce bugs. Setting it to 1 is OK,\n but not recommended.\n\n min_count : int\n If counts are found in the data, then for each region, throw out bins\n (log-spaced)\n that have more than min_counts of counts.sum (raw Hi-C counts).\n This will ensure that each entree in Pc_by_region has at least\n min_count raw Hi-C reads\n\n Returns\n -------\n Pc : DataFrame\n dataframe of contact probabilities and spread across regions\n slope : ndarray\n slope of Pc(s) on a log-log plot and spread across regions\n bins : ndarray\n an array of bin edges used for calculating P(s)\n\n Notes\n -----\n For main Pc and slope, the algorithm is the following\n\n 1. concatenate all the expected for all regions into a large dataframe.\n 2. 
create logarithmically-spaced bins of diagonals (or use provided)\n 3. pool together n_valid and balanced.sum for each region and for each bin\n 4. calculate the average diagonal for each bucket, weighted by n_valid\n 5. divide balanced.sum by n_valid after summing for each bucket (not before)\n 6. calculate the slope in log space (for each region)\n\n X values are not midpoints of bins\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n In step 4, we calculate the average diag index weighted by n_valid. This\n seems counter-intuitive, but it actually is justified.\n\n Let's take the worst case scenario. Let there be a bin from 40MB to 44MB.\n Let there be a region that is exactly 41 MB long. The midpoint of the bin\n is at 42MB. But the only part of this region belonging to this bin is\n actually between 40MB and 41MB. Moreover, the \"average\" read in this\n little triangle of the heatmap is actually not coming even from 40.5 MB\n because the triangle is getting narrower towards 41MB. The center of mass\n of a triangle is 1/3 of the way up, or 40.33 MB. So an average read for\n this region in this bin is coming from 40.33.\n\n Consider the previous bin, say, from 36MB to 40MB. The heatmap there is a\n trapezoid with a long side of 5MB, the short side of 1MB, and height of\n 4MB. The center of mass of this trapezoid is at 36 + 14/9 = 37.55MB,\n and not at 38MB. So the last bin center is definitely mis-assigned, and\n the second-to-last bin center is off by some 25%. This would lead to a 25%\n error of the P(s) slope estimated between the third-to-last and\n second-to-last bin.\n\n In presence of missing bins, this all becomes more complex, but this kind\n of averaging should take care of everything. It follows a general\n principle: when averaging the y values with some weights, one needs to\n average the x values with the same weights. The y values here are being\n added together, so per-diag means are effectively averaged with the weight\n of n_valid. Therefore, the x values (diag) should be averaged with the\n same weights.\n\n Other considerations\n ~~~~~~~~~~~~~~~~~~~~\n Steps #3 and #5 are important because the ratio of sums does not equal to\n the sum of ratios, and the former is more correct (the latter is more\n susceptible to noise). It is generally better to divide at the very end,\n rather than dividing things for each diagonal.\n\n Here we divide at the end twice: first we divide balanced.sum by n_valid\n for each region, then we effectively multiply it back up and divide it for\n each bin when combining different regions (see weighted average in the\n next function).\n\n Smoothing P(s) for the slope\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n For calcuating the slope, we apply smoothing to the P(s) to ensure the\n slope is not too noisy. There are several caveats here: the P(s) has to\n be smoothed in logspace, and both P and s have to be smoothed. 
It is\n discussed in detail here\n\n https://gist.github.com/mimakaev/4becf1310ba6ee07f6b91e511c531e73\n\n Examples\n --------\n For example, see this gist: https://gist.github.com/mimakaev/e9117a7fcc318e7904702eba5b47d9e6\n\n \"\"\"\n from cooltools.lib.numutils import logbins\n\n raw_summary_name = \"count.sum\"\n exp_summary_base, *_ = summary_name.split(\".\")\n Pc_name = f\"{exp_summary_base}.avg\"\n diag_name = \"diag\"\n diag_avg_name = f\"{diag_name}.avg\"\n\n exp = exp[~pd.isna(exp[summary_name])].copy()\n exp[diag_avg_name] = exp.pop(diag_name) # \"average\" or weighted diagonals\n diagmax = exp[diag_avg_name].max()\n\n # create diag_bins based on chosen layout:\n if bin_layout == \"fixed\":\n diag_bins = numutils.persistent_log_bins(\n 10, bins_per_order_magnitude=bins_per_order_magnitude\n )\n elif bin_layout == \"longest_region\":\n diag_bins = logbins(1, diagmax + 1, ratio=10 ** (1 / bins_per_order_magnitude))\n else:\n diag_bins = bin_layout\n\n if diag_bins[-1] < diagmax:\n raise ValueError(\n \"Genomic separation bins end is less than the size of the largest region\"\n )\n\n # assign diagonals in exp DataFrame to diag_bins, i.e. give them ids:\n exp[\"diag_bin_id\"] = (\n np.searchsorted(diag_bins, exp[diag_avg_name], side=\"right\") - 1\n )\n exp = exp[exp[\"diag_bin_id\"] >= 0]\n\n # constructing expected grouped by region\n byReg = exp.copy()\n\n # this averages diag_avg with the weight equal to n_valid, and sums everything else\n byReg[diag_avg_name] *= byReg[\"n_valid\"]\n byRegExp = byReg.groupby([\"region1\", \"region2\", \"diag_bin_id\"]).sum()\n byRegExp[diag_avg_name] /= byRegExp[\"n_valid\"]\n\n byRegExp = byRegExp.reset_index()\n byRegExp = byRegExp[byRegExp[\"n_valid\"] > min_nvalid] # filtering by n_valid\n byRegExp[Pc_name] = byRegExp[summary_name] / byRegExp[\"n_valid\"]\n byRegExp = byRegExp[byRegExp[Pc_name] > 0] # drop diag_bins with 0 counts\n if min_count:\n if raw_summary_name in byRegExp:\n byRegExp = byRegExp[byRegExp[raw_summary_name] > min_count]\n else:\n warnings.warn(\n RuntimeWarning(f\"{raw_summary_name} not found in the input expected\")\n )\n\n byRegExp[\"diag_bin_start\"] = diag_bins[byRegExp[\"diag_bin_id\"].values]\n byRegExp[\"diag_bin_end\"] = diag_bins[byRegExp[\"diag_bin_id\"].values + 1] - 1\n\n # now calculate P(s) derivatives aka slopes per region\n byRegDer = []\n for (reg1, reg2), subdf in byRegExp.groupby([\"region1\", \"region2\"]):\n subdf = subdf.sort_values(\"diag_bin_id\")\n valid = np.minimum(subdf[\"n_valid\"].values[:-1], subdf[\"n_valid\"].values[1:])\n mids = np.sqrt(\n subdf[diag_avg_name].values[:-1] * subdf[diag_avg_name].values[1:]\n )\n slope = np.diff(smooth(np.log(subdf[Pc_name].values))) / np.diff(\n smooth(np.log(subdf[diag_avg_name].values))\n )\n newdf = pd.DataFrame(\n {\n diag_avg_name: mids,\n \"slope\": slope,\n \"n_valid\": valid,\n \"diag_bin_id\": subdf[\"diag_bin_id\"].values[:-1],\n }\n )\n newdf[\"region1\"] = reg1\n newdf[\"region2\"] = reg2\n byRegDer.append(newdf)\n byRegDer = pd.concat(byRegDer).reset_index(drop=True)\n return byRegExp, byRegDer, diag_bins[: byRegExp[\"diag_bin_id\"].max() + 2]\n\n\ndef combine_binned_expected(\n binned_exp,\n binned_exp_slope=None,\n Pc_name=\"balanced.avg\",\n der_smooth_function_combined=lambda x: numutils.robust_gauss_filter(x, 1.3),\n spread_funcs=\"logstd\",\n spread_funcs_slope=\"std\",\n minmax_drop_bins=2,\n concat_original=False,\n):\n \"\"\"\n Combines by-region log-binned expected and slopes into genome-wide averages,\n handling small chromosomes 
and \"corners\" in an optimal fashion, robust to\n outliers. Calculates spread of by-chromosome P(s) and slopes, also in an optimal fashion.\n\n Parameters\n ----------\n binned_exp: dataframe\n binned expected as outputed by logbin_expected\n\n binned_exp_slope : dataframe or None\n If provided, estimates spread of slopes.\n Is necessary if concat_original is True\n\n Pc_name : str\n Name of the column with the probability of contacts.\n Defaults to \"balanced.avg\".\n\n der_smooth_function_combined : callable\n A smoothing function for calculating slopes on combined data\n\n spread_funcs: \"minmax\", \"std\", \"logstd\" or a function (see below)\n A way to estimate the spread of the P(s) curves between regions.\n * \"minmax\" - use the minimum/maximum of by-region P(s)\n * \"std\" - use weighted standard deviation of P(s) curves (may produce negative results)\n * \"logstd\" (recommended) weighted standard deviation in logspace (as seen on the plot)\n\n spread_funcs_slope: \"minmax\", \"std\" or a funciton\n Similar to spread_func, but for slopes rather than P(s)\n\n concat_original: bool (default = False)\n Append original dataframe, and put combined under region \"combined\"\n\n Returns\n -------\n scal, slope_df\n\n Notes\n -----\n This function does not calculate errorbars. The spread is not the deviation of the mean,\n and rather is representative of variability between chromosomes.\n\n\n Calculating errorbars/spread\n\n 1. Take all by-region P(s)\n 2. For \"minmax\", remove the last var_drop_last_bins bins for each region\n (by default two. They are most noisy and would inflate the\n spread for the last points). Min/max are most susceptible to this.\n 3. Groupby P(s) by region\n 4. Apply spread_funcs to the pd.GroupBy object. Options are:\n * minimum and maximum (\"minmax\"),\n * weighted standard deviation (\"std\"),\n * weighted standard deviation in logspace (\"logstd\", default) or two custom functions\n We do not remove the last bins for \"std\" / \"logstd\" because we are\n doing weighted standard deviation. Therefore, noisy \"ends\" of regions\n would contribute very little to this.\n 5. Append them to the P(s) for the same bin.\n\n As a result, by for minmax, we do not estimate spread for the last\n two bins. This is because there are often very few chromosomal arms there,\n and different arm measurements are noisy. For other methods, we do\n estimate the spread there, and noisy last bins are taken care of by the\n weighted standard deviation. 
However, the spread in the last bins may be\n noisy, and may become a 0 if only one region is contributing to the last\n pixel.\n \"\"\"\n diag_avg_name = \"diag.avg\"\n # combine pre-logbinned expecteds\n scal = numutils.weighted_groupby_mean(\n binned_exp[\n [\n Pc_name,\n \"diag_bin_id\",\n \"n_valid\",\n diag_avg_name,\n \"diag_bin_start\",\n \"diag_bin_end\",\n ]\n ],\n group_by=\"diag_bin_id\",\n weigh_by=\"n_valid\",\n mode=\"mean\",\n )\n\n # for every diagonal calculate the spread of expected\n if spread_funcs == \"minmax\":\n byRegVar = binned_exp.copy()\n byRegVar = byRegVar.loc[\n byRegVar.index.difference(\n byRegVar.groupby([\"region1\", \"region2\"])[\"n_valid\"].tail(minmax_drop_bins).index\n )\n ]\n low_err = byRegVar.groupby(\"diag_bin_id\")[Pc_name].min()\n high_err = byRegVar.groupby(\"diag_bin_id\")[Pc_name].max()\n elif spread_funcs == \"std\":\n var = numutils.weighted_groupby_mean(\n binned_exp[[Pc_name, \"diag_bin_id\", \"n_valid\"]],\n group_by=\"diag_bin_id\",\n weigh_by=\"n_valid\",\n mode=\"std\",\n )[Pc_name]\n low_err = scal[Pc_name] - var\n high_err = scal[Pc_name] + var\n elif spread_funcs == \"logstd\":\n var = numutils.weighted_groupby_mean(\n binned_exp[[Pc_name, \"diag_bin_id\", \"n_valid\"]],\n group_by=\"diag_bin_id\",\n weigh_by=\"n_valid\",\n mode=\"logstd\",\n )[Pc_name]\n low_err = scal[Pc_name] / var\n high_err = scal[Pc_name] * var\n else:\n low_err, high_err = spread_funcs(binned_exp, scal)\n\n scal[\"low_err\"] = low_err\n scal[\"high_err\"] = high_err\n\n # re-calculate slope of the combined expected (log,smooth,diff)\n f = der_smooth_function_combined\n slope = np.diff(f(np.log(scal[Pc_name].values))) / np.diff(\n f(np.log(scal[diag_avg_name].values))\n )\n valid = np.minimum(scal[\"n_valid\"].values[:-1], scal[\"n_valid\"].values[1:])\n mids = np.sqrt(scal[diag_avg_name].values[:-1] * scal[diag_avg_name].values[1:])\n slope_df = pd.DataFrame(\n {\n diag_avg_name: mids,\n \"slope\": slope,\n \"n_valid\": valid,\n \"diag_bin_id\": scal.index.values[:-1],\n }\n )\n slope_df = slope_df.set_index(\"diag_bin_id\")\n\n # when pre-region slopes are provided, calculate spread of slopes\n if binned_exp_slope is not None:\n if spread_funcs_slope == \"minmax\":\n byRegDer = binned_exp_slope.copy()\n byRegDer = byRegDer.loc[\n byRegDer.index.difference(\n byRegDer.groupby([\"region1\", \"region2\"])[\"n_valid\"].tail(minmax_drop_bins).index\n )\n ]\n low_err = byRegDer.groupby(\"diag_bin_id\")[\"slope\"].min()\n high_err = byRegDer.groupby(\"diag_bin_id\")[\"slope\"].max()\n elif spread_funcs_slope == \"std\":\n var = numutils.weighted_groupby_mean(\n binned_exp_slope[[\"slope\", \"diag_bin_id\", \"n_valid\"]],\n group_by=\"diag_bin_id\",\n weigh_by=\"n_valid\",\n mode=\"std\",\n )[\"slope\"]\n low_err = slope_df[\"slope\"] - var\n high_err = slope_df[\"slope\"] + var\n\n else:\n low_err, high_err = spread_funcs_slope(binned_exp_slope, scal)\n slope_df[\"low_err\"] = low_err\n slope_df[\"high_err\"] = high_err\n\n slope_df = slope_df.reset_index()\n scal = scal.reset_index()\n\n # append \"combined\" expected/slopes to the input DataFrames (not in-place)\n if concat_original:\n scal[\"region\"] = \"combined\"\n slope_df[\"region\"] = \"combined\"\n scal = pd.concat([scal, binned_exp], sort=False).reset_index(drop=True)\n slope_df = pd.concat([slope_df, binned_exp_slope], sort=False).reset_index(\n drop=True\n )\n\n return scal, slope_df\n\n\ndef interpolate_expected(\n expected,\n binned_expected,\n columns=[\"balanced.avg\"],\n kind=\"quadratic\",\n 
by_region=True,\n extrapolate_small_s=False,\n):\n \"\"\"\n Interpolates expected to match binned_expected.\n Basically, this function smoothes the original expected according to the logbinned expected.\n It could either use by-region expected (each region will have different expected)\n or use combined binned_expected (all regions will have the same expected after that)\n\n Such a smoothed expected should be used to calculate observed/expected for downstream analysis.\n\n Parameters\n ----------\n expected: pd.DataFrame\n expected as returned by diagsum_symm\n binned_expected: pd.DataFrame\n binned expected (combined or not)\n columns: list[str] (optional)\n Columns to interpolate. Must be present in binned_expected,\n but not necessarily in expected.\n kind: str (optional)\n Interpolation type, according to scipy.interpolate.interp1d\n by_region: bool or str (optional)\n Whether to do interpolation by-region (default=True).\n False means use one expected for all regions (use entire table).\n If a region name is provided, expected for that region is used.\n\n \"\"\"\n\n exp_int = expected.copy()\n gr_exp = exp_int.groupby([\"region1\", \"region2\"]) # groupby original expected by region\n\n if by_region is not False and ((\"region1\" not in binned_expected) or (\"region2\" not in binned_expected)):\n warnings.warn(\"Region columns not found, assuming combined expected\")\n by_region = False\n\n if by_region is True:\n # groupby expected\n gr_binned = binned_expected.groupby([\"region1\", \"region2\"])\n elif by_region is not False:\n # extract a region that we want to use\n binned_expected = binned_expected[binned_expected[\"region1\"] == by_region]\n\n if by_region is not True:\n # check that we have no duplicates in expected\n assert len(binned_expected[\"diag_bin_id\"].drop_duplicates()) == len(\n binned_expected\n )\n\n interp_dfs = []\n\n for (reg1, reg2), df_orig in gr_exp:\n if by_region is True: # use binned expected for this region\n if (reg1, reg2) not in gr_binned.groups:\n continue\n subdf = gr_binned.get_group((reg1, reg2))\n else:\n subdf = binned_expected\n\n diag_orig = df_orig[\"diag\"].values\n diag_mid = (subdf[\"diag_bin_start\"] + subdf[\"diag_bin_end\"]) / 2\n interp_df = pd.DataFrame(\n index=df_orig.index\n ) # df to put interpolated values in\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n for colname in columns: # interpolate each column\n value_column = subdf[colname]\n interp = interp1d(\n np.log(diag_mid),\n np.log(value_column),\n kind=kind,\n fill_value=\"extrapolate\",\n )\n interp_df[colname] = np.exp(interp(np.log(diag_orig)))\n if not extrapolate_small_s:\n mask = diag_orig >= subdf[\"diag_bin_start\"].min()\n interp_df = interp_df.iloc[mask]\n interp_dfs.append(interp_df)\n interp_df = pd.concat(interp_dfs)\n for i in interp_df.columns:\n exp_int[i] = interp_df[i]\n return exp_int\n"
] | [
[
"numpy.minimum",
"numpy.sqrt",
"numpy.asarray",
"pandas.DataFrame",
"numpy.searchsorted",
"pandas.isna",
"numpy.histogram",
"numpy.unique",
"numpy.arange",
"numpy.count_nonzero",
"numpy.zeros",
"pandas.concat",
"numpy.log",
"scipy.signal.fftconvolve",
"numpy.isnan",
"numpy.errstate",
"numpy.maximum",
"numpy.isfinite",
"numpy.ones",
"numpy.bincount"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
vferat/mne-python | [
"54e07b3257ee44ae28c5253f47ef73909ef23bfd",
"54e07b3257ee44ae28c5253f47ef73909ef23bfd",
"54e07b3257ee44ae28c5253f47ef73909ef23bfd",
"54e07b3257ee44ae28c5253f47ef73909ef23bfd",
"54e07b3257ee44ae28c5253f47ef73909ef23bfd",
"54e07b3257ee44ae28c5253f47ef73909ef23bfd"
] | [
"mne/forward/forward.py",
"examples/datasets/plot_limo_data.py",
"mne/io/ctf/res4.py",
"mne/time_frequency/tests/test_stft.py",
"tutorials/preprocessing/plot_50_artifact_correction_ssp.py",
"examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py"
] | [
"# Authors: Matti Hämäläinen <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Martin Luessi <[email protected]>\n#\n# License: BSD (3-clause)\n\n# The computations in this code were primarily derived from Matti Hämäläinen's\n# C code.\n\nfrom time import time\nfrom copy import deepcopy\nimport re\n\nimport numpy as np\nfrom scipy import linalg, sparse\n\nimport shutil\nimport os\nfrom os import path as op\nimport tempfile\n\nfrom ..io import RawArray, Info\nfrom ..io.constants import FIFF\nfrom ..io.open import fiff_open\nfrom ..io.tree import dir_tree_find\nfrom ..io.tag import find_tag, read_tag\nfrom ..io.matrix import (_read_named_matrix, _transpose_named_matrix,\n write_named_matrix)\nfrom ..io.meas_info import read_bad_channels, write_info\nfrom ..io.pick import (pick_channels_forward, pick_info, pick_channels,\n pick_types)\nfrom ..io.write import (write_int, start_block, end_block,\n write_coord_trans, write_ch_info, write_name_list,\n write_string, start_file, end_file, write_id)\nfrom ..io.base import BaseRaw\nfrom ..evoked import Evoked, EvokedArray\nfrom ..epochs import BaseEpochs\nfrom ..source_space import (_read_source_spaces_from_tree,\n find_source_space_hemi, _set_source_space_vertices,\n _write_source_spaces_to_fid)\nfrom ..source_estimate import _BaseSourceEstimate\nfrom ..transforms import (transform_surface_to, invert_transform,\n write_trans)\nfrom ..utils import (_check_fname, get_subjects_dir, has_mne_c, warn,\n run_subprocess, check_fname, logger, verbose, fill_doc,\n _validate_type, _check_compensation_grade, _check_option,\n _check_stc_units)\nfrom ..label import Label\nfrom ..fixes import einsum\n\n\nclass Forward(dict):\n \"\"\"Forward class to represent info from forward solution.\"\"\"\n\n def copy(self):\n \"\"\"Copy the Forward instance.\"\"\"\n return Forward(deepcopy(self))\n\n def __repr__(self):\n \"\"\"Summarize forward info instead of printing all.\"\"\"\n entr = '<Forward'\n\n nchan = len(pick_types(self['info'], meg=True, eeg=False, exclude=[]))\n entr += ' | ' + 'MEG channels: %d' % nchan\n nchan = len(pick_types(self['info'], meg=False, eeg=True, exclude=[]))\n entr += ' | ' + 'EEG channels: %d' % nchan\n\n src_types = np.array([src['type'] for src in self['src']])\n if (src_types == 'surf').all():\n entr += (' | Source space: Surface with %d vertices'\n % self['nsource'])\n elif (src_types == 'vol').all():\n entr += (' | Source space: Volume with %d grid points'\n % self['nsource'])\n elif (src_types == 'discrete').all():\n entr += (' | Source space: Discrete with %d dipoles'\n % self['nsource'])\n else:\n count_string = ''\n if (src_types == 'surf').any():\n count_string += '%d surface, ' % (src_types == 'surf').sum()\n if (src_types == 'vol').any():\n count_string += '%d volume, ' % (src_types == 'vol').sum()\n if (src_types == 'discrete').any():\n count_string += '%d discrete, ' \\\n % (src_types == 'discrete').sum()\n count_string = count_string.rstrip(', ')\n entr += (' | Source space: Mixed (%s) with %d vertices'\n % (count_string, self['nsource']))\n\n if self['source_ori'] == FIFF.FIFFV_MNE_UNKNOWN_ORI:\n entr += (' | Source orientation: Unknown')\n elif self['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:\n entr += (' | Source orientation: Fixed')\n elif self['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI:\n entr += (' | Source orientation: Free')\n\n entr += '>'\n\n return entr\n\n\ndef _block_diag(A, n):\n \"\"\"Construct a block diagonal from a packed structure.\n\n You have to try it on a matrix to see what it's doing.\n\n If A 
is not sparse, then returns a sparse block diagonal \"bd\",\n diagonalized from the\n elements in \"A\".\n \"A\" is ma x na, comprising bdn=(na/\"n\") blocks of submatrices.\n Each submatrix is ma x \"n\", and these submatrices are\n placed down the diagonal of the matrix.\n\n If A is already sparse, then the operation is reversed, yielding\n a block\n row matrix, where each set of n columns corresponds to a block element\n from the block diagonal.\n\n Parameters\n ----------\n A : array\n The matrix\n n : int\n The block size\n Returns\n -------\n bd : sparse matrix\n The block diagonal matrix\n \"\"\"\n if sparse.issparse(A): # then make block sparse\n raise NotImplementedError('sparse reversal not implemented yet')\n ma, na = A.shape\n bdn = na // int(n) # number of submatrices\n\n if na % n > 0:\n raise ValueError('Width of matrix must be a multiple of n')\n\n tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)\n tmp = np.tile(tmp, (1, n))\n ii = tmp.ravel()\n\n jj = np.arange(na, dtype=np.int)[None, :]\n jj = jj * np.ones(ma, dtype=np.int)[:, None]\n jj = jj.T.ravel() # column indices foreach sparse bd\n\n bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()\n\n return bd\n\n\ndef _inv_block_diag(A, n):\n \"\"\"Construct an inverse block diagonal from a packed structure.\n\n You have to try it on a matrix to see what it's doing.\n\n \"A\" is ma x na, comprising bdn=(na/\"n\") blocks of submatrices.\n Each submatrix is ma x \"n\", and the inverses of these submatrices\n are placed down the diagonal of the matrix.\n\n Parameters\n ----------\n A : array\n The matrix.\n n : int\n The block size.\n\n Returns\n -------\n bd : sparse matrix\n The block diagonal matrix.\n \"\"\"\n ma, na = A.shape\n bdn = na // int(n) # number of submatrices\n\n if na % n > 0:\n raise ValueError('Width of matrix must be a multiple of n')\n\n # modify A in-place to invert each sub-block\n A = A.copy()\n for start in range(0, na, 3):\n # this is a view\n A[:, start:start + 3] = linalg.inv(A[:, start:start + 3])\n\n tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma)\n tmp = np.tile(tmp, (1, n))\n ii = tmp.ravel()\n\n jj = np.arange(na, dtype=np.int)[None, :]\n jj = jj * np.ones(ma, dtype=np.int)[:, None]\n jj = jj.T.ravel() # column indices foreach sparse bd\n\n bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc()\n\n return bd\n\n\ndef _get_tag_int(fid, node, name, id_):\n \"\"\"Check we have an appropriate tag.\"\"\"\n tag = find_tag(fid, node, id_)\n if tag is None:\n fid.close()\n raise ValueError(name + ' tag not found')\n return int(tag.data)\n\n\ndef _read_one(fid, node):\n \"\"\"Read all interesting stuff for one forward solution.\"\"\"\n # This function assumes the fid is open as a context manager\n if node is None:\n return None\n\n one = Forward()\n one['source_ori'] = _get_tag_int(fid, node, 'Source orientation',\n FIFF.FIFF_MNE_SOURCE_ORIENTATION)\n one['coord_frame'] = _get_tag_int(fid, node, 'Coordinate frame',\n FIFF.FIFF_MNE_COORD_FRAME)\n one['nsource'] = _get_tag_int(fid, node, 'Number of sources',\n FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS)\n one['nchan'] = _get_tag_int(fid, node, 'Number of channels',\n FIFF.FIFF_NCHAN)\n try:\n one['sol'] = _read_named_matrix(fid, node,\n FIFF.FIFF_MNE_FORWARD_SOLUTION,\n transpose=True)\n one['_orig_sol'] = one['sol']['data'].copy()\n except Exception:\n logger.error('Forward solution data not found')\n raise\n\n try:\n fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD\n one['sol_grad'] = _read_named_matrix(fid, node, fwd_type,\n 
transpose=True)\n one['_orig_sol_grad'] = one['sol_grad']['data'].copy()\n except Exception:\n one['sol_grad'] = None\n\n if one['sol']['data'].shape[0] != one['nchan'] or \\\n (one['sol']['data'].shape[1] != one['nsource'] and\n one['sol']['data'].shape[1] != 3 * one['nsource']):\n raise ValueError('Forward solution matrix has wrong dimensions')\n\n if one['sol_grad'] is not None:\n if one['sol_grad']['data'].shape[0] != one['nchan'] or \\\n (one['sol_grad']['data'].shape[1] != 3 * one['nsource'] and\n one['sol_grad']['data'].shape[1] != 3 * 3 * one['nsource']):\n raise ValueError('Forward solution gradient matrix has '\n 'wrong dimensions')\n\n return one\n\n\ndef _read_forward_meas_info(tree, fid):\n \"\"\"Read light measurement info from forward operator.\n\n Parameters\n ----------\n tree : tree\n FIF tree structure.\n fid : file id\n The file id.\n\n Returns\n -------\n info : instance of Info\n The measurement info.\n \"\"\"\n # This function assumes fid is being used as a context manager\n info = Info()\n\n # Information from the MRI file\n parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n if len(parent_mri) == 0:\n raise ValueError('No parent MEG information found in operator')\n parent_mri = parent_mri[0]\n\n tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)\n info['mri_file'] = tag.data if tag is not None else None\n tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)\n info['mri_id'] = tag.data if tag is not None else None\n\n # Information from the MEG file\n parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)\n if len(parent_meg) == 0:\n raise ValueError('No parent MEG information found in operator')\n parent_meg = parent_meg[0]\n\n tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)\n info['meas_file'] = tag.data if tag is not None else None\n tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)\n info['meas_id'] = tag.data if tag is not None else None\n\n # Add channel information\n chs = list()\n for k in range(parent_meg['nent']):\n kind = parent_meg['directory'][k].kind\n pos = parent_meg['directory'][k].pos\n if kind == FIFF.FIFF_CH_INFO:\n tag = read_tag(fid, pos)\n chs.append(tag.data)\n info['chs'] = chs\n info._update_redundant()\n\n # Get the MRI <-> head coordinate transformation\n tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)\n coord_head = FIFF.FIFFV_COORD_HEAD\n coord_mri = FIFF.FIFFV_COORD_MRI\n coord_device = FIFF.FIFFV_COORD_DEVICE\n coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD\n if tag is None:\n raise ValueError('MRI/head coordinate transformation not found')\n cand = tag.data\n if cand['from'] == coord_mri and cand['to'] == coord_head:\n info['mri_head_t'] = cand\n else:\n raise ValueError('MRI/head coordinate transformation not found')\n\n # Get the MEG device <-> head coordinate transformation\n tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)\n if tag is None:\n raise ValueError('MEG/head coordinate transformation not found')\n cand = tag.data\n if cand['from'] == coord_device and cand['to'] == coord_head:\n info['dev_head_t'] = cand\n elif cand['from'] == coord_ctf_head and cand['to'] == coord_head:\n info['ctf_head_t'] = cand\n else:\n raise ValueError('MEG/head coordinate transformation not found')\n\n info['bads'] = read_bad_channels(fid, parent_meg)\n # clean up our bad list, old versions could have non-existent bads\n info['bads'] = [bad for bad in info['bads'] if bad in info['ch_names']]\n\n # Check if a custom reference has been applied\n tag = find_tag(fid, parent_mri, 
FIFF.FIFF_MNE_CUSTOM_REF)\n if tag is None:\n tag = find_tag(fid, parent_mri, 236) # Constant 236 used before v0.11\n\n info['custom_ref_applied'] = bool(tag.data) if tag is not None else False\n info._check_consistency()\n return info\n\n\ndef _subject_from_forward(forward):\n \"\"\"Get subject id from inverse operator.\"\"\"\n return forward['src']._subject\n\n\n@verbose\ndef _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=None):\n \"\"\"Merge loaded MEG and EEG forward dicts into one dict.\"\"\"\n if megfwd is not None and eegfwd is not None:\n if (megfwd['sol']['data'].shape[1] != eegfwd['sol']['data'].shape[1] or\n megfwd['source_ori'] != eegfwd['source_ori'] or\n megfwd['nsource'] != eegfwd['nsource'] or\n megfwd['coord_frame'] != eegfwd['coord_frame']):\n raise ValueError('The MEG and EEG forward solutions do not match')\n\n fwd = megfwd\n fwd['sol']['data'] = np.r_[fwd['sol']['data'], eegfwd['sol']['data']]\n fwd['_orig_sol'] = np.r_[fwd['_orig_sol'], eegfwd['_orig_sol']]\n fwd['sol']['nrow'] = fwd['sol']['nrow'] + eegfwd['sol']['nrow']\n\n fwd['sol']['row_names'] = (fwd['sol']['row_names'] +\n eegfwd['sol']['row_names'])\n if fwd['sol_grad'] is not None:\n fwd['sol_grad']['data'] = np.r_[fwd['sol_grad']['data'],\n eegfwd['sol_grad']['data']]\n fwd['_orig_sol_grad'] = np.r_[fwd['_orig_sol_grad'],\n eegfwd['_orig_sol_grad']]\n fwd['sol_grad']['nrow'] = (fwd['sol_grad']['nrow'] +\n eegfwd['sol_grad']['nrow'])\n fwd['sol_grad']['row_names'] = (fwd['sol_grad']['row_names'] +\n eegfwd['sol_grad']['row_names'])\n\n fwd['nchan'] = fwd['nchan'] + eegfwd['nchan']\n logger.info(' MEG and EEG forward solutions combined')\n elif megfwd is not None:\n fwd = megfwd\n else:\n fwd = eegfwd\n return fwd\n\n\n@verbose\ndef read_forward_solution(fname, include=(), exclude=(), verbose=None):\n \"\"\"Read a forward solution a.k.a. lead field.\n\n Parameters\n ----------\n fname : str\n The file name, which should end with -fwd.fif or -fwd.fif.gz.\n include : list, optional\n List of names of channels to include. If empty all channels\n are included.\n exclude : list, optional\n List of names of channels to exclude. If empty include all\n channels.\n %(verbose)s\n\n Returns\n -------\n fwd : instance of Forward\n The forward solution.\n\n See Also\n --------\n write_forward_solution, make_forward_solution\n\n Notes\n -----\n Forward solutions, which are derived from an original forward solution with\n free orientation, are always stored on disk as forward solution with free\n orientation in X/Y/Z RAS coordinates. To apply any transformation to the\n forward operator (surface orientation, fixed orientation) please apply\n :func:`convert_forward_solution` after reading the forward solution with\n :func:`read_forward_solution`.\n\n Forward solutions, which are derived from an original forward solution with\n fixed orientation, are stored on disk as forward solution with fixed\n surface-based orientations. Please note that the transformation to\n surface-based, fixed orientation cannot be reverted after loading the\n forward solution with :func:`read_forward_solution`.\n \"\"\"\n check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',\n '_fwd.fif', '_fwd.fif.gz'))\n\n # Open the file, create directory\n logger.info('Reading forward solution from %s...' 
% fname)\n f, tree, _ = fiff_open(fname)\n with f as fid:\n # Find all forward solutions\n fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n if len(fwds) == 0:\n raise ValueError('No forward solutions in %s' % fname)\n\n # Parent MRI data\n parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n if len(parent_mri) == 0:\n raise ValueError('No parent MRI information in %s' % fname)\n parent_mri = parent_mri[0]\n\n src = _read_source_spaces_from_tree(fid, tree, patch_stats=False)\n for s in src:\n s['id'] = find_source_space_hemi(s)\n\n fwd = None\n\n # Locate and read the forward solutions\n megnode = None\n eegnode = None\n for k in range(len(fwds)):\n tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS)\n if tag is None:\n raise ValueError('Methods not listed for one of the forward '\n 'solutions')\n\n if tag.data == FIFF.FIFFV_MNE_MEG:\n megnode = fwds[k]\n elif tag.data == FIFF.FIFFV_MNE_EEG:\n eegnode = fwds[k]\n\n megfwd = _read_one(fid, megnode)\n if megfwd is not None:\n if is_fixed_orient(megfwd):\n ori = 'fixed'\n else:\n ori = 'free'\n logger.info(' Read MEG forward solution (%d sources, '\n '%d channels, %s orientations)'\n % (megfwd['nsource'], megfwd['nchan'], ori))\n\n eegfwd = _read_one(fid, eegnode)\n if eegfwd is not None:\n if is_fixed_orient(eegfwd):\n ori = 'fixed'\n else:\n ori = 'free'\n logger.info(' Read EEG forward solution (%d sources, '\n '%d channels, %s orientations)'\n % (eegfwd['nsource'], eegfwd['nchan'], ori))\n\n fwd = _merge_meg_eeg_fwds(megfwd, eegfwd)\n\n # Get the MRI <-> head coordinate transformation\n tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)\n if tag is None:\n raise ValueError('MRI/head coordinate transformation not found')\n mri_head_t = tag.data\n if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or\n mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):\n mri_head_t = invert_transform(mri_head_t)\n if (mri_head_t['from'] != FIFF.FIFFV_COORD_MRI or\n mri_head_t['to'] != FIFF.FIFFV_COORD_HEAD):\n fid.close()\n raise ValueError('MRI/head coordinate transformation not '\n 'found')\n fwd['mri_head_t'] = mri_head_t\n\n #\n # get parent MEG info\n #\n fwd['info'] = _read_forward_meas_info(tree, fid)\n\n # MNE environment\n parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV)\n if len(parent_env) > 0:\n parent_env = parent_env[0]\n tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR)\n if tag is not None:\n fwd['info']['working_dir'] = tag.data\n tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE)\n if tag is not None:\n fwd['info']['command_line'] = tag.data\n\n # Transform the source spaces to the correct coordinate frame\n # if necessary\n\n # Make sure forward solution is in either the MRI or HEAD coordinate frame\n if fwd['coord_frame'] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD):\n raise ValueError('Only forward solutions computed in MRI or head '\n 'coordinates are acceptable')\n\n # Transform each source space to the HEAD or MRI coordinate frame,\n # depending on the coordinate frame of the forward solution\n # NOTE: the function transform_surface_to will also work on discrete and\n # volume sources\n nuse = 0\n for s in src:\n try:\n s = transform_surface_to(s, fwd['coord_frame'], mri_head_t)\n except Exception as inst:\n raise ValueError('Could not transform source space (%s)' % inst)\n\n nuse += s['nuse']\n\n # Make sure the number of sources match after transformation\n if nuse != fwd['nsource']:\n raise ValueError('Source spaces do not match the forward solution.')\n\n 
logger.info(' Source spaces transformed to the forward solution '\n 'coordinate frame')\n fwd['src'] = src\n\n # Handle the source locations and orientations\n fwd['source_rr'] = np.concatenate([ss['rr'][ss['vertno'], :]\n for ss in src], axis=0)\n\n # Store original source orientations\n fwd['_orig_source_ori'] = fwd['source_ori']\n\n # Deal with include and exclude\n pick_channels_forward(fwd, include=include, exclude=exclude, copy=False)\n\n if is_fixed_orient(fwd, orig=True):\n fwd['source_nn'] = np.concatenate([_src['nn'][_src['vertno'], :]\n for _src in fwd['src']], axis=0)\n fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI\n fwd['surf_ori'] = True\n else:\n fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))\n fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI\n fwd['surf_ori'] = False\n return Forward(fwd)\n\n\n@verbose\ndef convert_forward_solution(fwd, surf_ori=False, force_fixed=False,\n copy=True, use_cps=True, verbose=None):\n \"\"\"Convert forward solution between different source orientations.\n\n Parameters\n ----------\n fwd : Forward\n The forward solution to modify.\n surf_ori : bool, optional (default False)\n Use surface-based source coordinate system? Note that force_fixed=True\n implies surf_ori=True.\n force_fixed : bool, optional (default False)\n If True, force fixed source orientation mode.\n copy : bool\n Whether to return a new instance or modify in place.\n use_cps : bool (default True)\n Whether to use cortical patch statistics to define normal\n orientations. Only used when surf_ori and/or force_fixed are True.\n %(verbose)s\n\n Returns\n -------\n fwd : Forward\n The modified forward solution.\n \"\"\"\n fwd = fwd.copy() if copy else fwd\n\n if force_fixed is True:\n surf_ori = True\n\n if any([src['type'] == 'vol' for src in fwd['src']]) and force_fixed:\n raise ValueError(\n 'Forward operator was generated with sources from a '\n 'volume source space. Conversion to fixed orientation is not '\n 'possible. Consider using a discrete source space if you have '\n 'meaningful normal orientations.')\n\n if surf_ori:\n if use_cps:\n if any(s.get('patch_inds') is not None for s in fwd['src']):\n use_ave_nn = True\n logger.info(' Average patch normals will be employed in '\n 'the rotation to the local surface coordinates..'\n '..')\n else:\n use_ave_nn = False\n logger.info(' No patch info available. The standard source '\n 'space normals will be employed in the rotation '\n 'to the local surface coordinates....')\n else:\n use_ave_nn = False\n\n # We need to change these entries (only):\n # 1. source_nn\n # 2. sol['data']\n # 3. sol['ncol']\n # 4. sol_grad['data']\n # 5. sol_grad['ncol']\n # 6. 
source_ori\n\n if is_fixed_orient(fwd, orig=True) or (force_fixed and not use_ave_nn):\n # Fixed\n fwd['source_nn'] = np.concatenate([s['nn'][s['vertno'], :]\n for s in fwd['src']], axis=0)\n if not is_fixed_orient(fwd, orig=True):\n logger.info(' Changing to fixed-orientation forward '\n 'solution with surface-based source orientations...')\n fix_rot = _block_diag(fwd['source_nn'].T, 1)\n # newer versions of numpy require explicit casting here, so *= no\n # longer works\n fwd['sol']['data'] = (fwd['_orig_sol'] *\n fix_rot).astype('float32')\n fwd['sol']['ncol'] = fwd['nsource']\n if fwd['sol_grad'] is not None:\n x = sparse.block_diag([fix_rot] * 3)\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod\n fwd['sol_grad']['ncol'] = 3 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI\n fwd['surf_ori'] = True\n\n elif surf_ori: # Free, surf-oriented\n # Rotate the local source coordinate systems\n fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))\n logger.info(' Converting to surface-based source orientations...')\n # Actually determine the source orientations\n pp = 0\n for s in fwd['src']:\n if s['type'] in ['surf', 'discrete']:\n for p in range(s['nuse']):\n # Project out the surface normal and compute SVD\n if use_ave_nn and s.get('patch_inds') is not None:\n nn = s['nn'][s['pinfo'][s['patch_inds'][p]], :]\n nn = np.sum(nn, axis=0)[:, np.newaxis]\n nn /= linalg.norm(nn)\n else:\n nn = s['nn'][s['vertno'][p], :][:, np.newaxis]\n U, S, _ = linalg.svd(np.eye(3, 3) - nn * nn.T)\n # Make sure that ez is in the direction of nn\n if np.sum(nn.ravel() * U[:, 2].ravel()) < 0:\n U *= -1.0\n fwd['source_nn'][pp:pp + 3, :] = U.T\n pp += 3\n else:\n pp += 3 * s['nuse']\n\n # Rotate the solution components as well\n if force_fixed:\n fwd['source_nn'] = fwd['source_nn'][2::3, :]\n fix_rot = _block_diag(fwd['source_nn'].T, 1)\n # newer versions of numpy require explicit casting here, so *= no\n # longer works\n fwd['sol']['data'] = (fwd['_orig_sol'] *\n fix_rot).astype('float32')\n fwd['sol']['ncol'] = fwd['nsource']\n if fwd['sol_grad'] is not None:\n x = sparse.block_diag([fix_rot] * 3)\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod\n fwd['sol_grad']['ncol'] = 3 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FIXED_ORI\n fwd['surf_ori'] = True\n else:\n surf_rot = _block_diag(fwd['source_nn'].T, 3)\n fwd['sol']['data'] = fwd['_orig_sol'] * surf_rot\n fwd['sol']['ncol'] = 3 * fwd['nsource']\n if fwd['sol_grad'] is not None:\n x = sparse.block_diag([surf_rot] * 3)\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'] * x # dot prod\n fwd['sol_grad']['ncol'] = 9 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI\n fwd['surf_ori'] = True\n\n else: # Free, cartesian\n logger.info(' Cartesian source orientations...')\n fwd['source_nn'] = np.kron(np.ones((fwd['nsource'], 1)), np.eye(3))\n fwd['sol']['data'] = fwd['_orig_sol'].copy()\n fwd['sol']['ncol'] = 3 * fwd['nsource']\n if fwd['sol_grad'] is not None:\n fwd['sol_grad']['data'] = fwd['_orig_sol_grad'].copy()\n fwd['sol_grad']['ncol'] = 9 * fwd['nsource']\n fwd['source_ori'] = FIFF.FIFFV_MNE_FREE_ORI\n fwd['surf_ori'] = False\n\n logger.info(' [done]')\n\n return fwd\n\n\n@verbose\ndef write_forward_solution(fname, fwd, overwrite=False, verbose=None):\n \"\"\"Write forward solution to a file.\n\n Parameters\n ----------\n fname : str\n File name to save the forward solution to. 
It should end with -fwd.fif\n or -fwd.fif.gz.\n fwd : Forward\n Forward solution.\n overwrite : bool\n If True, overwrite destination file (if it exists).\n %(verbose)s\n\n See Also\n --------\n read_forward_solution\n\n Notes\n -----\n Forward solutions, which are derived from an original forward solution with\n free orientation, are always stored on disk as forward solution with free\n orientation in X/Y/Z RAS coordinates. Transformations (surface orientation,\n fixed orientation) will be reverted. To reapply any transformation to the\n forward operator please apply :func:`convert_forward_solution` after\n reading the forward solution with :func:`read_forward_solution`.\n\n Forward solutions, which are derived from an original forward solution with\n fixed orientation, are stored on disk as forward solution with fixed\n surface-based orientations. Please note that the transformation to\n surface-based, fixed orientation cannot be reverted after loading the\n forward solution with :func:`read_forward_solution`.\n \"\"\"\n check_fname(fname, 'forward', ('-fwd.fif', '-fwd.fif.gz',\n '_fwd.fif', '_fwd.fif.gz'))\n\n # check for file existence\n _check_fname(fname, overwrite)\n fid = start_file(fname)\n start_block(fid, FIFF.FIFFB_MNE)\n\n #\n # MNE env\n #\n start_block(fid, FIFF.FIFFB_MNE_ENV)\n write_id(fid, FIFF.FIFF_BLOCK_ID)\n data = fwd['info'].get('working_dir', None)\n if data is not None:\n write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data)\n data = fwd['info'].get('command_line', None)\n if data is not None:\n write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data)\n end_block(fid, FIFF.FIFFB_MNE_ENV)\n\n #\n # Information from the MRI file\n #\n start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd['info']['mri_file'])\n if fwd['info']['mri_id'] is not None:\n write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd['info']['mri_id'])\n # store the MRI to HEAD transform in MRI file\n write_coord_trans(fid, fwd['info']['mri_head_t'])\n end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE)\n\n # write measurement info\n write_forward_meas_info(fid, fwd['info'])\n\n # invert our original source space transform\n src = list()\n for s in fwd['src']:\n s = deepcopy(s)\n try:\n # returns source space to original coordinate frame\n # usually MRI\n s = transform_surface_to(s, fwd['mri_head_t']['from'],\n fwd['mri_head_t'])\n except Exception as inst:\n raise ValueError('Could not transform source space (%s)' % inst)\n src.append(s)\n\n #\n # Write the source spaces (again)\n #\n _write_source_spaces_to_fid(fid, src)\n n_vert = sum([ss['nuse'] for ss in src])\n if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:\n n_col = n_vert\n else:\n n_col = 3 * n_vert\n\n # Undo transformations\n sol = fwd['_orig_sol'].copy()\n if fwd['sol_grad'] is not None:\n sol_grad = fwd['_orig_sol_grad'].copy()\n else:\n sol_grad = None\n\n if fwd['surf_ori'] is True:\n if fwd['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI:\n warn('The forward solution, which is stored on disk now, is based '\n 'on a forward solution with fixed orientation. Please note '\n 'that the transformation to surface-based, fixed orientation '\n 'cannot be reverted after loading the forward solution with '\n 'read_forward_solution.', RuntimeWarning)\n else:\n warn('This forward solution is based on a forward solution with '\n 'free orientation. The original forward solution is stored '\n 'on disk in X/Y/Z RAS coordinates. 
Any transformation '\n '(surface orientation or fixed orientation) will be '\n 'reverted. To reapply any transformation to the forward '\n 'operator please apply convert_forward_solution after '\n 'reading the forward solution with read_forward_solution.',\n RuntimeWarning)\n\n #\n # MEG forward solution\n #\n picks_meg = pick_types(fwd['info'], meg=True, eeg=False, ref_meg=False,\n exclude=[])\n picks_eeg = pick_types(fwd['info'], meg=False, eeg=True, ref_meg=False,\n exclude=[])\n n_meg = len(picks_meg)\n n_eeg = len(picks_eeg)\n row_names_meg = [fwd['sol']['row_names'][p] for p in picks_meg]\n row_names_eeg = [fwd['sol']['row_names'][p] for p in picks_eeg]\n\n if n_meg > 0:\n meg_solution = dict(data=sol[picks_meg], nrow=n_meg, ncol=n_col,\n row_names=row_names_meg, col_names=[])\n _transpose_named_matrix(meg_solution)\n start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG)\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])\n write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,\n fwd['_orig_source_ori'])\n write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)\n write_int(fid, FIFF.FIFF_NCHAN, n_meg)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution)\n if sol_grad is not None:\n meg_solution_grad = dict(data=sol_grad[picks_meg],\n nrow=n_meg, ncol=n_col * 3,\n row_names=row_names_meg, col_names=[])\n _transpose_named_matrix(meg_solution_grad)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,\n meg_solution_grad)\n end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n\n #\n # EEG forward solution\n #\n if n_eeg > 0:\n eeg_solution = dict(data=sol[picks_eeg], nrow=n_eeg, ncol=n_col,\n row_names=row_names_eeg, col_names=[])\n _transpose_named_matrix(eeg_solution)\n start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG)\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd['coord_frame'])\n write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION,\n fwd['_orig_source_ori'])\n write_int(fid, FIFF.FIFF_NCHAN, n_eeg)\n write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution)\n if sol_grad is not None:\n eeg_solution_grad = dict(data=sol_grad[picks_eeg],\n nrow=n_eeg, ncol=n_col * 3,\n row_names=row_names_eeg, col_names=[])\n _transpose_named_matrix(eeg_solution_grad)\n write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD,\n eeg_solution_grad)\n end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION)\n\n end_block(fid, FIFF.FIFFB_MNE)\n end_file(fid)\n\n\ndef is_fixed_orient(forward, orig=False):\n \"\"\"Check if the forward operator is fixed orientation.\n\n Parameters\n ----------\n forward : instance of Forward\n The forward.\n orig : bool\n If True, consider the original source orientation.\n If False (default), consider the current source orientation.\n\n Returns\n -------\n fixed_ori : bool\n Whether or not it is fixed orientation.\n \"\"\"\n if orig: # if we want to know about the original version\n fixed_ori = (forward['_orig_source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)\n else: # most of the time we want to know about the current version\n fixed_ori = (forward['source_ori'] == FIFF.FIFFV_MNE_FIXED_ORI)\n return fixed_ori\n\n\ndef write_forward_meas_info(fid, info):\n \"\"\"Write measurement info stored in forward solution.\n\n Parameters\n ----------\n fid : file id\n The file id\n info : instance of Info\n The measurement info.\n \"\"\"\n 
info._check_consistency()\n #\n # Information from the MEG file\n #\n start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)\n write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info['meas_file'])\n if info['meas_id'] is not None:\n write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])\n # get transformation from CTF and DEVICE to HEAD coordinate frame\n meg_head_t = info.get('dev_head_t', info.get('ctf_head_t'))\n if meg_head_t is None:\n fid.close()\n raise ValueError('Head<-->sensor transform not found')\n write_coord_trans(fid, meg_head_t)\n\n if 'chs' in info:\n # Channel information\n write_int(fid, FIFF.FIFF_NCHAN, len(info['chs']))\n for k, c in enumerate(info['chs']):\n # Scan numbers may have been messed up\n c = deepcopy(c)\n c['scanno'] = k + 1\n write_ch_info(fid, c)\n if 'bads' in info and len(info['bads']) > 0:\n # Bad channels\n start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])\n end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)\n\n\ndef _select_orient_forward(forward, info, noise_cov=None, copy=True):\n \"\"\"Prepare forward solution for inverse solvers.\"\"\"\n # fwd['sol']['row_names'] may be different order from fwd['info']['chs']\n fwd_sol_ch_names = forward['sol']['row_names']\n all_ch_names = set(fwd_sol_ch_names)\n all_bads = set(info['bads'])\n if noise_cov is not None:\n all_ch_names &= set(noise_cov['names'])\n all_bads |= set(noise_cov['bads'])\n else:\n noise_cov = dict(bads=info['bads'])\n ch_names = [c['ch_name'] for c in info['chs']\n if c['ch_name'] not in all_bads and\n c['ch_name'] in all_ch_names]\n\n if not len(info['bads']) == len(noise_cov['bads']) or \\\n not all(b in noise_cov['bads'] for b in info['bads']):\n logger.info('info[\"bads\"] and noise_cov[\"bads\"] do not match, '\n 'excluding bad channels from both')\n\n # check the compensation grade\n _check_compensation_grade(forward['info'], info, 'forward')\n\n n_chan = len(ch_names)\n logger.info(\"Computing inverse operator with %d channels.\" % n_chan)\n forward = pick_channels_forward(forward, ch_names, ordered=True,\n copy=copy)\n info_idx = [info['ch_names'].index(name) for name in ch_names]\n info_picked = pick_info(info, info_idx)\n forward['info']._check_consistency()\n info_picked._check_consistency()\n return forward, info_picked\n\n\n@verbose\ndef compute_orient_prior(forward, loose=0.2, verbose=None):\n \"\"\"Compute orientation prior.\n\n Parameters\n ----------\n forward : instance of Forward\n Forward operator.\n loose : float\n The loose orientation parameter (between 0 and 1).\n %(verbose)s\n\n Returns\n -------\n orient_prior : ndarray, shape (n_vertices,)\n Orientation priors.\n\n See Also\n --------\n compute_depth_prior\n \"\"\"\n is_fixed_ori = is_fixed_orient(forward)\n n_sources = forward['sol']['data'].shape[1]\n loose = float(loose)\n if not (0 <= loose <= 1):\n raise ValueError('loose value should be between 0 and 1, '\n 'got %s.' % (loose,))\n orient_prior = np.ones(n_sources, dtype=np.float)\n if loose > 0.:\n if is_fixed_ori:\n raise ValueError('loose must be 0. with forward operator '\n 'with fixed orientation, got %s' % (loose,))\n if loose < 1:\n if not forward['surf_ori']:\n raise ValueError('Forward operator is not oriented in surface '\n 'coordinates. loose parameter should be 1 '\n 'not %s.' % (loose,))\n logger.info('Applying loose dipole orientations. Loose value '\n 'of %s.' 
% loose)\n orient_prior[0::3] *= loose\n orient_prior[1::3] *= loose\n\n return orient_prior\n\n\ndef _restrict_gain_matrix(G, info):\n \"\"\"Restrict gain matrix entries for optimal depth weighting.\"\"\"\n # Figure out which ones have been used\n if len(info['chs']) != G.shape[0]:\n raise ValueError('G.shape[0] (%d) and length of info[\"chs\"] (%d) '\n 'do not match' % (G.shape[0], len(info['chs'])))\n for meg, eeg, kind in (\n ('grad', False, 'planar'),\n ('mag', False, 'magnetometer or axial gradiometer'),\n (False, True, 'EEG')):\n sel = pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[])\n if len(sel) > 0:\n logger.info(' %d %s channels' % (len(sel), kind))\n break\n else:\n warn('Could not find MEG or EEG channels to limit depth channels')\n sel = slice(None)\n return G[sel]\n\n\n@verbose\ndef compute_depth_prior(forward, info, exp=0.8, limit=10.0,\n limit_depth_chs=False, combine_xyz='spectral',\n noise_cov=None, rank=None, verbose=None):\n \"\"\"Compute depth prior for depth weighting.\n\n Parameters\n ----------\n forward : instance of Forward\n The forward solution.\n info : instance of Info\n The measurement info.\n exp : float\n Exponent for the depth weighting, must be between 0 and 1.\n limit : float | None\n The upper bound on depth weighting.\n Can be None to be bounded by the largest finite prior.\n limit_depth_chs : bool | 'whiten'\n How to deal with multiple channel types in depth weighting.\n The default is True, which whitens based on the source sensitivity\n of the highest-SNR channel type. See Notes for details.\n\n .. versionchanged:: 0.18\n Added the \"whiten\" option.\n combine_xyz : 'spectral' | 'fro'\n When a loose (or free) orientation is used, how the depth weighting\n for each triplet should be calculated.\n If 'spectral', use the squared spectral norm of Gk.\n If 'fro', use the squared Frobenius norm of Gk.\n\n .. versionadded:: 0.18\n noise_cov : instance of Covariance | None\n The noise covariance to use to whiten the gain matrix when\n ``limit_depth_chs='whiten'``.\n\n .. versionadded:: 0.18\n %(rank_None)s\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n depth_prior : ndarray, shape (n_vertices,)\n The depth prior.\n\n See Also\n --------\n compute_orient_prior\n\n Notes\n -----\n The defaults used by the minimum norm code and sparse solvers differ.\n In particular, the values for MNE are::\n\n compute_depth_prior(..., limit=10., limit_depth_chs=True,\n combine_xyz='spectral')\n\n In sparse solvers and LCMV, the values are::\n\n compute_depth_prior(..., limit=None, limit_depth_chs='whiten',\n combine_xyz='fro')\n\n The ``limit_depth_chs`` argument can take the following values:\n\n * :data:`python:True` (default)\n Use only grad channels in depth weighting (equivalent to MNE C\n minimum-norm code). If grad channels aren't present, only mag\n channels will be used (if no mag, then eeg). This makes the depth\n prior dependent only on the sensor geometry (and relationship\n to the sources).\n * ``'whiten'``\n Compute a whitener and apply it to the gain matirx before computing\n the depth prior. In this case ``noise_cov`` must not be None.\n Whitening the gain matrix makes the depth prior\n depend on both sensor geometry and the data of interest captured\n by the noise covariance (e.g., projections, SNR).\n\n .. versionadded:: 0.18\n * :data:`python:False`\n Use all channels. 
Not recommended since the depth weighting will be\n biased toward whichever channel type has the largest values in\n SI units (such as EEG being orders of magnitude larger than MEG).\n \"\"\"\n from ..cov import Covariance, compute_whitener\n _validate_type(forward, Forward, 'forward')\n patch_areas = forward.get('patch_areas', None)\n is_fixed_ori = is_fixed_orient(forward)\n G = forward['sol']['data']\n logger.info('Creating the depth weighting matrix...')\n _validate_type(noise_cov, (Covariance, None), 'noise_cov',\n 'Covariance or None')\n _validate_type(limit_depth_chs, (str, bool), 'limit_depth_chs')\n if isinstance(limit_depth_chs, str):\n if limit_depth_chs != 'whiten':\n raise ValueError('limit_depth_chs, if str, must be \"whiten\", got '\n '%s' % (limit_depth_chs,))\n if not isinstance(noise_cov, Covariance):\n raise ValueError('With limit_depth_chs=\"whiten\", noise_cov must be'\n ' a Covariance, got %s' % (type(noise_cov),))\n if combine_xyz is not False: # private / expert option\n _check_option('combine_xyz', combine_xyz, ('fro', 'spectral'))\n\n # If possible, pick best depth-weighting channels\n if limit_depth_chs is True:\n G = _restrict_gain_matrix(G, info)\n elif limit_depth_chs == 'whiten':\n whitener, _ = compute_whitener(noise_cov, info, pca=True, rank=rank,\n verbose=False)\n G = np.dot(whitener, G)\n\n # Compute the gain matrix\n if is_fixed_ori or combine_xyz in ('fro', False):\n d = np.sum(G ** 2, axis=0)\n if not (is_fixed_ori or combine_xyz is False):\n d = d.reshape(-1, 3).sum(axis=1)\n # Spherical leadfield can be zero at the center\n d[d == 0.] = np.min(d[d != 0.])\n else: # 'spectral'\n # n_pos = G.shape[1] // 3\n # The following is equivalent to this, but 4-10x faster\n # d = np.zeros(n_pos)\n # for k in range(n_pos):\n # Gk = G[:, 3 * k:3 * (k + 1)]\n # x = np.dot(Gk.T, Gk)\n # d[k] = linalg.svdvals(x)[0]\n G.shape = (G.shape[0], -1, 3)\n d = np.linalg.norm(einsum('svj,svk->vjk', G, G), # vector dot products\n ord=2, axis=(1, 2)) # ord=2 spectral (largest s.v.)\n G.shape = (G.shape[0], -1)\n\n # XXX Currently the fwd solns never have \"patch_areas\" defined\n if patch_areas is not None:\n if not is_fixed_ori and combine_xyz is False:\n patch_areas = np.repeat(patch_areas, 3)\n d /= patch_areas ** 2\n logger.info(' Patch areas taken into account in the depth '\n 'weighting')\n\n w = 1.0 / d\n if limit is not None:\n ws = np.sort(w)\n weight_limit = limit ** 2\n if limit_depth_chs is False:\n # match old mne-python behavor\n # we used to do ind = np.argmin(ws), but this is 0 by sort above\n n_limit = 0\n limit = ws[0] * weight_limit\n else:\n # match C code behavior\n limit = ws[-1]\n n_limit = len(d)\n if ws[-1] > weight_limit * ws[0]:\n ind = np.where(ws > weight_limit * ws[0])[0][0]\n limit = ws[ind]\n n_limit = ind\n\n logger.info(' limit = %d/%d = %f'\n % (n_limit + 1, len(d),\n np.sqrt(limit / ws[0])))\n scale = 1.0 / limit\n logger.info(' scale = %g exp = %g' % (scale, exp))\n w = np.minimum(w / limit, 1)\n depth_prior = w ** exp\n\n if not (is_fixed_ori or combine_xyz is False):\n depth_prior = np.repeat(depth_prior, 3)\n\n return depth_prior\n\n\ndef _stc_src_sel(src, stc, on_missing='raise',\n extra=', likely due to forward calculations'):\n \"\"\"Select the vertex indices of a source space using a source estimate.\"\"\"\n if isinstance(stc, list):\n vertices = stc\n else:\n assert isinstance(stc, _BaseSourceEstimate)\n vertices = stc._vertices_list\n del stc\n if not len(src) == len(vertices):\n raise RuntimeError('Mismatch between number of 
source spaces (%s) and '\n 'STC vertices (%s)' % (len(src), len(vertices)))\n src_sels, stc_sels, out_vertices = [], [], []\n src_offset = stc_offset = 0\n for s, v in zip(src, vertices):\n joint_sel = np.intersect1d(s['vertno'], v)\n src_sels.append(np.searchsorted(s['vertno'], joint_sel) + src_offset)\n src_offset += len(s['vertno'])\n idx = np.searchsorted(v, joint_sel)\n stc_sels.append(idx + stc_offset)\n stc_offset += len(v)\n out_vertices.append(np.array(v)[idx])\n src_sel = np.concatenate(src_sels)\n stc_sel = np.concatenate(stc_sels)\n assert len(src_sel) == len(stc_sel) == sum(len(v) for v in out_vertices)\n\n n_stc = sum(len(v) for v in vertices)\n n_joint = len(src_sel)\n if n_joint != n_stc:\n msg = ('Only %i of %i SourceEstimate %s found in '\n 'source space%s'\n % (n_joint, n_stc, 'vertex' if n_stc == 1 else 'vertices',\n extra))\n if on_missing == 'raise':\n raise RuntimeError(msg)\n elif on_missing == 'warn':\n warn(msg)\n else:\n assert on_missing == 'ignore'\n return src_sel, stc_sel, out_vertices\n\n\ndef _fill_measurement_info(info, fwd, sfreq):\n \"\"\"Fill the measurement info of a Raw or Evoked object.\"\"\"\n sel = pick_channels(info['ch_names'], fwd['sol']['row_names'])\n info = pick_info(info, sel)\n info['bads'] = []\n\n # this is probably correct based on what's done in meas_info.py...\n info['meas_id'] = fwd['info']['meas_id']\n info['file_id'] = info['meas_id']\n\n now = time()\n sec = np.floor(now)\n usec = 1e6 * (now - sec)\n\n info['meas_date'] = (int(sec), int(usec))\n info['highpass'] = 0.0\n info['lowpass'] = sfreq / 2.0\n info['sfreq'] = sfreq\n info['projs'] = []\n\n return info\n\n\n@verbose\ndef _apply_forward(fwd, stc, start=None, stop=None, on_missing='raise',\n verbose=None):\n \"\"\"Apply forward model and return data, times, ch_names.\"\"\"\n if not is_fixed_orient(fwd):\n raise ValueError('Only fixed-orientation forward operators are '\n 'supported.')\n\n if np.all(stc.data > 0):\n warn('Source estimate only contains currents with positive values. '\n 'Use pick_ori=\"normal\" when computing the inverse to compute '\n 'currents not current magnitudes.')\n\n _check_stc_units(stc)\n\n src_sel, stc_sel, _ = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)\n gain = fwd['sol']['data'][:, src_sel]\n # save some memory if possible\n stc_sel = slice(None) if len(stc_sel) == len(stc.data) else stc_sel\n\n logger.info('Projecting source estimate to sensor space...')\n data = np.dot(gain, stc.data[stc_sel, start:stop])\n logger.info('[done]')\n\n times = deepcopy(stc.times[start:stop])\n\n return data, times\n\n\n@verbose\ndef apply_forward(fwd, stc, info, start=None, stop=None, use_cps=True,\n on_missing='raise', verbose=None):\n \"\"\"Project source space currents to sensor space using a forward operator.\n\n The sensor space data is computed for all channels present in fwd. Use\n pick_channels_forward or pick_types_forward to restrict the solution to a\n subset of channels.\n\n The function returns an Evoked object, which is constructed from\n evoked_template. The evoked_template should be from the same MEG system on\n which the original data was acquired. 
An exception will be raised if the\n forward operator contains channels that are not present in the template.\n\n Parameters\n ----------\n fwd : Forward\n Forward operator to use.\n stc : SourceEstimate\n The source estimate from which the sensor space data is computed.\n info : instance of Info\n Measurement info to generate the evoked.\n start : int, optional\n Index of first time sample (index not time is seconds).\n stop : int, optional\n Index of first time sample not to include (index not time is seconds).\n use_cps : bool (default True)\n Whether to use cortical patch statistics to define normal\n orientations when converting to fixed orientation (if necessary).\n\n .. versionadded:: 0.15\n %(on_missing)s Default is \"raise\".\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n evoked : Evoked\n Evoked object with computed sensor space data.\n\n See Also\n --------\n apply_forward_raw: Compute sensor space data and return a Raw object.\n \"\"\"\n # make sure evoked_template contains all channels in fwd\n for ch_name in fwd['sol']['row_names']:\n if ch_name not in info['ch_names']:\n raise ValueError('Channel %s of forward operator not present in '\n 'evoked_template.' % ch_name)\n\n # project the source estimate to the sensor space\n if not is_fixed_orient(fwd):\n fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=use_cps)\n data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)\n\n # fill the measurement info\n sfreq = float(1.0 / stc.tstep)\n info_out = _fill_measurement_info(info, fwd, sfreq)\n\n evoked = EvokedArray(data, info_out, times[0], nave=1)\n\n evoked.times = times\n evoked.first = int(np.round(evoked.times[0] * sfreq))\n evoked.last = evoked.first + evoked.data.shape[1] - 1\n\n return evoked\n\n\n@verbose\ndef apply_forward_raw(fwd, stc, info, start=None, stop=None,\n on_missing='raise', verbose=None):\n \"\"\"Project source space currents to sensor space using a forward operator.\n\n The sensor space data is computed for all channels present in fwd. Use\n pick_channels_forward or pick_types_forward to restrict the solution to a\n subset of channels.\n\n The function returns a Raw object, which is constructed using provided\n info. The info object should be from the same MEG system on which the\n original data was acquired. An exception will be raised if the forward\n operator contains channels that are not present in the info.\n\n Parameters\n ----------\n fwd : Forward\n Forward operator to use. Has to be fixed-orientation.\n stc : SourceEstimate\n The source estimate from which the sensor space data is computed.\n info : instance of Info\n The measurement info.\n start : int, optional\n Index of first time sample (index not time is seconds).\n stop : int, optional\n Index of first time sample not to include (index not time is seconds).\n %(on_missing)s Default is \"raise\".\n\n .. versionadded:: 0.18\n %(verbose)s\n\n Returns\n -------\n raw : Raw object\n Raw object with computed sensor space data.\n\n See Also\n --------\n apply_forward: Compute sensor space data and return an Evoked object.\n \"\"\"\n # make sure info contains all channels in fwd\n for ch_name in fwd['sol']['row_names']:\n if ch_name not in info['ch_names']:\n raise ValueError('Channel %s of forward operator not present in '\n 'info.' 
% ch_name)\n\n # project the source estimate to the sensor space\n data, times = _apply_forward(fwd, stc, start, stop, on_missing=on_missing)\n\n sfreq = 1.0 / stc.tstep\n info = _fill_measurement_info(info, fwd, sfreq)\n info['projs'] = []\n # store sensor data in Raw object using the info\n raw = RawArray(data, info)\n raw.preload = True\n\n raw._first_samps = np.array([int(np.round(times[0] * sfreq))])\n raw._last_samps = np.array([raw.first_samp + raw._data.shape[1] - 1])\n raw._projector = None\n raw._update_times()\n return raw\n\n\n@fill_doc\ndef restrict_forward_to_stc(fwd, stc, on_missing='ignore'):\n \"\"\"Restrict forward operator to active sources in a source estimate.\n\n Parameters\n ----------\n fwd : instance of Forward\n Forward operator.\n stc : instance of SourceEstimate\n Source estimate.\n %(on_missing)s Default is \"ignore\".\n\n .. versionadded:: 0.18\n\n Returns\n -------\n fwd_out : instance of Forward\n Restricted forward operator.\n\n See Also\n --------\n restrict_forward_to_label\n \"\"\"\n _validate_type(on_missing, str, 'on_missing')\n _check_option('on_missing', on_missing, ('ignore', 'warn', 'raise'))\n src_sel, _, vertices = _stc_src_sel(fwd['src'], stc, on_missing=on_missing)\n del stc\n return _restrict_forward_to_src_sel(fwd, src_sel)\n\n\ndef _restrict_forward_to_src_sel(fwd, src_sel):\n fwd_out = deepcopy(fwd)\n # figure out the vertno we are keeping\n idx_sel = np.concatenate([[[si] * len(s['vertno']), s['vertno']]\n for si, s in enumerate(fwd['src'])], axis=-1)\n assert idx_sel.ndim == 2 and idx_sel.shape[0] == 2\n assert idx_sel.shape[1] == fwd['nsource']\n idx_sel = idx_sel[:, src_sel]\n\n fwd_out['source_rr'] = fwd['source_rr'][src_sel]\n fwd_out['nsource'] = len(src_sel)\n\n if is_fixed_orient(fwd):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['source_nn'] = fwd['source_nn'][idx]\n fwd_out['sol']['data'] = fwd['sol']['data'][:, idx]\n if fwd['sol_grad'] is not None:\n fwd_out['sol_grad']['data'] = fwd['sol_grad']['data'][:, idx_grad]\n fwd_out['sol']['ncol'] = len(idx)\n\n if is_fixed_orient(fwd, orig=True):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['_orig_sol'] = fwd['_orig_sol'][:, idx]\n if fwd['sol_grad'] is not None:\n fwd_out['_orig_sol_grad'] = fwd['_orig_sol_grad'][:, idx_grad]\n\n vertices = [idx_sel[1][idx_sel[0] == si]\n for si in range(len(fwd_out['src']))]\n _set_source_space_vertices(fwd_out['src'], vertices)\n\n return fwd_out\n\n\ndef restrict_forward_to_label(fwd, labels):\n \"\"\"Restrict forward operator to labels.\n\n Parameters\n ----------\n fwd : Forward\n Forward operator.\n labels : instance of Label | list\n Label object or list of label objects.\n\n Returns\n -------\n fwd_out : dict\n Restricted forward operator.\n\n See Also\n --------\n restrict_forward_to_stc\n \"\"\"\n vertices = [np.array([], int), np.array([], int)]\n\n if not isinstance(labels, list):\n labels = [labels]\n\n # Get vertices separately of each hemisphere from all label\n for label in labels:\n _validate_type(label, Label, \"label\", \"Label or list\")\n i = 0 if label.hemi == 'lh' else 1\n 
vertices[i] = np.append(vertices[i], label.vertices)\n # Remove duplicates and sort\n vertices = [np.unique(vert_hemi) for vert_hemi in vertices]\n\n fwd_out = deepcopy(fwd)\n fwd_out['source_rr'] = np.zeros((0, 3))\n fwd_out['nsource'] = 0\n fwd_out['source_nn'] = np.zeros((0, 3))\n fwd_out['sol']['data'] = np.zeros((fwd['sol']['data'].shape[0], 0))\n fwd_out['_orig_sol'] = np.zeros((fwd['_orig_sol'].shape[0], 0))\n if fwd['sol_grad'] is not None:\n fwd_out['sol_grad']['data'] = np.zeros(\n (fwd['sol_grad']['data'].shape[0], 0))\n fwd_out['_orig_sol_grad'] = np.zeros(\n (fwd['_orig_sol_grad'].shape[0], 0))\n fwd_out['sol']['ncol'] = 0\n nuse_lh = fwd['src'][0]['nuse']\n\n for i in range(2):\n fwd_out['src'][i]['vertno'] = np.array([], int)\n fwd_out['src'][i]['nuse'] = 0\n fwd_out['src'][i]['inuse'] = fwd['src'][i]['inuse'].copy()\n fwd_out['src'][i]['inuse'].fill(0)\n fwd_out['src'][i]['use_tris'] = np.array([[]], int)\n fwd_out['src'][i]['nuse_tri'] = np.array([0])\n\n # src_sel is idx to cols in fwd that are in any label per hemi\n src_sel = np.intersect1d(fwd['src'][i]['vertno'], vertices[i])\n src_sel = np.searchsorted(fwd['src'][i]['vertno'], src_sel)\n\n # Reconstruct each src\n vertno = fwd['src'][i]['vertno'][src_sel]\n fwd_out['src'][i]['inuse'][vertno] = 1\n fwd_out['src'][i]['nuse'] += len(vertno)\n fwd_out['src'][i]['vertno'] = np.where(fwd_out['src'][i]['inuse'])[0]\n\n # Reconstruct part of fwd that is not sol data\n src_sel += i * nuse_lh # Add column shift to right hemi\n fwd_out['source_rr'] = np.vstack([fwd_out['source_rr'],\n fwd['source_rr'][src_sel]])\n fwd_out['nsource'] += len(src_sel)\n\n if is_fixed_orient(fwd):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['source_nn'] = np.vstack(\n [fwd_out['source_nn'], fwd['source_nn'][idx]])\n fwd_out['sol']['data'] = np.hstack(\n [fwd_out['sol']['data'], fwd['sol']['data'][:, idx]])\n if fwd['sol_grad'] is not None:\n fwd_out['sol_grad']['data'] = np.hstack(\n [fwd_out['sol_grad']['data'],\n fwd['sol_rad']['data'][:, idx_grad]])\n fwd_out['sol']['ncol'] += len(idx)\n\n if is_fixed_orient(fwd, orig=True):\n idx = src_sel\n if fwd['sol_grad'] is not None:\n idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel()\n else:\n idx = (3 * src_sel[:, None] + np.arange(3)).ravel()\n if fwd['sol_grad'] is not None:\n idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel()\n\n fwd_out['_orig_sol'] = np.hstack(\n [fwd_out['_orig_sol'], fwd['_orig_sol'][:, idx]])\n if fwd['sol_grad'] is not None:\n fwd_out['_orig_sol_grad'] = np.hstack(\n [fwd_out['_orig_sol_grad'],\n fwd['_orig_sol_grad'][:, idx_grad]])\n\n return fwd_out\n\n\ndef _do_forward_solution(subject, meas, fname=None, src=None, spacing=None,\n mindist=None, bem=None, mri=None, trans=None,\n eeg=True, meg=True, fixed=False, grad=False,\n mricoord=False, overwrite=False, subjects_dir=None,\n verbose=None):\n \"\"\"Calculate a forward solution for a subject using MNE-C routines.\n\n This is kept around for testing purposes.\n\n This function wraps to mne_do_forward_solution, so the mne\n command-line tools must be installed and accessible from Python.\n\n Parameters\n ----------\n subject : str\n Name of the subject.\n meas : Raw | Epochs | Evoked | str\n If Raw or Epochs, a temporary evoked file will be created and\n saved to a temporary directory. 
If str, then it should be a\n filename to a file with measurement information the mne\n command-line tools can understand (i.e., raw or evoked).\n fname : str | None\n Destination forward solution filename. If None, the solution\n will be created in a temporary directory, loaded, and deleted.\n src : str | None\n Source space name. If None, the MNE default is used.\n spacing : str\n The spacing to use. Can be ``'#'`` for spacing in mm, ``'ico#'`` for a\n recursively subdivided icosahedron, or ``'oct#'`` for a recursively\n subdivided octahedron (e.g., ``spacing='ico4'``). Default is 7 mm.\n mindist : float | str | None\n Minimum distance of sources from inner skull surface (in mm).\n If None, the MNE default value is used. If string, 'all'\n indicates to include all points.\n bem : str | None\n Name of the BEM to use (e.g., \"sample-5120-5120-5120\"). If None\n (Default), the MNE default will be used.\n mri : str | None\n The name of the trans file in FIF format.\n If None, trans must not be None.\n trans : dict | str | None\n File name of the trans file in text format.\n If None, mri must not be None.\n eeg : bool\n If True (Default), include EEG computations.\n meg : bool\n If True (Default), include MEG computations.\n fixed : bool\n If True, make a fixed-orientation forward solution (Default:\n False). Note that fixed-orientation inverses can still be\n created from free-orientation forward solutions.\n grad : bool\n If True, compute the gradient of the field with respect to the\n dipole coordinates as well (Default: False).\n mricoord : bool\n If True, calculate in MRI coordinates (Default: False).\n overwrite : bool\n If True, the destination file (if it exists) will be overwritten.\n If False (default), an error will be raised if the file exists.\n subjects_dir : None | str\n Override the SUBJECTS_DIR environment variable.\n %(verbose)s\n\n See Also\n --------\n make_forward_solution\n\n Returns\n -------\n fwd : Forward\n The generated forward solution.\n \"\"\"\n if not has_mne_c():\n raise RuntimeError('mne command line tools could not be found')\n\n # check for file existence\n temp_dir = tempfile.mkdtemp()\n if fname is None:\n fname = op.join(temp_dir, 'temp-fwd.fif')\n _check_fname(fname, overwrite)\n _validate_type(subject, \"str\", \"subject\")\n\n # check for meas to exist as string, or try to make evoked\n if isinstance(meas, str):\n if not op.isfile(meas):\n raise IOError('measurement file \"%s\" could not be found' % meas)\n elif isinstance(meas, (BaseRaw, BaseEpochs, Evoked)):\n meas_file = op.join(temp_dir, 'info.fif')\n write_info(meas_file, meas.info)\n meas = meas_file\n else:\n raise ValueError('meas must be string, Raw, Epochs, or Evoked')\n\n # deal with trans/mri\n if mri is not None and trans is not None:\n raise ValueError('trans and mri cannot both be specified')\n if mri is None and trans is None:\n # MNE allows this to default to a trans/mri in the subject's dir,\n # but let's be safe here and force the user to pass us a trans/mri\n raise ValueError('Either trans or mri must be specified')\n\n if trans is not None:\n _validate_type(trans, \"str\", \"trans\")\n if not op.isfile(trans):\n raise IOError('trans file \"%s\" not found' % trans)\n if mri is not None:\n # deal with trans\n if not isinstance(mri, str):\n if isinstance(mri, dict):\n mri_data = deepcopy(mri)\n mri = op.join(temp_dir, 'mri-trans.fif')\n try:\n write_trans(mri, mri_data)\n except Exception:\n raise IOError('mri was a dict, but could not be '\n 'written to disk as a transform file')\n 
else:\n raise ValueError('trans must be a string or dict (trans)')\n if not op.isfile(mri):\n raise IOError('trans file \"%s\" could not be found' % trans)\n\n # deal with meg/eeg\n if not meg and not eeg:\n raise ValueError('meg or eeg (or both) must be True')\n\n path, fname = op.split(fname)\n if not op.splitext(fname)[1] == '.fif':\n raise ValueError('Forward name does not end with .fif')\n path = op.abspath(path)\n\n # deal with mindist\n if mindist is not None:\n if isinstance(mindist, str):\n if not mindist.lower() == 'all':\n raise ValueError('mindist, if string, must be \"all\"')\n mindist = ['--all']\n else:\n mindist = ['--mindist', '%g' % mindist]\n\n # src, spacing, bem\n for element, name, kind in zip((src, spacing, bem),\n (\"src\", \"spacing\", \"bem\"),\n ('path-like', 'str', 'path-like')):\n if element is not None:\n _validate_type(element, kind, name, \"%s or None\" % kind)\n\n # put together the actual call\n cmd = ['mne_do_forward_solution',\n '--subject', subject,\n '--meas', meas,\n '--fwd', fname,\n '--destdir', path]\n if src is not None:\n cmd += ['--src', src]\n if spacing is not None:\n if spacing.isdigit():\n pass # spacing in mm\n else:\n # allow both \"ico4\" and \"ico-4\" style values\n match = re.match(r\"(oct|ico)-?(\\d+)$\", spacing)\n if match is None:\n raise ValueError(\"Invalid spacing parameter: %r\" % spacing)\n spacing = '-'.join(match.groups())\n cmd += ['--spacing', spacing]\n if mindist is not None:\n cmd += mindist\n if bem is not None:\n cmd += ['--bem', bem]\n if mri is not None:\n cmd += ['--mri', '%s' % mri]\n if trans is not None:\n cmd += ['--trans', '%s' % trans]\n if not meg:\n cmd.append('--eegonly')\n if not eeg:\n cmd.append('--megonly')\n if fixed:\n cmd.append('--fixed')\n if grad:\n cmd.append('--grad')\n if mricoord:\n cmd.append('--mricoord')\n if overwrite:\n cmd.append('--overwrite')\n\n env = os.environ.copy()\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n env['SUBJECTS_DIR'] = subjects_dir\n\n try:\n logger.info('Running forward solution generation command with '\n 'subjects_dir %s' % subjects_dir)\n run_subprocess(cmd, env=env)\n except Exception:\n raise\n else:\n fwd = read_forward_solution(op.join(path, fname), verbose=False)\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n return fwd\n\n\n@verbose\ndef average_forward_solutions(fwds, weights=None):\n \"\"\"Average forward solutions.\n\n Parameters\n ----------\n fwds : list of Forward\n Forward solutions to average. Each entry (dict) should be a\n forward solution.\n weights : array | None\n Weights to apply to each forward solution in averaging. If None,\n forward solutions will be equally weighted. 
Weights must be\n non-negative, and will be adjusted to sum to one.\n\n Returns\n -------\n fwd : Forward\n The averaged forward solution.\n \"\"\"\n # check for fwds being a list\n _validate_type(fwds, list, \"fwds\")\n if not len(fwds) > 0:\n raise ValueError('fwds must not be empty')\n\n # check weights\n if weights is None:\n weights = np.ones(len(fwds))\n weights = np.asanyarray(weights) # in case it's a list, convert it\n if not np.all(weights >= 0):\n raise ValueError('weights must be non-negative')\n if not len(weights) == len(fwds):\n raise ValueError('weights must be None or the same length as fwds')\n w_sum = np.sum(weights)\n if not w_sum > 0:\n raise ValueError('weights cannot all be zero')\n weights /= w_sum\n\n # check our forward solutions\n for fwd in fwds:\n # check to make sure it's a forward solution\n _validate_type(fwd, dict, \"each entry in fwds\", \"dict\")\n # check to make sure the dict is actually a fwd\n check_keys = ['info', 'sol_grad', 'nchan', 'src', 'source_nn', 'sol',\n 'source_rr', 'source_ori', 'surf_ori', 'coord_frame',\n 'mri_head_t', 'nsource']\n if not all(key in fwd for key in check_keys):\n raise KeyError('forward solution dict does not have all standard '\n 'entries, cannot compute average.')\n\n # check forward solution compatibility\n if any(fwd['sol'][k] != fwds[0]['sol'][k]\n for fwd in fwds[1:] for k in ['nrow', 'ncol']):\n raise ValueError('Forward solutions have incompatible dimensions')\n if any(fwd[k] != fwds[0][k] for fwd in fwds[1:]\n for k in ['source_ori', 'surf_ori', 'coord_frame']):\n raise ValueError('Forward solutions have incompatible orientations')\n\n # actually average them (solutions and gradients)\n fwd_ave = deepcopy(fwds[0])\n fwd_ave['sol']['data'] *= weights[0]\n fwd_ave['_orig_sol'] *= weights[0]\n for fwd, w in zip(fwds[1:], weights[1:]):\n fwd_ave['sol']['data'] += w * fwd['sol']['data']\n fwd_ave['_orig_sol'] += w * fwd['_orig_sol']\n if fwd_ave['sol_grad'] is not None:\n fwd_ave['sol_grad']['data'] *= weights[0]\n fwd_ave['_orig_sol_grad'] *= weights[0]\n for fwd, w in zip(fwds[1:], weights[1:]):\n fwd_ave['sol_grad']['data'] += w * fwd['sol_grad']['data']\n fwd_ave['_orig_sol_grad'] += w * fwd['_orig_sol_grad']\n return fwd_ave\n",
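A minimal standalone sketch of the loose-orientation weighting implemented by compute_orient_prior above: for a free-orientation operator the two tangential components of each source triplet are scaled by ``loose`` while the surface-normal component keeps weight 1. This reproduces only the weighting rule with plain NumPy; it is not a call into the MNE forward code, and the vertex count is arbitrary.

import numpy as np

def loose_orient_prior(n_vertices, loose=0.2):
    """Sketch of the loose weighting rule used in compute_orient_prior."""
    prior = np.ones(3 * n_vertices)  # one weight per source component
    if 0. < loose < 1.:
        # The tangential components (first two of each triplet) are
        # down-weighted; the normal component keeps weight 1.
        prior[0::3] *= loose
        prior[1::3] *= loose
    return prior

# two source locations with loose=0.2 -> [0.2, 0.2, 1., 0.2, 0.2, 1.]
print(loose_orient_prior(2, loose=0.2))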
"\"\"\"\n.. _ex-limo-data:\n\n=============================================================\nSingle trial linear regression analysis with the LIMO dataset\n=============================================================\n\nHere we explore the structure of the data contained in the\n`LIMO dataset`_.\nThis example replicates and extends some of the main analysis\nand tools integrated in `LIMO MEEG`_, a MATLAB toolbox originally designed\nto interface with EEGLAB_.\n\nIn summary, the example:\n\n- Fetches epoched data files for a single subject of the LIMO dataset [1]_.\n If the LIMO files are not found on disk, the\n fetcher :func:`mne.datasets.limo.load_data()` will automatically download\n the files from a remote repository.\n\n- During import, information about the data (i.e., sampling rate, number of\n epochs per condition, number and name of EEG channels per subject, etc.) is\n extracted from the LIMO :file:`.mat` files stored on disk and added to the\n epochs structure as metadata.\n\n- Fits linear models on the single subject's data and visualizes inferential\n measures to evaluate the significance of the estimated effects.\n\nReferences\n----------\n.. [1] Guillaume, Rousselet. (2016). LIMO EEG Dataset, [dataset].\n University of Edinburgh, Centre for Clinical Brain Sciences.\n https://doi.org/10.7488/ds/1556.\n.. [2] Rousselet, G. A., Gaspar, C. M., Pernet, C. R., Husk, J. S.,\n Bennett, P. J., & Sekuler, A. B. (2010). Healthy aging delays scalp EEG\n sensitivity to noise in a face discrimination task.\n Frontiers in psychology, 1, 19. https://doi.org/10.3389/fpsyg.2010.00019\n.. [3] Rousselet, G. A., Pernet, C. R., Bennett, P. J., & Sekuler, A. B.\n (2008). Parametric study of EEG sensitivity to phase noise during face\n processing. BMC neuroscience, 9(1), 98.\n https://doi.org/10.1186/1471-2202-9-98\n.. _LIMO dataset: https://datashare.is.ed.ac.uk/handle/10283/2189?show=full\n.. _LIMO MEEG: https://github.com/LIMO-EEG-Toolbox\n.. _EEGLAB: https://sccn.ucsd.edu/eeglab/index.php\n.. _Fig 1: https://bmcneurosci.biomedcentral.com/articles/10.1186/1471-2202-9-98/figures/1\n.. _least squares: https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.lstsq.html\n\"\"\" # noqa: E501\n# Authors: Jose C. 
Garcia Alanis <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mne.datasets.limo import load_data\nfrom mne.stats import linear_regression\nfrom mne.viz import plot_events, plot_compare_evokeds\nfrom mne import combine_evoked\n\n\nprint(__doc__)\n\n# subject to use\nsubj = 1\n\n###############################################################################\n# About the data\n# --------------\n#\n# In the original LIMO experiment (see [2]_), participants performed a\n# two-alternative forced choice task, discriminating between two face stimuli.\n# The same two faces were used during the whole experiment,\n# with varying levels of noise added, making the faces more or less\n# discernible to the observer (see `Fig 1`_ in [3]_ for a similar approach).\n#\n# The presented faces varied across a noise-signal (or phase-coherence)\n# continuum spanning from 0 to 85% in increasing steps of 5%.\n# In other words, faces with high phase-coherence (e.g., 85%) were easy to\n# identify, while faces with low phase-coherence (e.g., 5%) were hard to\n# identify and by extension very hard to discriminate.\n#\n#\n# Load the data\n# -------------\n#\n# We'll begin by loading the data from subject 1 of the LIMO dataset.\n\n# This step can take a little while if you're loading the data for the\n# first time.\nlimo_epochs = load_data(subject=subj)\n\n###############################################################################\n# Note that the result of the loading process is an\n# :class:`mne.EpochsArray` containing the data ready to interface\n# with MNE-Python.\n\nprint(limo_epochs)\n\n###############################################################################\n# Visualize events\n# ----------------\n#\n# We can visualise the distribution of the face events contained in the\n# ``limo_epochs`` structure. Events should appear clearly grouped, as the\n# epochs are ordered by condition.\n\nfig = plot_events(limo_epochs.events, event_id=limo_epochs.event_id)\nfig.suptitle(\"Distribution of events in LIMO epochs\")\n\n###############################################################################\n# As it can be seen above, conditions are coded as ``Face/A`` and ``Face/B``.\n# Information about the phase-coherence of the presented faces is stored in the\n# epochs metadata. These information can be easily accessed by calling\n# ``limo_epochs.metadata``. As shown below, the epochs metadata also contains\n# information about the presented faces for convenience.\n\nprint(limo_epochs.metadata.head())\n\n###############################################################################\n# Now let's take a closer look at the information in the epochs\n# metadata.\n\n# We want include all columns in the summary table\nepochs_summary = limo_epochs.metadata.describe(include='all').round(3)\nprint(epochs_summary)\n\n###############################################################################\n# The first column of the summary table above provides more or less the same\n# information as the ``print(limo_epochs)`` command we ran before. There are\n# 1055 faces (i.e., epochs), subdivided in 2 conditions (i.e., Face A and\n# Face B) and, for this particular subject, there are more epochs for the\n# condition Face B.\n#\n# In addition, we can see in the second column that the values for the\n# phase-coherence variable range from -1.619 to 1.642. This is because the\n# phase-coherence values are provided as a z-scored variable in the LIMO\n# dataset. 
Note that they have a mean of zero and a standard deviation of 1.\n#\n#\n# Visualize condition ERPs\n# ------------------------\n#\n# Let's plot the ERPs evoked by Face A and Face B, to see how similar they are.\n\n# only show -250 to 500 ms\nts_args = dict(xlim=(-0.25, 0.5))\n\n# plot evoked response for face A\nlimo_epochs['Face/A'].average().plot_joint(times=[0.15],\n title='Evoked response: Face A',\n ts_args=ts_args)\n# and face B\nlimo_epochs['Face/B'].average().plot_joint(times=[0.15],\n title='Evoked response: Face B',\n ts_args=ts_args)\n\n###############################################################################\n# We can also compute the difference wave contrasting Face A and Face B.\n# Although, looking at the evoked responses above, we shouldn't expect great\n# differences among these face-stimuli.\n\n# Face A minus Face B\ndifference_wave = combine_evoked([limo_epochs['Face/A'].average(),\n -limo_epochs['Face/B'].average()],\n weights='equal')\n\n# plot difference wave\ndifference_wave.plot_joint(times=[0.15], title='Difference Face A - Face B')\n\n###############################################################################\n# As expected, no clear pattern appears when contrasting\n# Face A and Face B. However, we could narrow our search a little bit more.\n# Since this is a \"visual paradigm\" it might be best to look at electrodes\n# located over the occipital lobe, as differences between stimuli (if any)\n# might easier to spot over visual areas.\n\n# Create a dictionary containing the evoked responses\nconditions = [\"Face/A\", \"Face/B\"]\nevokeds = {condition: limo_epochs[condition].average()\n for condition in conditions}\n\n# concentrate analysis an occipital electrodes (e.g. B11)\npick = evokeds[\"Face/A\"].ch_names.index('B11')\n\n# compare evoked responses\nplot_compare_evokeds(evokeds, picks=pick, ylim=dict(eeg=(-15, 7.5)))\n\n###############################################################################\n# We do see a difference between Face A and B, but it is pretty small.\n#\n#\n# Visualize effect of stimulus phase-coherence\n# --------------------------------------------\n#\n# Since phase-coherence\n# determined whether a face stimulus could be easily identified,\n# one could expect that faces with high phase-coherence should evoke stronger\n# activation patterns along occipital electrodes.\n\nphase_coh = limo_epochs.metadata['phase-coherence']\n# get levels of phase coherence\nlevels = sorted(phase_coh.unique())\n# create labels for levels of phase coherence (i.e., 0 - 85%)\nlabels = [\"{0:.2f}\".format(i) for i in np.arange(0., 0.90, 0.05)]\n\n# create dict of evokeds for each level of phase-coherence\nevokeds = {label: limo_epochs[phase_coh == level].average()\n for level, label in zip(levels, labels)}\n\n# pick channel to plot\nelectrodes = ['C22', 'B11']\n# create figures\nfor electrode in electrodes:\n fig, ax = plt.subplots(figsize=(8, 4))\n plot_compare_evokeds(evokeds,\n axes=ax,\n ylim=dict(eeg=(-20, 15)),\n picks=electrode,\n cmap=(\"Phase coherence\", \"magma\"))\n\n###############################################################################\n# As shown above, there are some considerable differences between the\n# activation patterns evoked by stimuli with low vs. 
high phase-coherence at\n# the chosen electrodes.\n#\n#\n# Prepare data for linear regression analysis\n# --------------------------------------------\n#\n# Before we test the significance of these differences using linear\n# regression, we'll interpolate missing channels that were\n# dropped during preprocessing of the data.\n# Furthermore, we'll drop the EOG channels (marked by the \"EXG\" prefix)\n# present in the data:\n\nlimo_epochs.interpolate_bads(reset_bads=True)\nlimo_epochs.drop_channels(['EXG1', 'EXG2', 'EXG3', 'EXG4'])\n\n###############################################################################\n# Define predictor variables and design matrix\n# --------------------------------------------\n#\n# To run the regression analysis,\n# we need to create a design matrix containing information about the\n# variables (i.e., predictors) we want to use for prediction of brain\n# activity patterns. For this purpose, we'll use the information we have in\n# ``limo_epochs.metadata``: phase-coherence and Face A vs. Face B.\n\n# name of predictors + intercept\npredictor_vars = ['face a - face b', 'phase-coherence', 'intercept']\n\n# create design matrix\ndesign = limo_epochs.metadata[['phase-coherence', 'face']].copy()\ndesign['face a - face b'] = np.where(design['face'] == 'A', 1, -1)\ndesign['intercept'] = 1\ndesign = design[predictor_vars]\n\n###############################################################################\n# Now we can set up the linear model to be used in the analysis using\n# MNE-Python's func:`~mne.stats.linear_regression` function.\n\nreg = linear_regression(limo_epochs,\n design_matrix=design,\n names=predictor_vars)\n\n###############################################################################\n# Extract regression coefficients\n# -------------------------------\n#\n# The results are stored within the object ``reg``,\n# which is a dictionary of evoked objects containing\n# multiple inferential measures for each predictor in the design matrix.\n\nprint('predictors are:', list(reg))\nprint('fields are:', [field for field in getattr(reg['intercept'], '_fields')])\n\n###############################################################################\n# Plot model results\n# ------------------\n#\n# Now we can access and plot the results of the linear regression analysis by\n# calling :samp:`reg['{<name of predictor>}'].{<measure of interest>}` and\n# using the\n# :meth:`~mne.Evoked.plot_joint` method just as we would do with any other\n# evoked object.\n# Below we can see a clear effect of phase-coherence, with higher\n# phase-coherence (i.e., better \"face visibility\") having a negative effect on\n# the activity measured at occipital electrodes around 200 to 250 ms following\n# stimulus onset.\n\nreg['phase-coherence'].beta.plot_joint(ts_args=ts_args,\n title='Effect of Phase-coherence',\n times=[0.23])\n\n###############################################################################\n# We can also plot the corresponding T values.\n\n# use unit=False and scale=1 to keep values at their original\n# scale (i.e., avoid conversion to micro-volt).\nts_args = dict(xlim=(-0.25, 0.5),\n unit=False)\ntopomap_args = dict(scalings=dict(eeg=1),\n average=0.05)\n\n# sphinx_gallery_thumbnail_number = 9\nfig = reg['phase-coherence'].t_val.plot_joint(ts_args=ts_args,\n topomap_args=topomap_args,\n times=[0.23])\nfig.axes[0].set_ylabel('T-value')\n\n###############################################################################\n# Conversely, there appears to be no (or very small) 
systematic effects when\n# comparing Face A and Face B stimuli. This is largely consistent with the\n# difference wave approach presented above.\nts_args = dict(xlim=(-0.25, 0.5))\n\nreg['face a - face b'].beta.plot_joint(ts_args=ts_args,\n title='Effect of Face A vs. Face B',\n times=[0.23])\n",
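As a worked illustration of what the regression step above computes per channel and time point, the sketch below solves the same kind of ordinary least-squares problem with NumPy for one simulated "channel". The design matrix columns mirror the predictors used in the example, but the data and coefficients are made up, not values from the LIMO dataset.

import numpy as np

rng = np.random.RandomState(42)
n_epochs = 1055                          # number of epochs reported above
X = np.ones((n_epochs, 3))               # columns: face a - face b, phase-coherence, intercept
X[:, 0] = rng.choice([1., -1.], n_epochs)
X[:, 1] = rng.randn(n_epochs)            # stand-in for the z-scored phase-coherence

true_beta = np.array([0.3, -2.0, 1.5])
y = X @ true_beta + 0.1 * rng.randn(n_epochs)   # simulated single channel/time point

beta_hat, _, _, _ = np.linalg.lstsq(X, y, rcond=None)
print(beta_hat)   # close to [0.3, -2.0, 1.5]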
"\"\"\"Read .res4 files.\"\"\"\n\n# Authors: Matti Hämäläinen <[email protected]>\n# Eric Larson <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\n\nimport numpy as np\n\nfrom ...utils import logger\nfrom .constants import CTF\n\n\ndef _make_ctf_name(directory, extra, raise_error=True):\n \"\"\"Make a CTF name.\"\"\"\n fname = op.join(directory, op.basename(directory)[:-3] + '.' + extra)\n if not op.isfile(fname):\n if raise_error:\n raise IOError('Standard file %s not found' % fname)\n else:\n return None\n return fname\n\n\ndef _read_double(fid, n=1):\n \"\"\"Read a double.\"\"\"\n return np.fromfile(fid, '>f8', n)\n\n\ndef _read_string(fid, n_bytes, decode=True):\n \"\"\"Read string.\"\"\"\n s0 = fid.read(n_bytes)\n s = s0.split(b'\\x00')[0]\n return s.decode('utf-8') if decode else s\n\n\ndef _read_ustring(fid, n_bytes):\n \"\"\"Read unsigned character string.\"\"\"\n return np.fromfile(fid, '>B', n_bytes)\n\n\ndef _read_int2(fid):\n \"\"\"Read int from short.\"\"\"\n return np.fromfile(fid, '>i2', 1)[0]\n\n\ndef _read_int(fid):\n \"\"\"Read a 32-bit integer.\"\"\"\n return np.fromfile(fid, '>i4', 1)[0]\n\n\ndef _move_to_next(fid, byte=8):\n \"\"\"Move to next byte boundary.\"\"\"\n now = fid.tell()\n if now % byte != 0:\n now = now - (now % byte) + byte\n fid.seek(now, 0)\n\n\ndef _read_filter(fid):\n \"\"\"Read filter information.\"\"\"\n f = dict()\n f['freq'] = _read_double(fid)[0]\n f['class'] = _read_int(fid)\n f['type'] = _read_int(fid)\n f['npar'] = _read_int2(fid)\n f['pars'] = _read_double(fid, f['npar'])\n return f\n\n\ndef _read_comp_coeff(fid, d):\n \"\"\"Read compensation coefficients.\"\"\"\n # Read the coefficients and initialize\n d['ncomp'] = _read_int2(fid)\n d['comp'] = list()\n # Read each record\n dt = np.dtype([\n ('sensor_name', 'S32'),\n ('coeff_type', '>i4'), ('d0', '>i4'),\n ('ncoeff', '>i2'),\n ('sensors', 'S%s' % CTF.CTFV_SENSOR_LABEL, CTF.CTFV_MAX_BALANCING),\n ('coeffs', '>f8', CTF.CTFV_MAX_BALANCING)])\n comps = np.fromfile(fid, dt, d['ncomp'])\n for k in range(d['ncomp']):\n comp = dict()\n d['comp'].append(comp)\n comp['sensor_name'] = \\\n comps['sensor_name'][k].split(b'\\x00')[0].decode('utf-8')\n comp['coeff_type'] = comps['coeff_type'][k]\n comp['ncoeff'] = comps['ncoeff'][k]\n comp['sensors'] = [s.split(b'\\x00')[0].decode('utf-8')\n for s in comps['sensors'][k][:comp['ncoeff']]]\n comp['coeffs'] = comps['coeffs'][k][:comp['ncoeff']]\n comp['scanno'] = d['ch_names'].index(comp['sensor_name'])\n\n\ndef _read_res4(dsdir):\n \"\"\"Read the magical res4 file.\"\"\"\n # adapted from read_res4.c\n name = _make_ctf_name(dsdir, 'res4')\n res = dict()\n with open(name, 'rb') as fid:\n # Read the fields\n res['head'] = _read_string(fid, 8)\n res['appname'] = _read_string(fid, 256)\n res['origin'] = _read_string(fid, 256)\n res['desc'] = _read_string(fid, 256)\n res['nave'] = _read_int2(fid)\n res['data_time'] = _read_string(fid, 255)\n res['data_date'] = _read_string(fid, 255)\n # Seems that date and time can be swapped\n # (are they entered manually?!)\n if '/' in res['data_time'] and ':' in res['data_date']:\n data_date = res['data_date']\n res['data_date'] = res['data_time']\n res['data_time'] = data_date\n res['nsamp'] = _read_int(fid)\n res['nchan'] = _read_int2(fid)\n _move_to_next(fid, 8)\n res['sfreq'] = _read_double(fid)[0]\n res['epoch_time'] = _read_double(fid)[0]\n res['no_trials'] = _read_int2(fid)\n _move_to_next(fid, 4)\n res['pre_trig_pts'] = _read_int(fid)\n res['no_trials_done'] = _read_int2(fid)\n 
res['no_trials_bst_message_windowlay'] = _read_int2(fid)\n _move_to_next(fid, 4)\n res['save_trials'] = _read_int(fid)\n res['primary_trigger'] = fid.read(1)\n res['secondary_trigger'] = [fid.read(1)\n for k in range(CTF.CTFV_MAX_AVERAGE_BINS)]\n res['trigger_polarity_mask'] = fid.read(1)\n res['trigger_mode'] = _read_int2(fid)\n _move_to_next(fid, 4)\n res['accept_reject'] = _read_int(fid)\n res['run_time_bst_message_windowlay'] = _read_int2(fid)\n _move_to_next(fid, 4)\n res['zero_head'] = _read_int(fid)\n _move_to_next(fid, 4)\n res['artifact_mode'] = _read_int(fid)\n _read_int(fid) # padding\n res['nf_run_name'] = _read_string(fid, 32)\n res['nf_run_title'] = _read_string(fid, 256)\n res['nf_instruments'] = _read_string(fid, 32)\n res['nf_collect_descriptor'] = _read_string(fid, 32)\n res['nf_subject_id'] = _read_string(fid, 32)\n res['nf_operator'] = _read_string(fid, 32)\n if len(res['nf_operator']) == 0:\n res['nf_operator'] = None\n res['nf_sensor_file_name'] = _read_ustring(fid, 60)\n _move_to_next(fid, 4)\n res['rdlen'] = _read_int(fid)\n fid.seek(CTF.FUNNY_POS, 0)\n\n if res['rdlen'] > 0:\n res['run_desc'] = _read_string(fid, res['rdlen'])\n\n # Filters\n res['nfilt'] = _read_int2(fid)\n res['filters'] = list()\n for k in range(res['nfilt']):\n res['filters'].append(_read_filter(fid))\n\n # Channel information (names, then data)\n res['ch_names'] = list()\n for k in range(res['nchan']):\n ch_name = _read_string(fid, 32)\n res['ch_names'].append(ch_name)\n _coil_dt = np.dtype([\n ('pos', '>f8', 3), ('d0', '>f8'),\n ('norm', '>f8', 3), ('d1', '>f8'),\n ('turns', '>i2'), ('d2', '>i4'), ('d3', '>i2'),\n ('area', '>f8')])\n _ch_dt = np.dtype([\n ('sensor_type_index', '>i2'),\n ('original_run_no', '>i2'),\n ('coil_type', '>i4'),\n ('proper_gain', '>f8'),\n ('qgain', '>f8'),\n ('io_gain', '>f8'),\n ('io_offset', '>f8'),\n ('num_coils', '>i2'),\n ('grad_order_no', '>i2'), ('d0', '>i4'),\n ('coil', _coil_dt, CTF.CTFV_MAX_COILS),\n ('head_coil', _coil_dt, CTF.CTFV_MAX_COILS)])\n chs = np.fromfile(fid, _ch_dt, res['nchan'])\n for coil in (chs['coil'], chs['head_coil']):\n coil['pos'] /= 100.\n coil['area'] *= 1e-4\n # convert to dict\n chs = [dict(zip(chs.dtype.names, x)) for x in chs]\n res['chs'] = chs\n for k in range(res['nchan']):\n res['chs'][k]['ch_name'] = res['ch_names'][k]\n\n # The compensation coefficients\n _read_comp_coeff(fid, res)\n logger.info(' res4 data read.')\n return res\n",
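A minimal sketch of the big-endian decoding used throughout this reader, with np.frombuffer standing in for the np.fromfile calls made on an open .res4 file; the byte values are made up for illustration only.

import numpy as np

# A 32-bit big-endian integer followed by a 64-bit big-endian double,
# i.e. the layouts read by _read_int and _read_double above.
raw = np.array([1200], '>i4').tobytes() + np.array([600.0], '>f8').tobytes()

nsamp = np.frombuffer(raw[:4], '>i4')[0]
sfreq = np.frombuffer(raw[4:], '>f8')[0]
print(nsamp, sfreq)   # 1200 600.0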
"import numpy as np\nfrom scipy import linalg\nfrom numpy.testing import assert_almost_equal, assert_array_almost_equal\n\nfrom mne.time_frequency import stft, istft, stftfreq\nfrom mne.time_frequency._stft import stft_norm2\n\n\ndef test_stft():\n \"\"\"Test stft and istft tight frame property.\"\"\"\n sfreq = 1000. # Hz\n f = 7. # Hz\n for T in [127, 128]: # try with even and odd numbers\n # Test with low frequency signal\n t = np.arange(T).astype(np.float)\n x = np.sin(2 * np.pi * f * t / sfreq)\n x = np.array([x, x + 1.])\n wsize = 128\n tstep = 4\n X = stft(x, wsize, tstep)\n xp = istft(X, tstep, Tx=T)\n\n freqs = stftfreq(wsize, sfreq=1000)\n\n max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]\n\n assert X.shape[1] == len(freqs)\n assert np.all(freqs >= 0.)\n assert np.abs(max_freq - f) < 1.\n assert_array_almost_equal(x, xp, decimal=6)\n\n # norm conservation thanks to tight frame property\n assert_almost_equal(np.sqrt(stft_norm2(X)),\n [linalg.norm(xx) for xx in x], decimal=6)\n\n # Test with random signal\n x = np.random.randn(2, T)\n wsize = 16\n tstep = 8\n X = stft(x, wsize, tstep)\n xp = istft(X, tstep, Tx=T)\n\n freqs = stftfreq(wsize, sfreq=1000)\n\n max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]\n\n assert X.shape[1] == len(freqs)\n assert np.all(freqs >= 0.)\n assert_array_almost_equal(x, xp, decimal=6)\n\n # norm conservation thanks to tight frame property\n assert_almost_equal(np.sqrt(stft_norm2(X)),\n [linalg.norm(xx) for xx in x],\n decimal=6)\n\n # Try with empty array\n x = np.zeros((0, T))\n X = stft(x, wsize, tstep)\n xp = istft(X, tstep, T)\n assert xp.shape == x.shape\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n.. _tut-artifact-ssp:\n\nRepairing artifacts with SSP\n============================\n\nThis tutorial covers the basics of signal-space projection (SSP) and shows\nhow SSP can be used for artifact repair; extended examples illustrate use\nof SSP for environmental noise reduction, and for repair of ocular and\nheartbeat artifacts.\n\n.. contents:: Page contents\n :local:\n :depth: 2\n\nWe begin as always by importing the necessary Python modules. To save ourselves\nfrom repeatedly typing ``mne.preprocessing`` we'll directly import a couple\nfunctions from that submodule:\n\"\"\"\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mne\nfrom mne.preprocessing import (create_eog_epochs, create_ecg_epochs,\n compute_proj_ecg, compute_proj_eog)\n\n###############################################################################\n# .. note::\n# Before applying SSP (or any artifact repair strategy), be sure to observe\n# the artifacts in your data to make sure you choose the right repair tool.\n# Sometimes the right tool is no tool at all — if the artifacts are small\n# enough you may not even need to repair them to get good analysis results.\n# See :ref:`tut-artifact-overview` for guidance on detecting and\n# visualizing various types of artifact.\n#\n#\n# What is SSP?\n# ^^^^^^^^^^^^\n#\n# Signal-space projection (SSP) [1]_ is a technique for removing noise from EEG\n# and MEG signals by :term:`projecting <projector>` the signal onto a\n# lower-dimensional subspace. The subspace is chosen by calculating the average\n# pattern across sensors when the noise is present, treating that pattern as\n# a \"direction\" in the sensor space, and constructing the subspace to be\n# orthogonal to the noise direction (for a detailed walk-through of projection\n# see :ref:`tut-projectors-background`).\n#\n# The most common use of SSP is to remove noise from MEG signals when the noise\n# comes from environmental sources (sources outside the subject's body and the\n# MEG system, such as the electromagnetic fields from nearby electrical\n# equipment) and when that noise is *stationary* (doesn't change much over the\n# duration of the recording). However, SSP can also be used to remove\n# biological artifacts such as heartbeat (ECG) and eye movement (EOG)\n# artifacts. Examples of each of these are given below.\n#\n#\n# Example: Environmental noise reduction from empty-room recordings\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# The :ref:`example data <sample-dataset>` was recorded on a Neuromag system,\n# which stores SSP projectors for environmental noise removal in the system\n# configuration (so that reasonably clean raw data can be viewed in real-time\n# during acquisition). For this reason, all the :class:`~mne.io.Raw` data in\n# the example dataset already includes SSP projectors, which are noted in the\n# output when loading the data:\n\nsample_data_folder = mne.datasets.sample.data_path()\nsample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis_raw.fif')\nraw = mne.io.read_raw_fif(sample_data_raw_file)\n\n###############################################################################\n# The :ref:`example data <sample-dataset>` also includes an \"empty room\"\n# recording taken the same day as the recording of the subject. 
This will\n# provide a more accurate estimate of environmental noise than the projectors\n# stored with the system (which are typically generated during annual\n# maintenance and tuning). Since we have this subject-specific empty-room\n# recording, we'll create our own projectors from it and discard the\n# system-provided SSP projectors (saving them first, for later comparison with\n# the custom ones):\n\nsystem_projs = raw.info['projs']\nraw.del_proj()\nempty_room_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'ernoise_raw.fif')\nempty_room_raw = mne.io.read_raw_fif(empty_room_file)\n\n###############################################################################\n# Notice that the empty room recording itself has the system-provided SSP\n# projectors in it — we'll remove those from the empty room file too.\n\nempty_room_raw.del_proj()\n\n###############################################################################\n# Visualizing the empty-room noise\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Let's take a look at the spectrum of the empty room noise. We can view an\n# individual spectrum for each sensor, or an average (with confidence band)\n# across sensors:\n\nfor average in (False, True):\n empty_room_raw.plot_psd(average=average, dB=False, xscale='log')\n\n###############################################################################\n# Creating the empty-room projectors\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# We create the SSP vectors using :func:`~mne.compute_proj_raw`, and control\n# the number of projectors with parameters ``n_grad`` and ``n_mag``. Once\n# created, the field pattern of the projectors can be easily visualized with\n# :func:`~mne.viz.plot_projs_topomap`. We include the parameter\n# ``vlim='joint'`` so that the colormap is computed jointly for all projectors\n# of a given channel type; this makes it easier to compare their relative\n# smoothness. Note that for the function to know the types of channels in a\n# projector, you must also provide the corresponding :class:`~mne.Info` object:\n\n# sphinx_gallery_thumbnail_number = 3\nempty_room_projs = mne.compute_proj_raw(empty_room_raw, n_grad=3, n_mag=3)\nmne.viz.plot_projs_topomap(empty_room_projs, colorbar=True, vlim='joint',\n info=empty_room_raw.info)\n\n###############################################################################\n# Notice that the gradiometer-based projectors seem to reflect problems with\n# individual sensor units rather than a global noise source (indeed, planar\n# gradiometers are much less sensitive to distant sources). This is the reason\n# that the system-provided noise projectors are computed only for\n# magnetometers. Comparing the system-provided projectors to the\n# subject-specific ones, we can see they are reasonably similar (though in a\n# different order) and the left-right component seems to have changed\n# polarity.\n\nfig, axs = plt.subplots(2, 3)\nfor idx, _projs in enumerate([system_projs, empty_room_projs[3:]]):\n mne.viz.plot_projs_topomap(_projs, axes=axs[idx], colorbar=True,\n vlim='joint', info=empty_room_raw.info)\n\n###############################################################################\n# Visualizing how projectors affect the signal\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# We could visualize the different effects these have on the data by applying\n# each set of projectors to different copies of the :class:`~mne.io.Raw` object\n# using :meth:`~mne.io.Raw.apply_proj`. 
However, the :meth:`~mne.io.Raw.plot`\n# method has a ``proj`` parameter that allows us to *temporarily* apply\n# projectors while plotting, so we can use this to visualize the difference\n# without needing to copy the data. Because the projectors are so similar, we\n# need to zoom in pretty close on the data to see any differences:\n\nmags = mne.pick_types(raw.info, meg='mag')\nfor title, projs in [('system', system_projs),\n ('subject-specific', empty_room_projs[3:])]:\n raw.add_proj(projs, remove_existing=True)\n fig = raw.plot(proj=True, order=mags, duration=1, n_channels=2)\n fig.subplots_adjust(top=0.9) # make room for title\n fig.suptitle('{} projectors'.format(title), size='xx-large', weight='bold')\n\n###############################################################################\n# The effect is sometimes easier to see on averaged data. Here we use an\n# interactive feature of :func:`mne.Evoked.plot_topomap` to turn projectors on\n# and off to see the effect on the data. Of course, the interactivity won't\n# work on the tutorial website, but you can download the tutorial and try it\n# locally:\n\nevents = mne.find_events(raw, stim_channel='STI 014')\nevent_id = {'auditory/left': 1}\n\n# NOTE: appropriate rejection criteria are highly data-dependent\nreject = dict(mag=4000e-15, # 4000 fT\n grad=4000e-13, # 4000 fT/cm\n eeg=150e-6, # 150 μV\n eog=250e-6) # 250 μV\n\n# time range where we expect to see the auditory N100: 50-150 ms post-stimulus\ntimes = np.linspace(0.05, 0.15, 5)\n\nepochs = mne.Epochs(raw, events, event_id, proj='delayed', reject=reject)\nfig = epochs.average().plot_topomap(times, proj='interactive')\n\n###############################################################################\n# Plotting the ERP/F using ``evoked.plot()`` or ``evoked.plot_joint()`` with\n# and without projectors applied can also be informative.\n#\n#\n# Example: EOG and ECG artifact repair\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# Visualizing the artifacts\n# ~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# As mentioned in :ref:`the ICA tutorial <tut-artifact-ica>`, an important\n# first step is visualizing the artifacts you want to repair. Here they are in\n# the raw data:\n\n# pick some channels that clearly show heartbeats and blinks\nregexp = r'(MEG [12][45][123]1|EEG 00.)'\nartifact_picks = mne.pick_channels_regexp(raw.ch_names, regexp=regexp)\nraw.plot(order=artifact_picks, n_channels=len(artifact_picks))\n\n###############################################################################\n# Repairing ECG artifacts with SSP\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# MNE-Python provides several functions for detecting and removing heartbeats\n# from EEG and MEG data. 
As we saw in :ref:`tut-artifact-overview`,\n# :func:`~mne.preprocessing.create_ecg_epochs` can be used to both detect and\n# extract heartbeat artifacts into an :class:`~mne.Epochs` object, which can\n# be used to visualize how the heartbeat artifacts manifest across the sensors:\n\necg_evoked = create_ecg_epochs(raw).average()\necg_evoked.plot_joint()\n\n###############################################################################\n# Looks like the EEG channels are pretty spread out; let's baseline-correct and\n# plot again:\n\necg_evoked.apply_baseline((None, None))\necg_evoked.plot_joint()\n\n###############################################################################\n# To compute SSP projectors for the heartbeat artifact, you can use\n# :func:`~mne.preprocessing.compute_proj_ecg`, which takes a\n# :class:`~mne.io.Raw` object as input and returns the requested number of\n# projectors for magnetometers, gradiometers, and EEG channels (default is two\n# projectors for each channel type).\n# :func:`~mne.preprocessing.compute_proj_ecg` also returns an :term:`events`\n# array containing the sample numbers corresponding to the onset of each\n# detected heartbeat.\n\nprojs, events = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=1, reject=None)\n\n###############################################################################\n# The first line of output tells us that\n# :func:`~mne.preprocessing.compute_proj_ecg` found three existing projectors\n# already in the :class:`~mne.io.Raw` object, and will include those in the\n# list of projectors that it returns (appending the new ECG projectors to the\n# end of the list). If you don't want that, you can change that behavior with\n# the boolean ``no_proj`` parameter. Since we've already run the computation,\n# we can just as easily separate out the ECG projectors by indexing the list of\n# projectors:\n\necg_projs = projs[3:]\nprint(ecg_projs)\n\n###############################################################################\n# Just like with the empty-room projectors, we can visualize the scalp\n# distribution:\n\nmne.viz.plot_projs_topomap(ecg_projs, info=raw.info)\n\n###############################################################################\n# Since no dedicated ECG sensor channel was detected in the\n# :class:`~mne.io.Raw` object, by default\n# :func:`~mne.preprocessing.compute_proj_ecg` used the magnetometers to\n# estimate the ECG signal (as stated on the third line of output, above). You\n# can also supply the ``ch_name`` parameter to restrict which channel to use\n# for ECG artifact detection; this is most useful when you had an ECG sensor\n# but it is not labeled as such in the :class:`~mne.io.Raw` file.\n#\n# The next few lines of the output describe the filter used to isolate ECG\n# events. The default settings are usually adequate, but the filter can be\n# customized via the parameters ``ecg_l_freq``, ``ecg_h_freq``, and\n# ``filter_length`` (see the documentation of\n# :func:`~mne.preprocessing.compute_proj_ecg` for details).\n#\n# .. TODO what are the cases where you might need to customize the ECG filter?\n# infants? Heart murmur?\n#\n# Once the ECG events have been identified,\n# :func:`~mne.preprocessing.compute_proj_ecg` will also filter the data\n# channels before extracting epochs around each heartbeat, using the parameter\n# values given in ``l_freq``, ``h_freq``, ``filter_length``, ``filter_method``,\n# and ``iir_params``. Here again, the default parameter values are usually\n# adequate.\n#\n# .. 
TODO should advice for filtering here be the same as advice for filtering\n# raw data generally? (e.g., keep high-pass very low to avoid peak shifts?\n# what if your raw data is already filtered?)\n#\n# By default, the filtered epochs will be averaged together\n# before the projection is computed; this can be controlled with the boolean\n# ``average`` parameter.\n#\n# .. TODO what is the (dis)advantage of **not** averaging before projection?\n#\n# To get a sense of how the heartbeat affects the signal at each sensor, you\n# can plot the data with and without the ECG projectors:\n\n\nraw.del_proj()\nfor title, proj in [('Without', empty_room_projs), ('With', ecg_projs)]:\n raw.add_proj(proj, remove_existing=False)\n fig = raw.plot(order=artifact_picks, n_channels=len(artifact_picks))\n fig.subplots_adjust(top=0.9) # make room for title\n fig.suptitle('{} ECG projectors'.format(title), size='xx-large',\n weight='bold')\n\n###############################################################################\n# Finally, note that above we passed ``reject=None`` to the\n# :func:`~mne.preprocessing.compute_proj_ecg` function, meaning that all\n# detected ECG epochs would be used when computing the projectors (regardless\n# of signal quality in the data sensors during those epochs). The default\n# behavior is to reject epochs based on signal amplitude: epochs with\n# peak-to-peak amplitudes exceeding 50 μV in EEG channels, 250 μV in EOG\n# channels, 2000 fT/cm in gradiometer channels, or 3000 fT in magnetometer\n# channels. You can change these thresholds by passing a dictionary with keys\n# ``eeg``, ``eog``, ``mag``, and ``grad`` (though be sure to pass the threshold\n# values in volts, teslas, or teslas/meter). Generally, it is a good idea to\n# reject such epochs when computing the ECG projectors (since presumably the\n# high-amplitude fluctuations in the channels are noise, not reflective of\n# brain activity); passing ``reject=None`` above was done simply to avoid the\n# dozens of extra lines of output (enumerating which sensor(s) were responsible\n# for each rejected epoch) from cluttering up the tutorial.\n#\n# .. note::\n#\n# :func:`~mne.preprocessing.compute_proj_ecg` has a similar parameter\n# ``flat`` for specifying the *minimum* acceptable peak-to-peak amplitude\n# for each channel type.\n#\n# While :func:`~mne.preprocessing.compute_proj_ecg` conveniently combines\n# several operations into a single function, MNE-Python also provides functions\n# for performing each part of the process. Specifically:\n#\n# - :func:`mne.preprocessing.find_ecg_events` for detecting heartbeats in a\n# :class:`~mne.io.Raw` object and returning a corresponding :term:`events`\n# array\n#\n# - :func:`mne.preprocessing.create_ecg_epochs` for detecting heartbeats in a\n# :class:`~mne.io.Raw` object and returning an :class:`~mne.Epochs` object\n#\n# - :func:`mne.compute_proj_epochs` for creating projector(s) from any\n# :class:`~mne.Epochs` object\n#\n# See the documentation of each function for further details.\n#\n#\n# Repairing EOG artifacts with SSP\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Once again let's visualize our artifact before trying to repair it. 
We've\n# seen above the large deflections in frontal EEG channels in the raw data;\n# here is how the ocular artifacts manifests across all the sensors:\n\neog_evoked = create_eog_epochs(raw).average()\neog_evoked.apply_baseline((None, None))\neog_evoked.plot_joint()\n\n###############################################################################\n# Just like we did with the heartbeat artifact, we can compute SSP projectors\n# for the ocular artifact using :func:`~mne.preprocessing.compute_proj_eog`,\n# which again takes a :class:`~mne.io.Raw` object as input and returns the\n# requested number of projectors for magnetometers, gradiometers, and EEG\n# channels (default is two projectors for each channel type). This time, we'll\n# pass ``no_proj`` parameter (so we get back only the new EOG projectors, not\n# also the existing projectors in the :class:`~mne.io.Raw` object), and we'll\n# ignore the events array by assigning it to ``_`` (the conventional way of\n# handling unwanted return elements in Python).\n\neog_projs, _ = compute_proj_eog(raw, n_grad=1, n_mag=1, n_eeg=1, reject=None,\n no_proj=True)\n\n###############################################################################\n# Just like with the empty-room and ECG projectors, we can visualize the scalp\n# distribution:\n\nmne.viz.plot_projs_topomap(eog_projs, info=raw.info)\n\n###############################################################################\n# Now we repeat the plot from above (with empty room and ECG projectors) and\n# compare it to a plot with empty room, ECG, and EOG projectors, to see how\n# well the ocular artifacts have been repaired:\n\nfor title in ('Without', 'With'):\n if title == 'With':\n raw.add_proj(eog_projs)\n fig = raw.plot(order=artifact_picks, n_channels=len(artifact_picks))\n fig.subplots_adjust(top=0.9) # make room for title\n fig.suptitle('{} EOG projectors'.format(title), size='xx-large',\n weight='bold')\n\n###############################################################################\n# Notice that the small peaks in the first to magnetometer channels (``MEG\n# 1411`` and ``MEG 1421``) that occur at the same time as the large EEG\n# deflections have also been removed.\n#\n#\n# Choosing the number of projectors\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# In the examples above, we used 3 projectors (all magnetometer) to capture\n# empty room noise, and saw how projectors computed for the gradiometers failed\n# to capture *global* patterns (and thus we discarded the gradiometer\n# projectors). Then we computed 3 projectors (1 for each channel type) to\n# capture the heartbeat artifact, and 3 more to capture the ocular artifact.\n# How did we choose these numbers? The short answer is \"based on experience\" —\n# knowing how heartbeat artifacts typically manifest across the sensor array\n# allows us to recognize them when we see them, and recognize when additional\n# projectors are capturing something else other than a heartbeat artifact (and\n# thus may be removing brain signal and should be discarded).\n#\n#\n# References\n# ^^^^^^^^^^\n#\n# .. [1] Uusitalo MA and Ilmoniemi RJ. (1997). Signal-space projection method\n# for separating MEG or EEG into components. *Med Biol Eng Comput*\n# 35(2), 135–140. doi:10.1007/BF02534144\n",
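As a compact recap of the workflow above, the sketch below strings the main SSP calls together on the MNE sample dataset: drop the system projectors, compute one ECG and one EOG projector per channel type, and compare the raw traces with and without them. This is a minimal sketch rather than a substitute for the tutorial; the parameter choices simply mirror those used above and assume the mne package and the sample data are available.

import os
import mne
from mne.preprocessing import compute_proj_ecg, compute_proj_eog

# Load the sample recording (downloaded on first use).
sample_data_folder = mne.datasets.sample.data_path()
raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                        'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(raw_file, preload=True)

# Drop the system-provided projectors, then compute one projector per
# channel type for the heartbeat and the ocular artifact. no_proj=True
# returns only the newly computed projectors; reject=None mirrors the
# tutorial and keeps every detected artifact epoch.
raw.del_proj()
ecg_projs, _ = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=1,
                                reject=None, no_proj=True)
eog_projs, _ = compute_proj_eog(raw, n_grad=1, n_mag=1, n_eeg=1,
                                reject=None, no_proj=True)
raw.add_proj(ecg_projs + eog_projs)

# Scalp topographies of the new projectors, and the raw traces with the
# projectors temporarily applied vs. left unapplied.
mne.viz.plot_projs_topomap(ecg_projs + eog_projs, info=raw.info)
raw.plot(proj=True, duration=10)
raw.plot(proj=False, duration=10)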
"\"\"\"\n=======================================\nSimulate raw data using subject anatomy\n=======================================\n\nThis example illustrates how to generate source estimates and simulate raw data\nusing subject anatomy with the :class:`mne.simulation.SourceSimulator` class.\nOnce the raw data is simulated, generated source estimates are reconstructed\nusing dynamic statistical parametric mapping (dSPM) inverse operator.\n\"\"\"\n\n# Author: Ivana Kojcic <[email protected]>\n# Eric Larson <[email protected]>\n# Kostiantyn Maksymenko <[email protected]>\n# Samuel Deslauriers-Gauthier <[email protected]>\n\n# License: BSD (3-clause)\n\nimport os.path as op\n\nimport numpy as np\n\nimport mne\nfrom mne.datasets import sample\n\nprint(__doc__)\n\n# In this example, raw data will be simulated for the sample subject, so its\n# information needs to be loaded. This step will download the data if it not\n# already on your machine. Subjects directory is also set so it doesn't need\n# to be given to functions.\ndata_path = sample.data_path()\nsubjects_dir = op.join(data_path, 'subjects')\nsubject = 'sample'\nmeg_path = op.join(data_path, 'MEG', subject)\n\n# First, we get an info structure from the sample subject.\nfname_info = op.join(meg_path, 'sample_audvis_raw.fif')\ninfo = mne.io.read_info(fname_info)\ntstep = 1 / info['sfreq']\n\n# To simulate sources, we also need a source space. It can be obtained from the\n# forward solution of the sample subject.\nfwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')\nfwd = mne.read_forward_solution(fwd_fname)\nsrc = fwd['src']\n\n# To simulate raw data, we need to define when the activity occurs using events\n# matrix and specify the IDs of each event.\n# Noise covariance matrix also needs to be defined.\n# Here, both are loaded from the sample dataset, but they can also be specified\n# by the user.\n\nfname_event = op.join(meg_path, 'sample_audvis_raw-eve.fif')\nfname_cov = op.join(meg_path, 'sample_audvis-cov.fif')\n\nevents = mne.read_events(fname_event)\nnoise_cov = mne.read_cov(fname_cov)\n\n# Standard sample event IDs. These values will correspond to the third column\n# in the events matrix.\nevent_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,\n 'visual/right': 4, 'smiley': 5, 'button': 32}\n\n\n# Take only a few events for speed\nevents = events[:80]\n\n###############################################################################\n# In order to simulate source time courses, labels of desired active regions\n# need to be specified for each of the 4 simulation conditions.\n# Make a dictionary that maps conditions to activation strengths within\n# aparc.a2009s [1]_ labels. In the aparc.a2009s parcellation:\n#\n# - 'G_temp_sup-G_T_transv' is the label for primary auditory area\n# - 'S_calcarine' is the label for primary visual area\n#\n# In each of the 4 conditions, only the primary area is activated. 
This means\n# that during the activations of auditory areas, there are no activations in\n# visual areas and vice versa.\n# Moreover, for each condition, contralateral region is more active (here, 2\n# times more) than the ipsilateral.\n\nactivations = {\n 'auditory/left':\n [('G_temp_sup-G_T_transv-lh', 30), # label, activation (nAm)\n ('G_temp_sup-G_T_transv-rh', 60)],\n 'auditory/right':\n [('G_temp_sup-G_T_transv-lh', 60),\n ('G_temp_sup-G_T_transv-rh', 30)],\n 'visual/left':\n [('S_calcarine-lh', 30),\n ('S_calcarine-rh', 60)],\n 'visual/right':\n [('S_calcarine-lh', 60),\n ('S_calcarine-rh', 30)],\n}\n\nannot = 'aparc.a2009s'\n\n# Load the 4 necessary label names.\nlabel_names = sorted(set(activation[0]\n for activation_list in activations.values()\n for activation in activation_list))\nregion_names = list(activations.keys())\n\n###############################################################################\n# Create simulated source activity\n# --------------------------------\n#\n# Generate source time courses for each region. In this example, we want to\n# simulate source activity for a single condition at a time. Therefore, each\n# evoked response will be parametrized by latency and duration.\n\n\ndef data_fun(times, latency, duration):\n \"\"\"Function to generate source time courses for evoked responses,\n parametrized by latency and duration.\"\"\"\n f = 15 # oscillating frequency, beta band [Hz]\n sigma = 0.375 * duration\n sinusoid = np.sin(2 * np.pi * f * (times - latency))\n gf = np.exp(- (times - latency - (sigma / 4.) * rng.rand(1)) ** 2 /\n (2 * (sigma ** 2)))\n return 1e-9 * sinusoid * gf\n\n\n###############################################################################\n# Here, :class:`~mne.simulation.SourceSimulator` is used, which allows to\n# specify where (label), what (source_time_series), and when (events) event\n# type will occur.\n#\n# We will add data for 4 areas, each of which contains 2 labels. Since add_data\n# method accepts 1 label per call, it will be called 2 times per area.\n#\n# Evoked responses are generated such that the main component peaks at 100ms\n# with a duration of around 30ms, which first appears in the contralateral\n# cortex. This is followed by a response in the ipsilateral cortex with a peak\n# about 15ms after. The amplitude of the activations will be 2 times higher in\n# the contralateral region, as explained before.\n#\n# When the activity occurs is defined using events. In this case, they are\n# taken from the original raw data. The first column is the sample of the\n# event, the second is not used. 
The third one is the event id, which is\n# different for each of the 4 areas.\n\ntimes = np.arange(150, dtype=np.float) / info['sfreq']\nduration = 0.03\nrng = np.random.RandomState(7)\nsource_simulator = mne.simulation.SourceSimulator(src, tstep=tstep)\n\nfor region_id, region_name in enumerate(region_names, 1):\n events_tmp = events[np.where(events[:, 2] == region_id)[0], :]\n for i in range(2):\n label_name = activations[region_name][i][0]\n label_tmp = mne.read_labels_from_annot(subject, annot,\n subjects_dir=subjects_dir,\n regexp=label_name,\n verbose=False)\n label_tmp = label_tmp[0]\n amplitude_tmp = activations[region_name][i][1]\n if region_name.split('/')[1][0] == label_tmp.hemi[0]:\n latency_tmp = 0.115\n else:\n latency_tmp = 0.1\n wf_tmp = data_fun(times, latency_tmp, duration)\n source_simulator.add_data(label_tmp,\n amplitude_tmp * wf_tmp,\n events_tmp)\n\n# To obtain a SourceEstimate object, we need to use `get_stc()` method of\n# SourceSimulator class.\nstc_data = source_simulator.get_stc()\n\n###############################################################################\n# Simulate raw data\n# -----------------\n#\n# Project the source time series to sensor space. Three types of noise will be\n# added to the simulated raw data:\n#\n# - multivariate Gaussian noise obtained from the noise covariance from the\n# sample data\n# - blink (EOG) noise\n# - ECG noise\n#\n# The :class:`~mne.simulation.SourceSimulator` can be given directly to the\n# :func:`~mne.simulation.simulate_raw` function.\n\nraw_sim = mne.simulation.simulate_raw(info, source_simulator, forward=fwd)\nraw_sim.set_eeg_reference(projection=True)\n\nmne.simulation.add_noise(raw_sim, cov=noise_cov, random_state=0)\nmne.simulation.add_eog(raw_sim, random_state=0)\nmne.simulation.add_ecg(raw_sim, random_state=0)\n\n# Plot original and simulated raw data.\nraw_sim.plot(title='Simulated raw data')\n\n###############################################################################\n# Extract epochs and compute evoked responsses\n# --------------------------------------------\n#\n\nepochs = mne.Epochs(raw_sim, events, event_id, tmin=-0.2, tmax=0.3,\n baseline=(None, 0))\nevoked_aud_left = epochs['auditory/left'].average()\nevoked_vis_right = epochs['visual/right'].average()\n\n# Visualize the evoked data\nevoked_aud_left.plot(spatial_colors=True)\nevoked_vis_right.plot(spatial_colors=True)\n\n###############################################################################\n# Reconstruct simulated source time courses using dSPM inverse operator\n# ---------------------------------------------------------------------\n#\n# Here, source time courses for auditory and visual areas are reconstructed\n# separately and their difference is shown. This was done merely for better\n# visual representation of source reconstruction.\n# As expected, when high activations appear in primary auditory areas, primary\n# visual areas will have low activations and vice versa.\n\nmethod, lambda2 = 'dSPM', 1. / 9.\ninv = mne.minimum_norm.make_inverse_operator(epochs.info, fwd, noise_cov)\nstc_aud = mne.minimum_norm.apply_inverse(\n evoked_aud_left, inv, lambda2, method)\nstc_vis = mne.minimum_norm.apply_inverse(\n evoked_vis_right, inv, lambda2, method)\nstc_diff = stc_aud - stc_vis\n\nbrain = stc_diff.plot(subjects_dir=subjects_dir, initial_time=0.1,\n hemi='split', views=['lat', 'med'])\n\n###############################################################################\n# References\n# ----------\n# .. 
[1] Destrieux C, Fischl B, Dale A, Halgren E (2010). Automatic\n# parcellation of human cortical gyri and sulci using standard\n# anatomical nomenclature, vol. 53(1), 1-15, NeuroImage.\n"
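The example above drives eight labels across four conditions; the stripped-down sketch below isolates the where/what/when pattern of mne.simulation.SourceSimulator with a single label, a hand-made waveform, and a hand-made events array. The label name, amplitude, and event samples are arbitrary illustrative choices, and the sketch assumes the MNE sample dataset (with its forward solution and noise covariance) is available.

import os.path as op
import numpy as np
import mne
from mne.datasets import sample

data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
meg_path = op.join(data_path, 'MEG', 'sample')

info = mne.io.read_info(op.join(meg_path, 'sample_audvis_raw.fif'))
fwd = mne.read_forward_solution(
    op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif'))
src = fwd['src']
tstep = 1.0 / info['sfreq']

# "Where": one left auditory label from the aparc.a2009s parcellation.
label = mne.read_labels_from_annot(
    'sample', 'aparc.a2009s', subjects_dir=subjects_dir,
    regexp='G_temp_sup-G_T_transv-lh', verbose=False)[0]

# "What": a short burst of 15 Hz activity with a 20 nAm peak amplitude.
times = np.arange(100) * tstep
waveform = 20e-9 * np.sin(2 * np.pi * 15 * times)

# "When": three events of type 1 (columns: sample, previous value, event id).
events = np.array([[1000, 0, 1], [2000, 0, 1], [3000, 0, 1]])

source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep)
source_simulator.add_data(label, waveform, events)

# Project to sensor space and add noise drawn from the measured covariance.
raw_sim = mne.simulation.simulate_raw(info, source_simulator, forward=fwd)
mne.simulation.add_noise(
    raw_sim, cov=mne.read_cov(op.join(meg_path, 'sample_audvis-cov.fif')),
    random_state=0)
raw_sim.plot(title='Single-label simulation')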
] | [
[
"numpy.dot",
"numpy.minimum",
"numpy.sqrt",
"scipy.sparse.block_diag",
"numpy.concatenate",
"numpy.all",
"numpy.round",
"numpy.searchsorted",
"numpy.where",
"numpy.hstack",
"scipy.sparse.issparse",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"numpy.intersect1d",
"numpy.asanyarray",
"scipy.linalg.norm",
"scipy.linalg.inv",
"numpy.repeat",
"numpy.zeros",
"numpy.min",
"numpy.append",
"numpy.floor",
"numpy.array",
"numpy.sum",
"numpy.tile",
"numpy.sort",
"numpy.ones",
"numpy.vstack"
],
[
"numpy.arange",
"numpy.where",
"matplotlib.pyplot.subplots"
],
[
"numpy.fromfile",
"numpy.dtype"
],
[
"numpy.abs",
"numpy.arange",
"numpy.sin",
"numpy.all",
"numpy.random.randn",
"scipy.linalg.norm",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
],
[
"matplotlib.pyplot.subplots",
"numpy.linspace"
],
[
"numpy.arange",
"numpy.random.RandomState",
"numpy.where",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CSI-Woo-Lab/PandaSchedulingModel | [
"0ae4a20283ec7f2ffa1d0e0ebecdb1f4d2156d8a",
"0ae4a20283ec7f2ffa1d0e0ebecdb1f4d2156d8a"
] | [
"tsp_heuristic.py",
"generator_emstada.py"
] | [
    "import numpy as np\nimport torch\nimport elkai\n\nCONST = 100000.0\ndef calc_dist(p, q):\n return np.sqrt(((p[1] - q[1])**2)+((p[0] - q[0]) **2)) * CONST\n\ndef get_ref_reward(pointset):\n\n if isinstance(pointset, torch.cuda.FloatTensor) or isinstance(pointset, torch.FloatTensor):\n pointset = pointset.detach().cpu().numpy()\n\n num_points = len(pointset)\n ret_matrix = np.zeros((num_points, num_points))\n for i in range(num_points):\n for j in range(i+1, num_points):\n ret_matrix[i,j] = ret_matrix[j,i] = calc_dist(pointset[i], pointset[j])\n q = elkai.solve_float_matrix(ret_matrix) # Output: [0, 2, 1]\n dist = 0\n for i in range(num_points):\n dist += ret_matrix[q[i], q[(i+1) % num_points]]\n return dist / CONST\n",
"#!/usr/bin/python\n\n\"\"\"A taskset generator for experiments with real-time task sets\nCopyright 2010 Paul Emberson, Roger Stafford, Robert Davis.\nAll rights reserved.\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n 1. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n 2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS\nOR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\nOF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\nEVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\nOR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\nOR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\nADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nThe views and conclusions contained in the software and documentation are\nthose of the authors and should not be interpreted as representing official\npolicies, either expressed or implied, of Paul Emberson, Roger Stafford or\nRobert Davis.\nIncludes Python implementation of Roger Stafford's randfixedsum implementation\nhttp://www.mathworks.com/matlabcentral/fileexchange/9700\nAdapted specifically for the purpose of taskset generation with fixed\ntotal utilisation value\nPlease contact [email protected] or [email protected] if you have\nany questions regarding this software.\n\"\"\"\n\n\n\"\"\"\nOriginal Code Brought from\nhttps://github.com/brandenburg/schedcat/blob/master/schedcat/generator/generator_emstada.py\n\"\"\"\n\nimport numpy\nimport sys\nimport random\n\n\ndef StaffordRandFixedSum(n, u, nsets):\n\n #deal with n=1 case\n if n == 1:\n return numpy.tile(numpy.array([u]),[nsets,1])\n\n k = numpy.floor(u)\n s = u\n step = 1 if k < (k-n+1) else -1\n s1 = s - numpy.arange( k, (k-n+1)+step, step )\n step = 1 if (k+n) < (k-n+1) else -1\n s2 = numpy.arange( (k+n), (k+1)+step, step ) - s\n\n tiny = numpy.finfo(float).tiny\n huge = numpy.finfo(float).max\n\n w = numpy.zeros((n, n+1))\n w[0,1] = huge\n t = numpy.zeros((n-1,n))\n\n for i in numpy.arange(2, (n+1)):\n tmp1 = w[i-2, numpy.arange(1,(i+1))] * s1[numpy.arange(0,i)]/float(i)\n tmp2 = w[i-2, numpy.arange(0,i)] * s2[numpy.arange((n-i),n)]/float(i)\n w[i-1, numpy.arange(1,(i+1))] = tmp1 + tmp2;\n tmp3 = w[i-1, numpy.arange(1,(i+1))] + tiny;\n tmp4 = numpy.array( (s2[numpy.arange((n-i),n)] > s1[numpy.arange(0,i)]) )\n t[i-2, numpy.arange(0,i)] = (tmp2 / tmp3) * tmp4 + (1 - tmp1/tmp3) * (numpy.logical_not(tmp4))\n\n m = nsets\n x = numpy.zeros((n,m))\n rt = numpy.random.uniform(size=(n-1,m)) #rand simplex type\n rs = numpy.random.uniform(size=(n-1,m)) #rand position in simplex\n s = numpy.repeat(s, m);\n j = numpy.repeat(int(k+1), m);\n sm = numpy.repeat(0, m);\n pr = numpy.repeat(1, m);\n\n for i in numpy.arange(n-1,0,-1): #iterate through dimensions\n e = ( rt[(n-i)-1,...] 
<= t[i-1,j-1] ) #decide which direction to move in this dimension (1 or 0)\n sx = rs[(n-i)-1,...] ** (1/float(i)) #next simplex coord\n sm = sm + (1-sx) * pr * s/float(i+1)\n pr = sx * pr\n x[(n-i)-1,...] = sm + pr * e\n s = s - e\n j = j - e #change transition table column if required\n\n x[n-1,...] = sm + pr * s\n\n #iterated in fixed dimension order but needs to be randomised\n #permute x row order within each column\n for i in range(0,m):\n x[...,i] = x[numpy.random.permutation(n),i]\n\n return numpy.transpose(x);\n\ndef gen_periods(n, nsets, low, high, gran, dist):\n def po2(n):\n if n <= 2:\n return 1\n return po2(n // 2) + 1\n if dist == \"poweroftwo\":\n periods = 2 ** numpy.random.randint(po2(low), po2(high) + 1, size=(nsets, n))\n return periods\n elif dist == \"logunif\":\n periods = numpy.exp(numpy.random.uniform(low=numpy.log(low), high=numpy.log(high+gran), size=(nsets,n)))\n elif dist == \"unif\":\n periods = numpy.random.uniform(low=low, high=(high+gran), size=(nsets,n))\n elif type(dist) == list:\n # Interpret as set of pre-defined periods to choose from.\n assert nsets == 1\n # avoid numpy.random.choice() because we need to be compatible with 1.6.X\n periods = [random.choice(dist) for _ in range(n)]\n # wrap in numpy types\n periods = numpy.array(periods)\n periods.shape = (1, n)\n else:\n return None\n\n periods = (numpy.floor(periods / gran) * gran)\n\n return periods\n"
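A short usage sketch of the two generators above: StaffordRandFixedSum draws a vector of per-task utilisations in [0, 1] that sums to a fixed total, gen_periods draws matching periods, and execution times then follow from C_i = U_i * T_i. The import assumes the file is importable as generator_emstada; the task count, total utilisation, and period range are arbitrary illustrative values.

import numpy
from generator_emstada import StaffordRandFixedSum, gen_periods  # assumed module name

n_tasks, total_util = 5, 2.0

# One row of 5 utilisations in [0, 1] whose sum is (up to rounding) 2.0.
utils = StaffordRandFixedSum(n_tasks, total_util, nsets=1)[0]
print(utils, utils.sum())

# Matching periods, log-uniformly distributed between 10 and 100 on a
# granularity of 5 (units are whatever the caller chooses, e.g. milliseconds).
periods = gen_periods(n_tasks, nsets=1, low=10, high=100, gran=5,
                      dist='logunif')[0]

# Worst-case execution times follow from C_i = U_i * T_i.
costs = utils * periods
for u, t, c in zip(utils, periods, costs):
    print('U=%.3f  T=%6.1f  C=%7.2f' % (u, t, c))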
] | [
[
"numpy.zeros",
"numpy.sqrt"
],
[
"numpy.logical_not",
"numpy.log",
"numpy.arange",
"numpy.finfo",
"numpy.random.permutation",
"numpy.floor",
"numpy.transpose",
"numpy.random.uniform",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adityavaishampayan/FaceSwap | [
"5b875b2bcf6212cd8404633e296a44886710d150"
] | [
"scripts_ignore/Traditional/ThinPlateSplines/ThinPlateSpline.py"
] | [
"import numpy as np\nimport cv2\nimport sys\nimport math\nimport matplotlib.pyplot as plt\n\ndef fxy(pt1,pts2,weights):\n K = np.zeros([pts2.shape[0],1])\n for i in range(pts2.shape[0]):\n K[i] = U(np.linalg.norm((pts2[i]-pt1),ord =2)+sys.float_info.epsilon)\n f = weights[-1] + weights[-3]*pt1[0] +weights[-2]*pt1[1]+np.matmul(K.T,weights[0:-3])\n return f\n\n\ndef warp_tps(img_source,img_target,points1,points2,weights_x,weights_y,mask):\n xy1_min = np.float32([min(points1[:,0]), min(points1[:,1])])\n xy1_max = np.float32([max(points1[:,0]),max(points1[:,1])])\n\n xy2_min = np.float32([min(points2[:,0]),min(points2[:,1])])\n xy2_max = np.float32([max(points2[:,0]),max(points2[:,1])])\n\n x = np.arange(xy1_min[0],xy1_max[0]).astype(int)\n y = np.arange(xy1_min[1],xy1_max[1]).astype(int)\n\n X,Y = np.mgrid[x[0]:x[-1]+1,y[0]:y[-1]+1]\n\n # X,Y = np.mgrid[0:src_shape[2],0:src_shape[3]]\n pts_src = np.vstack((X.ravel(),Y.ravel()))\n xy = pts_src.T\n u = np.zeros_like(xy[:,0])\n v = np.zeros_like(xy[:,0])\n # print(u.shape)\n # print(v.shape)\n for i in range(xy.shape[0]):\n u[i] = fxy(xy[i,:],points1,weights_x)\n u[u<xy2_min[0]]=xy2_min[0]\n u[u>xy2_max[0]]=xy2_max[0]\n for j in range(xy.shape[0]):\n v[j] = fxy(xy[j,:],points1,weights_y)\n v[v<xy2_min[1]]=xy2_min[1]\n v[v>xy2_max[1]]=xy2_max[1]\n# print(u.shape)\n# print(img_source.shape)\n warped_img = img_source.copy()\n mask_warped_img = np.zeros_like(warped_img[:,:,0])\n for a in range(1, u.shape[0]):\n try:\n # for b in range(v.shape[0]):\n # warped_img[xy[a,1],xy[a,0],:] = warped_src_face[v[a],u[a],:]\n\n if mask[v[a],u[a]]>0:\n warped_img[xy[a,1],xy[a,0],:] = img_target[v[a],u[a],:]\n mask_warped_img[xy[a,1],xy[a,0]] = 255\n except:\n pass\n # plt.imshow(warped_img)\n # plt.show()\n return warped_img, mask_warped_img\n\ndef mask_from_points(size, points,erode_flag=1):\n radius = 10 # kernel size\n kernel = np.ones((radius, radius), np.uint8)\n\n mask = np.zeros(size, np.uint8)\n cv2.fillConvexPoly(mask, cv2.convexHull(points), 255)\n if erode_flag:\n mask = cv2.erode(mask, kernel,iterations=1)\n\n return mask\n\ndef U(r):\n return (r**2)*(math.log(r**2))\n\ndef TPS_generate(source, target):\n P = np.append(source,np.ones([source.shape[0],1]),axis=1)\n P_Trans = P.T\n Z = np.zeros([3,3])\n K = np.zeros([source.shape[0],source.shape[0]])\n for p in range(source.shape[0]):\n K[p] = [U(np.linalg.norm((-source[p]+source[i]),ord =2)+sys.float_info.epsilon) for i in range(source.shape[0])]\n\n M = np.vstack([np.hstack([K,P]),np.hstack([P_Trans,Z])])\n lam = 200\n I = np.identity(M.shape[0])\n L = M+lam*I\n L_inv = np.linalg.inv(L)\n V = np.concatenate([np.array(target),np.zeros([3,])])\n V.resize(V.shape[0],1)\n weights = np.matmul(L_inv,V)\n return weights,K\n\n\ndef swap(img_source,img_target,points1,points2):\n weights_x,K = TPS_generate(points1,points2[:,0])\n weights_y,K = TPS_generate(points1,points2[:,1])\n #plt.imshow(K)\n\n w, h = img_target.shape[:2]\n # ## Mask for blending\n mask = mask_from_points((w, h), points2)\n #plt.imshow(mask)\n # mask.shape\n\n warped_img, mask_warped_img = warp_tps(img_source,img_target,points1,points2,weights_x,weights_y,mask)\n #plt.imshow(warped_img)\n #plt.imshow(mask_warped_img)\n # mask_warped_img.shape\n\n cv2.imshow(\"without blending\", warped_img)\n cv2.waitKey(0)\n\n ##Poisson Blending\n r = cv2.boundingRect(mask_warped_img)\n center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))\n output = cv2.seamlessClone(warped_img.copy(), img_source, mask_warped_img, center, cv2.NORMAL_CLONE)\n\n 
#cv2.imshow(\"output\", output)\n #cv2.waitKey(0)\n\n return output\n"
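Outside the full face-swap pipeline, the spline fit itself can be exercised on synthetic correspondences. The sketch below assumes the file above is importable as ThinPlateSpline; it fits one spline per output coordinate, exactly as swap() does, and checks that the fitted mapping lands near the target landmarks (the hard-coded lam = 200 regulariser keeps the fit from being exact).

import numpy as np
from ThinPlateSpline import TPS_generate, fxy  # assumed module name, from the file above

rng = np.random.RandomState(0)

# Synthetic landmark correspondences: the target points are the source
# points under a known affine map plus a little jitter.
src_pts = rng.uniform(50, 250, size=(20, 2))
dst_pts = src_pts @ np.array([[1.1, 0.05], [-0.05, 0.9]]) + np.array([15.0, -10.0])
dst_pts += rng.normal(scale=1.0, size=dst_pts.shape)

# One thin-plate spline per output coordinate, as in swap().
wx, _ = TPS_generate(src_pts, dst_pts[:, 0])
wy, _ = TPS_generate(src_pts, dst_pts[:, 1])

# Evaluate the fitted mapping at the source landmarks; fxy returns a 1x1
# array, so .item() pulls out the scalar value.
mapped = np.array([[fxy(p, src_pts, wx).item(), fxy(p, src_pts, wy).item()]
                   for p in src_pts])
print('mean landmark error:', np.abs(mapped - dst_pts).mean())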
] | [
[
"numpy.hstack",
"numpy.linalg.inv",
"numpy.arange",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.ones",
"numpy.identity",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jvrana/caldera | [
"a346324e77f20739e00a82f97530dda4906f59dd",
"a346324e77f20739e00a82f97530dda4906f59dd",
"a346324e77f20739e00a82f97530dda4906f59dd"
] | [
"caldera/utils/sparse.py",
"tests/test_examples/test_train_networks.py",
"tests/test_data/test_shuffle_data.py"
] | [
"from typing import Optional\nfrom typing import Type\n\nimport torch\nfrom scipy.sparse import coo_matrix\n\nfrom .indexing import SizeType\nfrom .indexing import unroll_index\n\n\ndef torch_coo_to_scipy_coo(m: torch.sparse.FloatTensor) -> coo_matrix:\n \"\"\"Convert torch :class:`torch.sparse.FloatTensor` tensor to.\n\n :class:`scipy.sparse.coo_matrix`\n \"\"\"\n data = m.values().numpy()\n indices = m.indices()\n return coo_matrix((data, (indices[0], indices[1])), tuple(m.size()))\n\n\ndef scatter_indices(indices: torch.LongTensor, shape: SizeType):\n \"\"\"Unroll the coo indices using the provided shape.\n\n .. code-block::\n\n indices = torch.tensor([\n [0, 1, 2],\n [2, 3, 4],\n [4, 5, 4]\n ])\n shape = (3, 2)\n print(scatter_indices(indices, shape))\n\n # tensor([[0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2,\n # 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2],\n # [2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4,\n # 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4],\n # [4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4,\n # 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4],\n # [0, 0, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1,\n # 0, 0, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1]])\n\n :param indices:\n :param shape:\n :return:\n \"\"\"\n if not shape:\n return indices\n idx = torch.stack(unroll_index(shape))\n\n a_repeat = [1] * indices.ndim\n a_repeat[-1] = idx.shape[-1]\n b_repeat = [1] * indices.ndim\n b_repeat[-1] = indices.shape[-1]\n\n a = torch.repeat_interleave(indices, idx.shape[-1], dim=1)\n b = idx.repeat(b_repeat)\n return torch.cat((a, b))\n\n\ndef _expand_idx(idx):\n if idx.ndim == 1:\n idx = idx.unsqueeze(0)\n idx = torch.cat((torch.zeros_like(idx), idx))\n return idx\n\n\ndef _coo_tensor(\n indices: torch.LongTensor,\n source: torch.Tensor,\n size: Optional[SizeType] = None,\n dtype: Optional[Type] = None,\n **kwargs\n):\n if size is not None:\n kwargs[\"size\"] = size\n if dtype is None:\n kwargs[\"dtype\"] = source.dtype\n else:\n kwargs[\"dtype\"] = dtype\n if size is not None:\n kwargs = dict(dtype=dtype, size=size)\n else:\n kwargs = dict(dtype=dtype)\n return torch.sparse_coo_tensor(indices, source, **kwargs)\n\n\n# TODO: infer size from index sizes\ndef scatter_coo(\n indices: torch.LongTensor,\n source: torch.FloatTensor,\n size: Optional[SizeType] = None,\n expand: bool = False,\n dtype: Optional[Type] = None,\n) -> torch.sparse.FloatTensor:\n \"\"\"Scatter the provided source tensor to the provided indices.\n\n :param indices:\n :param source:\n :return:\n \"\"\"\n\n indices = _expand_idx(indices)\n\n if not torch.is_tensor(source):\n source = torch.tensor(source)\n\n if expand:\n shape = source.shape\n # r = prod(shape[:-1]) * indices.shape[1]\n r = indices.shape[1]\n flattened = source.view(-1).repeat(r)\n else:\n shape = source.shape[1:]\n flattened = source.view(-1)\n\n if size is not None and size[-1] is ...:\n if not len(size) - 1 == indices.shape[0]:\n raise ValueError(\n \"Provided dims ({}) must match number of index dims ({})\".format(\n len(size) - 1, indices.shape[0]\n )\n )\n size = tuple(list(size)[:-1]) + shape\n\n sidx = scatter_indices(indices, shape)\n return _coo_tensor(sidx, flattened, size=size, dtype=dtype)\n\n\n#\n# def scatter_coo_fill(\n# indices: torch.LongTensor,\n# source: torch.FloatTensor,\n# size: Optional[SizeType] = None,\n# dtype: Optional[Type] = None,\n# ) -> torch.sparse.FloatTensor:\n# \"\"\"Fill sparse coo matrix with the provided tensor at the provided indices.\n#\n# :param 
indices:\n# :param source:\n# :return:\n# \"\"\"\n# indices = _expand_idx(indices)\n# source = torch.tensor(source)\n# sidx = scatter_indices(indices, source.shape)\n# if size is not None and size[-1] is ...:\n# size = tuple(list(size)[:-1])\n# if torch.is_tensor():\n# size += source.shape\n# return _coo_tensor(\n# sidx, source.view(-1).repeat(indices.shape[1]), size=size, dtype=dtype\n# )\n",
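The scatter helpers above depend on caldera's own indexing utilities, but the torch-to-scipy conversion at the top of the file is easy to exercise in isolation. The self-contained snippet below shows the same round trip with plain torch and scipy; it coalesces the sparse tensor first, which torch_coo_to_scipy_coo implicitly assumes has already happened.

import torch
from scipy.sparse import coo_matrix

# A small 3x4 matrix in torch COO format.
indices = torch.tensor([[0, 1, 2],   # row indices
                        [1, 0, 3]])  # column indices
values = torch.tensor([10.0, 20.0, 30.0])
m = torch.sparse_coo_tensor(indices, values, size=(3, 4)).coalesce()

# Same pattern as torch_coo_to_scipy_coo above: hand the values and the two
# index rows to scipy's coo_matrix.
data = m.values().numpy()
idx = m.indices().numpy()
scipy_m = coo_matrix((data, (idx[0], idx[1])), shape=tuple(m.size()))
print(scipy_m.toarray())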
"\"\"\"test_train_networks.py.\n\nInststructions for creating a new test case.\n\nloader, getter, network\n\"\"\"\nimport functools\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Type\n\nimport networkx as nx\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import optim\n\nfrom caldera.blocks import AggregatingEdgeBlock\nfrom caldera.blocks import AggregatingGlobalBlock\nfrom caldera.blocks import AggregatingNodeBlock\nfrom caldera.blocks import Aggregator\nfrom caldera.blocks import EdgeBlock\nfrom caldera.blocks import Flex\nfrom caldera.blocks import GlobalBlock\nfrom caldera.blocks import MLP\nfrom caldera.blocks import MultiAggregator\nfrom caldera.blocks import NodeBlock\nfrom caldera.data import GraphBatch\nfrom caldera.data import GraphData\nfrom caldera.data import GraphDataLoader\nfrom caldera.models import GraphCore\nfrom caldera.models import GraphEncoder\nfrom caldera.utils import deterministic_seed\nfrom caldera.utils.nx import nx_iter_roots\nfrom caldera.utils.tensor import to_one_hot\n\nSEED = 0\n\n\nclass NamedNetwork:\n def __init__(self, name, network_func):\n self.name = name\n self.f = network_func\n\n def __call__(self, *args, **kwargs):\n return self.f(*args, **kwargs)\n\n\nclass Networks:\n \"\"\"Networks that will be used in the tests.\"\"\"\n\n n = NamedNetwork\n\n linear_block = n(\n \"linear\",\n lambda: torch.nn.Sequential(\n torch.nn.Linear(5, 16), torch.nn.ReLU(), torch.nn.Linear(16, 1)\n ),\n )\n\n mlp_block = n(\n \"mlp\",\n lambda: torch.nn.Sequential(\n Flex(MLP)(Flex.d(), 16), Flex(torch.nn.Linear)(Flex.d(), 1)\n ),\n )\n\n node_block = n(\n \"node_block\",\n lambda: torch.nn.Sequential(\n NodeBlock(Flex(MLP)(Flex.d(), 25, 25, layer_norm=False)),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n ),\n )\n\n edge_block = n(\n \"edge_block\",\n lambda: torch.nn.Sequential(\n EdgeBlock(Flex(MLP)(Flex.d(), 25, 25, layer_norm=False)),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n ),\n )\n\n global_block = n(\n \"global_block\",\n lambda: torch.nn.Sequential(\n GlobalBlock(Flex(MLP)(Flex.d(), 25, 25, layer_norm=False)),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n ),\n )\n\n graph_encoder = n(\n \"graph_encoder\",\n lambda: GraphEncoder(\n EdgeBlock(\n torch.nn.Sequential(\n Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n )\n ),\n NodeBlock(\n torch.nn.Sequential(\n Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n )\n ),\n GlobalBlock(\n torch.nn.Sequential(\n Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n )\n ),\n ),\n )\n\n def create_graph_core(pass_global_to_edge: bool, pass_global_to_node: bool):\n return GraphCore(\n AggregatingEdgeBlock(\n torch.nn.Sequential(\n Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n )\n ),\n AggregatingNodeBlock(\n torch.nn.Sequential(\n Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n ),\n edge_aggregator=Aggregator(\"add\"),\n ),\n AggregatingGlobalBlock(\n torch.nn.Sequential(\n Flex(MLP)(Flex.d(), 5, 5, layer_norm=False),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n ),\n edge_aggregator=Aggregator(\"add\"),\n node_aggregator=Aggregator(\"add\"),\n ),\n pass_global_to_edge=pass_global_to_edge,\n pass_global_to_node=pass_global_to_node,\n )\n\n graph_core = n(\"graph_core\", 
create_graph_core)\n\n def create_graph_core_multi_agg(\n pass_global_to_edge: bool, pass_global_to_node: bool\n ):\n agg = lambda: Flex(MultiAggregator)(Flex.d(), [\"add\", \"mean\", \"max\", \"min\"])\n\n return GraphCore(\n AggregatingEdgeBlock(\n torch.nn.Sequential(\n Flex(MLP)(\n Flex.d(), 5, 5, layer_norm=True, activation=torch.nn.LeakyReLU\n ),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n )\n ),\n AggregatingNodeBlock(\n torch.nn.Sequential(\n Flex(MLP)(\n Flex.d(), 5, 5, layer_norm=True, activation=torch.nn.LeakyReLU\n ),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n ),\n edge_aggregator=agg(),\n ),\n AggregatingGlobalBlock(\n torch.nn.Sequential(\n Flex(MLP)(\n Flex.d(), 5, 5, layer_norm=True, activation=torch.nn.LeakyReLU\n ),\n Flex(torch.nn.Linear)(Flex.d(), 1),\n ),\n edge_aggregator=agg(),\n node_aggregator=agg(),\n ),\n pass_global_to_edge=pass_global_to_edge,\n pass_global_to_node=pass_global_to_node,\n )\n\n graph_core_multi_agg = n(\"graph_core(multiagg)\", create_graph_core_multi_agg)\n\n @staticmethod\n def reset(net: torch.nn.Module):\n def weight_reset(model):\n for layer in model.children():\n if hasattr(layer, \"reset_parameters\"):\n layer.reset_parameters()\n\n net.apply(weight_reset)\n\n\nclass DataModifier:\n \"\"\"Methods to modify data before training.\"\"\"\n\n def __init__(self, datalist):\n self.datalist = datalist\n\n @staticmethod\n def node_sum(batch: GraphBatch, copy=True):\n if copy:\n batch = batch.copy()\n batch.x = torch.cat([batch.x, batch.x.sum(axis=1, keepdim=True)], axis=1)\n return batch\n\n @staticmethod\n def edge_sum(batch: GraphBatch, copy=True):\n if copy:\n batch = batch.copy()\n batch.e = torch.cat([batch.e, batch.e.sum(axis=1, keepdim=True)], axis=1)\n return batch\n\n @staticmethod\n def global_sum(batch: GraphBatch, copy=True):\n if copy:\n batch = batch.copy()\n batch.g = torch.cat([batch.g, batch.g.sum(axis=1, keepdim=True)], axis=1)\n return batch\n\n def apply(self, f, *args, **kwargs):\n f = self.resolve(f)\n return [f(_d, *args, **kwargs) for _d in self.datalist]\n\n @classmethod\n def resolve(cls, f):\n cls.valid(f)\n if isinstance(f, str):\n f = getattr(cls, f)\n return f\n\n @classmethod\n def valid(self, f):\n if callable(f):\n return True\n elif isinstance(f, str) and hasattr(self, f):\n return True\n return False\n\n\nclass DataLoaders:\n \"\"\"Data loaders for test.\"\"\"\n\n @staticmethod\n def random_loader(data_size, batch_size):\n datalist = [GraphData.random(5, 5, 5) for _ in range(data_size)]\n return GraphDataLoader(datalist, batch_size=batch_size)\n\n @staticmethod\n def _default_g(g: nx.DiGraph, global_key: str = None):\n for _, data in g.nodes(data=True):\n data[\"features\"] = np.zeros((1,))\n data[\"target\"] = np.zeros((1,))\n\n for _, _, data in g.edges(data=True):\n data[\"features\"] = np.zeros((1,))\n data[\"target\"] = np.zeros((1,))\n\n g.set_global({\"features\": np.zeros((1,)), \"target\": np.zeros((1,))}, global_key)\n return g\n\n @classmethod\n def random_graph_red_black_nodes(cls, data_size, batch_size):\n input_data = []\n output_data = []\n s = 2\n for _ in range(data_size):\n g = nx.to_directed(nx.random_tree(10))\n cls._default_g(g)\n for n, ndata in g.nodes(data=True):\n i = np.random.randint(0, 1, (1,))\n ndata[\"features\"] = to_one_hot(i, s)\n if i % 2 == 0:\n target = np.array([0.5])\n else:\n target = np.zeros(1)\n ndata[\"target\"] = target\n\n input_data.append(GraphData.from_networkx(g, feature_key=\"features\"))\n output_data.append(GraphData.from_networkx(g, feature_key=\"target\"))\n\n 
return GraphDataLoader(input_data, output_data, batch_size=batch_size)\n\n @classmethod\n def random_graph_red_black_edges(cls, data_size, batch_size):\n input_data = []\n output_data = []\n s = 2\n for _ in range(data_size):\n g = nx.to_directed(nx.random_tree(10))\n cls._default_g(g)\n for _, _, edata in g.edges(data=True):\n i = np.random.randint(0, 1, (1,))\n edata[\"features\"] = to_one_hot(i, s)\n if i % 2 == 0:\n target = np.array([0.5])\n else:\n target = np.zeros((1,))\n edata[\"target\"] = target\n\n input_data.append(GraphData.from_networkx(g, feature_key=\"features\"))\n output_data.append(GraphData.from_networkx(g, feature_key=\"target\"))\n\n return GraphDataLoader(input_data, output_data, batch_size=batch_size)\n\n @classmethod\n def random_graph_red_black_global(cls, data_size, batch_size):\n input_data = []\n output_data = []\n s = 2\n for _ in range(data_size):\n g = nx.to_directed(nx.random_tree(10))\n cls._default_g(g)\n\n gdata = g.get_global()\n i = np.random.randint(0, 1, (1,))\n gdata[\"features\"] = to_one_hot(i, s)\n if i % 2 == 0:\n target = np.array([0.5])\n else:\n target = np.zeros((1,))\n gdata[\"target\"] = target\n\n input_data.append(GraphData.from_networkx(g, feature_key=\"features\"))\n output_data.append(GraphData.from_networkx(g, feature_key=\"target\"))\n\n return GraphDataLoader(input_data, output_data, batch_size=batch_size)\n\n @classmethod\n def est_density(cls, data_size, batch_size):\n input_data = []\n output_data = []\n s = 2\n for _ in range(data_size):\n n_size = np.random.randint(2, 20)\n g = nx.to_directed(nx.random_tree(n_size))\n cls._default_g(g)\n\n gdata = g.get_global()\n gdata[\"features\"] = np.random.randn(1)\n gdata[\"target\"] = np.array([nx.density(g)])\n\n input_data.append(GraphData.from_networkx(g, feature_key=\"features\"))\n output_data.append(GraphData.from_networkx(g, feature_key=\"target\"))\n\n return GraphDataLoader(input_data, output_data, batch_size=batch_size)\n\n @classmethod\n def in_degree(cls, data_size, batch_size):\n input_data = []\n output_data = []\n s = 2\n for _ in range(data_size):\n n_size = np.random.randint(2, 20)\n g = nx.to_directed(nx.random_tree(n_size))\n cls._default_g(g)\n\n for n, ndata in g.nodes(data=True):\n ndata[\"features\"] = np.random.randn(1)\n ind = g.in_degree(n)\n ndata[\"target\"] = np.array([ind])\n\n input_data.append(GraphData.from_networkx(g, feature_key=\"features\"))\n output_data.append(GraphData.from_networkx(g, feature_key=\"target\"))\n\n return GraphDataLoader(input_data, output_data, batch_size=batch_size)\n\n @classmethod\n def boolean_network(cls, data_size, batch_size):\n\n input_data = []\n output_data = []\n for _ in range(data_size):\n n_size = np.random.randint(2, 20)\n tree = nx.random_tree(n_size)\n\n # randomize node directions\n g = nx.DiGraph()\n for n1, n2, edata in tree.edges(data=True):\n i = np.random.randint(2)\n if i % 2 == 0:\n g.add_edge(n1, n2)\n else:\n g.add_edge(n2, n1)\n cls._default_g(g)\n\n for n in nx_iter_roots(g):\n ndata = g.nodes[n]\n ndata[\"target\"] = np.array([1.0])\n\n for n in nx.topological_sort(g):\n ndata = g.nodes[n]\n if \"target\" not in ndata:\n incoming = []\n for p in g.predecessors(n):\n pdata = g.nodes[p]\n incoming.append(pdata[\"target\"])\n incoming = np.concatenate(incoming)\n i = incoming.max()\n if i == 1:\n o = np.array([0.0])\n else:\n o = np.array([1.0])\n ndata[\"target\"] = o\n\n input_data.append(GraphData.from_networkx(g, feature_key=\"features\"))\n output_data.append(GraphData.from_networkx(g, 
feature_key=\"target\"))\n\n return GraphDataLoader(input_data, output_data, batch_size=batch_size)\n\n @classmethod\n def sigmoid_circuit(cls, data_size, batch_size):\n import math\n\n def func(x):\n return 1 - 1.0 / (1 + math.exp(-x))\n\n input_data = []\n output_data = []\n for _ in range(data_size):\n n_size = np.random.randint(2, 20)\n tree = nx.random_tree(n_size)\n\n # randomize node directions\n g = nx.DiGraph()\n for n1, n2, edata in tree.edges(data=True):\n i = np.random.randint(2)\n if i % 2 == 0:\n g.add_edge(n1, n2)\n else:\n g.add_edge(n2, n1)\n cls._default_g(g)\n\n for n in nx_iter_roots(g):\n ndata = g.nodes[n]\n ndata[\"target\"] = np.array([3.0])\n\n for n in nx.topological_sort(g):\n ndata = g.nodes[n]\n if \"target\" not in ndata:\n incoming = []\n for p in g.predecessors(n):\n pdata = g.nodes[p]\n incoming.append(pdata[\"target\"])\n incoming = np.concatenate(incoming)\n i = incoming.sum()\n o = func(i)\n ndata[\"target\"] = o\n\n input_data.append(GraphData.from_networkx(g, feature_key=\"features\"))\n output_data.append(GraphData.from_networkx(g, feature_key=\"target\"))\n\n return GraphDataLoader(input_data, output_data, batch_size=batch_size)\n\n\nT = Tuple[Tuple[Tuple[Any, ...], Dict], torch.Tensor]\n\n\nclass DataGetter:\n \"\"\"Methods to collect input, output from the loader.\"\"\"\n\n @classmethod\n def get_node(cls, batch: GraphBatch) -> T:\n args = (batch.x[:, :-1],)\n kwargs = {}\n out = batch.x[:, -1:]\n return ((args, kwargs), out)\n\n @classmethod\n def get_edge(cls, batch: GraphBatch) -> T:\n args = (batch.e[:, :-1],)\n kwargs = {}\n out = batch.e[:, -1:]\n return ((args, kwargs), out)\n\n @classmethod\n def get_global(cls, batch: GraphBatch) -> T:\n args = (batch.g[:, :-1],)\n kwargs = {}\n out = batch.g[:, -1:]\n return ((args, kwargs), out)\n\n @classmethod\n def get_batch(cls, batch_tuple: Tuple[GraphBatch, GraphBatch]) -> T:\n args = (batch_tuple[0],)\n kwargs = {}\n out = batch_tuple[1]\n return ((args, kwargs), (out.e, out.x, out.g))\n\n\nclass NetworkTestCaseValidationError(Exception):\n pass\n\n\n@contextmanager\ndef does_not_raise():\n yield\n\n\n# TODO: model reset is not working\nclass NetworkTestCase:\n \"\"\"A network test case.\"\"\"\n\n def __init__(\n self,\n network: torch.nn.Module,\n modifier: Optional[Callable[[GraphBatch], Any]] = None,\n getter: Optional[Callable[[GraphBatch], Any]] = None,\n optimizer: Type[torch.optim.Optimizer] = None,\n criterion=None,\n loss_func: Callable = None,\n epochs: int = 20,\n batch_size: int = 100,\n data_size: int = 1000,\n loader: Optional[Callable[[int, int], GraphDataLoader]] = None,\n expectation: Callable = None,\n tags: Tuple[str, ...] 
= None,\n device: str = None,\n ):\n if expectation is None:\n expectation = does_not_raise()\n self.expectation = expectation\n self.tags = tags\n if modifier is None:\n self.modifier = lambda x: x\n else:\n self.modifier = modifier\n if getter is None:\n self.getter = lambda x: x\n else:\n self.getter = getter\n self.network = network\n self.epochs = epochs\n self.batch_size = batch_size\n self.data_size = data_size\n self.device = device\n if loader is None:\n loader = DataLoaders.random_loader\n self.loader_func = loader\n self.loader = self.loader_func(data_size, batch_size)\n self.optimizer = optimizer\n if criterion is None:\n criterion = torch.nn.MSELoss()\n if loss_func is not None:\n loss_func = functools.partial(loss_func, criterion, self.device)\n else:\n loss_func = criterion\n self.loss_func = loss_func\n self.losses = None\n\n def to(self, x, device=None):\n device = device or self.device\n if device is not None:\n if isinstance(x, tuple):\n return tuple([self.to(_x) for _x in x])\n else:\n return x.to(device)\n return x\n\n def seed(self, seed: int = SEED):\n deterministic_seed(seed)\n\n def reset(self, seed: int = SEED):\n self.seed(seed)\n Networks.reset(self.network)\n self.to(self.network)\n\n def provide_example(self):\n batch = self.loader.first()\n mod_batch = self.modifier(batch)\n mod_batch = self.to(mod_batch)\n data = self.getter(mod_batch)[0]\n self.network(*data[0], **data[1])\n\n # def validate_network_device(self):\n # for p in self.network.parameters():\n # assert p.device == self.device\n\n def eval(self, data_size):\n self.network.eval()\n with torch.no_grad():\n running_loss = 0.0\n for batch in self.loader_func(data_size, data_size):\n batch = self.to(batch)\n batch = self.modifier(batch)\n input, target = self.getter(batch)\n output = self.network(*input[0], **input[1])\n loss = self.loss_func(output, target)\n running_loss += loss.item()\n print(\"TARGET\")\n print(target)\n print(\"OUTPUT\")\n print(output)\n return running_loss\n\n def train(self):\n print(\"Training {}\".format(self.network))\n self.reset()\n epochs = self.epochs\n net = self.network\n loader = self.loader\n optimizer = self.optimizer\n getter = self.getter\n modifier = self.modifier\n loss_func = self.loss_func\n\n # provide example\n self.provide_example()\n\n if optimizer is None:\n optimizer = optim.AdamW(net.parameters(), lr=1e-2)\n\n self.pre_train_validate()\n\n loss_arr = torch.zeros(epochs)\n for epoch in range(epochs):\n net.train()\n running_loss = 0.0\n for batch in loader:\n batch = self.to(batch)\n batch = modifier(batch)\n input, target = getter(batch)\n\n optimizer.zero_grad() # zero the gradient buffers\n output = net(*input[0], **input[1])\n\n for x, o, t in zip([\"edge\", \"node\", \"global\"], output, target):\n if o.shape != t.shape:\n raise NetworkTestCaseValidationError(\n \"{x} output shape ({o}) has a different shape from {x} target shape ({t})\".format(\n x=x, o=o.shape, t=t.shape\n )\n )\n\n loss = loss_func(output, target)\n self.to(loss)\n loss.backward(retain_graph=True)\n optimizer.step()\n\n running_loss += loss.item()\n loss_arr[epoch] = running_loss\n self.losses = loss_arr\n return loss_arr\n\n def pre_train_validate(self):\n for p in self.network.parameters():\n assert p.requires_grad is True\n\n def post_train_validate(self, threshold=0.1):\n if self.losses[-1] > self.losses[0] * threshold:\n raise NetworkTestCaseValidationError(\n \"Model did not train properly :(.\"\n \"\\n\\tlosses: {} -> {}\".format(self.losses[0], self.losses[-1])\n )\n\n 
def __str__(self):\n pass\n\n\[email protected](\n \"loader_func\",\n [\n DataLoaders.random_loader,\n DataLoaders.random_graph_red_black_nodes,\n DataLoaders.random_graph_red_black_edges,\n DataLoaders.est_density,\n ],\n)\ndef test_loaders(loader_func):\n loader = loader_func(100, 20)\n for x in loader:\n assert x\n\n\ndef mse_tuple(criterion, device, a, b):\n loss = torch.tensor(0.0, dtype=torch.float32, device=device)\n assert len(a) == len(b)\n for i, (_a, _b) in enumerate(zip(a, b)):\n assert _a.shape == _b.shape\n l = criterion(_a, _b)\n loss = loss + l\n return loss\n\n\ndef get_id(case):\n print(case.__class__)\n tokens = OrderedDict(\n {\"id\": None, \"name\": None, \"loader\": None, \"expectation\": None}\n )\n\n tokens[\"name\"] = case[\"network\"].name\n tokens[\"id\"] = case.get(\"id\", None)\n try:\n tokens[\"loader\"] = case.get(\"loader\", None).__name__\n except AttributeError:\n pass\n\n try:\n tokens[\"expectation\"] = case.get(\"expectation\", None)\n except AttributeError:\n pass\n\n return \"-\".join([str(v) for v in tokens.values() if v is not None])\n\n\[email protected]\ndef network_case(request):\n def pop(d, k, default):\n if k in d:\n res = d[k]\n del d[k]\n return res\n return default\n\n params = dict(request.param)\n args = pop(params, \"network_args\", tuple())\n kwargs = pop(params, \"network_kwargs\", {})\n params[\"network\"] = params[\"network\"](*args, **kwargs)\n case = NetworkTestCase(**params)\n return case\n\n\ncases = [\n dict(\n network=Networks.linear_block,\n modifier=DataModifier.node_sum,\n getter=DataGetter.get_node,\n tags=[\"block\", \"basic\"],\n ),\n dict(\n network=Networks.mlp_block,\n modifier=DataModifier.node_sum,\n getter=DataGetter.get_node,\n tags=[\"block\", \"basic\"],\n ),\n dict(\n network=Networks.node_block,\n modifier=DataModifier.node_sum,\n getter=DataGetter.get_node,\n tags=[\"block\", \"basic\", \"node\"],\n ),\n dict(\n network=Networks.edge_block,\n modifier=DataModifier.edge_sum,\n getter=DataGetter.get_edge,\n tags=[\"block\", \"basic\", \"edge\"],\n ),\n dict(\n network=Networks.global_block,\n modifier=DataModifier.global_sum,\n getter=DataGetter.get_global,\n tags=[\"block\", \"basic\", \"global\"],\n ),\n dict(\n network=Networks.graph_encoder,\n loader=DataLoaders.random_graph_red_black_nodes,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"graph_encoder\", \"node\"],\n ), # randomly creates an input value, assigns 'red' or 'black' to nodes\n dict(\n network=Networks.graph_encoder,\n loader=DataLoaders.random_graph_red_black_edges,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"graph_encoder\", \"edge\"],\n ), # randomly creates an input value, assigns 'red' or 'black' to edges\n dict(\n network=Networks.graph_encoder,\n loader=DataLoaders.random_graph_red_black_global,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"graph_encoder\", \"global\"],\n ), # randomly creates an input value, assigns 'red' or 'black' to global\n dict(\n network=Networks.graph_encoder,\n loader=DataLoaders.est_density,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n expectation=pytest.raises(NetworkTestCaseValidationError),\n tags=[\"graph_encoder\", \"fail\"],\n ), # network cannot learn the density without connections between nodes and edges,\n dict(\n network=Networks.graph_core,\n network_kwargs={\"pass_global_to_edge\": True, \"pass_global_to_node\": True},\n loader=DataLoaders.est_density,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"graph_core\", 
\"global\"],\n ), # estimate the graph density using GraphCore\n dict(\n network=Networks.graph_core,\n network_kwargs={\"pass_global_to_edge\": False, \"pass_global_to_node\": True},\n loader=DataLoaders.est_density,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"graph_core\", \"global\"],\n ), # estimate the graph density using GraphCore\n dict(\n network=Networks.graph_core,\n network_kwargs={\"pass_global_to_edge\": True, \"pass_global_to_node\": False},\n loader=DataLoaders.est_density,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"graph_core\", \"global\"],\n ), # estimate the graph density using GraphCore\n dict(\n network=Networks.graph_core,\n network_kwargs={\"pass_global_to_edge\": False, \"pass_global_to_node\": False},\n loader=DataLoaders.est_density,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"graph_core\", \"global\"],\n ), # estimate the graph density using GraphCore\n dict(\n network=Networks.graph_core,\n network_kwargs={\"pass_global_to_edge\": True, \"pass_global_to_node\": True},\n loader=DataLoaders.in_degree,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"graph_core\", \"node\"],\n ), # estimate the graph density using GraphCore\n dict(\n network=Networks.graph_encoder,\n loader=DataLoaders.in_degree,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"graph_core\", \"node\"],\n expectation=pytest.raises(NetworkTestCaseValidationError),\n ), # estimate the graph density using GraphCore\n dict(\n network=Networks.graph_core,\n network_kwargs={\"pass_global_to_edge\": True, \"pass_global_to_node\": True},\n loader=DataLoaders.boolean_network,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"boolean_circuit\"],\n ), # estimate the graph density using GraphCore\n dict(\n network=Networks.graph_encoder,\n loader=DataLoaders.boolean_network,\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n tags=[\"boolean_circuit\"],\n expectation=pytest.raises(NetworkTestCaseValidationError),\n ), # estimate the graph density using GraphCore\n dict(\n network=Networks.graph_core,\n loader=DataLoaders.sigmoid_circuit,\n network_kwargs={\"pass_global_to_edge\": True, \"pass_global_to_node\": True},\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n epochs=100,\n tags=[\"sigmoid_circuit\"],\n ), # estimate the graph density using GraphCore\n dict(\n network=Networks.graph_core_multi_agg,\n loader=DataLoaders.sigmoid_circuit,\n network_kwargs={\"pass_global_to_edge\": True, \"pass_global_to_node\": True},\n getter=DataGetter.get_batch,\n loss_func=mse_tuple,\n epochs=100,\n tags=[\"sigmoid_circuit_(multiagg)\"],\n ), # estimate the graph density using GraphCore\n]\n# in degree\n# average in degree\n# a function of number of nodes, in degree\n# boolean network that depends on multiple passes\n# sigmoid circuit\n# shortest _path example\nvisited_cases = set()\n\n\ndef parameterize_by_group(groups: Tuple[str, ...] 
= None) -> Callable:\n params = []\n for idx, p in enumerate(cases):\n if groups is None:\n params.append(p)\n else:\n for tag in p.get(\"tags\", []):\n if tag in groups:\n params.append(p)\n visited_cases.add(idx)\n break\n if not params:\n raise Exception(\"There are no cases with tags '{}'\".format(groups))\n return pytest.mark.parametrize(\"network_case\", params, ids=get_id, indirect=True)\n\n\ndef run_test_case(network_case, device):\n network_case.device = device\n with network_case.expectation:\n losses = network_case.train()\n print(losses)\n for p in network_case.network.parameters():\n assert device == str(p.device)\n network_case.post_train_validate()\n network_case.eval(20)\n return network_case\n\n\[email protected]\nclass TestTraining:\n @parameterize_by_group([\"basic\", \"block\"])\n def test_train_block(self, network_case, device, benchmark):\n benchmark.pedantic(\n run_test_case,\n args=(network_case, device),\n iterations=1,\n rounds=1,\n warmup_rounds=0,\n )\n\n @parameterize_by_group([\"graph_encoder\"])\n def test_train_encoder(self, network_case, device, benchmark):\n benchmark.pedantic(\n run_test_case,\n args=(network_case, device),\n iterations=1,\n rounds=1,\n warmup_rounds=0,\n )\n\n @parameterize_by_group([\"graph_core\"])\n def test_train_core(self, network_case, device, benchmark):\n benchmark.pedantic(\n run_test_case,\n args=(network_case, device),\n iterations=1,\n rounds=1,\n warmup_rounds=0,\n )\n\n @parameterize_by_group([\"boolean_circuit\"])\n def test_train_boolean_circuit(self, network_case, device, benchmark):\n benchmark.pedantic(\n run_test_case,\n args=(network_case, device),\n iterations=1,\n rounds=1,\n warmup_rounds=0,\n )\n\n @parameterize_by_group([\"sigmoid_circuit\"])\n def test_train_sigmoid_circuit(self, network_case, device, benchmark):\n benchmark.pedantic(\n run_test_case,\n args=(network_case, device),\n iterations=1,\n rounds=1,\n warmup_rounds=0,\n )\n\n @parameterize_by_group([\"sigmoid_circuit_(multiagg)\"])\n def test_train_sigmoid_circuit_with_multi_agg(\n self, network_case, device, benchmark\n ):\n benchmark.pedantic(\n run_test_case,\n args=(network_case, device),\n iterations=1,\n rounds=1,\n warmup_rounds=0,\n )\n",
"import pytest\nimport torch\nfrom flaky import flaky\n\nfrom caldera.data import GraphBatch\nfrom caldera.data import GraphData\n\n\[email protected](params=[GraphData, GraphBatch])\ndef data(request):\n args = (5, 4, 3)\n kwargs = dict(min_nodes=10, max_nodes=10, min_edges=5, max_edges=5)\n if request.param is GraphData:\n return GraphData.random(*args, **kwargs)\n else:\n return GraphBatch.random_batch(100, *args, **kwargs)\n\n\[email protected]\ndef shuffle(request):\n method, inplace = request.param\n\n if inplace:\n\n def wrapped(data):\n data1 = data.clone()\n getattr(data, method + \"_\")()\n return data1, data\n\n else:\n\n def wrapped(data):\n data2 = getattr(data, method)()\n return data, data2\n\n return wrapped\n\n\[email protected](\n \"shuffle\", [(\"shuffle_nodes\", True), (\"shuffle_nodes\", False)], indirect=True\n)\n@flaky(max_runs=3, min_passes=2)\ndef test_shuffle_nodes(data, shuffle):\n data1, data2 = shuffle(data)\n\n assert torch.all(data1.x[data1.edges.T] == data2.x[data2.edges.T])\n assert torch.all(data1.e == data2.e)\n assert torch.all(data1.g == data2.g)\n assert not torch.all(data1.x == data2.x)\n assert not torch.all(data1.edges == data2.edges)\n\n\[email protected](\n \"shuffle\", [(\"shuffle_edges\", True), (\"shuffle_edges\", False)], indirect=True\n)\n@flaky(max_runs=3, min_passes=2)\ndef test_shuffle_edges(data, shuffle):\n data1, data2 = shuffle(data)\n\n assert not torch.all(data1.e == data2.e)\n assert torch.all(data1.g == data2.g)\n assert torch.all(data1.x == data2.x)\n assert not torch.all(data1.edges == data2.edges)\n\n\[email protected](\n \"shuffle\", [(\"shuffle_graphs\", True), (\"shuffle_graphs\", False)], indirect=True\n)\ndef test_shuffle_graphs(shuffle):\n args = (5, 4, 3)\n kwargs = dict(min_nodes=5, max_nodes=5, min_edges=5, max_edges=5)\n data = GraphBatch.random_batch(100, *args, **kwargs)\n data1, data2 = shuffle(data)\n if data.__class__ is GraphData:\n pytest.xfail(\"GraphData has no `shuffle_graphs` method\")\n\n assert torch.all(data1.e == data2.e)\n assert not torch.all(data1.g == data2.g)\n assert torch.all(data1.x == data2.x)\n assert torch.all(data1.edges == data2.edges)\n assert not torch.all(data1.node_idx == data2.node_idx)\n assert not torch.all(data1.edge_idx == data2.edge_idx)\n\n\[email protected](\n \"shuffle\", [(\"shuffle\", True), (\"shuffle\", False)], indirect=True\n)\ndef test_shuffle(data, shuffle):\n data1, data2 = shuffle(data)\n assert not torch.all(data1.e == data2.e)\n assert not torch.all(data1.x == data2.x)\n assert not torch.all(data1.edges == data2.edges)\n if data.__class__ is GraphBatch:\n assert not torch.all(data1.g == data2.g)\n assert not torch.all(data1.node_idx == data2.node_idx)\n assert not torch.all(data1.edge_idx == data2.edge_idx)\n"
] | [
[
"torch.cat",
"torch.zeros_like",
"torch.is_tensor",
"torch.sparse_coo_tensor",
"torch.tensor",
"torch.repeat_interleave"
],
[
"torch.zeros",
"torch.tensor",
"numpy.concatenate",
"torch.nn.Linear",
"numpy.random.randn",
"torch.no_grad",
"torch.nn.ReLU",
"numpy.array",
"numpy.zeros",
"torch.nn.MSELoss",
"numpy.random.randint"
],
[
"torch.all"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anksng/PDF-TIFF-image-extractor-with-anonymization-of-faces-and-license-plates | [
"0854aa09b51ade3673bb512a76718b43d77b063c"
] | [
"utils.py"
] | [
"import glob\nimport os\nimport fitz\nimport PIL as P\nimport io\nimport numpy as np\nimport turicreate as tc\nimport pandas as pd\ndef load_paths(path):\n \"\"\"\n Loads pdf and tiff files from the root folder as a 1-D list.\n -----------------------------------------------------------\n :param\n path: str, path of the root dir where to for PDF and TIFFs\n -----------------------------------------------------------\n :return: list with all pdf and tiffs found\n note - Debug here - if extra pdfs are there in the folder, getting an idea about a UID will be helpful!\n \"\"\"\n paths = glob.glob(path + '/**', recursive=True)\n pdfs = [i for i in paths if ('.pdf') in i]\n pdfs_ = [i for i in paths if ('.PDF') in i]\n tiff = [i for i in paths if ('.tiff') in i]\n final_list = np.hstack((pdfs, pdfs_, tiff))\n print(\"Total of %d files were found\"% len(final_list))\n return final_list\n\n\ndef chunk_generator(l, batch_size):\n \"\"\"\n Given any list and a batch size, returns a list of lists where each element is a list containing\n N (BATCH_SIZE) elements.\n -----------------------------------------------------------\n :param\n l: a 1-D list\n batch_size: Batch size of a chunk\n -----------------------------------------------------------\n :return: list of lists of batches\n \"\"\"\n chunks = [l[i:i + batch_size] for i in range(0, len(l), batch_size)]\n return chunks\n\n\ndef get_size(all_paths):\n \"\"\"\n Returns the size of a file given a path. If list is given returns the size of all files.\n -----------------------------------------------------------\n :param\n all_paths: list of paths of files to calculate size\n -----------------------------------------------------------\n :return:\n Size of file(s) in MegaBytes\n \"\"\"\n total_size = 0\n for i in all_paths:\n total_size += os.path.getsize(i)\n return total_size / 1024000\n\n\ndef read_tiff(path):\n \"\"\"\n Returns a list of image objects given a .tiff file.\n -----------------------------------------------------------\n :param\n path: path to a tiff file\n -----------------------------------------------------------\n :return:\n List of image objects from tiff ( number of images = number of pages in tiff)\n \"\"\"\n # img = P.Image.open(path)\n # for i in range(img.n_frames): # splitting tiff pages\n # img.seek(i)\n # img.save('tiff_temp/image_%s.jpg'%(i,))\n img = P.Image.open(path)\n images = []\n for i in range(img.n_frames):\n img.seek(i)\n images.append(P.Image.fromarray(np.array(img)))\n return images\n\n\n\ndef pdf2images(path):\n \"\"\"\n Returns a list of image objects from pdf.\n -----------------------------------------------------------\n :param\n path: path to pdf file\n -----------------------------------------------------------\n :return:\n list of image objects from the pdf\n \"\"\"\n doc = fitz.open(path)\n # lenXREF = doc._getXrefLength()\n images = []\n for i in range(len(doc)):\n imglist = doc.getPageImageList(i)\n for img in imglist:\n xref = img[0] # xref number\n pix = fitz.Pixmap(doc, xref) # make pixmap from image\n if pix.n < 5: # can be saved as PNG\n images.append(bytes_to_image(pix.getPNGData()))\n else: # must convert CMYK first\n pix0 = fitz.Pixmap(fitz.csRGB, pix)\n images.append(bytes_to_image(pix0.getPNGData()))\n pix0 = None # free Pixmap resources\n pix = None # free Pixmap resources\n return images\n\n\ndef bytes_to_image(image_bytes):\n \"\"\"\n Converts byte image to a PIL image object.\n -----------------------------------------------------------\n :param\n image_bytes: image in Bytes format\n 
-----------------------------------------------------------\n :return:\n PIL image\n \"\"\"\n imgstream = io.BytesIO(image_bytes)\n imageFile = P.Image.open(imgstream)\n return imageFile\n\n\ndef create_destination_dirs(config):\n \"\"\"\n Creates logs and save dirs\n -----------------------------------------------------------\n :param\n config: config for initializing anonymize()\n -----------------------------------------------------------\n :return:\n tuple (str,str) - (path to save dir, path to logs dir)\n \"\"\"\n try:\n save_folder = os.mkdir(config.path_to_save_dir + '/anonymized_images/')\n except FileExistsError:\n save_folder = config.path_to_save_dir + '/anonymized_images/'\n try:\n logs_folder = os.mkdir(config.path_to_save_logs + '/logs/')\n logs_df = pd.DataFrame(columns=['path', 'annotations'])\n logs_df.to_csv( config.path_to_save_logs + '/logs/' + 'logs.csv',index=False)\n except FileExistsError:\n logs_folder = config.path_to_save_logs + '/logs/'\n\n return config.path_to_save_dir + '/anonymized_images/', config.path_to_save_logs + '/logs/'\n\n\ndef from_pil_image(pil_img):\n \"\"\"\n Returns a graphlab.Image constructed from the passed PIL Image\n -----------------------------------------------------------\n Parameters\n -----------------------------------------------------------\n pil_img : PIL.Image\n A PIL Image that is to be converted to a graphlab.Image\n -----------------------------------------------------------\n Returns\n out: graphlab.Image\n The input converted to a graphlab.Image\n -----------------------------------------------------------\n \"\"\"\n # Read in PIL image data and meta data\n height = pil_img.size[1]\n width = pil_img.size[0]\n _format = {'JPG': 0, 'PNG': 1, 'RAW': 2, 'UNDEFINED': 3}\n\n if pil_img.mode == 'L':\n image_data = bytearray([z for z in pil_img.getdata()])\n channels = 1\n elif pil_img.mode == 'RGB':\n image_data = bytearray([z for l in pil_img.getdata() for z in l ])\n channels = 3\n else:\n image_data = bytearray([z for l in pil_img.getdata() for z in l])\n channels = 4\n format_enum = _format['RAW']\n image_data_size = len(image_data)\n\n # Construct a tc.Image\n img = tc.Image(_image_data=image_data,\n _width=width,\n _height=height,\n _channels=channels,\n _format_enum=format_enum,\n _image_data_size=image_data_size)\n return img\n\ndef pil2cv2(pil_image):\n \"\"\"\n Returns a cv2 image given a PIL image object. (If input image has 2 channels, then converts into three channels)\n -----------------------------------------------------------\n :param\n pil_image: PIL image format\n -----------------------------------------------------------\n :return:\n cv2 image\n \"\"\"\n open_cv_image = np.array(pil_image)\n # Convert RGB to BGR\n try:\n open_cv_image = open_cv_image[:, :, ::-1].copy()\n except IndexError:\n pass\n if len(open_cv_image.shape) == 2:\n reshaped_img = np.stack((open_cv_image,)*3, axis=-1)\n return reshaped_img\n else:\n return open_cv_image"
] | [
[
"numpy.hstack",
"numpy.array",
"numpy.stack",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |