repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
Will03/NVSM_pytorch | [
"45e91efa6e4571a955c0f76807f2d6b5d7ffa66a",
"1d50581f13255def43cb4025735487644d0fe93a"
] | [
"src/models/vectorTraining.py",
"src/models/linear_model_example.py"
] | [
"import numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport os\n\n\n\ndataPath = '../../Willll/' # Relative path of homework data\n\n\n# r=root, d=directories, f = files\n\nDocList = []\nQueryList = []\nDocData = []\nQueryData = []\n\ndef articleParser(myPath):\n with open(myPath, 'r') as fp:\n docData = fp.read().replace('\\n', '')\n return docData\n\n# read Query List\nwith open(dataPath+'test/query_list.txt', 'r') as fp:\n tmpLine = fp.readline()\n while tmpLine:\n tmpLine = tmpLine.strip('\\n')\n if tmpLine != '':\n QueryList.append(tmpLine)\n tmpLine = fp.readline()\n\n# Read query data\nfor eachQ in QueryList:\n QueryData.append(articleParser(dataPath+'test/query/%s'%eachQ))\n\n\nfor r, d, f in os.walk(dataPath+'doc'):\n for file in f:\n DocList.append(file)\n\nfor eachD in DocList:\n DocData.append(articleParser(dataPath+'doc/'+eachD))\n\n\n# TF-IDF\nmax_df = 0.95 # Ignore words with high df. (Similar effect to stopword filtering)\nmin_df = 5 # Ignore words with low df.\nsmooth_idf = True # Smooth idf weights by adding 1 to df.\nsublinear_tf = True # Replace tf with 1 + log(tf).\n\n# Rocchio (Below is a param set called Ide Dec-Hi)\nalpha = 1\nbeta = 0.75\ngamma = 0.15\nrel_count = 5 # Use top-5 relevant documents to update query vector.\nnrel_count = 1 # Use only the most non-relevant document to update query vector.\niters = 5\nprint('start train')\n# Build TF-IDF vectors of docs and queries\nvectorizer = TfidfVectorizer(max_df=max_df, min_df=min_df,\n smooth_idf=smooth_idf, sublinear_tf=sublinear_tf)\ndoc_tfidfs = vectorizer.fit_transform(DocData).toarray()\nquery_vecs = vectorizer.transform(QueryData).toarray()\nprint('start count simi')\n# Rank documents based on cosine similarity\ncos_sim = cosine_similarity(query_vecs, doc_tfidfs)\nrankings = np.flip(cos_sim.argsort(), axis=1)\n\nprint('start write file')\nlimit = 600\nfor query_name, ranking in zip(QueryList, rankings):\n ranked_docs=''\n index = 0\n for idx in ranking:\n if index >=600:\n break\n ranked_docs += '%s,'%DocList[idx]\n with open('../../Willll/%s.txt'%query_name, mode='w') as file:\n file.write('%s' % (ranked_docs))\n",
"from pathlib import Path\n\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom utils import create_dataset, create_pytorch_datasets, create_query_dataset, \\\n evaluate_queries, load_data\nfrom train_model import train\nfrom evaluate_model import evaluate, generate_eval\n\nfrom nvsm_linear import NVSMLinear, loss_function\n\ndef main():\n model_folder = Path('../../models')\n data_folder = Path('../../data/processed')\n model_path = model_folder / 'nvsm_30_20_10.pt'\n batch_size = 1000\n voc, stoi, itos, docs = load_data(\n model_folder,\n data_folder\n )\n doc_names = [doc['name'] for doc in docs]\n print('Vocabulary size', len(voc))\n n_grams, document_ids = create_dataset(\n tok_docs = [doc['tokens'] for doc in docs],\n stoi = stoi,\n n = 10\n )\n print('N-grams number', len(n_grams))\n k_values = [1, 3, 5, 10]\n (train_data,\n eval_data,\n eval_train_data) = create_pytorch_datasets(n_grams, document_ids)\n print('Train dataset size', len(train_data))\n print('Eval dataset size', len(eval_data))\n print('Eval (training) dataset size', len(eval_train_data))\n train_loader = DataLoader(train_data, batch_size = batch_size, shuffle = True)\n eval_loader = DataLoader(eval_data, batch_size = batch_size, shuffle = False)\n eval_train_loader = DataLoader(eval_train_data, batch_size = batch_size, shuffle = False)\n device = torch.device('cuda')\n lamb = 1e-3 # regularization weight in the loss\n nvsm = NVSMLinear(\n n_doc = len(doc_names),\n n_tok = len(stoi),\n dim_doc_emb = 20,\n dim_tok_emb = 30,\n neg_sampling_rate = 10,\n pad_token_id = stoi['<PAD>']\n ).to(device)\n optimizer = optim.Adam(nvsm.parameters(), lr = 1e-3)\n train(\n nvsm = nvsm,\n device = device,\n optimizer = optimizer,\n epochs = 120,\n train_loader = train_loader,\n eval_loader = eval_train_loader,\n k_values = k_values,\n loss_function = loss_function,\n lamb = lamb,\n print_every = 500\n )\n torch.save(nvsm.state_dict(), model_path)\n nvsm.eval()\n recall_at_ks = evaluate(\n nvsm = nvsm,\n device = device,\n eval_loader = eval_loader,\n recalls = k_values,\n loss_function = loss_function,\n )\n print(generate_eval(k_values, recall_at_ks))\n queries_text = [\n 'violence king louis decapitated',\n 'domain language translate',\n 'governement robespierre',\n 'perfect imperfect information',\n 'ontology translation',\n 'high levels of political violence',\n 'state education system which promotes civic values',\n 'political struggles',\n 'Almost all future revolutionary movements looked back to the Revolution as their predecessor',\n 'Habermas argued that the dominant cultural model in 17th century France was a \"representational\" culture',\n 'mathematical model winning strategy',\n 'solutions for two-person zero-sum games',\n 'cooperative coalitions bargaining',\n 'eigenvalue',\n 'graph, dimension and components',\n 'inner product vertex'\n ]\n evaluation_results = evaluate_queries(\n nvsm,\n queries_text,\n doc_names,\n stoi,\n batch_size,\n device\n )\n for query, doc_idx in zip(queries_text, evaluation_results):\n print(f'{query:35} -> {doc_names[doc_idx]}')\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.feature_extraction.text.TfidfVectorizer"
],
[
"torch.device",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
faiyazsamin/FaceRecognition | [
"9c0bd65f300784910a923f446cf33bacfc502b52"
] | [
"Final.min.py"
] | [
"import cv2\nimport numpy as np\nimport os\n\nsubjects = [\"\",\"Mama\",\"Samin\",\"Delwar\"]\n\n\ndef detect_faces(colored_img, scaleFactor=1.06):\n\n img_copy = colored_img.copy()\n gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)\n f_cascade = cv2.CascadeClassifier('data/lbpcascade_frontalface.xml')\n faces = f_cascade.detectMultiScale(gray, scaleFactor=scaleFactor, minNeighbors=5);\n if len(faces) == 0:\n return None, None\n (x, y, w, h) = faces[0]\n return gray[y:y+w, x:x+h], faces[0]\n\n\ndef prepare_training_data(data_folder_path):\n\n dirs = os.listdir(data_folder_path)\n faces = []\n labels = []\n\n for dir_name in dirs:\n\n if not dir_name.startswith(\"s\"):\n continue\n\n label = int(dir_name.replace(\"s\", \"\"))\n subject_dir_path = data_folder_path + \"/\" + dir_name\n subject_images_names = os.listdir(subject_dir_path)\n\n for image_name in subject_images_names:\n if image_name.startswith(\".\"):\n continue\n\n image_path = subject_dir_path + \"/\" + image_name\n image = cv2.imread(image_path)\n cv2.imshow(\"Training on image...\", cv2.resize(image, (400, 500)))\n cv2.waitKey(10)\n\n face, rect = detect_faces(image)\n if face is not None:\n faces.append(face)\n labels.append(label)\n\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n print(\"Total faces: \", len(faces))\n print(\"Total labels: \", len(labels))\n\n return faces, labels\n\n\ndef trainData(trainingDataPath, output_path):\n face_recognizer = cv2.face.LBPHFaceRecognizer_create()\n faces, labels = prepare_training_data(trainingDataPath)\n\n face_recognizer.train(faces, np.array(labels))\n face_recognizer.write(output_path)\n\n\ndef loadTrainedData(path):\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n recognizer.read(path)\n return recognizer\n\n\ndef predictStaticImage(test_img,trainer_file):\n img = test_img.copy()\n face, rect = detect_faces(img)\n lt = loadTrainedData(trainer_file)\n label, confidence = lt.predict(face)\n label_text = subjects[label]\n (x, y, w, h) = rect\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.putText(img, label_text, (rect[0], rect[1] - 5), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)\n print(\"Confidence =\",confidence)\n return img\n\ndef showImage(image):\n cv2.imshow('Frame', image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef camToFile(framesToCapture,output_dir):\n cam = cv2.VideoCapture(1)\n detector = cv2.CascadeClassifier('data/haarcascade_frontalface_alt.xml')\n sampleNum = 0\n\n while True:\n ret, img = cam.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n face = detector.detectMultiScale(gray, 1.5, 5)\n for (x, y, w, h) in face:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n\n sampleNum = sampleNum + 1\n if sampleNum%(100/framesToCapture) == 0:\n print(\"Frames Captured:\", int(sampleNum/(100/framesToCapture)))\n cv2.imwrite(output_dir+\"/\"+ str(int(sampleNum/(100/framesToCapture))) + \".jpg\", gray[y:y + h, x:x + w])\n\n cv2.imshow('frame', img)\n if cv2.waitKey(100) & 0xFF == ord('q'):\n break\n elif sampleNum >= 100:\n break\n\n\ndef detectFace(trainer_file):\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n recognizer.read(trainer_file)\n faceCascade = cv2.CascadeClassifier(\"data/haarcascade_frontalface_alt.xml\")\n\n cam = cv2.VideoCapture(1)\n font = cv2.FONT_HERSHEY_DUPLEX\n while True:\n ret, im = cam.read()\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(100, 100),\n flags=cv2.CASCADE_SCALE_IMAGE)\n 
for (x, y, w, h) in faces:\n nbr_predicted, conf = recognizer.predict(gray[y:y + h, x:x + w])\n cv2.rectangle(im, (x - 50, y - 50), (x + w + 50, y + h + 50), (0, 225, 0), 2)\n nbr_predicted = subjects[nbr_predicted]\n cv2.putText(im, str(nbr_predicted), (x + 30, y + h + 30), font, 1, (0, 0, 225)) # Draw the text\n cv2.imshow('FaceDetector', im)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cam.release()\n cv2.destroyAllWindows()\n\n\n#trainData('training-data','test.yml')\ndetectFace('test.yml')\n#showImage(predictStaticImage(cv2.imread(\"test-data/4.jpg\"),'test3.yml'))\n#camToFile(20,'training-data/s7')"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ankitdobhal/analytics-zoo | [
"b8374bcd6c73bba49fe0b0ab075528cdd94cf2af",
"b8374bcd6c73bba49fe0b0ab075528cdd94cf2af"
] | [
"pyzoo/test/zoo/zouwu/autots/test_auto_ts.py",
"pyzoo/test/zoo/zouwu/model/anomaly/test_model_anomaly.py"
] | [
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nimport numpy as np\nfrom test.zoo.pipeline.utils.test_utils import ZooTestCase\n\nfrom zoo.automl.config.recipe import LSTMGridRandomRecipe, MTNetGridRandomRecipe\nfrom zoo.zouwu.autots.forecast import AutoTSTrainer\nfrom zoo.zouwu.autots.forecast import TSPipeline\n\nimport pandas as pd\n\n\[email protected](\"init_ray_context_fixture\")\nclass TestZouwuAutoTS(ZooTestCase):\n\n def setup_method(self, method):\n # super(TestZouwuAutoTS, self).setup_method(method)\n self.create_data()\n\n def teardown_method(self, method):\n pass\n\n def create_data(self):\n sample_num = np.random.randint(100, 200)\n self.train_df = pd.DataFrame({\"datetime\": pd.date_range(\n '1/1/2019', periods=sample_num), \"value\": np.random.randn(sample_num)})\n val_sample_num = np.random.randint(20, 30)\n self.validation_df = pd.DataFrame({\"datetime\": pd.date_range(\n '1/1/2019', periods=val_sample_num), \"value\": np.random.randn(val_sample_num)})\n\n def test_AutoTSTrainer_smoke(self):\n horizon = np.random.randint(1, 6)\n tsp = AutoTSTrainer(dt_col=\"datetime\",\n target_col=\"value\",\n horizon=horizon,\n extra_features_col=None\n )\n pipeline = tsp.fit(self.train_df)\n assert isinstance(pipeline, TSPipeline)\n assert pipeline.internal.config is not None\n evaluate_result = pipeline.evaluate(self.validation_df)\n if horizon > 1:\n assert evaluate_result[0].shape[0] == horizon\n else:\n assert evaluate_result[0]\n predict_df = pipeline.predict(self.validation_df)\n assert not predict_df.empty\n\n def test_AutoTrainer_LstmRecipe(self):\n horizon = np.random.randint(1, 6)\n tsp = AutoTSTrainer(dt_col=\"datetime\",\n target_col=\"value\",\n horizon=horizon,\n extra_features_col=None\n )\n pipeline = tsp.fit(self.train_df,\n self.validation_df,\n recipe=LSTMGridRandomRecipe(\n num_rand_samples=5,\n batch_size=[1024],\n lstm_2_units=[8],\n training_iteration=1,\n epochs=1\n ))\n assert isinstance(pipeline, TSPipeline)\n assert pipeline.internal.config is not None\n evaluate_result = pipeline.evaluate(self.validation_df)\n if horizon > 1:\n assert evaluate_result[0].shape[0] == horizon\n else:\n assert evaluate_result[0]\n predict_df = pipeline.predict(self.validation_df)\n assert not predict_df.empty\n\n def test_AutoTrainer_MTNetRecipe(self):\n horizon = np.random.randint(1, 6)\n tsp = AutoTSTrainer(dt_col=\"datetime\",\n target_col=\"value\",\n horizon=horizon,\n extra_features_col=None\n )\n pipeline = tsp.fit(self.train_df,\n self.validation_df,\n recipe=MTNetGridRandomRecipe(\n num_rand_samples=5,\n time_step=[5],\n long_num=[2],\n batch_size=[1024],\n cnn_hid_size=[32, 50],\n training_iteration=1,\n epochs=1\n ))\n assert isinstance(pipeline, TSPipeline)\n assert pipeline.internal.config is not None\n evaluate_result = pipeline.evaluate(self.validation_df)\n if horizon > 1:\n assert evaluate_result[0].shape[0] == horizon\n else:\n assert evaluate_result[0]\n predict_df = 
pipeline.predict(self.validation_df)\n assert not predict_df.empty\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n",
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nimport numpy as np\nimport pandas as pd\nfrom test.zoo.pipeline.utils.test_utils import ZooTestCase\n\nfrom zoo.zouwu.model.forecast.lstm_forecaster import LSTMForecaster\nfrom zoo.zouwu.model.anomaly import ThresholdDetector, ThresholdEstimator\n\n\nclass TestZouwuModelAnomaly(ZooTestCase):\n\n def gen_data(self, feature_num=6, sample_num=100):\n return pd.DataFrame(data=np.random.randn(sample_num, feature_num))\n\n def train_test_split(self, df, test_num, look_back):\n test_split_index = test_num + look_back + 1\n\n # train_df\n train_df = df[:-test_num]\n test_df = df[-test_split_index:]\n test_df = test_df.reset_index(drop=True)\n return train_df, test_df\n\n def roll_data(self, dataset, look_back, target_col_indexes):\n \"\"\"\n Generate input samples from rolling\n \"\"\"\n X, Y = [], []\n data = dataset.to_numpy()\n for i in range(len(dataset) - look_back - 1):\n X.append(data[i: (i + look_back)])\n # Y.append(dataset.iloc[i + look_back, target_col_indexes])\n Y.append(data[i + look_back][target_col_indexes])\n return np.array(X), np.array(Y)\n\n def test_app(self):\n look_back = 4\n\n # generate dataframe\n data = self.gen_data(feature_num=6, sample_num=100)\n # split train and test dataframes\n train_df, test_df = self.train_test_split(data, test_num=20, look_back=look_back)\n\n # roll data to generate model input\n x_train, y_train = self.roll_data(dataset=train_df, look_back=look_back,\n target_col_indexes=[0])\n x_test, y_test = self.roll_data(dataset=test_df, look_back=look_back,\n target_col_indexes=[0])\n\n # create model, train on train data and predict on test\n lstm_config = {\"lstm_units\": [32] * 2, \"lr\": 0.001}\n forecaster = LSTMForecaster(target_dim=1, feature_dim=x_train.shape[-1], **lstm_config)\n forecaster.fit(x=x_train, y=y_train, batch_size=1024, epochs=50, distributed=False)\n y_predict = forecaster.predict(x_test)\n\n # find anomaly by comparing the difference between y_predict and y_test (actual)\n threshold = 10\n detector = ThresholdDetector()\n anomaly_indexes = detector.detect(y=y_test,\n yhat=y_predict,\n threshold=threshold)\n assert len(anomaly_indexes) == 0\n\n # if user don't have a threshold, he can choose to use estimator\n # to find a threshold first\n ratio = 0.1\n threshold = ThresholdEstimator().fit(y=y_test, yhat=y_predict, ratio=ratio)\n fitted_anomaly_indexes = detector.detect(y=y_test, yhat=y_predict, threshold=threshold)\n assert len(fitted_anomaly_indexes) == int(ratio * y_test.shape[0])\n\n def test_threshold_case1_multivariant(self):\n sample_num = 10\n feature_dim = 5\n num_anomaly = 5\n # predicted value\n y_pred = np.full((sample_num, feature_dim), 0)\n # actual value\n y_test = np.full(sample_num * feature_dim, 0.2)\n\n gen_rand_indexes = [0, 7, 16, 33, 45]\n y_test[gen_rand_indexes] = 10\n y_test = y_test.reshape((sample_num, feature_dim))\n\n anomaly_indexes = ThresholdDetector().detect(y=y_test, yhat=y_pred, 
threshold=3)\n assert len(anomaly_indexes) == num_anomaly\n\n def test_threshold_case4(self):\n sample_num = 10\n feature_dim = 5\n num_anomaly = 5\n # actual value\n y_test = np.zeros(sample_num * feature_dim)\n\n gen_rand_indexes = [0, 7, 16, 33, 45]\n y_test[gen_rand_indexes] = 10\n y_test = y_test.reshape((sample_num, feature_dim))\n\n # use threshold (-1, 1) for each dimension\n threshold_min = np.ones_like(y_test) * (-1)\n threshold_max = np.ones_like(y_test)\n anomaly_indexes = ThresholdDetector().detect(y=y_test, yhat=None,\n threshold=(threshold_min, threshold_max))\n assert len(anomaly_indexes) == num_anomaly\n\n def test_threshold_gaussian(self):\n sample_num = 500\n # actual value\n y_test = np.full(sample_num, 2)\n mu, sigma, ratio = 3, 0.1, 0.01\n s = np.random.normal(mu, sigma, sample_num)\n y = y_test + s\n\n threshold = ThresholdEstimator().fit(y, y_test, mode=\"gaussian\", ratio=ratio)\n from scipy.stats import norm\n assert abs(threshold-(norm.ppf(1-ratio)*sigma+mu)) < 0.02\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n"
] | [
[
"numpy.random.randn",
"pandas.date_range",
"numpy.random.randint"
],
[
"scipy.stats.norm.ppf",
"numpy.ones_like",
"numpy.full",
"numpy.random.normal",
"numpy.random.randn",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hzm2016/assistive-gym-robosuite | [
"5c529f4444cc386383618bfa584341740a8468f9",
"5c529f4444cc386383618bfa584341740a8468f9",
"5c529f4444cc386383618bfa584341740a8468f9",
"5c529f4444cc386383618bfa584341740a8468f9",
"5c529f4444cc386383618bfa584341740a8468f9"
] | [
"code/pytorch/methods/SSAC.py",
"envs/mujoco/utils/play_model.py",
"envs/mujoco/controllers/test/test_pd_controller.py",
"envs/robosuite/robosuite/controllers/joint_vel.py",
"code/pytorch/utils/sim_contextual_solver.py"
] | [
"import os\nimport torch\nimport torch.nn.functional as F\nimport glob\nimport numpy as np\nfrom torch.optim import Adam\nfrom utils.utils import soft_update, hard_update\nfrom utils.model import GaussianPolicy, QNetwork, DeterministicPolicy\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Input, merge, Lambda, Activation\nfrom keras.layers.merge import Add, Multiply, Concatenate, concatenate\nfrom keras.initializers import RandomUniform\nfrom keras.optimizers import Adam\nimport keras.backend as K\nfrom keras import metrics\n\n\ndef weighted_entropy(p, w_norm):\n # w = tf.divide(tf.exp(A - np.max(A)), prob)\n # w_norm = w / K.sum(w)\n return K.sum(w_norm * p * K.log(p + 1e-8))\n\n\ndef weighted_mean(p, w_norm):\n # w = tf.exp(A- np.max(A))\n # w_norm = w / K.sum(w)\n p_weighted = np.multiply(w_norm, p)\n return K.mean(p_weighted, axis=0)\n\n\ndef weighted_mse(Q_target, Q_pred, w_norm):\n # w = tf.exp(A- np.max(A))\n # w_norm = w / K.sum(w)\n error = K.square(Q_target - Q_pred)\n return K.mean(w_norm * error)\n\n\ndef softmax(x):\n col = x.shape[1]\n x_max = np.reshape(np.amax(x, axis=1), (-1, 1))\n e_x = np.exp(x - np.matlib.repmat(x_max, 1, col) )\n e_x_sum = np.reshape( np.sum(e_x, axis=1), (-1, 1))\n out = e_x / np.matlib.repmat(e_x_sum, 1, col)\n return out\n\n\ndef weighted_mean_array(x, weights):\n weights_mean = np.mean(weights, axis=1)\n x_weighted = np.multiply(x, weights)\n mean_weighted = np.divide(np.mean(x_weighted, axis=1), weights_mean)\n return np.reshape(mean_weighted, (-1, 1))\n\n\ndef p_sample(p):\n row, col = p.shape\n p_sum = np.reshape(np.sum(p, axis=1), (row, 1))\n p_normalized = p/np.matlib.repmat(p_sum, 1, col)\n p_cumsum = np.matrix(np.cumsum( p_normalized, axis=1))\n # print(p_cumsum[0])\n rand = np.matlib.repmat(np.random.random((row, 1)), 1, col)\n # print(rand[0])\n o_softmax = np.argmax(p_cumsum >= rand, axis=1)\n return o_softmax\n\n\ndef entropy(p):\n return K.sum(p * K.log((p + 1e-8)))\n\n\ndef add_normal(x_input, outshape, at_eps):\n \"\"\"\n add normal noise to the input\n \"\"\"\n epsilon = K.random_normal(shape=outshape, mean=0., stddev=1.)\n x_out = x_input + at_eps * np.multiply(epsilon, np.absolute(x_input))\n return x_out\n\n\ndef kl(p, q):\n return K.sum(p * K.log((p + 1e-8) / (q + 1e-8)))\n\n\nclass Multi_SAC(object):\n def __init__(self, state_dim, action_dim, option_dim, max_action, action_space):\n\n self.alpha = 0.2\n self.lr = 0.0003\n self.option_num = option_dim\n\n self.policy_type = \"Gaussian\"\n self.target_update_interval = 1\n self.automatic_entropy_tuning = True\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n \"\"\" critic network \"\"\"\n self.critic = QNetwork(state_dim, action_dim, 400).to(device=self.device)\n self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)\n\n self.critic_target = QNetwork(state_dim, action_dim, 400).to(self.device)\n hard_update(self.critic_target, self.critic)\n\n self.sampling_prob = torch.FloatTensor(state).to(self.device)\n # ===================================================================== #\n # Option Model #\n # ===================================================================== #\n self.option_state_input, self.option_action_input, self.option_input_concat, self.option_out_dec, \\\n self.option_out, self.option_out_noise, self.option_model = self.create_option_model()\n Advantage = np.stop_gradient(self.target_q_value - self.predicted_v_value)\n Weight = np.divide(np.exp(Advantage - np.max(Advantage)), 
self.sampling_prob)\n W_norm = Weight/K.mean(Weight)\n\n critic_conditional_entropy = weighted_entropy(self.option_out, tf.stop_gradient(W_norm))\n p_weighted_ave = weighted_mean(self.option_out, tf.stop_gradient(W_norm))\n self.critic_entropy = critic_conditional_entropy - self.c_ent * entropy(p_weighted_ave)\n\n self.vat_loss = kl(self.option_out, self.option_out_noise)\n self.reg_loss = metrics.mean_absolute_error(self.option_input_concat, self.option_out_dec)\n self.option_loss = self.reg_loss + self.entropy_coeff * (self.critic_entropy) + self.c_reg * self.vat_loss\n self.option_optimize = tf.train.AdamOptimizer(self.option_lr).minimize(self.option_loss)\n\n \"\"\" option network \"\"\"\n self.it = 0\n\n if self.policy_type == \"Gaussian\":\n # Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper\n if self.automatic_entropy_tuning == True:\n self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()\n self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)\n self.alpha_optim = Adam([self.log_alpha], lr=self.lr)\n\n self.policy = GaussianPolicy(state_dim, action_dim, 400, max_action).to(self.device)\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n\n elif self.policy_type == \"Multi_Gaussian\":\n if self.automatic_entropy_tuning == True:\n self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()\n self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)\n self.alpha_optim = Adam([self.log_alpha], lr=self.lr)\n\n self.policy = GaussianPolicy(state_dim, action_dim, 400, max_action).to(self.device)\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n\n else:\n self.alpha = 0\n self.automatic_entropy_tuning = False\n self.policy = DeterministicPolicy(state_dim, action_dim, 400, max_action).to(self.device)\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n\n def select_action(self, state, eval=True):\n\n state = torch.FloatTensor(state).to(self.device).unsqueeze(0)\n\n if eval == False:\n action, _, _ = self.policy.sample(state)\n else:\n _, _, action = self.policy.sample(state)\n return action.detach().cpu().numpy()[0]\n\n def train_actor_option(self, inputs, a_gradient, option):\n self.sess.run(self.actor_optimizer_list[option], feed_dict={\n self.actor_state_input_list[option]: inputs,\n self.action_gradient_list[option]: a_gradient\n })\n\n def train_critic(self, inputs, action, target_q_value, predicted_v_value, sampling_prob):\n return self.sess.run([self.critic_optimize], feed_dict={\n self.critic_state_input: inputs,\n self.critic_action_input: action,\n self.target_q_value: target_q_value,\n self.predicted_v_value: predicted_v_value,\n self.sampling_prob: sampling_prob\n })\n\n def train_option(self, inputs, action, target_q_value, predicted_v_value, sampling_prob):\n return self.sess.run([self.option_optimize], feed_dict={\n self.option_state_input: inputs,\n self.option_action_input: action,\n self.target_q_value: target_q_value,\n self.predicted_v_value: predicted_v_value,\n self.sampling_prob: sampling_prob\n })\n\n def max_option(self, inputs):\n Q_predict = []\n n = inputs.shape[0]\n for o in range(int(self.option_num)):\n action_i = self.predict_actor_target(inputs, o)\n Q_predict_i, _ = self.predict_critic_target(inputs, action_i)\n if o == 0:\n Q_predict = np.reshape(Q_predict_i, (-1, 1))\n else:\n Q_predict = np.concatenate((Q_predict, np.reshape(Q_predict_i, (-1, 1))), axis=1)\n\n o_max = 
np.argmax(Q_predict, axis=1)\n Q_max = np.max(Q_predict, axis=1)\n return o_max, Q_max, Q_predict\n\n def softmax_option_target(self, inputs):\n Q_predict = []\n n = inputs.shape[0]\n for o in range(int(self.option_num)):\n action_i = self.predict_actor_target(inputs, o)\n Q_predict_i, _ = self.predict_critic_target(inputs, action_i)\n\n if o == 0:\n Q_predict = np.reshape( Q_predict_i, (-1, 1) )\n else:\n Q_predict = np.concatenate((Q_predict, np.reshape(Q_predict_i, (-1, 1)) ), axis= 1)\n\n p = softmax(Q_predict)\n o_softmax = p_sample(p)\n n = Q_predict.shape[0]\n Q_softmax = Q_predict[np.arange(n), o_softmax.flatten()]\n\n return o_softmax, np.reshape(Q_softmax, (n, 1)), Q_predict\n\n def predict_actor_option(self, inputs, option):\n return self.sess.run(self.actor_out_list[option], feed_dict={self.actor_state_input_list[option]: inputs})\n\n def predict_actor(self, inputs, options):\n action_list = []\n for o in range(self.option_num):\n action_o = self.predict_actor_option(inputs, o)\n action_list.append(action_o)\n\n n = inputs.shape[0]\n action = 0\n if n == 1 or np.isscalar(options):\n action = action_list[options]\n # calculate the action\n else:\n for i in range(n):\n if i == 0:\n action = action_list[int(options[i])][i, :]\n else:\n action = np.vstack((action, action_list[int(options[i])][i, :]))\n\n return action\n",
"import argparse\n\nimport numpy as np\n\nfrom envs.mujoco.utils.experiment_files import (get_latest_experiment_dir, get_model,\n get_latest_checkpoint, get_params)\nfrom envs.mujoco.utils.load_model import load_params, load_model\n\n\n# def load_params(params_path):\n# with open(params_path) as f:\n# data = json.load(f)\n# return data\n\n\n# def load_model(model_path, params):\n# env_cls = globals()[params['env']]\n# orig_env = env_cls(**params['env_options'])\n# env = DummyVecEnv([lambda: orig_env])\n\n# if params['alg'] == 'PPO2':\n# model = PPO2.load(model_path, env=env)\n# elif params['alg'] == 'SAC':\n# model = SAC.load(model_path, env=env)\n# else:\n# raise NotImplementedError\n\n# return orig_env, model\n\ndef replay_model(env, model, deterministic=True, num_episodes=None, record=False, render=True):\n # Don't record data forever.\n assert (not record) or (num_episodes is not None), \\\n \"there must be a finite number of episodes to record the data\"\n \n # Initialize counts and data.\n num_episodes = num_episodes if num_episodes else np.inf\n episode_count = 0\n infos = []\n\n # Simulate forward.\n obs = env.reset()\n while episode_count < num_episodes:\n # import pdb; pdb.set_trace()\n action, _states = model.predict(obs, deterministic=deterministic)\n clipped_action = np.clip(action, env.action_space.low,\n env.action_space.high)\n obs, reward, done, info = env.step(clipped_action, render=render)\n if record:\n infos.append(info)\n if done:\n obs = env.reset()\n episode_count += 1\n\n return infos\n\n\nif __name__ == '__main__':\n # Parse command line arguments.\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'directory', type=str, help='The directory of the experiment.')\n parser.add_argument(\n '--deterministic', action='store_true', help='Optionally simulate the deterministic system.')\n\n args = parser.parse_args()\n\n # Load the model if it's availeble, otherwise that latest checkpoint.\n experiment_dir = get_latest_experiment_dir(args.directory)\n params_path = get_params(experiment_dir)\n params = load_params(params_path)\n\n model_path = get_model(experiment_dir)\n if model_path is None:\n model_path = get_latest_checkpoint(experiment_dir)\n\n env, model = load_model(model_path, params)\n\n # Replay model.\n replay_model(env, model, deterministic=args.deterministic)\n",
"import numpy as np\n\nfrom envs.mujoco.controllers import PDController\nfrom envs.mujoco.controllers.test.common import create_sim\n\n\ndef test_pd_controller():\n\tsim = create_sim()\n\toptions = dict()\n\toptions['set_velocity'] = True\n\t\n\t# Test the null action with velocity control enabled\n\tcontroller = PDController(sim, **options)\n\tassert len(controller.action_space.low) == 14\n\t\n\tnull_action = np.zeros(14)\n\tcontroller.set_action(null_action)\n\tassert np.all(controller.get_torque() == np.zeros(7))\n\t\n\t# Test the null action with no velocity control\n\toptions = dict()\n\toptions['set_velocity'] = False\n\t\n\tcontroller = PDController(sim, **options)\n\tassert len(controller.action_space.low) == 7\n\t\n\t# The null action at the origin should be zero\n\tnull_action = np.zeros(7)\n\tcontroller.set_action(null_action)\n\tassert np.all(controller.get_torque() == np.zeros(7))\n\t\n\t# # Test the action space.\n\t# # import pdb; pdb.set_trace()\n\t# expected_action_limits = 300.*np.ones(7)\n\t# assert np.allclose(controller.action_space.low, -expected_action_limits)\n\t# assert np.allclose(controller.action_space.high, expected_action_limits)\n\t\n\t# # Test that the torque is linear in the action.\n\t# action_1 = np.array([1., 2., 3., 4., 5., 6., 7.])\n\t# action_2 = 2*np.array(action_1)\n\t# controller.set_action(action_1)\n\t# torque_1 = controller.get_torque()\n\t# controller.set_action(action_2)\n\t# torque_2 = controller.get_torque()\n\t\n\t# controller.set_action(action_1 + action_2)\n\t# assert np.allclose(controller.get_torque(), torque_1 + torque_2)\n\t\n\t# # Test that the action scaling works.\n\t# options_2 = dict()\n\t# options_2[\"action_scaling\"] = 2.\n\t# sim_2 = create_sim()\n\t# controller_2 = DirectTorqueController(sim_2, **options_2)\n\t\n\t# controller_2.set_action(action_1)\n\t# torque_ratio = controller_2.get_torque()/torque_1\n\t# scaling_ratio = options_2[\"action_scaling\"]/options[\"action_scaling\"]\n\t# assert np.allclose(torque_ratio, scaling_ratio)\n\n\nif __name__ == '__main__':\n\ttest_pd_controller()\n",
"from envs.robosuite.robosuite.controllers.base_controller import Controller\nfrom envs.robosuite.robosuite.utils.buffers import RingBuffer\nimport numpy as np\n\n\nclass JointVelocityController(Controller):\n \"\"\"\n Controller for controlling the robot arm's joint velocities. This is simply a P controller with desired torques\n (pre gravity compensation) taken to be proportional to the velocity error of the robot joints.\n\n NOTE: Control input actions assumed to be taken as absolute joint velocities. A given action to this\n controller is assumed to be of the form: (vel_j0, vel_j1, ... , vel_jn-1) for an n-joint robot\n\n Args:\n sim (MjSim): Simulator instance this controller will pull robot state updates from\n\n eef_name (str): Name of controlled robot arm's end effector (from robot XML)\n\n joint_indexes (dict): Each key contains sim reference indexes to relevant robot joint information, namely:\n\n :`'joints'`: list of indexes to relevant robot joints\n :`'qpos'`: list of indexes to relevant robot joint positions\n :`'qvel'`: list of indexes to relevant robot joint velocities\n\n actuator_range (2-tuple of array of float): 2-Tuple (low, high) representing the robot joint actuator range\n\n input_max (float or list of float): Maximum above which an inputted action will be clipped. Can be either be\n a scalar (same value for all action dimensions), or a list (specific values for each dimension). If the\n latter, dimension should be the same as the control dimension for this controller\n\n input_min (float or list of float): Minimum below which an inputted action will be clipped. Can be either be\n a scalar (same value for all action dimensions), or a list (specific values for each dimension). If the\n latter, dimension should be the same as the control dimension for this controller\n\n output_max (float or list of float): Maximum which defines upper end of scaling range when scaling an input\n action. Can be either be a scalar (same value for all action dimensions), or a list (specific values for\n each dimension). If the latter, dimension should be the same as the control dimension for this controller\n\n output_min (float or list of float): Minimum which defines upper end of scaling range when scaling an input\n action. Can be either be a scalar (same value for all action dimensions), or a list (specific values for\n each dimension). If the latter, dimension should be the same as the control dimension for this controller\n\n kp (float or list of float): velocity gain for determining desired torques based upon the joint vel errors.\n Can be either be a scalar (same value for all action dims), or a list (specific values for each dim)\n\n policy_freq (int): Frequency at which actions from the robot policy are fed into this controller\n\n velocity_limits (2-list of float or 2-list of list of floats): Limits (m/s) below and above which the magnitude\n of a calculated goal joint velocity will be clipped. 
Can be either be a 2-list (same min/max value for all\n joint dims), or a 2-list of list (specific min/max values for each dim)\n\n interpolator (Interpolator): Interpolator object to be used for interpolating from the current joint velocities\n to the goal joint velocities during each timestep between inputted actions\n\n **kwargs: Does nothing; placeholder to \"sink\" any additional arguments so that instantiating this controller\n via an argument dict that has additional extraneous arguments won't raise an error\n \"\"\"\n\n def __init__(self,\n sim,\n eef_name,\n joint_indexes,\n actuator_range,\n input_max=1,\n input_min=-1,\n output_max=1,\n output_min=-1,\n kp=0.25,\n policy_freq=20,\n velocity_limits=None,\n interpolator=None,\n **kwargs # does nothing; used so no error raised when dict is passed with extra terms used previously\n ):\n\n super().__init__(\n sim,\n eef_name,\n joint_indexes,\n actuator_range,\n )\n # Control dimension\n self.control_dim = len(joint_indexes[\"joints\"])\n\n # input and output max and min (allow for either explicit lists or single numbers)\n self.input_max = self.nums2array(input_max, self.joint_dim)\n self.input_min = self.nums2array(input_min, self.joint_dim)\n self.output_max = self.nums2array(output_max, self.joint_dim)\n self.output_min = self.nums2array(output_min, self.joint_dim)\n\n # gains and corresopnding vars\n self.kp = self.nums2array(kp, self.joint_dim)\n # if kp is a single value, map wrist gains accordingly (scale down x10 for final two joints)\n\n if type(kp) is float or type(kp) is int:\n # Scale kpp according to how wide the actuator range is for this robot\n low, high = self.actuator_limits\n self.kp = kp * (high - low)\n self.ki = self.kp * 0.005\n self.kd = self.kp * 0.001\n self.last_err = np.zeros(self.joint_dim)\n self.derr_buf = RingBuffer(dim=self.joint_dim, length=5)\n self.summed_err = np.zeros(self.joint_dim)\n self.saturated = False\n self.last_joint_vel = np.zeros(self.joint_dim)\n\n # limits\n self.velocity_limits = np.array(velocity_limits) if velocity_limits is not None else None\n\n # control frequency\n self.control_freq = policy_freq\n\n # interpolator\n self.interpolator = interpolator\n\n # initialize torques and goal velocity\n self.goal_vel = None # Goal velocity desired, pre-compensation\n self.current_vel = np.zeros(self.joint_dim) # Current velocity setpoint, pre-compensation\n self.torques = None # Torques returned every time run_controller is called\n\n def set_goal(self, velocities):\n \"\"\"\n Sets goal based on input @velocities.\n\n Args:\n velocities (Iterable): Desired joint velocities\n\n Raises:\n AssertionError: [Invalid action dimension size]\n \"\"\"\n # Update state\n self.update()\n\n # Otherwise, check to make sure velocities is size self.joint_dim\n assert len(velocities) == self.joint_dim, \\\n \"Goal action must be equal to the robot's joint dimension space! 
Expected {}, got {}\".format(\n self.joint_dim, len(velocities)\n )\n\n self.goal_vel = self.scale_action(velocities)\n if self.velocity_limits is not None:\n self.goal_vel = np.clip(self.goal_vel, self.velocity_limits[0], self.velocity_limits[1])\n\n if self.interpolator is not None:\n self.interpolator.set_goal(self.goal_vel)\n\n def run_controller(self):\n \"\"\"\n Calculates the torques required to reach the desired setpoint\n\n Returns:\n np.array: Command torques\n \"\"\"\n # Make sure goal has been set\n if self.goal_vel is None:\n self.set_goal(np.zeros(self.joint_dim))\n\n # Update state\n self.update()\n\n # Only linear interpolator is currently supported\n if self.interpolator is not None:\n if self.interpolator.order == 1:\n # Linear case\n self.current_vel = self.interpolator.get_interpolated_goal()\n else:\n # Nonlinear case not currently supported\n pass\n else:\n self.current_vel = np.array(self.goal_vel)\n\n # We clip the current joint velocity to be within a reasonable range for stability\n joint_vel = np.clip(self.joint_vel, self.output_min, self.output_max)\n\n # Compute necessary error terms for PID velocity controller\n err = self.current_vel - joint_vel\n derr = err - self.last_err\n self.last_err = err\n self.derr_buf.push(derr)\n\n # Only add to I component if we're not saturated (anti-windup)\n if not self.saturated:\n self.summed_err += err\n\n # Compute command torques via PID velocity controller plus gravity compensation torques\n torques = self.kp * err + \\\n self.ki * self.summed_err + \\\n self.kd * self.derr_buf.average + \\\n self.torque_compensation\n\n # Clip torques\n self.torques = self.clip_torques(torques)\n\n # Check if we're saturated\n self.saturated = False if np.sum(np.abs(self.torques - torques)) == 0 else True\n\n # Always run superclass call for any cleanups at the end\n super().run_controller()\n\n # Return final torques\n return self.torques\n\n def reset_goal(self):\n \"\"\"\n Resets joint velocity goal to be all zeros\n \"\"\"\n self.goal_vel = np.zeros(self.joint_dim)\n\n # Reset interpolator if required\n if self.interpolator is not None:\n self.interpolator.set_goal(self.goal_vel)\n\n @property\n def name(self):\n return 'JOINT_VELOCITY'\n",
"import copy as cp\n\nimport numpy as np\n\nfrom ..multi_tasks_learning.GPREPS import *\nfrom envs.mujoco.utils.quaternion import mat2Quat\nfrom .plot_result import *\n\nimport time\n\nfrom envs.abb.models import utils\n\n\nclass Solver(object):\n\tdef __init__(self, args, env, project_path):\n\t\t\n\t\tself.args = args\n\t\t\n\t\t# ##################### Training Parameters ######################\n\t\tself.K = self.args.num_policy_update # 上层policy训练循环总数\n\t\tself.N = self.args.num_real_episodes # 在上层policy的一个训练周期中,下层RL训练,改变context参数的次数\n\t\tself.n = 1 # 下层RL训练,每改变一次context参数,执行RL的episode数\n\t\tself.d = 1 # 下层RL每d个step更新一次网络参数\n\t\tself.M = self.args.num_simulated_episodes # 在上层policy的一个训练周期中,通过model获得artificial trajectory的过程中,改变context参数的次数\n\t\tself.L = 5 # 每改变一次context参数,通过model获得artificial trajectory的次数\n\t\t\n\t\t# ##################### Policy Parameters ######################\n\t\tself.MAX_EP_STEPS = self.args.max_episode_steps # RL的最大步数\n\t\tself.context_dim = 6 # including initial state (x, y, z) and terminal state (x, y, z)\n\t\tself.num_waypoints = 3 # depends on the task to solve ::: predefined\n\t\tself.eps_max = 0.25\n\t\tself.eps_min = 0.10\n\t\t\n\t\t# Impedance parameters ::: stiffness damping is derived from a constant relation Kd = beta * sqrt(Kp)\n\t\tself.contextual_impedance_dim = 12\n\t\tself.contextual_impedance_lower_bound = np.array([0, 0.2, 0.2, 1.0, 1.0, 0.5, 1.0, 1.0, 0.0, 0.5, 0.5, 2])\n\t\tself.contextual_impedance_upper_bound = np.array([0.0, 1.0, 1.0, 5.0, 5.0, 3.0, 5.0, 5.0, 2.0, 2.0, 2.0, 8])\n\t\t\n\t\tself.memory_dim = 500 # Context parameters\n\t\t# Give several initial expe\n\t\t# print(\"weights :::\", weights)\n\t\t# print(\"stiffness pos :::\", stiffness_pos)rt data ::: very important for policy search\n\t\tself.initial_a = np.array([1.0, 0.8, 0.1, 4, 4, 1, 3, 3, 1, 1, 1, 4])\n\t\t\n\t\t# attractor points\n\t\tself.pos_list = None\n\t\tself.quat_list = None\n\t\t\n\t\t# lower-level RL controller :::\n\t\tself.observation_dim = 12 # state\n\t\tself.action_dim = 6 # control command ::: action\n\t\t\n\t\tself.env = env\n\t\tself.done = False\n\t\tself.safe = True\n\t\tself.render = self.args.render\n\t\t\n\t\tself.file_name = ''\n\t\tself.project_path = project_path\n\t\tself.result_path = project_path + \"results/runs/mujoco\"\n\t\t\n\t\tself.evaluations = []\n\t\tself.eval_episodes_states = []\n\t\t\n\t\t# # Set seeds\n\t\t# torch.manual_seed(args.seed)\n\t\t# np.random.seed(args.seed)points\n\t\t\n\t\t# context parameter model\n\t\tself.replay_buffer = utils.ReplayBuffer()\n\t\t\n\t\t# lower-level memory\n\t\tself.replay_buffer_model = utils.ReplayBuffer(1e4)\n\t\t\n\t\t# Initialize GPREPS\n\t\tself.gpreps = GPREPS(self.context_dim, self.contextual_impedance_dim, self.memory_dim,\n\t\t\t\t\t\t\t self.contextual_impedance_lower_bound, self.contextual_impedance_upper_bound,\n\t\t\t\t\t\t\t self.initial_a, 0.25)\n\t\t\n\t\t# For model-based reps :::\n\t\t# self.r_model = R_MODEL(self.policy,\n\t\t# \t\t\t\t\t self.env,\n\t\t# \t\t\t\t\t self.context_dim,\n\t\t# \t\t\t\t\t self.contextual_impedance_dim,\n\t\t# \t\t\t\t\t self.observation_dim,\n\t\t# \t\t\t\t\t self.action_dim,\n\t\t# \t\t\t\t\t self.MAX_EP_STEPS)\n\t\t\n\t\t# self.s_model = S_MODEL()\n\t\t\n\t\tself.total_timesteps = 0\n\t\tself.episode_timesteps = 0\n\t\tself.episode_number = 0\n\t\tself.episode_reward = 0\n\t\tself.reward_scale = 0.001\n\t\tself.pre_num_steps = self.total_timesteps\n\t\tself.best_reward = 0.0\n\t\tself.timesteps_since_eval = 0\n\t\t\n\t\t\"\"\" training performance 
\"\"\"\n\t\tself.training_reward = []\n\t\tself.training_time = []\n\t\tself.training_states = []\n\t\tself.training_im_actions = []\n\t\t\n\t\t\"\"\" evaluation performance \"\"\"\n\t\tself.evaluations_reward = []\n\t\tself.evaluations_time = []\n\t\tself.evaluations_actions = []\n\t\tself.evaluations_im_actions = []\n\t\tself.evaluations_states = []\n\t\tself.evaluations_options = []\n\t\t\n\t\tself.episode_reward_average_list = []\n\t\tself.successful_rate_list = []\n\t\t\n\t\tself.episode_reward_average_list = []\n\t\tself.successful_rate_list = []\n\t\n\tdef reset(self):\n\t\t\"\"\"\n\t\t\tRandom offset value :::\n\t\t\"\"\"\n\t\t# initial point ::: add initial offset based on the top of hole\n\t\tinitial_offset = np.zeros(3)\n\t\tinitial_offset[:2] = np.random.uniform(-0.035, 0.035, size=2)\n\t\tinitial_offset[2] = 0.07\n\t\t\n\t\t# final optimal point\n\t\ttarget_pos, target_rot = self.env._get_target_obs()\n\t\tquat_hole_top = mat2Quat(target_rot[2])\n\t\tquat_hole_base = mat2Quat(target_rot[1])\n\t\t\n\t\t# attractor points ::: could be constant\n\t\tself.pos_list = np.concatenate(([target_pos[1]], [target_pos[2]], [target_pos[2] - [0.0, 0.0, -0.05]]), axis=0)\n\t\tself.quat_list = np.concatenate(([quat_hole_base], [quat_hole_top], [quat_hole_top]), axis=0)\n\t\t# print(\"pos_list :::\", self.pos_list)\n\t\t# print(\"quat_list :::\", self.quat_list)\n\t\t\n\t\tself.obs, self.initial_state, self.target_state = \\\n\t\t\tself.env.vic_reset(initial_offset=initial_offset, pos_list=self.pos_list, quat_list=self.quat_list)\n\t\t\n\t\tself.episode_reward = 0\n\t\tself.episode_timesteps = 0\n\t\t\n\t\t# successful episode number\n\t\t# self.episode_number = 0\n\t\tself.done = False\n\t\tself.safe = True\n\t\tself.state = self.initial_state\n\t\t\n\t\t# return context of one episode\n\t\t# self.context_state = np.concatenate((self.initial_state, self.target_state))\n\t\tself.context_state = self.get_context()\n\t\treturn self.context_state, self.state, self.done, self.safe\n\t\n\tdef get_context(self):\n\t\t# print('Get Context Parameter for one episode!!!')\n\t\tcontext = np.concatenate((self.initial_state, self.target_state))\n\t\treturn context\n\t\n\tdef train(self, w=None):\n\t\t\"\"\"\n\t\t\texecute reinforcement learning and store data for model training\n\t\t\"\"\"\n\t\tself.episode_number = 0\n\t\tself.episode_reward_average = 0\n\t\tfor i in range(self.N):\n\t\t\t# print(\"Iteration ::::::::::::::::::::::\", i)\n\t\t\t\n\t\t\tz, _, _, _ = self.reset()\n\t\t\tz = self.get_context().reshape(-1, self.context_dim)\n\t\t\t# print(\"context parameters :::\", z)\n\t\t\t\n\t\t\t# choose impedance parameters ::: only stiffness and weights, damping is related to stiffness\n\t\t\tif w is None:\n\t\t\t\tw = self.gpreps.choose_action(z)[0]\n\t\t\t\n\t\t\t# print(\"Choose Impedance Parameters :::\", w)\n\t\t\t\n\t\t\t# Extract Parameters for IMOGIC\n\t\t\tweights = np.array(w[:3])\n\t\t\t\n\t\t\tstiffness_pos = np.array([w[3:6], w[6:9], w[9:]])\n\t\t\t# print(\"weights :::\", weights)\n\t\t\t# print(\"stiffness pos :::\", stiffness_pos)\n\t\t\t\n\t\t\tstiffness_rotation = np.ones((3, 3)) # fixed in this experiment :::\n\t\t\t\n\t\t\tstiffness_list_test = np.concatenate((stiffness_pos, stiffness_rotation), axis=1)\n\t\t\t\n\t\t\t# stiffness_list = np.array([[4, 4, 0.1, 1, 1, 1],\n\t\t\t# \t\t\t\t\t\t [0.5, 0.5, 8, 1, 1, 1]])\n\t\t\t# weights_list = np.array([0.9, 0.1])\n\t\t\t\n\t\t\tstiffness_list = stiffness_list_test\n\t\t\tbeta = weights[0]\n\t\t\tdamping_list = 2 * 
np.sqrt(stiffness_list)\n\t\t\tweights_list = weights\n\t\t\tweights_list[0] = 1\n\t\t\t\n\t\t\t# Start realistic simulation\n\t\t\t# while self.episode_number < self.n:\n\t\t\t# print(\"Episode Number :::\", self.episode_number)\n\t\t\t# self.reset()\n\t\t\twhile self.episode_timesteps < self.MAX_EP_STEPS:\n\t\t\t\t\"\"\"\n\t\t\t\t\tbasic action from VIC\n\t\t\t\t\"\"\"\n\t\t\t\tobs, reward, self.done, info, self.state, action, self.safe = \\\n\t\t\t\t\tself.env.step_im(stiffness=stiffness_list, damping=damping_list, weights=weights_list)\n\t\t\t\t\n\t\t\t\tnew_obs = obs[:12]\n\t\t\t\t\n\t\t\t\tif self.render:\n\t\t\t\t\tself.env.render()\n\t\t\t\t# time.sleep(0.001)\n\t\t\t\t\n\t\t\t\t# store data into buffer ::: for RL training\n\t\t\t\t# self.replay_buffer.add((self.obs, new_obs, action, reward, self.done, 0))\n\t\t\t\t\n\t\t\t\t# here ::: self.obs and new_obs # for model training\n\t\t\t\t# self.replay_buffer_model.add((self.obs, new_obs, action))\n\t\t\t\t\n\t\t\t\tself.episode_reward += reward\n\t\t\t\tself.obs = new_obs\n\t\t\t\tself.episode_timesteps += 1\n\t\t\t\t\n\t\t\t\tif self.done or not self.safe or self.episode_timesteps == self.MAX_EP_STEPS - 1:\n\t\t\t\t\tprint('RL episode :::', self.episode_number,\n\t\t\t\t\t\t 'step :::', self.episode_timesteps,\n\t\t\t\t\t\t 'done? :::', self.done,\n\t\t\t\t\t\t 'safe? :::', self.safe,\n\t\t\t\t\t\t 'episode reward :::', self.episode_reward)\n\t\t\t\t\tif self.done:\n\t\t\t\t\t\t# calculate successful rate\n\t\t\t\t\t\tself.episode_number += 1\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t# print(\"z :::\", z)\n\t\t\t# print(\"w :::\", w)\n\t\t\t# print(\"episode reward :::\", self.episode_reward * 0.01)\n\t\t\t\n\t\t\tself.gpreps.store_realistic_data(z[0], w, self.episode_reward * self.reward_scale)\n\t\t\tself.episode_reward_average += self.episode_reward * self.reward_scale\n\t\t\n\t\treturn self.episode_number / self.N, self.episode_reward_average / self.N\n\t\n\tdef contextual_main(self):\n\t\t\"\"\"\n\t\t\tmain function of GPRREPS for contextual policy training\n\t\t\"\"\"\n\t\tself.episode_reward_average_list = []\n\t\tself.successful_rate_list = []\n\t\t\n\t\tfor k in range(self.K):\n\t\t\tprint('Training Cycle...', k)\n\t\t\tsuccessful_rate, episode_reward_average = self.train()\n\t\t\t\n\t\t\t# evaluate learning performance every cycle\n\t\t\tprint('Successful Rate...', successful_rate)\n\t\t\tprint(\"Average reward :::\", episode_reward_average)\n\t\t\tself.episode_reward_average_list.append(cp.deepcopy(episode_reward_average))\n\t\t\tself.successful_rate_list.append(cp.deepcopy(successful_rate))\n\t\t\t\n\t\t\t# run RL and collect data\n\t\t\t# print('Running RL...')\n\t\t\t\n\t\t\t# train reward and context model\n\t\t\t# print('Training GMR models...')\n\t\t\t# self.r_model.train_reward_model(self.replay_buffer_model)\n\t\t\t\n\t\t\t# self.s_model.train_context_model()\n\t\t\t\n\t\t\t##################################\n\t\t\t# Predict Rewards and Store Data #\n\t\t\t##################################\n\t\t\t# print('Generate artificial trajectories !!!')\n\t\t\t# for j in range(self.M):\n\t\t\t# \t# Predict Rewards\n\t\t\t# \tR = 0.\n\t\t\t# \tZ = self.get_context()\n\t\t\t# \tW = self.gpreps.choose_action(Z)\n\t\t\t#\n\t\t\t# \t# Predict L Trajectories\n\t\t\t# \tfor l in range(self.L):\n\t\t\t# \t\tR += self.r_model.trajectory(Z, W)\n\t\t\t# \treward = R / self.L\n\t\t\t# \tprint('Artificial running cycle', j + 1,\n\t\t\t# \t\t 'ra:', Z[0], 'rd:', Z[1],\n\t\t\t# \t\t 'reward:', reward)\n\t\t\t#\n\t\t\t# \t# Construct Artificial 
Dataset D\n\t\t\t# \tself.gpreps.store_simulated_data(Z, W, reward)\n\t\t\t\n\t\t\t# Sample and Update Policy\n\t\t\t# self.context_average = np.array([0.8, 0.8, 1.3, 0.8, 0.1, 1.29])\n\t\t\t#\n\t\t\t# print('memory :::', self.gpreps.memory_realistic)\n\t\t\t\n\t\t\tprint('****************** Start Training *****************')\n\t\t\teps_k = self.eps_min + k * (self.eps_max - self.eps_min) / self.K\n\t\t\tself.gpreps.learn(training_type='Realistic', eps=eps_k)\n\t\t\t\n\t\t\t# evaluation policy\n\t\t\tprint('************ Training cycle finished **************')\n\t\t\n\t\t# Plot learning results\n\t\tplot_single_reward(data=self.successful_rate_list, font_size=18)\n\t\tplot_single_reward(self.episode_reward_average_list, font_size=18, y_label_list=['Episode Reward Average'])\n\t\t\n\t\treturn self.episode_reward_average_list, self.successful_rate_list\n\t\n\tdef model_contextual_main(self):\n\t\t\"\"\"\n\t\t\tmain function of GPRREPS for contextual policy training\n\t\t\"\"\"\n\t\tself.episode_reward_average_list = []\n\t\tself.successful_rate_list = []\n\t\t\n\t\tfor k in range(self.K):\n\t\t\t\n\t\t\tprint('Training Cycle...', k)\n\t\t\tsuccessful_rate, episode_reward_average = self.train()\n\t\t\t\n\t\t\t# evaluate learning performance every cycle\n\t\t\tprint('Successful Rate...', successful_rate)\n\t\t\tprint(\"Average reward :::\", episode_reward_average)\n\t\t\tself.episode_reward_average_list.append(cp.deepcopy(episode_reward_average))\n\t\t\tself.successful_rate_list.append(cp.deepcopy(successful_rate))\n\t\t\t\n\t\t\t# run RL and collect data\n\t\t\t# print('Running RL...')\n\t\t\t\n\t\t\t# train reward and context model\n\t\t\t# print('Training GMR models...')\n\t\t\t# self.r_model.train_reward_model(self.replay_buffer_model)\n\t\t\t\n\t\t\t# self.s_model.train_context_model()\n\t\t\t\n\t\t\t##################################\n\t\t\t# Predict Rewards and Store Data #\n\t\t\t##################################\n\t\t\t# print('Generate artificial trajectories !!!')\n\t\t\t# for j in range(self.M):\n\t\t\t# \t# Predict Rewards\n\t\t\t# \tR = 0.\n\t\t\t# \tZ = self.get_context()\n\t\t\t# \tW = self.gpreps.choose_action(Z)\n\t\t\t#\n\t\t\t# \t# Predict L Trajectories\n\t\t\t# \tfor l in range(self.L):\n\t\t\t# \t\tR += self.r_model.trajectory(Z, W)\n\t\t\t# \treward = R / self.L\n\t\t\t# \tprint('Artificial running cycle', j + 1,\n\t\t\t# \t\t 'ra:', Z[0], 'rd:', Z[1],\n\t\t\t# \t\t 'reward:', reward)\n\t\t\t#\n\t\t\t# \t# Construct Artificial Dataset D\n\t\t\t# \tself.gpreps.store_simulated_data(Z, W, reward)\n\t\t\t\n\t\t\t# Sample and Update Policy\n\t\t\t# self.context_average = np.array([0.8, 0.8, 1.3, 0.8, 0.1, 1.29])\n\t\t\t#\n\t\t\t# print('memory :::', self.gpreps.memory_realistic)\n\t\t\t\n\t\t\tif k > self.args.start_policy_update_idx:\n\t\t\t\tself.gpreps.train_reward_model(sample_number=100, type='GP')\n\t\t\t\t\n\t\t\t\tfor m in range(self.M):\n\t\t\t\t\tz, _, _, _ = self.reset()\n\t\t\t\t\tz = self.get_context().reshape(-1, self.context_dim)\n\t\t\t\t\tw = self.gpreps.choose_action(z)[0]\n\t\t\t\t\tsample_reward = self.gpreps.generate_artifical_trajectory([np.array(z[0])], [w])\n\t\t\t\t\tself.gpreps.store_simulated_data(z[0], w, sample_reward)\n\t\t\t\n\t\t\tif k > self.args.start_policy_update_idx:\n\t\t\t\tprint('****************** Start Training *****************')\n\t\t\t\teps_k = self.eps_min + k * (self.eps_max - self.eps_min) / self.K\n\t\t\t\tself.gpreps.learn(training_type='Simulated', eps=eps_k)\n\t\t\t\n\t\t\tif k % 100 == 0:\n\t\t\t\t# Plot learning 
results\n\t\t\t\tplot_single_reward(data=self.successful_rate_list, font_size=18,\n\t\t\t\t\t\t\t\t y_label_list=['Successful Rate'])\n\t\t\t\tplot_single_reward(data=self.episode_reward_average_list, font_size=18,\n\t\t\t\t\t\t\t\t y_label_list=['Episode Reward Average'])\n\t\t\n\t\t# Plot learning results\n\t\tplot_single_reward(data=self.successful_rate_list, font_size=18)\n\t\tplot_single_reward(data=self.episode_reward_average_list, font_size=18, y_label_list=['Episode Reward Average'])\n\t\t\n\t\treturn self.episode_reward_average_list, self.successful_rate_list\n\t\n\tdef CGPUCB_main(self, num_rolluts=1000, beta=100):\n\t\t\"\"\"\n\t\t\tmain function of GPUCB for contextual policy training\n\t\t\"\"\"\n\t\t# give initial parameters\n\t\tnum_mesh_grid = 3\n\t\tmesh_grid_dist = np.array([0., 0.4, 0.4, 2.0, 2.0, 1.25, 2.0, 2.0, 1.0, 0.75, 0.75, 4.0])\n\t\tcontextual_impedance_lower_bound = np.array([0.0, 0.2, 0.2, 1.0, 1.0, 0.5, 1.0, 1.0, 0.0, 0.5, 0.5, 2])\n\t\tcontextual_impedance_upper_bound = np.array([0.0, 1.4, 1.4, 7.0, 7.0, 4.25, 7.0, 7.0, 3.0, 2.75, 2.75, 12])\n\t\t\n\t\tpara_samples = [np.array([0., 0., 0.])]\n\t\tfor i in range(1, self.contextual_impedance_dim):\n\t\t\tsample_list = np.arange(contextual_impedance_lower_bound[i], contextual_impedance_upper_bound[i],\n\t\t\t\t\t\t\t\t\tmesh_grid_dist[i])\n\t\t\tpara_samples.append(sample_list)\n\t\t\n\t\t# print(\"para_samples :::\", para_samples)\n\t\t\n\t\tself.meshgrid = np.array(\n\t\t\tnp.meshgrid(para_samples[0], para_samples[1], para_samples[2], para_samples[3], para_samples[4],\n\t\t\t\t\t\tpara_samples[5],\n\t\t\t\t\t\tpara_samples[6], para_samples[7], para_samples[8], para_samples[9], para_samples[10],\n\t\t\t\t\t\tpara_samples[11]))\n\t\t\n\t\tself.sample_grid = self.meshgrid.reshape(self.meshgrid.shape[0], -1).T\n\t\t\n\t\tself.mu = np.array([0. 
for _ in range(self.sample_grid.shape[0])])\n\t\tself.sigma = np.array([0.5 for _ in range(self.sample_grid.shape[0])])\n\t\tself.beta = beta\n\t\t# evaluation results\n\t\t\n\t\tself.realistic_sample_list = []\n\t\tself.episode_reward_average_list = []\n\t\tself.successful_rate_list = []\n\t\t\n\t\tgp = GaussianProcessRegressor()\n\t\t\n\t\tfor k in range(num_rolluts):\n\t\t\tgrid_idx = self.argmax_ucb(self.mu, self.sigma, self.beta)\n\t\t\tprint(\"gird_idx\", grid_idx)\n\t\t\tprint(\"sample_gird\", self.sample_grid)\n\t\t\toptimal_sample = self.sample_grid[grid_idx]\n\t\t\tprint(\"optimal_sample\", optimal_sample)\n\t\t\tsuccessful_rate, episode_reward_average = self.train(w=optimal_sample)\n\t\t\tself.realistic_sample_list.append(optimal_sample)\n\t\t\tself.episode_reward_average_list.append(episode_reward_average)\n\t\t\tself.successful_rate_list.append(successful_rate)\n\t\t\t\n\t\t\tgp.fit(self.realistic_sample_list, self.episode_reward_average_list)\n\t\t\tself.mu, self.sigma = gp.predict(self.sample_grid, return_std=True)\n\t\t\n\t\t# Plot learning results\n\t\tplot_single_reward(data=self.successful_rate_list, font_size=18)\n\t\tplot_single_reward(self.episode_reward_average_list, font_size=18,\n\t\t\t\t\t\t y_label_list=['Episode Reward Average'])\n\t\t\n\t\treturn self.episode_reward_average_list, self.successful_rate_list\n\t\n\tdef argmax_ucb(self, mu, sigma, beta):\n\t\treturn np.argmax(mu + sigma * np.sqrt(beta))\n\t\n\tdef eval_once(self):\n\t\t# self.pbar.update(self.total_timesteps - self.pre_num_steps)\n\t\t# self.pre_num_steps = self.total_timesteps\n\t\t# if self.timesteps_since_eval >= self.args.eval_freq:\n\t\t# self.timesteps_since_eval %= self.args.eval_freq\n\t\t\"\"\" evaluate the policy for once \"\"\"\n\t\tavg_reward, avg_time, eval_actions, eval_states, eval_im_actions, eval_options = \\\n\t\t\tevaluate_assembly_policy(self.env, self.policy, self.args)\n\t\tself.evaluations_reward.append(cp.deepcopy(avg_reward))\n\t\tself.evaluations_time.append(cp.deepcopy(avg_time))\n\t\tself.evaluations_actions.append(cp.deepcopy(eval_actions))\n\t\tself.evaluations_states.append(cp.deepcopy(eval_states))\n\t\tself.evaluations_im_actions.append(cp.deepcopy(eval_im_actions))\n\t\tself.evaluations_options.append(cp.deepcopy(eval_options))\n\t\tprint('evaluations_reward :::::::::::::::', self.evaluations_reward)\n\t\tprint('evaluations_time :::::::::::::::::', self.evaluations_time)\n\t\t\n\t\t\"\"\" save test data numpy \"\"\"\n\t\tnp.save(self.log_dir + \"/test_reward\", self.evaluations_reward)\n\t\tnp.save(self.log_dir + \"/test_time\", self.evaluations_time)\n\t\tnp.save(self.log_dir + \"/test_actions\", self.evaluations_actions)\n\t\tnp.save(self.log_dir + \"/test_options\", self.evaluations_options)\n\t\tnp.save(self.log_dir + \"/test_im_actions\", self.evaluations_im_actions)\n\t\tnp.save(self.log_dir + \"/test_states\", self.evaluations_states)\n\t\t\n\t\tutils.write_table(self.log_dir + \"/test_reward\", np.asarray(self.evaluations_reward))\n\t\tutils.write_table(self.log_dir + \"/test_time\", np.asarray(self.evaluations_time))\n\t\t\n\t\tif self.args.save_all_policy:\n\t\t\tself.policy.save(\n\t\t\t\tself.file_name + str(int(int(self.total_timesteps / self.args.eval_freq) * self.args.eval_freq)),\n\t\t\t\tdirectory=self.log_dir)\n\t\telse:\n\t\t\tself.policy.save(self.file_name, directory=self.log_dir)\n\n\n# print('total_timesteps ::::::::::::::::::::::::::', self.total_timesteps)\n# print('episode_reward :::::::::::::::::::::::::::', self.episode_reward)\n# 
self.training_reward.append(cp.deepcopy(self.episode_reward))\n# self.training_time.append(cp.deepcopy(self.episode_timesteps))\n# self.training_states.append(cp.deepcopy(epi_states))\n# self.training_im_actions.append(cp.deepcopy(epi_actions))\n#\n# np.save(self.log_dir + \"/train_reward\", self.training_reward)\n# np.save(self.log_dir + \"/train_time\", self.training_time)\n# np.save(self.log_dir + \"/train_states\", self.training_states)\n# np.save(self.log_dir + \"/train_im_actions\", self.training_im_actions)\n#\n# utils.write_table(self.log_dir + \"/train_reward\", np.asarray(self.training_reward))\n# utils.write_table(self.log_dir + \"/train_time\", np.asarray(self.training_time))\n\n\ndef evaluate_assembly_policy(env, policy, args):\n\t\"\"\"\n\t\tRuns policy for X episodes and returns average reward\n\t\"\"\"\n\tavg_reward = 0.\n\teval_actions = []\n\teval_im_actions = []\n\teval_states = []\n\teval_options = []\n\tstart_time = time.time()\n\tfor _ in range(args.num_eval_episodes):\n\t\tobs, state, done = env.reset()\n\t\tdone = False\n\t\tepisode_step = 0\n\t\tepi_actions = []\n\t\tepi_im_actions = []\n\t\tepi_options = []\n\t\tepi_states = []\n\t\twhile not done and episode_step < args.max_episode_steps:\n\t\t\tif 'HRLACOP' in args.policy_name:\n\t\t\t\taction, option = policy.select_evaluate_action([np.array(obs)])\n\t\t\t\tepi_options.append(cp.deepcopy(option))\n\t\t\telse:\n\t\t\t\taction = policy.select_action(np.array(obs))\n\t\t\t\n\t\t\tepi_states.append(cp.deepcopy(state))\n\t\t\tobs, state, reward, done, _, execute_action = env.step(action)\n\t\t\tepi_actions.append(cp.deepcopy(action))\n\t\t\tepi_im_actions.append(cp.deepcopy(execute_action))\n\t\t\tavg_reward += reward\n\t\t\tepisode_step += 1\n\t\teval_states.append(cp.deepcopy(epi_states))\n\t\teval_actions.append(cp.deepcopy(epi_actions))\n\t\teval_im_actions.append(cp.deepcopy(epi_im_actions))\n\t\teval_options.append(cp.deepcopy(epi_options))\n\tavg_time = (time.time() - start_time) / args.num_eval_episodes\n\tavg_reward /= args.eval_episodes\n\treturn avg_reward, avg_time, eval_actions, eval_states, eval_im_actions, eval_options\n\n# class GPREPS(object):\n# \tdef __init__(self, w_dim, z_dim, memory_dim, w_bound):\n# \t\t# initialize parameters\n# \t\tself.memory = []\n# \t\tself.pointer = 0\n# \t\tself.w_dim, self.z_dim, self.memory_dim, self.w_bound = w_dim, z_dim, memory_dim, w_bound\n#\n# \t\t# build actor\n# \t\tself.a = np.ones((w_dim, 1), dtype=np.float32)\n# \t\tself.A = np.ones((w_dim, z_dim), dtype=np.float32)\n# \t\tself.COV = np.ones((w_dim, w_dim), dtype=np.float32)\n#\n# \tdef choose_action(self, z):\n# \t\tz = np.array([z])\n# \t\tu = self.a + np.dot(self.A, z.transpose())\n# \t\tu = u.transpose()[0]\n# \t\treturn np.random.multivariate_normal(mean=u, cov=self.COV, size=1)\n#\n# \tdef learn(self, z_):\n# \t\teta, theta = argmin(self.memory, z_, self.z_dim)\n#\n# \t\tp = 0.\n# \t\tP_ = []\n# \t\tZ = []\n# \t\tB = []\n# \t\tfor i in range(self.memory_dim):\n# \t\t\tz, w, r = self.memory[i]\n# \t\t\tz = np.array([z])\n# \t\t\tw = np.array(w)\n# \t\t\tr = np.array([r])\n# \t\t\tp = np.exp((r - np.dot(z, theta)) / eta)\n# \t\t\tz_ = np.c_[np.array([1.]), z]\n# \t\t\tZ.append(z_[0])\n# \t\t\tB.append(w[0])\n# \t\t\tP_.append(p[0])\n# \t\tp, B, Z = np.array(p), np.array(B), np.array(Z)\n# \t\tP = P_ * np.eye(self.memory_dim)\n#\n# \t\t# calculate mean action\n# \t\ttarget1 = np.linalg.inv(np.dot(np.dot(Z.transpose(), P), Z))\n# \t\t# print(np.shape(P), np.shape(Z.transpose()), np.shape(B))\n# 
\t\ttarget2 = np.dot(np.dot(Z.transpose(), P), B)\n# \t\ttarget = np.dot(target1, target2).transpose()\n# \t\tself.a = target[:, :1]\n# \t\tself.A = target[:, 1:]\n#\n# \t\t# calculate the COV\n# \t\tErr = 0\n# \t\tfor i in range(self.memory_dim):\n# \t\t\tz, w, r = self.memory[i]\n# \t\t\tz = np.array([z])\n# \t\t\tw = np.array([w])\n# \t\t\terr = w - self.a - np.dot(self.A, z.transpose())\n# \t\t\tErr += np.dot(err, err.transpose()) * P_[i]\n# \t\tself.COV = Err / np.sum(P_)\n# \t\t# reduce COV to 2 dimensions, otherwise its shape is (1, 12, 12, 1)\n# \t\tCOV = np.zeros([12, 12])\n# \t\tfor i in range(12):\n# \t\t\tfor j in range(12):\n# \t\t\t\tCOV[i][j] = self.COV[0][i][j][0]\n# \t\tprint(np.shape(self.COV))\n# \t\tprint(np.shape(COV))\n# \t\tself.COV = COV\n# \t\tprint('Contextual policy search upper level parameters updated')\n#\n# \tdef store_data(self, z, w, r):\n# \t\ttransition = [z, w, [r]]\n# \t\tif len(self.memory) == self.memory_dim:\n# \t\t\tindex = self.pointer % self.memory_dim # replace the old memory with new memory\n# \t\t\tself.memory[index] = transition\n# \t\telse:\n# \t\t\tself.memory.append(transition)\n# \t\tself.pointer += 1\n\n# **** Program structure ****\n# main function: solver.contextual_main()\n# run RL: solver.train()\n# upper-level policy: class GPREPS\n# get environment variable z: solver.get_context()\n# model: class R_MODEL\n# train model: R_MODEL.train_reward_model()\n# generate trajectory: R_MODEL.trajectory()\n# w=u(z): class S_MODEL (unfinished); currently reads solver.get_context() directly\n\n# **** Environment variable w settings ****\n# w = np.array([ra, rd])\n# ra: velocity coefficient (0.6~1), multiplied onto the action\n# rd: depth coefficient (0.6~1), multiplied onto the depth\n\n# **** Parameters to be determined: ****\n# solver.init() parameters K N d n M L w_boundary ......\n# env\n# safe or not: magnitude of the safety force boundary (solver.train(), model.trajectory())\n# trajectory initial position obs and its variance (model.trajectory())\n# trajectory reward function (model.trajectory())\n"
] | [
[
"numpy.amax",
"numpy.absolute",
"numpy.random.random",
"numpy.multiply",
"torch.zeros",
"numpy.reshape",
"numpy.arange",
"torch.Tensor",
"numpy.cumsum",
"numpy.stop_gradient",
"numpy.max",
"numpy.argmax",
"numpy.mean",
"torch.FloatTensor",
"numpy.isscalar",
"torch.cuda.is_available",
"numpy.matlib.repmat",
"numpy.sum"
],
[
"numpy.clip"
],
[
"numpy.zeros"
],
[
"numpy.array",
"numpy.zeros",
"numpy.abs",
"numpy.clip"
],
[
"numpy.sqrt",
"numpy.meshgrid",
"numpy.asarray",
"numpy.arange",
"numpy.save",
"numpy.ones",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
erprashu/Metal_erning | [
"79d1a6a457be37258df50a9194946caeb86845a2"
] | [
"test.py"
] | [
"# -*- coding: utf-8 -*-\nimport argparse\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nfrom torch.autograd import Variable\n\nfrom tqdm import tqdm\n\nfrom models.protonet_embedding import ProtoNetEmbedding\nfrom models.R2D2_embedding import R2D2Embedding\nfrom models.ResNet12_embedding import resnet12\n\nfrom models.classification_heads import ClassificationHead, R2D2Head\n\nfrom utils import pprint, set_gpu, Timer, count_accuracy, log\n\nimport random\nimport numpy as np\nimport os\nimport pdb\n\ndef get_model(options):\n # Choose the embedding network\n if options.network == 'ProtoNet':\n network = ProtoNetEmbedding().cuda()\n elif options.network == 'R2D2':\n network = R2D2Embedding().cuda()\n elif options.network == 'ResNet':\n if options.dataset == 'miniImageNet' or options.dataset == 'tieredImageNet':\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=5).cuda()\n network = torch.nn.DataParallel(network)\n else:\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2).cuda()\n network = torch.nn.DataParallel(network)\n else:\n print (\"Cannot recognize the network type\")\n assert(False)\n \n # Choose the classification head\n if opt.head == 'ProtoNet':\n cls_head = ClassificationHead(base_learner='ProtoNet').cuda() \n elif opt.head == 'Ridge':\n cls_head = ClassificationHead(base_learner='Ridge').cuda()\n elif opt.head == 'R2D2':\n cls_head = R2D2Head().cuda()\n elif opt.head == 'SVM':\n cls_head = ClassificationHead(base_learner='SVM-CS').cuda()\n else:\n print (\"Cannot recognize the classification head type\")\n assert(False)\n \n return (network, cls_head)\n\ndef get_dataset(options):\n # Choose the embedding network\n if options.dataset == 'miniImageNet':\n from data.mini_imagenet import MiniImageNet, FewShotDataloader\n dataset_test = MiniImageNet(phase='test')\n data_loader = FewShotDataloader\n elif options.dataset == 'tieredImageNet':\n from data.tiered_imagenet import tieredImageNet, FewShotDataloader\n dataset_test = tieredImageNet(phase='test')\n data_loader = FewShotDataloader\n elif options.dataset == 'CIFAR_FS':\n from data.CIFAR_FS import CIFAR_FS, FewShotDataloader\n dataset_test = CIFAR_FS(phase='test')\n data_loader = FewShotDataloader\n elif options.dataset == 'FC100':\n from data.FC100 import FC100, FewShotDataloader\n dataset_test = FC100(phase='test')\n data_loader = FewShotDataloader\n else:\n print (\"Cannot recognize the dataset type\")\n assert(False)\n \n return (dataset_test, data_loader)\n\n\ndef self_mix(data):\n size = data.size()\n W = size[-1]\n H = size[-2]\n # uniform\n cx = np.random.randint(W)\n cy = np.random.randint(H)\n\n cut_w = W//2\n cut_h = H//2\n\n bbx1 = np.clip(cx - cut_w // 2, 0, W)\n bby1 = np.clip(cy - cut_h // 2, 0, H)\n bbx2 = np.clip(cx + cut_w // 2, 0, W)\n bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n while True:\n bbxn = np.random.randint(0, W-(bbx2-bbx1))\n bbyn = np.random.randint(0, H-(bby2-bby1))\n\n if bbxn != bbx1 or bbyn != bby1:\n break\n if (bbx2 - bbx1) == (bby2 - bby1):\n k = random.sample([0, 1, 2, 3], 1)[0]\n else:\n k = 0\n data[:, :, bbx1:bbx2, bby1:bby2] = torch.rot90(data[:, :, bbxn:bbxn + (bbx2-bbx1), bbyn:bbyn + (bby2-bby1)], k, [2,3])\n #data[:, :, bbx1:bbx2, bby1:bby2] = data[:, :, bbxn:bbxn + (bbx2-bbx1), bbyn:bbyn + (bby2-bby1)]\n\n return data\n\ndef flip(x, dim):\n indices = [slice(None)] * x.dim()\n indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,\n dtype=torch.long, device=x.device)\n return x[tuple(indices)]\n\ndef 
build_grid(source_size,target_size):\n k = float(target_size)/float(source_size)\n direct = torch.linspace(-k,k,target_size).unsqueeze(0).repeat(target_size,1).unsqueeze(-1)\n full = torch.cat([direct,direct.transpose(1,0)],dim=2).unsqueeze(0)\n\n return full.cuda()\n\ndef random_crop_grid(x,grid):\n delta = x.size(2)-grid.size(1)\n grid = grid.repeat(x.size(0),1,1,1).cuda()\n #Add random shifts by x\n grid[:,:,:,0] = grid[:,:,:,0]+ torch.FloatTensor(x.size(0)).cuda().random_(0, delta).unsqueeze(-1).unsqueeze(-1).expand(-1, grid.size(1), grid.size(2)) /x.size(2)\n #Add random shifts by y\n grid[:,:,:,1] = grid[:,:,:,1]+ torch.FloatTensor(x.size(0)).cuda().random_(0, delta).unsqueeze(-1).unsqueeze(-1).expand(-1, grid.size(1), grid.size(2)) /x.size(2)\n\n return grid\n\ndef random_cropping(batch, t):\n #Building central crop of t pixel size\n grid_source = build_grid(batch.size(-1),t)\n #Make radom shift for each batch\n grid_shifted = random_crop_grid(batch,grid_source)\n #Sample using grid sample\n sampled_batch = F.grid_sample(batch, grid_shifted, mode='nearest')\n\n return sampled_batch\n\ndef shot_aug(data_support, labels_support, n_support, method, opt):\n size = data_support.shape\n if method == \"fliplr\":\n n_support = opt.s_du * n_support\n data_shot = flip(data_support, -1)\n data_support = torch.cat((data_support, data_shot), dim = 1)\n labels_support = torch.cat((labels_support, labels_support), dim = 1)\n elif method == \"random_crop\":\n n_support = opt.s_du * n_support\n data_shot = F.pad(data_support.view([-1] + list(data_support.shape[-3:])), (4,4,4,4))\n data_shot = random_cropping(data_shot, 32)\n data_support = torch.cat((data_support, data_shot.view([size[0], -1] + list(data_support.shape[-3:]))), dim = 1)\n labels_support = torch.cat((labels_support, labels_support), dim = 1)\n return data_support, labels_support, n_support\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', default='0')\n parser.add_argument('--load', default='./experiments/exp_1/best_model.pth',\n help='path of the checkpoint file')\n parser.add_argument('--episode', type=int, default=1000,\n help='number of episodes to test')\n parser.add_argument('--way', type=int, default=5,\n help='number of classes in one test episode')\n parser.add_argument('--shot', type=int, default=1,\n help='number of support examples per training class')\n parser.add_argument('--shot_aug', '-shotaug', default=[], nargs='+', type=str,\n help='If use shot level data augmentation.')\n parser.add_argument('--s_du', type=int, default=1,\n help='number of support examples augmented by shot')\n parser.add_argument('--query', type=int, default=15,\n help='number of query examples per training class')\n parser.add_argument('--network', type=str, default='ProtoNet',\n help='choose which embedding network to use. ProtoNet, R2D2, ResNet')\n parser.add_argument('--head', type=str, default='ProtoNet',\n help='choose which embedding network to use. ProtoNet, Ridge, R2D2, SVM')\n parser.add_argument('--dataset', type=str, default='miniImageNet',\n help='choose which classification head to use. 
miniImageNet, tieredImageNet, CIFAR_FS, FC100')\n\n opt = parser.parse_args()\n (dataset_test, data_loader) = get_dataset(opt)\n\n dloader_test = data_loader(\n dataset=dataset_test,\n nKnovel=opt.way,\n nKbase=0,\n nExemplars=opt.shot, # num training examples per novel category\n nTestNovel=opt.query * opt.way, # num test examples for all the novel categories\n nTestBase=0, # num test examples for all the base categories\n batch_size=1,\n num_workers=1,\n epoch_size=opt.episode, # num of batches per epoch\n )\n\n set_gpu(opt.gpu)\n \n # Define the models\n (embedding_net, cls_head) = get_model(opt)\n \n # Load saved model checkpoints\n saved_models = torch.load(opt.load)\n embedding_net.load_state_dict(saved_models['embedding'])\n embedding_net.eval()\n cls_head.load_state_dict(saved_models['head'])\n cls_head.eval()\n \n # Evaluate on test set\n test_accuracies = []\n for i, batch in enumerate(tqdm(dloader_test()), 1):\n data_support, labels_support, data_query, labels_query, _, _ = [x.cuda() for x in batch]\n n_support = opt.way * opt.shot\n n_query = opt.way * opt.query\n \n for method in opt.shot_aug:\n data_support, labels_support, n_support = shot_aug(data_support, labels_support, n_support, method, opt)\n\n with torch.no_grad():\n emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))\n emb_support = emb_support.reshape(1, n_support, -1)\n \n emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))\n emb_query = emb_query.reshape(1, n_query, -1)\n\n if opt.head == 'SVM':\n logits = cls_head(emb_query, emb_support, labels_support, opt.way, opt.shot, maxIter=3)\n else:\n logits = cls_head(emb_query, emb_support, labels_support, opt.way, opt.shot)\n\n acc = count_accuracy(logits.reshape(-1, opt.way), labels_query.reshape(-1))\n test_accuracies.append(acc.item())\n \n avg = np.mean(np.array(test_accuracies))\n std = np.std(np.array(test_accuracies))\n ci = std / np.sqrt(i + 1)\n \n if i % 50 == 0:\n print('Episode [{}/{}]:\\t\\t\\tAccuracy: {:.2f} ± {:.2f} % ({:.2f} %)'\\\n .format(i, opt.episode, avg, ci, acc))\n"
] | [
[
"torch.linspace",
"numpy.sqrt",
"numpy.clip",
"torch.load",
"torch.cat",
"torch.rot90",
"torch.nn.functional.grid_sample",
"torch.no_grad",
"torch.nn.DataParallel",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mdeegen/pb_bss | [
"e8c380e27d82707e8d2b2d83c5c918d47ea5d89f"
] | [
"tests/test_distribution/test_von_mises_fisher.py"
] | [
"import numpy as np\nfrom numpy.testing import assert_allclose, assert_equal\nimport unittest\nfrom pb_bss.distribution import VonMisesFisher\nfrom pb_bss.distribution import VonMisesFisherTrainer\n\n\nclass TestGaussian(unittest.TestCase):\n def test_shapes(self):\n samples = 10000\n mean = np.ones((3,))\n covariance = np.eye(3)\n x = np.random.multivariate_normal(mean, covariance, size=(samples,))\n model = VonMisesFisherTrainer().fit(x)\n assert_equal(model.mean.shape, mean.shape)\n assert_equal(model.concentration.shape, ())\n\n def test_shapes_independent_dims(self):\n samples = 10000\n mean = np.ones((3,))\n covariance = np.eye(3)\n x = np.random.multivariate_normal(mean, covariance, size=(13, samples,))\n model = VonMisesFisherTrainer().fit(x)\n assert_equal(model.mean.shape, np.tile(mean, (13, 1)).shape)\n assert_equal(model.concentration.shape, (13,))\n\n def test_von_mises_fisher(self):\n samples = 10000\n mean = np.ones((3,))\n mean /= np.linalg.norm(mean, axis=-1)\n concentration = 50\n\n # ToDo: Implement VonMisesFisher(...).sample(...)\n return\n\n x = VonMisesFisher(mean, concentration).sample(size=(samples,))\n model = VonMisesFisherTrainer().fit(x)\n assert_allclose(model.mean, mean, atol=0.1)\n assert_allclose(model.covariance, concentration, atol=0.1)\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.random.multivariate_normal",
"numpy.eye",
"numpy.linalg.norm",
"numpy.tile",
"numpy.ones",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vamships/RelationPrediction | [
"45f48e8d09331e7244a7fe8d2d9d0fefa7e1f76b"
] | [
"code/extras/highway_layer.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom model import Model\nfrom common.shared_functions import glorot_variance, make_tf_variable, make_tf_bias\n\nclass HighwayLayer(Model):\n vertex_embedding_function = {'train': None, 'test': None}\n\n def __init__(self, shape, next_component=None, next_component_2=None):\n self.next_component = next_component\n self.next_component_2 = next_component_2\n self.shape = shape\n\n def compute_vertex_embeddings(self, mode='train'):\n if self.vertex_embedding_function[mode] is None:\n code_1 = self.next_component.get_all_codes(mode=mode)[0]\n code_2 = self.next_component_2.get_all_codes(mode=mode)[0]\n\n gates = self.get_gates(mode=mode)\n\n self.vertex_embedding_function[mode] = gates * code_1 + (1-gates) * code_2\n\n return self.vertex_embedding_function[mode]\n\n def local_initialize_train(self):\n variance = glorot_variance(self.shape)\n\n self.W = make_tf_variable(0, variance, self.shape)\n self.b = make_tf_bias(self.shape[1], init=1)\n\n def local_get_weights(self):\n return [self.W, self.b]\n\n def get_gates(self, mode='train'):\n code = self.next_component_2.get_all_codes(mode=mode)[0]\n hidden = tf.matmul(code, self.W) + self.b\n\n return tf.nn.sigmoid(hidden)\n\n def get_all_codes(self, mode='train'):\n collected_messages = self.compute_vertex_embeddings(mode=mode)\n\n return collected_messages, None, collected_messages\n\n def get_all_subject_codes(self, mode='train'):\n return self.compute_vertex_embeddings(mode=mode)\n\n def get_all_object_codes(self, mode='train'):\n return self.compute_vertex_embeddings(mode=mode)\n"
] | [
[
"tensorflow.matmul",
"tensorflow.nn.sigmoid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
seba-1511/gsoc15-demo | [
"c57d5cce7903511edd4048f8bfed2ad0dc6f6b6b"
] | [
"keras/preprocessing/sequence.py"
] | [
"import numpy as np\n\ndef pad_sequences(seqs, maxlen=None, dtype='int32'):\n \"\"\"\n Pad each sequence to the same lenght: \n the lenght of the longuest sequence.\n\n If maxlen is provided, any sequence longer\n than maxlen is truncated to maxlen.\n \"\"\"\n lengths = [len(s) for s in seqs]\n\n nb_samples = len(seqs)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n x = np.zeros((nb_samples, maxlen)).astype(dtype)\n for idx, s in enumerate(seqs):\n x[idx, :lengths[idx]] = s[:maxlen]\n\n return x"
] | [
[
"numpy.max",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wxy1988/ASR | [
"8ef3ef347523044c89c46c263ecc7b8e9b2c06d1",
"8ef3ef347523044c89c46c263ecc7b8e9b2c06d1"
] | [
"transformer/third_party/feat_convert/kaldi_io/batchmk.py",
"transformer/model_pretrain.py"
] | [
"#!/usr/bin/python\r\n# coding=utf-8\r\n\r\n\"\"\"\r\n@version:\r\n@author: Dong Linhao\r\n@license: Apache Licence\r\n@contact: [email protected]\r\n@site:\r\n@software: PyCharm Community Edition\r\n@file: batchmk.py\r\n@time: 09/04/17 21:10\r\n\"\"\"\r\n\r\nimport src.io.fea as fea\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport time\r\n\r\nLONGEST_FRMS = 2000\r\n\r\nclass lstm_batch(object):\r\n def __init__(self, num_streams, num_steps, input_dim):\r\n self.sample_feat_list = [np.zeros([LONGEST_FRMS, input_dim]) for _ in range(num_streams)]\r\n self.sample_label_list = [np.zeros([LONGEST_FRMS]) for _ in range(num_streams)]\r\n self.sample_mask_list = [np.zeros([LONGEST_FRMS]) for _ in range(num_streams)]\r\n\r\n self.curt = np.zeros(num_streams, dtype=int)\r\n self.lent = np.zeros(num_streams, dtype=int)\r\n self.reset_flag = np.zeros(num_streams, dtype=bool)\r\n\r\n self.num_streams = num_streams\r\n self.num_steps = num_steps\r\n self.input_dim = input_dim\r\n self.handled_utt_num = 0\r\n self.handled_frm_num = 0\r\n self.cur_epoch_finish = False\r\n\r\n def set_stream_num(self, num_streams):\r\n self.num_streams = num_streams\r\n\r\n self.sample_feat_list = [np.zeros([LONGEST_FRMS, self.input_dim]) for _ in range(num_streams)]\r\n self.sample_label_list = [np.zeros([LONGEST_FRMS]) for _ in range(num_streams)]\r\n self.sample_mask_list = [np.zeros([LONGEST_FRMS]) for _ in range(num_streams)]\r\n\r\n self.curt = np.zeros(num_streams, dtype=int)\r\n self.lent = np.zeros(num_streams, dtype=int)\r\n self.reset_flag = np.zeros(num_streams, dtype=bool)\r\n\r\n def reset(self):\r\n self.sample_feat_list = [np.zeros([LONGEST_FRMS, self.input_dim]) for _ in range(self.num_streams)]\r\n self.sample_label_list = [np.zeros([LONGEST_FRMS]) for _ in range(self.num_streams)]\r\n self.sample_mask_list = [np.zeros([LONGEST_FRMS]) for _ in range(self.num_streams)]\r\n\r\n self.curt = np.zeros(self.num_streams, dtype=int)\r\n self.lent = np.zeros(self.num_streams, dtype=int)\r\n self.reset_flag = np.zeros(self.num_streams, dtype=bool)\r\n\r\n self.handled_utt_num = 0\r\n self.handled_frm_num = 0\r\n self.cur_epoch_finish = False\r\n\r\n def make_batch(self, sess, sample, run_device, total_utt_num):\r\n with tf.device(run_device):\r\n multistream_feat_batch = [np.zeros([self.num_steps, self.input_dim]) for _ in range(self.num_streams)]\r\n multistream_label_batch = [np.zeros([self.num_steps]) for _ in range(self.num_streams)]\r\n multistream_mask_batch = [np.zeros([self.num_steps]) for _ in range(self.num_streams)]\r\n reset_flag = np.zeros(self.num_streams, dtype=bool)\r\n\r\n for s in range(self.num_streams):\r\n if self.curt[s] < self.lent[s]:\r\n reset_flag[s] = False\r\n continue\r\n\r\n if self.handled_utt_num < total_utt_num:\r\n sample_feats, sample_labels, sample_masks = sess.run(sample)\r\n self.handled_utt_num += 1\r\n self.sample_feat_list[s] = sample_feats\r\n self.sample_label_list[s] = sample_labels\r\n self.sample_mask_list[s] = sample_masks\r\n self.lent[s] = np.shape(sample_feats)[0]\r\n self.curt[s] = 0\r\n reset_flag[s] = True\r\n\r\n for s in range(self.num_streams):\r\n if self.curt[s] < self.lent[s]:\r\n multistream_feat_batch[s] = self.sample_feat_list[s][self.curt[s]:self.curt[s]+self.num_steps, :]\r\n multistream_label_batch[s] = self.sample_label_list[s][self.curt[s]:self.curt[s]+self.num_steps]\r\n multistream_mask_batch[s] = self.sample_mask_list[s][self.curt[s]:self.curt[s]+self.num_steps]\r\n\r\n self.curt[s] += self.num_steps\r\n self.handled_frm_num += 
np.sum(multistream_mask_batch[s])\r\n else:\r\n multistream_mask_batch[s] = np.zeros([self.num_steps])\r\n\r\n final_feat_batch = np.stack(multistream_feat_batch, axis=1)\r\n final_label_batch = np.stack(multistream_label_batch, axis=1)\r\n final_mask_batch = np.stack(multistream_mask_batch, axis=1)\r\n\r\n done = True\r\n for s in range(self.num_streams):\r\n if self.curt[s] < self.lent[s]:\r\n done = False\r\n if done:\r\n self.cur_epoch_finish = True\r\n\r\n return final_feat_batch, final_label_batch, final_mask_batch, reset_flag\r\n\r\n\r\ndef getfilelst(scp_file_path):\r\n # get tf list\r\n tf_list = []\r\n with open(scp_file_path) as list_file:\r\n for line in list_file:\r\n tf_list.append(line.strip())\r\n return tf_list\r\n\r\n\r\ndef process_my_feature(feature, label, flags):\r\n # Add delta\r\n if flags.add_delta:\r\n feature = fea.tf_fea_add_delt(feature)\r\n # CMVN\r\n feature = fea.tf_fea_cmvn_global(feature, flags.feat_mean, flags.feat_var)\r\n # Splice\r\n feature = fea.tf_fea_splice(feature, flags.l_splice, flags.r_splice)\r\n feature = tf.reshape(feature, [-1, flags.input_dim])\r\n\r\n return feature[:], label[:]\r\n\r\n\r\ndef read_my_file_format(filename_queue, org_feat_dim):\r\n # build reader\r\n reader = tf.TFRecordReader()\r\n _, serialized_example = reader.read(filename_queue)\r\n raw_example = tf.parse_single_example(\r\n serialized_example,\r\n # Defaults are not specified since both keys are required.\r\n features={\r\n 'feat': tf.FixedLenFeature([], tf.string),\r\n 'label': tf.FixedLenFeature([], tf.string),\r\n })\r\n example = tf.decode_raw(raw_example['feat'], tf.float32)\r\n example = tf.reshape(example, [-1, org_feat_dim])\r\n label = tf.decode_raw(raw_example['label'], tf.int32)\r\n\r\n return example, label\r\n\r\n\r\ndef lstm_input_pipeline(flags, is_training, num_epochs=None, shuffle_state = True):\r\n with tf.device(flags.default_device):\r\n if is_training:\r\n filenames = getfilelst(flags.trn_data_dir + '/tf.lst')\r\n else:\r\n filenames = getfilelst(flags.cv_data_dir + '/tf.lst')\r\n\r\n # generate file queue\r\n filename_queue = tf.train.string_input_producer(\r\n filenames, num_epochs = num_epochs, shuffle = shuffle_state)\r\n\r\n # read from file queue\r\n sample = read_my_file_format(filename_queue, flags.org_feat_dim)\r\n\r\n # handle sample\r\n sample_feats, sample_labels = process_my_feature(sample[0], sample[1], flags)\r\n sample_length = tf.shape(sample_feats)[0]\r\n sample_masks = tf.ones([sample_length], dtype=tf.float32)\r\n\r\n # add target delay\r\n if flags.target_delay > 0:\r\n feats_part1 = tf.slice(sample_feats, [flags.target_delay, 0], [sample_length-flags.target_delay, -1])\r\n last_frm_feats = tf.slice(sample_feats, [sample_length-1, 0], [1, -1])\r\n feats_part2 = tf.concat([last_frm_feats for _ in range(flags.target_delay)], axis=0)\r\n sample_feats = tf.concat([feats_part1, feats_part2], axis=0)\r\n\r\n padding_length = flags.num_steps - sample_length % flags.num_steps\r\n padding_feats = tf.zeros([padding_length, flags.input_dim], dtype=tf.float32)\r\n feats = tf.concat(axis=0, values=[sample_feats, padding_feats])\r\n padding_labels = tf.zeros([padding_length], dtype=tf.int32)\r\n labels = tf.concat(axis=0, values=[sample_labels, padding_labels])\r\n padding_masks = tf.zeros([padding_length], dtype=tf.float32)\r\n frame_masks = tf.concat(axis=0, values=[sample_masks, padding_masks])\r\n\r\n return feats, labels, frame_masks\r\n\r\n",
"# coding=utf-8\nimport logging\nimport random\nimport re\n\nimport tensorflow as tf\nfrom tensorflow.python.ops import init_ops\n\nimport third_party.tensor2tensor.common_attention as common_attention\nimport third_party.tensor2tensor.common_layers as common_layers\nfrom utils import average_gradients, shift_right, embedding, residual, dense, ff_hidden\nfrom utils import learning_rate_decay, multihead_attention\n\n\nclass Model(object):\n def __init__(self, config, num_gpus):\n self.graph = tf.Graph()\n self._config = config\n\n self._devices = ['/gpu:%d' % i for i in range(num_gpus)] if num_gpus > 0 else ['/cpu:0']\n\n # Placeholders and saver.\n with self.graph.as_default():\n src_pls = []\n dst_pls = []\n for i, device in enumerate(self._devices):\n with tf.device(device):\n pls_batch_x = tf.placeholder(dtype=tf.float32, shape=[None, None, self._config.train.input_dim],\n name='src_pl_{}'.format(i)) # [batch, feat, feat_dim]\n pls_batch_y = tf.placeholder(dtype=tf.int32, shape=[None, None],\n name='dst_pl_{}'.format(i)) # [batch, len]\n src_pls.append(pls_batch_x)\n dst_pls.append(pls_batch_y)\n self.src_pls = tuple(src_pls)\n self.dst_pls = tuple(dst_pls)\n\n self.encoder_scope = 'encoder'\n self.decoder_scope = 'decoder'\n\n def prepare_training(self):\n with self.graph.as_default():\n # Optimizer\n self.global_step = tf.get_variable(name='global_step', dtype=tf.int64, shape=[],\n trainable=False, initializer=tf.zeros_initializer)\n\n self.learning_rate = tf.convert_to_tensor(self._config.train.learning_rate, dtype=tf.float32)\n if self._config.train.optimizer == 'adam':\n self._optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n elif self._config.train.optimizer == 'adam_decay':\n self.learning_rate *= learning_rate_decay(self._config, self.global_step)\n self._optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate, beta1=0.9, beta2=0.98, epsilon=1e-9)\n elif self._config.train.optimizer == 'sgd':\n self._optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n elif self._config.train.optimizer == 'mom':\n self._optimizer = tf.train.MomentumOptimizer(self.learning_rate, momentum=0.9)\n\n # Uniform scaling initializer.\n self._initializer = init_ops.variance_scaling_initializer(scale=1.0, mode='fan_avg', distribution='uniform')\n\n def build_train_model(self, test=True, reuse=None):\n \"\"\"Build model for training. 
\"\"\"\n logging.info('Build train model.')\n self.prepare_training()\n\n with self.graph.as_default():\n acc_list, loss_list, gv_list = [], [], []\n cache = {}\n load = dict([(d, 0) for d in self._devices])\n for i, (X, Y, device) in enumerate(zip(self.src_pls, self.dst_pls, self._devices)):\n\n def daisy_chain_getter(getter, name, *args, **kwargs):\n \"\"\"Get a variable and cache in a daisy chain.\"\"\"\n device_var_key = (device, name)\n if device_var_key in cache:\n # if we have the variable on the correct device, return it.\n return cache[device_var_key]\n if name in cache:\n # if we have it on a different device, copy it from the last device\n v = tf.identity(cache[name])\n else:\n var = getter(name, *args, **kwargs)\n v = tf.identity(var._ref()) # pylint: disable=protected-access\n # update the cache\n cache[name] = v\n cache[device_var_key] = v\n return v\n\n def balanced_device_setter(op):\n \"\"\"Balance variables to all devices.\"\"\"\n if op.type in {'Variable', 'VariableV2', 'VarHandleOp'}:\n # return self._sync_device\n min_load = min(load.values())\n min_load_devices = [d for d in load if load[d] == min_load]\n chosen_device = random.choice(min_load_devices)\n load[chosen_device] += op.outputs[0].get_shape().num_elements()\n return chosen_device\n return device\n\n def identity_device_setter(op):\n return device\n\n device_setter = balanced_device_setter\n\n with tf.variable_scope(tf.get_variable_scope(),\n initializer=self._initializer,\n custom_getter=daisy_chain_getter,\n reuse=reuse):\n with tf.device(device_setter):\n logging.info('Build model on %s.' % device)\n encoder_output = self.encoder(X, is_training=True, reuse=i > 0 or None,\n encoder_scope=self.encoder_scope)\n decoder_output = self.decoder(shift_right(Y), encoder_output, is_training=True,\n reuse=i > 0 or None, decoder_scope=self.decoder_scope)\n acc, loss = self.train_output(decoder_output, Y, reuse=i > 0 or None,\n decoder_scope=self.decoder_scope)\n acc_list.append(acc)\n loss_list.append(loss)\n\n var_list = tf.trainable_variables()\n if self._config.train.var_filter:\n var_list = [v for v in var_list if re.match(self._config.train.var_filter, v.name)]\n gv_list.append(self._optimizer.compute_gradients(loss, var_list=var_list))\n\n self.accuracy = tf.reduce_mean(acc_list)\n self.loss = tf.reduce_mean(loss_list)\n\n # Clip gradients and then apply.\n grads_and_vars = average_gradients(gv_list)\n avg_abs_grads = tf.reduce_mean(tf.abs(grads_and_vars[0]))\n\n if self._config.train.grads_clip > 0:\n grads, self.grads_norm = tf.clip_by_global_norm([gv[0] for gv in grads_and_vars],\n clip_norm=self._config.train.grads_clip)\n grads_and_vars = zip(grads, [gv[1] for gv in grads_and_vars])\n else:\n self.grads_norm = tf.global_norm([gv[0] for gv in grads_and_vars])\n\n self.train_op = self._optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n\n # Summaries\n tf.summary.scalar('acc', self.accuracy)\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('learning_rate', self.learning_rate)\n tf.summary.scalar('grads_norm', self.grads_norm)\n tf.summary.scalar('avg_abs_grads', avg_abs_grads)\n self.summary_op = tf.summary.merge_all()\n\n self.saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=20)\n\n # We may want to test the model during training.\n if test:\n self.build_test_model(reuse=True)\n\n def build_test_model(self, reuse=None):\n \"\"\"Build model for inference.\"\"\"\n logging.info('Build test model.')\n with self.graph.as_default(), 
tf.variable_scope(tf.get_variable_scope(), reuse=reuse):\n prediction_list = []\n loss_sum = 0\n for i, (X, Y, device) in enumerate(zip(self.src_pls, self.dst_pls, self._devices)):\n with tf.device(device):\n logging.info('Build model on %s.' % device)\n dec_input = shift_right(Y)\n\n # Avoid errors caused by empty input by a condition phrase.\n\n def true_fn():\n enc_output = self.encoder(X, is_training=False, reuse=i > 0 or None,\n encoder_scope=self.encoder_scope)\n prediction = self.beam_search(enc_output, reuse=i > 0 or None)\n dec_output = self.decoder(dec_input, enc_output, is_training=False, reuse=True,\n decoder_scope=self.decoder_scope)\n loss = self.test_loss(dec_output, Y, reuse=True, decoder_scope=self.decoder_scope)\n return prediction, loss\n\n def false_fn():\n return tf.zeros([0, 0], dtype=tf.int32), 0.0\n\n prediction, loss = tf.cond(tf.greater(tf.shape(X)[0], 0), true_fn, false_fn)\n\n loss_sum += loss\n prediction_list.append(prediction)\n\n max_length = tf.reduce_max([tf.shape(pred)[1] for pred in prediction_list])\n\n def pad_to_max_length(input, length):\n \"\"\"Pad the input (with rank 2) with 3(</S>) to the given length in the second axis.\"\"\"\n shape = tf.shape(input)\n padding = tf.ones([shape[0], length - shape[1]], dtype=tf.int32) * 3\n return tf.concat([input, padding], axis=1)\n\n prediction_list = [pad_to_max_length(pred, max_length) for pred in prediction_list]\n self.prediction = tf.concat(prediction_list, axis=0)\n self.loss_sum = loss_sum\n\n self.saver = tf.train.Saver(var_list=tf.global_variables())\n\n def encoder(self, encoder_input, is_training, reuse, encoder_scope):\n \"\"\"Encoder.\"\"\"\n with tf.variable_scope(encoder_scope, reuse=reuse):\n return self.encoder_impl(encoder_input, is_training)\n\n def decoder(self, decoder_input, encoder_output, is_training, reuse, decoder_scope):\n \"\"\"Decoder\"\"\"\n with tf.variable_scope(decoder_scope, reuse=reuse):\n return self.decoder_impl(decoder_input, encoder_output, is_training)\n\n def decoder_with_caching(self, decoder_input, decoder_cache, encoder_output, is_training, reuse, decoder_scope):\n \"\"\"Incremental Decoder\"\"\"\n with tf.variable_scope(decoder_scope, reuse=reuse):\n return self.decoder_with_caching_impl(decoder_input, decoder_cache, encoder_output, is_training)\n\n def beam_search(self, encoder_output, reuse):\n \"\"\"Beam search in graph.\"\"\"\n beam_size, batch_size = self._config.test.beam_size, tf.shape(encoder_output)[0]\n inf = 1e10\n\n def get_bias_scores(scores, bias):\n \"\"\"\n If a sequence is finished, we only allow one alive branch. 
This function aims to give one branch a zero score\n and the rest -inf score.\n Args:\n scores: A real value array with shape [batch_size * beam_size, beam_size].\n bias: A bool array with shape [batch_size * beam_size].\n\n Returns:\n A real value array with shape [batch_size * beam_size, beam_size].\n \"\"\"\n bias = tf.to_float(bias)\n b = tf.constant([0.0] + [-inf] * (beam_size - 1))\n b = tf.tile(b[None, :], multiples=[batch_size * beam_size, 1])\n return scores * (1 - bias[:, None]) + b * bias[:, None]\n\n def get_bias_preds(preds, bias):\n \"\"\"\n If a sequence is finished, all of its branch should be </S> (3).\n Args:\n preds: A int array with shape [batch_size * beam_size, beam_size].\n bias: A bool array with shape [batch_size * beam_size].\n\n Returns:\n A int array with shape [batch_size * beam_size].\n \"\"\"\n bias = tf.to_int32(bias)\n return preds * (1 - bias[:, None]) + bias[:, None] * 3\n\n # Prepare beam search inputs.\n # [batch_size, 1, *, hidden_units]\n encoder_output = encoder_output[:, None, :, :]\n # [batch_size, beam_size, feat_len, hidden_units]\n encoder_output = tf.tile(encoder_output, multiples=[1, beam_size, 1, 1])\n # [batch_size * beam_size, feat_len, hidden_units]\n encoder_output = tf.reshape(encoder_output, [batch_size * beam_size, -1, encoder_output.get_shape()[-1].value])\n # [[<S>, <S>, ..., <S>]], shape: [batch_size * beam_size, 1]\n preds = tf.ones([batch_size * beam_size, 1], dtype=tf.int32) * 2\n scores = tf.constant([0.0] + [-inf] * (beam_size - 1), dtype=tf.float32) # [beam_size]\n scores = tf.tile(scores, multiples=[batch_size]) # [batch_size * beam_size]\n bias = tf.zeros_like(scores, dtype=tf.bool) # 是否结束的标识位\n # 缓存的历史结果,[batch_size * beam_size, 0, num_blocks , hidden_units ]\n cache = tf.zeros([batch_size * beam_size, 0, self._config.num_blocks, self._config.hidden_units])\n\n def step(i, bias, preds, scores, cache):\n # Where are we.\n i += 1\n\n # Call decoder and get predictions.\n decoder_output, cache = self.decoder_with_caching(preds, cache, encoder_output, is_training=False,\n reuse=reuse, decoder_scope='decoder')\n last_preds, last_k_preds, last_k_scores = self.test_output(decoder_output, reuse=reuse, decoder_scope='decoder')\n\n last_k_preds = get_bias_preds(last_k_preds, bias)\n last_k_scores = get_bias_scores(last_k_scores, bias)\n\n # Update scores.\n scores = scores[:, None] + last_k_scores # [batch_size * beam_size, beam_size]\n scores = tf.reshape(scores, shape=[batch_size, beam_size ** 2]) # [batch_size, beam_size * beam_size]\n\n # Pruning.\n scores, k_indices = tf.nn.top_k(scores, k=beam_size)\n scores = tf.reshape(scores, shape=[-1]) # [batch_size * beam_size]\n base_indices = tf.reshape(tf.tile(tf.range(batch_size)[:, None], multiples=[1, beam_size]), shape=[-1])\n base_indices *= beam_size ** 2\n k_indices = base_indices + tf.reshape(k_indices, shape=[-1]) # [batch_size * beam_size]\n\n # Update predictions.\n last_k_preds = tf.gather(tf.reshape(last_k_preds, shape=[-1]), indices=k_indices)\n preds = tf.gather(preds, indices=k_indices / beam_size)\n cache = tf.gather(cache, indices=k_indices / beam_size)\n preds = tf.concat((preds, last_k_preds[:, None]), axis=1) # [batch_size * beam_size, i]\n\n # Whether sequences finished.\n bias = tf.equal(preds[:, -1], 3) # </S>?\n\n return i, bias, preds, scores, cache\n\n def not_finished(i, bias, preds, scores, cache):\n return tf.logical_and(\n tf.reduce_any(tf.logical_not(bias)),\n tf.less_equal(\n i,\n tf.reduce_min([tf.shape(encoder_output)[1] + 50, 
self._config.test.max_target_length])\n )\n )\n\n i, bias, preds, scores, cache = tf.while_loop(cond=not_finished,\n body=step,\n loop_vars=[0, bias, preds, scores, cache],\n shape_invariants=[\n tf.TensorShape([]),\n tf.TensorShape([None]),\n tf.TensorShape([None, None]),\n tf.TensorShape([None]),\n tf.TensorShape([None, None, None, None])],\n back_prop=False)\n\n scores = tf.reshape(scores, shape=[batch_size, beam_size])\n preds = tf.reshape(preds, shape=[batch_size, beam_size, -1]) # [batch_size, beam_size, max_length]\n lengths = tf.reduce_sum(tf.to_float(tf.not_equal(preds, 3)), axis=-1) # [batch_size, beam_size]\n lp = tf.pow((5 + lengths) / (5 + 1), self._config.test.lp_alpha) # Length penalty\n scores /= lp # following GNMT\n max_indices = tf.to_int32(tf.argmax(scores, axis=-1)) # [batch_size]\n max_indices += tf.range(batch_size) * beam_size\n preds = tf.reshape(preds, shape=[batch_size * beam_size, -1])\n\n final_preds = tf.gather(preds, indices=max_indices)\n final_preds = final_preds[:, 1:] # remove <S> flag\n return final_preds\n\n def test_output(self, decoder_output, reuse, decoder_scope):\n \"\"\"During test, we only need the last prediction at each time.\"\"\"\n with tf.variable_scope(decoder_scope, reuse=reuse):\n last_logits = dense(decoder_output[:, -1], self._config.dst_vocab_size, use_bias=False,\n name=\"dst_embedding\" if self._config.tie_embedding_and_softmax else \"softmax\",\n reuse=True if self._config.tie_embedding_and_softmax else None)\n last_preds = tf.to_int32(tf.argmax(last_logits, axis=-1))\n z = tf.nn.log_softmax(last_logits)\n last_k_scores, last_k_preds = tf.nn.top_k(z, k=self._config.test.beam_size, sorted=False)\n last_k_preds = tf.to_int32(last_k_preds)\n return last_preds, last_k_preds, last_k_scores\n\n def test_loss(self, decoder_output, Y, reuse, decoder_scope):\n \"\"\"This function help users to compute PPL during test.\"\"\"\n with tf.variable_scope(decoder_scope, reuse=reuse):\n logits = dense(decoder_output, self._config.dst_vocab_size, use_bias=False,\n name=\"dst_embedding\" if self._config.tie_embedding_and_softmax else \"softmax\",\n reuse=True if self._config.tie_embedding_and_softmax else None)\n mask = tf.to_float(tf.not_equal(Y, 0))\n labels = tf.one_hot(Y, depth=self._config.dst_vocab_size)\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)\n loss_sum = tf.reduce_sum(loss * mask)\n return loss_sum\n\n def train_output(self, decoder_output, Y, reuse, decoder_scope):\n \"\"\"Calculate loss and accuracy.\"\"\"\n with tf.variable_scope(decoder_scope, reuse=reuse):\n logits = dense(decoder_output, self._config.dst_vocab_size, use_bias=False,\n name=\"dst_embedding\" if self._config.tie_embedding_and_softmax else \"softmax\",\n reuse=True if self._config.tie_embedding_and_softmax else None)\n preds = tf.to_int32(tf.argmax(logits, axis=-1))\n mask = tf.to_float(tf.not_equal(Y, 0))\n acc = tf.reduce_sum(tf.to_float(tf.equal(preds, Y)) * mask) / tf.reduce_sum(mask)\n\n # Smoothed loss\n loss = common_layers.smoothing_cross_entropy(logits=logits, labels=Y,\n vocab_size=self._config.dst_vocab_size,\n confidence=1 - self._config.train.label_smoothing)\n mean_loss = tf.reduce_sum(loss * mask) / (tf.reduce_sum(mask))\n\n return acc, mean_loss\n\n def encoder_impl(self, encoder_input, is_training):\n \"\"\"\n This is an interface leave to be implemented by sub classes.\n Args:\n encoder_input: A tensor with shape [batch_size, src_length]\n\n Returns: A Tensor with shape [batch_size, src_length, num_hidden]\n\n \"\"\"\n 
raise NotImplementedError()\n\n def decoder_impl(self, decoder_input, encoder_output, is_training):\n \"\"\"\n This is an interface leave to be implemented by sub classes.\n Args:\n decoder_input: A Tensor with shape [batch_size, dst_length]\n encoder_output: A Tensor with shape [batch_size, src_length, num_hidden]\n\n Returns: A Tensor with shape [batch_size, dst_length, num_hidden]\n\n \"\"\"\n raise NotImplementedError()\n\n def decoder_with_caching_impl(self, decoder_input, decoder_cache, encoder_output, is_training):\n \"\"\"\n This is an interface leave to be implemented by sub classes.\n Args:\n decoder_input: A Tensor with shape [batch_size, dst_length]\n decoder_cache: A Tensor with shape [batch_size, *, *, num_hidden]\n encoder_output: A Tensor with shape [batch_size, src_length, num_hidden]\n\n Returns: A Tensor with shape [batch_size, dst_length, num_hidden]\n\n \"\"\"\n raise NotImplementedError()\n\n\nclass Transformer(Model):\n def __init__(self, *args, **kargs):\n super(Transformer, self).__init__(*args, **kargs)\n activations = {\"relu\": tf.nn.relu,\n \"sigmoid\": tf.sigmoid,\n \"tanh\": tf.tanh,\n \"swish\": lambda x: x * tf.sigmoid(x),\n \"glu\": lambda x, y: x * tf.sigmoid(y)}\n self._ff_activation = activations[self._config.ff_activation]\n\n def encoder_impl(self, encoder_input, is_training):\n\n attention_dropout_rate = self._config.attention_dropout_rate if is_training else 0.0\n residual_dropout_rate = self._config.residual_dropout_rate if is_training else 0.0\n\n # Mask\n encoder_padding = tf.equal(tf.reduce_sum(tf.abs(encoder_input), axis=-1), 0.0)\n encoder_output = dense(encoder_input, self._config.hidden_units, activation=tf.identity,\n use_bias=True, name=\"src_change\")\n encoder_output = tf.contrib.layers.layer_norm(encoder_output, center=True, scale=True, trainable=True)\n\n # Add positional signal\n encoder_output = common_attention.add_timing_signal_1d(encoder_output)\n # Dropout\n encoder_output = tf.layers.dropout(encoder_output,\n rate=residual_dropout_rate,\n training=is_training)\n\n # Blocks\n for i in range(self._config.num_blocks_enc):\n with tf.variable_scope(\"block_{}\".format(i)):\n # Multihead Attention\n encoder_output = residual(encoder_output,\n multihead_attention(\n query_antecedent=encoder_output,\n memory_antecedent=None,\n bias=common_attention.attention_bias_ignore_padding(encoder_padding),\n total_key_depth=self._config.hidden_units,\n total_value_depth=self._config.hidden_units,\n output_depth=self._config.hidden_units,\n num_heads=self._config.num_heads,\n dropout_rate=attention_dropout_rate,\n name='encoder_self_attention',\n summaries=True),\n dropout_rate=residual_dropout_rate)\n\n # Feed Forward\n encoder_output = residual(encoder_output,\n ff_hidden(\n inputs=encoder_output,\n hidden_size=4 * self._config.hidden_units,\n output_size=self._config.hidden_units,\n activation=self._ff_activation),\n dropout_rate=residual_dropout_rate)\n # Mask padding part to zeros.\n encoder_output *= tf.expand_dims(1.0 - tf.to_float(encoder_padding), axis=-1)\n return encoder_output\n\n def decoder_impl(self, decoder_input, encoder_output, is_training):\n # decoder_input: [batch_size, step]\n # encoder_output: [batch_size, time_step, hidden_units]\n attention_dropout_rate = self._config.attention_dropout_rate if is_training else 0.0\n residual_dropout_rate = self._config.residual_dropout_rate if is_training else 0.0\n\n encoder_padding = tf.equal(tf.reduce_sum(tf.abs(encoder_output), axis=-1), 0.0)\n encoder_attention_bias = 
common_attention.attention_bias_ignore_padding(encoder_padding)\n\n decoder_output = embedding(decoder_input,\n vocab_size=self._config.dst_vocab_size,\n dense_size=self._config.hidden_units,\n multiplier=self._config.hidden_units ** 0.5 if self._config.scale_embedding else 1.0,\n name=\"dst_embedding\")\n # Positional Encoding\n decoder_output += common_attention.add_timing_signal_1d(decoder_output)\n # Dropout\n decoder_output = tf.layers.dropout(decoder_output,\n rate=residual_dropout_rate,\n training=is_training)\n # Bias for preventing peeping later information\n self_attention_bias = common_attention.attention_bias_lower_triangle(tf.shape(decoder_input)[1])\n\n # Blocks\n for i in range(self._config.num_blocks_dec):\n with tf.variable_scope(\"block_{}\".format(i)):\n # Multihead Attention (self-attention)\n decoder_output = residual(decoder_output,\n multihead_attention(\n query_antecedent=decoder_output,\n memory_antecedent=None,\n bias=self_attention_bias,\n total_key_depth=self._config.hidden_units,\n total_value_depth=self._config.hidden_units,\n num_heads=self._config.num_heads,\n dropout_rate=attention_dropout_rate,\n output_depth=self._config.hidden_units,\n name=\"decoder_self_attention\",\n summaries=True),\n dropout_rate=residual_dropout_rate)\n\n # Multihead Attention (vanilla attention)\n decoder_output = residual(decoder_output,\n multihead_attention(\n query_antecedent=decoder_output,\n memory_antecedent=encoder_output,\n bias=encoder_attention_bias,\n total_key_depth=self._config.hidden_units,\n total_value_depth=self._config.hidden_units,\n output_depth=self._config.hidden_units,\n num_heads=self._config.num_heads,\n dropout_rate=attention_dropout_rate,\n name=\"decoder_vanilla_attention\",\n summaries=True),\n dropout_rate=residual_dropout_rate)\n\n # Feed Forward\n decoder_output = residual(decoder_output,\n ff_hidden(\n decoder_output,\n hidden_size=4 * self._config.hidden_units,\n output_size=self._config.hidden_units,\n activation=self._ff_activation),\n dropout_rate=residual_dropout_rate)\n return decoder_output\n\n def decoder_with_caching_impl(self, decoder_input, decoder_cache, encoder_output, is_training):\n # decoder_input: [batch_size * beam_size, step], 该step逐步增加,即1,2,3,..\n # decoder_cache: [batch_size * beam_size, 0, num_blocks , hidden_units ]\n # encoder_output: [batch_size * beam_size, time_step, hidden_units]\n attention_dropout_rate = self._config.attention_dropout_rate if is_training else 0.0\n residual_dropout_rate = self._config.residual_dropout_rate if is_training else 0.0\n\n encoder_padding = tf.equal(tf.reduce_sum(tf.abs(encoder_output), axis=-1), 0.0)\n encoder_attention_bias = common_attention.attention_bias_ignore_padding(encoder_padding)\n\n decoder_output = embedding(decoder_input,\n vocab_size=self._config.dst_vocab_size,\n dense_size=self._config.hidden_units,\n multiplier=self._config.hidden_units ** 0.5 if self._config.scale_embedding else 1.0,\n name=\"dst_embedding\")\n # Positional Encoding\n decoder_output += common_attention.add_timing_signal_1d(decoder_output)\n # Dropout\n decoder_output = tf.layers.dropout(decoder_output,\n rate=residual_dropout_rate,\n training=is_training)\n\n new_cache = []\n\n # Blocks\n for i in range(self._config.num_blocks):\n with tf.variable_scope(\"block_{}\".format(i)):\n # Multihead Attention (self-attention)\n decoder_output = residual(decoder_output[:, -1:, :],\n multihead_attention(\n query_antecedent=decoder_output,\n memory_antecedent=None,\n bias=None,\n 
total_key_depth=self._config.hidden_units,\n total_value_depth=self._config.hidden_units,\n num_heads=self._config.num_heads,\n dropout_rate=attention_dropout_rate,\n reserve_last=True,\n output_depth=self._config.hidden_units,\n name=\"decoder_self_attention\",\n summaries=True),\n dropout_rate=residual_dropout_rate)\n\n # Multihead Attention (vanilla attention)\n decoder_output = residual(decoder_output,\n multihead_attention(\n query_antecedent=decoder_output,\n memory_antecedent=encoder_output,\n bias=encoder_attention_bias,\n total_key_depth=self._config.hidden_units,\n total_value_depth=self._config.hidden_units,\n output_depth=self._config.hidden_units,\n num_heads=self._config.num_heads,\n dropout_rate=attention_dropout_rate,\n reserve_last=True,\n name=\"decoder_vanilla_attention\",\n summaries=True),\n dropout_rate=residual_dropout_rate)\n\n # Feed Forward\n decoder_output = residual(decoder_output,\n ff_hidden(\n decoder_output,\n hidden_size=4 * self._config.hidden_units,\n output_size=self._config.hidden_units,\n activation=self._ff_activation),\n dropout_rate=residual_dropout_rate)\n\n decoder_output = tf.concat([decoder_cache[:, :, i, :], decoder_output], axis=1)\n new_cache.append(decoder_output[:, :, None, :])\n\n new_cache = tf.concat(new_cache, axis=2) # [batch_size, n_step, num_blocks, num_hidden]\n\n return decoder_output, new_cache\n"
] | [
[
"tensorflow.device",
"tensorflow.concat",
"tensorflow.FixedLenFeature",
"tensorflow.zeros",
"tensorflow.shape",
"tensorflow.slice",
"tensorflow.decode_raw",
"tensorflow.reshape",
"tensorflow.ones",
"numpy.stack",
"tensorflow.train.string_input_producer",
"numpy.shape",
"tensorflow.TFRecordReader",
"numpy.zeros",
"numpy.sum"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.get_variable",
"tensorflow.python.ops.init_ops.variance_scaling_initializer",
"tensorflow.concat",
"tensorflow.nn.log_softmax",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.zeros",
"tensorflow.device",
"tensorflow.layers.dropout",
"tensorflow.reduce_sum",
"tensorflow.equal",
"tensorflow.global_variables",
"tensorflow.train.AdamOptimizer",
"tensorflow.to_int32",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.gather",
"tensorflow.nn.top_k",
"tensorflow.train.MomentumOptimizer",
"tensorflow.to_float",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"tensorflow.logical_not",
"tensorflow.tile",
"tensorflow.TensorShape",
"tensorflow.pow",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.zeros_like",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge_all",
"tensorflow.one_hot",
"tensorflow.global_norm",
"tensorflow.not_equal",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.ones",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
lferraz/VideoProcessingFramework | [
"19b87eddc0539d90ae4025629bac7c93c1387d56"
] | [
"SampleEncodeMultiThread.py"
] | [
"#\n# Copyright 2020 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Starting from Python 3.8 DLL search policy has changed.\n# We need to add path to CUDA DLLs explicitly.\nimport sys\nimport os\n\nif os.name == 'nt':\n # Add CUDA_PATH env variable\n cuda_path = os.environ[\"CUDA_PATH\"]\n if cuda_path:\n os.add_dll_directory(cuda_path)\n else:\n print(\"CUDA_PATH environment variable is not set.\", file = sys.stderr)\n print(\"Can't set CUDA DLLs search path.\", file = sys.stderr)\n exit(1)\n\n # Add PATH as well for minor CUDA releases\n sys_path = os.environ[\"PATH\"]\n if sys_path:\n paths = sys_path.split(';')\n for path in paths:\n if os.path.isdir(path):\n os.add_dll_directory(path)\n else:\n print(\"PATH environment variable is not set.\", file = sys.stderr)\n exit(1)\n\nimport PyNvCodec as nvc\nimport numpy as np\n\nfrom threading import Thread\n \nclass Worker(Thread):\n def __init__(self, gpuID, width, height, rawFilePath, encFilePath):\n Thread.__init__(self)\n\n res = width + 'x' + height\n \n self.nvUpl = nvc.PyFrameUploader(int(width), int(height), nvc.PixelFormat.YUV420, gpuID)\n self.nvCvt = nvc.PySurfaceConverter(int(width), int(height), nvc.PixelFormat.YUV420, nvc.PixelFormat.NV12, gpuID)\n self.nvEnc = nvc.PyNvEncoder({'preset': 'hq', 'codec': 'h264', 's': res}, gpuID)\n\n self.encFile = open(encFilePath, \"wb\")\n self.rawFile = open(rawFilePath, \"rb\")\n \n def run(self):\n try:\n while True:\n frameSize = self.nvEnc.Width() * self.nvEnc.Height() * 3 / 2\n rawFrame = np.fromfile(self.rawFile, np.uint8, count = int(frameSize))\n if not (rawFrame.size):\n print('No more video frames')\n break\n\n rawSurface = self.nvUpl.UploadSingleFrame(rawFrame)\n if (rawSurface.Empty()):\n print('Failed to upload video frame to GPU')\n break\n \n cvtSurface = self.nvCvt.Execute(rawSurface)\n if (cvtSurface.Empty()):\n print('Failed to do color conversion')\n break\n\n encFrame = np.ndarray(shape=(0), dtype=np.uint8)\n success = self.nvEnc.EncodeSingleSurface(cvtSurface, encFrame)\n if(success):\n bits = bytearray(encFrame)\n self.encFile.write(bits)\n\n #Encoder is asynchronous, so we need to flush it\n encFrame = np.ndarray(shape=(0), dtype=np.uint8)\n success = self.nvEnc.Flush(encFrame)\n if(success):\n bits = bytearray(encFrame)\n self.encFile.write(bits)\n \n except Exception as e:\n print(getattr(e, 'message', str(e)))\n decFile.close()\n \ndef create_threads(gpu_id1, width_1, height_1, input_file1, output_file1,\n gpu_id2, width_2, height_2, input_file2, output_file2):\n \n th1 = Worker(gpu_id1, width_1, height_1, input_file1, output_file1)\n th2 = Worker(gpu_id2, width_2, height_2, input_file2, output_file2)\n \n th1.start()\n th2.start()\n \n th1.join()\n th2.join()\n \nif __name__ == \"__main__\":\n print(\"This sample encodes 2 videos simultaneously from YUV files into 1/4 of initial size.\")\n print(\"Usage: SampleDecode.py $gpu_id1 $width_1 $height_1 $input_file1 $output_file_1 $gpu_id2 $width_2 $height_2 $input_file2 $output_file2\")\n \n 
if(len(sys.argv) < 11):\n print(\"Provide input CLI arguments as shown above\")\n exit(1)\n \n gpu_1 = int(sys.argv[1])\n width_1 = sys.argv[2]\n height_1 = sys.argv[3]\n input_1 = sys.argv[4]\n output_1 = sys.argv[5]\n \n gpu_2 = int(sys.argv[6])\n width_2 = sys.argv[7]\n height_2 = sys.argv[8]\n input_2 = sys.argv[9]\n output_2 = sys.argv[10]\n \n create_threads(gpu_1, width_1, height_1, input_1, output_1, gpu_2, width_2, height_2, input_2, output_2)\n"
] | [
[
"numpy.ndarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
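The encode sample above reads raw frames of exactly width * height * 3 / 2 bytes, which is the planar YUV 4:2:0 layout (one full-resolution luma plane plus two quarter-resolution chroma planes). A minimal sketch of that arithmetic and of the same numpy read pattern follows; the "input.yuv" path is a placeholder, not taken from the record.

```python
import os
import numpy as np

def yuv420_frame_size(width: int, height: int) -> int:
    # Y plane (width * height) plus two quarter-resolution chroma planes.
    return width * height + 2 * (width // 2) * (height // 2)

w, h = 640, 480
frame_size = yuv420_frame_size(w, h)
assert frame_size == w * h * 3 // 2
print(frame_size)  # 460800 bytes per raw frame

# Same read pattern as Worker.run: an empty array signals end of file.
if os.path.exists("input.yuv"):  # hypothetical path
    with open("input.yuv", "rb") as raw_file:
        frame = np.fromfile(raw_file, np.uint8, count=frame_size)
        print("read", frame.size, "bytes")
```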
haoxingchen/SSFormers | [
"3c2ea14db6a453d3345e03a790dd452af5fde8d8"
] | [
"modules/fsl_semi_query.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .encoder import make_encoder\nfrom .semi_query import make_query\n\n\nclass FSLSemiQuery(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n\n self.encoder = make_encoder(cfg)\n self.query = make_query(self.encoder.out_channels, cfg)\n self.forward_encoding = cfg.model.forward_encoding\n self.pyramid_list = self._parse_encoding_params()\n\n def _parse_encoding_params(self):\n idx = self.forward_encoding.find('-')\n if idx < 0:\n return []\n blocks = self.forward_encoding[idx + 1:].split(',')\n blocks = [int(s) for s in blocks]\n return blocks\n\n def _pyramid_encoding(self, x):\n b, n, c, h, w = x.shape\n x = x.view(-1, c, h, w)\n feature_list = []\n for size_ in self.pyramid_list:\n feature_list.append(F.adaptive_avg_pool2d(x, size_).view(b, n, c, 1, -1))\n\n if not feature_list:\n out = x.view(b, n, c, 1, -1)\n else:\n out = torch.cat(feature_list, dim=-1)\n return out\n\n def forward_Grid(self, support_x, support_y, query_x, query_y, unlabeled_x):\n b, s, grids_sc, h, w = support_x.shape\n grids_s = grids_sc // 3\n _, q, grids_qc = query_x.shape[:3]\n grids_q = grids_qc // 3\n\n support_xf = F.adaptive_avg_pool2d(self.encoder(support_x.view(-1, 3, h, w)), 1)\n support_xf = support_xf.view(b, s, grids_s, -1).permute(0, 1, 3, 2).unsqueeze(-1)\n query_xf = F.adaptive_avg_pool2d(self.encoder(query_x.view(-1, 3, h, w)), 1)\n query_xf = query_xf.view(b, q, grids_q, -1).permute(0, 1, 3, 2).unsqueeze(-1)\n unlabeled_xf = F.adaptive_avg_pool2d(self.encoder(unlabeled_x.view(-1, 3, h, w)), 1)\n unlabeled_xf = unlabeled_xf.view(b, q, grids_q, -1).permute(0, 1, 3, 2).unsqueeze(-1)\n\n query = self.query(support_xf, support_y, query_xf, query_y, unlabeled_xf)\n return query\n\n def forward_FCN(self, support_x, support_y, query_x, query_y, unlabeled_x):\n b, s, c, h, w = support_x.shape\n q = query_x.shape[1]\n\n support_xf = self.encoder(support_x.view(-1, c, h, w))\n query_xf = self.encoder(query_x.view(-1, c, h, w))\n unlabeled_xf = self.encoder(unlabeled_x.view(-1, c, h, w))\n\n fc, fh, fw = support_xf.shape[-3:]\n support_xf = support_xf.view(b, s, fc, fh, fw)\n query_xf = query_xf.view(b, q, fc, fh, fw)\n\n query = self.query(support_xf, support_y, query_xf, query_y, unlabeled_xf)\n return query\n\n def forward(self, support_x, support_y, query_x, query_y, unlabeled_x):\n if self.forward_encoding == \"FCN\":\n query = self.forward_FCN(support_x, support_y, query_x, query_y, unlabeled_x)\n elif self.forward_encoding.startswith(\"Grid\"):\n query = self.forward_Grid(support_x, support_y, query_x, query_y, unlabeled_x)\n else:\n raise NotImplementedError\n return query\n\n\ndef make_semi_fsl(cfg):\n return FSLSemiQuery(cfg)\n\n"
] | [
[
"torch.nn.functional.adaptive_avg_pool2d",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
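FSLSemiQuery._pyramid_encoding above pools each feature map at every size in pyramid_list with F.adaptive_avg_pool2d and concatenates the flattened grids along the last axis. A minimal shape-only sketch of that step, with made-up batch sizes and a hypothetical "Grid-1,2,4" configuration:

```python
import torch
import torch.nn.functional as F

b, n, c, h, w = 2, 5, 64, 10, 10
x = torch.randn(b * n, c, h, w)

pyramid_sizes = [1, 2, 4]  # e.g. forward_encoding == "Grid-1,2,4"
features = [
    F.adaptive_avg_pool2d(x, s).view(b, n, c, 1, -1)  # (b, n, c, 1, s * s)
    for s in pyramid_sizes
]
out = torch.cat(features, dim=-1)
print(out.shape)  # torch.Size([2, 5, 64, 1, 21]) = 1 + 4 + 16 pooled cells
```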
wryoung412/CS294_Deep_RL_fall2017 | [
"077167de524157cc5f85f40232e5bcf6933ab2f5"
] | [
"hw2/train_pg.py"
] | [
"import numpy as np\nimport tensorflow as tf\nimport gym\nimport logz\nimport scipy.signal\nimport os\nimport time\nimport inspect\nfrom multiprocessing import Process\n\n#============================================================================================#\n# Utilities\n#============================================================================================#\n\ndef build_mlp(\n input_placeholder, \n output_size,\n scope, \n n_layers=2, \n size=64, \n activation=tf.tanh,\n output_activation=None\n ):\n #========================================================================================#\n # ----------SECTION 3----------\n # Network building\n #\n # Your code should make a feedforward neural network (also called a multilayer perceptron)\n # with 'n_layers' hidden layers of size 'size' units. \n # \n # The output layer should have size 'output_size' and activation 'output_activation'.\n #\n # Hint: use tf.layers.dense\n #========================================================================================#\n\n with tf.variable_scope(scope):\n # MY_CODE_HERE\n hidden = input_placeholder\n for i in range(n_layers):\n hidden = tf.layers.dense(hidden, size, activation, name='blah' + str(i))\n return tf.layers.dense(hidden, output_size, output_activation)\n\ndef pathlength(path):\n return len(path[\"reward\"])\n\ndef reward_to_q(rewards, gamma, reward_to_go):\n q = np.zeros_like(rewards)\n T = len(rewards)\n if reward_to_go:\n q += rewards\n for i in range(1, T):\n q[:(T - i)] += gamma * q[i:T]\n else:\n r = 0\n for i in range(T - 1, -1, -1):\n r = rewards[i] + gamma * r\n q = r * np.ones_like(q)\n return q\n \n\n#============================================================================================#\n# Policy Gradient\n#============================================================================================#\n\n# batch_size is more natural for PG as we need to take average over paths. \n# timesteps_per_batch is more relevant for Q-learning as learning is done step by step.\n\n# CartPole\n# Here is a good run\n# python train_pg.py CartPole-v0 --n_layers 4 --target_reward 200 --learning_rate 1e-2 --nn_baseline --batch_size 10\n# ********** Iteration 8 ************\n# total trials: 90\n# ----------------------------------------\n# | Time | 31.1 |\n# | Iteration | 8 |\n# | AverageReturn | 200 |\n# | StdReturn | 0 |\n# | MaxReturn | 200 |\n# | MinReturn | 200 |\n# | EpLenMean | 200 |\n# | EpLenStd | 0 |\n# | TimestepsThisBatch | 2e+03 |\n# | TimestepsSoFar | 1.15e+04 |\n# ----------------------------------------\n#\n# MountainCar\n# Working poorly. It seems some good exploration is needed to get any positive path.\n# \n# Acrobot\n# Similar to MountainCar, but it is possible to randomly get a positive path,\n# and then the model starts to learn.\n# I can get to about 90 steps. What is the \"solve\" criterion?\n# https://github.com/jonholifield/Acrobot-v1\n\n# Box2D\n# https://github.com/pybox2d/pybox2d/blob/master/INSTALL.md\n# 'sudo' python setup.py install: should not use sudo in venv, it complains about setuptools not found\n# LunarLander\n# It does not do that well but works to some extent. 
\n\n\ndef train_PG(exp_name='',\n env_name='CartPole-v0',\n n_iter=100, \n gamma=1.0, \n # min_timesteps_per_batch=1000,\n batch_size=20,\n max_path_length=None,\n learning_rate=5e-3, \n reward_to_go=True, \n animate=True, \n logdir=None, \n normalize_advantages=True,\n nn_baseline=False, \n seed=0,\n # network arguments\n n_layers=1,\n size=32,\n target_reward=None\n ):\n\n start = time.time()\n\n TODO = 1\n\n # Configure output directory for logging\n logz.configure_output_dir(logdir)\n\n # Log experimental parameters\n args = inspect.getargspec(train_PG)[0]\n locals_ = locals()\n params = {k: locals_[k] if k in locals_ else None for k in args}\n logz.save_params(params)\n\n # Set random seeds\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n # Make the gym environment\n env = gym.make(env_name)\n \n # Is this env continuous, or discrete?\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n assert discrete, 'only discrete is implemented'\n\n # Maximum length for episodes\n max_path_length = max_path_length or env.spec.max_episode_steps\n\n #========================================================================================#\n # Notes on notation:\n # \n # Symbolic variables have the prefix sy_, to distinguish them from the numerical values\n # that are computed later in the function\n # \n # Prefixes and suffixes:\n # ob - observation \n # ac - action\n # _no - this tensor should have shape (batch size /n/, observation dim)\n # _na - this tensor should have shape (batch size /n/, action dim)\n # _n - this tensor should have shape (batch size /n/)\n # \n # Note: batch size /n/ is defined at runtime, and until then, the shape for that axis\n # is None\n #========================================================================================#\n\n # Observation and action sizes\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n\n #========================================================================================#\n # ----------SECTION 4----------\n # Placeholders\n # \n # Need these for batch observations / actions / advantages in policy gradient loss function.\n #========================================================================================#\n\n sy_ob_no = tf.placeholder(shape=[None, ob_dim], name=\"ob\", dtype=tf.float32)\n if discrete:\n sy_ac_na = tf.placeholder(shape=[None], name=\"ac\", dtype=tf.int32) \n else:\n sy_ac_na = tf.placeholder(shape=[None, ac_dim], name=\"ac\", dtype=tf.float32) \n\n # Define a placeholder for advantages\n sy_adv_n = tf.placeholder(shape=[None], name=\"adv\", dtype=tf.float32) \n\n\n #========================================================================================#\n # ----------SECTION 4----------\n # Networks\n # \n # Make symbolic operations for\n # 1. Policy network outputs which describe the policy distribution.\n # a. For the discrete case, just logits for each action.\n #\n # b. For the continuous case, the mean / log std of a Gaussian distribution over \n # actions.\n #\n # Hint: use the 'build_mlp' function you defined in utilities.\n #\n # Note: these ops should be functions of the placeholder 'sy_ob_no'\n #\n # 2. Producing samples stochastically from the policy distribution.\n # a. For the discrete case, an op that takes in logits and produces actions.\n #\n # Should have shape [None]\n #\n # b. 
For the continuous case, use the reparameterization trick:\n # The output from a Gaussian distribution with mean 'mu' and std 'sigma' is\n #\n # mu + sigma * z, z ~ N(0, I)\n #\n # This reduces the problem to just sampling z. (Hint: use tf.random_normal!)\n #\n # Should have shape [None, ac_dim]\n #\n # Note: these ops should be functions of the policy network output ops.\n #\n # 3. Computing the log probability of a set of actions that were actually taken, \n # according to the policy.\n #\n # Note: these ops should be functions of the placeholder 'sy_ac_na', and the \n # policy network output ops.\n # \n #========================================================================================#\n\n if discrete:\n # MY_CODE_HERE\n sy_logits_na = build_mlp(\n sy_ob_no,\n ac_dim,\n \"nn_policy\",\n n_layers=n_layers,\n size=size)\n sy_sampled_ac = tf.multinomial(sy_logits_na, 1) # Hint: Use the tf.multinomial op\n sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=sy_logits_na, labels=sy_ac_na)\n\n else:\n # YOUR_CODE_HERE\n sy_mean = TODO\n sy_logstd = TODO # logstd should just be a trainable variable, not a network output.\n sy_sampled_ac = TODO\n sy_logprob_n = TODO # Hint: Use the log probability under a multivariate gaussian. \n\n\n\n #========================================================================================#\n # ----------SECTION 4----------\n # Loss Function and Training Operation\n #========================================================================================#\n\n # MY_CODE_HERE\n # Loss function that we'll differentiate to get the policy gradient.\n # TODO: reduce_mean is not really correct here\n loss = tf.reduce_mean(sy_logprob_n * sy_adv_n)\n update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n\n #========================================================================================#\n # ----------SECTION 5----------\n # Optional Baseline\n #========================================================================================#\n\n if nn_baseline:\n baseline_prediction = tf.squeeze(build_mlp(\n sy_ob_no, \n 1, \n \"nn_baseline\",\n n_layers=n_layers,\n size=size))\n # Define placeholders for targets, a loss function and an update op for fitting a \n # neural network baseline. These will be used to fit the neural network baseline. 
\n # MY_CODE_HERE\n sy_q_n = tf.placeholder(shape=[None], name='q', dtype=tf.float32)\n baseline_loss = tf.nn.l2_loss(baseline_prediction - sy_q_n)\n baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)\n\n\n #========================================================================================#\n # Tensorflow Engineering: Config, Session, Variable initialization\n #========================================================================================#\n\n tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) \n\n sess = tf.Session(config=tf_config)\n sess.__enter__() # equivalent to `with sess:`\n tf.global_variables_initializer().run() #pylint: disable=E1101\n\n tf_board = os.path.join('/tmp/gube/hw2')\n writer = tf.summary.FileWriter(os.path.join(tf_board, str(int(time.time()))))\n writer.add_graph(sess.graph)\n merged_summary = tf.summary.merge_all()\n\n #========================================================================================#\n # Training Loop\n #========================================================================================#\n\n total_timesteps = 0\n total_trials = 0\n\n for itr in range(n_iter):\n print(\"********** Iteration %i ************\"%itr)\n\n # Collect paths until we have enough timesteps\n timesteps_this_batch = 0\n trials_this_batch = 0\n paths = []\n while True:\n ob = env.reset()\n obs, acs, rewards = [], [], []\n animate_this_episode=(len(paths)==0 and (itr % 5 == 0) and animate)\n steps = 0\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.05)\n obs.append(ob)\n ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})\n ac = ac[0][0] # was ac[0]\n acs.append(ac)\n ob, rew, done, _ = env.step(ac)\n rewards.append(rew)\n steps += 1\n if done or steps > max_path_length:\n break\n total_trials += 1\n trials_this_batch += 1\n path = {\"observation\" : np.array(obs), \n \"reward\" : np.array(rewards), \n \"action\" : np.array(acs)}\n paths.append(path)\n timesteps_this_batch += pathlength(path)\n # if timesteps_this_batch > min_timesteps_per_batch:\n # break\n if trials_this_batch == batch_size:\n break\n total_timesteps += timesteps_this_batch\n print('total trials:', total_trials)\n\n # Build arrays for observation, action for the policy gradient update by concatenating \n # across paths\n ob_no = np.concatenate([path[\"observation\"] for path in paths])\n ac_na = np.concatenate([path[\"action\"] for path in paths])\n\n #====================================================================================#\n # ----------SECTION 4----------\n # Computing Q-values\n #\n # Your code should construct numpy arrays for Q-values which will be used to compute\n # advantages (which will in turn be fed to the placeholder you defined above). \n #\n # Recall that the expression for the policy gradient PG is\n #\n # PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]\n #\n # where \n #\n # tau=(s_0, a_0, ...) is a trajectory,\n # Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),\n # and b_t is a baseline which may depend on s_t. \n #\n # You will write code for two cases, controlled by the flag 'reward_to_go':\n #\n # Case 1: trajectory-based PG \n #\n # (reward_to_go = False)\n #\n # Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over \n # entire trajectory (regardless of which time step the Q-value should be for). 
\n #\n # For this case, the policy gradient estimator is\n #\n # E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]\n #\n # where\n #\n # Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.\n #\n # Thus, you should compute\n #\n # Q_t = Ret(tau)\n #\n # Case 2: reward-to-go PG \n #\n # (reward_to_go = True)\n #\n # Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting\n # from time step t. Thus, you should compute\n #\n # Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}\n #\n #\n # Store the Q-values for all timesteps and all trajectories in a variable 'q_n',\n # like the 'ob_no' and 'ac_na' above. \n #\n #====================================================================================#\n\n # MY_CODE_HERE\n q_n = np.concatenate([reward_to_q(path['reward'], gamma, reward_to_go) for path in paths])\n\n #====================================================================================#\n # ----------SECTION 5----------\n # Computing Baselines\n #====================================================================================#\n\n if nn_baseline:\n # If nn_baseline is True, use your neural network to predict reward-to-go\n # at each timestep for each trajectory, and save the result in a variable 'b_n'\n # like 'ob_no', 'ac_na', and 'q_n'.\n #\n # Hint #bl1: rescale the output from the nn_baseline to match the statistics\n # (mean and std) of the current or previous batch of Q-values. (Goes with Hint\n # #bl2 below.)\n\n # MY_CODE_HERE\n # The bootstrap version uses r_t + v(s_{t+1}) - v(s_t), which is biased\n b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})\n adv_n = q_n - b_n\n else:\n adv_n = q_n.copy()\n\n #====================================================================================#\n # ----------SECTION 4----------\n # Advantage Normalization\n #====================================================================================#\n\n if normalize_advantages:\n # On the next line, implement a trick which is known empirically to reduce variance\n # in policy gradient methods: normalize adv_n to have mean zero and std=1. \n # MY_CODE_HERE\n adv_mu = np.mean(adv_n)\n adv_std = np.std(adv_n)\n # Could be more robust than this\n if adv_std == 0.0:\n return\n # The normalization could be problematic.\n # For environments like CartPole, the reward is an integer and is capped at 200.\n # When not using base, adv_n could all be 200 and adv_std = 0. \n adv_n = (adv_n - adv_mu) / adv_std\n\n\n #====================================================================================#\n # ----------SECTION 5----------\n # Optimizing Neural Network Baseline\n #====================================================================================#\n if nn_baseline:\n # ----------SECTION 5----------\n # If a neural network baseline is used, set up the targets and the inputs for the \n # baseline. \n # \n # Fit it to the current batch in order to use for the next iteration. Use the \n # baseline_update_op you defined earlier.\n #\n # Hint #bl2: Instead of trying to target raw Q-values directly, rescale the \n # targets to have mean zero and std=1. (Goes with Hint #bl1 above.)\n\n # MY_CODE_HERE\n # TODO: what is the right way to fit?\n # 1. Using fixed number of steps.\n # It might not balance the good vs bad paths well, but 100 seems pretty good. \n # 2. Using timesteps as number of steps. 
This is CartPole specific.\n print('timesteps:', timesteps_this_batch)\n for i in range(100):\n sess.run(baseline_update_op, feed_dict={sy_ob_no: ob_no, sy_q_n: q_n})\n\n #====================================================================================#\n # ----------SECTION 4----------\n # Performing the Policy Update\n #====================================================================================#\n\n # Call the update operation necessary to perform the policy gradient update based on \n # the current batch of rollouts.\n # \n # For debug purposes, you may wish to save the value of the loss function before\n # and after an update, and then log them below. \n\n # MY_CODE_HERE\n sess.run(update_op, feed_dict={sy_ob_no: ob_no,\n sy_ac_na: ac_na,\n sy_adv_n: adv_n})\n\n\n # Log diagnostics\n returns = [path[\"reward\"].sum() for path in paths]\n ep_lengths = [pathlength(path) for path in paths]\n logz.log_tabular(\"Time\", time.time() - start)\n logz.log_tabular(\"Iteration\", itr)\n logz.log_tabular(\"AverageReturn\", np.mean(returns))\n logz.log_tabular(\"StdReturn\", np.std(returns))\n logz.log_tabular(\"MaxReturn\", np.max(returns))\n logz.log_tabular(\"MinReturn\", np.min(returns))\n logz.log_tabular(\"EpLenMean\", np.mean(ep_lengths))\n logz.log_tabular(\"EpLenStd\", np.std(ep_lengths))\n logz.log_tabular(\"TimestepsThisBatch\", timesteps_this_batch)\n logz.log_tabular(\"TimestepsSoFar\", total_timesteps)\n logz.dump_tabular()\n logz.pickle_tf_vars()\n \n # This stopping criterion is not robust when the batch size is small.\n if target_reward is not None:\n if np.mean([path[\"reward\"].sum() for path in paths]) >= target_reward:\n return\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('env_name', type=str)\n parser.add_argument('--exp_name', type=str, default='vpg')\n parser.add_argument('--render', action='store_true')\n parser.add_argument('--discount', type=float, default=1.0)\n parser.add_argument('--n_iter', '-n', type=int, default=100)\n parser.add_argument('--batch_size', '-b', type=int, default=1000)\n parser.add_argument('--ep_len', '-ep', type=float, default=-1.)\n parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)\n parser.add_argument('--reward_to_go', '-rtg', action='store_true')\n parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')\n parser.add_argument('--nn_baseline', '-bl', action='store_true')\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--n_experiments', '-e', type=int, default=1)\n parser.add_argument('--n_layers', '-l', type=int, default=1)\n parser.add_argument('--size', '-s', type=int, default=32)\n parser.add_argument('--target_reward', type=float, default=None)\n args = parser.parse_args()\n\n if not(os.path.exists('data')):\n os.makedirs('data')\n logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime(\"%d-%m-%Y_%H-%M-%S\")\n logdir = os.path.join('data', logdir)\n if not(os.path.exists(logdir)):\n os.makedirs(logdir)\n\n max_path_length = args.ep_len if args.ep_len > 0 else None\n\n for e in range(args.n_experiments):\n seed = args.seed + 10*e\n print('Running experiment with seed %d'%seed)\n def train_func():\n train_PG(\n exp_name=args.exp_name,\n env_name=args.env_name,\n n_iter=args.n_iter,\n gamma=args.discount,\n # min_timesteps_per_batch=args.batch_size,\n batch_size=args.batch_size,\n max_path_length=max_path_length,\n learning_rate=args.learning_rate,\n reward_to_go=args.reward_to_go,\n 
animate=args.render,\n logdir=os.path.join(logdir,'%d'%seed),\n normalize_advantages=not(args.dont_normalize_advantages),\n nn_baseline=args.nn_baseline, \n seed=seed,\n n_layers=args.n_layers,\n size=args.size,\n target_reward=args.target_reward\n )\n # Awkward hacky process runs, because Tensorflow does not like\n # repeatedly calling train_PG in the same thread.\n p = Process(target=train_func, args=tuple())\n p.start()\n p.join()\n \n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.concatenate",
"numpy.max",
"tensorflow.nn.l2_loss",
"numpy.zeros_like",
"numpy.mean",
"tensorflow.train.AdamOptimizer",
"numpy.ones_like",
"tensorflow.layers.dense",
"tensorflow.ConfigProto",
"numpy.std",
"tensorflow.Session",
"numpy.min",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.set_random_seed",
"numpy.array",
"numpy.random.seed",
"tensorflow.reduce_mean",
"tensorflow.variable_scope",
"tensorflow.multinomial"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
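The Section 4 comments in train_pg.py above define two Q-value targets: the full-trajectory return Ret(tau) and the reward-to-go Q_t = sum_{t'>=t} gamma^(t'-t) * r_{t'}. A minimal reference sketch of those formulas (a single backward pass, not the vectorized helper from the record):

```python
import numpy as np

def discounted_q(rewards: np.ndarray, gamma: float, reward_to_go: bool) -> np.ndarray:
    T = len(rewards)
    q = np.zeros(T)
    running = 0.0
    # Backward recursion: q_t = r_t + gamma * q_{t+1}.
    for t in range(T - 1, -1, -1):
        running = rewards[t] + gamma * running
        q[t] = running
    if not reward_to_go:
        # Trajectory-based PG assigns Ret(tau) = q_0 to every timestep.
        q = np.full(T, q[0])
    return q

rewards = np.array([1.0, 1.0, 1.0])
print(discounted_q(rewards, 0.5, reward_to_go=True))   # [1.75 1.5  1.  ]
print(discounted_q(rewards, 0.5, reward_to_go=False))  # [1.75 1.75 1.75]
```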
andi611/Mockingjay-Speech-Representation | [
"e77df17a7f63a983c3757140c7a1e8c199cac614"
] | [
"runner_mockingjay.py"
] | [
"# -*- coding: utf-8 -*- #\n\"\"\"*********************************************************************************************\"\"\"\n# FileName [ runner_mockingjay.py ]\n# Synopsis [ runner for the mockingjay model ]\n# Author [ Andy T. Liu (Andi611) ]\n# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]\n\"\"\"*********************************************************************************************\"\"\"\n\n\n###############\n# IMPORTATION #\n###############\nimport yaml\nimport torch\nimport random\nimport argparse\nimport numpy as np\nfrom utility.timer import Timer\n\n\n#############################\n# MOCKINGJAY CONFIGURATIONS #\n#############################\ndef get_mockingjay_args():\n \n parser = argparse.ArgumentParser(description='Argument Parser for the mockingjay project.')\n \n # setting\n parser.add_argument('--config', default='config/mockingjay_libri.yaml', type=str, help='Path to experiment config.')\n parser.add_argument('--seed', default=1337, type=int, help='Random seed for reproducable results.', required=False)\n\n # Logging\n parser.add_argument('--logdir', default='log/log_mockingjay/', type=str, help='Logging path.', required=False)\n parser.add_argument('--name', default=None, type=str, help='Name for logging.', required=False)\n\n # model ckpt\n parser.add_argument('--load', action='store_true', help='Load pre-trained model to restore training, no need to specify this during testing.')\n parser.add_argument('--ckpdir', default='result/result_mockingjay/', type=str, help='Checkpoint/Result path.', required=False)\n parser.add_argument('--ckpt', default='mockingjay_libri_sd1337_LinearLarge/mockingjay-500000.ckpt', type=str, help='path to mockingjay model checkpoint.', required=False)\n # parser.add_argument('--ckpt', default='mockingjay_libri_sd1337_MelBase/mockingjay-500000.ckpt', type=str, help='path to mockingjay model checkpoint.', required=False)\n parser.add_argument('--dckpt', default='baseline_sentiment_libri_sd1337/baseline_sentiment-500000.ckpt', type=str, help='path to downstream checkpoint.', required=False)\n parser.add_argument('--apc_path', default='./result/result_apc/apc_libri_sd1337_standard/apc-500000.ckpt', type=str, help='path to the apc model checkpoint.', required=False)\n\n # mockingjay\n parser.add_argument('--train', action='store_true', help='Train the model.')\n parser.add_argument('--run_mockingjay', action='store_true', help='train and test the downstream tasks using mockingjay representations.')\n parser.add_argument('--run_apc', action='store_true', help='train and test the downstream tasks using apc representations.')\n parser.add_argument('--fine_tune', action='store_true', help='fine tune the mockingjay model with downstream task.')\n parser.add_argument('--plot', action='store_true', help='Plot model generated results during testing.')\n \n # phone task\n parser.add_argument('--train_phone', action='store_true', help='Train the phone classifier on mel or mockingjay representations.')\n parser.add_argument('--test_phone', action='store_true', help='Test mel or mockingjay representations using the trained phone classifier.')\n \n # sentiment task\n parser.add_argument('--train_sentiment', action='store_true', help='Train the sentiment classifier on mel or mockingjay representations.')\n parser.add_argument('--test_sentiment', action='store_true', help='Test mel or mockingjay representations using the trained sentiment classifier.')\n \n # speaker verification task\n parser.add_argument('--train_speaker', 
action='store_true', help='Train the speaker classifier on mel or mockingjay representations.')\n parser.add_argument('--test_speaker', action='store_true', help='Test mel or mockingjay representations using the trained speaker classifier.')\n \n # Options\n parser.add_argument('--with_head', action='store_true', help='inference with the spectrogram head, the model outputs spectrogram.')\n parser.add_argument('--output_attention', action='store_true', help='plot attention')\n parser.add_argument('--load_ws', default='result/result_mockingjay_sentiment/10111754-10170300-weight_sum/best_val.ckpt', help='load weighted-sum weights from trained downstream model')\n parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')\n parser.add_argument('--no-msg', action='store_true', help='Hide all messages.')\n\n\n args = parser.parse_args()\n setattr(args,'gpu', not args.cpu)\n setattr(args,'verbose', not args.no_msg)\n config = yaml.load(open(args.config,'r'))\n config['timer'] = Timer()\n \n return config, args\n\n\n########\n# MAIN #\n########\ndef main():\n \n # get arguments\n config, args = get_mockingjay_args()\n \n # Fix seed and make backends deterministic\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n\n # Train Mockingjay\n if args.train:\n from mockingjay.solver import Trainer\n trainer = Trainer(config, args)\n trainer.load_data(split='train')\n trainer.set_model(inference=False)\n trainer.exec()\n\n ##################################################################################\n \n # Train Phone Task\n elif args.train_phone:\n from downstream.solver import Downstream_Trainer\n task = 'mockingjay_phone' if args.run_mockingjay \\\n else 'apc_phone' if args.run_apc else 'baseline_phone'\n trainer = Downstream_Trainer(config, args, task=task)\n trainer.load_data(split='train', load='phone')\n trainer.set_model(inference=False)\n trainer.exec()\n\n # Test Phone Task\n elif args.test_phone:\n from downstream.solver import Downstream_Tester\n task = 'mockingjay_phone' if args.run_mockingjay \\\n else 'apc_phone' if args.run_apc else 'baseline_phone'\n tester = Downstream_Tester(config, args, task=task)\n tester.load_data(split='test', load='phone')\n tester.set_model(inference=True)\n tester.exec()\n\n ##################################################################################\n\n # Train Sentiment Task\n elif args.train_sentiment:\n from downstream.solver import Downstream_Trainer\n task = 'mockingjay_sentiment' if args.run_mockingjay \\\n else 'apc_sentiment' if args.run_apc else 'baseline_sentiment'\n trainer = Downstream_Trainer(config, args, task=task)\n trainer.load_data(split='train', load='sentiment')\n trainer.set_model(inference=False)\n trainer.exec()\n\n # Test Sentiment Task\n elif args.test_sentiment:\n from downstream.solver import Downstream_Tester\n task = 'mockingjay_sentiment' if args.run_mockingjay \\\n else 'apc_sentiment' if args.run_apc else 'baseline_sentiment'\n tester = Downstream_Tester(config, args, task=task)\n tester.load_data(split='test', load='sentiment')\n tester.set_model(inference=True)\n tester.exec()\n\n ##################################################################################\n \n # Train Speaker Task\n elif args.train_speaker:\n from downstream.solver import Downstream_Trainer\n task = 'mockingjay_speaker' if 
args.run_mockingjay \\\n else 'apc_speaker' if args.run_apc else 'baseline_speaker'\n trainer = Downstream_Trainer(config, args, task=task)\n trainer.load_data(split='train', load='speaker')\n # trainer.load_data(split='train', load='speaker_large') # Deprecated\n trainer.set_model(inference=False)\n trainer.exec()\n\n # Test Speaker Task\n elif args.test_speaker:\n from downstream.solver import Downstream_Tester\n task = 'mockingjay_speaker' if args.run_mockingjay \\\n else 'apc_speaker' if args.run_apc else 'baseline_speaker'\n tester = Downstream_Tester(config, args, task=task)\n tester.load_data(split='test', load='speaker')\n # tester.load_data(split='test', load='speaker_large') # Deprecated\n tester.set_model(inference=True)\n tester.exec()\n\n ##################################################################################\n\n # Visualize Mockingjay\n elif args.plot:\n from mockingjay.solver import Tester\n tester = Tester(config, args)\n tester.load_data(split='test', load_mel_only=True)\n tester.set_model(inference=True, with_head=args.with_head, output_attention=args.output_attention)\n tester.plot(with_head=args.with_head)\n\n config['timer'].report()\n\n\n########################\n# GET MOCKINGJAY MODEL #\n########################\ndef get_mockingjay_model(from_path='result/result_mockingjay/mockingjay_libri_sd1337_best/mockingjay-500000.ckpt', display_settings=False):\n ''' Wrapper that loads the mockingjay model from checkpoint path '''\n\n # load config and paras\n all_states = torch.load(from_path, map_location='cpu')\n config = all_states['Settings']['Config']\n paras = all_states['Settings']['Paras']\n\n # display checkpoint settings\n if display_settings:\n for cluster in config:\n print(cluster + ':')\n for item in config[cluster]:\n print('\\t' + str(item) + ': ', config[cluster][item])\n print('paras:')\n v_paras = vars(paras)\n for item in v_paras:\n print('\\t' + str(item) + ': ', v_paras[item])\n\n # load model with Tester\n from mockingjay.solver import Tester\n mockingjay = Tester(config, paras)\n mockingjay.set_model(inference=True, with_head=False, from_path=from_path)\n return mockingjay\n\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
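main() in runner_mockingjay.py above pins every random source (random, numpy, torch, CUDA) to args.seed before training. A minimal, generic sketch of that seeding block; unlike the record, it also disables cudnn.benchmark, trading some convolution speed for stricter run-to-run repeatability.

```python
import random
import numpy as np
import torch

def fix_seed(seed: int) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels; benchmark off for repeatable results.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

fix_seed(1337)
```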
creaiter/Classification-PyTorch | [
"2feabf4b3d0d561420399bdf65840a58af76069d",
"2feabf4b3d0d561420399bdf65840a58af76069d"
] | [
"models/wideresnet.py",
"models/shufflenetv2.py"
] | [
"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\ndef relu(inplace=False):\n \"\"\"ReLU activation\"\"\"\n return nn.ReLU(inplace=inplace)\n\n\ndef bn(num_features):\n \"\"\"Batch normalization 2D\"\"\"\n return nn.BatchNorm2d(num_features)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1):\n super(BasicBlock, self).__init__()\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = bn(planes)\n self.relu1 = relu(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = bn(planes)\n self.relu2 = relu(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu2(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1):\n super(Bottleneck, self).__init__()\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = bn(width)\n self.relu1 = relu(inplace=True)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = bn(width)\n self.relu2 = relu(inplace=True)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = bn(planes * self.expansion)\n self.relu3 = relu(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu2(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu3(out)\n\n return out\n\n\nclass WideResNet_Cifar(nn.Module):\n def __init__(self, block, layers, width_mult=1, num_classes=10, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None):\n super(WideResNet_Cifar, self).__init__()\n self.block_name = str(block.__name__)\n \n self.inplanes = 16\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3: \n raise 
ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n \n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,\n bias=False)\n self.bn1 = bn(self.inplanes)\n self.relu1 = relu(inplace=False)\n\n self.layer1 = self._make_layer(block, 16 * width_mult, layers[0])\n self.layer2 = self._make_layer(block, 32 * width_mult, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 64 * width_mult, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(64 * block.expansion * width_mult, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n #nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n bn(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation))\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x):\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n return x\n\n def forward(self, x):\n return self._forward_impl(x)\n\n\n# Model configurations\n'''\nmodel_cfgs = {\n 18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3]),\n}\n'''\nmodel_cfgs_cifar = {\n 16: (BasicBlock, [2, 2, 2]),\n 22: (BasicBlock, [3, 3, 3]),\n 28: (BasicBlock, [4, 4, 4]),\n 40: (BasicBlock, [6, 6, 6]),\n 52: (BasicBlock, [8, 8, 8]),\n}\n\n\ndef set_model(cfg):\n r\"\"\"\n Args:\n cfg: configuration\n \"\"\"\n # set model configurations\n if data in ['cifar10', 'cifar100']:\n assert (cfg.layers - 4) % 6 == 0, \"The number of layers should be 16, 22, 28, 40, 52, etc.\"\n assert cfg.width_mult == int(cfg.width_mult), \"The width multiplier should be an integer value.\"\n n = int((cfg.layers - 4) / 6)\n layers = [n, n, n]\n image_size = 32\n num_classes = int(cfg.dataset[5:])\n model = WideResNet_Cifar(BasicBlock, layers, cfg.width_mult, num_classes)\n \n elif data == 
'imagenet':\n model = None\n image_size = None\n raise Exception('Undefined dataset for WideResNet architecture.')\n\n else:\n raise Exception('Undefined dataset for WideResNet architecture.')\n \n\n return model, image_size",
"'''ShuffleNetV2 in PyTorch.\n\nSee the paper \"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design\" for more details.\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef conv2d(inplanes, out_planes, kernel_size=3, stride=1, padding=0, groups=1, bias=False):\n \"\"\"convolution with padding\"\"\"\n return nn.Conv2d(inplanes, out_planes, kernel_size=kernel_size, stride=stride,\n padding=padding, groups=groups, bias=bias)\n\n\ndef relu(inplace=False):\n \"\"\"ReLU activation\"\"\"\n return nn.ReLU(inplace=inplace)\n\n\nclass ConvBNReLU(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, act=True):\n super(ConvBNReLU, self).__init__()\n padding = (kernel_size - 1) // 2 if kernel_size > 1 else 0\n \n self.conv = conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups)\n self.bn = nn.BatchNorm2d(out_planes)\n self.act = relu(inplace=True) if act else None\n\n def forward(self, x):\n out = self.bn(self.conv(x))\n out = self.act(out) if self.act is not None else out\n return out\n\n\nclass ShuffleBlock(nn.Module):\n def __init__(self, groups=2):\n super(ShuffleBlock, self).__init__()\n self.groups = groups\n\n def forward(self, x):\n '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''\n N, C, H, W = x.size()\n g = self.groups\n return x.view(N, g, C//g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)\n\n\nclass SplitBlock(nn.Module):\n def __init__(self, ratio):\n super(SplitBlock, self).__init__()\n self.ratio = ratio\n\n def forward(self, x):\n c = int(x.size(1) * self.ratio)\n return x[:, :c, :, :], x[:, c:, :, :]\n\n\nclass BasicBlock(nn.Module):\n def __init__(self, in_channels, split_ratio=0.5):\n super(BasicBlock, self).__init__()\n self.split = SplitBlock(split_ratio)\n in_channels = int(in_channels * split_ratio)\n\n self.conv = nn.Sequential(\n # pw\n ConvBNReLU(in_channels, in_channels, kernel_size=1, stride=1, act=True),\n # dw\n ConvBNReLU(in_channels, in_channels, kernel_size=3, stride=1, groups=in_channels, act=False),\n # pw-linear\n ConvBNReLU(in_channels, in_channels, kernel_size=1, stride=1, act=True)\n )\n self.shuffle = ShuffleBlock()\n\n def forward(self, x):\n x1, x2 = self.split(x)\n out = torch.cat([x1, self.conv(x2)], 1)\n out = self.shuffle(out)\n return out\n\n\nclass DownBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride=2):\n super(DownBlock, self).__init__()\n mid_channels = out_channels // 2\n \n # left\n self.conv1 = nn.Sequential(\n # dw\n ConvBNReLU(in_channels, in_channels, kernel_size=3, stride=stride, groups=in_channels, act=False),\n # pw-linear\n ConvBNReLU(in_channels, mid_channels, kernel_size=1, stride=1, act=True)\n )\n # right\n self.conv2 = nn.Sequential(\n # pw\n ConvBNReLU(in_channels, mid_channels, kernel_size=1, stride=1, act=True),\n # dw\n ConvBNReLU(mid_channels, mid_channels, kernel_size=3, stride=stride, groups=mid_channels, act=False),\n # pw-linear\n ConvBNReLU(mid_channels, mid_channels, kernel_size=1, stride=1, act=True)\n )\n self.shuffle = ShuffleBlock()\n\n def forward(self, x):\n out = torch.cat([self.conv1(x), self.conv2(x)], 1)\n out = self.shuffle(out)\n return out\n\n\nclass ShuffleNetV2(nn.Module):\n def __init__(self, out_channels, num_blocks, num_classes=1000):\n super(ShuffleNetV2, self).__init__()\n self.in_channels = 24\n\n self.conv1 = ConvBNReLU(3, self.in_channels, kernel_size=3, stride=2, act=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, 
padding=1)\n \n self.layer1 = self._make_layer(out_channels[0], num_blocks[0])\n self.layer2 = self._make_layer(out_channels[1], num_blocks[1])\n self.layer3 = self._make_layer(out_channels[2], num_blocks[2])\n self.conv2 = ConvBNReLU(out_channels[2], out_channels[3],\n kernel_size=1, stride=1, act=True)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(out_channels[3], num_classes)\n\n def _make_layer(self, out_channels, num_blocks):\n layers = [DownBlock(self.in_channels, out_channels)]\n for i in range(num_blocks):\n layers.append(BasicBlock(out_channels))\n self.in_channels = out_channels\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.maxpool(out)\n\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.conv2(out)\n\n out = self.avgpool(out)\n #out = torch.flatten(out, 1)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\nclass ShuffleNetV2_CIFAR(nn.Module):\n def __init__(self, out_channels, num_blocks, num_classes=10):\n super(ShuffleNetV2_CIFAR, self).__init__()\n self.in_channels = 24\n\n self.conv1 = ConvBNReLU(3, self.in_channels, kernel_size=3, stride=1, act=True)\n \n self.layer1 = self._make_layer(out_channels[0], num_blocks[0])\n self.layer2 = self._make_layer(out_channels[1], num_blocks[1])\n self.layer3 = self._make_layer(out_channels[2], num_blocks[2])\n self.conv2 = ConvBNReLU(out_channels[2], out_channels[3],\n kernel_size=1, stride=1, act=True)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(out_channels[3], num_classes)\n\n def _make_layer(self, out_channels, num_blocks):\n if self.in_channels == 24:\n layers = [DownBlock(self.in_channels, out_channels, stride=1)]\n else:\n layers = [DownBlock(self.in_channels, out_channels)]\n for i in range(num_blocks):\n layers.append(BasicBlock(out_channels))\n self.in_channels = out_channels\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.conv2(out)\n\n out = self.avgpool(out)\n #out = torch.flatten(out, 1)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\nmodel_cfgs = {\n 0.5: {'out_channels': (48, 96, 192, 1024),\n 'num_blocks': (3, 7, 3)},\n 1: {'out_channels': (116, 232, 464, 1024),\n 'num_blocks': (3, 7, 3)},\n 1.5: {'out_channels': (176, 352, 704, 1024),\n 'num_blocks': (3, 7, 3)},\n 2: {'out_channels': (224, 488, 976, 2048),\n 'num_blocks': (3, 7, 3)}\n}\n\n\ndef set_model(cfg):\n r\"\"\"\n Args:\n cfg: configuration\n \"\"\"\n # set model configurations\n assert cfg.width_mult in model_cfgs.keys(), \"The width multiplier for ShuffleNetV2 should be 0.5, 1, 1.5, or 2.\"\n \n if cfg.dataset in ['cifar10', 'cifar100']:\n out_channels = model_cfgs[cfg.width_mult]['out_channels']\n num_blocks = model_cfgs[cfg.width_mult]['num_blocks']\n image_size = 32\n num_classes = int(cfg.dataset[5:])\n model = ShuffleNetV2_CIFAR(out_channels, num_blocks, num_classes)\n\n elif cfg.dataset in ['imagenet']:\n out_channels = model_cfgs[cfg.width_mult]['out_channels']\n num_blocks = model_cfgs[cfg.width_mult]['num_blocks']\n image_size = 224\n num_classes = 1000\n model = ShuffleNetV2(out_channels, num_blocks, num_classes)\n\n else:\n raise Exception('Undefined dataset for ShuffleNetV2 architecture.')\n\n return model, image_size"
] | [
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.flatten",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
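wideresnet.set_model above asserts (cfg.layers - 4) % 6 == 0 and builds three stages of n = (layers - 4) / 6 BasicBlocks each. A minimal sketch of that depth-to-blocks rule, reproducing the depths listed in model_cfgs_cifar:

```python
def wrn_stage_blocks(depth: int) -> list:
    assert (depth - 4) % 6 == 0, "depth should be 16, 22, 28, 40, 52, ..."
    n = (depth - 4) // 6
    return [n, n, n]

for depth in (16, 22, 28, 40, 52):
    print(depth, wrn_stage_blocks(depth))  # [2,2,2], [3,3,3], [4,4,4], [6,6,6], [8,8,8]
```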
myutman/contracode | [
"f2a589e1efd2788874fd0468d1ecc30d6a14c396"
] | [
"scripts/hf_per_train_shard_tokenize.py"
] | [
"import sys\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\nfrom transformers import BertTokenizerFast\nfrom tqdm import tqdm\n\nif __name__ == \"__main__\":\n assert len(sys.argv) == 2\n data_shard_idx = int(sys.argv[1])\n data_shard_path = f\"/data/ajay/contracode/data/hf_data/train_chunks/augmented_pretrain_df.{data_shard_idx:04d}.train.pickle.gz\"\n data_shard_path_out = (\n f\"/data/ajay/contracode/data/hf_data/train_chunks_tokenized/augmented_pretrain_tokenized_df.{data_shard_idx:04d}.train.pickle.gz\"\n )\n\n def load_tokenizer(path=\"data/vocab/8k_bpe/8k_bpe-vocab.txt\"):\n return BertTokenizerFast(path, clean_text=True, lowercase=False, strip_accents=True, unk_token=\"<unk>\")\n\n def load_data(path):\n return pd.read_pickle(path)\n\n tokenizer = load_tokenizer()\n df_shard = load_data(data_shard_path)\n tqdm.pandas()\n df_shard[\"toks\"] = df_shard[\"text\"].progress_apply(lambda x: np.asarray(tokenizer.encode(x)))\n df_shard = df_shard[[\"data_idx\", \"toks\"]]\n\n df_shard.to_pickle(data_shard_path_out)\n"
] | [
[
"pandas.read_pickle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
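The shard script above builds zero-padded shard paths with an f-string and tokenizes a pandas column via tqdm's progress_apply. A minimal sketch of that pattern; a whitespace split stands in for the record's BertTokenizerFast, and the path is a placeholder:

```python
import numpy as np
import pandas as pd
from tqdm import tqdm

shard_idx = 7
in_path = f"augmented_pretrain_df.{shard_idx:04d}.train.pickle.gz"  # placeholder
print(in_path)  # augmented_pretrain_df.0007.train.pickle.gz

tqdm.pandas()  # registers progress_apply on pandas objects
df = pd.DataFrame({"data_idx": [0, 1], "text": ["const x = 1;", "function f() {}"]})
df["toks"] = df["text"].progress_apply(lambda s: np.asarray(s.split()))
print(df[["data_idx", "toks"]])
```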
penguinflys/imgviz | [
"3deadced1fcce8ca51716c705d07a058b1839514"
] | [
"examples/resize.py"
] | [
"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\n\nimport imgviz\n\n\ndef resize():\n data = imgviz.data.arc2017()\n\n rgb = data[\"rgb\"]\n\n H, W = rgb.shape[:2]\n rgb_resized = imgviz.resize(rgb, height=0.1)\n\n # -------------------------------------------------------------------------\n\n plt.figure(dpi=200)\n\n plt.subplot(121)\n plt.title(\"rgb:\\n{}\".format(rgb.shape))\n plt.imshow(rgb)\n plt.axis(\"off\")\n\n plt.subplot(122)\n plt.title(\"rgb_resized:\\n{}\".format(rgb_resized.shape))\n plt.imshow(rgb_resized)\n plt.axis(\"off\")\n\n img = imgviz.io.pyplot_to_numpy()\n plt.close()\n\n return img\n\n\nif __name__ == \"__main__\":\n from base import run_example\n\n run_example(resize)\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
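examples/resize.py above lays the original and resized images side by side with plt.subplot(121) and plt.subplot(122), i.e. a 1-row, 2-column grid. A minimal sketch of that layout on synthetic data (plain array slicing stands in for imgviz.resize):

```python
import numpy as np
import matplotlib.pyplot as plt

rgb = np.random.randint(0, 255, size=(120, 160, 3), dtype=np.uint8)
small = rgb[::10, ::10]  # crude stand-in for the resized image

plt.figure(dpi=200)
plt.subplot(121)
plt.title("rgb:\n{}".format(rgb.shape))
plt.imshow(rgb)
plt.axis("off")
plt.subplot(122)
plt.title("resized:\n{}".format(small.shape))
plt.imshow(small)
plt.axis("off")
plt.close()
```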
Nathaniel-Rodriguez/neuralnetsim | [
"c353af92fb3f44539370220963b07bdfd9822149"
] | [
"src/neuralnetsim/simulation.py"
] | [
"__all__ = [\"simulate_model\",\n \"simulate_grid\",\n \"simulate_orig\"]\n\n\nimport neuralnetsim\nimport networkx as nx\nimport numpy as np\nfrom distributed import Client\nfrom pathlib import Path\nfrom typing import Type\nfrom typing import Dict\nfrom typing import Any\nfrom typing import List\nfrom typing import Union\n\n\ndef simulation_worker(\n graph: nx.DiGraph,\n rng: np.random.RandomState,\n x0: np.ndarray,\n parameter_path: Path,\n circuit_type: Union[Type[neuralnetsim.DistributionCircuit],\n Type[neuralnetsim.NeuralCircuit]],\n duration: float,\n kernel_parameters: Dict\n) -> Dict[int, np.ndarray]:\n \"\"\"\n\n\n :param x0:\n :param parameter_path:\n :param circuit_type:\n :param graph:\n :param rng:\n :param duration:\n :param kernel_parameters:\n :return:\n \"\"\"\n circuit_parameters = neuralnetsim.load(parameter_path)\n circuit_parameters.network = graph\n circuit_parameters.from_optimizer(x0)\n with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,\n circuit_parameters, rng) as circuit:\n circuit.run(duration)\n return circuit.get_spike_trains()\n\n\ndef simulate_model(\n x0,\n parameter_path: Path,\n fitted_graph_path: Path,\n name: str,\n client: Client,\n duration: float,\n seed: int,\n circuit_type: Type,\n save_path: Path,\n kernel_parameters: Dict[str, Any] = None,\n):\n \"\"\"\n Data in list is matched to the order of the graphs in the fitted graph\n result file.\n\n :param x0:\n :param parameter_path:\n :param fitted_graph_path:\n :param name:\n :param client:\n :param duration:\n :param seed:\n :param circuit_type:\n :param save_path:\n :param kernel_parameters:\n :return:\n \"\"\"\n if kernel_parameters is None:\n kernel_parameters = {}\n fitted_graph_results = neuralnetsim.load(fitted_graph_path)\n rng = np.random.RandomState(seed)\n sims = client.map(\n simulation_worker,\n [graph for graph in fitted_graph_results['graphs']],\n [np.random.RandomState(rng.randint(1, 2**31))\n for _ in range(len(fitted_graph_results['graphs']))],\n pure=False,\n x0=x0,\n parameter_path=parameter_path,\n circuit_type=circuit_type,\n duration=duration,\n kernel_parameters=kernel_parameters\n )\n data = client.gather(sims)\n neuralnetsim.save(\n {\n 'spike_data': data,\n 'seed': seed,\n 'name': name,\n 'duration': duration,\n 'kernel_parameters': kernel_parameters\n },\n save_path\n )\n\n\ndef grid_worker(\n graph: nx.DiGraph,\n rng: np.random.RandomState,\n par: float,\n x0: np.ndarray,\n par_key: str,\n parameter_path: Path,\n circuit_type: Union[Type[neuralnetsim.DistributionCircuit],\n Type[neuralnetsim.NeuralCircuit]],\n duration: float,\n kernel_parameters: Dict\n) -> Dict[int, np.ndarray]:\n \"\"\"\n\n :param graph:\n :param rng:\n :param par:\n :param x0:\n :param par_key:\n :param parameter_path:\n :param circuit_type:\n :param duration:\n :param kernel_parameters:\n :return:\n \"\"\"\n kernel_parameters.update({'grng_seed': rng.randint(1, 2e5),\n **kernel_parameters})\n circuit_parameters = neuralnetsim.load(parameter_path)\n circuit_parameters.network = graph\n circuit_parameters.from_optimizer(x0)\n circuit_parameters.set_par(par_key, par)\n with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,\n circuit_parameters, rng) as circuit:\n circuit.run(duration)\n # if not circuit.run(duration,\n # memory_guard={\n # 'duration': 1000.0,\n # 'max_spikes': 8000 # ~10 spikes/ms\n # }):\n # return {node: np.ndarray([])\n # for node in circuit_parameters.network.nodes()}\n return circuit.get_spike_trains()\n\n\ndef simulate_grid(\n x0,\n par_range: 
Union[List[float], np.ndarray],\n par_key: str,\n parameter_path: Path,\n fitted_graph_path: Path,\n name: str,\n client: Client,\n duration: float,\n seed: int,\n circuit_type: Type,\n save_path: Path,\n kernel_parameters: Dict[str, Any] = None,\n):\n \"\"\"\n\n :param x0:\n :param par_range:\n :param par_key:\n :param parameter_path:\n :param fitted_graph_path:\n :param name:\n :param client:\n :param duration:\n :param seed:\n :param circuit_type:\n :param save_path:\n :param kernel_parameters:\n :return:\n \"\"\"\n if kernel_parameters is None:\n kernel_parameters = {}\n fitted_graph_results = neuralnetsim.load(fitted_graph_path)\n rng = np.random.RandomState(seed)\n num_graphs = range(len(fitted_graph_results['graphs']))\n sims = client.map(\n grid_worker,\n [graph\n for _ in par_range\n for graph in fitted_graph_results['graphs']],\n [np.random.RandomState(rng.randint(1, 2**31))\n for _ in par_range\n for _ in num_graphs],\n [par for par in par_range\n for _ in num_graphs],\n pure=False,\n x0=x0,\n par_key=par_key,\n parameter_path=parameter_path,\n circuit_type=circuit_type,\n duration=duration,\n kernel_parameters=kernel_parameters\n )\n data = client.gather(sims)\n neuralnetsim.save(\n {\n 'spike_data': data,\n 'original_graph': fitted_graph_results['original'],\n 'graphs': [graph for _ in par_range\n for graph in fitted_graph_results['graphs']],\n 'target_modularities':\n [mu for _ in par_range\n for mu in fitted_graph_results['target_modularities']],\n 'grid_par': [par for par in par_range for _ in num_graphs],\n 'par_key': par_key,\n 'seed': seed,\n 'name': name,\n 'duration': duration,\n 'kernel_parameters': kernel_parameters\n },\n save_path\n )\n\n\ndef orig_worker(\n rng: np.random.RandomState,\n par: float,\n graph: nx.DiGraph,\n x0: np.ndarray,\n par_key: str,\n parameter_path: Path,\n circuit_type: Union[Type[neuralnetsim.DistributionCircuit],\n Type[neuralnetsim.NeuralCircuit]],\n duration: float,\n kernel_parameters: Dict\n):\n kernel_parameters.update({'grng_seed': rng.randint(1, 2e5),\n **kernel_parameters})\n circuit_parameters = neuralnetsim.load(parameter_path)\n circuit_parameters.network = graph\n circuit_parameters.from_optimizer(x0)\n circuit_parameters.set_par(par_key, par)\n with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,\n circuit_parameters, rng) as circuit:\n if not circuit.run(duration,\n memory_guard={\n 'duration': 1000.0,\n 'max_spikes': 8000 # ~10 spikes/ms\n }):\n return {node: np.ndarray([])\n for node in circuit_parameters.network.nodes()}\n return circuit.get_spike_trains()\n\n\ndef simulate_orig(\n x0,\n par_range: Union[List[float], np.ndarray],\n par_key: str,\n parameter_path: Path,\n orig_graph_path: Path,\n n_trials: int,\n client: Client,\n duration: float,\n seed: int,\n circuit_type: Type,\n save_path: Path,\n kernel_parameters: Dict[str, Any] = None,\n):\n \"\"\"\n\n :param x0:\n :param par_range:\n :param par_key:\n :param parameter_path:\n :param fitted_graph_path:\n :param name:\n :param client:\n :param duration:\n :param seed:\n :param circuit_type:\n :param save_path:\n :param kernel_parameters:\n :return:\n \"\"\"\n if kernel_parameters is None:\n kernel_parameters = {}\n graph = neuralnetsim.load(orig_graph_path)\n rng = np.random.RandomState(seed)\n sims = client.map(\n orig_worker,\n [np.random.RandomState(rng.randint(1, 2**31))\n for _ in par_range\n for _ in range(n_trials)],\n [par for par in par_range\n for _ in range(n_trials)],\n pure=False,\n x0=x0,\n graph=graph,\n par_key=par_key,\n 
parameter_path=parameter_path,\n circuit_type=circuit_type,\n duration=duration,\n kernel_parameters=kernel_parameters\n )\n data = client.gather(sims)\n neuralnetsim.save(\n {\n 'spike_data': data,\n 'control_var': [par for par in par_range for _ in range(n_trials)],\n 'control_key': par_key,\n 'seed': seed,\n 'duration': duration\n },\n save_path\n )\n"
] | [
[
"numpy.random.RandomState",
"numpy.ndarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
geneti/courseworkproj | [
"5843cc14c2ce01172420befca5d2683f1123096a",
"5843cc14c2ce01172420befca5d2683f1123096a"
] | [
"pattern_recognition/code/DataSplit.py",
"distributed_system/code/Mesh_node.py"
] | [
"import pandas as pd\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport copy\n\nraw_data = pd.read_csv('./raw_data.csv', header = 0, index_col = 0)\nsample_num = raw_data.shape[0]\n\n# sort features by nominal or non-nominal \ndtypes = {}\nfor j in range(raw_data.shape[1]):\n if isinstance(raw_data.iloc[0,j], str) or pd.isna(raw_data.iloc[0,j]):\n dtypes[raw_data.columns[j]] = str\n else:\n dtypes[raw_data.columns[j]] = np.float64\n\ndata = pd.read_csv('./raw_data.csv',sep = ',', header = 0, index_col = 0, dtype = dtypes)\n\n# separate the housing prices into several zones\ndata['PriceLevel'] = 'level'\n\nfor i in range(sample_num):\n if data.iloc[i,79] <= 135000:\n data.iloc[i,80] = 'level_1'\n elif data.iloc[i,79] <= 165000:\n data.iloc[i,80] = 'level_2'\n elif data.iloc[i,79] <= 200000:\n data.iloc[i,80] = 'level_3'\n else:\n data.iloc[i,80] = 'level_4'\ndata = data.drop(columns = 'SalePrice')\n\n#shuffle the data\ndata = data.sample(frac=1).reset_index(drop=True)\nprint('data: ',data)\n\n\ntmp = sample_num*9/10\nprint(data.shape)\ntrain = data.iloc[0:int(tmp),:]\ntest = data.iloc[int(tmp)+1:sample_num,:]\n\ntrain.to_csv('./train.csv')\ntest.to_csv('./test.csv')",
"import pandas as pd\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport math\n\nimport Gaussian_2d, NodeDistribution\n\nclass Node(object):\n\tdef __init__(self, gateway_prob, x, y, index):\n\t\t# node index 1:Num\n\t\tself.index = index\n\t\t# Location info\n\t\tself.x_pos = x\n\t\tself.y_pos = y\n\t\t# transmit power\n\t\tself.pt = 1\n\t\t# antenna gain, dB unit\n\t\tself.gain = 1\n\t\t# node height\n\t\tself.height = 1\n\t\t# Carrier sense threshold\n\t\tself.CS_th = 1\n\t\t# Receiver threshold\n\t\tself.Rx_th = 1\n\t\t# judge the node is or not a gateway\n\t\tself.is_gateway = np.random.binomial(1, gateway_prob)\n\t\t# out_neighbours mean those nodes can be covered by signals sent by current node\n\t\t# in_neighbours mean current node can receive signal from other nodes\n\t\t# elements are index not instance\n\t\tself.out_neighbours = []\n\t\tself.in_neighbours = []\n\t\t# min hop count\n\t\tself.min_hop_count = 1000\n\n"
] | [
[
"pandas.isna",
"pandas.read_csv"
],
[
"numpy.random.binomial"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GyroscopeHQ/keras-rl | [
"35f9b50c3b35f52722d740e8ee42e33c1750e44a"
] | [
"rl/policy.py"
] | [
"from __future__ import division\nimport numpy as np\n\nfrom rl.util import *\n\n\nclass Policy(object):\n def _set_agent(self, agent):\n self.agent = agent\n\n @property\n def metrics_names(self):\n return []\n\n @property\n def metrics(self):\n return []\n\n def select_action(self, **kwargs):\n raise NotImplementedError()\n\n def get_config(self):\n return {}\n\n\nclass LinearAnnealedPolicy(Policy):\n def __init__(self, inner_policy, attr, value_max, value_min, value_test, nb_steps):\n if not hasattr(inner_policy, attr):\n raise ValueError('Policy \"{}\" does not have attribute \"{}\".'.format(attr))\n\n super(LinearAnnealedPolicy, self).__init__()\n\n self.inner_policy = inner_policy\n self.attr = attr\n self.value_max = value_max\n self.value_min = value_min\n self.value_test = value_test\n self.nb_steps = nb_steps\n\n def get_current_value(self):\n if self.agent.training:\n # Linear annealed: f(x) = ax + b.\n a = -float(self.value_max - self.value_min) / float(self.nb_steps)\n b = float(self.value_max)\n value = max(self.value_min, a * float(self.agent.step) + b)\n else:\n value = self.value_test\n return value\n\n def select_action(self, **kwargs):\n setattr(self.inner_policy, self.attr, self.get_current_value())\n return self.inner_policy.select_action(**kwargs)\n\n @property\n def metrics_names(self):\n return ['mean_{}'.format(self.attr)]\n\n @property\n def metrics(self):\n return [getattr(self.inner_policy, self.attr)]\n\n def get_config(self):\n config = super(LinearAnnealedPolicy, self).get_config()\n config['attr'] = self.attr\n config['value_max'] = self.value_max\n config['value_min'] = self.value_min\n config['value_test'] = self.value_test\n config['nb_steps'] = self.nb_steps\n config['inner_policy'] = get_object_config(self.inner_policy)\n return config\n\n\nclass EpsGreedyQPolicy(Policy):\n def __init__(self, eps=.1):\n super(EpsGreedyQPolicy, self).__init__()\n self.eps = eps\n\n def select_action(self, q_values):\n assert q_values.ndim == 1\n nb_actions = q_values.shape[0]\n\n if np.random.uniform() < self.eps:\n action = np.random.random_integers(0, nb_actions-1)\n else:\n action = np.argmax(q_values)\n return action\n\n def get_config(self):\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config\n\n\nclass GreedyQPolicy(Policy):\n def select_action(self, q_values):\n assert q_values.ndim == 1\n action = np.argmax(q_values)\n return action\n\n\nclass BoltzmannQPolicy(Policy):\n def __init__(self, tau=1., clip=(-500., 500.)):\n super(BoltzmannQPolicy, self).__init__()\n self.tau = tau\n self.clip = clip\n\n def select_action(self, q_values):\n assert q_values.ndim == 1\n q_values = q_values.astype('float64')\n nb_actions = q_values.shape[0]\n\n exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))\n probs = exp_values / np.sum(exp_values)\n action = np.random.choice(range(nb_actions), p=probs)\n return action\n\n def get_config(self):\n config = super(BoltzmannQPolicy, self).get_config()\n config['tau'] = self.tau\n config['clip'] = self.clip\n return config\n"
] | [
[
"numpy.clip",
"numpy.argmax",
"numpy.random.random_integers",
"numpy.random.uniform",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
efthymis-mcl/algomorphism | [
"69a41e98e10458ac333da1350fc39da8a00b80d3"
] | [
"algomorphism/datasets/graph_base.py"
] | [
"from typing import List\nimport numpy as np\n\n\nclass GraphBaseDataset(object):\n def __int__(self):\n pass\n\n @staticmethod\n def numpy_to_mega_batch(x_list, a_list):\n \"\"\"\n List of numpy arrays to mega batch array.\n\n Args:\n x_list (`list[np.ndarray]`): feature matrixes.\n a_list (`list[np.ndarray]`): adjency matrixes.\n Returns:\n `tuple[np.ndarray, np.ndarray]`: batched x, a lists\n Examples:\n\n >>> graph_base = GraphBaseDataset()\n >>> x_list = [np.random.rand(6,4) for _ in range(6)]+[np.random.rand(3,4) for _ in range(6)]\n >>> a_list = [np.random.rand(6,6) for _ in range(6)]+[np.random.rand(3,3) for _ in range(6)]\n >>> x, a = graph_base.numpy_to_mega_batch(x,a)\n >>> print(a.shape)\n (12, 6, 6)\n >>> print(x.shape)\n (12, 6, 4)\n \"\"\"\n\n def a_post_concat(a):\n a_con = np.concatenate([a, np.zeros((a.shape[0], max_d - a.shape[1]))], axis=1)\n a_con = np.concatenate([a_con, np.zeros((max_d - a_con.shape[0], a_con.shape[1]))], axis=0)\n return a_con\n\n def x_post_concat(x):\n x_con = np.concatenate([x, np.zeros((max_d - x.shape[0], x.shape[1]))], axis=0)\n return x_con\n\n max_d = max([a.shape[0] for a in a_list])\n mega_batch_a = []\n mega_batch_x = []\n for (x, a) in zip(x_list, a_list):\n if a.shape[0] < max_d:\n a = a_post_concat(a)\n x = x_post_concat(x)\n mega_batch_a.append(a)\n mega_batch_x.append(x)\n mega_batch_a = np.array(mega_batch_a)\n mega_batch_x = np.stack(mega_batch_x, axis=0)\n\n return mega_batch_x, mega_batch_a\n\n @staticmethod\n def numpy_to_disjoint(x_list, a_list):\n \"\"\"\n Args:\n x_list (`List[np.ndarray]`): feature matrixes,\n a_list (`List[np.ndarray]`): adajence matrixes.\n\n Returns:\n `tuple[np.ndarray, np.ndarray]`: disjoint matrixes of x_list, a_list.\n\n Examples:\n >>> x_list = [np.random.rand(6,4) for _ in range(6)]+[np.random.rand(3,4) for _ in range(6)]\n >>> a_list = [np.random.rand(6,6) for _ in range(6)]+[np.random.rand(3,3) for _ in range(6)]\n >>> gbd = GraphBaseDataset()\n >>> x, a = gbd.numpy_to_disjoint(x_list,a_list)\n >>> print(a.shape)\n (54, 54)\n >>> print(x.shape)\n (54, 48)\n \"\"\"\n def zero_padding_concat(x, x_disjoint, nx, ndx):\n x_disjoint = np.concatenate([x_disjoint, np.zeros((x_disjoint.shape[0], nx))], axis=1)\n x = np.concatenate([np.zeros((x.shape[0], ndx)), x], axis=1)\n x_disjoint = np.concatenate([x_disjoint, x], axis=0)\n return x_disjoint\n\n a_disjoint = a_list[0]\n x_disjoint = x_list[0]\n for a, x in zip(a_list[1:], x_list[1:]):\n na = a.shape[1]\n nda = a_disjoint.shape[1]\n nx = x.shape[1]\n ndx = x_disjoint.shape[1]\n a_disjoint = zero_padding_concat(a, a_disjoint, na, nda)\n x_disjoint = zero_padding_concat(x, x_disjoint, nx, ndx)\n\n return x_disjoint, a_disjoint\n\n @staticmethod\n def renormalization(a):\n \"\"\"\n Give an adjacency matrix and returns the renormalized.\n\n Args:\n a: A ndarray, adjacency matrix.\n\n Returns:\n atld: A ndarray, renormalized adjacency matrix.\n\n Examples:\n >>> grapbase = GraphBaseDataset()\n >>> a = np.array([[[0,1,1], [1,0,0], [1,0,0]]])\n >>> atld = grapbase.renormalization(a)\n >>> print(atld)\n [[[0.33333333 0.40824829 0.40824829]\n [0.40824829 0.5 0. ]\n [0.40824829 0. 0.5 ]]]\n\n References:\n Thomas N. Kipf, Max Welling. 
Semi-supervised classification with graph convolutional networks,\n https://arxiv.org/pdf/1609.02907.pdf\n \"\"\"\n\n ai = a + np.eye(a.shape[-1])\n degree = np.sum(ai, axis=-1)\n degree = np.eye(a.shape[-1]) * degree\n degree_inv = np.linalg.inv(degree)\n degree_inv = np.power(degree_inv, 0.5)\n\n atld = np.matmul(degree_inv, ai)\n atld = np.matmul(atld, degree_inv)\n return atld\n\n\n"
] | [
[
"numpy.power",
"numpy.linalg.inv",
"numpy.eye",
"numpy.matmul",
"numpy.stack",
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
goncalovalverde/seshat | [
"deff5cdd985f81ac2b4ebd077eea11f7c4f4118f"
] | [
"reader/gitlab.py"
] | [
"import gitlab\nimport dateutil.parser\nimport reader.cache\nimport hashlib\nimport logging\nfrom pandas import DataFrame, NaT\nfrom datetime import datetime\n\n\nclass Gitlab:\n def __init__(self, gitlab_config: dict, workflow: dict):\n self.gitlab_config = gitlab_config\n self.workflow = workflow\n\n def cache_name(self):\n token = self.gitlab_config[\"token\"]\n workflow = str(self.workflow)\n url = self.gitlab_config[\"url\"]\n project_id = (\n self.gitlab_config.get(\"project_id\")\n if self.gitlab_config.get(\"project_id\")\n else self.gitlab_config.get(\"group_id\")\n )\n name_hashed = hashlib.md5(\n (token + url + workflow + str(project_id)).encode(\"utf-8\")\n )\n return name_hashed.hexdigest()\n\n self.cache = reader.cache.Cache(cache_name(self))\n\n def get_gitlab_instance(self):\n gl = gitlab.Gitlab(\n self.gitlab_config[\"url\"], private_token=self.gitlab_config[\"token\"]\n )\n gl.auth()\n\n return gl\n\n def get_issue_data(self, issue):\n issue_data = {\n \"Key\": issue.id,\n \"Type\": \"issue\",\n \"Creator\": issue.author[\"name\"],\n \"Created\": dateutil.parser.parse(issue.created_at).replace(tzinfo=None),\n \"Done\": (\n dateutil.parser.parse(issue.created_at).replace(tzinfo=None)\n if issue.created_at\n else NaT\n ),\n }\n return issue_data\n\n def get_issues(self):\n gl = self.get_gitlab_instance()\n\n if self.gitlab_config.get(\"project_id\"):\n project = gl.projects.get(self.gitlab_config[\"project_id\"])\n issues = project.issues.list()\n\n elif self.gitlab_config.get(\"group_id\"):\n group = gl.groups.get(self.gitlab_config[\"group_id\"])\n issues = group.issues.list()\n\n else:\n raise Exception(\"No valid project_id or group_id found!\")\n\n return issues\n\n def get_data(self) -> DataFrame:\n\n if self.gitlab_config[\"cache\"] and self.cache.is_valid():\n logging.debug(\"Getting gitlab data from cache\")\n df_issue_data = self.cache.read()\n return df_issue_data\n\n issues = self.get_issues()\n\n # issue_data = {\"Key\": [], \"Type\": [], \"Creator\": [], \"Created\": [], \"Done\": []}\n issues_data = [self.get_issue_data(issue) for issue in issues]\n\n df_issues_data = DataFrame(issues_data)\n\n if self.gitlab_config[\"cache\"]:\n logging.debug(\"Storing gitlab issue data in cache\")\n self.cache.write(df_issues_data)\n\n return df_issues_data\n\n def refresh_data(self, date: datetime) -> DataFrame:\n if self.gitlab_config[\"cache\"] and self.cache.is_valid():\n self.cache.clean()\n\n return self.get_data()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ahkarimi/MMTOD | [
"d8160f643a0ee1943630b45fa094617dd2237c7e"
] | [
"main.py"
] | [
"from flask import Flask, request, jsonify, render_template, session\nimport os\nimport pickle\nimport datetime\nimport time\nimport pandas as pd\nimport numpy as np\nimport random\nimport logging\n\n##__________________________________ GPT-3 code __________________________________________##\nfrom colorama import Fore, Back, Style\nimport torch\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer\nimport sys, os\nimport pprint\nimport numpy as np\nimport torch\nfrom image_handler import Handler\n\nimg_handler_obj = Handler()\n# args = ArgsParser().parse()\n\n# device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n# n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n\npp = pprint.PrettyPrinter(indent=4)\nprev_beliefs = {}\ndomain_queue = []\n\n# sys.stdout.flush()\n\nmodel_checkpoint = \"./output/checkpoint-108420\"\n\ndecoding = \"DECODING METHOD HERE\"\n\n## if decoding == 'nucleus':\n## TOP_P = float(sys.argv[3])\n\ndelay = 0.5\n\n## multiwoz_db = MultiWozDB()\n\nprint('\\nLoading Model', end=\"\")\n\nif 'openai' in model_checkpoint:\n tokenizer = OpenAIGPTTokenizer.from_pretrained(model_checkpoint)\n model = OpenAIGPTLMHeadModel.from_pretrained(model_checkpoint)\nelse:\n tokenizer = GPT2Tokenizer.from_pretrained(model_checkpoint)\n model = GPT2LMHeadModel.from_pretrained(model_checkpoint)\n\n# model.load_state_dict(torch.load(model_checkpoint))\nmodel.eval()\nmodel.to('cpu')\n\nbreak_tokens = tokenizer.encode(tokenizer.eos_token) + tokenizer.encode('?') + tokenizer.encode('!')\n# break_tokens = tokenizer.encode(tokenizer.eos_token)\nMAX_LEN = model.config.n_ctx\n\nif 'openai-gpt' in model_checkpoint:\n tokenizer.add_special_tokens({'bos_token': '<|endoftext|>'})\n tokenizer.add_special_tokens({'eos_token': '<|endoftext|>'})\n\nsample = 1\n#print()\n#print('\\n What would you like to ask?')\n# history = []\ncontext = ''\ninput_text = ''\nturn = 0\n\n\n# dbmatch = 0\n\ndef get_belief_new_dbsearch(sent):\n if '<|belief|>' in sent:\n tmp = sent.strip(' ').split('<|belief|>')[-1].split('<|endofbelief|>')[0]\n # elif 'belief.' 
in sent:\n # tmp = sent.strip(' ').split('<belief>')[-1].split('<action>')[0]\n # elif 'belief' not in sent:\n # return []\n else:\n return []\n # else:\n # raise TypeError('unknown belief separator')\n tmp = tmp.strip(' .,')\n # assert tmp.endswith('<endofbelief>')\n tmp = tmp.replace('<|endofbelief|>', '')\n tmp = tmp.replace('<|endoftext|>', '')\n belief = tmp.split(',')\n new_belief = []\n for bs in belief:\n bs = bs.strip(' .,')\n if bs not in new_belief:\n new_belief.append(bs)\n return new_belief\n\n\ndef convert_belief(belief):\n dic = {}\n for bs in belief:\n if bs in [' ', '']:\n continue\n domain = bs.split(' ')[0]\n slot = bs.split(' ')[1]\n if slot == 'book':\n slot = ' '.join(bs.split(' ')[1:3])\n value = ' '.join(bs.split(' ')[3:])\n else:\n value = ' '.join(bs.split(' ')[2:])\n if domain not in dic:\n dic[domain] = {}\n try:\n dic[domain][slot] = value\n except:\n print(domain)\n print(slot)\n return dic\n\ndef get_turn_domain(beliefs, q):\n for k in beliefs.keys():\n if k not in q:\n q.append(k)\n turn_domain = k\n return turn_domain\n return q[-1]\n\n\n\n\n\ndef get_action_new(sent):\n if '<|action|>' not in sent:\n return []\n elif '<|belief|>' in sent:\n tmp = sent.split('<|belief|>')[-1].split('<|response|>')[0].split('<|action|>')[-1].strip()\n elif '<|action|>' in sent:\n tmp = sent.split('<|response|>')[0].split('<|action|>')[-1].strip()\n else:\n return []\n tmp = tmp.strip(' .,')\n # if not tmp.endswith('<endofaction>'):\n # ipdb.set_trace()\n tmp = tmp.replace('<|endofaction|>', '')\n tmp = tmp.replace('<|endoftext|>', '')\n action = tmp.split(',')\n new_action = []\n for act in action:\n if act == '':\n continue\n act = act.strip(' .,')\n if act not in new_action:\n new_action.append(act)\n return new_action\n\n\n\ndef get_response_new(sent, venuename):\n if '<|response|>' in sent:\n tmp = sent.split('<|belief|>')[-1].split('<|action|>')[-1].split('<|response|>')[-1]\n else:\n return ''\n # if '<belief>' in sent:\n # tmp = sent.split('<belief>')[-1].split('<action>')[-1].split('<response>')[-1]\n # elif '<action>' in sent:\n # tmp = sent.split('<action>')[-1].split('<response>')[-1]\n # elif '<response>' in sent:\n # tmp = sent.split('<response>')[-1]\n # else:\n # tmp = sent\n tmp = tmp.strip(' .,')\n # assert tmp.endswith('<endofresponse>')\n tmp = tmp.replace('<|endofresponse|>', '')\n tmp = tmp.replace('<|endoftext|>', '')\n tokens = tokenizer.encode(tmp)\n new_tokens = []\n for tok in tokens:\n # if tok in break_tokens:\n if tok in tokenizer.encode(tokenizer.eos_token):\n continue\n new_tokens.append(tok)\n # ipdb.set_trace()\n response = tokenizer.decode(new_tokens).strip(' ,.')\n response = response.replace('[venuename]', '{}'.format(venuename))\n return response\n\n\ndef get_venuename(bs):\n name = ''\n if 'venuename' in bs[0]:\n tmp_list = bs[0].split('venuename')[-1].split(' ')\n #action = tmp_list[-1]\n name = ' '. 
join(tmp_list[:-1])\n return name\n\n\ndef get_open_span(bs):\n action_names = []\n for tmp in bs[0].split(';'):\n if 'open span' in tmp:\n action = tmp.split('open span')[-1].split(' ')[-1]\n name = tmp.split('open span')[-1].split(action)[0]\n action_names.append((name, action))\n return action_names\n\n\n##____________________________ End of GPT-3 code __________________________________________##\n\n\nlogging.basicConfig(level=logging.DEBUG)\n\napp = Flask(__name__)\napp.secret_key = 'MY_SECRET_KEY'\n\n\ndef label_Message(message):\n logging.warning('In label_Message')\n # load the model from disk\n model_filename = 'model/model.pkl'\n tfidf_filename = 'model/tfidf.pkl'\n \n model = pickle.load(open(model_filename, 'rb'))\n tfidf = pickle.load(open(tfidf_filename, 'rb'))\n \n pred = model.predict(tfidf.transform([message]))\n message_label = pred[0]\n \n \n logging.warning('Out label_Message')\n return message_label\n\ndef label_to_persian(label):\n res = ''\n if label == 'HAPPY':\n res = 'خوشحال'\n elif label == 'SAD':\n res = 'ناراحت'\n\n return\n\ndef Create_message(message):\n global context\n global turn\n logging.warning('In create message')\n global result\n label = session['label']\n state = session['state']\n result = session['result']\n result['response'] = ''\n result['status'] = 'on'\n result['has_image'] = 'False'\n\n raw_text = message\n input_text = raw_text.replace('you> ', '')\n if input_text in ['q', 'quit']:\n return \"Ok, bye. Just for now!\"\n\n user = '<|user|> {}'.format(input_text)\n context = context + ' ' + user\n text = '<|endoftext|> <|context|> {} <|endofcontext|>'.format(context)\n\n # print(context)\n\n text = text.strip()\n indexed_tokens = tokenizer.encode(text)\n\n if len(indexed_tokens) > MAX_LEN:\n indexed_tokens = indexed_tokens[-1 * MAX_LEN:]\n\n # Convert indexed tokens in a PyTorch tensor\n tokens_tensor = torch.tensor([indexed_tokens])\n\n # If you have a GPU, put everything on cuda\n tokens_tensor = tokens_tensor.to('cpu')\n predicted_index = indexed_tokens[-1]\n\n with torch.no_grad():\n # Greedy decoding\n\n while predicted_index not in break_tokens:\n outputs = model(tokens_tensor)\n predictions = outputs[0]\n predicted_index = torch.argmax(predictions[0, -1, :]).item()\n indexed_tokens += [predicted_index]\n tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')\n if len(indexed_tokens) > MAX_LEN:\n break\n if tokenizer.decode(indexed_tokens).endswith('<|endofbelief|>'):\n break\n\n tmp_pred = tokenizer.decode(indexed_tokens)\n\n print('\\ntmp_pred:\\n', tmp_pred)\n\n belief_text = get_belief_new_dbsearch(tmp_pred)\n print('\\nbelief_text:\\n', belief_text)\n\n beliefs = convert_belief(belief_text)\n # domain = list(beliefs.keys())[0]\n domain = get_turn_domain(beliefs, domain_queue)\n\n # Convert indexed tokens in a PyTorch tensor\n tokens_tensor = torch.tensor([indexed_tokens])\n\n # If you have a GPU, put everything on cuda\n tokens_tensor = tokens_tensor.to('cpu')\n predicted_index = indexed_tokens[-1]\n\n truncate_action = False\n # Predict all tokens\n with torch.no_grad():\n while predicted_index not in break_tokens:\n outputs = model(tokens_tensor)\n predictions = outputs[0]\n predicted_index = torch.argmax(predictions[0, -1, :]).item()\n indexed_tokens += [predicted_index]\n if len(indexed_tokens) > MAX_LEN:\n break\n\n predicted_text = tokenizer.decode(indexed_tokens)\n if '<|action|>' in predicted_text:\n generated_actions = predicted_text.split('<|action|>')[-1].split('<|endofaction|>')[0].split(',')\n new_actions = []\n for a 
in generated_actions:\n if a in ['', ' ']:\n continue\n new_actions.append(a.strip())\n len_actions = len(new_actions)\n if len(list(set(new_actions))) > len(new_actions) or (len_actions > 10 and not truncate_action):\n # ipdb.set_trace()\n actions = '<|action|> {} <|endofaction|>'.format(' , '.join(list(set(new_actions))))\n indexed_tokens = tokenizer.encode('{} {}'.format(predicted_text.split('<|action|>')[0], actions))\n # print('action truncated')\n truncate_action = True\n tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')\n\n predicted_text = tokenizer.decode(indexed_tokens)\n print('\\npredicted_text:\\n', predicted_text)\n\n action_text = get_action_new(predicted_text)\n print('\\naction_text:\\n', action_text)\n\n venuename = get_venuename(action_text)\n #print('\\nVenuename:\\n', venuename)\n\n response_text = get_response_new(predicted_text, venuename)\n print('\\nresponse_text:\\n', response_text)\n #print(predicted_text)\n\n\n\n open_spans = get_open_span(action_text)\n print('\\open_spans:\\n', open_spans)\n\n # handling images\n\n if venuename:\n result['has_image'] = 'True'\n images = img_handler_obj.get_imgs_url(query=venuename + \"in Singapore\", num_of_img=5)\n result['image'] = images[0]\n print(images)\n\n delex_system = '{}'.format(response_text)\n context = context + ' ' + delex_system\n\n turn += 1\n prev_beliefs = beliefs\n\n result['response'] = response_text\n session['result'] = result\n return result\n\n\n \[email protected]('/')\ndef index():\n session['state'] = 'start'\n session['label'] = ''\n session['result'] = {}\n return render_template('index2.html')\n\[email protected]('/send_message', methods=['POST'])\ndef send_message():\n message = request.form['message']\n response_text = Create_message(message)\n\n \n #print('\\nRESPONSE TEXT ', response_text)\n return jsonify(response_text)\n\n\n\n\n"
] | [
[
"torch.argmax",
"torch.no_grad",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jssprz/attentive-visual-semantic-specialized-network-for-video-captioning | [
"00815884ba892c00db2d3778bd0083618ff6d2d7"
] | [
"test.py"
] | [
"import os\nimport argparse\nimport pickle\n\nfrom utils import decode_from_tokens\nfrom vocabulary import Vocabulary\nfrom configuration_file import ConfigurationFile\nfrom model.encoder import Encoder\nfrom model.decoder import AVSSNDecoder\n\nimport h5py\nimport torch\nimport numpy as np\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate captions por test samples')\n parser.add_argument('-chckpt', '--checkpoint_path', type=str, default='pretrain/chckpt.pt',\n help='Set the path to pre-trained model (default is pretrain/chckpt.pt).')\n parser.add_argument('-data', '--dataset_folder', type=str, default='data/MSVD',\n help='Set the path to dataset folder (default is data/MSVD).')\n parser.add_argument('-out', '--output_folder', type=str, default='results/MSVD',\n help='Set the path to output folder (default is results/MSVD).')\n\n args = parser.parse_args()\n\n # load vocabulary\n with open(os.path.join(args.dataset_folder, 'corpus.pkl'), \"rb\") as f:\n corpus = pickle.load(f)\n idx2word_dict = corpus[4]\n vocab = Vocabulary.from_idx2word_dict(idx2word_dict, False)\n print('Size of vocabulary: {}'.format(len(vocab)))\n\n # Pretrained Embedding\n pretrained_embedding = torch.Tensor(corpus[5])\n\n #max_frames = 20 #30\n cnn_feature_size = 2048\n c3d_feature_size = 4096\n i3d_feature_size = 400\n res_eco_features_size = 3584\n projected_size = 512\n hidden_size = 1024 # Number of hidden layer units of the cyclic network\n mid_size = 128 # The middle of the boundary detection layer represents the dimension\n\n n_tags = 300\n global_tagger_hidden_size = 1024\n specific_tagger_hidden_size = 128\n hidden_size = 1024\n embedding_size = 300 #1024\n rnn_in_size = 300 #1024\n rnn_hidden_size = 1024\n\n config = ConfigurationFile(os.path.join(args.dataset_folder, 'config.ini'), 'attn-vscn-max')\n\n # Models\n encoder = Encoder(cnn_feature_size=cnn_feature_size,\n c3d_feature_size=c3d_feature_size,\n i3d_feature_size=i3d_feature_size,\n n_tags=n_tags,\n hidden_size=hidden_size,\n global_tagger_hidden_size=global_tagger_hidden_size,\n specific_tagger_hidden_size=specific_tagger_hidden_size,\n n_layers=config.encoder_num_layers,\n input_dropout_p=config.encoder_dropout_p,\n rnn_dropout_p=config.encoder_dropout_p,\n bidirectional=config.encoder_bidirectional,\n rnn_cell=config.encoder_rnn_cell,\n device='cpu')\n\n decoder = AVSSNDecoder(in_seq_length=config.max_frames, \n out_seq_length=config.max_words,\n n_feats=res_eco_features_size + 512,\n n_tags=n_tags,\n embedding_size=embedding_size,\n pretrained_embedding=pretrained_embedding,\n hidden_size=hidden_size, \n rnn_in_size=rnn_in_size, \n rnn_hidden_size=rnn_hidden_size,\n vocab=vocab,\n device='cpu',\n rnn_cell=config.decoder_rnn_cell,\n encoder_num_layers=config.encoder_num_layers,\n encoder_bidirectional=config.encoder_bidirectional,\n num_layers=config.decoder_num_layers,\n dropout_p=config.decoder_dropout_p,\n beam_size=config.decoder_beam_size,\n temperature=config.decoder_temperature, \n train_sample_max=config.decoder_train_sample_max,\n test_sample_max=config.decoder_test_sample_max,\n beam_search_logic = config.decoder_beam_search_logic)\n\n # Checkpoint\n checkpoint = torch.load(args.checkpoint_path, map_location='cpu')\n\n # 1. 
filter out unnecessary keys for encoder\n chckpt_dict = {k: v for k, v in checkpoint['encoder'].items() if k not in ['fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias']}\n encoder_dict = encoder.state_dict()\n encoder_dict.update(chckpt_dict)\n\n encoder.load_state_dict(encoder_dict)\n decoder.load_state_dict(checkpoint['decoder'])\n\n #load test set features\n test_vidxs = sorted(list(set(corpus[2][1])))\n\n with h5py.File(os.path.join(args.dataset_folder, config.features_path), 'r') as feats_file:\n print('loading visual feats...')\n dataset = feats_file[config.dataset_name]\n cnn_feats = torch.from_numpy(dataset['cnn_features'][test_vidxs]).float()\n c3d_feats = torch.from_numpy(dataset['c3d_features'][test_vidxs]).float()\n cnn_globals = torch.zeros(cnn_feats.size(0), 512) # torch.from_numpy(dataset['cnn_globals'][test_vidxs]).float()\n cnn_sem_globals = torch.from_numpy(dataset['cnn_sem_globals'][test_vidxs]).float()\n f_counts = dataset['count_features'][test_vidxs]\n print('visual feats loaded')\n\n res_eco_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'resnext_eco.npy'))[test_vidxs])\n tags_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'tag_feats.npy'))[test_vidxs])\n\n encoder.eval()\n decoder.eval()\n\n with torch.no_grad():\n video_encoded = encoder(cnn_feats, c3d_feats, cnn_globals, tags_globals, res_eco_globals)\n logits, tokens = decoder(video_encoded, None, teacher_forcing_ratio=0)\n\n scores = logits.max(dim=2)[0].mean(dim=1)\n\n confidences, sentences = [], []\n for score, seq in zip(scores, tokens):\n s = decode_from_tokens(seq, vocab)\n print(score, s)\n sentences.append(s)\n confidences.append(score)\n\n if not os.path.exists(args.output_folder):\n os.makedirs(args.output_folder)\n\n with open(os.path.join(args.output_folder, 'predictions.txt'), 'w') as fo:\n for vidx, sentence in zip(test_vidxs, sentences):\n fo.write(f'{vidx}\\t{sentence}\\n')\n"
] | [
[
"torch.no_grad",
"torch.from_numpy",
"torch.Tensor",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dnidever/apogee | [
"83ad7496a0b4193df9e2c01b06dc36cb879ea6c1",
"83ad7496a0b4193df9e2c01b06dc36cb879ea6c1",
"83ad7496a0b4193df9e2c01b06dc36cb879ea6c1",
"83ad7496a0b4193df9e2c01b06dc36cb879ea6c1",
"83ad7496a0b4193df9e2c01b06dc36cb879ea6c1",
"83ad7496a0b4193df9e2c01b06dc36cb879ea6c1"
] | [
"external/synple/synple.py",
"python/apogee/dr.py",
"python/apogee/utils/spectra.py",
"python/apogee/aspcap/elem.py",
"python/apogee/aspcap/persist.py",
"python/apogee/apred/lsfmap.py"
] | [
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"Python wrapper for synspec \n\nCalculation of synthetic spectra of stars and convolution with a rotational/Gaussian kernel.\nMakes the use of synspec simpler, and retains the main functionalities (when used from\npython). The command line interface is even simpler but fairly limited. \n\nFor information on\nsynspec visit http://nova.astro.umd.edu/Synspec43/synspec.html.\n\nExample\n-------\n\nTo compute the solar spectrum between 6160 and 6164 angstroms, using a model atmosphere in\nthe file sun.mod (provided with the distribution), with the output going into the file\nsun.syn\n\n $synple.py sun.mod 6160. 6164. \n\nTo force a micro of 1.1 km/s, and convolve the spectrum with a Gaussian kernel with a fwhm \nof 0.1 angstroms\n\n $synple.py sun.mod 6160. 6164. 1.1 0.1\n\nTo perform the calculations above in python and compare the emergent normalized profiles\n\n >>> from synple import syn\n >>> x, y, z = syn('sun.mod', (6160.,6164.))\n >>> x2, y2, z2 = syn('sun.mod', (6160.,6164.), vmicro=1.1, fwhm=0.1)\n\n in plain python\n >>> import matplotlib.pyplot as plt\n >>> plt.ion()\n >>> plt.plot(x,y/z, x2, y2/z2)\n\n or ipython\n In [1]: %pylab\n In [2]: plot(x,y/z, x2, y2/z2)\n\n\n\"\"\"\nimport os\nimport sys\nimport subprocess\nimport numpy as np\nimport glob\nimport time\nimport copy\nimport gzip\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\nfrom itertools import product\n\n\n#configuration\n#synpledir = /home/callende/synple\nsynpledir = os.path.dirname(os.path.realpath(__file__))\n\n\n#relative paths\nmodeldir = synpledir + \"/models\"\nmodelatomdir = synpledir + \"/data\"\nlinelistdir = synpledir + \"/linelists\"\nbindir = synpledir + \"/bin\"\nsynspec = bindir + \"/s54d\"\nrotin = bindir + \"/rotin3\"\n\n\n#other stuff\nclight = 299792.458\nepsilon = 0.6 #clv coeff.\nbolk = 1.38054e-16 # erg/ K\nzero = \" 0 \"\none = \" 1 \"\ntwo = \" 2 \"\n\ndef syn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None, \n compute=True, tmpdir=None):\n\n \"\"\"Computes a synthetic spectrum\n\n Interface to the fortran codes synspec/rotin that only requires two mandatory inputs: \n a model atmosphere (modelfile) and the limits of the spectral range (wrange). The code \n recognizes Kurucz, MARCS and Phoenix LTE model atmospheres. The sampling of the frequency \n grid is chosen internally, but can also be set by adding a constant wavelength step (dw).\n The abundances and microturbulence velocity can be set through the abu and vmicro \n parameters, but default values will be taken from the model atmosphere. Rotational and \n Gaussian broadening can be introduced (vrot and fwhm parameters). The computed spectrum \n can be written to a file (save == True). 
\n\n\n Parameters\n ----------\n modelfile : str\n file with a model atmosphere\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float, optional\n wavelength step for the output fluxes\n this will be the maximum interval for the radiative \n transfer, and will trigger interpolation at the end\n (default is None for automatic selection)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n vmicro: float, optional\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen (N(X)/N(H))\n (default taken from input model atmosphere)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n vrot: float\n projected rotational velocity (km/s)\n (default 0.)\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n fwhm: float\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectrum to a file (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n but see the parameter synfile to change that\n synfile: str\n when save is True, this can be used to set the name of the output file\n (default None)\n compute: bool\n set to False to skip the actual synspec run, triggering clean=False\n (default True)\n tmpdir: string\n when is not None a temporary directory with this name will be created to store\n the temporary synspec input/output files, and the synple log file (usually named\n syn.log) will be named as tmpdir_syn.log.\n\n Returns\n -------\n wave: numpy array of floats\n wavelengths (angstroms)\n flux: numpy array of floats\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats\n continuum flux (same units as flux)\n\n \"\"\"\n \n #basic checks on the line list and model atmosphere\n checksynspec(linelist,modelfile)\n\n #read model atmosphere\n atmostype, teff, logg, vmicro2, abu2, nd, atmos = read_model(modelfile)\n\n if vmicro == None: vmicro = vmicro2\n if abu == None: abu = abu2\n if dw == None: \n #space = 1e-2 \n space = np.mean(wrange) * np.sqrt(9.12e-15 * np.min(atmos['t']) + vmicro** 2) / clight / 3.\n else: \n space = dw\n\n\n #check input parameters are valid\n imode = checkinput(wrange, vmicro, linelist)\n \n\n print ('teff,logg,vmicro=',teff,logg,vmicro)\n #print ('abu=',abu)\n #print (len(abu))\n #print ('nd=',nd)\n #print ('linelist=',linelist)\n #print ('wrange=',wrange)\n\n logfile = 'syn.log'\n if tmpdir is not None:\n startdir = os.getcwd()\n logfile = os.path.join(startdir,os.path.split(tmpdir)[-1]) + \"_\" + logfile\n try:\n os.mkdir(tmpdir)\n except OSError:\n print( \"cannot create tmpdir %s \" % (tmpdir) )\n 
try:\n os.chdir(tmpdir)\n except OSError:\n print(\"cannot enter tmpdir %s \" % (tmpdir) )\n\n\n cleanup()\n\n writetas('tas',nd,linelist) #non-std param. file\n write5(teff,logg,abu,atom) #abundance/opacity file\n write8(teff,logg,nd,atmos,atmostype) #model atmosphere\n write55(wrange,space,imode,2,strength,vmicro,linelist,atmostype) #synspec control file\n create_links(linelist) #auxiliary data\n\n if compute == False:\n\n wave = None\n flux = None \n cont = None\n\n else:\n\n synin = open('fort.5')\n synout = open(logfile,'w')\n\n start = time.time()\n p = subprocess.Popen([synspec], stdin=synin, stdout = synout, stderr= synout, shell=True)\n p.wait()\n\n synout.flush()\n synout.close()\n synin.close()\n\n assert (os.path.isfile('fort.7')), 'Error: I cannot read the file *fort.7* in '+tmpdir+' -- looks like synspec has crashed, please look at syn.log'\n\n assert (os.path.isfile('fort.17')), 'Error: I cannot read the file *fort.17* in '+tmpdir+' -- looks like synspec has crashed, please look at syn.log'\n\n\n wave, flux = np.loadtxt('fort.7', unpack=True)\n wave2, flux2 = np.loadtxt('fort.17', unpack=True)\n if dw == None and fwhm <= 0. and vrot <= 0.: cont = np.interp(wave, wave2, flux2)\n end = time.time()\n print('syn ellapsed time ',end - start, 'seconds')\n\n if fwhm > 0. or vrot > 0.:\n start = time.time()\n print( vrot, fwhm, space, steprot, stepfwhm)\n wave, flux = call_rotin (wave, flux, vrot, fwhm, space, steprot, stepfwhm, clean=False, reuseinputfiles=True)\n if dw == None: cont = np.interp(wave, wave2, flux2)\n end = time.time()\n print('convol ellapsed time ',end - start, 'seconds')\n\n if (dw != None): \n nsamples = int((wrange[1] - wrange[0])/dw) + 1\n wave3 = np.arange(nsamples)*dw + wrange[0]\n #flux = np.interp(wave3, wave, flux)\n flux = interp_spl(wave3, wave, flux) \n cont = np.interp(wave3, wave2, flux2)\n wave = wave3\n\n if clean == True: cleanup()\n\n if tmpdir is not None:\n try:\n os.chdir(startdir)\n except OSError:\n print(\"cannot change directory from tmpdir %s to startdir %s\" % (tmpdir,startdir) ) \n if clean == True:\n try:\n os.rmdir(tmpdir)\n except OSError:\n print(\"cannot remove directory tmpdir %s\" % (tmpdir) )\n \n\n if save == True:\n if synfile == None: \n tmpstr = os.path.split(modelfile)[-1]\n synfile = tmpstr[:tmpstr.rfind('.')]+'.syn'\n np.savetxt(synfile,(wave,flux,cont))\n\n\n return(wave, flux, cont)\n\n\ndef mpsyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', vrot=0.0, fwhm=0.0, \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None, \n compute=True, nthreads=1):\n\n \"\"\"Computes a synthetic spectrum, splitting the spectral range in nthreads parallel calculations\n\n Wrapper for syn, using multiprocessing, to speed-up the calculation of a broad spectral range\n\n Parameters\n ----------\n modelfile : str\n file with a model atmosphere\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float, optional\n wavelength step for the output fluxes\n this will be the maximum interval for the radiative \n transfer, and will trigger interpolation at the end\n (default is None for automatic selection)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n vmicro: float, optional\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen 
(N(X)/N(H))\n (default taken from input model atmosphere)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n vrot: float\n projected rotational velocity (km/s)\n (default 0.)\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n fwhm: float\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectrum to a file (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n but see the parameter synfile to change that\n synfile: str\n when save is True, this can be used to set the name of the output file\n (default None)\n compute: bool\n set to False to skip the actual synspec run, triggering clean=False\n (default True)\n nthreads: int\n choose the number of cores to use in the calculation\n (default 1, 0 has the meaning that the code should take all the cores available)\n\n Returns\n -------\n wave: numpy array of floats\n wavelengths (angstroms)\n flux: numpy array of floats\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats\n continuum flux (same units as flux)\n\n \"\"\"\n\n from multiprocessing import Pool,cpu_count\n\n\n if nthreads == 0: \n nthreads = cpu_count()\n\n delta = (wrange[1]-wrange[0])/nthreads\n pars = []\n for i in range(nthreads):\n\n wrange1 = (wrange[0]+delta*i,wrange[0]+delta*(i+1))\n\n pararr = [modelfile, wrange1, dw, strength, vmicro, abu, \\\n linelist, atom, vrot, fwhm, \\\n steprot, stepfwhm, clean, save, synfile, \n compute, 'par'+str(i) ]\n pars.append(pararr)\n\n pool = Pool(nthreads)\n results = pool.starmap(syn,pars)\n pool.close()\n pool.join()\n\n x = results[0][0]\n y = results[0][1]\n z = results[0][2]\n\n if len(results) > 1:\n for i in range(len(results)-1):\n x = np.concatenate((x, results[i+1][0][1:]) )\n y = np.concatenate((y, results[i+1][1][1:]) )\n z = np.concatenate((z, results[i+1][2][1:]) )\n\n return(x,y,z)\n\ndef raysyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None, \n compute=True, nthreads=1):\n\n \"\"\"Computes a synthetic spectrum, splitting the spectral range in nthreads parallel calculations \n\n Wrapper for syn, using ray, to speed-up the calculation of a broad spectral range\n\n Parameters\n ----------\n modelfile : str\n file with a model atmosphere\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float, optional\n wavelength step for the output fluxes\n this will be the maximum interval for the radiative \n transfer, and will trigger interpolation at the end\n (default is None for automatic selection)\n strength: float, optional\n threshold in the 
line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n vmicro: float, optional\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen (N(X)/N(H))\n (default taken from input model atmosphere)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n vrot: float\n projected rotational velocity (km/s)\n (default 0.)\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n fwhm: float\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectrum to a file (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n but see the parameter synfile to change that\n synfile: str\n when save is True, this can be used to set the name of the output file\n (default None)\n compute: bool\n set to False to skip the actual synspec run, triggering clean=False\n (default True)\n nthreads: int\n choose the number of cores to use in the calculation\n (default 1, 0 has the meaning that the code should take all the cores available)\n\n Returns\n -------\n wave: numpy array of floats\n wavelengths (angstroms)\n flux: numpy array of floats\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats\n continuum flux (same units as flux)\n\n \"\"\"\n\n import psutil\n import ray\n\n @ray.remote\n def fun(vari,cons):\n\n wrange,tmpdir = vari\n\n modelfile,dw,strength,vmicro,abu,linelist, \\\n atom,vrot,fwhm,steprot,stepfwhm,clean,save,synfile,compute = cons\n\n x, y, z = syn(modelfile, wrange, dw, strength, vmicro, abu, \\\n linelist, atom, vrot, fwhm, \\\n steprot, stepfwhm, clean, save, synfile, \n compute, tmpdir)\n\n return(x,y,z)\n\n\n if nthreads == 0: \n nthreads = psutil.cpu_count(logical=False)\n\n print('nthreads=',nthreads)\n\n ray.init(num_cpus=nthreads)\n\n rest = [ modelfile,dw,strength,vmicro,abu,linelist, \\\n atom,vrot,fwhm,steprot,stepfwhm,clean,save,synfile,compute ]\n\n constants = ray.put(rest)\n\n delta = (wrange[1]-wrange[0])/nthreads\n pars = []\n for i in range(nthreads):\n\n wrange1 = (wrange[0]+delta*i,wrange[0]+delta*(i+1))\n folder = 'par'+str(i)\n\n pararr = [wrange1, 'par'+str(i) ]\n pars.append(pararr)\n\n results = ray.get([fun.remote(pars[i],constants) for i in range(nthreads)])\n\n x = results[0][0]\n y = results[0][1]\n z = results[0][2]\n\n if len(results) > 1:\n for i in range(len(results)-1):\n x = np.concatenate((x, results[i+1][0][1:]) )\n y = np.concatenate((y, results[i+1][1][1:]) )\n z = np.concatenate((z, results[i+1][2][1:]) )\n\n return(x,y,z)\n\n\n\ndef multisyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \\\n vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \\\n 
linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=None, nthreads=1):\n\n \"\"\"Computes synthetic spectra for a list of files. The values of vmicro, vrot, \n fwhm, and nfe can be iterables. Whether or not dw is specified the results will be \n placed on a common wavelength scale by interpolation. When not specified, dw will be \n chosen as appropriate for the first model in modelfiles.\n\n\n Parameters\n ----------\n modelfiles : list of str\n files with model atmospheres\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float\n wavelength step for the output fluxes.\n Unlike in 'syn' this will not be used to set the maximum wavelength step for \n synthesizing any of the spectra; the appropriate step will be chosen dynamically.\n Unlike in 'syn', interpolation to a constant step will always be done\n (default is None for automatic selection based on the first model of the list)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen (N(X)/N(H))\n (default taken from input model atmosphere)\n vmicro: float, optional, can be an iterable\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n vrot: float, can be an iterable\n projected rotational velocity (km/s)\n (default 0.)\n fwhm: float, can be an iterable\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n nfe: float, can be an iterable\n [N/Fe] nitrogen abundance change from the one specified in the array 'abu' (dex)\n (default 0.)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. 
for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectra to files (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n if multiple values of vmicro, vrot, fwhm or nfe are used, their values are\n prepended to the file names \n (default None)\n nthreads: int\n choose the number of cores to use in the calculation\n (default 1, 0 has the meaning that the code should take all the cores available)\n\n\n\n Returns\n -------\n wave: numpy array of floats (1D)\n wavelengths (angstroms)\n flux: numpy array of floats (2D -- as many rows as models input)\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats (2D -- as many rows as models input)\n continuum flux (same units as flux)\n\n \"\"\"\n\n\n #when vmicro, vrot, fwhm or nitrogen are not iterables, we create ones, otherwise we copy them\n try: \n nvmicro = len(vmicro)\n vmicros = vmicro\n except TypeError:\n nvmicro = 1\n vmicros = [ vmicro ] \n try: \n nvrot = len(vrot)\n vrots = vrots\n except TypeError:\n nvrot = 1\n vrots = [ vrot ] \n try: \n nfwhm = len(fwhm)\n fwhms = fwhm\n except TypeError:\n nfwhm = 1\n fwhms = [ fwhm ] \n try: \n nnfe = len(nfe)\n nnfes = nfe\n except TypeError:\n nnfe = 1\n nfes = [ nfe ] \n\n assert (len(modelfiles) > 0), 'multisyn needs at least one model to work with'\n wave = None\n flux = None\n cont = None\n\n for entry in modelfiles:\n for vmicro1 in vmicros:\n for nfe1 in nfes:\n\n abu1 = copy.copy(abu) \n\n #if need be, adjust nitrogen abundance according to nfe\n if (abs(nfe1) > 1e-7):\n if (abu1 == None):\n checksynspec(linelist,entry)\n atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)\n abu1[6] = abu1[6] * 10.**nfe1\n\n x, y, z = mpsyn(entry, wrange, dw=None, strength=strength, \\\n vmicro=vmicro1, abu=abu1, linelist=linelist, atom=atom, \\\n clean=clean, save=save, nthreads=nthreads)\n\n space = np.mean(np.diff(x))\n \n for vrot1 in vrots:\n for fwhm1 in fwhms:\n\n if fwhm1> 0. or vrot1 > 0.:\n start = time.time()\n print( entry, vmicro1, nfe1, vrot1, fwhm1, space)\n x2, y2 = call_rotin (x, y, vrot, fwhm, space, steprot, stepfwhm, \\\n clean=False, reuseinputfiles=True)\n z2 = np.interp(x2, x, z)\n end = time.time()\n print('convol ellapsed time ',end - start, 'seconds')\n else:\n x2, y2, z2 = x, y, z\n\n\n if entry == modelfiles[0] and vmicro1 == vmicros[0] and vrot1 == vrots[0] and fwhm1 == fwhms[0] and nfe1 == nfes[0]:\n if dw == None: dw = np.median(np.diff(x2))\n nsamples = int((wrange[1] - wrange[0])/dw) + 1\n wave = np.arange(nsamples)*dw + wrange[0]\n #flux = np.interp(wave, x2, y2)\n flux = interp_spl(wave, x2, y2)\n cont = np.interp(wave, x2, z2)\n else:\n #flux = np.vstack ( (flux, np.interp(wave, x, y) ) )\n flux = np.vstack ( (flux, interp_spl(wave, x, y) ) )\n cont = np.vstack ( (cont, np.interp(wave, x, z) ) )\n\n\n return(wave, flux, cont)\n\n\n\ndef polysyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \\\n vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=None):\n\n \"\"\"Sets up a directory tree for computing synthetic spectra for a list of files in \n parallel. The values of vmicro, vrot, fwhm, and nfe can be iterables. 
Whether or not \n dw is specified the results will be placed on a common wavelength scale by interpolation.\n When not specified, dw will be chosen as appropriate for the first model in modelfiles.\n\n\n Parameters\n ----------\n modelfiles : list of str\n files with model atmospheres\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float\n Unlike in 'syn' this will not be used to set the maximum wavelength step for \n synthesizing any of the spectra; the appropriate step will be chosen dynamically.\n Unlike in 'syn', interpolation to a constant step will always be done\n (default is None for automatic selection based on the first model of the list)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen (N(X)/N(H))\n (default taken from input model atmosphere)\n vmicro: float, optional, can be an iterable\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n vrot: float, can be an iterable\n projected rotational velocity (km/s)\n (default 0.)\n fwhm: float, can be an iterable\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n nfe: float, can be an iterable\n [N/Fe] nitrogen abundance change from the one specified in the array 'abu' (dex)\n (default 0.)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. 
for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectra to files (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n if multiple values of vmicro, vrot, fwhm or nfe are used, their values are\n prepended to the file names \n (default None)\n\n\n Returns\n -------\n wave: numpy array of floats (1D)\n wavelengths (angstroms)\n flux: numpy array of floats (2D -- as many rows as models input)\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats (2D -- as many rows as models input)\n continuum flux (same units as flux)\n\n \"\"\"\n\n #synspec does not currently run in parallel\n nthreads = 1\n\n\n #when vmicro, vrot, fwhm or nitrogen are not iterables, we create ones, otherwise we copy them\n try: \n nvmicro = len(vmicro)\n vmicros = vmicro\n except TypeError:\n nvmicro = 1\n vmicros = [ vmicro ] \n try: \n nvrot = len(vrot)\n vrots = vrots\n except TypeError:\n nvrot = 1\n vrots = [ vrot ] \n try: \n nfwhm = len(fwhm)\n fwhms = fwhm\n except TypeError:\n nfwhm = 1\n fwhms = [ fwhm ] \n try: \n nnfe = len(nfe)\n nnfes = nfe\n except TypeError:\n nnfe = 1\n nfes = [ nfe ] \n\n\n idir = 0\n for entry in modelfiles:\n for vmicro1 in vmicros:\n for nfe1 in nfes:\n\n idir = idir + 1\n dir = ( \"hyd%07d\" % (idir) )\n try:\n os.mkdir(dir)\n except OSError:\n print( \"cannot create dir hyd%07d\" % (idir) )\n try:\n os.chdir(dir)\n except OSError:\n print( \"cannot change dir to hyd%07d\" % (idir) )\n\n if entry == 'missing':\n pass\n else:\n #setup the slurm script\n sfile = dir+\".job\"\n now=time.strftime(\"%c\")\n s = open(sfile ,\"w\")\n s.write(\"#!/bin/bash \\n\")\n s.write(\"#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \\n\")\n s.write(\"#This script was written by synple on \"+now+\" \\n\") \n s.write(\"#SBATCH -J \"+dir+\" \\n\")\n s.write(\"#SBATCH -o \"+dir+\"_%j.out\"+\" \\n\")\n s.write(\"#SBATCH -e \"+dir+\"_%j.err\"+\" \\n\")\n s.write(\"#SBATCH -n \"+str(nthreads)+\" \\n\")\n s.write(\"#SBATCH -t 04:00:00\"+\" \\n\") #hh:mm:ss\n s.write(\"#SBATCH -D \"+os.path.abspath(os.curdir)+\" \\n\")\n s.write(\"#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \\n\\n\\n\")\n\n\n abu1 = copy.copy(abu)\n\n #if need be, adjust nitrogen abundance according to nfe\n if (abs(nfe1) > 1e-7):\n if (abu1 == None):\n checksynspec(linelist,entry)\n atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)\n abu1[6] = abu1[6] * 10.**nfe1\n\n x, y, z = syn(entry, wrange, dw=None, strength=strength, vmicro=vmicro1, \\\n abu=abu1, linelist=linelist, atom=atom, compute=False)\n\n s.write(synspec+\" < \"+\"fort.5\"+\"\\n\")\n\n si = open(\"fort.55\",'r')\n for i in range(6): line = si.readline()\n entries = line.split()\n space = float(entries[5])\n si.close()\n \n iconv = 0\n for vrot1 in vrots:\n for fwhm1 in fwhms:\n\n print('iconv=',iconv)\n\n iconv = iconv + 1\n inconv = (\"%07dfort.5\" % (iconv) )\n outconv = (\"'%07dfort.7'\" % (iconv) )\n if fwhm1> 0. 
or vrot1 > 0.:\n f = open(inconv,'w')\n f.write( ' %s %s %s \\n' % (\"'fort.7'\", \"'fort.17'\", outconv) )\n f.write( ' %f %f %f \\n' % (vrot1, space, steprot) )\n f.write( ' %f %f \\n' % (fwhm1, stepfwhm) )\n print('stepfwhm=',stepfwhm)\n f.write( ' %f %f %i \\n' % (wrange[0], wrange[1], 0) )\n f.close()\n s.write(rotin+\" < \"+inconv+\"\\n\")\n else:\n s.write(\"cp \"+\" fort.7 \"+outconv[1:-1]+\"\\n\")\n\n s.close()\n os.chmod(sfile ,0o755)\n\n try:\n os.chdir('..')\n except OSError:\n print( \"cannot exit dir hyd%07d\" % (idir) )\n\n\n return(None,None,None)\n\n\n\ndef polyopt(wrange=(9.e2,1.e5),dw=0.1,strength=1e-3, linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], \\\n tlt = (20,3.08,0.068), tlrho = (20,-14.0,0.59), \\\n tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), \\\n tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), tvmicro=(1,1.0,0.0), \\\n zexclude=None, atom='ap18'):\n\n \"\"\"Sets up a directory tree for computing opacity tables for TLUSTY. The table collection forms \n a regular grid defined by triads in various parameters. Each triad has three values (n, llimit, step)\n that define an array x = np.range(n)*step + llimit. Triads in teff (tteff) and logg\n (tlogg) are mandatory. Triads in [Fe/H] (tfeh), [alpha/Fe] (tafe), [C/Fe] (tcfe), \n [N/Fe] (tnfe), [O/Fe] (tofe), [r/Fe] (rfe), and [s/Fe] (sfe) are optional since \n arrays with just one 0.0 are included by default.\n\n Parameters\n ----------\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float\n Unlike in 'syn' this will not be used to set the maximum wavelength step for \n synthesizing any of the spectra; the appropriate step will be chosen dynamically.\n Unlike in 'syn', interpolation to a constant step will always be done\n (default is None for automatic selection based on the first model of the list)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n tlt: tuple\n log10(T) triad (n, llimit, step) for opacity grid\n (default values chosen for grid lt = np.arange(20)*0.068 + 3.08,\n to cover the range in the DR16 APOGEE MARCS grids)\n tlrho: tuple\n log10(rho) triad (n, llimit, step) for opacity grid\n (default values chosen for grid lrho = np.arange(20)*0.59 -14.0,\n to cover the range in the DR16 APOGEE MARCS grids)\n tteff: tuple\n Teff triad (n, llimit, step)\n tlogg: tuple\n logg triad (n, llimit, step)\n tfeh: tuple\n [Fe/H] triad\n tafe: tuple\n [alpha/Fe] triad \n tcfe: tuple\n [C/Fe] triad\n tnfe: tuple\n [N/Fe] triad\n tofe: tuple\n [O/Fe] triad\n rfeh: tuple\n [r/Fe] triad (r-elements abundance ratio)\n sfeh: tuple\n [s.Fe] triad (s-elements abundance ratio)\n zexclude: list\n atomic numbers of the elements whose opacity is NOT to be\n included in the table\n (default None)\n\n \"\"\"\n\n #pynspec does not currently run in parallel\n nthreads = 1\n\n #expanding the triads t* into iterables\n try: \n nfeh = len(tfeh)\n assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'\n fehs = 
np.arange(tfeh[0])*tfeh[2] + tfeh[1]\n except TypeError:\n print('Error: feh triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nafe = len(tafe)\n assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'\n afes = np.arange(tafe[0])*tafe[2] + tafe[1]\n except TypeError:\n print('Error: afe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n ncfe = len(tcfe)\n assert (ncfe == 3), 'Error: cfe triad must have three elements (n, llimit, step)'\n cfes = np.arange(tcfe[0])*tcfe[2] + tcfe[1]\n except TypeError:\n print('Error: cfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nnfe = len(tnfe)\n assert (nnfe == 3), 'Error: nfe triad must have three elements (n, llimit, step)'\n nfes = np.arange(tnfe[0])*tnfe[2] + tnfe[1]\n except TypeError:\n print('Error: nfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nofe = len(tofe)\n assert (nofe == 3), 'Error: ofe triad must have three elements (n, llimit, step)'\n ofes = np.arange(tofe[0])*tofe[2] + tofe[1]\n except TypeError:\n print('Error: ofe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nrfe = len(trfe)\n assert (nrfe == 3), 'Error: rfe triad must have three elements (n, llimit, step)'\n rfes = np.arange(trfe[0])*trfe[2] + trfe[1]\n except TypeError:\n print('Error: rfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nsfe = len(tsfe)\n assert (nsfe == 3), 'Error: sfe triad must have three elements (n, llimit, step)'\n sfes = np.arange(tsfe[0])*tsfe[2] + tsfe[1]\n except TypeError:\n print('Error: sfe triad must have three elements (n, llimit, step)')\n return ()\n \n try: \n nvmicro = len(tvmicro)\n assert (nvmicro == 3), 'Error: vmicro triad must have three elements (n, llimit, step)'\n vmicros = np.arange(tvmicro[0])*tvmicro[2] + tvmicro[1]\n except TypeError:\n print('Error: vmicro triad must have three elements (n, llimit, step)')\n return ()\n \n\n #ranges for the opacity table\n try: \n nlt = len(tlt)\n assert (nlt == 3), 'Error: lt triad must have three elements (n, llimit, step)'\n lt = np.arange(tlt[0])*tlt[2] + tlt[1] #log10(T)\n except TypeError:\n print('Error: tlt triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nlrho = len(tlrho)\n assert (nlrho == 3), 'Error: lrho triad must have three elements (n, llimit, step)'\n lrho = np.arange(tlrho[0])*tlrho[2] + tlrho[1] #log10(density)\n except TypeError:\n print('Error: tlrho triad must have three elements (n, llimit, step)')\n return ()\n\n \n symbol, mass, sol = elements()\n z_metals = np.arange(97,dtype=int) + 3\n #Ar usually included among alphas in MARCS and not in Kurucz/Meszaros\n z_alphas = np.array([8,10,12,14,16,18,20,22],dtype=int) \n # rs increases: notes and data below from comments in the MARCS code (provided by B.Edvardsson) \n # Fractional r-process abundance for Ga-Bi (r+s simply assumed == 100%) | Date 2000-01-18\n # (Note: Ga-Sr (31-38) was just copied from Kaeppeler et al. 1989, below)\n # s-process from Stellar models: Arlandini C., Kaeppeler F., Wisshak K.,\n # Gallino R., Busso M., Straniero O., 1999, Astrophys J. 525, 886-900\n # Fractions corrected to the revised meteoritic abundances\n # of Grevesse N., Sauval A.J. 
1998, Space Science Review 85, 161-174 \n # -0.99 is assigned to unstable elements\n z_rs = np.arange(62,dtype=int) + 31\n rfrac= np.array([.43, .47, .81, .85, .39, .47, \n .41, .11, .08, .17, .15, .50,-.99, .68, .86, \n .54, .80, .48, .65, .35, .75, .83, .80, .80, \n .85, .19, .38, .23, .51, .44,-.99, .71, .93, \n .85, .93, .85, .92, .83, .87, .67, .80, .44, \n .59, .44, .91, .91, .99, .95, .94, .41, .24, \n .54, .95,-.99,-.99,-.99,-.99,-.99,-.99, 1.0, \n -.99, 1.0], dtype=float) \n\n\n\n idir = 0\n for feh in fehs:\n for afe in afes:\n for cfe in cfes:\n for nfe in nfes:\n for ofe in ofes:\n for rfe in rfes:\n for sfe in sfes: \n for vmicro in vmicros:\n \n print(feh,afe,cfe,nfe,ofe,rfe,sfe)\n\n idir = idir + 1\n dir = ( \"hyd%07d\" % (idir) )\n try:\n os.mkdir(dir)\n except OSError:\n print( \"cannot create dir hyd%07d\" % (idir) )\n try:\n os.chdir(dir)\n except OSError:\n print( \"cannot change dir to hyd%07d\" % (idir) )\n\n #check input parameters are valid\n imode = checkinput(wrange, vmicro, linelist)\n\n #setup the slurm script\n sfile = dir+\".job\"\n now=time.strftime(\"%c\")\n s = open(sfile ,\"w\")\n s.write(\"#!/bin/bash \\n\")\n s.write(\"#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \\n\")\n s.write(\"#This script was written by synple on \"+now+\" \\n\") \n s.write(\"#SBATCH -J \"+dir+\" \\n\")\n s.write(\"#SBATCH -o \"+dir+\"_%j.out\"+\" \\n\")\n s.write(\"#SBATCH -e \"+dir+\"_%j.err\"+\" \\n\")\n s.write(\"#SBATCH -n \"+str(nthreads)+\" \\n\")\n s.write(\"#SBATCH --ntasks-per-node \"+str(4)+\" \\n\")\n s.write(\"#SBATCH -t 48:00:00\"+\" \\n\") #hh:mm:ss\n s.write(\"#SBATCH -D \"+os.path.abspath(os.curdir)+\" \\n\")\n s.write(\"#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \\n\\n\\n\")\n\n \n abu = copy.copy(sol)\n\n if (abs(feh) > 1e-7): \n for i in range(len(z_metals)): \n abu[z_metals[i] - 1] = abu[z_metals[i] - 1] * 10.**feh\n if (abs(afe) > 1e-7): \n for i in range(len(z_alphas)):\n abu[z_alphas[i] - 1] = abu[z_alphas[i] - 1] * 10.**afe\n if (abs(cfe) > 1e-7): abu[5] = abu[5] * 10.**cfe\n if (abs(nfe) > 1e-7): abu[6] = abu[6] * 10.**nfe\n if (abs(ofe) > 1e-7): abu[7] = abu[7] * 10.**ofe\n if (abs(rfe) > 1e-7): \n for i in range(len(z_rs)): \n if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * rfrac[i] * 10.**rfe\n if (abs(sfe) > 1e-7): \n for i in range(len(z_rs)): \n if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * (1.0 - rfrac[i]) * 10.**sfe\n\n\n write55(wrange,dw=dw,imode=-3,hydprf=0, strength=strength, vmicro=vmicro, linelist=linelist)\n\n write5(9999.,9.9,abu,atom)\n \n writetas('tas',1,linelist)\n\n write2(lt,lrho,wrange,filename='opt.dat', \\\n strength=strength,inttab=1)\n\n if zexclude != None: \n write3(zexclude)\n \n create_links(linelist)\n \n s.write('time ' + synspec + \" < \"+\"fort.5\"+\"\\n\")\n s.close()\n os.chmod(sfile ,0o755)\n \n try:\n os.chdir('..')\n except OSError:\n print( \"cannot exit dir hyd%07d\" % (idir) )\t\t \n\n return()\n\n\n\n\ndef collect_marcs(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \\\n tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), \\\n ignore_missing_models=False):\n\n \"\"\"Collects all the MARCS models in modeldir that are part of a regular grid defined\n by triads in various parameters. Each triad has three values (n, llimit, step)\n that define an array x = np.range(n)*step + llimit. Triads in teff (tteff) and logg\n (tlogg) are mandatory. 
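# Illustrative sketch (added commentary, not part of the original synple code):
# a triad (n, llimit, step) expands into a grid axis exactly as done repeatedly
# below for teff, logg, [Fe/H], etc.; the triad values chosen here are hypothetical.
def _example_expand_triad(triad=(3, 4500.0, 250.0)):
    import numpy as np
    # n values, starting at llimit, spaced by step -> array([4500., 4750., 5000.])
    return np.arange(triad[0]) * triad[2] + triad[1]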
Triads in [Fe/H] (tfeh), [alpha/Fe] (tafe), [C/Fe] (tcfe), \n [N/Fe] (tnfe), [O/Fe] (tofe), [r/Fe] (rfe), and [s/Fe] (sfe) are optional since \n arrays with just one 0.0 are included by default.\n\n Parameters\n ----------\n modeldir: str\n directory where model atmosphere files are\n tteff: tuple\n Teff triad (n, llimit, step)\n tlogg: tuple\n logg triad (n, llimit, step)\n tfeh: tuple\n [Fe/H] triad\n tafe: tuple\n [alpha/Fe] triad \n tcfe: tuple\n [C/Fe] triad\n tnfe: tuple\n [N/Fe] triad\n tofe: tuple\n [O/Fe] triad\n rfeh: tuple\n [r/Fe] triad (r-elements abundance ratio)\n sfeh: tuple\n [s.Fe] triad (s-elements abundance ratio)\n ignore_missing_models: bool\n set to True to avoid stopping when a model is missing,\n in which case a None is entered in the returning list\n \n Returns\n -------\n files: list of str\n file names with MARCS models that are in modeldir and match\n the parameters in the requested grid\n\n \"\"\"\n\n #expanding the triads t* into iterables\n try: \n nteff = len(tteff)\n assert (nteff == 3), 'Error: Teff triad must have three elements (n, llimit, step)'\n teffs = np.arange(tteff[0])*tteff[2] + tteff[1]\n except TypeError:\n print('Error: Teff triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nlogg = len(tlogg)\n assert (nlogg == 3), 'Error: logg triad must have three elements (n, llimit, step)'\n loggs = np.arange(tlogg[0])*tlogg[2] + tlogg[1]\n except TypeError:\n print('Error: logg triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nfeh = len(tfeh)\n assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'\n fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]\n except TypeError:\n print('Error: feh triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nafe = len(tafe)\n assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'\n afes = np.arange(tafe[0])*tafe[2] + tafe[1]\n except TypeError:\n print('Error: afe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n ncfe = len(tcfe)\n assert (ncfe == 3), 'Error: cfe triad must have three elements (n, llimit, step)'\n cfes = np.arange(tcfe[0])*tcfe[2] + tcfe[1]\n except TypeError:\n print('Error: cfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nnfe = len(tnfe)\n assert (nnfe == 3), 'Error: nfe triad must have three elements (n, llimit, step)'\n nfes = np.arange(tnfe[0])*tnfe[2] + tnfe[1]\n except TypeError:\n print('Error: nfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nofe = len(tofe)\n assert (nofe == 3), 'Error: ofe triad must have three elements (n, llimit, step)'\n ofes = np.arange(tofe[0])*tofe[2] + tofe[1]\n except TypeError:\n print('Error: ofe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nrfe = len(trfe)\n assert (nrfe == 3), 'Error: rfe triad must have three elements (n, llimit, step)'\n rfes = np.arange(trfe[0])*trfe[2] + trfe[1]\n except TypeError:\n print('Error: rfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nsfe = len(tsfe)\n assert (nsfe == 3), 'Error: sfe triad must have three elements (n, llimit, step)'\n sfes = np.arange(tsfe[0])*tsfe[2] + tsfe[1]\n except TypeError:\n print('Error: sfe triad must have three elements (n, llimit, step)')\n return ()\n\n files = []\n\n fi = open('files.txt','w')\n\n for teff in teffs:\n for logg in loggs:\n for feh in fehs:\n for afe in afes:\n for cfe in cfes:\n for nfe in nfes:\n for ofe in ofes:\n for rfe 
in rfes:\n for sfe in sfes: \n \n print(teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe)\n code = 'm*_t*_x3'\n\n if logg >= 3.5: \n a1 = 'p' \n else: \n a1 = 's'\n\n filename = (\"%s%4i_g%+.1f_%s_z%+.2f_a%+.2f_c%+.2f_n%+.2f_o%+.2f_r%+.2f_s%+.2f.mod*\" % (a1,teff,logg,code,feh,afe,cfe,nfe,ofe,rfe,sfe) )\n\n file = glob.glob(os.path.join(modeldir,filename))\n\n if ignore_missing_models == False:\n assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir \n assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir\n else:\n if (len(file) == 0): files.append('missing')\n \n if (len(file) == 1): files.append(file[0])\n\n fi.write( \"%s %4i %+.1f %s %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f\\n\" % (files[-1],teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe) )\n\n\n\n fi.close()\n\n return(files)\n\ndef collect_k2odfnew(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \\\n ignore_missing_models=False):\n\n \"\"\"Collects all the ODFNEW Castelli/Kurucz models in modeldir that are part of a regular grid defined\n by triads in various parameters. Each triad has three values (n, llimit, step)\n that define an array x = np.range(n)*step + llimit. Triads in teff (tteff) and logg\n (tlogg) are mandatory. Triads in [Fe/H] (tfeh), and [alpha/Fe] (tafe) are optional since \n arrays with just one 0.0 are included by default. \n\n NOTE: There are ODFNEW models with only afe=[alpha/Fe]=0.0 or 0.4. The latter are used whenever\n afe takes values > 0.0, while the afe=0.0 models are used otherwise.\n\n Parameters\n ----------\n modeldir: str\n directory where model atmosphere files are\n tteff: tuple\n Teff triad (n, llimit, step)\n tlogg: tuple\n logg triad (n, llimit, step)\n tfeh: tuple\n [Fe/H] triad\n tafe: tuple\n [alpha/Fe] triad \n ignore_missing_models: bool\n set to True to avoid stopping when a model is missing,\n in which case a None is entered in the returning list\n \n Returns\n -------\n files: list of str\n file names with Kurucz ODFNEWS models that are in modeldir and match\n the parameters in the requested grid\n\n \"\"\"\n\n #expanding the triads t* into iterables\n try: \n nteff = len(tteff)\n assert (nteff == 3), 'Error: Teff triad must have three elements (n, llimit, step)'\n teffs = np.arange(tteff[0])*tteff[2] + tteff[1]\n except TypeError:\n print('Error: Teff triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nlogg = len(tlogg)\n assert (nlogg == 3), 'Error: logg triad must have three elements (n, llimit, step)'\n loggs = np.arange(tlogg[0])*tlogg[2] + tlogg[1]\n except TypeError:\n print('Error: logg triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nfeh = len(tfeh)\n assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'\n fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]\n except TypeError:\n print('Error: feh triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nafe = len(tafe)\n assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'\n afes = np.arange(tafe[0])*tafe[2] + tafe[1]\n except TypeError:\n print('Error: afe triad must have three elements (n, llimit, step)')\n return ()\n\n\n files = []\n\n fi = open('files.txt','w')\n\n for teff in teffs:\n for logg in loggs:\n for feh in fehs:\n for afe in afes:\n \n print(teff,logg,feh,afe)\n code = 'k2odfnew.dat'\n\n if afe > 0.0: \n a1 = 'a' \n else: \n a1 = ''\n\n if feh < 0.0:\n a2 = 'am'\n else:\n a2 = 'ap'\n\n filename = (\"t%05ig%.1f%s%02i%s\" % 
(teff,logg,a2,int(abs(feh)*10),a1+code) )\n\n file = glob.glob(os.path.join(modeldir,filename))\n\n\n if ignore_missing_models == False:\n assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir \n assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir\n else:\n if (len(file) == 0): files.append('missing')\n \n if (len(file) == 1): files.append(file[0])\n\n fi.write( \"%s %4i %+.1f %+.2f %+.2f \\n\" % (files[-1],teff,logg,feh,afe) )\n\n fi.close()\n\n return(files)\n\n\n\ndef getallt(modelfiles):\n\n \"\"\"Collects all the values for temperature, density and electron number density\n in a list of files with model atmospheres\n\n Parameters\n ----------\n modelfiles : list of str\n files with model atmospheres\n\n Returns\n -------\n t: list\n list of all temperatures in all the layers of the input model atmospheres \n rho: list\n list of all values of gas pressure in all the layers of the input model atmospheres\n \n ne: list\n list of all values of electron number density in all the layers of the input model atmospheres\n\n \"\"\"\n\n t = []\n rho = []\n ne = []\n\n for entry in modelfiles:\n print('reading ',entry)\n teff, logg, vmicro, abu, nd, atmos = read_marcs_model2(entry)\n #atmostype,teff,logg,vmicro,abu,nd,atmos = read_model(entry)\n for value in atmos['t']: t.append(value)\n for value in atmos['rho']: rho.append(value)\n for value in atmos['ne']: ne.append(value)\n\n return(t,rho,ne)\n\n\n\ndef call_rotin(wave=None, flux=None, vrot=0.0, fwhm=0.0, space=1e-2, steprot=0.0, stepfwhm=0.0, clean=True, reuseinputfiles=False):\n\n\n \"\"\"Convolves a synthetic spectrum with a rotation and/or Gaussian kernel\n\n Interface to the fortran code rotin.\n\n Parameters\n ----------\n wave: numpy array of floats\n wavelengths (angstroms)\n flux: numpy array of floats\n flux \n vrot: float\n projected rotational velocity (km/s)\n (default 0.)\n space: float, optional\n characteristic wavelength scale for variations in the spectrum (angstroms)\n (default is 1e-2)\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n fwhm: float\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. 
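# Hypothetical usage sketch (added commentary, not part of the original synple code):
# collect a small MARCS sub-grid and gather the (T, rho, Ne) values spanned by its
# layers, e.g. to help choose the tlt/tlrho triads passed to polyopt. The triad values
# are made up, and the call assumes the matching .mod files are present in modeldir.
def _example_scan_marcs_grid():
    files = collect_marcs(tteff=(2, 5000.0, 250.0), tlogg=(2, 4.5, 0.5),
                          ignore_missing_models=True)
    # collect_marcs inserts the string 'missing' for absent models when
    # ignore_missing_models is True, so filter those out before reading
    present = [entry for entry in files if entry != 'missing']
    t, rho, ne = getallt(present)
    return t, rho, ne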
for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the rotin\n temporary files (default Tr<ue)\n reuseinputfiles: bool\n set to take the input data from the output synspec file (fort.7) rather than \n from the input arrays (wave, flux)\n\n Returns\n -------\n wave2: numpy array of floats\n wavelengths (angstroms)\n flux2: numpy array of floats\n flux \n\n\n \"\"\"\n if reuseinputfiles == False:\n f = open('fort.7','w')\n f2 = open('fort.17','w')\n maxflux = np.max(flux)\n for i in range(len(wave)):\n f.write( ' %f %f \\n' % (wave[i], flux[i]) )\n f2.write( ' %f %f \\n' % (wave[i], maxflux) )\n f.close()\n f2.close()\n\n f = open('fort.5','w')\n f.write( ' %s %s %s \\n' % (\"'fort.7'\", \"'fort.17'\", \"'fort.11'\") )\n f.write( ' %f %f %f \\n' % (vrot, space, steprot) )\n f.write( ' %f %f \\n' % (fwhm, stepfwhm) )\n print('stepfwhm=',stepfwhm)\n f.write( ' %f %f %i \\n' % (np.min(wave), np.max(wave), 0) )\n f.close()\n\n synin = open('fort.5')\n synout = open('syn.log','a')\n p = subprocess.Popen([rotin], stdin=synin, stdout = synout, stderr = synout)\n p.wait()\n synout.flush()\n synout.close()\n synin.close()\n \n assert (os.path.isfile('fort.11')), 'Error: I cannot read the file *fort.11* in '+tmpdir+' -- looks like rotin has crashed, please look at syn.log'\n\n wave2, flux2 = np.loadtxt('fort.11', unpack=True)\n print(len(wave),len(wave2))\n \n if clean == True: cleanup()\n\n return(wave2, flux2)\n\ndef read_model(modelfile):\n \n \"\"\"Reads a model atmosphere into a structure\n \n Parameters\n ---------- \n modelfile : str\n file with a model atmosphere\n \n Returns\n -------\n atmostype : str\n type of model atmosphere (kurucz/marcs/phoenix)\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density\n \"\"\"\n\n #check\n if not os.path.isfile(modelfile):\n mf = os.path.join(modeldir,modelfile)\n if os.path.isfile(mf): modelfile = mf\n\n atmostype = identify_atmostype(modelfile)\n\n if atmostype == 'kurucz':\n teff, logg, vmicro, abu, nd, atmos = read_kurucz_model(modelfile) \n if atmostype == 'marcs':\n teff, logg, vmicro, abu, nd, atmos = read_marcs_model2(modelfile)\n if atmostype == 'phoenix':\n teff, logg, vmicro, abu, nd, atmos = read_phoenix_model(modelfile)\n\n return (atmostype,teff,logg,vmicro,abu,nd,atmos)\n\ndef identify_atmostype(modelfile):\n\n \"\"\"Idenfies the type of model atmosphere in an input file\n\n Valid options are kurucz, marcs or phoenix\n\n Parameters\n ----------\n modelfile: str\n file with a model atmosphere\n\n Returns\n -------\n atmostype: str\n can take the value 'kurucz', 'marcs' or 'phoenix' ('tlusty' soon to be added!)\n\n \"\"\"\n\n if ('PHOENIX' in modelfile and 'fits' in modelfile): atmostype = 'phoenix'\n else: \n if modelfile[-3:] == '.gz':\n f = gzip.open(modelfile,'rt')\n else:\n f = open(modelfile,'r')\n line = f.readline()\n print('modelfile / line=',modelfile,line)\n type(line)\n if ('TEFF' in line): atmostype = 'kurucz'\n else: atmostype = 'marcs'\n f.close()\n \n return(atmostype)\n\ndef checksynspec(linelist,modelfile):\n\n \"\"\"checking that executables and data are where it should be\n\n 
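# Hypothetical sketch (added commentary, not part of the original synple code):
# read_model dispatches on the atmosphere type (kurucz/marcs/phoenix) and returns
# the same tuple in all cases; this simply prints a one-line summary of a model file.
# The modelfile argument is a placeholder for whatever model atmosphere the user has.
def _example_describe_model(modelfile):
    atmostype, teff, logg, vmicro, abu, nd, atmos = read_model(modelfile)
    print('%s model: Teff=%g K, logg=%g, vmicro=%g km/s, %i layers' %
          (atmostype, teff, logg, vmicro, nd))
    return atmostype, nd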
Parameters\n ----------\n linelist: array of str\n file names of the line lists to be used. The first string should correspond\n to the atomic line list and is mandatory. The remainder are optional and\n correspond to molecular line lists. All files should be in synspec format.\n (see documentation at http://nova.astro.umd.edu/Synspec43/synspec.html)\n\n \"\"\"\n\n dirs = [synpledir,modelatomdir,linelistdir,bindir]\n for entry in dirs: assert (os.path.isdir(entry)), 'dir '+entry+' missing'\n\n files = [synspec,rotin]\n for entry in linelist: \n if not os.path.isfile(entry):\n ll = os.path.join(linelistdir,entry)\n if os.path.isfile(ll): files.append(ll)\n for entry in files: assert (os.path.isfile(entry)), 'file '+entry+' missing'\n\n if not os.path.isfile(modelfile):\n mf = os.path.join(modeldir,modelfile)\n if os.path.isfile(mf): modelfile = mf\n\n print(modeldir)\n print(modelfile)\n assert (os.path.isfile(modelfile)),'model atmosphere file '+modelfile+' missing'\n\n\n return(True)\n\n\ndef checkinput(wrange, vmicro, linelist):\n\n \"\"\"checking input parameters from user\n\n\n Parameters\n ----------\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n vmicro: float, optional\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n\n Returns\n ------\n imode: int\n appropriate value for the variable imode, which specifies whether\n one will use many atomic lines (imode=0), just a few (imode=1),\n or none (H lines are an exception; imode=2)\n\n \"\"\"\n\n\n #determine imode\n # imode = 0 is default, atoms and molecules, at least 2 line lists \n # synple sets IFMOL = 1 in 'tas' when an input molecular line list is used\n # but does not set it when only an atomic line list is given\n # imode = 2 for pure continuum\n # imode = 1 for few-lines mode\n # imode = -3 for regular opacity tables (TLUSTY)\n\n if len(linelist) == 0: \n imode = 2 # no atomic or molecular line list -> pure continuum and no molecules\n else:\n\n #find range of atomic line list\n if not os.path.isfile(linelist[0]):\n ll = os.path.join(linelistdir,linelist[0])\n if os.path.isfile(ll): linelist[0] = ll\n\n nlines, minlambda, maxlambda = getlinelistrange(linelist[0])\n\n #check\n if nlines > 10:\n assert (wrange[0] > minlambda-1 and wrange[1] < maxlambda+1),'wrange exceeds the allow range ('+str(minlambda)+' to '+str(maxlambda)+')'\n imode = 0\n else:\n imode = 1\n\n assert (vmicro >= 0.0),'vmicro = '+str(vmicro)+' but cannot < 0.'\n \n return(imode)\n\ndef getlinelistrange(atomiclinelist):\n#finds out min and max wavelengths for a line list\n\n f = open(atomiclinelist,'r')\n line = f.readline()\n entries = line.split()\n minlambda = float(entries[0])*10.\n fsize = os.path.getsize(atomiclinelist)\n f.seek(fsize-103)\n line = f.readline()\n f.close()\n entries = line.split()\n maxlambda = float(entries[0])*10.\n nlines = int(0.01 * fsize)\n\n return(nlines, minlambda,maxlambda)\n\n\n\ndef writetas(filename,nd,linelist):\n#write non-std input parameters\n# input: filename -- str -- name of the non-std. param. 
file to print\n# nd -- int -- number of layers in the model\n# nd -- list -- names of the linelist files (atomic first, then one \n#\t\t\t\tor more molecular ones\n \n f = open(filename,'w')\n f.write(\"ND= \"+str(nd)+\" \\n\")\n if len(linelist) > 1: f.write(\"IFMOL= \"+one+\" \\n\")\n f.write(\"TMOLIM= 8000. \\n\")\n\n f.close()\n\n return()\n\ndef write3(zexclude):\n \n f = open('fort.3','w')\n for z in zexclude:\n f.write( \" %d %10.4e \\n\" % (z, 0.0) )\n f.close()\n \n return()\n\n\ndef write2(lt,lrho,wrange, filename='opt.data', dlw=2e-5, binary=False,strength=1e-4,inttab=1):\n#write fort.2 file for creating opacity tables for TLUSTY\n\n f = open('fort.2','w')\n f.write( \" %d %10.4e %10.4e \\n\" % (len(lt),10.**lt[0],10.**lt[-1]) )\n f.write( \" %d \\n\" % (1) )\n f.write( \" %d %10.4e %10.4e \\n\" % (len(lrho),10.**lrho[0],10.**lrho[-1]) )\n \n nsamples = int( (np.log10(wrange[1]) - np.log10(wrange[0]) )/dlw) + 1 \n f.write( \" %d %d %10.4e %10.4e \\n\" % (nsamples,inttab,wrange[0],wrange[1]) ) \n if binary == True: \n ibingr = 1\n else:\n ibingr = 0\n filename = \"'\"+filename+\"'\"\n f.write( \" %s %d \\n\" % (filename,ibingr) )\n f.close()\n\n return()\n\n\ndef write55(wrange,dw=1e-2,imode=0,hydprf=2,strength=1e-4,vmicro=0.0, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atmostype='kurucz'):\n\n\n #imode,idst,iprin\n #inmod,zero,ichang,ichemc\n #lyman,zero,zero,zero,zero\n #one,nlte,icontl,zero,ifhe2\n #ihydpr,ihe1pr,ihe2pr\n #wstart,wend,cutoff,zero,strength,wdist \n\n if (atmostype == 'tlusty' or atmostype == 'marcs'): inmod = 1 \n else: inmod = 0\n\n f = open('fort.55','w')\n f.write(\" \"+str(imode)+\" \"+2*zero+\"\\n\")\n f.write(\" \"+str(inmod)+3*zero+\"\\n\")\n f.write(5*zero+\"\\n\")\n f.write(one+4*zero+\"\\n\")\n f.write(str(hydprf)+2*zero+\"\\n\")\n if imode == -3:\n f.write( ' %f %f %f %i %e %f \\n ' % (wrange[0], -wrange[1], 100., 2000, strength, dw) )\n else:\n f.write( ' %f %f %f %i %e %f \\n ' % (wrange[0], wrange[1], 200., 2000, strength, dw) )\n ll = len(linelist)\n if ll < 2: f.write(2*zero)\n else: f.write(str(ll-1) + ' ' + ' '.join(map(str,np.arange(ll-1)+20)))\n f.write(\"\\n\")\n f.write( ' %f \\n' % (vmicro) )\n f.close()\n\ndef write5(teff,logg,abu, atom='ap18', ofile='fort.5', nlte=False, tl=False):\n\n symbol, mass, sol = elements()\n\n f = open(ofile,'w')\n f.write(' '+str(teff)+\" \"+str(logg).format('%7.4f')+\" ! TEFF, GRAV \\n\")\n if nlte:\n f.write(\" F F ! LTE, GRAY \\n\")\n else:\n f.write(\" T F ! LTE, GRAY \\n\")\n f.write(\" 'tas' ! name of non-standard flags \\n\")\n f.write(\" 50 ! frequencies \\n\")\n\n if tl: \n natom = 30\n else:\n natom = len(abu)\n\n f.write(\" \"+str(natom)+\" ! NATOMS \\n\") \n\n assert (atom == 'hhm' or atom == 'ap18' or atom == 'yo19'), 'atom must be one of: hhm/ap18/yo19!'\n ex = np.ones(natom)\n if atom == 'hhm' : \n zex = [1] #atomic numbers of elements included explicitly (contributing cont. opacity)\n elif atom == 'yo19':\n zex = [1,11,12,19,20]\n elif atom == 'ap18': \n zex = [1,2,6,7,8,11,12,13,14,20,26]\n\n for i in zex: ex[i-1] = 2\n if nlte: ex[0] = -3\n\n for i in range(natom):\n f.write(' %2d %e %i %s\\n' % (ex[i], abu[i], 0, ' ! 
' +symbol[i]) )\n\n for i in range(3): f.write(\"* \\n\")\n \n if atom == 'hhm': # highly simplified continuum opacities -- just H and H-\n f.write(\" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \\n\" )\n f.write(\" 0 0 3 0 \\n\")\n f.write(\" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \\n\")\n f.write(\" 1 1 1 1 0 0 ' H 2' ' ' \\n\")\n f.write(\" 0 0 0 -1 0 0 ' ' ' ' \\n\")\n elif atom == \"yo19\": # set for NLTE calculations for APOGEE (see Osorio+ 2019 A&A paper)\n f.write(\"* ../data_atom for ions \\n\")\n f.write(\" 1 -1 1 0 0 1 ' H 0' 'data_atom/hm.dat' \\n\")\n f.write(\" 0 0 3 0 \\n\")\n f.write(\" 1 0 16 0 0 0 ' H 1' 'data_atom/h1_16lev2.dat' \\n\")\n f.write(\" 1 1 1 1 0 0 ' H 2' ' ' \\n\")\n f.write(\" 11 0 42 0 0 0 'Na 1' 'data_atom/NaIkas.tl' \\n\")\n f.write(\" 11 1 1 1 0 0 'Na 2' '' \\n\")\n f.write(\" 12 0 96 0 0 0 'Mg 1' 'data_atom/Mg1kas_F_ccc.tl' \\n\")\n f.write(\" 12 1 29 0 0 0 'Mg 2' 'data_atom/Mg2kas_F_ccc.tl' \\n\")\n f.write(\" 12 2 1 1 0 0 'Mg 3' ' ' \\n\")\n f.write(\" 19 0 31 0 0 0 'K 1' 'data_atom/KIkas.tl' \\n\")\n f.write(\" 19 1 1 1 0 0 'K 2' '' \\n\")\n f.write(\" 20 0 66 0 0 0 'Ca 1' 'data_atom/Ca1kas_F_zat.tl' \\n\")\n f.write(\" 20 1 24 0 0 0 'Ca 2' 'data_atom/Ca2kas_F_zat.tl' \\n\")\n f.write(\" 20 2 1 1 0 0 'Ca 3' ' ' \\n\")\n f.write(\" 0 0 0 -1 0 0 ' ' ' ' \\n\")\n elif atom == 'ap18': # generic set used in Allende Prieto+ (2018) A&A paper\n f.write(\"* ../data for ions \\n\")\n f.write(\" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \\n\")\n f.write(\" 0 0 3 0 \\n\")\n f.write(\" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \\n\")\n f.write(\" 1 1 1 1 0 0 ' H 2' ' ' \\n\")\n f.write(\" 2 0 14 0 0 0 'He 1' 'data/he1.dat' \\n\")\n f.write(\" 2 1 14 0 0 0 'He 2' 'data/he2.dat ' \\n\")\n f.write(\" 2 2 1 1 0 0 'He 3' ' ' \\n\")\n f.write(\" 6 0 104 0 0 0 ' C 1' 'data/c1.t' \\n\")\n f.write(\" 6 1 40 0 0 0 ' C 2' 'data/c2.t' \\n\")\n f.write(\" 6 2 1 1 0 0 ' C 3' ' ' \\n\")\n f.write(\" 7 0 89 0 0 0 ' N 1' 'data/n1.t' \\n\")\n f.write(\" 7 1 51 0 0 0 ' N 2' 'data/n2.t' \\n\")\n f.write(\" 7 2 1 1 0 0 ' N 3' ' ' \\n\")\n f.write(\" 8 0 54 0 0 0 ' O 1' 'data/o1.t' \\n\")\n f.write(\" 8 1 74 0 0 0 ' O 2' 'data/o2.t' \\n\")\n f.write(\" 8 2 1 1 0 0 ' O 3' ' ' \\n\")\n f.write(\" 11 0 32 0 0 0 'Na 1' 'data/na1.t' \\n\")\n f.write(\" 11 1 8 0 0 0 'Na 2' 'data/na2.t' \\n\")\n f.write(\" 11 2 1 1 0 0 'Na 3' ' ' \\n\")\n f.write(\" 12 0 71 0 0 0 'Mg 1' 'data/mg1.t' \\n\")\n f.write(\" 12 1 31 0 0 0 'Mg 2' 'data/mg2.t' \\n\")\n f.write(\" 12 2 1 1 0 0 'Mg 3' ' ' \\n\")\n f.write(\" 13 0 33 0 0 0 'Al 1' 'data/al1.t' \\n\")\n f.write(\" 13 1 81 0 0 0 'Al 2' 'data/al2.t' \\n\")\n f.write(\" 13 2 1 1 0 0 'Al 3' ' ' \\n\")\n f.write(\" 14 0 57 0 0 0 'Si 1' 'data/si1.t' \\n\")\n f.write(\" 14 1 46 0 0 0 'Si 2' 'data/si2.t' \\n\")\n f.write(\" 14 2 1 1 0 0 'Si 3' ' ' \\n\")\n f.write(\" 20 0 79 0 0 0 'Ca 1' 'data/ca1.t' \\n\")\n f.write(\" 20 1 32 0 0 0 'Ca 2' 'data/ca2.t' \\n\")\n f.write(\" 20 2 1 1 0 0 'Ca 3' ' ' \\n\")\n f.write(\" 26 0 49 0 0 0 'Fe 1' 'data/tlusty_fe1_topmod.dat' \\n\")\n f.write(\" 26 1 41 0 0 0 'Fe 2' 'data/tlusty_fe2_topmod.dat' \\n\")\n f.write(\" 26 2 1 1 0 0 'Fe 3' ' ' \\n\")\n f.write(\" 0 0 0 -1 0 0 ' ' ' ' \\n\")\n f.write(\"* \\n\")\n f.write(\"* end \\n\")\n f.close()\n\ndef write8(teff, logg, nd, atmos, atmostype, ofile='fort.8'):\n\n f = open(ofile,'w')\n if atmostype == 'tlusty':\n f.write(\" \"+str(nd)+\" \"+str(3)+\"\\n\")\n for i in range(nd):\n f.write(' %e ' % atmos['dm'][i])\n f.write(\"\\n\")\n for i in range(nd):\n f.write( '%f %e %e \\n' % (atmos['t'][i], 
atmos['ne'][i], atmos['rho'][i] ) )\n f.close()\n\n else:\n\n if atmostype == 'marcs':\n f.write(\" \"+str(nd)+\" \"+str(-4)+\"\\n\")\n for i in range(nd):\n f.write(' %e ' % atmos['dm'][i])\n f.write(\"\\n\")\n for i in range(nd):\n f.write( '%f %e %e %e \\n' % (atmos['t'][i], atmos['ne'][i], atmos['rho'][i], atmos['rho'][i]/atmos['mmw'][i]/1.67333e-24 + atmos['ne'][i] ) )\n f.close()\n\n else:\n f.write( 'TEFF %7.0f GRAVITY %7.5f LTE \\n' % (teff, logg) )\n for i in range(21): f.write('\\n')\n f.write( 'READ DECK6%3i RHOX,T,P,XNE \\n' % nd )\n for i in range(nd): \n f.write( '%e %f %e %e \\n' % (atmos['dm'][i], atmos['t'][i], atmos['p'][i], atmos['ne'][i]) )\n f.close()\n\n return()\n \n\ndef create_links(linelist):\n#create soft links for line lists, mand odel atom dir \n\n for i in range(len(linelist)):\n if not os.path.isfile(linelist[i]):\n ll = os.path.join(linelistdir,linelist[i])\n if os.path.isfile(ll): linelist[i] = ll\n if i == 0: os.symlink(linelist[0],'fort.19')\n else: os.symlink(linelist[i],'fort.'+str(20-1+i))\n\n os.symlink(modelatomdir,'./data')\n\n return()\n\ndef cleanup():\n#cleanup all temporary files\n\n files = os.listdir('.')\n for entry in files: \n if os.path.islink(entry) and entry.startswith('fort'): os.unlink(entry)\n if os.path.isfile(entry) and entry.startswith('fort'): os.remove(entry)\n\n if os.path.islink('data'): os.unlink('data')\n if os.path.isfile('tas'): os.remove('tas')\n assert (not os.path.isdir('data')), 'A subdirectory *data* exists in this folder, and that prevents the creation of a link to the data directory for synple'\n\n\n return()\n\n\ndef read_kurucz_model(modelfile):\n \n \"\"\"Reads a Kurucz model atmospheres\n \n Parameters\n ----------\n modelfile: str\n file name \n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density \n \n \"\"\"\n\n f = open(modelfile,'r')\n line = f.readline()\n entries = line.split()\n assert (entries[0] == 'TEFF' and entries[2] == 'GRAVITY'), 'Cannot find Teff and logg in the file header'\n teff = float(entries[1])\n logg = float(entries[3])\n\n while entries[0] != 'ABUNDANCE': \n line = f.readline()\n entries = line.split()\n\n abu = []\n\n if entries[1] == 'SCALE': \n scale = float(entries[2])\n \n\n while entries[0] == 'ABUNDANCE':\n i = 0\n for word in entries: \n if (word == 'CHANGE'): w = i\n i = i + 1 \n for i in range(int((len(entries)-w-1)/2)):\n z = int(entries[w+1+2*i])\n if (z == 1): nhntot = float(entries[w+2+2*i])\n if (z < 3): abu.append(float(entries[w+2+2*i]) / nhntot) \n else: abu.append(scale*10.**(float(entries[w+2+2*i])) / nhntot)\n\n line = f.readline()\n entries = line.split() \n\n assert (entries[0] == 'READ'), 'I cannot find the header of the atmospheric table in the input Kurucz model'\n\n nd = int(entries[2]) - 1\n line = f.readline()\n entries = line.split()\n line = f.readline()\n entries = line.split()\n vmicro = float(entries[6])/1e5\n\n dm = [ float(entries[0]) ]\n t = [ float(entries[1]) ]\n p = [ float(entries[2]) ]\n ne = [ float(entries[3]) ] \n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n dm.append( float(entries[0]))\n t.append( 
float(entries[1]))\n p.append( float(entries[2]))\n ne.append( float(entries[3]))\n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),\n 'formats':('f', 'f', 'f','f')}) \n atmos['dm'] = dm\n atmos['t'] = t\n atmos['p'] = p\n atmos['ne'] = ne\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\n\ndef read_marcs_model(modelfile):\n \n \"\"\"Reads a MARCS model atmospheres\n \n Parameters\n ----------\n modelfile: str\n file name. It can be a gzipped (.gz) file\n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density \n \n \"\"\" \n\n if modelfile[-3:] == '.gz':\n f = gzip.open(modelfile,'rt')\n else:\n f = open(modelfile,'r')\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'\n teff = float(entries[0])\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'\n logg = np.log10(float(entries[0]))\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'\n vmicro = float(entries[0])\n\n while entries[0] != 'Logarithmic': \n line = f.readline()\n entries = line.split()\n\n abu = []\n line = f.readline()\n entries = line.split()\n\n i = 0\n while entries[1] != 'Number':\n for word in entries: \n abu.append( 10.**(float(word)-12.0) )\n i = i + 1 \n line = f.readline()\n entries = line.split() \n\n if i < 99: \n for j in range(99-i):\n abu.append(1e-111)\n i = i + 1\n\n nd = int(entries[0])\n line = f.readline()\n entries = line.split()\n\n assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'\n\n line = f.readline()\n line = f.readline()\n entries = line.split()\n\n t = [ float(entries[4]) ]\n p = [ float(entries[6]) ]\n ne = [ float(entries[5]) / bolk / float(entries[4]) ] \n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n t.append( float(entries[4]))\n p.append( float(entries[6]))\n ne.append( float(entries[5]) / bolk / float(entries[4]))\n\n line = f.readline()\n line = f.readline()\n entries = line.split()\n\n dm = [ float(entries[-1]) ]\n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n dm.append( float(entries[7]))\n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),\n 'formats':('f', 'f', 'f','f')}) \n atmos['dm'] = dm\n atmos['t'] = t\n atmos['p'] = p\n atmos['ne'] = ne\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\ndef read_marcs_model2(modelfile):\n \n \"\"\"Reads a MARCS model atmospheres. \n While read_marcs_model returns T, Pg and Ne in the structure 'atmos'\n read_marcs_model2 returns T, rho, mmw, and Ne.\n \n Parameters\n ----------\n modelfile: str\n file name. 
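# Hypothetical sketch (added commentary, not part of the original synple code):
# read a MARCS model (plain text or gzipped) with read_marcs_model and summarize the
# structured array it returns (fields 'dm', 't', 'p', 'ne'). The file name is a
# placeholder supplied by the user.
def _example_read_marcs(modelfile):
    teff, logg, vmicro, abu, nd, atmos = read_marcs_model(modelfile)
    print('Teff=%g K logg=%g vmicro=%g km/s, %i layers' % (teff, logg, vmicro, nd))
    # temperature at the first and last depth points of the model
    return atmos['t'][0], atmos['t'][-1]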
It can be a gzipped (.gz) file\n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, density, \n mean molecular weight and electron number density \n \n \"\"\" \n\n if modelfile[-3:] == '.gz':\n f = gzip.open(modelfile,'rt')\n else:\n f = open(modelfile,'r')\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'\n teff = float(entries[0])\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'\n logg = np.log10(float(entries[0]))\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'\n vmicro = float(entries[0])\n\n while entries[0] != 'Logarithmic': \n line = f.readline()\n entries = line.split()\n\n abu = []\n line = f.readline()\n entries = line.split()\n\n i = 0\n while entries[1] != 'Number':\n for word in entries: \n abu.append( 10.**(float(word)-12.0) )\n i = i + 1 \n line = f.readline()\n entries = line.split() \n\n if i < 99: \n for j in range(99-i):\n abu.append(1e-111)\n i = i + 1\n\n nd = int(entries[0])\n line = f.readline()\n entries = line.split()\n\n assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'\n\n line = f.readline()\n line = f.readline()\n entries = line.split()\n\n t = [ float(entries[4]) ]\n p = [ float(entries[6]) ]\n ne = [ float(entries[5]) / bolk / float(entries[4]) ] \n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n t.append( float(entries[4]))\n p.append( float(entries[6]))\n ne.append( float(entries[5]) / bolk / float(entries[4]))\n\n line = f.readline()\n line = f.readline()\n entries = line.split()\n\n rho = [ float(entries[3]) ]\n dm = [ float(entries[7]) ]\n mmw = [ float(entries[4]) ]\n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n rho.append( float(entries[3]))\n dm.append( float(entries[7]))\n mmw.append( float(entries[4]))\n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'rho','mmw','ne'),\n 'formats':('f', 'f', 'f','f','f')}) \n atmos['dm'] = dm\n atmos['t'] = t\n atmos['rho'] = rho\n atmos['mmw'] = mmw\n atmos['ne'] = ne\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\n\ndef read_phoenix_model(modelfile):\n\n \"\"\"Reads a FITS Phoenix model atmospheres\n \n Parameters\n ----------\n modelfile: str\n file name \n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density \n \n \"\"\" \n\n from astropy.io import fits\n\n h = fits.open(modelfile)[0].header\n f = fits.open(modelfile)[1].data\n\n nd = len(f['temp'])\n\n teff = float(h['PHXTEFF'])\n logg = float(h['PHXLOGG'])\n vmicro = 
float(h['PHXXI_L'])\n\n m_h = float(h['PHXM_H'])\n alpha = float(h['PHXALPHA'])\n \n symbol, mass,sol = elements(husser=True) \n abu = sol \n z_metals = np.arange(97,dtype=int) + 3\n z_alphas = np.array([8,10,12,14,16,20,22],dtype=int)\n for i in range(len(z_metals)): abu[z_metals[i] - 1] = abu[z_metals[i] - 1] + m_h\n for i in range(len(z_alphas)): abu[z_alphas[i] - 1] = abu[z_alphas[i] - 1] + alpha\n \n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),\n 'formats':('f', 'f', 'f','f')}) \n\n atmos['dm'] = f['pgas'] / 10.**logg\n atmos['t'] = f['temp']\n atmos['p'] = f['pgas']\n atmos['ne'] = f['pe']/ bolk / f['temp']\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\n\ndef read_phoenix_text_model(modelfile):\n \n \n \"\"\"Reads a plain-text Phoenix model atmospheres\n \n Parameters\n ----------\n modelfile: str\n file name \n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density \n \n \"\"\" \n\n\n f = open(modelfile,'r')\n line = f.readline()\n while line[0:4] != \" no.\":\n line = f.readline()\n entries = line.split()\n nd = int(entries[5])\n print('nd=',nd)\n while line[0:14] != \" model: teff\":\n line = f.readline()\n entries = line.split()\n teff = float(entries[3])\n print('teff=',teff)\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[0] == 'log(g):' and entries[2] == '[cm/s**2]'), 'Cannot find logg in the file header'\n logg = float(entries[1])\n print('logg=',logg)\n line = f.readline()\n while line[0:22] != \" Element abundances :\": \n line = f.readline()\n\n\n symbol,mass,sol = elements()\n\n sy = []\n ab = []\n\n while line[0:29] != \" Element abundances relative\": \n line = f.readline()\n #print(line)\n if line[0:9] == ' element:':\n entries = line.split()\n for word in entries[1:]: sy.append(word)\n if line[0:11] == ' abundance:':\n entries = line.split()\n for word in entries[1:]: ab.append(word)\n\n assert (len(sy) == len(ab)), 'different elements in arrays sy (elemental symbols) and ab (abundances)'\n\n abu = np.ones(99)*1e-99\n i = 0\n for item in sy:\n try:\n index = symbol.index(item)\n abu[index] = 10.**(float(ab[i])-12.) \n except ValueError:\n print(\"the symbol \",item,\" is not recognized as a valid element\")\n i = i + 1\n\n print('abu=',abu)\n\n while line[0:72] != \" l tstd temperature pgas pe density mu\": \n line = f.readline()\n\n line = f.readline()\n entries = line.split()\n\n t = [ float(entries[2].replace('D','E')) ]\n p = [ float(entries[3].replace('D','E')) ]\n ne = [ float(entries[4].replace('D','E')) / bolk / float(entries[2].replace('D','E')) ] \n dm = [ float(entries[3].replace('D','E')) / 10.**logg ] #assuming hydrostatic equil. and negliglible radiation and turb. 
pressure\n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n t.append( float(entries[2].replace('D','E')))\n p.append( float(entries[3].replace('D','E')))\n ne.append( float(entries[4].replace('D','E')) / bolk / float(entries[2]))\n dm.append ( float(entries[3].replace('D','E')) / 10.**logg )\n\n vmicro = 0.0\n while (line[0:6] != \" greli\"):\n line = f.readline()\n if line == '':\n print('Cannot find a value for vmicro (vturb) in the model atmosphere file ',modelfile)\n break\n \n if line != '':\n entries = line.split()\n vmicro = float(entries[5])\n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),\n 'formats':('f', 'f', 'f','f')}) \n atmos['dm'] = dm\n atmos['t'] = t\n atmos['p'] = p\n atmos['ne'] = ne\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\ndef interp_spl(xout, x, y):\n\n \"\"\"Interpolates in 1D using cubic splines\n\n Parameters\n ----------\n x: numpy array or list\n input abscissae\n y: numpy array or list\n input ordinates \n xout: numpy array or list\n array of abscissae to interpolate to\n\n Returns\n -------\n yout: numpy array or list\n array of interpolated values\n\n \"\"\"\n\n tck = interpolate.splrep(x, y, s=0)\n yout = interpolate.splev(xout, tck, der=0)\n\n return(yout)\n\n\ndef elements(husser=False):\n \n \"\"\"Reads the solar elemental abundances\n \n Parameters\n ----------\n husser: bool, optional\n when set the abundances adopted for Phoenix models by Huser et al. (2013)\n are adopted. Otherwise Asplund et al. (2005) are used -- consistent with\n the MARCS (Gustafsson et al. 2008) models and and Kurucz (Meszaros et al. 2012)\n Kurucz model atmospheres.\n \n Returns\n -------\n symbol: numpy array of str\n element symbols\n mass: numpy array of floats\n atomic masses (elements Z=1-99)\n sol: numpy array of floats\n solar abundances N/N(H)\n \n \"\"\"\n\n symbol = [\n 'H' ,'He','Li','Be','B' ,'C' ,'N' ,'O' ,'F' ,'Ne', \n 'Na','Mg','Al','Si','P' ,'S' ,'Cl','Ar','K' ,'Ca', \n 'Sc','Ti','V' ,'Cr','Mn','Fe','Co','Ni','Cu','Zn', \n 'Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y' ,'Zr', \n 'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn', \n 'Sb','Te','I' ,'Xe','Cs','Ba','La','Ce','Pr','Nd', \n 'Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb', \n 'Lu','Hf','Ta','W' ,'Re','Os','Ir','Pt','Au','Hg', \n 'Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th', \n 'Pa','U' ,'Np','Pu','Am','Cm','Bk','Cf','Es' ]\n\n mass = [ 1.00794, 4.00260, 6.941, 9.01218, 10.811, 12.0107, 14.00674, 15.9994,\n 18.99840, 20.1797, 22.98977, 24.3050, 26.98154, 28.0855, 30.97376, \n 32.066, 35.4527, 39.948, 39.0983, 40.078, 44.95591, 47.867, 50.9415, \n 51.9961, 54.93805, 55.845, 58.93320, 58.6934, 63.546, 65.39, 69.723, \n 72.61, 74.92160, 78.96, 79.904, 83.80, 85.4678, 87.62, 88.90585, \n 91.224, 92.90638, 95.94, 98., 101.07, 102.90550, 106.42, 107.8682, \n 112.411, 114.818, 118.710, 121.760, 127.60, 126.90447, 131.29, \n 132.90545, 137.327, 138.9055, 140.116, 140.90765, 144.24, 145, 150.36, \n 151.964, 157.25, 158.92534, 162.50, 164.93032, 167.26, 168.93421, \n 173.04, 174.967, 178.49, 180.9479, 183.84, 186.207, 190.23, 192.217, \n 195.078, 196.96655, 200.59, 204.3833, 207.2, 208.98038, 209., 210., \n 222., 223., 226., 227., 232.0381, 231.03588, 238.0289, 237., 244., \n 243., 247., 247., 251., 252. ]\n\n if not husser:\n #Asplund, Grevesse and Sauval (2005), basically the same as \n #Grevesse N., Asplund M., Sauval A.J. 
2007, Space Science Review 130, 205\n sol = [ 0.911, 10.93, 1.05, 1.38, 2.70, 8.39, 7.78, 8.66, 4.56, 7.84, \n 6.17, 7.53, 6.37, 7.51, 5.36, 7.14, 5.50, 6.18, 5.08, 6.31, \n 3.05, 4.90, 4.00, 5.64, 5.39, 7.45, 4.92, 6.23, 4.21, 4.60, \n 2.88, 3.58, 2.29, 3.33, 2.56, 3.28, 2.60, 2.92, 2.21, 2.59, \n 1.42, 1.92, -9.99, 1.84, 1.12, 1.69, 0.94, 1.77, 1.60, 2.00, \n 1.00, 2.19, 1.51, 2.27, 1.07, 2.17, 1.13, 1.58, 0.71, 1.45, \n -9.99, 1.01, 0.52, 1.12, 0.28, 1.14, 0.51, 0.93, 0.00, 1.08, \n 0.06, 0.88, -0.17, 1.11, 0.23, 1.45, 1.38, 1.64, 1.01, 1.13,\n 0.90, 2.00, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06, \n -9.99, -0.52, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]\n\t \n sol[0] = 1.\n\n else:\n #a combination of meteoritic/photospheric abundances from Asplund et al. 2009\n #chosen for the Husser et al. (2013) Phoenix model atmospheres\n sol = [ 12.00, 10.93, 3.26, 1.38, 2.79, 8.43, 7.83, 8.69, 4.56, 7.93, \n 6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.08, 6.34, \n 3.15, 4.95, 3.93, 5.64, 5.43, 7.50, 4.99, 6.22, 4.19, 4.56, \n 3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.36, 2.87, 2.21, 2.58, \n 1.46, 1.88, -9.99, 1.75, 1.06, 1.65, 1.20, 1.71, 0.76, 2.04, \n 1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42, \n -9.99, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.92, \n 0.10, 0.85, -0.12, 0.65, 0.26, 1.40, 1.38, 1.62, 0.80, 1.17,\n 0.77, 2.04, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06, \n -9.99, -0.54, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]\n\n sol[0] = 1.\n for i in range(len(sol)-1): sol[i+1] = 10.**(sol[i+1]-12.0)\n\n return (symbol,mass,sol)\n\n\ndef lgconv(xinput, yinput, fwhm, ppr=None):\n\n \"\"\"convolution with a Gaussian in linear lambda scale\n for a constant resolution\n\n Parameters\n ----------\n xinput: numpy float array\n wavelengths \n yinput: numpy array of floats\n fluxes\n fwhm: float\n FWHM of the Gaussian (same units as for xinput)\n ppr: float, optional\n Points per resolution element to downsample the convolved spectrum\n (default None, to keep the original sampling)\n\n Returns\n -------\n x: numpy float array\n wavelengths after convolution, will be a subset of xinput when that is linear, \n otherwise a subset of the linearly resampled version\n y: numpy array of floats\n fluxes after convolution\n\n \"\"\"\n\n #resampling to a linear lambda wavelength scale if need be\n xx = np.diff(xinput)\n if max(xx) - min(xx) > 1.e-7: #input not linearly sampled\n nel = len(xinput)\n minx = np.min(xinput)\n maxx = np.max(xinput)\n x = np.linspace(minx,maxx,nel)\n #y = np.interp( x, xinput, yinput)\n y = interp_spl( x, xinput, yinput)\n else: #input linearly sampled\n x = xinput\n y = yinput\n\n step = x[1] - x[0]\n sigma=fwhm/2.0/np.sqrt(-2.0*np.log(0.5))\n npoints = 2*int(3*fwhm/2./step)+1\n half = npoints * step /2.\n xx = np.linspace(-half,half,npoints)\n kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)\n kernel = kernel/np.sum(kernel)\n\n y = np.convolve(y,kernel,'valid')\n #y = ss.fftconvolve(y,kernel,'valid')\n print(npoints)\n edge = int(npoints/2)\n x = x[edge:-edge]\n\n print(xinput.size,x.size,y.size)\n\n if ppr != None:\n fac = int(fwhm / step / ppr)\n subset = np.arange(x.size / fac, dtype=int) * fac \n x = x[subset]\n y = y[subset]\n\n return(x,y)\n\ndef vgconv(xinput,yinput,fwhm, ppr=None):\n\n\n \"\"\"convolution with a Gaussian in log lambda scale\n for a constant resolving power\n\n Parameters\n ----------\n xinput: numpy float array\n wavelengths \n yinput: numpy array of floats\n fluxes\n fwhm: float\n FWHM of 
the Gaussian (km/s)\n ppr: float, optional\n Points per resolution element to downsample the convolved spectrum\n (default None, to keep the original sampling)\n\n Returns\n -------\n x: numpy float array\n wavelengths after convolution, will be a subset of xinput when that is equidistant\n in log lambda, otherwise a subset of the resampled version\n y: numpy array of floats\n fluxes after convolution\n\n \"\"\"\n #resampling to ln(lambda) if need be\n xx = np.diff(np.log(xinput))\n if max(xx) - min(xx) > 1.e-7: #input not equidist in loglambda\n nel = len(xinput)\n minx = np.log(xinput[0])\n maxx = np.log(xinput[-1])\n x = np.linspace(minx,maxx,nel)\n step = x[1] - x[0]\n x = np.exp(x)\n #y = np.interp( x, xinput, yinput)\n y = interp_spl( x, xinput, yinput)\n else:\n x = xinput\n y = yinput\n step = np.log(xinput[1])-np.log(xinput[0])\n\n fwhm = fwhm/clight # inverse of the resolving power\n sigma=fwhm/2.0/np.sqrt(-2.0*np.log(0.5))\n npoints = 2*int(3*fwhm/2./step)+1\n half = npoints * step /2.\n xx = np.linspace(-half,half,npoints)\n kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)\n kernel = kernel/np.sum(kernel)\n\n y = np.convolve(y,kernel,'valid')\n edge = int(npoints/2)\n x = x[edge:-edge]\n\n #print(xinput.size,x.size,y.size)\n\n if ppr != None:\n fac = int(fwhm / step / ppr)\n print(fwhm,step,ppr,fac)\n subset = np.arange(x.size / fac, dtype=int) * fac \n x = x[subset]\n y = y[subset]\n\n\n return(x,y)\n\ndef rotconv(xinput,yinput,vsini, ppr=None):\n\n\n \"\"\"convolution with a Rotation profile \n\n Parameters\n ----------\n xinput: numpy float array\n wavelengths \n yinput: numpy array of floats\n fluxes\n vsini: float\n projected rotational velocity (km/s)\n ppr: float, optional\n Points per resolution element to downsample the convolved spectrum\n (default None, to keep the original sampling)\n\n Returns\n -------\n x: numpy float array\n wavelengths after convolution, will be a subset of xinput when that is equidistant\n in log lambda, otherwise a subset of the resampled version\n y: numpy array of floats\n fluxes after convolution\n\n \"\"\"\n\n #resampling to ln(lambda) if need be\n xx = np.diff(np.log(xinput))\n if max(xx) - min(xx) > 1.e-7: #input not equidist in loglambda\n nel = len(xinput)\n minx = np.min(np.log(xinput))\n maxx = np.max(np.log(xinput))\n x = np.linspace(minx,maxx,nel)\n step = x[1] - x[0]\n x = np.exp(x)\n #y = np.interp( x, xinput, yinput)\n y = interp_spl( x, xinput, yinput)\n else:\n x = xinput\n y = yinput\n\n deltamax=vsini/clight\n npoints = 2*int(deltamax/step)+1\n xx = np.linspace(-deltamax,deltamax,npoints)\n c1=2.0*(1.0-epsilon)/np.pi/(1.0-epsilon/3.0)/deltamax\n c2=0.5*epsilon/(1.0-epsilon/3.0)/deltamax\n r2=(xx/deltamax)**2\n kernel = c1*np.sqrt(1.0-r2)+c2*(1.0-r2)\n kernel = kernel/np.sum(kernel)\n\n\n y = np.convolve(y,kernel,'valid')\n print(xinput.size,x.size,y.size)\n edge = int(npoints/2)\n x = x[edge:-edge]\n\n if ppr != None:\n fac = int(deltamax / step / ppr)\n subset = np.arange(x.size / fac, dtype=int) * fac \n x = x[subset]\n y = y[subset]\n\n return(x,y)\n\ndef gsynth(synthfile,fwhm=0.0,outsynthfile=None,ppr=5,wrange=None,freeze=None):\n\n \"\"\"Smooth the spectra in a FERRE grid by Gaussian convolution\n\n Parameters\n ----------\n synthfile: str\n name of the input FERRE synth file \n fwhm: float\n FWHM of the Gaussian kernel (km/s) \n (default is 0.0, which means no convolution is performed)\n outsynthfile: str\n name of the output FERRE synth file\n (default is the same as synth file, but starting with 'n')\n ppr: float, 
optional\n Points per resolution element to downsample the convolved spectrum\n (default is 5, set to None to keep the original sampling)\n wrange: tuple\n Starting and ending wavelengths (if a smaller range that \n the input's is desired)\n (default None, to keep the original range)\n freeze: dictionary\n Allows to reduce the dimensionality of the grid. The keys are the labels\n of the dimensions to freeze (as given in in the header of the input grid) \n with the values that should be adopted for those 'frozen' dimensions. \n Example: set freeze = {'TEFF': 5000.} to fix that value for the Teff dimension\n in a grid.\n (default None, to retain all the original dimensions)\n Returns\n -------\n writes outsynthfile with the smooth spectra\n\n \"\"\"\n\n if outsynthfile is None: outsynthfile='n'+synthfile[1:]\n logw=0\n\n #read header, update and write out\n fin = open(synthfile,'r')\n fout = open(outsynthfile,'w')\n hd = []\n labels = []\n line = fin.readline()\n hd.append(line)\n while line[1] != \"/\":\n line = fin.readline()\n if \"N_P\" in line: n_p = np.array(line.split()[2:],dtype=int)\n if \"STEPS\" in line: steps = np.array(line.split()[2:],dtype=float)\n if \"LLIMITS\" in line: llimits = np.array(line.split()[2:],dtype=float)\n if \"LABEL\" in line: labels.append(line.split()[-1][1:-1])\n if \"NPIX\" in line: npix = int(line.split()[2])\n if \"N_OF_DIM\" in line: ndim = int(line.split()[2])\n if \"WAVE\" in line: wave = np.array(line.split()[2:],dtype=float)\n if \"LOGW\" in line: logw = int(line.split()[2]) \n if \"RESOLUTION\" in line: resolution = float(line.split()[2])\n hd.append(line)\n\n assert (len(n_p) == len(steps) & len(n_p) == len(llimits) & len(n_p) == len(labels) & len(n_p) == ndim), 'The dimension of the parameters from the header are inconsistent'\n\n #update header parameters\n x = np.arange(npix)*wave[1]+wave[0]\n if logw == 1: x=10.**x\n if logw == 2: x=np.exp(x)\n \n #define indices for grid loops\n ll = []\n ind_n_p = []\n i = 0\n for entry in labels:\n if freeze is not None: \n lfkeys = list(freeze.keys())\n if entry not in lfkeys: ind_n_p.append(i)\n else:\n ind_n_p.append(i)\n ll.append(np.arange(n_p[i]))\n i = i + 1\n ind = list(product(*ll))\n \n if wrange is not None:\n assert (len(wrange) == 2), 'Error: wrange must have two elements'\n section1 = np.where( (x >= wrange[0]*(1.-10.*fwhm/clight)) & (x <= wrange[1]*(1.+10.*fwhm/clight)) )\n x = x[section1]\n npix = len(x)\n \n if fwhm > 1.e-7:\n y = np.ones(npix)\n xx,yy = vgconv(x,y,fwhm,ppr=ppr)\n else:\n print('Warning -- fwhm <= 1.e-7, no convolution will be performed, ppr will be ignored')\n xx = x\n \n print(len(x),len(xx))\n \n if wrange is not None: \n section2 = np.where( (xx >= wrange[0]) & (xx <= wrange[1]) ) \n xx = xx [section2]\n \n #print(x,xx)\n #print(len(x),len(xx))\n \n jlabel = 0\n for line in hd:\n if \"N_OF_DIM\" in line: line = \" N_OF_DIM = \"+str(len(ind_n_p))+\"\\n\" \n if \"N_P\" in line: line = \" N_P = \"+' '.join(map(str,n_p[ind_n_p]))+\"\\n\" \n if \"STEPS\" in line: line = \" STEPS = \"+' '.join(map(str,steps[ind_n_p]))+\"\\n\" \n if \"LLIMITS\" in line: line = \" LLIMITS = \"+' '.join(map(str,llimits[ind_n_p]))+\"\\n\"\n if freeze is not None:\n if \"LABEL\" in line:\n ilabel = line.split()[-1][1:-1] #drop starting/ending quotes\n if ilabel in lfkeys:\n continue\n else:\n jlabel = jlabel + 1\n line = \" LABEL(\"+str(jlabel)+\") = \"+ilabel+\"\\n\"\n if \"NPIX\" in line: line = \" NPIX = \"+str(len(xx))+\"\\n\"\n if \"WAVE\" in line: line = \" WAVE = \"+str(np.log10(xx[0]))+\" 
\"+str(np.log10(xx[1])-np.log10(xx[0]))+\"\\n\"\n if \"LOGW\" in line: line = \" LOGW = 1 \\n\"\n if \"RESOLUTION\" in line: line = \" RESOLUTION = \"+str(clight/np.sqrt(clight**2/resolution**2 + fwhm**2))+\"\\n\"\n fout.write(line)\n\n #smooth and write data\n k = 0\n j = 0\n ntot = np.prod(n_p)\n for i in ind:\n j = j + 1\n print('line ',j,' of ',ntot)\n #print(k,ntot,i)\n #print(i,steps,llimits)\n par = i*steps+llimits\n line = fin.readline()\n if freeze is not None:\n skip = True\n for entry in lfkeys: \n if (abs(freeze[entry] - par[labels.index(entry)]) < 1e-6): skip = False\n if skip: continue\n y = np.array(line.split(),dtype=float)\n if wrange is not None: y = y [section1]\n if fwhm > 1.e-7:\n xx,yy = vgconv(x,y,fwhm,ppr=ppr)\n else:\n xx,yy = x, y \n if wrange is not None: yy = yy[section2]\n yy.tofile(fout,sep=\" \",format=\"%0.4e\")\n fout.write(\"\\n\")\n k = k + 1\n\n fin.close()\n fout.close()\n\nif __name__ == \"__main__\":\n\n npar = len(sys.argv)\n assert (npar >= 4), 'Synple requires at least 3 input parameters (modelfile wstart wend)'\n assert (npar <= 7), 'Synple requires at maximum 6 input parameters (modelfile wstart wend vmicro vrot fwhm)'\n vmicro = None\n vrot = 0.0\n fwhm = 0.0\n modelfile = sys.argv[1]\n wstart = float(sys.argv[2])\n wend = float(sys.argv[3])\n if (npar > 4): \n vmicro = float(sys.argv[4])\n if (npar > 5):\n fwhm = float(sys.argv[5])\n if (npar > 6):\n vrot = float(sys.argv[6])\n\n #symbol, mass, sol = elements()\n x, y, z = syn(modelfile, (wstart,wend), save=True, vmicro=vmicro, vrot=vrot, fwhm=fwhm)\n\n\n",
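The vgconv and rotconv routines above share one trick: resample the spectrum onto a grid that is equidistant in ln(lambda), so that a kernel of fixed width in velocity becomes a fixed-width kernel in pixels. Below is a minimal, numpy-only sketch of the Gaussian case; the function name gauss_smooth_loglam and the toy spectrum are illustrative (not part of synple), and the input grid is assumed to be already log-uniform, whereas vgconv performs that resampling itself when needed.

import numpy as np

CLIGHT = 299792.458  # speed of light, km/s

def gauss_smooth_loglam(wave, flux, fwhm_kms):
    # wave must be uniform in ln(lambda); fwhm_kms is the Gaussian FWHM in km/s
    step = np.log(wave[1]) - np.log(wave[0])           # ln(lambda) pixel size
    fwhm = fwhm_kms / CLIGHT                           # FWHM in ln(lambda) units
    sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))  # FWHM -> sigma
    npoints = 2 * int(3 * fwhm / 2.0 / step) + 1       # odd kernel, ~ +/- 1.5 FWHM
    half = npoints * step / 2.0
    x = np.linspace(-half, half, npoints)
    kernel = np.exp(-x**2 / (2.0 * sigma**2))
    kernel /= kernel.sum()
    smooth = np.convolve(flux, kernel, 'valid')        # 'valid' trims the edges
    edge = npoints // 2
    return wave[edge:-edge], smooth

# usage: smooth a toy absorption line on a log-uniform grid with a 10 km/s FWHM
wave = np.exp(np.linspace(np.log(5000.0), np.log(5100.0), 2000))
flux = 1.0 - 0.5 * np.exp(-(wave - 5050.0)**2 / (2.0 * 0.2**2))
w, f = gauss_smooth_loglam(wave, flux, 10.0)

Using 'valid' convolution avoids edge artifacts at the cost of losing half a kernel width on each side, which is why vgconv and rotconv both return a shortened wavelength array.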
"#\n# Routine to make plots comparing parameters and abundances for dr papers\n#\nimport pdb\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom apogee.utils import apload\nfrom tools import match\nfrom tools import plots\n\ndef dr_compare() :\n # load the DRs, select stars with SN>150\n dr12load=apload.ApLoad(dr='dr12')\n dr12=dr12load.allStar()[1].data\n gd=np.where(dr12['SNR'] > 150)[0]\n dr12=dr12[gd]\n\n dr13load=apload.ApLoad(dr='dr13')\n dr13=dr13load.allStar()[1].data\n gd=np.where(dr13['SNR'] > 150)[0]\n dr13=dr13[gd]\n\n dr14load=apload.ApLoad(dr='dr14')\n dr14=dr14load.allStar()[1].data\n gd=np.where(dr14['SNR'] > 150)[0]\n dr14=dr14[gd]\n c=apload.allStar()[3].data\n\n # match them\n m1a,m2a=match.match(dr12['APOGEE_ID'],dr13['APOGEE_ID'])\n m1b,m2b=match.match(dr12['APOGEE_ID'],dr14['APOGEE_ID'])\n m1c,m2c=match.match(dr13['APOGEE_ID'],dr14['APOGEE_ID'])\n\n # parameter figures\n figu,axu=plots.multi(3,7,hspace=0.001,wspace=0.001)\n figc,axc=plots.multi(3,7,hspace=0.001,wspace=0.001)\n\n tit=[r'T$_{\\rm eff}$','log g',r'V$_{\\rm micro}$','[M/H]','[C/M]','[N/M]',r'[$\\alpha$/M]']\n for iparam in range(7) :\n \n print(iparam)\n for iy,param in enumerate(['FPARAM','PARAM']) :\n if iy == 0 :\n ax=axu\n else :\n ax=axc\n yt=r'$\\Delta$'+tit[iparam]\n if iparam == 6 : xt=r'T$_{\\rm eff}$'\n else : xt=None\n if iparam == 0 :\n ax[iparam,0].text(0.5,1.0,'DR13-DR12',transform=ax[iparam,0].transAxes,ha='center',va='bottom')\n ax[iparam,1].text(0.5,1.0,'DR14-DR12',transform=ax[iparam,1].transAxes,ha='center',va='bottom')\n ax[iparam,2].text(0.5,1.0,'DR14-DR13',transform=ax[iparam,2].transAxes,ha='center',va='bottom')\n\n if iparam == 0 :\n yr=[-300,300]\n elif iparam == 1 : \n yr=[-0.5,0.5]\n else :\n yr=[-0.3,0.3]\n\n xr=[3500,6000]\n \n axim = plots.plotc(ax[iparam,0],dr12['TEFF'][m1a],dr13[param][m2a,iparam]-dr12[param][m1a,iparam],dr12[param][m1a,3],size=1,xr=xr,yr=yr,zr=[-1,0.5],yt=yt,xt=xt,rasterized=True)\n plots.plotl(ax[iparam,0],xr,[0.,0.],ls=':')\n plots.plotc(ax[iparam,1],dr12['TEFF'][m1b],dr14[param][m2b,iparam]-dr12[param][m1b,iparam],dr12[param][m1b,3],size=1,xr=xr,yr=yr,zr=[-1,0.5],xt=xt,rasterized=True)\n plots.plotl(ax[iparam,1],xr,[0.,0.],ls=':')\n plots.plotc(ax[iparam,2],dr13['TEFF'][m1c],dr14[param][m2c,iparam]-dr13[param][m1c,iparam],dr13[param][m1c,3],size=1,xr=xr,yr=yr,zr=[-1,0.5],xt=xt,rasterized=True)\n plots.plotl(ax[iparam,2],xr,[0.,0.],ls=':')\n for iax in range(3) :\n ax[iparam,iax].tick_params(axis='both',labelsize=8)\n\n # add colorbar\n for fig in [figu, figc] :\n cbaxes = fig.add_axes([0.91, 0.1, 0.01, 0.8])\n cb = plt.colorbar(axim, cax = cbaxes)\n cb.set_label('[M/H]')\n cbaxes.tick_params(axis='both',labelsize=8)\n\n figu.savefig('drcomp_uncal.pdf')\n figc.savefig('drcomp_cal.pdf')\n plots.close()\n\n # abundance figure\n fig,ax=plots.multi(3,14,hspace=0.001,wspace=0.001,figsize=(8,32))\n\n for ielem,elem in enumerate(['C','N','O','Na','Mg','Al','Si','S','K','Ca','Ti','V','Mn','Ni']) :\n print(elem)\n yt=r'$\\Delta$'+elem\n if ielem == 13 : xt=r'T$_{\\rm eff}$'\n else : xt=None\n if ielem == 0 :\n ax[ielem,0].text(0.5,1.0,'DR13-DR12',transform=ax[ielem,0].transAxes,ha='center',va='bottom')\n ax[ielem,1].text(0.5,1.0,'DR14-DR12',transform=ax[ielem,1].transAxes,ha='center',va='bottom')\n ax[ielem,2].text(0.5,1.0,'DR14-DR13',transform=ax[ielem,2].transAxes,ha='center',va='bottom')\n\n yr=[-0.5,0.5]\n\n dr12elem=dr12[elem.upper()+'_H'][m1a]-dr12['FE_H'][m1a]\n dr13elem=dr13[elem.upper()+'_FE'][m2a]\n gd=np.where((dr12elem > -99) & 
(dr13elem>-99))[0]\n plots.plotc(ax[ielem,0],dr12['TEFF'][m1a[gd]],dr13elem[gd]-dr12elem[gd],dr12['PARAM'][m1a[gd],3],size=1,xr=[3500,6000],yr=yr,zr=[-1,0.5],yt=yt,xt=xt,nytick=5,rasterized=True)\n plots.plotl(ax[ielem,0],xr,[0.,0.],ls=':')\n ax[ielem,0].tick_params(axis='both',labelsize=8)\n \n dr12elem=dr12[elem.upper()+'_H'][m1b]-dr12['FE_H'][m1b]\n dr14elem=dr14[elem.upper()+'_FE'][m2b]\n gd=np.where((dr12elem > -99) & (dr14elem>-99))[0]\n plots.plotc(ax[ielem,1],dr12['TEFF'][m1b[gd]],dr14elem[gd]-dr12elem[gd],dr12['PARAM'][m1b[gd],3],size=1,xr=[3500,6000],yr=yr,zr=[-1,0.5],xt=xt,nytick=5,rasterized=True)\n plots.plotl(ax[ielem,1],xr,[0.,0.],ls=':')\n ax[ielem,1].tick_params(axis='both',labelsize=8)\n\n dr13elem=dr13[elem.upper()+'_FE'][m1c]\n dr14elem=dr14[elem.upper()+'_FE'][m2c]\n gd=np.where((dr13elem > -99) & (dr14elem>-99))[0]\n plots.plotc(ax[ielem,2],dr13['TEFF'][m1c[gd]],dr14elem[gd]-dr13elem[gd],dr13['PARAM'][m1c[gd],3],size=1,xr=[3500,6000],yr=yr,zr=[-1,0.5],xt=xt,nytick=5,rasterized=True)\n plots.plotl(ax[ielem,2],xr,[0.,0.],ls=':')\n ax[ielem,2].tick_params(axis='both',labelsize=8)\n\n cbaxes = fig.add_axes([0.91, 0.1, 0.01, 0.8])\n cb = plt.colorbar(axim, cax = cbaxes)\n cb.set_label('[M/H]')\n cbaxes.tick_params(axis='both',labelsize=8)\n\n for item in (cbaxes.get_xticklabels() + cbaxes.get_yticklabels()) : item.set_fontsize(8)\n fig.savefig('drcomp_elem.pdf')\n\ndef kurucz_marcs() :\n\n dr13load=apload.ApLoad(dr='dr13')\n dr13=dr13load.allStar()[1].data\n gd=np.where(dr13['SNR'] > 150)[0]\n dr13=dr13[gd]\n\n dr13load.aspcap = 'l30g'\n dr13_marcs=dr13load.allStar()[1].data\n gd=np.where(dr13_marcs['SNR'] > 150)[0]\n dr13_marcs=dr13_marcs[gd]\n\n fig,ax=plots.multi(2,1,wspace=0.001)\n axim = plots.plotc(ax[0],dr13['FPARAM'][:,0],dr13['FPARAM'][:,1],dr13['FPARAM'] [:,3],\n xr=[4200,3000],yr=[5,-1],zr=[-2,0.5],xt=r'T$_{\\rm eff}$',yt='log g',rasterized=True)\n plots.plotc(ax[1],dr13_marcs['FPARAM'][:,0],dr13_marcs['FPARAM'][:,1],dr13_marcs['FPARAM'] [:,3],\n xr=[4200,3000],yr=[5,-1],zr=[-2,0.5],xt=r'T$_{\\rm eff}$',rasterized=True)\n for iax in range(2) :\n for item in (ax[iax].get_xticklabels() + ax[iax].get_yticklabels()) : item.set_fontsize(10)\n ax[iax].xaxis.label.set_size(10)\n ax[iax].yaxis.label.set_size(10)\n\n cbaxes = fig.add_axes([0.91, 0.1, 0.01, 0.8])\n cb = plt.colorbar(axim, cax = cbaxes)\n cb.set_label('[M/H]')\n cbaxes.tick_params(axis='both',labelsize=10)\n cbaxes.yaxis.label.set_size(10)\n\n fig.savefig('kurucz_marcs.pdf')\n\n",
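The comparisons above hinge on cross-matching allStar tables from different data releases by APOGEE_ID before differencing their parameters. The following numpy-only sketch shows that matching step with hypothetical toy tables; match.match from the tools package is assumed to return equivalent index pairs, and the example deliberately avoids calling the actual apogee/tools API.

import numpy as np

def match_ids(id1, id2):
    # index arrays m1, m2 such that id1[m1] == id2[m2] (first occurrence per ID)
    _, m1, m2 = np.intersect1d(id1, id2, return_indices=True)
    return m1, m2

# toy "allStar" tables standing in for two data releases
dr_a = {'APOGEE_ID': np.array(['2M0001', '2M0002', '2M0003']),
        'TEFF':      np.array([4800.0, 5100.0, 4500.0]),
        'M_H':       np.array([-0.30, 0.10, -0.80])}
dr_b = {'APOGEE_ID': np.array(['2M0003', '2M0002', '2M0009']),
        'TEFF':      np.array([4520.0, 5080.0, 6000.0]),
        'M_H':       np.array([-0.75, 0.05, 0.00])}

m1, m2 = match_ids(dr_a['APOGEE_ID'], dr_b['APOGEE_ID'])
dteff = dr_b['TEFF'][m2] - dr_a['TEFF'][m1]   # plotted against dr_a['TEFF'][m1] above
dmh   = dr_b['M_H'][m2]  - dr_a['M_H'][m1]    # colored by metallicity in the figures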
"# encoding: utf-8\n#\n# @Author: Jon Holtzman\n# @Date: March 2018\n# @Filename: spectra.py\n# @License: BSD 3-Clause\n# @Copyright: Jon Holtzman\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport copy\n\n# utility routines for working with spectra\n\ndef fits2vector(header,axis) :\n \"\"\" Routine to return vector of axis values from a FITS header CRVAL, CDELT, NAXIS for specified axis\n \"\"\"\n caxis='{:1d}'.format(axis)\n return header['CRVAL'+caxis]+header['CDELT'+caxis]*np.arange(header['NAXIS'+caxis])\n\ndef vector(start,delta,n) :\n \"\"\" Routine to return vector of values given start, delta, n\n \"\"\"\n return float(start)+np.arange(int(n))*float(delta)\n\ndef add_dim(header,crval,cdelt,crpix,ctype,idim) :\n \"\"\" Add a set of CRVAL/CDELT,CRPIX,CTYPE cards to header\n \"\"\"\n header.append(('CRVAL{:d}'.format(idim),crval))\n header.append(('CDELT{:d}'.format(idim),cdelt))\n header.append(('CRPIX{:d}'.format(idim),crpix))\n header.append(('CTYPE{:d}'.format(idim),ctype))\n\ndef vactoair(wave_vac) :\n \"\"\" Convert vacuum wavelengths to air wavelengths\n\n Corrects for the index of refraction of air under standard conditions. \n Wavelength values below 2000 A will not be altered. Accurate to about 10 m/s.\n\n From IDL Astronomy Users Library, which references Ciddor 1996 Applied Optics 35, 1566\n \"\"\"\n if not isinstance(wave_vac, np.ndarray) : \n vac = np.array([wave_vac])\n else :\n vac = wave_vac\n\n air = copy.copy(vac)\n g = np.where(vac >= 2000)[0] #Only modify above 2000 A\n sigma2 = (1.e4/vac[g] )**2. #Convert to wavenumber squared\n\n # Compute conversion factor\n fact = 1. + 5.792105E-2/(238.0185E0 - sigma2) + 1.67917E-3/( 57.362E0 - sigma2)\n \n air[g] = vac[g]/fact\n return air \n\ndef airtovac(wave_air) :\n \"\"\" Convert air wavelengths to vacuum wavelengths\n\n Corrects for the index of refraction of air under standard conditions. \n Wavelength values below 2000 A will not be altered. Accurate to about 10 m/s.\n\n From IDL Astronomy Users Library, which references Ciddor 1996 Applied Optics 35, 1566\n \"\"\"\n if not isinstance(wave_air, np.ndarray) : \n air = np.array([wave_air])\n else :\n air = wave_air\n\n vac = copy.copy(air)\n g = np.where(vac >= 2000)[0] #Only modify above 2000 A\n\n for iter in range(2) :\n sigma2 = (1e4/vac[g])**2. # Convert to wavenumber squared\n # Compute conversion factor\n fact = 1. + 5.792105E-2/(238.0185E0 - sigma2) + 1.67917E-3/( 57.362E0 - sigma2)\n\n vac[g] = air[g]*fact #Convert Wavelength\n return vac\n",
"# routines related to individual element calibration for APOGEE/ASPCAP\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\nimport numpy as np\nfrom apogee.utils import apload\nfrom apogee.utils import apselect\nfrom apogee.aspcap import err\nfrom tools import plots\nfrom tools import html\nfrom tools import fit\nfrom tools import match\nimport pdb\nfrom astropy.io import fits\nfrom astropy.io import ascii\ntry:\n import esutil\nexcept:\n pass\nimport copy\nimport os\n\ndef read(file='allStar-testcal.fits') :\n '''\n Read allStar file, get main structure, elem_symbol, and elemtoh\n '''\n dr13load=apload.ApLoad(dr='dr13')\n #a=apload.allStar()[1].data\n c=dr13load.allStar()[3].data\n #a=fits.open('../dist/allStar+.fits')[1].data\n #x,y,z,r = galmodel.lbd2xyz(a['GLON'],a['GLAT'],a['DISO'][:,2]/1000.)\n #zone=np.where((r>9) & (r<11) & (dt<40))[0]\n\n a=fits.open(file)[1].data\n #c=fits.open(file)[3].data\n elem=c['ELEM_SYMBOL'][0]\n elemtoh=c['ELEMTOH'][0]\n\n return a, elem, elemtoh\n\ndef arctabun(el) :\n '''\n Define Arcturus abundances, and return requested abundance\n '''\n abun = { \"C\" : 0.090000, \"CI\" : 0.09, \"N\" : 0.400000, \"O\" : 0.480000, \"Na\" : 0.210000, \"Mg\" : 0.370000, \"Al\" : 0.400000, \"Si\" : 0.330000, \"P\" : 0.070000, \"S\" : 0.350000, \"K\" : 0.200000, \"Ca\" : 0.090000, \"Sc\" : 0.070000, \"Ti\" : 0.250000, \"TiII\" : 0.25, \"V\" : 0.160000, \"Cr\" : -0.050000, \"Mn\" : -0.120000, \"Fe\" : -0.000000, \"Co\" : 0.040000, \"Ni\" : 0.030000, \"Cu\" : -0.050000, \"Ge\" : 0.000000, \"Rb\" : 0.000000, \"Y\" : 0.000000, \"Ce\" : -0.190000, \"Nd\" : 0.130000, \"Yb\" : 0., \"M\" : 0., \"alpha\" : 0.3}\n return(abun[el]) \n\ndef optabun(el) :\n '''\n ??? define abundance offsets from some optical analysis ???\n '''\n abun = {\"Na\" : -0.15, \"Mg\" : 0.06, \"Al\" : 0.04, \"Si\" : -0.21, \"Ca\" : 0.11, \"Ti\" : -0.14, \"TiII\" : 0.08, \"V\" : -0.15, \"Cr\" : -0.04, \"Mn\" : -0.36, \"Fe\" : 0.06, \"Co\" : -0.26}\n try :\n return(abun[el]) \n except :\n return(-9999.)\n\n\ndef refabun(el,dwarf=False) :\n '''\n Return reference abundance: 0 if giant, Arcturus if not?\n '''\n if dwarf :\n return 0.\n else :\n return arctabun(el)\n\ndef plot(a,elem,etoh,dwarf=False,suffix='',gcal=None,dcal=None,glon=None,glat=None,res=None,usemh=False,sn=[200,1000]) :\n '''\n Make a bunch of plots for elemental abundances\n '''\n\n try: os.mkdir('elem')\n except: pass\n\n # selection\n #dt=a['FPARAM'][:,0]-(4468+(a['FPARAM'][:,1]-2.5)/0.0018 - 382.5*a['FPARAM'][:,3])\n #gd=apselect.select(a[zone],badval='STAR_BAD',logg=[-1,3.5],sn=[200,1000],teff=[4000,4800])\n #gd=zone[gd]\n if dwarf :\n tit = 'Dwarfs, S/N>200'\n prefix = 'd'+suffix\n tmax=6500\n gd=apselect.select(a,badval='STAR_BAD',sn=sn,raw=True,glon=glon,glat=glat,dwarfs=True)\n etoh[0]=1\n etoh[1]=1\n etoh[2]=1\n ref=apselect.select(a,id='VESTA')\n else :\n tit = 'Giants, S/N>200'\n prefix = 'g'+suffix\n tmax=6500\n gd=apselect.select(a,badval='STAR_BAD',sn=sn,raw=True,glon=glon,glat=glat,giants=True)\n ref=apselect.select(a,id='alpha_Boo')\n out = open('elem/'+prefix+'.dat','w')\n\n\n # get the indices for different grids, and for stars near solar metallicity\n fgrid=apselect.select(a[gd],grid='F',raw=True)\n gkgrid=apselect.select(a[gd],grid='GK',raw=True)\n mgrid=apselect.select(a[gd],grid='M',raw=True)\n solar=apselect.select(a[gd],mh=[-0.1,0.1],raw=True)\n\n ifeh=17\n if len(a['FELEM'].shape) == 2: felem_feh = a['FELEM'][:,ifeh]\n else : felem_feh = a['FELEM'][:,0,ifeh]\n \n 
ytit=[]\n files=[]\n # loop over elements\n nelem=len(elem)\n for ielem in range(nelem+2) :\n file=[]\n if ielem < nelem :\n el = elem[ielem].strip()\n #eelem = a['ELEM'][gd,ielem]\n eelem = a['X_M'][gd,ielem]\n if len(a['FELEM'].shape) == 2: felem = a['FELEM'][gd,ielem]\n else : felem = a['FELEM'][gd,0,ielem]\n eelem_err = a['X_M_ERR'][gd,ielem]\n if len(a['FELEM'].shape) == 2: felem_err = a['FELEM_ERR'][gd,ielem]\n else: felem_err = a['FELEM_ERR'][gd,0,ielem]\n tmp=etoh[ielem]\n if ielem > 2 :\n if usemh and etoh[ielem] :\n #eelem -= a['FPARAM'][gd,3]\n felem -= a['FPARAM'][gd,3]\n elif not usemh and not etoh[ielem] :\n #eelem += a['FPARAM'][gd,3]\n felem += a['FPARAM'][gd,3]\n else :\n giants = apselect.select(a[gd],grid='g_',raw=True)\n if not usemh :\n #eelem[giants] += a['FPARAM'][gd[giants],3] \n felem[giants] += a['FPARAM'][gd[giants],3] \n dwarfs = apselect.select(a[gd],grid='d_',raw=True)\n if usemh :\n #eelem[dwarfs] -= a['FPARAM'][gd[dwarfs],3] \n felem[dwarfs] -= a['FPARAM'][gd[dwarfs],3] \n elif ielem == nelem :\n el = 'M'\n eelem = a['PARAM'][gd,0]\n felem = a['FPARAM'][gd,3]\n eelem_err = np.sqrt(a['PARAM_COV'][gd,3,3])\n felem_err = np.sqrt(a['FPARAM_COV'][gd,3,3])\n tmp = 1\n else :\n el = 'alpha'\n eelem = a['PARAM'][gd,6]\n felem = a['FPARAM'][gd,6]\n eelem_err = np.sqrt(a['PARAM_COV'][gd,6,6])\n felem_err = np.sqrt(a['FPARAM_COV'][gd,6,6])\n tmp = 0\n if not usemh :\n eelem += a['FPARAM'][gd,3]\n felem += a['FPARAM'][gd,3]\n\n if (tmp == 1 and not usemh) or (tmp == 0 and usemh ):\n refoffset=0\n else :\n refoffset=a['FPARAM'][ref,3]\n if usemh :\n refoffset *= -1\n\n name=prefix+el\n print(name)\n fname = 'elem/'+name\n # loop over plots\n xtit = []\n for iplot in range(0,8) :\n #for iplot in range(2,3) :\n if iplot == 0 :\n #x = a['ELEM'][gd,ifeh]\n x = a['X_H'][gd,ifeh]\n xr = [-1.5,1.]\n xt= '[Fe/H] (cal)'\n y = eelem\n if not usemh: y-=a['PARAM'][gd,3]\n yr=[-0.25,0.5]\n yt = '['+name+'/M](cal)'\n z = a['FPARAM'][gd,0]\n zr = [3000,tmax]\n zt='Teff'\n xtit.append('calibrated vs [Fe/H]')\n elif iplot == 1 :\n x = felem_feh[gd]\n xr = [-1.5,1.]\n xt= '[Fe/H] (raw)'\n y = felem\n if not usemh: y-=a['FPARAM'][gd,3]\n yr=[-0.25,0.5]\n yt = '['+name+'/M](raw)'\n z = a['FPARAM'][gd,0]\n zr = [3000,tmax]\n zt='Teff'\n xtit.append('raw vs [Fe/H]')\n elif iplot == 2 :\n x = a['FPARAM'][gd,0]\n xr = [2500,tmax]\n xt= 'Teff'\n y = eelem\n if not usemh: y-=a['PARAM'][gd,3]\n yr=[-0.25,0.5]\n yt = '['+name+'/M](cal)'\n #z = a['ELEM'][gd,ifeh]\n z = a['X_H'][gd,ifeh]\n zr = [-1.5,1.]\n zt='[Fe/H]'\n xtit.append('calibrated vs Teff')\n elif iplot == 3 :\n x = a['FPARAM'][gd,0]\n xr = [2500,tmax]\n xt= 'Teff'\n y = felem\n if not usemh: y-=a['FPARAM'][gd,3]\n yr=[-0.25,0.5]\n yt = '['+name+'/M](raw)'\n z = felem_feh[gd]\n zr = [-1.5,1.]\n zt='[Fe/H]'\n xtit.append('raw vs Teff')\n elif iplot == 4 :\n x = a['FPARAM'][gd,0]\n xr = [3000,tmax]\n xt = 'Teff'\n y = eelem-felem\n yr = [-0.3,0.3]\n yt = 'cal - raw'\n z = felem_feh[gd]\n zr = [-1.5,1.]\n zt='[Fe/H]'\n xtit.append('calibration')\n elif iplot == 5 :\n x = a['FPARAM'][gd,0]\n xr = [2500,tmax]\n xt = 'Teff'\n y = eelem_err\n yr= [0,0.3]\n yt = 'Empirical uncertainty'\n z = felem_feh[gd]\n zr = [-1.5,1.]\n zt='[Fe/H]'\n xtit.append('empirical uncertainty')\n elif iplot == 6 :\n x = a['FPARAM'][gd,0]\n xr = [2500,tmax]\n xt = 'Teff'\n y = felem_err\n yr= [0,0.3]\n yt = 'FERRE uncertainty'\n z = felem_feh[gd]\n zr = [-1.5,1.]\n zt='[Fe/H]'\n xtit.append('FERRE uncertainty')\n elif iplot == 7 :\n x = a['FPARAM'][gd,0]\n xr = 
[2500,tmax]\n xt = 'Teff'\n if ielem < nelem :\n y = a['ELEM_CHI2'][gd,ielem]\n else :\n y = x*0.\n yr= [0,50]\n yt = 'ELEM_CHI2'\n z = felem_feh[gd]\n zr = [-1.5,1.]\n zt='[Fe/H]'\n xtit.append('CHI2 from element fit')\n \n fig=plt.figure(figsize=(10,8))\n ax=fig.add_subplot(111)\n if len(x) > 0 :\n if len(fgrid) > 0 :\n plots.plotc(ax,x[fgrid],y[fgrid],z[fgrid],xr=xr,yr=yr,zr=zr,colorbar=False,size=10,marker='s',yt=yt,xt=xt,zt=zt)\n if len(gkgrid) > 0 :\n plots.plotc(ax,x[gkgrid],y[gkgrid],z[gkgrid],xr=xr,yr=yr,zr=zr,size=10,marker='o',yt=yt,xt=xt,zt=zt)\n if len(mgrid) > 0 :\n plots.plotc(ax,x[mgrid],y[mgrid],z[mgrid],xr=xr,yr=yr,zr=zr,size=7,marker='^',yt=yt,xt=xt,zt=zt)\n if (iplot == 0 or iplot == 2) : \n if res is not None :\n clust, = np.where(res['col3'] == ielem)\n plots.plotp(ax,res['col4'][clust],res['col9'][clust],xr=xr,yr=yr,size=50,marker='o',facecolors='none',linewidth=1)\n\n # plot the reference star abundance (Arcturus or Vesta)\n if ielem < nelem-2 :\n #refval = a['ELEM'][ref,ielem]+refoffset\n #referr = a['ELEM_ERR'][ref,ielem]\n refval = a['X_M'][ref,ielem]\n referr = a['X_M_ERR'][ref,ielem]\n elif ielem == nelem-2 :\n refval = a['PARAM'][ref,3]+refoffset\n referr = np.sqrt(a['PARAM_COV'][ref,3,3])\n else :\n refval = a['PARAM'][ref,6]+refoffset\n referr = np.sqrt(a['PARAM_COV'][ref,6,6])\n if not usemh: refval -= a['PARAM'][ref,3]\n reflit = (refabun(el,dwarf=dwarf)-refabun('Fe',dwarf=dwarf))\n plots.plotl(ax,xr,[refval-reflit,refval-reflit],color='r')\n\n # Plot the median of solar abundance stars\n cal=np.where(y[solar] > -9000)[0]\n med = np.median(y[solar[cal]])\n plots.plotl(ax,xr,[med,med],color='y')\n plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.85*(yr[1]-yr[0]),'solar metallicity stars: {:4.2f}'.format(med),color='y')\n if iplot == 0 :\n out.write(el+'{:8.3f} {:8d}\\n'.format(med,len(cal)))\n\n # Plot the offset from the optical analysis \n opt=optabun(el)\n plots.plotl(ax,xr,[opt,opt],color='m')\n plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.75*(yr[1]-yr[0]),'optical offset: {:4.2f}'.format(opt),color='m')\n\n # Plot M67 points\n #clust=fits.open('../../cal/clust.fits')[1].data\n #m67=np.where(np.core.defchararray.find(clust['FIELD'],'M67') >= 0)\n #m1, m2 = esutil.numpy_util.match(a['APOGEE_ID'][gd],clust['APOGEE_ID'][m67])\n #plots.plotp(ax,x[m1],y[m1],size=6,color='k',xr=xr,yr=yr)\n #if iplot == 2 :\n # print(m1, x[m1])\n if dwarf : \n refstar = 'VESTA'\n if dcal is not None : \n refclust=dcal[ielem] #-dcal[ifeh]\n plots.plotl(ax,xr,[refclust,refclust],color='g')\n plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.95*(yr[1]-yr[0]),'M67 dwarfs: {:4.2f}'.format(refclust),color='g')\n else :\n refstar = 'Arcturus'\n if gcal is not None : \n refclust=gcal[ielem] #-gcal[ifeh]\n plots.plotl(ax,xr,[refclust,refclust],color='g')\n plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.95*(yr[1]-yr[0]),'M67 giants: {:4.2f}'.format(refclust),color='g')\n if dcal is not None : \n drefclust=dcal[ielem] #-dcal[ifeh]\n plots.plotl(ax,xr,[refclust-drefclust,refclust-drefclust],color='b')\n plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.90*(yr[1]-yr[0]),'M67 dwarfs: {:4.2f}'.format(drefclust),color='b')\n plt.text(xr[0]+0.05*(xr[1]-xr[0]),yr[0]+0.05*(yr[1]-yr[0]),\n refstar+' ASPCAP: {:4.2f}+/-{:4.2f}'.format(refval[0],referr[0])+' lit: '+'{:4.2f}'.format(reflit),color='r')\n\n #plt.show()\n plt.savefig(fname+'{:1d}.png'.format(iplot))\n plt.close()\n file.append(name+'{:1d}.png'.format(iplot))\n\n ytit.append(name)\n\n# plt.show()\n files.append(file)\n out.close()\n 
html.htmltab(files,ytitle=ytit,file='elem/'+prefix+'elem.html',xtitle=xtit,header=tit)\n\ndef elemindex() :\n ''' Make the HTML pages (assumes plots have already been made) for individual elements '''\n\n a,elem,elemtoh = read()\n\n # loop over elements\n nelem=len(elem)\n for ielem in range(len(elem)+2) :\n if ielem < len(elem) :\n el = elem[ielem].strip()\n elif ielem == nelem :\n el = 'M'\n elif ielem == nelem+1 :\n el = 'alpha'\n\n ytit=[]\n files=[]\n for prefix in [ 'g','d' ] :\n for suffix in [ '', 'gal' ] :\n name=prefix+el\n file = [prefix+'1mh'+suffix+el+'2.png',prefix+'1emh'+suffix+el+'2.png',prefix+'1eemh'+suffix+el+'2.png',\n prefix+'2mh'+suffix+el+'2.png',prefix+'2emh'+suffix+el+'2.png',prefix+'2eemh'+suffix+el+'2.png',\n prefix+'3mh'+suffix+el+'2.png',prefix+'3emh'+suffix+el+'2.png',prefix+'3eemh'+suffix+el+'2.png']\n xtit = ['Linear 4000-5250','Linear 3750-5250','Linear 3500-5250',\n 'Quadratic 4000-5250','Quadratic 3750-5250','Quadratic 3500-5250',\n 'Cubic 4000-5250','Cubic 3750-5250','Cubic 3500-5250']\n files.append(file)\n\n ytit = ['Giants (full)','Giants (70<l<110)','Dwarfs (full)','Dwarfs (70<l<110)']\n html.htmltab(files,file='elem/'+el+'.html',xtitle=xtit,ytitle=ytit)\n\ndef main() :\n ''' Make series of plots and web pages for each calibration \"type\" ''' \n\n #files = ['testcal','testcal1mh','testcal2mh','testcal3mh',\n # 'testcal1emh','testcal2emh','testcal3emh', \n # 'testcal1eemh','testcal2eemh','testcal3eemh'] \n #dirs = ['../testcal','testcal1mh','testcal2mh','testcal3mh',\n # 'testcal1emh','testcal2emh','testcal3emh', \n # 'testcal1eemh','testcal2eemh','testcal3eemh'] \n #suffixes = ['','1mh','2mh','3mh','1emh','2emh','3emh','1eemh','2eemh','3eemh']\n files = ['l30e.2']\n dirs = ['../cal']\n suffixes = ['']\n for i in range(len(files)) :\n a,e,etoh = read(file='allStar-'+files[i]+'.fits')\n gcal = fits.open(dirs[i]+'/giantcal.fits')[2].data['ABUN'][0,:,17]\n dcal = fits.open(dirs[i]+'/dwarfcal.fits')[2].data['ABUN'][0,:,17]\n for d in [ False, True ] :\n if d :\n res = ascii.read(dirs[i]+'/dwarfcal.res')\n else :\n res = ascii.read(dirs[i]+'/giantcal.res')\n tmp=etoh # since etoh gets changed for dwarfs\n plot(a,e,tmp,suffix=suffixes[i],dwarf=d,gcal=gcal,dcal=dcal,res=None,usemh=True,sn=[200,1000])\n plot(a,e,tmp,suffix=suffixes[i]+'gal',dwarf=d,gcal=gcal,dcal=dcal,glon=[70,110],glat=[-5,5],res=None,usemh=True)\n #a,e,etoh = read()\n #plot(a,e,etoh)\n #plot(a,e,etoh,dwarf=True)\n\ndef globalscatter(allstar,elems,vscatter=[0,0.2],pm=True,dist=True) :\n ''' \n Compute scatter in clusters\n '''\n clust=apselect.clustdata()\n gd=apselect.select(allstar,badval='STAR_BAD',vscatter=vscatter)\n members=[]\n print('selecting')\n clusts = ['N2420', 'M67', 'N188', 'N7789', 'N6819', 'N6791']\n fp=open('global.dat','w')\n for cluster in clusts :\n j=np.array(apselect.clustmember(allstar[gd],cluster,raw=True,pm=pm,dist=dist))\n print(cluster,len(j))\n members.append(j)\n for jj in j :\n fp.write('{:s} {:s} {:8.3f} {:8.1f} {:8.1f} {:8.1f} {:8.2f} {:s}\\n'.format(\n cluster,allstar['APOGEE_ID'][gd[jj]],allstar['FE_H'][gd[jj]],allstar['TEFF'][gd[jj]],\n allstar['SNR'][gd[jj]],allstar['ASPCAP_CHI2'][gd[jj]],\n allstar['VSCATTER'][gd[jj]],allstar['STARFLAGS'][gd[jj]]))\n fp.close()\n\n iel=0\n nels=len(elems[0])+2\n fig,ax=plots.multi(2,int(round(nels/2.)),hspace=0.001,wspace=0.001,figsize=(8,10))\n plots.event(fig)\n plots._data=allstar\n plots._id_cols=['APOGEE_ID']\n\n color=['r','g','b','c','m','y']\n for iel,el in enumerate(np.append(elems,['M','alpha'])) :\n 
iclust=0\n all=np.array([])\n ix=iel%2\n iy=iel/2\n for cluster in clusts :\n i=np.where(clust.name == cluster)\n mh=clust[i].mh\n name=clust[i].name\n # get cluster members\n j=members[iclust]\n if len(j) > 0 :\n if el.strip() == 'Fe' :\n abun=allstar['X_H'][gd[j],iel]\n ok=np.where(((allstar['ELEMFLAG'][gd[j],iel] & 255) == 0) & (allstar['X_H_ERR'][gd[j],iel] < 0.2))[0]\n elif el.strip() == 'M' :\n abun=allstar['M_H'][gd[j]]\n ok=np.where(((allstar['PARAMFLAG'][gd[j],3] & 255) == 0) & (allstar['M_H_ERR'][gd[j]] < 0.2))[0]\n elif el.strip() == 'alpha' :\n abun=allstar['ALPHA_M'][gd[j]]\n ok=np.where(((allstar['PARAMFLAG'][gd[j],6] & 255) == 0) & (allstar['ALPHA_M_ERR'][gd[j]] < 0.2))[0]\n else :\n abun=allstar['X_M'][gd[j],iel]\n ok=np.where(((allstar['ELEMFLAG'][gd[j],iel] & 255) == 0) & (allstar['X_M_ERR'][gd[j],iel] < 0.2) & (allstar['X_M'][gd[j],iel] > -999) )[0]\n if len(ok) > 3 :\n all=np.append(all,abun[ok]-abun[ok].mean())\n plots.plotp(ax[iy,ix],allstar['TEFF'][gd[j[ok]]],abun[ok]-abun[ok].mean(),color=color[iclust],size=10,yr=[-0.5,0.5])\n\n iclust+=1\n print('{:s} {:10.3f} {:10.3f} {:d}\\n'.format(el, all.mean(), all.std(), len(all)))\n ax[iy,ix].text(0.1,0.9,el.strip(),ha='left',va='top',transform=ax[iy,ix].transAxes)\n ax[iy,ix].text(0.9,0.9,'{:8.3f}'.format(all.std()),ha='right',va='top',transform=ax[iy,ix].transAxes)\n iel+=1\n\ndef getabun(data,elems,elemtoh,el,xh=False,terange=[-1,10000],calib=False,line=0) :\n '''\n Return the abundance of the requested element, given data array, elem array, element\n '''\n if calib :\n param = 'PARAM'\n else :\n param = 'FPARAM'\n if el.strip() == 'M' :\n ok=np.where(((data['PARAMFLAG'][:,3] & 255) == 0) & (data['FPARAM_COV'][:,3,3] < 0.2) &\n (data['FPARAM'][:,0] >= terange[0]) & (data['FPARAM'][:,0] <= terange[1]) & (data[param][:,3] > -9990.) )[0]\n abun = data[param][:,3]\n elif el.strip() == 'alpha' :\n ok=np.where(((data['PARAMFLAG'][:,6] & 255) == 0) & (data['FPARAM_COV'][:,6,6] < 0.2) &\n (data['FPARAM'][:,0] >= terange[0]) & (data['FPARAM'][:,0] <= terange[1]) & (data[param][:,6] > -9990.) 
)[0]\n abun = data[param][:,6]\n if xh : abun+=data['FPARAM'][:,3]\n else :\n iel=np.where(np.core.defchararray.strip(elems) == el.strip())[0][0]\n if calib :\n if xh : \n abun = data['X_H'][:,iel]\n abunerr = data['X_H_ERR'][:,iel]\n else :\n abun = data['X_M'][:,iel]\n abunerr = data['X_M_ERR'][:,iel]\n else :\n if len(data['FELEM'].shape) == 2: \n abun = data['FELEM'][:,iel]\n abunerr = data['FELEM_ERR'][:,iel]\n else : \n abun = data['FELEM'][:,line,iel]\n abunerr = data['FELEM_ERR'][:,line,iel]\n\n if xh and not elemtoh[iel] : abun+=data['FPARAM'][:,3]\n if not xh and elemtoh[iel] : abun-=data['FPARAM'][:,3]\n #if el.strip() == 'C' or el.strip() == 'CI' or el.strip() == 'N' :\n # # special case for C and N for dwarfs, since those use [M/H] dimension\n # try :\n # dw = np.where((np.core.defchararray.find(data['ASPCAP_CLASS'],'GKd')>=0) | (np.core.defchararray.find(data['ASPCAP_CLASS'],'Fd')>=0) |\n # (np.core.defchararray.find(data['ASPCAP_CLASS'],'Md')>=0))[0]\n # except :\n # dw = np.where((np.core.defchararray.find(data['CLASS'],'GKd')>=0) | (np.core.defchararray.find(data['CLASS'],'Fd')>=0) |\n # (np.core.defchararray.find(data['CLASS'],'Md')>=0))[0]\n # if xh : abun[dw]-=data['FPARAM'][dw,3]\n # else : abun[dw]-=data['FPARAM'][dw,3]\n if calib : badflag = 255\n else : badflag = 0\n ok=np.where(( (data['ELEMFLAG'][:,iel] & badflag) == 0) &\n (abunerr < 0.2) &\n (data['FPARAM'][:,0] >= terange[0]) & \n (data['FPARAM'][:,0] <= terange[1]) & \n (abun > -9990.) )[0]\n return abun, ok\n\ndef cal(allstar,elems,elemtoh,doels,xh=False,plot=True,sepplot=False,hard=None, maxvisit=100,cal='default',dwarfs=False,inter=False,\n errpar=False,calib=False,nx=4,ny=2,maxvscatter=0.2,pm=True,dist=True, lines=False) :\n ''' \n Determine internal calibration relations for elements\n \n Args:\n allstar : allStar-like HDUList\n elems : list of elems\n elemtoh : coresponding list of elemtoh code\n\n Keyword args:\n xh : fit in [X/H]? (default=False, i.e. 
fit in [X/M])\n plot : show individual element plots\n '''\n\n # select cluster members from array that don't have STAR_BAD into data structure\n clusters=apselect.clustdata()\n calclusters=['M92','M15','M13','M3','M5','M12','M35','N2420','N188','M67','N7789','Pleiades','N6819','N6791',\n 'N6397','M55','N3201','N6752','N362','M4','N2808','47TUC']\n #calclusters=['N2420','N188','M67','N7789','Pleiades','N6819','N6791']\n clusts = clusters.name\n types = np.arange(len(clusts))\n markers = np.chararray(len(clusts))\n colors = np.chararray(len(clusts))\n markers[np.where(clusters.mh > -999)[0]] = 's'\n markers[np.where(clusters.mh < -1)[0]] = 'o'\n markers[np.where(clusters.mh > 0)[0]] = '^'\n allcol=['r','g','b','c','m','y']\n for i in range(len(colors)) : colors[i] = allcol[i%6]\n if dwarfs : \n logg=[3.8,5.5]\n reject=0.25\n glon=[0,360]\n else : \n logg=[-1,3.8]\n reject=0.15\n glon=[70,110]\n #solar=apselect.select(allstar[1].data,badval='STAR_BAD',badtarg=['YOUNG','EMBEDDED','EMISSION','EXTENDED'],\n # raw=True,logg=logg,glon=glon,glat=[-5,5],sn=[200,10000])\n #solar=apselect.select(allstar[1].data,badval='STAR_BAD',badtarg=['YOUNG','EMBEDDED','EMISSION','EXTENDED'],\n # raw=True,logg=logg,sn=[200,10000],maxdist=500.)\n solar=apselect.select(allstar[1].data,badval='STAR_BAD',badtarg=['YOUNG','EMBEDDED','EMISSION','EXTENDED'],\n raw=True,logg=logg,sn=[200,10000])\n try :\n gd=np.where((allstar[1].data['gaia_parallax_error'][solar]/abs(allstar[1].data['gaia_parallax'][solar]) < 0.1) )[0] \n solar=solar[gd]\n distance = 1000./allstar[1].data['gaia_parallax'][solar]\n x,y,z,r=lbd2xyz(allstar[1].data['GLON'][solar],allstar[1].data['GLAT'][solar],distance/1000.)\n gd = np.where((abs(z) < 0.5) & (r>8) & (r<9))[0]\n solar=solar[gd]\n except:\n print('no distance information available for solar sample, using glon/glat')\n solar=apselect.select(allstar[1].data,badval='STAR_BAD',badtarg=['YOUNG','EMBEDDED','EMISSION','EXTENDED'],\n raw=True,logg=logg,glon=glon,glat=[-5,5],sn=[200,10000])\n \n gd=apselect.select(allstar[1].data,badval='STAR_BAD',raw=True,logg=logg)\n print('ngd: ',len(gd))\n print('nsolar: ',len(solar))\n try :\n v=np.where(allstar[1].data['VISIT'][gd]<= maxvisit)[0]\n gd=gd[v]\n except :\n print('VISIT keyword does not exist')\n # preselect with fast HTM method at largest cluster radius\n try:\n print('pre-selecting cluster members using HTM')\n h=esutil.htm.HTM()\n maxrad=clusters['rad'].max()\n m1,m2,rad=h.match(clusters['ra'],clusters['dec'],allstar[1].data['RA'][gd],allstar[1].data['DEC'][gd],maxrad,maxmatch=500)\n gd=gd[m2]\n except :\n pass\n # now select per cluster\n print('selecting cluster members')\n all=[]\n for cluster in clusts :\n if cluster in calclusters :\n #clustdir=os.environ['APOGEE_REDUX']+'/r12/stars/junk/'\n #if clustdir :\n # stars=ascii.read(clustdir+'/'+cluster+'.txt',names=['APOGEE_ID'],format='no_header')\n # jsaved,j2 = match.match(allstar[1].data[gd]['APOGEE_ID'],stars['APOGEE_ID'])\n j=apselect.clustmember(allstar[1].data[gd],cluster,raw=True,firstgen=True,firstpos=False,logg=logg,\n pm=pm,dist=dist)\n #print(cluster,len(j),len(jsaved))\n if len(j) < 1 :\n j=apselect.clustmember(allstar[1].data[gd],cluster,raw=True,logg=logg,pm=pm,dist=dist)\n all=set(all).union(gd[j].tolist())\n data=allstar[1].data[list(all)]\n\n # in the abbreviated array, get the lists of cluster members\n members=[]\n fig,ax=plots.multi(1,1,figsize=(16,8))\n for label in ax.axes.get_xticklabels():\n label.set_visible(False)\n for label in ax.axes.get_yticklabels():\n 
label.set_visible(False)\n\n iplot=0\n for iclust,cluster in enumerate(clusts) :\n if cluster in calclusters :\n ax.scatter((iplot//12)*0.1+0.25,12-iplot%12,marker=markers[iclust],color=colors[iclust])\n ax.text((iplot//12)*0.1+0.26,12-iplot%12,clusts[iclust]+' ( '+str(clusters[iclust].mh)+')',color=colors[iclust],va='center')\n ax.set_xlim(0.23,0.8)\n j=apselect.clustmember(data,clusts[iclust],raw=True,firstgen=True,firstpos=False,logg=logg,pm=pm,dist=dist)\n if len(j) < 1 :\n j=apselect.clustmember(data,clusts[iclust],raw=True,logg=logg, pm=pm, dist=dist)\n iplot+=1\n else :\n j=[]\n # members is a list of lists of cluster members\n members.append(j)\n if hard is not None : \n fig.savefig(hard+'clust_key.png')\n fig.savefig(hard+'clust_key.pdf')\n plt.close(fig)\n\n # setup output structured array\n rec = np.zeros(len(doels),dtype=[\n ('elem','S5'),\n ('elemfit','i4'),\n ('mhmin','f4'),\n ('te0','f4'),\n ('temin','f4'),\n ('temax','f4'),\n ('femin','f4'),\n ('femax','f4'),\n ('caltemin','f4'),\n ('caltemax','f4'),\n ('extfit','i4'),\n ('extpar','3f4'),\n ('clust','{:1d}S16'.format(len(clusts))),\n ('par','3f4'),\n ('abun','{:1d}f4'.format(len(clusts))),\n ('nstars','{:1d}i4'.format(len(clusts))),\n ('mean','{:1d}f4'.format(len(clusts))),\n ('rms','{:1d}f4'.format(len(clusts))),\n ('rmsgd','{:1d}f4'.format(len(clusts))),\n ('rawmean','{:1d}f4'.format(len(clusts))),\n ('errpar','4f4'),\n ])\n # empirical scatter bin setup: these are bin left edges\n if dwarfs :\n dmhbin=3.\n mhbins=np.arange(-2.25,0.75,dmhbin)\n nerrfit=2\n xr=[3000,7500]\n else :\n dmhbin=0.5\n mhbins=np.arange(-2.25,0.75,dmhbin)\n nerrfit=3\n xr=[3000,5500]\n dteffbin=250\n teffbins=np.arange(3500,6000,dteffbin)\n dsnbin=50\n snbins=np.arange(50,250,dsnbin)\n\n # plot setup\n if plot and not sepplot :\n fig,ax = plots.multi(nx,ny,hspace=0.001,wspace=0.5,figsize=(18,6))\n # plot setup for summary all-element plots\n if plot and len(doels) > 2 :\n nels=0\n for el in doels :\n # parameters for the fit for this element\n if cal == 'dr13' :\n pars = dr13cal(el,dwarfs=dwarfs)\n elif cal == 'dr14' :\n pars = dr14cal(el,dwarfs=dwarfs)\n else :\n pars = defaultcal(el,dwarfs=dwarfs)\n if pars['elemfit'] >=0 : nels+=1\n allfig,allax=plots.multi(2,(nels-1)/2+1,hspace=0.001,wspace=0.3,figsize=(12,18))\n if len(solar) > 0 : allsolarfig,allsolarax=plots.multi(2,(nels-1)/2+1,hspace=0.001,wspace=0.3,figsize=(12,18))\n if errpar :\n errfig,errax=plots.multi(len(snbins),len(doels),hspace=0.001,wspace=0.001,figsize=(3*len(snbins),2*len(doels)))\n\n # loop over all the elements!\n iel=0\n #iplot=0\n grid=[]\n yt=[]\n for iplot,el in enumerate(doels) :\n if lines :\n jelem = np.where(allstar[3].data['ELEM_SYMBOL'][0] == el)[0]\n nlines = len(np.where(allstar[3].data['FELEM_WIND'][0][0,:,jelem] > 0)[0])\n if nlines > 0 :\n linefig,lineax=plots.multi(2,nlines+1,hspace=0.001,wspace=0.4,figsize=(10,18))\n else : nlines = 0\n\n # parameters for the fit for this element\n if cal == 'dr13' :\n pars = dr13cal(el,dwarfs=dwarfs)\n elif cal == 'dr14' :\n pars = dr14cal(el,dwarfs=dwarfs)\n elif cal == 'dr16' :\n pars = dr16cal(el,dwarfs=dwarfs)\n else :\n pars = defaultcal(el,dwarfs=dwarfs)\n pars['clust'] = np.array(clusts,dtype='S16')\n pars['abun'] = np.zeros(len(clusts))\n pars['par'] = np.zeros(3)\n pars['elem'] = el\n pars['errpar'] = np.zeros(4)\n elemfit = pars['elemfit']\n while elemfit >= 0 :\n # get the good abundance data for this element, load variables for fit (teff, abun, clust)\n abundata, ok = 
getabun(data,elems,elemtoh,el,xh=xh,calib=calib)\n snr=np.clip(data['SNR'],0.,snbins[-1]+dsnbin-0.1)\n print(el,pars['elemfit'],pars['mhmin'],len(ok))\n\n # get cluster members\n ind=np.array([],dtype=int)\n clust=np.array([],dtype='S16')\n apogee_id=np.array([],dtype='S16')\n jclust=[]\n for iclust,cluster in enumerate(clusts) :\n #if cluster in calclusters :\n i=np.where(clusters.name == clusts[iclust])\n # get cluster members: intersection of all cluster members and good ones for this element\n j=list(set(ok).intersection(members[iclust]))\n jclust.append(j)\n if clusters[i].mh > pars['mhmin'] and len(j) > 3 :\n # ind has the indices of all stars above the [M/H] threshold and good abundances\n ind=np.append(ind,j)\n clust=np.append(clust,[clusts[iclust]]*len(j))\n\n for iline in range(1+nlines) :\n\n abundata, ok = getabun(data,elems,elemtoh,el,xh=xh,calib=calib,line=iline)\n\n teff=data['FPARAM'][ind,0]\n mh=data['FPARAM'][ind,3]\n vscatter=data['VSCATTER'][ind]\n abun=abundata[ind]\n try :\n visit=data['VISIT'][ind]\n except :\n visit = np.zeros(len(ind))\n # only use visits=0 and vscatter<maxvscatter[gd] for fit, but we'll plot all\n gd=np.where((visit == 0) & (vscatter<maxvscatter) & (teff>=pars['temin']) & (teff<=pars['temax']))[0]\n bd=np.where((visit > 0) | (vscatter>=maxvscatter) | (teff<pars['temin']) | (teff>pars['temax']))[0]\n if len(gd) > 2 :\n print(el,len(ind))\n for iter in range(2) :\n print(el,iter,len(gd),pars['temin'],pars['temax'])\n deriv=calderiv(teff[gd]-pars['te0'],abun[gd],clust[gd],order=pars['elemfit'])\n soln,inv = fit.linear(abun[gd],deriv)\n nclust = len(np.unique(clust[gd]))\n pars['clust'] = np.sort(np.unique(clust[gd]))\n pars['par'][0:pars['elemfit']] = soln[nclust:len(soln)]\n pars['abun'] = soln[0:nclust]\n func=calfunc(pars,teff,mh,abun,clust,order=pars['elemfit'],extcal=False)\n if iter == 0 :\n res=abun-func\n gd=np.where((visit == 0) & (vscatter<maxvscatter) & (teff>=pars['temin']) & (teff<=pars['temax']) & (abs(res) <= reject))[0]\n tmpreject=reject\n while len(gd) < 10 and tmpreject<reject*8 :\n tmpreject*=2.\n gd=np.where((visit == 0) & (vscatter<maxvscatter) & (teff>=pars['temin']) & (teff<=pars['temax']) & (abs(res) <= tmpreject))[0]\n \n bd=np.where((visit > 0) | (vscatter>=maxvscatter) | (teff<pars['temin']) | (teff>pars['temax']) | (abs(res) > tmpreject))[0]\n\n print('\\nGlobal {:<8s} {:8.3f} (summed) {:8.3f} (with 3 visits)'.format(el, (abun[gd]-func[gd]).std(), (abun[bd]-func[bd]).std()))\n # loop through all clusters and determine mean and scatter for each cluster, and accumulate \n # data for scatter as f([M/H],Teff,S/N)\n print(' Clusters: mean std (cal) mean std (raw)')\n rmsdata=[]\n rmsderiv=[]\n if errpar and iline == 0 and hard is not None: \n f=open(hard+el.strip()+'_err_obj.dat','w')\n fc=open(hard+el.strip()+'_err_clust.dat','w')\n tedata=[]\n sndata=[]\n mhdata=[]\n val=[]\n for iclust,cluster in enumerate(clusts) :\n if cluster in calclusters and len(jclust[iclust])>3 :\n j=np.array(jclust[iclust])\n try:\n cgd=np.where((data['VISIT'][j] == 0) & (data['VSCATTER'][j]<maxvscatter) & \n (data['FPARAM'][j,0]>=pars['temin']) & (data['FPARAM'][j,0]<=pars['temax']))[0]\n except:\n cgd=np.where((data['VSCATTER'][j]<maxvscatter) & (data['FPARAM'][j,0]>=pars['temin']) & (data['FPARAM'][j,0]<=pars['temax']))[0]\n if len(gd) > 1 :\n rmsgd = (abundata[j[cgd]]-calfunc(pars,data['FPARAM'][j[cgd],0],data['FPARAM'][j[cgd],3],abundata[j[cgd]],''*len(j),order=pars['elemfit'])).std()\n else :\n rmsgd=-1.\n rec['rms'][iel,iclust] = 
(abundata[j]-calfunc(pars,data['FPARAM'][j,0],data['FPARAM'][j,3],abundata[j],''*len(j),order=pars['elemfit'])).std()\n rec['rmsgd'][iel,iclust] = rmsgd\n rec['mean'][iel,iclust] = (abundata[j]-calfunc(pars,data['FPARAM'][j,0],data['FPARAM'][j,3],abundata[j],''*len(j),order=pars['elemfit'])).mean()\n rec['rawmean'][iel,iclust] = abundata[j].mean()\n rec['nstars'][iel,iclust] = len(j)\n print(' {:<10s}{:8.3f}{:8.3f}{:8.3f}{:6d}{:6d}{:8.3f}{:8.3f}'.format(\n clusts[iclust],rec['mean'][iel,iclust],rec['rms'][iel,iclust],rmsgd,rec['nstars'][iel,iclust],\n len(cgd),abundata[j].mean(),abundata[j].std()))\n\n # empirical uncertainties\n if errpar and iline==0 :\n tedata.extend(data['FPARAM'][j,0])\n sndata.extend(snr[j])\n mhdata.extend(data['FPARAM'][j,3])\n val.extend(abundata[j]-rec['mean'][iel,iclust])\n if hard is not None:\n for jj in j : \n f.write('{:8.1f}{:8.2f}{:8.2f}{:8.3f}{:8.1f} {:s} {:s}\\n'.format(\n data['FPARAM'][jj,0],snr[jj],data['FPARAM'][jj,3],abundata[jj]-rec['mean'][iel,iclust],\n rec['mean'][iel,iclust],clusts[iclust],data['APOGEE_ID'][jj]))\n i=np.where(clusters.name == clusts[iclust])\n for mhbin in mhbins :\n if (clusters[i].mh > mhbin) and (clusters[i].mh <= mhbin+dmhbin) :\n for teffbin in teffbins :\n for snbin in snbins :\n ibin = np.where(( data['FPARAM'][j,0] > teffbin) & (data['FPARAM'][j,0] <= teffbin+dteffbin) &\n ( snr[j] > snbin) & (snr[j] <= snbin+dsnbin) & (abs(abundata[j]-rec['mean'][iel,iclust]) < 0.3) )[0]\n if len(ibin) > 3 :\n if not np.isfinite(np.log(abundata[np.array(j)[ibin]].std())) : \n pdb.set_trace()\n rmsdata.append(np.log(abundata[np.array(j)[ibin]].std()))\n if dwarfs :\n rmsderiv.append([1.,teffbin+dteffbin/2.-4500.,snbin+dsnbin/2.-100.])\n else :\n rmsderiv.append([1.,teffbin+dteffbin/2.-4500.,snbin+dsnbin/2.-100.,mhbin+dmhbin/2.])\n if hard is not None:\n fc.write('{:8.1f}{:8.2f}{:8.2f}{:8.2f}{:5d}{:8.3f} {:s}\\n'.format(\n teffbin+dteffbin/2.,snbin+dsnbin/2.,mhbin+dmhbin/2.,clusters[i].mh[0],len(ibin),abundata[np.array(j)[ibin]].std(),clusts[iclust]))\n iplt = np.where(snbins == snbin)[0][0]\n plots.plotc(errax[iel,iplt],clusters[i].mh,teffbin+dteffbin/2.,abundata[np.array(j)[ibin]].std(),\n size=30,zr=[0,0.1],xr=[-2.5,0.5],yr=[3500,5500],linewidth=1)\n \n if errpar and iline==0 :\n if hard is not None: \n f.close()\n fc.close()\n #empirical uncertainties\n rmsdata=np.array(rmsdata)\n rmsderiv=np.array(rmsderiv)\n if len(rmsdata) > 5 :\n soln,inv = fit.linear(rmsdata,rmsderiv.transpose())\n y, x = np.mgrid[3500:5500:200j,-2.5:0.5:200j]\n for iplt in range(len(snbins)) :\n sn = snbins[iplt]+dsnbin/2.\n errax[iel,iplt].imshow(elemerr(soln,y-4500.,sn-100.,x),extent=[-2.5,0.5,3500,5500], aspect='auto',vmin=0,vmax=0.1, origin='lower',cmap='rainbow')\n errax[iel,iplt].text(0.98,0.98,el+' S/N={:4.0f}'.format(sn),va='top',ha='right',transform=errax[iel,iplt].transAxes)\n \n pars['errpar'] = soln\n # send all points to generic errfit function (not rms within each bin) for alternative approach and to get plots\n try:\n soln2 = err.errfit(np.array(tedata),np.array(sndata),np.array(mhdata),np.array(val),out=hard+el.strip(),mkhtml=False)\n grid.append([os.path.basename(hard+el.strip()+'_err.png'),os.path.basename(hard+el.strip()+'_err_sn.png')])\n yt.append(el.strip())\n except: \n print('errfit failed: ',el)\n \n # get calibrated values before external calibration \n func_cal=calfunc(pars,teff,abun,mh,clust,order=pars['elemfit'],extcal=False)\n func_uncal=calfunc(pars,teff,abun,mh,clust,order=0,extcal=False)\n\n # get the abundances of the \"solar 
circle\" stars\n if len(solar) > 0 and len(doels) > 2 :\n solar_teff=allstar[1].data['FPARAM'][solar,0]\n solar_mh=allstar[1].data['FPARAM'][solar,3]\n solar_abun,solar_ok= getabun(allstar[1].data[solar],elems,elemtoh,el,xh=xh,calib=calib)\n solar_func=calfunc(pars,solar_teff,solar_mh,solar_abun,np.array(['']*len(solar_teff)),order=pars['elemfit'],calib=calib)\n # get mean and scatter of solar metallicity stars, rejecting points more than 0.2 from mean\n ss=np.where((solar_mh[solar_ok] > -0.05) & (solar_mh[solar_ok] < 0.05) & \n (solar_teff[solar_ok] > pars['temin']) & (solar_teff[solar_ok] < pars['temax']))[0]\n median=np.median(solar_abun[solar_ok[ss]]-solar_func[solar_ok[ss]])\n ss=np.where((solar_mh[solar_ok] > -0.05) & (solar_mh[solar_ok] < 0.05) & \n (solar_teff[solar_ok] > pars['temin']) & (solar_teff[solar_ok] < pars['temax']) &\n (np.abs(solar_abun[solar_ok]-solar_func[solar_ok])<0.2))[0]\n std=(solar_abun[solar_ok[ss]]-solar_func[solar_ok[ss]]).std()\n if pars['extfit'] == 4 :\n pars['extpar'] = np.array([median,0.,0.])\n median_uncal=np.median(solar_abun[solar_ok[ss]])\n std_uncal=solar_abun[solar_ok[ss]].std()\n if pars['extfit'] == 10 :\n j=np.where(rec['nstars'][iel]>0)[0]\n pars['extpar'][0] = np.median(rec['mean'][iel][j]-clusters[j].mh)\n elif pars['extfit'] == 11 :\n j=np.where((clusters.mh < -1) & (rec['nstars'][iel]>0))[0]\n pars['extpar'][0] = np.median(rec['mean'][iel][j]-clusters[j].mh)\n j=np.where((clusters.mh > -0.5) & (rec['nstars'][iel]>0))[0]\n pars['extpar'][1] = np.median(rec['mean'][iel][j]-clusters[j].mh)\n \n # make plots!\n if plot :\n if sepplot :\n fig,ax = plots.multi(nx,ny,hspace=0.001,wspace=0.5,figsize=[12,6])\n fig1,ax1 = plots.multi(1,1,figsize=[12,4])\n fig2,ax2 = plots.multi(1,1,figsize=[12,4])\n else :\n for iy in range(ny) :\n for ix in range(nx) :\n ax[iy,ix].cla()\n if iline == 0 :\n #after calibration\n plots.plotp(ax[0,0],teff[gd],abun[gd]-func_cal[gd], typeref=clust[gd],yr=[-0.29,0.29],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,yt=el)\n plots.plotp(ax[0,0],teff[bd],abun[bd]-func_cal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)\n ax[0,0].text(0.98,0.98,'{:5.3f}'.format((abun[gd]-func_cal[gd]).std()),transform=ax[0,0].transAxes,va='top',ha='right')\n #before calibration\n plots.plotp(ax[1,0],teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-0.29,0.29],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,xt='Teff',yt=el)\n plots.plotp(ax[1,0],teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)\n if sepplot:\n plots.plotp(ax1,teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-0.29,0.29],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,xt='Teff',yt=el)\n plots.plotp(ax1,teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)\n ax[1,0].text(0.98,0.98,'{:5.3f}'.format((abun[gd]-func_uncal[gd]).std()),transform=ax[1,0].transAxes,va='top',ha='right')\n # figure with all elements on same plot\n if len(doels) > 2 :\n if iline == 0 :\n plots.plotp(allax[iplot//2,iplot%2],teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-0.29,0.29],xr=[3500,5500],\n types=clusts,color=colors,marker=markers,size=8,xt='Teff',yt=el)\n 
plots.plotp(allax[iplot//2,iplot%2],teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=[3500,5500],\n types=clusts,color=colors,marker=markers,size=8,facecolors='none',linewidths=0.2)\n allax[iplot//2,iplot%2].text(0.98,0.98,'{:5.3f}'.format(\n (abun[gd]-func_uncal[gd]).std()),transform=allax[iplot//2,iplot%2].transAxes,va='top',ha='right')\n m67 = np.where(clusts == 'M67')[0][0]\n allax[iplot//2,iplot%2].text(0.98,0.75,'{:5.3f}'.format(\n rec['rms'][iel,m67]),transform=allax[iplot//2,iplot%2].transAxes,va='top',ha='right',color='r')\n allax[iplot//2,iplot%2].yaxis.set_major_locator(MultipleLocator(0.2))\n allax[iplot//2,iplot%2].yaxis.set_minor_locator(MultipleLocator(0.05))\n label = allax[iplot//2,iplot%2].yaxis.get_label()\n if len(label.get_text()) < 5 : label.set_rotation(0)\n if nlines > 0 :\n plots.plotp(lineax[iline,0],teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-0.29,0.29],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,xt='Teff',yt=el)\n plots.plotp(lineax[iline,0],teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-0.29,0.29],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)\n plots.plotp(lineax[iline,1],teff[gd],abun[gd]-func_uncal[gd],typeref=clust[gd],yr=[-2.,2.],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,xt='Teff',yt=el)\n plots.plotp(lineax[iline,1],teff[bd],abun[bd]-func_uncal[bd],typeref=clust[bd],yr=[-2,2],xr=xr,\n types=clusts,color=colors,marker=markers,size=16,facecolors='none',linewidths=0.2)\n if iline > 0 :\n w=np.squeeze(allstar[3].data['FELEM_WIND'][0][:,iline-1,jelem])\n lineax[iline,0].text(0.05,0.8,'{:8.2f}-{:8.2f} {:8.2f}'.format(w[0],w[1],w[2]),transform=lineax[iline,0].transAxes,fontsize=10)\n lineax[iline,1].text(0.05,0.8,'{:8.2f}-{:8.2f} {:8.2f}'.format(w[0],w[1],w[2]),transform=lineax[iline,1].transAxes,fontsize=10)\n\n # stuff for interactive plots\n plots._id_cols=['APOGEE_ID','VISIT']\n plots._id_cols=['APOGEE_ID']\n plots._data=data[ind]\n plots._data_x=teff\n plots._data_y=abun-func\n # plot fits\n x=np.linspace(pars['caltemin'],pars['caltemax'],200)\n func=calfunc(pars,x,x*0.,x*0,np.array(['']*len(x)),order=pars['elemfit'],extcal=False)\n plots.plotl(ax[1,0],x,func)\n if sepplot: plots.plotl(ax1,x,func)\n if len(doels) > 2 :\n # figure with all elements on same plot\n if iline==0 : plots.plotl(allax[iplot//2,iplot%2],x,func)\n # solar circle stars\n if iline==0 and len(solar) > 0 :\n plots.plotc(allsolarax[iplot//2,iplot%2],solar_teff[solar_ok],solar_abun[solar_ok]-solar_func[solar_ok],solar_mh[solar_ok],\n xr=xr,yr=[-0.5,0.5],zr=[-1,0.5],xt='Teff',yt=el)\n plots.plotl(allsolarax[iplot//2,iplot%2],[pars['temin'],pars['temax']],[median,median],color='k')\n plots.plotl(allsolarax[iplot//2,iplot%2],xr,[median,median],color='k',ls=':')\n allsolarax[iplot//2,iplot%2].text(0.98,0.98,'{:5.3f}'.format(std),ha='right',va='top',transform=allsolarax[iplot//2,iplot%2].transAxes)\n allsolarax[iplot//2,iplot%2].text(0.98,0.02,'{:5.3f}'.format(median),ha='right',va='bottom',transform=allsolarax[iplot//2,iplot%2].transAxes)\n label = allsolarax[iplot//2,iplot%2].yaxis.get_label()\n if len(label.get_text()) < 5 : label.set_rotation(0)\n plots.plotc(ax[0,2],solar_teff[solar_ok],solar_abun[solar_ok]-solar_func[solar_ok],solar_mh[solar_ok],xr=xr,yr=[-0.5,0.5],zr=[-1,0.5])\n plots.plotl(ax[0,2],xr,[median,median],color='orange')\n ax[0,2].text(0.98,0.98,'{:5.3f}'.format(std),ha='right',va='top',transform=ax[0,2].transAxes)\n 
ax[0,2].text(0.98,0.02,'{:5.3f}'.format(median),ha='right',va='bottom',transform=ax[0,2].transAxes)\n plots.plotc(ax[0,3],solar_mh[solar_ok],solar_abun[solar_ok]-solar_func[solar_ok],solar_teff[solar_ok],yr=[-0.5,0.5],zr=xr)\n #uncalibrated\n plots.plotc(ax[1,2],solar_teff[solar_ok],solar_abun[solar_ok],solar_mh[solar_ok],xr=xr,yr=[-0.5,0.5],zr=[-1,0.5])\n plots.plotl(ax[1,2],xr,[median_uncal,median_uncal],color='orange')\n ax[1,2].text(0.98,0.98,'{:5.3f}'.format(std_uncal),ha='right',va='top',transform=ax[1,2].transAxes)\n ax[1,2].text(0.98,0.02,'{:5.3f}'.format(median_uncal),ha='right',va='bottom',transform=ax[1,2].transAxes)\n plots.plotc(ax[1,3],solar_mh[solar_ok],solar_abun[solar_ok],solar_teff[solar_ok],yr=[-0.5,0.5],zr=xr)\n if xh or el == 'M' :\n gdplt=np.where(rec['nstars'][iel]>0)[0]\n plots.plotp(ax[0,1],clusters[gdplt].mh,rec['rawmean'][iel][gdplt]-clusters[gdplt].mh,\n typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,\n xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP-lit [M/H]',yerr=rec['rms'][iel])\n plots.plotp(ax[1,1],clusters[gdplt].mh,rec['mean'][iel][gdplt]-clusters[gdplt].mh,\n typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,\n xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP-lit [M/H]',yerr=rec['rms'][iel])\n if sepplot :\n plots.plotp(ax2,clusters[gdplt].mh,rec['mean'][iel][gdplt]-clusters[gdplt].mh,\n typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,\n xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP-lit [M/H]',yerr=rec['rms'][iel])\n ax2.plot([-2.5,-1.0],[0.108797,0.108797],color='k')\n ax2.plot([-1.0,-0.5],[0.108797,-0.0272657],color='k')\n ax2.plot([-0.5,0.5],[-0.0272657,-0.0272657],color='k')\n\n\n else :\n gdplt=np.where(rec['nstars'][iel]>0)[0]\n plots.plotp(ax[0,1],clusters[gdplt].mh,rec['rawmean'][iel][gdplt],\n typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,\n xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP-lit [M/H]',yerr=rec['rms'][iel])\n plots.plotp(ax[1,1],clusters[gdplt].mh,rec['mean'][iel][gdplt],\n typeref=clusters[gdplt].name,types=clusts,color=colors,marker=markers,size=16,\n xr=[-2.5,0.5],yr=[-0.6,0.6],xt='Lit [M/H]',yt='ASPCAP',yerr=rec['rms'][iel])\n plots.event(fig)\n #if iline == nlines : iplot+=1\n #if not sepplot and cal != 'inter' : pdb.set_trace()\n if iline == nlines and hard is not None : \n fig.savefig(hard+el.strip()+'.png')\n plt.close(fig)\n if sepplot: \n fig1.savefig(hard+el+'.pdf')\n fig2.savefig(hard+el+'_lit.pdf')\n plt.close(fig1)\n plt.close(fig2)\n if nlines > 0 : \n linefig.savefig(hard+el+'_lines.png')\n linefig.savefig(hard+el+'_lines.pdf')\n plt.close(linefig) \n \n if inter :\n # with interactive options, can adjust fit order and limits and redo\n plt.draw()\n plt.pause(1)\n print('elemfit: ',elemfit)\n s = raw_input('enter new elemfit (-1 to continue to next element, l for new fit limits): ')\n try:\n elemfit = int(s)\n except:\n s = raw_input('enter new lower and upper fit limits in Teff: ')\n pars['temin'] = int(s.split()[0])\n pars['temax'] = int(s.split()[1])\n if elemfit >=0 : pars['elemfit'] = elemfit\n else :\n elemfit = -1\n # transfer results for this element to output summary array \n for key in ['elem','elemfit','mhmin','te0','temin','temax','caltemin','caltemax','extfit','extpar','clust','abun','par','errpar'] :\n print(key, pars[key], pars['elem'], pars['elemfit'])\n if key == 'clust' or key == 'abun' or key == 'errpar':\n n=len(pars[key])\n 
rec[iel][key][0:n]=pars[key]\n elif key == 'par' :\n # reverse for aspcap_correct\n rec[iel][key][:]=pars[key][::-1]\n elif key == 'extpar' :\n print(pars[key])\n rec[iel][key][:]=pars[key][:]\n else :\n rec[iel][key]=pars[key]\n rec[iel]['femin'] = -99.999\n rec[iel]['femax'] = 99.999\n iel+=1\n #if plot and iplot%2 == 1 : \n if plot and len(doels)%2 == 1 : \n allax[iplot//2,iplot%2].set_visible(False)\n ticklabels = allax[iplot//2-1,iplot%2].get_xticklabels()\n plt.setp(ticklabels, visible=True)\n \n if plot and hard is not None and len(doels) > 2: \n allfig.savefig(hard+'all.png')\n if len(solar) > 0 : allsolarfig.savefig(hard+'allsolar.png')\n if errpar and hard is not None :\n try: html.htmltab(grid,ytitle=yt,file=hard+'err_all.html')\n except: pass\n errfig.savefig(hard+'err_all.png')\n plt.close(errfig)\n\n return rec\n\ndef calfunc(pars,teff,mh,abun,clust,order=1,calib=False,extcal=True) :\n '''\n Apply calibration function. If clust is not '', then include the mean abundance for the cluster as determined from the fit,\n otherwise only apply the temperature correction\n\n '''\n npts=len(teff)\n func=np.zeros([npts])\n # if we are given clusters that are not part of the calibration, set them to -999\n j=np.where(clust != '')[0]\n func[j]=-999.\n # start with the cluster mean abundances if requested\n for iclust in range(len(pars['clust'])) :\n j=np.where(clust == pars['clust'][iclust].strip())[0]\n func[j] = pars['abun'][iclust]\n # add the temperature terms, truncating at temin and temax\n if calib == False :\n if order >= 1:\n temp=copy.copy(teff)\n bd=np.where(temp < pars['temin'])[0]\n temp[bd]=pars['temin']\n bd=np.where(temp > pars['temax'])[0]\n temp[bd]=pars['temax']\n for iorder in range(0,order) :\n func += pars['par'][iorder]*(temp-pars['te0'])**(iorder+1)\n if extcal :\n if pars['extfit'] == 4 :\n func += pars['extpar'][0]\n elif pars['extfit'] == 10 :\n func += pars['extpar'][0]+pars['extpar'][1]*mh+pars['extpar'][2]*mh**2\n elif pars['extfit'] == 11 :\n mhclip=np.clip(mh,-1.,-0.5)\n func += pars['extpar'][0] + (mhclip-(-1.))*(pars['extpar'][1]-pars['extpar'][0])/0.5\n return func\n\ndef calderiv(teff,abun,clust,order=1) :\n '''\n Function/derivatives for abundance calibration\n '''\n uclust=np.sort(np.unique(clust))\n npar=order+len(uclust)\n npts=len(teff)\n deriv=np.zeros([npar,npts])\n for iclust in range(len(uclust)) :\n j=np.where(clust == uclust[iclust])[0]\n deriv[iclust,j] = 1.\n if order >= 1:\n for iorder in range(0,order) :\n deriv[len(uclust)+iorder,:] = teff**(iorder+1)\n return deriv\n \n\ndef defaultcal(el,dwarfs=False) :\n '''\n Return default parameters for abundance calibrtion\n '''\n te0=4500\n temin=4000\n if dwarfs : temax=6000\n else : temax=5000\n elemfit=1\n extfit=0\n caltemin=3532.5\n caltemax=6500\n extpar=[0.,0.,0.]\n mhmin=-1\n return {'elemfit': elemfit, 'mhmin' : mhmin, 'te0': te0, 'temin': temin, 'temax': temax, \n 'caltemin': caltemin, 'caltemax' : caltemax, 'extfit' : extfit, 'extpar' : np.array(extpar)}\n\ndef dr16cal(el,dwarfs=False) :\n '''\n Return default parameters for abundance calibrtion\n '''\n te0=4500\n # values for WARN and to use for fits, if any\n temin=0\n if dwarfs : temax=100000\n else : temax=10000\n # default method/order for fit with Teff (0=none)\n elemfit=0\n # default method for zeropoint (4=solar neighborhood)\n extfit=4\n # values for BAD, i.e. 
no calibration\n caltemin=3032.5\n caltemax=7500\n extpar=[0.,0.,0.]\n # minimum metallicity to use in clusters\n mhmin=-1\n\n if el.strip() == 'Ge' : elemfit=-1\n if el.strip() == 'Rb' : elemfit=-1\n if el.strip() == 'Nd' : elemfit=-1\n if el.strip() == 'Yb' : elemfit=-1\n\n if not dwarfs :\n if el.strip() == 'C' : \n extfit=0\n if el.strip() == 'CI' : \n extfit=0\n if el.strip() == 'N' : \n extfit=0\n if el.strip() == 'O' : \n temax=5000\n if el.strip() == 'Na' : \n temin=3750\n elif el.strip() == 'Al' : \n temin=3400\n elif el.strip() == 'K' : \n temin=3900\n elif el.strip() == 'P' : \n temax=6000\n elif el.strip() == 'Ti' : \n temin=4200\n elif el.strip() == 'TiII' : \n temin=4000\n elif el.strip() == 'V' : \n temax=4800\n elif el.strip() == 'Mn' : \n temin=4000\n elif el.strip() == 'Fe' : \n extfit=0\n elif el.strip() == 'Co' : \n temin=3300\n temax=6500\n elif el.strip() == 'Cu' : \n temin=4000\n elif el.strip() == 'Ce' : \n temin=4000\n temax=5000\n\n else :\n\n if el.strip() == 'O' : \n temax=5000\n elif el.strip() == 'Na' : \n temin=5500\n temax=5500\n elif el.strip() == 'P' : \n temin=4300\n temin=5500\n temax=5500\n elif el.strip() == 'S' : \n temin=4260\n elif el.strip() == 'K' : \n temin=4000\n temax=6500\n elif el.strip() == 'Ti' : \n temin=4000\n temax=6000\n elif el.strip() == 'TiII' : \n temin=5500\n temax=6000\n elif el.strip() == 'V' : \n temin=4800\n temax=5500\n elif el.strip() == 'Cr' : \n temin=3800\n temax=6200\n elif el.strip() == 'Mn' : \n temin=3800\n elif el.strip() == 'Fe' : \n extfit=0\n elif el.strip() == 'Co' : \n temax=6500\n elif el.strip() == 'Cu' : \n temax=6200\n elif el.strip() == 'Ce' : \n temin=4200\n temin=5500\n temax=5500\n\n return {'elemfit': elemfit, 'mhmin' : mhmin, 'te0': te0, 'temin': temin, 'temax': temax, \n 'caltemin': caltemin, 'caltemax' : caltemax, 'extfit' : extfit, 'extpar' : np.array(extpar)}\n\ndef dr14cal(el,dwarfs=False) :\n '''\n Return calibration parameters for requested element for DR14 choices\n\n elemfit gives order/type of polynomial in cluster fit: 1 (linear), 2 (quadratic), 3 (cubic)\n temin/temax gives range over which fit is performed\n caltemin/caltemax gives range over which calibration can be applied (bad outside range)\n extfit gives source of external calibration: 1 (Arcturus), 2 (Vesta), 3 (M67), 4 (solar sequence), 10 (quadratic fit to clusters), 11(piecewise fit to clusters)\n extpar gives the values of the external calibration\n '''\n\n # defaults\n te0=4500\n temin=4000\n temax=5000\n elemfit=1\n extfit=0\n caltemin=3532.5\n caltemax=6500\n extpar=[0.,0.,0.]\n mhmin=-1\n if el.strip() == 'Ca' : mhmin = -2.\n if el.strip() == 'C' : mhmin = -0.6\n if el.strip() == 'Fe' : mhmin = -3.\n if el.strip() == 'K' : mhmin = -0.6\n if el.strip() == 'Mn' : mhmin = -2.0\n if el.strip() == 'Na' : mhmin = -0.6\n if el.strip() == 'Ni' : mhmin = -3.0\n if el.strip() == 'N' : mhmin = -0.6\n if el.strip() == 'O' : mhmin = -0.6\n if el.strip() == 'Si' : mhmin = -3.0\n if el.strip() == 'V' : mhmin = -0.6\n\n # nothing below -1\n if mhmin < -1 : mhmin=-1.\n\n if not dwarfs :\n # calibration parameters for giants\n if el.strip() == 'C' :\n elemfit= 0\n elif el.strip() == 'CI' :\n elemfit= 0\n elif el.strip() == 'N' :\n elemfit= 0\n elif el.strip() == 'O' :\n elemfit= 2\n temin= 3750\n extfit= 4\n elif el.strip() == 'Na' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'Mg' :\n elemfit= 1\n temax= 5250\n extfit= 4\n elif el.strip() == 'Al' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'Si' :\n elemfit= 2\n temin= 3750\n temax= 
5250\n extfit= 4\n elif el.strip() == 'P' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'S' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'K' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'Ca' :\n elemfit= 1\n temin= 3750\n extfit= 4\n elif el.strip() == 'Ti' :\n elemfit= 1\n temin= 3750\n extfit= 4\n elif el.strip() == 'TiII' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'V' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'Cr' :\n elemfit= 1\n temin= 3750\n extfit= 4\n elif el.strip() == 'Mn' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'Fe' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'Co' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'Ni' :\n elemfit= 1\n extfit= 4\n elif el.strip() == 'Cu' :\n elemfit= -1\n elif el.strip() == 'Ge' :\n elemfit= -1\n elif el.strip() == 'Ce' :\n elemfit= -1\n elif el.strip() == 'Rb' :\n elemfit= -1\n #elemfit= 1\n #extfit= 4\n elif el.strip() == 'Y' :\n elemfit= -1\n elif el.strip() == 'Nd' :\n elemfit= -1\n elif el.strip() == 'M' :\n elemfit= 1\n extfit=11\n elif el.strip() == 'alpha' :\n elemfit= 1\n temax=5250\n extfit= 4\n else :\n\n # default values for dwarfs\n temin=3200\n temax=6250\n elemfit=3\n caltemin=-1\n caltemax=6500\n extfit=0\n extpar=[0.,0.,0.]\n\n\n # manual overrides for each element, dwarfs\n if el.strip() == 'C' :\n elemfit=1\n extfit=4\n elif el.strip() == 'CI' :\n elemfit=1\n caltemin=3500\n caltemax=5000\n extfit=4\n elif el.strip() == 'N' :\n elemfit=0\n caltemin=3500\n extfit=4\n elif el.strip() == 'O' :\n elemfit=2\n temin=3500\n temax=4500\n extfit=4\n elif el.strip() == 'Na' :\n elemfit=-1 #0\n temin=3750\n temax=5500\n caltemin=3750\n extfit=4\n elif el.strip() == 'Mg' :\n elemfit=1\n temin=3750\n extfit=4\n elif el.strip() == 'Al' :\n elemfit=2\n temin=3750\n caltemin=3500\n extfit=4\n elif el.strip() == 'Si' :\n elemfit=1\n temin=3500\n extfit=4\n elif el.strip() == 'P' :\n elemfit=0\n caltemin=3750\n caltemax=5000\n extfit=0\n elif el.strip() == 'S' :\n elemfit=1\n temin=3750\n caltemin=3532\n extfit=4\n elif el.strip() == 'K' :\n elemfit=2\n temin=3750\n caltemin=3750\n extfit=4\n elif el.strip() == 'Ca' :\n elemfit=1\n temin=3750\n caltemin=3750\n extfit=4\n elif el.strip() == 'Ti' :\n elemfit=3\n temin=3750\n temax=5250\n caltemin=3750\n extfit=4\n elif el.strip() == 'TiII' :\n elemfit=-1\n caltemax=-1\n extfit=0\n elif el.strip() == 'V' :\n elemfit=2\n temax=5250\n caltemin=3750\n extfit=4\n elif el.strip() == 'Cr' :\n elemfit=0\n temax=5250\n caltemin=3750\n extfit=4\n elif el.strip() == 'Mn' :\n elemfit=3\n temin=3500\n caltemin=3500\n extfit=4\n elif el.strip() == 'Fe' :\n elemfit=2\n temin=3500\n extfit=4\n elif el.strip() == 'Co' :\n elemfit=-1\n elif el.strip() == 'Ni' :\n elemfit=1\n temin=3500\n caltemin=3500\n extfit=4\n elif el.strip() == 'Cu' :\n elemfit=-1 #2\n temin=3750\n caltemin=3750\n extfit=4\n elif el.strip() == 'Ge' :\n elemfit=-1\n elif el.strip() == 'Ce' :\n elemfit=-1\n elif el.strip() == 'Rb' :\n elemfit=-1 #1\n caltemin=3500\n temin=3200\n temax=5250\n extfit=4\n elif el.strip() == 'Y' :\n elemfit=-1\n elif el.strip() == 'Nd' :\n elemfit=-1\n elif el.strip() == 'M' :\n elemfit=1\n temin=3200\n extfit=10\n elif el.strip() == 'alpha' :\n elemfit=2\n temin=3500\n caltemin=3500\n extfit=4\n \n return {'elemfit': elemfit, 'mhmin' : mhmin, 'te0': te0, 'temin': temin, 'temax': temax, \n 'caltemin': caltemin, 'caltemax' : caltemax, 'extfit' : extfit, 'extpar' : np.array(extpar)}\n\ndef dr13cal(el,dwarfs=False) :\n '''\n Return calibration parameters for requested element for DR13 
choices\n\n elemfit gives order/type of polynomial in cluster fit: 1 (linear), 2 (quadratic), 3 (cubic)\n temin/temax gives range over which fit is performed\n caltemin/caltemax gives range over which calibration can be applied (bad outside range)\n extfit gives source of external calibration: 1 (Arcturus), 2 (Vesta), 3 (M67), 4 (solar sequence), 10 (quadratic fit to clusters)\n extpar gives the values of the external calibration\n '''\n\n # defaults\n te0=4500\n temin=4000\n temax=5000\n elemfit=1\n extfit=0\n caltemin=3532.5\n caltemax=6500\n extpar=[0.,0.,0.]\n mhmin=-1\n if el.strip() == 'Ca' : mhmin = -2.\n if el.strip() == 'C' : mhmin = -0.6\n if el.strip() == 'Fe' : mhmin = -3.\n if el.strip() == 'K' : mhmin = -0.6\n if el.strip() == 'Mn' : mhmin = -2.0\n if el.strip() == 'Na' : mhmin = -0.6\n if el.strip() == 'Ni' : mhmin = -3.0\n if el.strip() == 'N' : mhmin = -0.6\n if el.strip() == 'O' : mhmin = -0.6\n if el.strip() == 'Si' : mhmin = -3.0\n if el.strip() == 'V' : mhmin = -0.6\n\n # nothing below -1\n if mhmin < -1 : mhmin=-1.\n\n if not dwarfs :\n # calibration parameters for giants\n if el.strip() == 'C' :\n elemfit= 0\n elif el.strip() == 'CI' :\n elemfit= 0\n elif el.strip() == 'N' :\n elemfit= 0\n elif el.strip() == 'O' :\n elemfit= 2\n temin= 3750\n extfit= 4\n extpar= [0.060,0.,0.]\n elif el.strip() == 'Na' :\n elemfit= 2\n extfit= 4\n extpar= [0.186,0.,0.]\n elif el.strip() == 'Mg' :\n elemfit= 3\n temin= 3500\n extfit= 4\n extpar= [0.045,0.,0.]\n elif el.strip() == 'Al' :\n elemfit= 3\n extfit= 4\n extpar= [0.108,0.,0.]\n elif el.strip() == 'Si' :\n elemfit= 3\n temin= 3500\n extfit= 4\n extpar= [0.107,0.,0.]\n elif el.strip() == 'P' :\n elemfit= 2\n extfit= 4\n extpar= [-0.008,0.,0.]\n elif el.strip() == 'S' :\n elemfit= 2\n extfit= 4\n extpar= [-0.092,0.,0.]\n elif el.strip() == 'K' :\n elemfit= 1\n extfit= 4\n extpar= [-0.026,0.,0.]\n elif el.strip() == 'Ca' :\n elemfit= 3\n temin= 3750\n extfit= 4\n extpar= [-0.021,0.,0.]\n elif el.strip() == 'Ti' :\n elemfit= 3\n temin= 3500\n extfit= 4\n extpar= [-0.014,0.,0.]\n elif el.strip() == 'TiII' :\n elemfit= 2\n extfit= 4\n extpar= [0.166,0.,0.]\n elif el.strip() == 'V' :\n elemfit= 3\n temin= 3750\n extfit= 4\n extpar= [0.110,0.,0.]\n elif el.strip() == 'Cr' :\n elemfit= 2\n temin= 3500\n extfit= 4\n extpar= [-0.057,0.,0.]\n elif el.strip() == 'Mn' :\n elemfit= 1\n extfit= 4\n extpar= [0.041,0.,0.]\n elif el.strip() == 'Fe' :\n elemfit= 2\n temin= 3500\n extfit= 4\n extpar= [-0.005,0.,0.]\n elif el.strip() == 'Co' :\n elemfit= 3\n extfit= 4\n extpar= [0.003,0.,0.]\n elif el.strip() == 'Ni' :\n elemfit= 2\n temin= 3750\n extfit= 4\n extpar= [-0.001,0.,0.]\n elif el.strip() == 'Cu' :\n elemfit= 3\n temin= 3\n extfit= 4\n extpar= [0.452,0.,0.]\n elif el.strip() == 'Ge' :\n elemfit= 2\n extfit= 4\n extpar= [0.354,0.,0.]\n elif el.strip() == 'Ce' :\n elemfit= -1\n elif el.strip() == 'Rb' :\n elemfit= 2\n temin= 3750\n extfit= 4\n extpar= [-0.105,0.,0.]\n elif el.strip() == 'Y' :\n elemfit= -1\n elif el.strip() == 'Nd' :\n elemfit= -1\n elif el.strip() == 'M' :\n elemfit= 1\n elif el.strip() == 'alpha' :\n elemfit= 2\n extfit= 4\n extpar = [0.056,0.,0.]\n else :\n\n # default values for dwarfs\n temin=3200\n temax=6250\n elemfit=3\n caltemin=-1\n caltemax=6500\n extfit=0\n extpar=[0.,0.,0.]\n\n\n # manual overrides for each element, dwarfs\n if el.strip() == 'C' :\n elemfit=1\n extfit=4\n extpar=[-0.019,0.,0.]\n elif el.strip() == 'CI' :\n extfit=4\n extpar=[-0.026,0.,0.]\n elif el.strip() == 'N' :\n extfit=4\n 
extpar=[-0.01,0.,0.]\n elif el.strip() == 'O' :\n elemfit=3\n temin=3500\n temax=4500\n extfit=4\n extpar=[0.068,0.,0.]\n elif el.strip() == 'Na' :\n elemfit=1\n temin=3750\n temax=5500\n caltemin=3750\n extfit=4\n extpar=[0.096,0.,0.]\n elif el.strip() == 'Mg' :\n elemfit=3\n temin=3750\n extfit=4\n extpar=[-0.003,0.,0.]\n elif el.strip() == 'Al' :\n elemfit=2\n temin=3750\n caltemin=3500\n extfit=4\n extpar=[0.043,0.,0.]\n elif el.strip() == 'Si' :\n elemfit=1\n temin=3500\n extfit=4\n extpar=[-0.023,0.,0.]\n elif el.strip() == 'P' :\n caltemax=-1\n extfit=0\n extpar=[0.,0.,0.]\n elif el.strip() == 'S' :\n elemfit=1\n temin=3750\n caltemin=5500\n extfit=4\n extpar=[-0.017,0.,0.]\n elif el.strip() == 'K' :\n elemfit=2\n temin=3750\n caltemin=3750\n extfit=4\n extpar=[-0.029,0.,0.]\n elif el.strip() == 'Ca' :\n elemfit=1\n temin=3750\n caltemin=3750\n extfit=4\n extpar=[0.023,0.,0.]\n elif el.strip() == 'Ti' :\n elemfit=3\n temin=3750\n temax=5250\n caltemin=3750\n extfit=4\n extpar=[-0.002,0.,0.]\n elif el.strip() == 'TiII' :\n caltemax=-1\n extfit=0\n extpar=[0.,0.,0.]\n elif el.strip() == 'V' :\n elemfit=2\n temax=5250\n caltemin=3750\n extfit=4\n extpar=[0.002,0.,0.]\n elif el.strip() == 'Cr' :\n elemfit=1\n temax=5250\n caltemin=3750\n extfit=4\n extpar=[-0.044,0.,0.]\n elif el.strip() == 'Mn' :\n elemfit=3\n temin=3500\n caltemin=3500\n extfit=4\n extpar=[-0.077,0.,0.]\n elif el.strip() == 'Fe' :\n elemfit=2\n temin=3500\n extfit=4\n extpar=[0.016,0.,0.]\n elif el.strip() == 'Co' :\n elemfit=-1\n elif el.strip() == 'Ni' :\n elemfit=1\n temin=3500\n caltemin=3500\n extfit=4\n extpar=[0.03,0.,0.]\n elif el.strip() == 'Cu' :\n elemfit=2\n temin=3750\n caltemin=4500\n extfit=4\n extpar=[0.026,0.,0.]\n elif el.strip() == 'Ge' :\n elemfit=-1\n elif el.strip() == 'Ce' :\n elemfit=-1\n elif el.strip() == 'Rb' :\n elemfit=1\n temin=3200\n temax=5250\n extfit=4\n extpar=[-0.217,0.,0.]\n elif el.strip() == 'Y' :\n elemfit=-1\n elif el.strip() == 'Nd' :\n elemfit=-1\n elif el.strip() == 'M' :\n elemfit=3\n temin=3200\n extfit=0\n extpar=[0.0,0.,0.]\n elif el.strip() == 'alpha' :\n elemfit=1\n extfit=4\n extpar=[-0.004,0.,0.]\n \n return {'elemfit': elemfit, 'mhmin' : mhmin, 'te0': te0, 'temin': temin, 'temax': temax, \n 'caltemin': caltemin, 'caltemax' : caltemax, 'extfit' : extfit, 'extpar' : np.array(extpar)}\n\ndef elemerr(soln,te,sn,fe) :\n '''\n Function to evaluate function for empirical uncertainties\n '''\n out=soln[0]+soln[1]*te+soln[2]*sn\n if len(soln) > 3: out+= soln[3]*fe\n return np.exp(out)\n\nif __name__ == '__main__' :\n main()\n\n\n\ndef lbd2xyz(l,b,d,R0=8.5) :\n ''' Angular coordinates + distance -> galactocentry x,y,z '''\n\n brad = b*np.pi/180.\n lrad = l*np.pi/180.\n\n x = d*np.sin(0.5*np.pi-brad)*np.cos(lrad)-R0\n y = d*np.sin(0.5*np.pi-brad)*np.sin(lrad)\n z = d*np.cos(0.5*np.pi-brad)\n r = np.sqrt(x**2+y**2)\n return x, y, z, r\n\n",
"from astropy.io import fits\nfrom apogee.utils import bitmask\nfrom apogee.utils import apselect\nfrom tools import plots\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\na=fits.open(os.environ['APOGEE_REDUX']+'/r8/stars/l31c/l31c.1/allStar-l31c.1.fits')[1].data\nc=fits.open(os.environ['APOGEE_REDUX']+'/r8/stars/l31c/l31c.1/allStar-l31c.1.fits')[1].data\n\ngd=apselect.select(a,badval=['STAR_BAD'],logg=[0,3.8])\na=a[gd]\n\nhmin=12\nfor mag in [0] :\n if mag == 0 :\n pand = np.where( (a['andflag'] & bitmask.starflagval('PERSIST_HIGH') > 0 ) &\n (a['h'] > hmin) & (a['commiss'] == 0) )[0]\n por = np.where( (a['andflag'] & bitmask.starflagval('PERSIST_HIGH') == 0 ) &\n (a['starflag'] & bitmask.starflagval('PERSIST_HIGH') > 0 ) &\n (a['h'] > hmin) & (a['commiss'] == 0) )[0]\n pno = np.where( (a['starflag'] & bitmask.starflagval('PERSIST_HIGH') == 0 ) &\n (a['starflag'] & bitmask.starflagval('PERSIST_MED') == 0 ) &\n (a['starflag'] & bitmask.starflagval('PERSIST_LOW') == 0 ) &\n (a['h'] > hmin) & (a['commiss'] == 0) )[0]\n else :\n pand = np.where( (a['andflag'] & bitmask.starflagval('PERSIST_HIGH') > 0 ) &\n (a['h'] < hmin) & (a['commiss'] == 0) )[0]\n por = np.where( (a['andflag'] & bitmask.starflagval('PERSIST_HIGH') == 0 ) &\n (a['starflag'] & bitmask.starflagval('PERSIST_HIGH') > 0 ) &\n (a['h'] < hmin) & (a['commiss'] == 0) )[0]\n pno = np.where( (a['starflag'] & bitmask.starflagval('PERSIST_HIGH') == 0 ) &\n (a['starflag'] & bitmask.starflagval('PERSIST_MED') == 0 ) &\n (a['starflag'] & bitmask.starflagval('PERSIST_LOW') == 0 ) &\n (a['h'] < hmin) & (a['commiss'] == 0) )[0]\n\n fig,ax = plots.multi(3,1,wspace=0.001,hspace=0.001)\n axim=plots.plotc(ax[0],a['TEFF'][pno],a['LOGG'][pno],a['M_H'][pno],size=2,xr=[5500,3500],yr=[5,0],zr=[-2,0.5],xt='Teff',yt='logg',rasterized=True)\n plots.plotc(ax[1],a['TEFF'][por],a['LOGG'][por],a['M_H'][por],size=2,xr=[5500,3500],yr=[5,0],zr=[-2,0.5],xt='Teff',rasterized=True)\n plots.plotc(ax[2],a['TEFF'][pand],a['LOGG'][pand],a['M_H'][pand],size=2,xr=[5500,3500],yr=[5,0],zr=[-2,0.5],xt='Teff',rasterized=True)\n cbaxes = fig.add_axes([0.91, 0.1, 0.01, 0.8])\n cb = plt.colorbar(axim, cax = cbaxes)\n cb.set_label('[M/H]')\n cbaxes.tick_params(axis='both',labelsize=8)\n fig.savefig('persist_f_hr.pdf')\n \n tags=['C_FE','N_FE','O_FE']\n fig,ax = plots.multi(3,len(tags),wspace=0.001,hspace=0.001)\n for i,tag in enumerate(['C_FE','N_FE','O_FE']) :\n el=tag.split('_')[0]\n axim=plots.plotc(ax[i,0],a['M_H'][pno],a[tag][pno],a['TEFF'][pno],size=2,xr=[-2.5,1],yr=[-0.9,0.9],zr=[3500,5500],xt='[M/H]',yt='['+el.capitalize()+'/Fe]',rasterized=True)\n plots.plotc(ax[i,1],a['M_H'][por],a[tag][por],a['TEFF'][por],size=2,xr=[-2.5,1],yr=[-0.9,0.9],zr=[3500,5500],xt='[M/H]',rasterized=True)\n plots.plotc(ax[i,2],a['M_H'][pand],a[tag][pand],a['TEFF'][pand],size=2,xr=[-2.5,1],yr=[-0.9,0.9],zr=[3500,5500],xt='[M/H]',rasterized=True)\n ax[2,0].tick_params(axis='x',labelsize=8)\n ax[2,1].tick_params(axis='x',labelsize=8)\n ax[2,2].tick_params(axis='x',labelsize=8)\n cbaxes = fig.add_axes([0.91, 0.1, 0.01, 0.8])\n cb = plt.colorbar(axim, cax = cbaxes)\n cb.set_label(r'T$_{\\rm eff}$')\n cbaxes.tick_params(axis='both',labelsize=8)\n fig.savefig('persist_f_cno.pdf')\n\n tags=['MG_FE','SI_FE','S_FE','CA_FE','TI_FE']\n fig,ax = plots.multi(3,len(tags),wspace=0.001,hspace=0.001)\n for i,tag in enumerate(tags) :\n el=tag.split('_')[0]\n 
axim=plots.plotc(ax[i,0],a['M_H'][pno],a[tag][pno],a['TEFF'][pno],size=2,xr=[-2.5,1],yr=[-0.9,0.9],zr=[3500,5500],xt='[M/H]',yt='['+el.capitalize()+'/Fe]',rasterized=True)\n plots.plotc(ax[i,1],a['M_H'][por],a[tag][por],a['TEFF'][por],size=2,xr=[-2.5,1],yr=[-0.9,0.9],zr=[3500,5500],xt='[M/H]',rasterized=True)\n plots.plotc(ax[i,2],a['M_H'][pand],a[tag][pand],a['TEFF'][pand],size=2,xr=[-2.5,1],yr=[-0.9,0.9],zr=[3500,5500],xt='[M/H]',rasterized=True)\n ax[4,0].tick_params(axis='x',labelsize=8)\n ax[4,1].tick_params(axis='x',labelsize=8)\n ax[4,2].tick_params(axis='x',labelsize=8)\n cbaxes = fig.add_axes([0.91, 0.1, 0.01, 0.8])\n cb = plt.colorbar(axim, cax = cbaxes)\n cb.set_label(r'T$_{\\rm eff}$')\n cbaxes.tick_params(axis='both',labelsize=8)\n fig.savefig('persist_f_alpha.pdf')\n\n tags=['AL_FE','K_FE','MN_FE','NI_FE']\n fig,ax = plots.multi(3,len(tags),wspace=0.001,hspace=0.001)\n for i,tag in enumerate(tags) :\n el=tag.split('_')[0]\n axim=plots.plotc(ax[i,0],a['M_H'][pno],a[tag][pno],a['TEFF'][pno],size=2,xr=[-2.5,1],yr=[-0.9,0.9],zr=[3500,5500],xt='[M/H]',yt='['+el.capitalize()+'/Fe]',rasterized=True)\n plots.plotc(ax[i,1],a['M_H'][por],a[tag][por],a['TEFF'][por],size=2,xr=[-2.5,1],yr=[-0.9,0.9],zr=[3500,5500],xt='[M/H]',rasterized=True)\n plots.plotc(ax[i,2],a['M_H'][pand],a[tag][pand],a['TEFF'][pand],size=2,xr=[-2.5,1],yr=[-0.9,0.9],zr=[3500,5500],xt='[M/H]',rasterized=True)\n ax[3,0].tick_params(axis='x',labelsize=8)\n ax[3,1].tick_params(axis='x',labelsize=8)\n ax[3,2].tick_params(axis='x',labelsize=8)\n cbaxes = fig.add_axes([0.91, 0.1, 0.01, 0.8])\n cb = plt.colorbar(axim, cax = cbaxes)\n cb.set_label(r'T$_{\\rm eff}$')\n cbaxes.tick_params(axis='both',labelsize=8)\n fig.savefig('persist_f_fe.pdf')\n",
"from __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import ascii\nfrom astropy.io import fits\nfrom astropy.modeling import models,fitting\nfrom astropy.convolution import convolve, Box2DKernel\nfrom apogee.utils import apload\nfrom pyvista import image\nfrom tools import plots\nfrom tools import html\nimport os\nimport pdb\nfrom sklearn.cluster import KMeans\n\ndef maps(out='R.dat') :\n fp=open(out,'w')\n load.apred='current'\n load.instrument='apogee-s'\n lsf=load.apLSF(22670019)\n wave=load.apLSF(22600042)\n f,r=modelmap(lsf,wave)\n fp.write(\"apogee-s 22670019\\n\")\n stats(r,fp)\n\n load.instrument='apogee-n'\n lsf=load.apLSF(13400033)\n wave=load.apLSF(13400053)\n f,r=modelmap(lsf,wave)\n fp.write(\"apogee-n 13400033\\n\")\n stats(r,fp)\n\n lsf=load.apLSF(5440020)\n wave=load.apLSF(13400053)\n f,r=modelmap(lsf,wave)\n fp.write(\"apogee-n 05440020\\n\")\n stats(r,fp)\n fp.close()\n\ndef stats(r,fp) :\n for fib in [6,50,94] :\n fp.write(\"{:3d}\".format(3*fib))\n for ichip in range(3) :\n fp.write(\"{:8.1f}{:8.1f}{:8.1f} \".format(np.median(r[fib,:,ichip],axis=0),np.min(r[fib,:,ichip],axis=0),np.max(r[fib,:,ichip],axis=0)))\n fp.write(\"\\n\")\n\ndef modelmap(lsfframe,waveframe=None,cols=np.arange(5,2000,20),fibers=np.arange(1,300,3),apStar=False,smooth=5,hard=False) :\n '''\n Make LSF map from a model a[ps]LSF file\n '''\n\n lsfmap=np.zeros([len(fibers),len(cols),3])\n w=np.zeros([len(fibers),len(cols),3])\n dw=np.zeros([len(fibers),len(cols),3])\n r=np.zeros([len(fibers),len(cols),3])\n\n for ichip,chip in enumerate(['a','b','c']) :\n lsf=lsfframe[chip][1].data\n if waveframe is not None : wave=waveframe[chip][2].data\n\n nx=lsf.shape[0]\n x=np.arange(nx)\n col=1000\n fit=fitting.LevMarLSQFitter()\n g_init=models.Gaussian1D(mean=nx/2,stddev=2.5/2.354,amplitude=0.3)\n\n for i,col in enumerate(cols) :\n for j,fiber in enumerate(fibers) :\n y=lsf[:,300-fiber,col]\n g=fit(g_init,x,y)\n if waveframe is not None :\n w[j,i,ichip] = wave[300-fiber,col]\n dw[j,i,ichip] = wave[300-fiber,col]-wave[300-fiber,col-1]\n print(300-fiber,col,g.stddev*2.354,g.stddev*2.354/(6e-6*wave[300-fiber,col]*np.log(10.))*np.abs(dw[j,i,ichip]))\n lsfmap[j,i,ichip] = g.stddev*2.354\n r[j,i,ichip] = w[j,i,ichip]/(g.stddev*2.354*np.abs(dw[j,i,ichip]))\n if apStar and waveframe is not None :\n lsfmap[j,i,ichip]=lsfmap[j,i,ichip]/(6e-6*wave[300-fiber,col]*np.log(10.))*np.abs(dw[j,i,ichip])\n if smooth> 0: \n lsfmap[:,:,ichip] = convolve(np.squeeze(lsfmap[:,:,ichip]), Box2DKernel(smooth),boundary='extend')\n r[:,:,ichip] = convolve(np.squeeze(r[:,:,ichip]), Box2DKernel(smooth),boundary='extend')\n plt.imshow(lsfmap[:,:,ichip],vmin=1.5,vmax=3.5,interpolation='nearest') \n plt.draw()\n plt.show()\n\n plt.close()\n\n for idata,data in enumerate([lsfmap, r ]) :\n if idata == 0 : \n zr=[1.5,3.5]\n suffix='_fwhm'\n cmap='jet_r'\n else : \n zr=[18000,26000]\n suffix='_R'\n cmap='jet'\n fig,ax=plots.multi(3,1,figsize=(12,6),wspace=0.1)\n #im=ax.imshow(np.reshape(r,(len(fibers),3*len(cols)),order='F'),vmin=18000,vmax=25000,interpolation='nearest',cmap='jet_r') \n for ichip,chip in enumerate(['a','b','c']) :\n im=ax[ichip].imshow(data[:,:,ichip],vmin=zr[0],vmax=zr[1],interpolation='nearest',cmap=cmap,\n extent=(cols[0],cols[-1],fibers[-1],fibers[0]),origin='upper',aspect=2048./300.) 
\n ax[ichip].set_xlabel('Wavelength')\n xpix=[]\n xlab=[]\n wlab=[15200,15500,15750,15900,16150,16400,16550,16750,16900] \n if waveframe is not None :\n wave=waveframe[chip][2].data\n for w in wlab :\n ipix=abs(wave[150,:]-w).argmin()\n print(w,ipix)\n if ipix > 0 and ipix<2000 :\n xpix.append(ipix)\n xlab.append(str(w))\n ax[ichip].set_xticks(xpix)\n ax[ichip].set_xticklabels(xlab)\n if ichip == 0 : \n ax[ichip].set_ylabel('Fiber')\n else : \n #ax.set_xticklabels([])\n ax[ichip].set_yticklabels([])\n cbar_ax = fig.add_axes([0.25, 0.05, 0.5, 0.03])\n fig.colorbar(im, cax=cbar_ax, orientation='horizontal')\n plt.show()\n if hard :\n fig.savefig(lsfframe['filename'].strip('.fits')+suffix+'.png')\n fig.savefig(lsfframe['filename'].strip('.fits')+suffix+'.eps')\n plt.close()\n #pdb.set_trace()\n\n return lsfmap, r\n\ndef datamap(frameid=22600042,waveid=22600042,psfid=22600030,lamp='UNe',fibers=np.arange(50,300,50)) :\n\n lines=ascii.read(os.environ['APOGEEREDUCE_DIR']+'/lib/linelists/'+lamp+'.vac.apogee')\n pdb.set_trace()\n bright=np.where((lines['FLUX'] > 500) & (lines['USEWAVE'] ==1))[0]\n wbright=lines['WAVE'][bright]\n data=load.ap2D(frameid)\n psf=load.apEPSF(psfid)\n wave=load.apWave(waveid)\n\n fig,ax=plots.multi(1,3)\n lsfmap=np.zeros([300,2048,3])\n for ichip,chip in enumerate(['a','b','c']) :\n x=[]\n y=[]\n z=[]\n for w in wbright :\n for fiber in fibers :\n col=np.abs(wave[chip][2].data[fiber,:]-w).argmin()\n if (col>50) and (col<2000) :\n row=psf[chip][fiber].data['CENT'][0,col]\n g=image.gfit(data[chip][1].data,col,row,sub=False,pafixed=True,size=3)\n print(chip,w,fiber,col,row,g[0].x_stddev.value*2.354,g[0].y_stddev.value*2.354)\n lsfmap[fiber,col,ichip] = g[0].x_stddev.value*2.354\n x.append(col)\n y.append(row)\n z.append(g[0].x_stddev.value*2.354)\n pdb.set_trace()\n x=np.array(x)\n y=np.array(y)\n z=np.array(z)\n plots.plotc(ax[ichip],x,y,z,zr=[2,4],size=50) \n pdb.set_trace()\n\ndef group(lsf,wave=None,hard=None,groups=None) :\n \"\"\" Plot the FWHM from gaussian fits to the LSF of the 3 chips at column 1024\n \"\"\"\n fibers=np.arange(1,301)\n fwhm,r=modelmap(lsf,waveframe=wave,cols=[512,1024,1536],fibers=fibers,smooth=0)\n plt.close()\n plt.close()\n fig,ax=plots.multi(2,2,figsize=(8,6))\n plots.plotc(ax[0,0],fwhm[:,1,0],fwhm[:,1,1],fibers,xr=[2.3,3.8],yr=[1.8,3.0],xt='red fwhm, col 1024',yt='green fwhm, col 1024')\n ax[0,0].set_title('Color coded by fiber')\n plots.plotc(ax[0,1],fwhm[:,1,1],fwhm[:,1,2],fibers,xr=[2.0,3.5],yr=[1.8,3.0],xt='green fwhm, col 1024',yt='blue fwhm, col 1024')\n ax[0,1].set_title('Color coded by fiber')\n ax[0,0].grid()\n ax[0,1].grid()\n ax[1,0].grid()\n ax[1,1].grid()\n rfig,rax=plots.multi(2,2,figsize=(8,6))\n plots.plotc(rax[0,0],r[:,1,0],r[:,1,1],fibers,xr=[18000,28000],yr=[18000,28000],xt='red R, col 1024',yt='green R, col 1024')\n rax[0,0].set_title('Color coded by fiber')\n plots.plotc(rax[0,1],r[:,1,1],r[:,1,2],fibers,xr=[18000,28000],yr=[18000,28000],xt='green R, col 1024',yt='blue R, col 1024')\n rax[0,1].set_title('Color coded by fiber')\n rax[0,0].grid()\n rax[0,1].grid()\n rax[1,0].grid()\n rax[1,1].grid()\n\n # do a Kmeans analysis to split into LSF groups and plot\n X = np.squeeze(fwhm[:,1,:])\n km = KMeans(n_clusters=4)\n labels = km.fit_predict(X)\n for j in range(3) :\n plots.plotc(ax[1,0],fwhm[:,j,0],fwhm[:,j,1],labels,xr=[2.3,3.8],yr=[1.8,3.0],xt='red fwhm, col 1024',yt='green fwhm, col 1024')\n plots.plotc(ax[1,1],fwhm[:,j,1],fwhm[:,j,2],labels,xr=[2.0,3.5],yr=[1.8,3.0],xt='green fwhm, col 1024',yt='blue fwhm, col 1024')\n 
plots.plotc(rax[1,0],r[:,j,0],r[:,j,1],labels,xr=[18000,28000],yr=[18000,28000],xt='red R, col 1024',yt='green R, col 1024')\n plots.plotc(rax[1,1],r[:,j,1],r[:,j,2],labels,xr=[18000,28000],yr=[18000,28000],xt='green R, col 1024',yt='blue R, col 1024')\n ax[1,0].set_title('Color coded by group')\n ax[1,1].set_title('Color coded by group')\n rax[1,0].set_title('Color coded by group')\n rax[1,1].set_title('Color coded by group')\n gfig,gax=plots.multi(1,1)\n plots.plotp(gax,fibers,labels)\n if groups is not None :\n # show default groups if input\n for start in groups : gax.plot([start-0.5,start-0.5],[-1,5],color='k')\n\n if hard is not None : \n fig.tight_layout()\n fig.savefig(hard+'.png')\n rfig.tight_layout()\n rfig.savefig(hard+'_r.png')\n gfig.savefig(hard+'_group.png')\n plt.close()\n plt.close()\n plt.close()\n\n return fwhm\n\ndef parplot(lsf,hard=None) :\n \"\"\" Plot the LSF parameters of an LSF fit\n \"\"\"\n fig,ax=plots.multi(4,3)\n\n colors=['r','g','b']\n for ichip,chip in enumerate(['a','b','c']) :\n ii=0\n for ipar in range(9,18)+range(24,26) :\n iy=ii%3\n ix=ii//3\n ax[iy,ix].plot(lsf[chip][0].data[ipar,:],color=colors[ichip])\n ii+=1\n if hard is not None : \n fig.savefig(hard+'.png')\n plt.close()\n\ndef sum(apred='r11',telescope='apo25m',lsfs=[3430016,7510018,11130063,14600018,18430026,22330043,25560065],waveid=None,out='apogee-n' ,verbose=False,groups=None) :\n \"\"\" Make plots for a series of LSFs and a summary web page\n \"\"\"\n load=apload.ApLoad(apred=apred,telescope=telescope,verbose=verbose)\n if telescope == 'apo25m' : prefix = 'ap'\n else : prefix = 'as'\n if waveid is not None : wave=load.apWave(waveid)\n else : wave=None\n\n grid=[]\n ytit=[]\n for lsfid in lsfs :\n lsf=load.apLSF(lsfid)\n name1='pars_{:08d}'.format(lsfid)\n parplot(lsf,hard=name1) \n name2='fwhm_{:08d}'.format(lsfid)\n group(lsf,wave=wave,hard=name2,groups=groups) \n grid.append([name1+'.png',name2+'.png',name2+'_r.png',name2+'_group.png'])\n ytit.append('<A HREF={:s}LSF-{:08d}.html>{:08d}</A>'.format(prefix,lsfid,lsfid))\n\n xt=['LSF parameters','LSF FWHM','LSF R','LSF groups']\n\n html.htmltab(grid,xtitle=xt,ytitle=ytit,file=out+'.html')\n\ndef fibergroups(groups) :\n for i in range(len(groups)-1) :\n n=groups[i+1]-groups[i]\n for j in range(5) : print(int(groups[i]+j*n/5.+n/10.),end=\" \")\n print(\"\")\n\ndef dr16() :\n \"\"\" Make summary LSF pages/plots for DR16 LSFs\n \"\"\"\n groups=[1,50,146,246,301]\n fibergroups(groups)\n sum(apred='r11',telescope='apo25m',lsfs=[3430016,7510018,11130063,14600018,18430026,22330043,25560065],waveid=24040000,out='apogee-n',groups=groups)\n groups=[1,32,89,151,301]\n fibergroups(groups)\n sum(apred='r11',telescope='lco25m',lsfs=[22940020,26990075],out='apogee-s', waveid=24040000,groups=groups)\n\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.exp",
"numpy.where",
"numpy.arange",
"numpy.diff",
"numpy.interp",
"numpy.zeros",
"numpy.log",
"numpy.min",
"scipy.interpolate.splev",
"numpy.log10",
"numpy.savetxt",
"numpy.array",
"numpy.sum",
"numpy.convolve",
"scipy.interpolate.splrep",
"numpy.ones",
"numpy.prod",
"numpy.loadtxt"
],
[
"matplotlib.pyplot.colorbar",
"numpy.where"
],
[
"numpy.arange",
"numpy.array",
"numpy.where"
],
[
"matplotlib.ticker.MultipleLocator",
"numpy.sqrt",
"numpy.linspace",
"numpy.squeeze",
"numpy.exp",
"numpy.where",
"numpy.unique",
"numpy.clip",
"numpy.arange",
"numpy.sin",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.median",
"numpy.append",
"numpy.core.defchararray.strip",
"numpy.array",
"numpy.abs",
"numpy.cos",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.pause"
],
[
"matplotlib.pyplot.colorbar"
],
[
"matplotlib.pyplot.imshow",
"numpy.log",
"numpy.abs",
"sklearn.cluster.KMeans",
"numpy.min",
"numpy.arange",
"numpy.squeeze",
"numpy.median",
"matplotlib.pyplot.draw",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
phil-lo/pyportlib | [
"3fbe7460c809a80e48615e934990dcd2d1f5003b"
] | [
"pyportlib/services/cash_manager.py"
] | [
"from datetime import datetime\nfrom typing import List, Union\nimport pandas as pd\n\nfrom pyportlib.services.cash_change import CashChange\nfrom pyportlib.utils import df_utils, files_utils\nfrom pyportlib.utils import logger\n\n\nclass CashManager:\n NAME = \"Cash Account\"\n ACCOUNTS_DIRECTORY = files_utils.get_accounts_dir()\n CASH_INFO = ['Date', 'Direction', 'Amount']\n CASH_FILENAME = \"cash.csv\"\n\n def __init__(self, account):\n self.account = account\n self.directory = f\"{self.ACCOUNTS_DIRECTORY}{self.account}\"\n self._cash_changes = pd.DataFrame()\n self.load()\n\n def __repr__(self):\n return self.NAME\n\n def load(self) -> None:\n \"\"\"\n Loads account cash changes from .csv of creates empty one if it is a new account\n :return:\n \"\"\"\n if files_utils.check_file(self.directory, self.CASH_FILENAME):\n cash = pd.read_csv(f\"{self.directory}/{self.CASH_FILENAME}\")\n try:\n cash.drop(columns='Unnamed: 0', inplace=True)\n except KeyError:\n pass\n finally:\n if df_utils.check_df_columns(df=cash, columns=self.CASH_INFO):\n cash.set_index('Date', inplace=True)\n cash.index.name = 'Date'\n cash.index = pd.to_datetime(cash.index)\n self._cash_changes = cash\n else:\n logger.logging.info(f'cash file does not match requirements: {self.account}')\n else:\n # if new ptf, create required files to use it\n if not files_utils.check_dir(self.directory):\n files_utils.make_dir(self.directory)\n # create empty transaction file in new directory\n empty_cash = self._empty_cash()\n empty_cash.to_csv(f\"{self.directory}/{self.CASH_FILENAME}\")\n self._cash_changes = empty_cash\n\n @property\n def cash_changes(self):\n return self._cash_changes\n\n def get_cash_change(self, date: datetime):\n c_ch = self.cash_changes\n return c_ch.loc[self.cash_changes.index <= date, 'Amount'].sum()\n\n def _write(self, date: datetime, direction: str, amount: float):\n direction = direction.title()\n if direction not in ['Deposit', 'Withdrawal']:\n raise Exception(f'cash direction type not supported {direction}')\n\n self.cash_changes.loc[date, \"Direction\"] = direction\n self.cash_changes.loc[date, \"Amount\"] = amount\n\n self.cash_changes.to_csv(f\"{self.directory}/{self.CASH_FILENAME}\")\n self.load()\n\n def add(self, cash_changes: Union[List[CashChange], CashChange]):\n if cash_changes:\n if not hasattr(cash_changes, '__iter__'):\n cash_changes = [cash_changes]\n\n for cc in cash_changes:\n cc = cc.info\n self._write(date=cc[\"Date\"], direction=cc['Direction'], amount=cc['Amount'])\n\n def reset(self):\n empty_cash = self._empty_cash()\n empty_cash.to_csv(f\"{self.directory}/{self.CASH_FILENAME}\")\n self._cash_changes = empty_cash\n\n def _empty_cash(self):\n return pd.DataFrame(columns=self.CASH_INFO).set_index('Date')\n\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
covid-19-impact-lab/sid | [
"d867f55d4d005b01c672bd2edd0e1dc974cb182b"
] | [
"tests/test_parse_model.py"
] | [
"from contextlib import ExitStack as does_not_raise # noqa: N813\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom sid.config import DEFAULT_VIRUS_STRAINS\nfrom sid.config import INITIAL_CONDITIONS\nfrom sid.parse_model import parse_duration\nfrom sid.parse_model import parse_initial_conditions\nfrom sid.parse_model import parse_virus_strains\n\n\[email protected]\[email protected](\n \"duration, expectation, expected\",\n [\n (\n {\"start\": \"2020-01-01\", \"end\": \"2020-01-02\"},\n does_not_raise(),\n {\n \"start\": pd.Timestamp(\"2020-01-01\"),\n \"end\": pd.Timestamp(\"2020-01-02\"),\n \"dates\": pd.DatetimeIndex(pd.to_datetime([\"2020-01-01\", \"2020-01-02\"])),\n },\n ),\n (\n {\"start\": \"2020-01-01\", \"periods\": 2},\n does_not_raise(),\n {\n \"start\": pd.Timestamp(\"2020-01-01\"),\n \"end\": pd.Timestamp(\"2020-01-02\"),\n \"dates\": pd.DatetimeIndex(pd.to_datetime([\"2020-01-01\", \"2020-01-02\"])),\n },\n ),\n (\n {\"start\": \"2020-01-01\", \"periods\": 2, \"freq\": \"s\"},\n pytest.warns(UserWarning, match=\"Only 'start', 'end', and 'periods'\"),\n {\n \"start\": pd.Timestamp(\"2020-01-01\"),\n \"end\": pd.Timestamp(\"2020-01-02\"),\n \"dates\": pd.DatetimeIndex(pd.to_datetime([\"2020-01-01\", \"2020-01-02\"])),\n },\n ),\n ({\"periods\": 2}, pytest.raises(ValueError, match=\"Of the four\"), None),\n ],\n)\ndef test_parse_duration(duration, expectation, expected):\n with expectation:\n result = parse_duration(duration)\n for k in result:\n if k == \"dates\":\n assert np.all(result[k] == expected[k])\n else:\n assert result[k] == expected[k]\n\n\[email protected]\[email protected](\n (\n \"initial_conditions\",\n \"start_date_simulation\",\n \"virus_strains\",\n \"expectation\",\n \"expected\",\n ),\n [\n (\n None,\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n does_not_raise(),\n {**INITIAL_CONDITIONS, \"virus_shares\": {\"base_strain\": 1.0}},\n ),\n (\n {\"assort_by\": [\"region\"]},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n does_not_raise(),\n {\n **INITIAL_CONDITIONS,\n \"assort_by\": [\"region\"],\n \"virus_shares\": {\"base_strain\": 1.0},\n },\n ),\n (\n {\"assort_by\": \"region\"},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n does_not_raise(),\n {\n **INITIAL_CONDITIONS,\n \"assort_by\": [\"region\"],\n \"virus_shares\": {\"base_strain\": 1.0},\n },\n ),\n (\n {\"growth_rate\": 0},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n pytest.raises(ValueError, match=\"'growth_rate' must be greater than or\"),\n None,\n ),\n (\n {\"burn_in_periods\": 0},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n pytest.raises(ValueError, match=\"'burn_in_periods' must be greater or\"),\n None,\n ),\n (\n {\"burn_in_periods\": 2.0},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n pytest.raises(ValueError, match=\"'burn_in_periods' must be an integer\"),\n None,\n ),\n (\n {\"initial_infections\": None},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n pytest.raises(ValueError, match=\"'initial_infections' must be a\"),\n None,\n ),\n ],\n)\ndef test_parse_initial_conditions(\n initial_conditions, start_date_simulation, virus_strains, expectation, expected\n):\n with expectation:\n result = parse_initial_conditions(\n initial_conditions, 
start_date_simulation, virus_strains\n )\n expected[\"burn_in_periods\"] = pd.DatetimeIndex([pd.Timestamp(\"2020-01-01\")])\n assert result == expected\n\n\[email protected]\[email protected](\n \"virus_strains, params, expectation, expected\",\n [\n pytest.param(None, None, does_not_raise(), DEFAULT_VIRUS_STRAINS, id=\"default\"),\n pytest.param(\n [],\n None,\n pytest.raises(ValueError, match=\"The list of\"),\n None,\n id=\"empty list\",\n ),\n pytest.param(\n [\"b117\"],\n pd.DataFrame(\n index=pd.MultiIndex.from_tuples(\n [], names=[\"category\", \"subcategory\", \"value\"]\n )\n ),\n pytest.raises(ValueError, match=\"Some factors for the infectiousness\"),\n None,\n id=\"missing param\",\n ),\n pytest.param(\n [\"wild_strain\", \"b117\"],\n pd.DataFrame(\n index=pd.MultiIndex.from_tuples(\n [\n (\"virus_strains\", \"wild_strain\", \"factor\"),\n (\"virus_strains\", \"b117\", \"factor\"),\n ],\n names=[\"category\", \"subcategory\", \"value\"],\n ),\n ),\n pytest.raises(ValueError, match=\"Some factors for the infectiousness\"),\n {\"names\": [\"b117\", \"wild_strain\"]},\n id=\"usual parsing\",\n ),\n pytest.param(\n set(),\n None,\n pytest.raises(ValueError, match=\"'virus_strains' is not 'None'\"),\n None,\n id=\"wrong input\",\n ),\n ],\n)\ndef test_parse_virus_strains(virus_strains, params, expectation, expected):\n with expectation:\n result = parse_virus_strains(virus_strains, params)\n\n assert result[\"names\"] == expected[\"names\"]\n assert \"factors\" not in result\n"
] | [
[
"pandas.to_datetime",
"pandas.MultiIndex.from_tuples",
"numpy.ones",
"numpy.all",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hamhochoisg/moneydetection | [
"32a02f54a4a0c1a6f41a232fa30a3f0f15bdab13"
] | [
"main.py"
] | [
"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport os\nfrom tensorflow.keras.preprocessing import image \n\nst.title('Banknotes Classification')\nmenu = ['Home','Up Load & Predict', 'Capture From Webcam']\n\n#========================#\n#==== Function #=========#\nModel_Path = 'model\\my_model_checkpoint.h5'\nclass_names = ['1000', '10000', '100000', '2000', '20000', '200000', '5000', '50000', '500000']\n\ndef get_saved_model(Model_Path):\n # Learning Rate maybe decrease so quick => start with 0.01\n restored_model = tf.keras.models.load_model(Model_Path)\n\n # Show the model architecture\n # restored_model.summary() #print in terminal\n return restored_model\n\ndef predict_image(image_path): #input and image show prediction label, reutrn string value of prediction\n model = get_saved_model(Model_Path)\n #Preprocess image:\n img = image.load_img(image_path, target_size=(224, 224))\n img_array = image.img_to_array(img)\n img_array = np.expand_dims(img_array, axis=0) #predict nhận theo batch (1,224,224,3)\n\n #Prediction:\n \n prediction = model.predict(img_array)\n index = prediction.argmax()\n l = list(prediction)\n tmp_percent = l[0][index]*100\n\n pred = class_names[index]\n st.write('model prediction:')\n st.write(pred)\n st.write('Model Propotion:')\n st.write(tmp_percent)\n\ndef predict_image_array(img_array): #input and image array with shape = (1,224,224,3) show prediction label, reutrn string value of prediction\n model = get_saved_model(Model_Path)\n \n prediction = model.predict(img_array)\n index = prediction.argmax()\n l = list(prediction)\n tmp_percent = l[0][index]*100\n\n pred = class_names[index]\n st.write('model prediction:')\n st.write(pred)\n st.write('Model Propotion:')\n st.write(tmp_percent)\n \n print(l)\n\n return l,index\n\n#========================#\n\nchoice = st.sidebar.selectbox('Danh mục', menu)\n\nif choice == 'Home':\n st.title('This is Home Page')\n st.write('Xin chào, đây là ứng dụng phân loại tiền')\n \n # Get The current Path\n current_path = os.getcwd()\n st.write('current path:')\n st.write(current_path)\n\n #Load Model\n st.write('This is our model:')\n # model = get_saved_model(Model_Path) \n test_image_path = \"media\\\\test\\\\500000\\\\Sự-thật-về-cách-đoán-3-số-Seri-tiền-500k-200k-100k-50k-20k-10k.jpg\"\n \n #Show Image\n st.write('For Example Below Image')\n st.image(test_image_path,use_column_width='auto')\n st.write(\"Model Can Understand This Value\") \n\n #Prediction: \n # predict_image(test_image_path)\n \n\nelif choice == 'Up Load & Predict':\n st.title('Please Upload Your Banknotes Image, I Can Understand it:')\n photo_uploaded = st.file_uploader('Choose your banknotes photo', ['png', 'jpg', 'jpeg'])\n if photo_uploaded != None:\n \n image_np = np.asarray(bytearray(photo_uploaded.read()), dtype=np.uint8)\n # print(image_np)\n # print(image_np.shape)\n img = cv2.imdecode(image_np, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) \n print(img.shape)\n\n st.image(img)\n st.write(photo_uploaded.size)\n st.write(photo_uploaded.type)\n\n #Then Predict it\n img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)\n img_array = np.expand_dims(img, axis=0)\n # print(img_array.shape)\n print(type(img))\n\n predict_image_array(img_array)\n\nelif choice == 'Capture From Webcam':\n cap = cv2.VideoCapture(0) # device 0\n run = st.checkbox('Show Webcam')\n capture_button = st.checkbox('Campture')\n quit_button = st.checkbox('Quit')\n # Check if the webcam 
is opened correctly\n if not cap.isOpened():\n raise IOError(\"Cannot open webcam\")\n\n \n FRAME_WINDOW = st.image([])\n\n # Keep reading images from webcam until press 'q'\n while run:\n ret, frame = cap.read() \n \n # Display Webcam\n # cv2.imshow('My App!', frame)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB ) #Convert màu cho đúng\n\n FRAME_WINDOW.image(frame)\n\n if capture_button: # press \"c\" => capture\n # save the current frame and predict\n cap.release() # Thử release ra liền để lấy cái hình hiện tại\n cv2.destroyAllWindows()\n print('Frame shape',frame.shape)\n captured_image = frame\n # captured_image = cv2.cvtColor(captured_image, cv2.COLOR_BGR2RGB ) #Đã convert ở trên rồi \n \n st.image(captured_image)\n st.write('Model is predicting it:')\n captured_image = cv2.resize(captured_image, (224,224))\n img_array = np.expand_dims(captured_image, axis=0)\n predict_image_array(img_array)\n\n run = False\n capture_button = False\n\n if quit_button: # press \"q\" => quit\n run = False\n capture_button = False\n quit_button = False\n # break\n\n cap.release()\n cv2.destroyAllWindows()\n\n # if captured_image.shape != None:\n # captured_image = cv2.cvtColor(captured_image, cv2.COLOR_BGR2RGB ) \n # st.write('Image That Captured')\n # st.image(captured_image)\n # captured_image = cv2.resize(captured_image, (224,224))\n\n # if captured_image.shape != None:\n # st.write('Image That Captured')\n # st.image(captured_image)\n # captured_image = cv2.resize(captured_image, (224,224))\n # print('Captured Image Shape:',captured_image.shape)\n # print('Captured Image Type:',type(captured_image)) \n # img_array = np.expand_dims(captured_image, axis=0)\n # predict_image_array(img_array)\n\n\n\n"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.preprocessing.image.load_img"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.4",
"2.3",
"2.5",
"2.6"
]
}
] |
ssawwqdf/-project-stock_info_dashboard | [
"f14a462d915d2207db1da12307aefdef4b6921e1",
"f14a462d915d2207db1da12307aefdef4b6921e1"
] | [
"code_cr.py",
"lec20_flask.py"
] | [
"import re\nimport numpy as np\nimport pandas as pd\nimport requests #웹통신\nimport json\nfrom pmdarima.arima import ndiffs\nimport pmdarima as pm\nfrom pykrx import stock\nfrom bs4 import BeautifulSoup\nimport html5lib\n\n\n# ==============\n# 업종 분류\n# ==============\n# -------- 동일 업종 기업 출력\n# TODO(미완성) 동일 업종 선택\ndef select_same_industry(corp_name):\n indus=com_df[com_df['nm']==corp_name]['industry'].values[0] # TODO(df 확인)\n\n # print(com_df.groupby(by='industry')['nm'].nunique().max()) # 동종업계 최대 151개 -> 151개 재무제표 크롤링?\n\n list_com=com_df[com_df['industry']==indus]['corp_name'].values.tolist()\n return list_com\n\n\n\n# -------- 네이버증권 연관기업 코드(hjh)\ndef relate_code_crawl(co):\n #연관 종목코드 있는 페이지 불러오기\n url='https://finance.naver.com/item/main.naver?code='+str(co)\n page=pd.read_html(url,encoding='CP949')\n #연관 종목명과 종목코드 뽑아내기(code_list[0]은 '종목명'이어서 제외)\n code_list=page[4].columns.tolist()\n code_list=code_list[1:]\n #종목코드 리스트 반환\n codes=[]\n for word in (code_list):\n codes.append(word[-6:])\n #print(codes)\n return codes\n\n#relate_code_crawl('000660')\n\n\n\n# ==============\n# 기업 이름 코드 변환\n# ==============\n\n# -------- 네이버 재무제표 크롤링 용 gicode로 변환\ndef nm_to_bs_gicode(corp_name):\n gi=com_df[com_df['nm']==corp_name]['cd']\n gi=gi.values[0]\n return gi\n\n\n\ndef stc_code_to_bs_gicode(stock_code):\n gi = com_df[com_df['stock_code'] == stock_code]['cd']\n gi = gi.values[0]\n return gi\n\n\n\ndef yh_code_to_bs_gicode(yh_code):\n gi = com_df[com_df['yh_code'] == yhcode]['cd']\n gi = gi.values[0]\n return gi\n\n\n\n# -------- 네이버 금융 크롤링 용 gicode로 변환\ndef nm_to_fn_gicode(corp_name):\n gi=com_df[com_df['nm']==corp_name]['stock_code']\n gi=gi.values[0]\n return gi\n\n\n\ndef yh_code_to_fn_gicode(yh_code):\n gi=com_df[com_df['yh_code']==yh_code]['stock_code']\n gi=gi.values[0]\n return gi\n\n\n\n# -------- 코드를 기업이름으로 변환\ndef stc_code_to_nm(stock_code):\n gi = com_df[com_df['stock_code'] == stock_code]['nm']\n gi = gi.values[0]\n return gi\n\n\n\ndef yh_code_to_nm(yh_code):\n gi = com_df[com_df['yh_code'] == yh_code]['nm']\n gi = gi.values[0]\n return gi\n\n\n\n# ==============\n# 데이터 수집\n# ==============\n\n\n# -------- Balance Sheets API call\n# def bs_api(corp_name=None, yh_code=None, stock_code=None):\n# print('haha')\n\n\n\n\n# -------- Balance Sheets Crawling(재무제표 크롤링)\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) kind로 특정 테이블 지정하는 대신 데이터프레임 리스트 전체 반환\n# 3) '~계산에 참여한 계정 펼치기' 제거는 선택사항으로 둠\n\ndef bs_craw(stock_code, clear_name=False): # ------- 검색과 연동해서 입력 변수 설정\n \"\"\"\n # kind\n : 0 (연간 포괄손익계산서), 1 (분기별 포괄손익계산서)\n 2 (연간 재무상태표), 3 (분기별 재무상태표)\n 4 (연간 현금흐름표), 5 (분기별 현금프름표)\n \"\"\"\n\n # ------- 검색과 연동해서 입력되는 변수 따라 gicode(네이버에서 분류하는 기업 코드)로 변환\n gcode = stc_code_to_bs_gicode(stock_code)\n\n url = f\"http://comp.fnguide.com/SVO2/ASP/SVD_Finance.asp?NewMenuID=103&gicode={gcode}\"\n\n table_list = pd.read_html(url, encoding='UTF-8')\n\n # 항목에서 불필요한 부분 제거('계산에 참여한 계정 펼치기')\n if clear_name == False:\n return table_list\n\n else:\n new_table_list = []\n for tbl in table_list:\n for i, idx in enumerate(tbl.iloc[:, 0]):\n m = idx.replace('계산에 참여한 계정 펼치기', '')\n tbl.iloc[i, 0] = m\n new_table_list.append(tbl)\n return new_table_list\n\n\n# ------- 네이버 금융\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) kind로 특정 테이블 지정하는 대신 데이터프레임 리스트 전체 반환\ndef fn_craw(stock_code):\n \"\"\"\n # kind\n : 0 (전일&당일 상한가, 하한가, 거래량 등) #TODO 가공 필요\n 1 (증권사 별 매도 매수 정보) #TODO 가공 필요(컬럼이름)\n 2 (외국인, 기관 거래 정보) #TODO 가공 필요\n 3 (기업실적분석(연도별 분기별 주요재무 정보)) #TODO 가공 필요?\n 4 (동일업종비교) #TODO 가공 필요?\n 5 (시가총액, 주식수, 액면가 정보) #TODO 가공 필요\n 6 (외국인 주식 
한도, 보유 정보)\n 7 (목표주가 정보) #TODO 가공 필요\n 8 (PER, PBR 배당수익률 정보) (주가 따라 변동) #TODO 가공 필요\n 9 (동일업종 PER, 등락률 정보) #TODO 가공 필요\n 10 (호가 10단계)\n 11 (인기 검색 종목: 코스피) #TODO 가공 필요\n 12 (인기 검색 종목: 코스닥) #TODO 가공 필요\n \"\"\"\n\n gcode = str(stock_code)\n\n url = f\"https://finance.naver.com/item/main.naver?code={gcode}\"\n table_list = pd.read_html(url, encoding='euc-kr')\n\n return table_list\n\n# ==============\n# 지표 선정\n# ==============\n\n# 220222 날씨 수정 시작 ---------------------------------------------\n\n# -------- 지표 선정\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) 데이터프레임 하나가 아닌 리스트로 받아오기때문에 kind 제거하고 직접 선택해줌\n# 3) sli_df_y, sil_df_q 에서 '-' 가공 시 if 조건에 따라 처리하는 대신 lambda와 re.sub 이용\n# 4) dict 대신 array로 반환, 기업 이름(nm도 반환)\ndef idv_radar_weather_data(stock_code):\n \"\"\"\n # <지표 설명>\n # 1. 배당 분석 -> 배당성향(배당 커버리지의 역수.)\n # 2. 유동성 분석(단기채무지급능력) -> 당좌비율(당좌자산 / 유동부채)\n # 3. 재무건전성 분석(레버리지 비율) -> 부채비율(총부채 / 자기자본)의 역수\n # 4. 수익성분석 -> 매출수익성(당기순이익/매출액))\n # 5. 성장성분석 -> 순이익성장률\n \"\"\"\n\n gcode = stock_code\n nm = stc_code_to_nm(stock_code)\n\n sil_df = fn_craw(gcode)[3] # 3: 기업실적정보 재무제표 (220220 수정)\n foreign_ms = fn_craw(gcode)[2].loc[1, '외국인'] # 2 : 외국인, 기관 거래 정보\n giguan_ms = fn_craw(gcode)[2].loc[1, '기관'] # 2 : 외국인, 기관 거래 정보\n\n if (sil_df.iloc[0:8, 3].isna().sum()) > 0: # 표 안 가르고 계산하는 건 신규 상장 기업은 정보가 아예 없기 때문\n pass\n elif (sil_df.iloc[0:8, 9].isna().sum()) > 0: # 표 안 가르고 계산하는 건 신규 상장 기업은 정보가 아예 없기 때문\n pass\n\n\n else:\n # 0. 재무정보는 최신 분기 실공시 기준\n # 0. 단, 배당은 1년에 한 번 이루어지기 때문에 최신 년도 공시 기준임\n sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2] # 느리지만 .iloc으로 하는 이유는 공시 날짜가 다른 기업이 있기 때문\n sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]\n\n sil_df_y = sil_df_y.fillna(0)\n sil_df_q = sil_df_q.fillna(0)\n\n if sil_df_y.dtype == 'O':\n sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))\n sil_df_y = sil_df_y.astype('float')\n\n if sil_df_q.dtype == 'O':\n sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))\n sil_df_q = sil_df_q.astype('float')\n\n # 1. 배당성향(bd_tend)\n bd_tend = sil_df_y[15] # 실제 배당 성향\n\n # 2. 유동성 분석 - 당좌비율(당좌자산/유동부채)\n # 당좌자산 = (유동자산 - 재고자산)\n dj_rate = sil_df_q[7] # 당좌비율\n\n # 3. 재무건전성 분석 - 부채비율(총부채/자기자본)의 역수\n bch_rate = sil_df_q[6] / 100 # 부채비율\n bch_rate = round((1 / bch_rate) * 100, 2)\n\n # 4. 수익성 분석 - 매출수익성(당기순이익/매출액) # TODO 매출액 0인 애들은?\n\n dg_bene = sil_df_q[2]\n mch = sil_df_q[0]\n\n suyk = round((dg_bene / mch) * 100, 2)\n\n # 5. 
성장성 분석 - 순이익성장률(지속성장 가능률)\n # (1-배당성향)*자기자본순이익률(ROE)\n # 유보율\n\n roe = sil_df_y[5] / 100\n ubo = (100 - bd_tend) / 100\n grth = round(roe * ubo * 100, 2)\n\n data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])\n\n # weather part----------------\n # PER?\n weather_per = sil_df_y[10]\n\n # PBR\n weather_pbr = sil_df_y[12]\n\n # ROE\n weather_roe = sil_df_y[5]\n\n # EPS\n weather_eps = sil_df_y[9]\n\n # BPS\n weather_bps = sil_df_y[11]\n\n # array\n weather_arr = np.array([weather_per, weather_pbr, weather_roe, weather_eps, weather_bps])\n\n return data_arr, weather_arr, nm, foreign_ms, giguan_ms\n\n# 수정수정수정\n\n# -------- 관련 기업 지표 선정(상대적 비율 기준)\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) dict 대신 array로 반환, 기업 이름(nm도 반환)\n\n# 220222 날씨\n\ndef relate_radar_weather_data(stock_code):\n label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성']\n arr_list = []\n\n # 주식 코드,이름으로 변환\n\n gcode = stock_code\n\n relate_corp = relate_code_crawl(co=gcode)\n\n # 다섯 개 회사가 안에 있다\n arr_list = [idv_radar_weather_data(stock_code=stcd) for stcd in relate_corp]\n\n # arr_list에서 데이터 분리\n radar_list = [x[0] for x in arr_list if x is not None]\n weather_list = [x[1] for x in arr_list if x is not None]\n nm_list = [x[2] for x in arr_list if x is not None]\n\n # 외인 매수, 기관 매수\n try:\n foreign_ms = arr_list[0][3]\n except TypeError:\n foreign_ms=0.01\n\n try:\n giguan_ms = arr_list[0][4]\n except TypeError:\n giguan_ms=0.01\n\n # radar_chart_data\n radar_list = np.array(radar_list)\n\n radar_list[:, 0] = (radar_list[:, 0] / radar_list[:, 0].mean()) * 100\n radar_list[:, 1] = (radar_list[:, 1] / radar_list[:, 1].mean()) * 100\n radar_list[:, 2] = (radar_list[:, 2] / radar_list[:, 2].mean()) * 100\n radar_list[:, 3] = (radar_list[:, 3] / radar_list[:, 3].mean()) * 100\n radar_list[:, 4] = (radar_list[:, 4] / radar_list[:, 4].mean()) * 100\n\n # radar_chart_dict\n radar_dict_list = []\n\n for i, nm in enumerate(nm_list):\n dic = {}\n dic[nm] = radar_list[i, :].tolist()\n radar_dict_list.append(dic)\n\n # weather_chart_data\n weather_list = np.array(weather_list)\n\n weather_list[:, 0] = (weather_list[:, 0] / weather_list[:, 0].mean()) # 각 기업의 평균 대비 PER\n weather_list[:, 1] = (weather_list[:, 1] / weather_list[:, 1].mean()) # 각 기업의 평균 대비 PBR\n weather_list[:, 2] = (weather_list[:, 2] / weather_list[:, 2].mean()) # 각 기업의 평균 대비 ROE\n weather_list[:, 3] = (weather_list[:, 3] / weather_list[:, 3].mean()) # 각 기업의 평균 대비 EPS\n weather_list[:, 4] = (weather_list[:, 4] / weather_list[:, 4].mean()) # 각 기업의 평균 대비 BPS\n weather_list=np.round(weather_list, 2)\n\n return label_list, radar_dict_list, weather_list[0], foreign_ms, giguan_ms\n\n\n# 220222 날씨 수정 끝 ---------------------------------------------\n\n# ==============\n# 지표 선정\n# ==============\n\n# -------- 지표 선정\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) 데이터프레임 하나가 아닌 리스트로 받아오기때문에 kind 제거하고 직접 선택해줌\n# 3) sli_df_y, sil_df_q 에서 '-' 가공 시 if 조건에 따라 처리하는 대신 lambda와 re.sub 이용\n# 4) dict 대신 array로 반환, 기업 이름(nm도 반환)\ndef idv_radar_data(stock_code):\n \"\"\"\n # <지표 설명>\n # 1. 배당 분석 -> 배당성향(배당 커버리지의 역수.)\n # 2. 유동성 분석(단기채무지급능력) -> 당좌비율(당좌자산 / 유동부채)\n # 3. 재무건전성 분석(레버리지 비율) -> 부채비율(총부채 / 자기자본)의 역수\n # 4. 수익성분석 -> 매출수익성(당기순이익/매출액))\n # 5. 성장성분석 -> 순이익성장률\n \"\"\"\n\n gcode = stock_code\n nm = stc_code_to_nm(stock_code)\n\n sil_df = fn_craw(gcode)[3] # 3: 기업실적정보 재무제표 (220220 수정)\n\n if (sil_df.iloc[0:8, 3].isna().sum()) > 0: # 표 안 가르고 계산하는 건 신규 상장 기업은 정보가 아예 없기 때문\n pass\n elif (sil_df.iloc[0:8, 9].isna().sum()) > 0: # 표 안 가르고 계산하는 건 신규 상장 기업은 정보가 아예 없기 때문\n pass\n\n\n else:\n # 0. 
재무정보는 최신 분기 실공시 기준\n # 0. 단, 배당은 1년에 한 번 이루어지기 때문에 최신 년도 공시 기준임\n sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2] # 느리지만 .iloc으로 하는 이유는 공시 날짜가 다른 기업이 있기 때문\n sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]\n\n sil_df_y = sil_df_y.fillna(0)\n sil_df_q = sil_df_q.fillna(0)\n\n if sil_df_y.dtype == 'O':\n sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))\n sil_df_y = sil_df_y.astype('float')\n\n if sil_df_q.dtype == 'O':\n sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))\n sil_df_q = sil_df_q.astype('float')\n\n # 1. 배당성향(bd_tend)\n bd_tend = sil_df_y[15] # 실제 배당 성향\n\n # 2. 유동성 분석 - 당좌비율(당좌자산/유동부채)\n # 당좌자산 = (유동자산 - 재고자산)\n dj_rate = sil_df_q[7] # 당좌비율\n\n # 3. 재무건전성 분석 - 부채비율(총부채/자기자본)의 역수\n bch_rate = sil_df_q[6] / 100 # 부채비율\n bch_rate = round((1 / bch_rate) * 100, 2)\n\n # 4. 수익성 분석 - 매출수익성(당기순이익/매출액) # TODO 매출액 0인 애들은?\n\n dg_bene = sil_df_q[2]\n mch = sil_df_q[0]\n\n suyk = round((dg_bene / mch) * 100, 2)\n\n # 5. 성장성 분석 - 순이익성장률(지속성장 가능률)\n # (1-배당성향)*자기자본순이익률(ROE)\n # 유보율\n\n roe = sil_df_y[5] / 100\n ubo = (100 - bd_tend) / 100\n grth = round(roe * ubo * 100, 2)\n\n data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])\n\n return data_arr, nm\n\n\n# -------- 관련 기업 지표 선정(상대적 비율 기준)\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) dict 대신 array로 반환, 기업 이름(nm도 반환)\ndef relate_radar_data(stock_code):\n label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성']\n arr_list = []\n\n # 주식 코드,이름으로 변환\n\n gcode = stock_code\n\n relate_corp = relate_code_crawl(co=gcode)\n\n arr_list = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]\n nm_list = [x[1] for x in arr_list if x is not None]\n arr_list = [x[0] for x in arr_list if x is not None]\n\n arr_list = np.array(arr_list)\n\n arr_list[:, 0] = (arr_list[:, 0] / arr_list[:, 0].mean()) * 100\n arr_list[:, 1] = (arr_list[:, 1] / arr_list[:, 1].mean()) * 100\n arr_list[:, 2] = (arr_list[:, 2] / arr_list[:, 2].mean()) * 100\n arr_list[:, 3] = (arr_list[:, 3] / arr_list[:, 3].mean()) * 100\n arr_list[:, 4] = (arr_list[:, 4] / arr_list[:, 4].mean()) * 100\n\n dict_list = []\n\n for i, nm in enumerate(nm_list):\n dic = {}\n dic[nm] = arr_list[i, :].tolist()\n dict_list.append(dic)\n\n return label_list, dict_list\n\n\n# -------- 관련 기업 지표 선정(원본)\n\n# def relate_radar_data(yh_code=None, corp_name=None, stock_code=None):\n# label_list=['배당성향', '유동성', '건전성', '수익성', '성장성']\n# dict_list = []\n#\n# # 주식 코드로 변환\n# gcode = 0\n# if yh_code != None:\n# gcode = yh_code_to_fn_gicode(yh_code)\n# elif corp_name != None:\n# gcode = nm_to_fn_gicode(corp_name)\n# elif stock_code != None:\n# gcode = stock_code\n#\n# relate_corp = relate_code_crawl(co=gcode)\n#\n# dict_list = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]\n#\n# dict_list = [x for x in dict_list if x is not None]\n#\n#\n# return label_list, dict_list\n\n\n# ==============\n# 시각화\n# ==============\n\n# -------- 매출, 당기순이익 추이 그래프\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) 크롤링한 데이터는 list로 받아오므로 kind 없애고 직접 인덱스 처리\n\ndef mch_dg(stock_code):\n gcode = stock_code\n nm = stc_code_to_nm(stock_code)\n\n bs_df = bs_craw(stock_code=gcode)[0]\n label_list = bs_df.columns[1:6].tolist() # 네 분기 + 전년동기\n mch_list = bs_df.loc[0, label_list].tolist() # 매출액\n dg_list = bs_df.loc[15, label_list].tolist() # 당기순이익\n\n return label_list, mch_list, dg_list\n\n\ndef icon_selection(index_array):\n res=[]\n for idx in index_array:\n if 3<idx :\n res.append(\"RAIN\")\n elif ( 1.2<idx and idx<=3 ):\n res.append(\"CLOUDY\")\n elif ( 0.8<idx and idx<=1.2 ):\n 
res.append(\"PARTLY_CLOUDY_DAY\")\n elif ( 0<idx and idx<=0.8 ):\n res.append(\"CLEAR_DAY\")\n else:\n res.append(\"SNOW\")\n\n return res\n\ndef foreign_giguan(index_array):\n res = []\n for idx in index_array:\n if idx >0:\n res.append(\"CLEAR_DAY\")\n elif idx==0:\n res.append(\"CLOUDY\")\n else:\n res.append(\"RAIN\")\n\n return res\n\n\n\n\n\n# ====================================================\n# 데이터\n# ====================================================\n\n# -------- 병합 파일 불러오기\ncom_df=pd.read_csv('com_df.csv',\n dtype={'stock_code': 'str', '표준코드': 'str', '단축코드': 'str', 'stock_code_ori':'str'},\n parse_dates=['listed_date', '상장일'])\n\n\n\n# -------- 뉴스 크롤링\ndef news_crawl(gi):\n\n\n tot_list = []\n\n for p in range(1):\n # 뉴스 기사 모인 페이지\n url = 'https://m.stock.naver.com/domestic/stock/' + str(gi) + '/news/title' # https://m.stock.naver.com/domestic/stock/003550/total\n #F12누르면 나오는 네트워크상에서 찾아온 경로\n #https://m.stock.naver.com/api/news/stock/005930?pageSize=20&page=1&searchMethod=title_entity_id.basic\n url = \"https://m.stock.naver.com/api/news/stock/\"+str(gi)+\"?pageSize=5&searchMethod=title_entity_id.basic&page=1\"\n res = requests.get(url)\n\n news_list = json.loads(res.text)\n #페이지에서 가져온 전체 뉴스기사를 for문으로 분리\n #print(news_list[0])\n for i, news in enumerate(news_list) :\n #신문사 id\n a=news['items'][0]['officeId']\n #기사 id\n b=news['items'][0]['articleId']\n list = []\n list.append(news['items'][0]['officeName']) #신문사\n list.append(news['items'][0]['datetime'][:8]) #날짜\n list.append(news['items'][0]['title'].replace('"','\\\"')) #제목\n list.append(news['items'][0]['imageOriginLink']) #이미지\n list.append(news['items'][0]['body'].replace('"','\\\"')) # 기사 내용\n list.append('https://m.stock.naver.com/domestic/stock/005930/news/view/'+str(a)+'/'+str(b)) #기사 url\n tot_list.append(list)\n\n news_df = pd.DataFrame(data=tot_list, columns=['offname','rdate','title','imgsrc','content','url'])\n news_df['title'] = news_df['title'].str.replace('&', '&')\n news_df['content'] = news_df['content'].str.replace('&', '&')\n\n #news_df['title'] = [re.sub('[^A-Za-z0-9가-힣]', '' ,s) for s in news_df['title']]\n\n\n #news_df.to_csv('css.csv',index=False)\n return news_df\n\n#co-종목코드\ndef relate_code_crawl(co):\n #연관 종목코드 있는 페이지 불러오기\n url='https://finance.naver.com/item/main.naver?code='+str(co)\n page=pd.read_html(url,encoding='CP949')\n #연관 종목명과 종목코드 뽑아내기(code_list[0]은 '종목명'이어서 제외)\n code_list=page[4].columns.tolist()\n code_list=code_list[1:]\n #종목코드 리스트 반환\n codes=[]\n for word in (code_list):\n codes.append(word[-6:])\n #print(codes)\n return codes\n\n\n# def before_1w_kospi(date):\n# before1w=date-timedelta(days=7)\n# return fdr.DataReader('KS11',before1w)[['Close']]#, fdr.DataReader('KQ11',before1w)\n\ndef invest_opinion(gcode):\n url='https://finance.naver.com/item/coinfo.naver?code='+str(gcode)\n page=pd.read_html(url,encoding='CP949')\n try:\n a,b=page[3][1].tolist()[0][:4].split('.')\n return ((int(a)+int(b)/100)/5)*100 #의견 점수 구한 후 백분율로 다시 변환\n except ValueError:\n return 0.1\n#최상현 함수\ndef crawl_ifrs(gcode):\n url = \"http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A\"+gcode+\"&cID=&MenuYn=Y&ReportGB=&NewMenuID=11&stkGb=701\"\n table_list = pd.read_html(url, encoding='UTF-8')\n\n ifrs = table_list[10]\n\n ifrs = ifrs.fillna('9999999999')\n for i in range(1, 5):\n if ifrs.iloc[:, i].dtype == 'O':\n ifrs.iloc[:, i] = ifrs.iloc[:, i].apply(lambda x: '9999999999' if type(x) == str else x)\n print(ifrs.iloc[:, i])\n ifrs.iloc[:, i] = ifrs.iloc[:, i].astype('float')\n ifrs.iloc[:, i] = 
ifrs.iloc[:, i].apply(lambda x: format(float(x), ','))\n\n ifrs = pd.concat([ifrs.iloc[:, 0], ifrs['Annual']], axis=1)\n ifrs = ifrs.astype(str)\n\n for i in range(1, 5):\n ifrs.iloc[:12, i] = ifrs.iloc[:12, i].apply(lambda x: x[:-2])\n ifrs.iloc[18:21, i] = ifrs.iloc[18:21, i].apply(lambda x: x[:-2])\n ifrs.iloc[23:24, i] = ifrs.iloc[23:24, i].apply(lambda x: x[:-2])\n ifrs = ifrs.replace(['9,999,999,999', '9,999,999,999.0'], ['-', '-'])\n\n ifrs.rename(columns={'IFRS(연결)': ''}, inplace=True)\n ifrs = ifrs.to_html(justify=\"right\", index=False, classes=\"table\")\n ifrs = ifrs.replace('border=\"1\"', 'border=\"0\"')\n pd.options.display.float_format = '{:,.0f}'.format\n ifrs = ifrs.replace('<td>', '<td align=\"right\">')\n ifrs = ifrs.replace('<th>', '<th style=\"text-align: right;\">')\n ifrs = ifrs.replace('halign=\"left\"', 'style=\"text-align: center;\"')\n ifrs = ifrs.replace('class =\"dataframe table\"',\n 'class =\"dataframe table\" style = \"table-layout:fixed;word-break:break-all;\"')\n\n return (ifrs)\n\n\ndef ori_code(yh_code):\n origin_stock=com_df[com_df['yh_code']==yh_code]['stock_code_ori'].values[0]\n return origin_stock\n\n\n\n# 아리마 모델\ndef stock_predict(code,ptype):\n data = stock.get_market_ohlcv_by_date(fromdate=\"20220101\", todate=\"20220222\", ticker=str(code))\n print(data.head())\n data=data[[ptype]]\n y_train=data\n y_test=data\n kpss_diffs = ndiffs(y_train, alpha=0.05, test='kpss', max_d=6)\n adf_diffs = ndiffs(y_train, alpha=0.05, test='adf', max_d=6)\n n_diffs = max(adf_diffs, kpss_diffs)\n\n print(f\"추정된 차수 d = {n_diffs}\")\n model=pm.auto_arima(y_train,d=n_diffs,seasonal=False,trace=True)\n model.fit(y_train)\n print(model.summary())\n def forecast_one_step():\n fc, conf_int = model.predict(n_periods=1 # 한 스텝씩!\n , return_conf_int=True) # 신뢰구간 출력\n return (\n fc.tolist()[0],\n np.asarray(conf_int).tolist()[0]\n )\n forecasts = []\n y_pred = []\n pred_upper = []\n pred_lower = []\n\n for new_ob in y_test[ptype]:\n\n fc, conf = forecast_one_step()\n y_pred.append(int(fc))\n pred_upper.append(conf[1])\n pred_lower.append(conf[0])\n\n ## 모형 업데이트 !!\n model.update(new_ob)\n fc_last = model.predict(n_periods=1 # 한 스텝씩!\n )\n df=pd.DataFrame({\"test\": y_test[ptype], \"pred\": y_pred})\n print(df.tail())\n def MAE(y_test, y_pred):\n return np.mean(np.abs((df['test']-df['pred'])/df['test']))*100\n mae=np.round(MAE(y_test, y_pred).astype('float'),4)\n print(f\"MAE: {MAE(y_test, y_pred):.3f}\")\n price_list=[]\n return int(fc_last),mae\n\n\n\n\n\n",
"import json\nimport numpy as np\nimport pandas as pd\nfrom flask import Flask, make_response, jsonify, request, render_template\nfrom datetime import datetime, date,timedelta\nimport yfinance as yf\nfrom code_cr import *\nfrom pykrx import stock\nimport math\n\n\n\n# ====================================================\n# 데이터\n# ====================================================\ncom_df=pd.read_csv('com_df.csv',\n dtype={'stock_code': 'str', '표준코드': 'str', '단축코드': 'str', 'stock_code_ori':'str'},\n parse_dates=['listed_date', '상장일'])\n\n\n\n# ====================================================\n# 라우터\n# ====================================================\napp = Flask(__name__, template_folder=\"template\", static_folder=\"static\")\n\n\n\n# ====================================================\n# 메인 페이지 by 최상현\n# ====================================================\[email protected]('/')\ndef index():\n\n return render_template(\"index.html\")\n\n\n\n# ====================================================\n# 자동완성 비동기 by 최상현\n# ====================================================\n#---------------- 업체이름을 타이핑할때마다 실시간 비동기로 업체 명단을 가져와서 리턴 -----------\[email protected]('/com_search_ajax', methods=['post'])\ndef com_search_ajax():\n\n str = request.form.get('search_input')\n print(str)\n\n #-----------웹에서 입력한 검색어와 관련된 업체만 가져오기 -----------------\n # +++ 주의! com_df_rm 다시 호출하는 이유 : 검색 시 금융/보험 데이터 제거한 데이터프레임을 불러오기 때문\n com_df_srch = com_df=pd.read_csv('com_df_rm.csv',\n dtype={'stock_code': 'str', '표준코드': 'str', '단축코드': 'str', 'stock_code_ori':'str'},\n parse_dates=['listed_date', '상장일'])\n temp = com_df_srch[(com_df_srch['한글 종목명'].str.contains(str))|(com_df_srch['한글 종목명'].str.contains(str.upper()))][['yh_code', '한글 종목명']].head()\n print(temp.values.tolist())\n return json.dumps( temp.values.tolist() ) #리스트 형태로 데이터 전송\n\n\n\n# ====================================================\n# get 전송한 후 페이지\n# ====================================================\[email protected]('/form_submit_get', methods=[\"get\"])\ndef form_submit_get():\n\n # get으로 전송된 데이터 받기\n hidden_stock_code = request.args.get(\"hidden_stock_code\")\n hidden_corp_name = request.args.get(\"hidden_corp_name\")\n\n\n origin_code=ori_code(hidden_stock_code) #우선주 종목코드를 보통주 종목코드로 바꿔주는 함수\n stock_code= hidden_stock_code[:-3] #yfinance 종목코드를 pykrx종목코드로 바꿔주는 함수\n\n\n\n\n # ----------------재무 정보 시각화 관련 함수------------------ by 이혜린\n radar_label, radar_dict, weather_list, foreign, giguan = relate_radar_weather_data(stock_code=stock_code)\n bar_label, bar_mch_list, bar_dg_list = mch_dg(stock_code=stock_code)\n icons = icon_selection(weather_list)\n icons2 = foreign_giguan([foreign, giguan])\n\n if math.isnan(giguan):\n giguan=0.01\n if math.isnan(foreign):\n foreign=0.01\n\n giguan =format(int(giguan), ',')\n foreign =format(int(foreign), ',')\n\n\n\n # ----------------뉴스 기사 크롤링 관련 함수------------------ by 황지현\n df = news_crawl(origin_code) #우선주말고 보통주로 검색\n json_str = df.to_json(orient=\"values\")\n json_obj = json.loads(json_str)\n code = invest_opinion(origin_code) # 우선주말고 보통주로 검색\n\n\n #-------------------ARIMA 관련 함수------------------------ by 황지현\n low_stock, low_mae = stock_predict(stock_code, '저가')\n high_stock, high_mae = stock_predict(stock_code, '고가')\n close_stock, close_mae = stock_predict(stock_code, '종가')\n mae_mean = ((high_mae + low_mae + close_mae) / 3)\n mae_mean = round(mae_mean, 4)\n\n if close_stock < low_stock:\n low_stock = close_stock\n elif close_stock > high_stock:\n high_stock = close_stock\n\n\n # -----------------재무제표 
크롤링 관련함수------------------- by 최상현\n ifrs=crawl_ifrs(origin_code) #우선주말고 보통주로 검색\n\n\n\n # -----------------경제지수, 기업 주가 관련함수--------------- by 김문식\n chart_res = chart_data(hidden_stock_code)\n f_info = finance_data(hidden_stock_code)\n\n\n\n\n return render_template(\"res.html\", ifrs = ifrs, res_obj=chart_res,\n hidden_corp_name=hidden_corp_name,\n f_info=f_info,\n RD_LABEL_LIST=radar_label,RD_DATA_DICT=radar_dict,\n BAR_LABEL_LIST=bar_label,\n BAR_DATA_LIST_MCH=bar_mch_list,\n BAR_DATA_LIST_DG=bar_dg_list,MY_NEWS=json_obj, MY_CODE=code,\n MY_HIGH=high_stock, MY_LOW=low_stock, MY_CLOSE=close_stock,\n MY_MAE=mae_mean,\n # -------220222(날씨!!!!!!!!!!)\n WEATHER_DATA_LIST=weather_list, # PER PBR ROE EPS BPS\n ICONS=icons,\n FOREIGN=foreign, # 외인 매수\n GIGUAN=giguan, # 기관 매수\n ICONS2=icons2 # 외인, 기관 매수\n )\n\n\n\n\n\n# ====================================================\n# 함수 by KMS\n# ====================================================\n# chart_data : KMS 2022.02.21\n# 사용자가 선택한 날짜에 맞추어 해당 기업의 주가정보를 반영해 주는 함수.\n# 날짜지정을 하지 않을경우 해당기업의 1년 주가정보를 반영.\ndef chart_data(ent, select_date = None):\n ent = ent.split(\".\")[0]\n if (select_date != None):\n ent_df = stock.get_market_ohlcv_by_date(fromdate=select_date[0], todate=select_date[1], ticker=ent)\n\n else:\n e_date = datetime.now()\n s_date = e_date - timedelta(days=30)\n print(f\"s_date .................: {s_date}\")\n ent_df = stock.get_market_ohlcv_by_date(fromdate=s_date, todate=e_date, ticker=ent)\n ent_df = ent_df.reset_index()\n ent_df = ent_df.drop(['시가', '고가', '저가', '거래량'], axis=1)\n ent_df.columns = ['Date', 'Close']\n ent_df['Date'] = ent_df['Date'].astype('str')\n ent_dict = ent_df.to_dict()\n\n dfcp = ent_df.tail(2)\n rate_color = dfcp['Close'].values.tolist()\n ent_dict['eve'] = rate_color[0]\n ent_dict['today'] = rate_color[1]\n if (rate_color[1] - rate_color[0] < 0):\n ent_dict['rate'] = \"fa fa-sort-desc\"\n ent_dict['color'] = 'blue'\n else:\n ent_dict['rate'] = \"fa fa-sort-asc\"\n ent_dict['color'] = 'red'\n\n res = {'ent':ent, 'ent_dict':ent_dict}\n return res\n\n# 비동기통신을 이용하여 사용자가 선택한 날짜를 yfinance에서 요구하는 날짜로 재가공처리해주는 함수.\[email protected]('/calendar_ajax_handle', methods=[\"post\"])\ndef calendar_ajax_handle():\n data = request.form.get(\"prm\")\n ent_name = request.form.get(\"ent\")\n splt_data = data.split(\":\")\n se_list = []\n for my_day in splt_data:\n\n se_list.append(str(datetime.strptime(my_day, \"%m/%d/%Y\").date()))\n print(f'ent_name............. : {ent_name}')\n res = chart_data(ent_name, se_list)\n return res\n\n# 2022-02-21 KMS\n# 기업(stock_code) ,코스피, 코스닥, 나스탁, 다우, S&p500 전날 현재 가격가져오는 함수.\n# 전날 지수와 현재 지수 비교하여 html class에 뿌려줄 data ; [rate, color]\ndef finance_data(stock_code):\n stock_list = ['^KS11','^KQ11', '^IXIC', '^GSPC', '^DJI']\n res_list = {}\n for stock in stock_list:\n yf_df = yf.download(stock ,start = '2022-01-01')\n se_list = round(yf_df.tail(2).reset_index()['Close'].astype('float'), 2)\n if(se_list[1]-se_list[0] < 0):\n se_list['rate'] = \"fa fa-sort-desc\"\n se_list['color'] = 'blue'\n else:\n se_list['rate'] = \"fa fa-sort-asc\"\n se_list['color'] = 'red'\n res_list[stock] = se_list\n return res_list\n\n\n\n\n\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=80, threaded=True)"
] | [
[
"pandas.concat",
"pandas.read_csv",
"numpy.abs",
"numpy.asarray",
"pandas.read_html",
"pandas.DataFrame",
"numpy.round",
"numpy.array"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
donnyyou/centerX | [
"1073753533f26483c3ab053a7d8753708fcacde7"
] | [
"projects/speedup/centerX2onnx.py"
] | [
"from types import MethodType\nimport onnx\nimport torch\nfrom torch.onnx import OperatorExportTypes\nfrom onnxsim import simplify\nimport argparse\nimport io\nimport sys\nimport torch.nn as nn\n\nsys.path.insert(0, '.')\nfrom configs import add_centernet_config\nfrom detectron2.config import get_cfg\nfrom inference.centernet import build_model\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom fvcore.common.file_io import PathManager\n\ndef centerX_forward(self, x):\n x = self.normalizer(x / 255.)\n y = self._forward(x)\n fmap_max = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)(y['cls'])\n keep = (y['cls'] - fmap_max).float() + 1e-9\n keep = nn.ReLU()(keep)\n keep = keep * 1e9\n result = y['cls'] * keep\n ret = [result,y['reg'],y['wh']] ## change dict to list\n return ret\n\ndef load_model(config_file,model_path):\n cfg = get_cfg()\n add_centernet_config(cfg)\n cfg.merge_from_file(config_file)\n forward = {'centerX': centerX_forward}\n\n # model\n model = build_model(cfg)\n model.forward = MethodType(forward['centerX'], model)\n DetectionCheckpointer(model).load(model_path)\n model.eval()\n model.cuda()\n return model\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Convert Pytorch to ONNX model\")\n\n parser.add_argument(\n \"--config-file\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\n \"--model-path\",\n metavar=\"FILE\",\n help=\"path to model\",\n )\n parser.add_argument(\n \"--name\",\n default=\"baseline\",\n help=\"name for converted model\"\n )\n parser.add_argument(\n \"--output\",\n default='onnx_model',\n help='path to save converted onnx model'\n )\n parser.add_argument(\n \"--input_w\",\n default=640,\n type=int,\n help='image_width'\n )\n parser.add_argument(\n \"--input_h\",\n default=384,\n type=int,\n help='image_height'\n )\n return parser\n\n\ndef remove_initializer_from_input(model):\n if model.ir_version < 4:\n print(\n 'Model with ir_version below 4 requires to include initilizer in graph input'\n )\n return\n\n inputs = model.graph.input\n name_to_input = {}\n for input in inputs:\n name_to_input[input.name] = input\n\n for initializer in model.graph.initializer:\n if initializer.name in name_to_input:\n inputs.remove(name_to_input[initializer.name])\n\n return model\n\n\ndef export_onnx_model(model, inputs):\n \"\"\"\n Trace and export a model to onnx format.\n Args:\n model (nn.Module):\n inputs (torch.Tensor): the model will be called by `model(*inputs)`\n Returns:\n an onnx model\n \"\"\"\n assert isinstance(model, torch.nn.Module)\n\n # make sure all modules are in eval mode, onnx may change the training state\n # of the module if the states are not consistent\n def _check_eval(module):\n assert not module.training\n\n model.apply(_check_eval)\n\n # Export the model to ONNX\n with torch.no_grad():\n with io.BytesIO() as f:\n torch.onnx.export(\n model,\n inputs,\n f,\n operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,\n # verbose=True, # NOTE: uncomment this for debugging\n # export_params=True,\n )\n onnx_model = onnx.load_from_string(f.getvalue())\n\n # Apply ONNX's Optimization\n all_passes = onnx.optimizer.get_available_passes()\n passes = [\"extract_constant_to_initializer\", \"eliminate_unused_initializer\", \"fuse_bn_into_conv\"]\n assert all(p in all_passes for p in passes)\n onnx_model = onnx.optimizer.optimize(onnx_model, passes)\n return onnx_model\n\nif __name__ == '__main__':\n args = get_parser().parse_args()\n model = load_model(args.config_file, 
args.model_path)\n\n inputs = torch.randn(1, 3, args.input_h, args.input_w).cuda()\n onnx_model = export_onnx_model(model, inputs)\n\n model_simp, check = simplify(onnx_model)\n\n model_simp = remove_initializer_from_input(model_simp)\n\n assert check, \"Simplified ONNX model could not be validated\"\n\n PathManager.mkdirs(args.output)\n\n onnx.save_model(model_simp, f\"{args.output}/{args.name}.onnx\")\n\n print(f\"Export onnx model in {args.output} successfully!\")\n"
] | [
[
"torch.onnx.export",
"torch.randn",
"torch.nn.MaxPool2d",
"torch.no_grad",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WHATDOESTHEFOXSAY2U/Colab_Train | [
"30fdf2f9f72fbef51447ecc91070189ccca301b2"
] | [
"callbacks.py"
] | [
"\"\"\"\nContains custom callbacks.\n\"\"\"\n\nfrom constants import minimum_scores, maximum_scores\nimport constants\nimport datetime\nimport json\nfrom keras.callbacks import Callback, ModelCheckpoint\nimport numpy as np\nimport os\nfrom sklearn.metrics import cohen_kappa_score\nfrom util import process_data, create_folder\n\nclass QWKScore(Callback):\n def __init__(self, essays, save_to_file=True, print_to_screen=True):\n super()\n self.essays = essays\n self.save_to_file = save_to_file\n self.print_to_screen = print_to_screen\n\n def on_epoch_end(self, epoch, logs={}):\n # for each essay set calculate the QWK scores\n qwk_scores = []\n number_essays = []\n\n if self.print_to_screen:\n print(\"\\nQWK Scores\")\n\n for essay_set in range(1, 9):\n essays_in_set = self.essays[self.essays['essay_set'] == essay_set]\n X, y = process_data(essays_in_set)\n y_true = essays_in_set['domain1_score'].values\n\n normalised_prediction = self.model.predict(X)\n normalised_prediction = np.array(normalised_prediction)\n y_pred = np.around((normalised_prediction * (maximum_scores[essay_set] - minimum_scores[essay_set])) + minimum_scores[essay_set])\n\n qwk_score = cohen_kappa_score(y_true, y_pred, weights='quadratic')\n qwk_scores.append(qwk_score)\n number_essays.append(len(essays_in_set))\n\n if self.print_to_screen:\n print(\"Set {}: {:.2f}\".format(essay_set, qwk_score), end=' ')\n\n qwk_scores = np.array(qwk_scores)\n number_essays = np.array(number_essays)\n\n weighted_qwk_score = np.sum(qwk_scores * number_essays) / np.sum(number_essays)\n if self.print_to_screen:\n print('\\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score))\n\n if self.save_to_file:\n summary = \"Epoch \" + str(epoch + 1)\n log_values = \"\\n\"\n for key, value in logs.items():\n log_values += \"{}: {:.4f} \".format(key, value)\n individual_qwk_scores = \"\\n\"\n for essay_set in range(8):\n individual_qwk_scores += \"Set {}: {:.2f} \".format(essay_set + 1, qwk_scores[essay_set])\n summary = summary + log_values + individual_qwk_scores\n summary += '\\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score)\n summary += '\\n\\n'\n with open(os.path.join(constants.SAVE_DIR, \"scores.txt\"), \"a\") as f:\n f.write(summary)\n\nclass SaveModel(ModelCheckpoint):\n \"\"\"\n Wrapper of Model Checkpoint class.\n \"\"\"\n def __init__(self, directory, filename, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1):\n \n # make folder with the current time as name\n now = datetime.datetime.now()\n current_time = \"{}_{}_{}_{}_{}_{}\".format(now.day, now.month, now.year, now.hour, now.minute, now.second)\n constants.SAVE_DIR = os.path.join(directory, current_time)\n\n create_folder(constants.SAVE_DIR)\n\n ModelCheckpoint.__init__(self, os.path.join(constants.SAVE_DIR, filename), monitor=monitor, save_best_only=save_best_only, save_weights_only=save_weights_only, mode=mode, period=period)\n\n def on_train_begin(self, logs=None):\n # save model architecture.\n parsed = json.loads(self.model.to_json())\n with open(os.path.join(constants.SAVE_DIR, 'model.txt'), 'w') as file:\n file.write(json.dumps(parsed, indent=4))\n"
] | [
[
"numpy.around",
"numpy.array",
"numpy.sum",
"sklearn.metrics.cohen_kappa_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hujunxianligong/Graph-U-Nets | [
"d1a483400131fbe75a55cff27439585c62c4a575"
] | [
"main.py"
] | [
"import sys\nimport os\nimport torch\nimport random\nimport numpy as np\nfrom tqdm import tqdm\nimport torch.nn as nn\nimport torch.optim as optim\nimport math\nfrom network import GUNet\nfrom mlp_dropout import MLPClassifier\nfrom sklearn import metrics\nfrom util import cmd_args, load_data\n\n\nsys.path.append(\n '%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname(\n os.path.realpath(__file__)))\n\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n model = GUNet\n\n self.s2v = model(\n latent_dim=cmd_args.latent_dim,\n output_dim=cmd_args.out_dim,\n num_node_feats=cmd_args.feat_dim+cmd_args.attr_dim,\n num_edge_feats=0,\n k=cmd_args.sortpooling_k)\n out_dim = cmd_args.out_dim\n if out_dim == 0:\n out_dim = self.s2v.dense_dim\n self.mlp = MLPClassifier(\n input_size=out_dim, hidden_size=cmd_args.hidden,\n num_class=cmd_args.num_class, with_dropout=cmd_args.dropout)\n\n def PrepareFeatureLabel(self, batch_graph):\n labels = torch.LongTensor(len(batch_graph))\n n_nodes = 0\n\n if batch_graph[0].node_tags is not None:\n node_tag_flag = True\n concat_tag = []\n else:\n node_tag_flag = False\n\n if batch_graph[0].node_features is not None:\n node_feat_flag = True\n concat_feat = []\n else:\n node_feat_flag = False\n\n for i in range(len(batch_graph)):\n labels[i] = batch_graph[i].label\n n_nodes += batch_graph[i].num_nodes\n if node_tag_flag:\n concat_tag += batch_graph[i].node_tags\n if node_feat_flag:\n tmp = torch.from_numpy(\n batch_graph[i].node_features).type('torch.FloatTensor')\n concat_feat.append(tmp)\n\n if node_tag_flag:\n concat_tag = torch.LongTensor(concat_tag).view(-1, 1)\n node_tag = torch.zeros(n_nodes, cmd_args.feat_dim)\n node_tag.scatter_(1, concat_tag, 1)\n\n if node_feat_flag:\n node_feat = torch.cat(concat_feat, 0)\n\n if node_feat_flag and node_tag_flag:\n # concatenate one-hot embedding of node tags (node labels)\n # with continuous node features\n node_feat = torch.cat([node_tag.type_as(node_feat), node_feat], 1)\n elif node_feat_flag is False and node_tag_flag:\n node_feat = node_tag\n elif node_feat_flag and node_tag_flag is False:\n pass\n else:\n node_feat = torch.ones(n_nodes, 1)\n\n if cmd_args.mode == 'gpu':\n node_feat = node_feat.cuda()\n labels = labels.cuda()\n\n return node_feat, labels\n\n def forward(self, batch_graph):\n node_feat, labels = self.PrepareFeatureLabel(batch_graph)\n embed = self.s2v(batch_graph, node_feat, None)\n\n return self.mlp(embed, labels)\n\n def output_features(self, batch_graph):\n node_feat, labels = self.PrepareFeatureLabel(batch_graph)\n embed = self.s2v(batch_graph, node_feat, None)\n return embed, labels\n\n\ndef loop_dataset(g_list, classifier, sample_idxes, optimizer=None,\n bsize=cmd_args.batch_size):\n total_loss = []\n total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize # noqa\n pbar = tqdm(range(total_iters), unit='batch')\n all_targets = []\n all_scores = []\n\n n_samples = 0\n for pos in pbar:\n selected_idx = sample_idxes[pos * bsize: (pos + 1) * bsize]\n\n batch_graph = [g_list[idx] for idx in selected_idx]\n targets = [g_list[idx].label for idx in selected_idx]\n all_targets += targets\n logits, loss, acc = classifier(batch_graph)\n all_scores.append(logits[:, 1].detach()) # for binary classification\n\n if optimizer is not None:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss = loss.data.cpu().numpy()\n pbar.set_description('loss: %0.5f acc: %0.5f' % (loss, acc))\n\n total_loss.append(np.array([loss, acc]) * 
len(selected_idx))\n\n n_samples += len(selected_idx)\n if optimizer is None:\n assert n_samples == len(sample_idxes)\n total_loss = np.array(total_loss)\n avg_loss = np.sum(total_loss, 0) / n_samples\n all_scores = torch.cat(all_scores).cpu().numpy()\n\n # np.savetxt('test_scores.txt', all_scores) # output test predictions\n\n all_targets = np.array(all_targets)\n fpr, tpr, _ = metrics.roc_curve(all_targets, all_scores, pos_label=1)\n auc = metrics.auc(fpr, tpr)\n avg_loss = np.concatenate((avg_loss, [auc]))\n\n return avg_loss\n\n\nif __name__ == '__main__':\n print(cmd_args)\n random.seed(cmd_args.seed)\n np.random.seed(cmd_args.seed)\n torch.manual_seed(cmd_args.seed)\n\n train_graphs, test_graphs = load_data()\n print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))\n\n if cmd_args.sortpooling_k <= 1:\n num_nodes_list = sorted([\n g.num_nodes for g in train_graphs + test_graphs])\n cmd_args.sortpooling_k = num_nodes_list[\n int(math.ceil(cmd_args.sortpooling_k * len(num_nodes_list))) - 1]\n cmd_args.sortpooling_k = max(10, cmd_args.sortpooling_k)\n print('k used in SortPooling is: ' + str(cmd_args.sortpooling_k))\n\n classifier = Classifier()\n if cmd_args.mode == 'gpu':\n classifier = classifier.cuda()\n\n optimizer = optim.Adam(\n classifier.parameters(), lr=cmd_args.learning_rate, amsgrad=True,\n weight_decay=0.0008)\n\n train_idxes = list(range(len(train_graphs)))\n best_loss = None\n max_acc = 0.0\n for epoch in range(cmd_args.num_epochs):\n random.shuffle(train_idxes)\n classifier.train()\n avg_loss = loop_dataset(\n train_graphs, classifier, train_idxes, optimizer=optimizer)\n if not cmd_args.printAUC:\n avg_loss[2] = 0.0\n print('\\033[92maverage training of epoch %d: loss %.5f acc %.5f auc %.5f\\033[0m' % (epoch, avg_loss[0], avg_loss[1], avg_loss[2])) # noqa\n\n classifier.eval()\n test_loss = loop_dataset(\n test_graphs, classifier, list(range(len(test_graphs))))\n if not cmd_args.printAUC:\n test_loss[2] = 0.0\n print('\\033[93maverage test of epoch %d: loss %.5f acc %.5f auc %.5f\\033[0m' % (epoch, test_loss[0], test_loss[1], test_loss[2])) # noqa\n max_acc = max(max_acc, test_loss[1])\n\n with open('acc_result_%s.txt' % cmd_args.data, 'a+') as f:\n # f.write(str(test_loss[1]) + '\\n')\n f.write(str(max_acc) + '\\n')\n\n if cmd_args.printAUC:\n with open('auc_results.txt', 'a+') as f:\n f.write(str(test_loss[2]) + '\\n')\n\n if cmd_args.extract_features:\n features, labels = classifier.output_features(train_graphs)\n labels = labels.type('torch.FloatTensor')\n np.savetxt('extracted_features_train.txt', torch.cat(\n [labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(),\n '%.4f')\n features, labels = classifier.output_features(test_graphs)\n labels = labels.type('torch.FloatTensor')\n np.savetxt('extracted_features_test.txt', torch.cat(\n [labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(),\n '%.4f')\n"
] | [
[
"torch.LongTensor",
"torch.ones",
"numpy.random.seed",
"torch.zeros",
"torch.cat",
"torch.manual_seed",
"torch.from_numpy",
"sklearn.metrics.roc_curve",
"numpy.concatenate",
"sklearn.metrics.auc",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
carlsummer/python_developer_tools | [
"fc0dcf5c4ef088e2e535206dc82f09bbfd01f280",
"a8c4365b7cc601cda55648cdfd8c0cb1faae132f",
"fc0dcf5c4ef088e2e535206dc82f09bbfd01f280",
"fc0dcf5c4ef088e2e535206dc82f09bbfd01f280",
"a8c4365b7cc601cda55648cdfd8c0cb1faae132f",
"fc0dcf5c4ef088e2e535206dc82f09bbfd01f280",
"fc0dcf5c4ef088e2e535206dc82f09bbfd01f280",
"fc0dcf5c4ef088e2e535206dc82f09bbfd01f280"
] | [
"python_developer_tools/cv/classes/ResNeXt.py",
"python_developer_tools/machinelearning/lgb/train.py",
"python_developer_tools/cv/bases/conv/DCNv2/DCNv2.py",
"python_developer_tools/cv/detection/CenterNet2/changecenternet2.py",
"python_developer_tools/cv/loss/classes/cross_entropy.py",
"python_developer_tools/cv/classes/VGGNet.py",
"test/cv/loss/train_focalloss.py",
"python_developer_tools/cv/bases/attentions/SimAM-master/mmdetection/mmdet/models/backbones/resnet_simam.py"
] | [
"import torch\nimport torch.nn as nn\nimport torchvision\n\n\nclass ResNeXtBlock(nn.Module):\n def __init__(self,in_places,places, stride=1,downsampling=False, expansion = 2, cardinality=32):\n super(ResNeXtBlock,self).__init__()\n self.expansion = expansion\n self.downsampling = downsampling\n\n self.bottleneck = nn.Sequential(\n nn.Conv2d(in_channels=in_places, out_channels=places, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(places),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=places, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False, groups=cardinality),\n nn.BatchNorm2d(places),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=places, out_channels=places * self.expansion, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(places * self.expansion),\n )\n\n if self.downsampling:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_channels=in_places, out_channels=places * self.expansion, kernel_size=1, stride=stride,bias=False),\n nn.BatchNorm2d(places * self.expansion)\n )\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n residual = x\n out = self.bottleneck(x)\n\n if self.downsampling:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n return out\n\n\nif __name__ =='__main__':\n model = ResNeXtBlock(in_places=256, places=128)\n print(model)\n\n input = torch.randn(1,256,64,64)\n out = model(input)\n print(out.shape)",
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn import svm\nimport pandas as pd\nimport lightgbm as lgb\nfrom sklearn.metrics import classification_report, f1_score, mean_absolute_error\nfrom sklearn.model_selection import StratifiedKFold, KFold, ShuffleSplit\nimport pickle\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\n\n\ndef train_test_split_fun(alldataX, alldataY):\n return train_test_split(alldataX, alldataY, test_size=0.2, random_state=1024)\n\n\nif __name__ == '__main__':\n csvpath = r\"datasets/datasets.csv\"\n csvdf = pd.read_csv(csvpath, header=0)\n\n alldataX = csvdf[\n [\"calctype\", \"col\", \"row\", \"convert_rate\", \"iscoincidence\", \"sobeld\", \"jlxd\", \"a1\", \"a2\", \"a3\", \"a4\", \"a5\",\n \"a6\", \"a7\", \"a8\",\n \"s1\", \"s2\", \"s3\", \"s4\", \"s5\", \"s6\", \"s7\", \"s8\", \"s9\", \"s10\", \"s11\", \"s12\", \"s13\", \"s14\", \"s15\", \"s16\", \"s17\",\n \"s18\", \"s19\", \"s20\"]]\n alldataY = csvdf[\"Y\"]\n # 0.04288846720587341\n # 0.04579131922898897\n # alldataX.iloc[:, -20:] = alldataX.apply(lambda x: x.iloc[-20:] / np.max(x.iloc[-20:].values), axis=1)\n # alldataX[\"col\"] =alldataX.apply(lambda x: x[\"col\"] /24, axis=1)\n # alldataX[\"row\"] =alldataX.apply(lambda x: x[\"row\"] /6, axis=1)\n\n train_X, test_X, train_Y, test_Y = train_test_split_fun(alldataX.values, alldataY.values)\n\n # build_model()\n clf = lgb.LGBMRegressor()\n # clf.fit(train_X, train_Y,eval_set=[(test_X, test_Y)])\n clf.fit(alldataX.values, alldataY.values)\n with open('lgb.pickle', 'wb') as f:\n pickle.dump(clf, f)\n\n train_predict = clf.predict(train_X)\n score = mean_absolute_error(train_predict, train_Y)\n print(score)\n\n test_predict = clf.predict(test_X)\n score = mean_absolute_error(test_predict, test_Y)\n print(score)\n",
"# !/usr/bin/env python\n# -- coding: utf-8 --\n# @Author zengxiaohui\n# Datatime:5/12/2021 9:00 PM\n# @File:DcnV2\n\"\"\"\nconda create -n DCNV2 python=3.8\nconda activate DCNV2\ngit clone https://github.com/jinfagang/DCNv2_latest.git\ncd DCNv2_latest/\npip install torch==1.6.0\npip install torchvision==0.7.0\npython3 setup.py build develop\n\n./make.sh # build\n/home/deploy/anaconda3/envs/yolov5_py38_cu102_conda/lib/python3.8/site-packages/torch/utils/cpp_extension.py\n如果报错subprocess.CalledProcessError: Command '['ninja', '-v']' returned non-zero exit status 1.\n那么将命令command = ['ninja', '-v']改为command = ['ninja', '-V']\n\n如果报错:\ng++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/vision.o: No such file or directory\ng++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cpu/dcn_v2_cpu.o: No such file or directory\ng++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cpu/dcn_v2_im2col_cpu.o: No such file or directory\ng++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cpu/dcn_v2_psroi_pooling_cpu.o: No such file or directory\ng++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cuda/dcn_v2_cuda.o: No such file or directory\ng++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cuda/dcn_v2_im2col_cuda.o: No such file or directory\ng++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cuda/dcn_v2_psroi_pooling_cuda.o: No such file or directory\n那么:\npython3 setup.py build develop\n\npython testcpu.py # run examples and gradient check on cpu\npython testcuda.py # run examples and gradient check on gpu\n\"\"\"\n\n# An Example\n# deformable conv\nimport torch\nfrom dcn_v2 import DCN\ninput = torch.randn(2, 64, 128, 128).cuda()\n# wrap all things (offset and mask) in DCN\ndcn = DCN(64, 64, kernel_size=(3,3), stride=1, padding=1, deformable_groups=2).cuda()\noutput = dcn(input)\nprint(output.shape)\n\n# deformable roi pooling\nfrom dcn_v2 import DCNPooling\ninput = torch.randn(2, 32, 64, 64).cuda()\nbatch_inds = torch.randint(2, (20, 1)).cuda().float()\nx = torch.randint(256, (20, 1)).cuda().float()\ny = torch.randint(256, (20, 1)).cuda().float()\nw = torch.randint(64, (20, 1)).cuda().float()\nh = torch.randint(64, (20, 1)).cuda().float()\nrois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)\n\n# mdformable pooling (V2)\n# wrap all things (offset and mask) in DCNPooling\ndpooling = DCNPooling(spatial_scale=1.0 / 4,\n pooled_size=7,\n output_dim=32,\n no_trans=False,\n group_size=1,\n trans_std=0.1).cuda()\n\ndout = dpooling(input, rois)\nprint(dout.shape)",
"import torch\nimport numpy as np\nimport pickle\n\n\ndef centernet2ModelToCustomerModel(num_class=10, model_path='models/CenterNet2_R50_1x.pth', model_save_dir=\"models\"):\n # 将centernet2的模型转换为自己的训练模型\n pretrained_weights = torch.load(model_path)\n pretrained_weights['iteration'] = 0\n\n pretrained_weights['model'][\"roi_heads.box_predictor.0.cls_score.weight\"].resize_(num_class + 1, 1024)\n pretrained_weights['model'][\"roi_heads.box_predictor.0.cls_score.bias\"].resize_(num_class + 1)\n pretrained_weights['model'][\"roi_heads.box_predictor.1.cls_score.weight\"].resize_(num_class + 1, 1024)\n pretrained_weights['model'][\"roi_heads.box_predictor.1.cls_score.bias\"].resize_(num_class + 1)\n pretrained_weights['model'][\"roi_heads.box_predictor.2.cls_score.weight\"].resize_(num_class + 1, 1024)\n pretrained_weights['model'][\"roi_heads.box_predictor.2.cls_score.bias\"].resize_(num_class + 1)\n\n torch.save(pretrained_weights, \"{}/CenterNet2_{}.pth\".format(model_save_dir, num_class))\n\nif __name__ == '__main__':\n centernet2ModelToCustomerModel(model_path=\"/home/zengxh/workspace/CenterNet2/projects/CenterNet2/models/CenterNet2_R50_1x.pth\", model_save_dir=\"/home/zengxh/workspace/CenterNet2/projects/CenterNet2/models\")",
"# !/usr/bin/env python\n# -- coding: utf-8 --\n# @Author zengxiaohui\n# Datatime:5/18/2021 4:48 PM\n# @File:cross_entropy.py\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom python_developer_tools.cv.loss.classes.LabelSmoothingCrossEntropy import LabelSmoothingCrossEntropy\n\n\ndef sigmoid(x):\n return (1 + (-x).exp()).reciprocal()\n\n\ndef binary_cross_entropy(pred, y):\n return -(pred.log() * y + (1 - y) * (1 - pred).log()).mean()\n\n\nclass SoftTargetCrossEntropy(nn.Module):\n\n def __init__(self):\n super(SoftTargetCrossEntropy, self).__init__()\n\n def forward(self, x, target):\n loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)\n return loss.mean()\n\nclass JsdCrossEntropy(nn.Module):\n \"\"\" Jensen-Shannon Divergence + Cross-Entropy Loss\n Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py\n From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty -\n https://arxiv.org/abs/1912.02781\n Hacked together by / Copyright 2020 Ross Wightman\n \"\"\"\n def __init__(self, num_splits=3, alpha=12, smoothing=0.1):\n super().__init__()\n self.num_splits = num_splits\n self.alpha = alpha\n if smoothing is not None and smoothing > 0:\n self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing)\n else:\n self.cross_entropy_loss = torch.nn.CrossEntropyLoss()\n\n def __call__(self, output, target):\n split_size = output.shape[0] // self.num_splits\n assert split_size * self.num_splits == output.shape[0]\n logits_split = torch.split(output, split_size)\n\n # Cross-entropy is only computed on clean images\n loss = self.cross_entropy_loss(logits_split[0], target[:split_size])\n probs = [F.softmax(logits, dim=1) for logits in logits_split]\n\n # Clamp mixture distribution to avoid exploding KL divergence\n logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log()\n loss += self.alpha * sum([F.kl_div(\n logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs)\n return loss",
"import torch\nimport torch.nn as nn\nimport torchvision\n\ndef Conv3x3BNReLU(in_channels,out_channels):\n return nn.Sequential(\n nn.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=3,stride=1,padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU6(inplace=True)\n )\n\nclass VGG(nn.Module):\n def __init__(self, block_nums,num_classes=1000):\n super(VGG, self).__init__()\n\n self.stage1 = self._make_layers(in_channels=3, out_channels=64, block_num=block_nums[0])\n self.stage2 = self._make_layers(in_channels=64, out_channels=128, block_num=block_nums[1])\n self.stage3 = self._make_layers(in_channels=128, out_channels=256, block_num=block_nums[2])\n self.stage4 = self._make_layers(in_channels=256, out_channels=512, block_num=block_nums[3])\n self.stage5 = self._make_layers(in_channels=512, out_channels=512, block_num=block_nums[4])\n\n self.classifier = nn.Sequential(\n nn.Linear(in_features=512*7*7,out_features=4096),\n nn.Dropout(p=0.2),\n nn.Linear(in_features=4096, out_features=4096),\n nn.Dropout(p=0.2),\n nn.Linear(in_features=4096, out_features=num_classes)\n )\n\n self._init_params()\n\n def _make_layers(self, in_channels, out_channels, block_num):\n layers = []\n layers.append(Conv3x3BNReLU(in_channels,out_channels))\n for i in range(1,block_num):\n layers.append(Conv3x3BNReLU(out_channels,out_channels))\n layers.append(nn.MaxPool2d(kernel_size=2,stride=2, ceil_mode=False))\n return nn.Sequential(*layers)\n\n def _init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n x = self.stage1(x)\n x = self.stage2(x)\n x = self.stage3(x)\n x = self.stage4(x)\n x = self.stage5(x)\n x = x.view(x.size(0),-1)\n out = self.classifier(x)\n return out\n\ndef VGG16():\n block_nums = [2, 2, 3, 3, 3]\n model = VGG(block_nums)\n return model\n\ndef VGG19():\n block_nums = [2, 2, 4, 4, 4]\n model = VGG(block_nums)\n return model\n\nif __name__ == '__main__':\n model = VGG16()\n print(model)\n torchvision.models.vgg16_bn()\n\n input = torch.randn(1,3,224,224)\n out = model(input)\n print(out.shape)\n\n",
"# !/usr/bin/env python\n# -- coding: utf-8 --\n# @Author zengxiaohui\n# Datatime:8/13/2021 11:20 AM\n# @File:train_cifar10\nimport os\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torch.nn as nn\nfrom tqdm import tqdm\n\nfrom python_developer_tools.cv.classes.transferTorch import shufflenet_v2_x0_5\nfrom python_developer_tools.cv.loss.focalloss import FocalLoss2\nfrom python_developer_tools.cv.utils.torch_utils import init_seeds\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nif __name__ == '__main__':\n #41.189999 %\n root_dir = \"/home/zengxh/datasets\"\n # os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n epochs = 50\n batch_size = 1024\n num_workers = 8\n classes = 10\n\n init_seeds(1024)\n\n trainset = torchvision.datasets.CIFAR10(root=root_dir, train=True, download=True, transform=transform)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers,\n pin_memory=True)\n\n testset = torchvision.datasets.CIFAR10(root=root_dir, train=False, download=True, transform=transform)\n testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n\n model = shufflenet_v2_x0_5(classes, True)\n model.cuda()\n model.train()\n\n criterion = FocalLoss2()\n # SGD with momentum\n optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)\n\n for epoch in range(epochs):\n train_loss = 0.0\n for i, (inputs, labels) in tqdm(enumerate(trainloader)):\n inputs, labels = inputs.cuda(), labels.cuda()\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n outputs = model(inputs)\n # loss\n loss = criterion(outputs, labels)\n # backward\n loss.backward()\n # update weights\n optimizer.step()\n\n # print statistics\n train_loss += loss\n\n scheduler.step()\n print('%d/%d loss: %.6f' % (epochs, epoch + 1, train_loss / len(trainset)))\n\n correct = 0\n model.eval()\n for j, (images, labels) in tqdm(enumerate(testloader)):\n outputs = model(images.cuda())\n _, predicted = torch.max(outputs.data, 1)\n correct += (predicted.cpu() == labels).sum()\n print('Accuracy of the network on the 10000 test images: %.6f %%' % (100 * correct / len(testset)))\n",
"import torch.nn as nn\nimport functools\n\nfrom mmcv.cnn import constant_init, kaiming_init\nfrom mmcv.runner import load_checkpoint\nfrom mmdet.utils import get_root_logger\nfrom torch.nn.modules.batchnorm import _BatchNorm\nfrom ..builder import BACKBONES\nfrom .attentions import simam_module\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, \n inplanes, \n planes, \n stride=1, \n downsample=None, \n groups=1,\n base_width=64, \n dilation=1,\n norm_layer=None,\n attention_module=None):\n\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n if attention_module is not None:\n if type(attention_module) == functools.partial:\n module_name = attention_module.func.get_module_name()\n else:\n module_name = attention_module.get_module_name()\n\n if module_name == \"simam\":\n self.conv2 = nn.Sequential(\n self.conv2,\n attention_module(planes)\n )\n else:\n self.bn2 = nn.Sequential(\n self.bn2, \n attention_module(planes)\n )\n \n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out) \n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n\n expansion = 4\n\n def __init__(self, \n inplanes, \n planes, \n stride=1, \n downsample=None, \n groups=1,\n base_width=64, \n dilation=1, \n norm_layer=None, \n attention_module=None):\n\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n if attention_module is not None:\n if type(attention_module) == functools.partial:\n module_name = attention_module.func.get_module_name()\n else:\n module_name = attention_module.get_module_name()\n\n if module_name == \"simam\":\n self.conv2 = nn.Sequential(\n self.conv2,\n attention_module(width)\n )\n else:\n self.bn3 = nn.Sequential(\n self.bn3, \n attention_module(planes * self.expansion)\n )\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = 
self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out) \n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\[email protected]_module()\nclass ResNetAM(nn.Module):\n\n arch_settings = {\n 18: (BasicBlock, (2, 2, 2, 2)),\n 34: (BasicBlock, (3, 4, 6, 3)),\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self, \n depth,\n groups=1, \n width_per_group=64, \n replace_stride_with_dilation=None,\n norm_layer=None, \n norm_eval=True,\n frozen_stages=-1,\n attention_type=\"none\",\n attention_param=None,\n zero_init_residual=False):\n super(ResNetAM, self).__init__()\n if depth not in self.arch_settings:\n raise KeyError(f'invalid depth {depth} for resnet')\n\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n self.norm_eval = norm_eval\n self.frozen_stages = frozen_stages\n self.zero_init_residual = zero_init_residual\n block, stage_blocks = self.arch_settings[depth]\n\n if attention_type == \"simam\":\n attention_module = functools.partial(simam_module, e_lambda=attention_param)\n else:\n attention_module = None\n\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu1 = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1 = self._make_layer(block, 64, stage_blocks[0], \n attention_module=attention_module)\n\n self.layer2 = self._make_layer(block, 128, stage_blocks[1], stride=2,\n dilate=replace_stride_with_dilation[0],\n attention_module=attention_module)\n\n self.layer3 = self._make_layer(block, 256, stage_blocks[2], stride=2,\n dilate=replace_stride_with_dilation[1],\n attention_module=attention_module)\n\n self.layer4 = self._make_layer(block, 512, stage_blocks[3], stride=2,\n dilate=replace_stride_with_dilation[2],\n attention_module=attention_module)\n\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, 1000)\n\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False, attention_module=None):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer, attention_module))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer, attention_module=attention_module))\n\n return 
nn.Sequential(*layers)\n\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n self.bn1.eval()\n for m in [self.conv1, self.bn1]:\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, f'layer{i}')\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights in backbone.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n\n self.fc = None\n self.avgpool = None\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n\n if self.zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n constant_init(m.norm3, 0)\n elif isinstance(m, BasicBlock):\n constant_init(m.norm2, 0)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x):\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n x = self.maxpool(x)\n\n outs = []\n\n x = self.layer1(x)\n outs.append(x)\n\n x = self.layer2(x)\n outs.append(x)\n\n x = self.layer3(x)\n outs.append(x)\n \n x = self.layer4(x)\n outs.append(x)\n\n return tuple(outs)\n\n def train(self, mode=True):\n \"\"\"Convert the model into training mode while keep normalization layer\n freezed.\"\"\"\n super(ResNetAM, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()"
] | [
[
"torch.randn",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
],
[
"sklearn.metrics.mean_absolute_error",
"pandas.read_csv",
"sklearn.model_selection.train_test_split"
],
[
"torch.randn",
"torch.randint",
"torch.cat"
],
[
"torch.load"
],
[
"torch.nn.functional.kl_div",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.split",
"torch.stack"
],
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.ReLU6",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_"
],
[
"torch.utils.data.DataLoader",
"torch.max",
"torch.optim.lr_scheduler.CosineAnnealingLR"
],
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
edornd/multimodal-icl | [
"f79bfa73665db471c12ee9cb57bbee1bcabb0467"
] | [
"saticl/training.py"
] | [
"from itertools import chain\nfrom pathlib import Path\nfrom typing import Tuple\n\nimport torch\nfrom accelerate import Accelerator\nfrom torch.utils.data import DataLoader\n\nfrom saticl.config import Configuration, SSLConfiguration\nfrom saticl.datasets.icl import ICLDataset\nfrom saticl.datasets.transforms import invariance_transforms, inverse_transform, ssl_transforms\nfrom saticl.datasets.wrappers import SSLDataset\nfrom saticl.logging.tensorboard import TensorBoardLogger\nfrom saticl.losses.regularization import AugmentationInvariance\nfrom saticl.models.icl import ICLSegmenter\nfrom saticl.prepare import prepare_dataset, prepare_metrics, prepare_metrics_ssl, prepare_model, prepare_model_ssl\nfrom saticl.tasks import Task\nfrom saticl.trainer.base import Trainer\nfrom saticl.trainer.callbacks import Checkpoint, DisplaySamples, EarlyStopping, EarlyStoppingCriterion\nfrom saticl.trainer.invariance import AugInvarianceTrainer\nfrom saticl.trainer.ssl import SSLStage, SSLTrainer\nfrom saticl.utils.common import flatten_config, get_logger, git_revision_hash, store_config\nfrom saticl.utils.ml import checkpoint_path, init_experiment, seed_everything, seed_worker\n\n\nLOG = get_logger(__name__)\n\n\ndef init_from_previous_step(config: Configuration, new_model: ICLSegmenter, old_model: ICLSegmenter,\n model_folder: Path, task: Task) -> Tuple[ICLSegmenter, ICLSegmenter]:\n if task.step == 0:\n LOG.info(\"Step 0: training from scratch without old model\")\n return new_model, old_model\n\n LOG.info(\"Loading checkpoint from step: %d\", task.step - 1)\n if config.task.step_checkpoint is not None:\n ckpt_path = Path(config.task.step_checkpoint)\n else:\n ckpt_path = checkpoint_path(model_folder, task_name=task.name, step=task.step - 1)\n assert ckpt_path.exists() and ckpt_path.is_file(), f\"Checkpoint for step {task.step-1} not found at {str(ckpt_path)}\"\n\n checkpoint = torch.load(str(ckpt_path), map_location=\"cpu\")\n # load checkpoint into the new model, without strict matching because of ICL heads\n new_model.load_state_dict(checkpoint, strict=False)\n if config.model.init_balanced:\n new_model.init_classifier()\n # load the same checkpoint into the old model, this time strict since it's the very same\n old_model.load_state_dict(checkpoint, strict=True)\n old_model.freeze()\n old_model.eval()\n del checkpoint\n return new_model, old_model\n\n\ndef train(config: Configuration):\n # assertions before starting\n assert config.name is not None or config.task.step == 0, \"Specify the experiment name with ICL steps >= 1!\"\n assert torch.backends.cudnn.enabled, \"AMP requires CUDNN backend to be enabled.\"\n\n # prepare accelerator ASAP\n accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)\n\n # Create the directory tree:\n # outputs\n # |-- dataset\n # |--task_name\n # |-- exp_name\n # |-- models\n # |-- logs\n accelerator.wait_for_everyone()\n log_name = f\"output-{config.task.step}.log\"\n exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)\n config_path = out_folder / f\"segmenter-config-s{config.task.step}.yaml\"\n LOG.info(\"Run started\")\n LOG.info(\"Experiment ID: %s\", exp_id)\n LOG.info(\"Output folder: %s\", out_folder)\n LOG.info(\"Models folder: %s\", model_folder)\n LOG.info(\"Logs folder: %s\", logs_folder)\n LOG.info(\"Configuration: %s\", config_path)\n\n # seeding everything\n LOG.info(\"Using seed: %d\", config.seed)\n seed_everything(config.seed)\n # prepare datasets\n LOG.info(\"Loading datasets...\")\n 
train_set, valid_set = prepare_dataset(config=config, partial_transforms=False)\n LOG.info(\"Full sets - train set: %d samples, validation set: %d samples\", len(train_set), len(valid_set))\n\n add_background = not train_set.has_background()\n task = Task(dataset=config.dataset,\n name=config.task.name,\n step=config.task.step,\n add_background=add_background)\n train_mask, valid_mask = 0, 255\n train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)\n valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)\n # construct data loaders\n train_loader = DataLoader(dataset=train_set,\n batch_size=config.trainer.batch_size,\n shuffle=True,\n num_workers=config.trainer.num_workers,\n worker_init_fn=seed_worker,\n drop_last=True)\n valid_loader = DataLoader(dataset=valid_set,\n batch_size=config.trainer.batch_size,\n shuffle=False,\n num_workers=config.trainer.num_workers,\n worker_init_fn=seed_worker)\n LOG.info(\"ICL sets - Train set: %d samples, validation set: %d samples\", len(train_set), len(valid_set))\n\n # prepare models\n LOG.info(\"Preparing model...\")\n new_model = prepare_model(config=config, task=task)\n new_model = new_model.to(accelerator.device)\n if task.step > 0:\n old_task = Task(dataset=config.dataset,\n name=config.task.name,\n step=task.step - 1,\n add_background=add_background)\n old_model = prepare_model(config=config, task=old_task)\n old_model = old_model.to(accelerator.device)\n else:\n old_model = None\n new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)\n LOG.info(\"Done preparing models\")\n\n # prepare optimizer and scheduler\n optimizer = config.optimizer.instantiate(new_model.parameters())\n scheduler = config.scheduler.instantiate(optimizer)\n # prepare losses\n weights = None\n if config.class_weights:\n weights = train_set.load_class_weights(Path(config.class_weights),\n device=accelerator.device,\n normalize=config.ce.tversky)\n LOG.info(\"Using class weights: %s\", str(weights))\n segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count(), weight=weights)\n distill_loss = config.kd.instantiate()\n if task.step > 0 and config.ce.unbiased:\n seg_loss_name = str(type(segment_loss))\n kdd_loss_name = str(type(distill_loss))\n if \"Unbiased\" not in seg_loss_name:\n LOG.warn(f\"Non-ubiased segmentation loss '{seg_loss_name}' for step {task.step}!\")\n if \"Unbiased\" not in kdd_loss_name:\n LOG.warn(f\"Non-unbiased KD loss '{kdd_loss_name}' for step {task.step}\")\n # prepare metrics and logger\n monitored = config.trainer.monitor.name\n train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)\n logger = TensorBoardLogger(log_folder=logs_folder,\n filename_suffix=f\"step-{task.step}\",\n icl_step=task.step,\n comment=config.comment)\n # logging configuration to tensorboard\n LOG.debug(\"Logging flattened config. to TensorBoard\")\n logger.log_table(\"config\", flatten_config(config.dict()))\n\n # prepare trainer\n LOG.info(\"Visualize: %s, num. 
batches for visualization: %s\", str(config.visualize), str(config.num_samples))\n num_samples = int(config.visualize) * config.num_samples\n # choose trainer class depending on task or regularization\n trainer_class = Trainer\n kwargs = dict()\n if config.aug.apply:\n inv_transforms = invariance_transforms(config.aug)\n LOG.info(\"Invariance transforms: \")\n LOG.info(str(inv_transforms))\n kwargs.update(aug_criterion=AugmentationInvariance(transform=inv_transforms),\n aug_lambda=config.aug.factor,\n aug_lambda_icl=config.aug.factor_icl,\n temperature=config.trainer.temperature,\n temp_epochs=config.trainer.temp_epochs)\n trainer_class = AugInvarianceTrainer\n trainer = trainer_class(accelerator=accelerator,\n task=task,\n new_model=new_model,\n old_model=old_model,\n optimizer=optimizer,\n scheduler=scheduler,\n train_metrics=train_metrics,\n val_metrics=valid_metrics,\n old_classes=train_set.old_categories(),\n new_classes=train_set.new_categories(),\n seg_criterion=segment_loss,\n kdd_criterion=distill_loss,\n kde_criterion=None,\n kdd_lambda=config.kd.decoder_factor,\n kde_lambda=config.kd.encoder_factor,\n logger=logger,\n samples=num_samples,\n debug=config.debug,\n **kwargs)\n trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,\n criterion=EarlyStoppingCriterion.maximum,\n patience=config.trainer.patience)) \\\n .add_callback(Checkpoint(call_every=1,\n model_folder=model_folder,\n name_format=f\"task{task.name}_step-{task.step}\",\n save_best=True)) \\\n .add_callback(DisplaySamples(inverse_transform=inverse_transform(),\n color_palette=train_set.palette()))\n # storing config and starting training\n config.version = git_revision_hash()\n store_config(config, path=config_path)\n trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)\n LOG.info(f\"Training completed at epoch {trainer.current_epoch:<2d} \"\n f\"(best {monitored}: {trainer.best_score:.4f})\")\n LOG.info(\"Experiment %s (step %d) completed!\", exp_id, task.step)\n\n\ndef train_ssl(config: SSLConfiguration):\n # assertions before starting\n assert config.name is not None or config.task.step == 0, \"Specify the experiment name with ICL steps >= 1!\"\n assert torch.backends.cudnn.enabled, \"AMP requires CUDNN backend to be enabled.\"\n if config.in_channels != 4:\n LOG.warn(\"Forcing input channels to 4 (previous value: %d)\", config.in_channels)\n config.in_channels = 4\n # prepare accelerator ASAP\n accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)\n\n # Create the directory tree:\n # outputs\n # |-- dataset\n # |--task_name\n # |-- exp_name\n # |-- models\n # |-- logs\n accelerator.wait_for_everyone()\n log_name = f\"output-{config.task.step}.log\"\n exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)\n config_path = out_folder / f\"segmenter-config-s{config.task.step}.yaml\"\n store_config(config, path=config_path)\n LOG.info(\"Run started\")\n LOG.info(\"Experiment ID: %s\", exp_id)\n LOG.info(\"Output folder: %s\", out_folder)\n LOG.info(\"Models folder: %s\", model_folder)\n LOG.info(\"Logs folder: %s\", logs_folder)\n LOG.info(\"Configuration: %s\", config_path)\n\n # seeding everything\n LOG.info(\"Using seed: %d\", config.seed)\n seed_everything(config.seed)\n # prepare datasets\n LOG.info(\"Loading datasets...\")\n train_set, valid_set = prepare_dataset(config=config)\n train_set = SSLDataset(train_set, transform=ssl_transforms())\n LOG.info(\"Full sets - train set: %d 
samples, validation set: %d samples\", len(train_set), len(valid_set))\n\n add_background = not train_set.has_background()\n task = Task(dataset=config.dataset,\n name=config.task.name,\n step=config.task.step,\n add_background=add_background)\n train_mask, valid_mask = 0, 255\n train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)\n valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)\n train_loader = DataLoader(dataset=train_set,\n batch_size=config.trainer.batch_size,\n shuffle=True,\n num_workers=config.trainer.num_workers,\n worker_init_fn=seed_worker,\n drop_last=True)\n valid_loader = DataLoader(dataset=valid_set,\n batch_size=config.trainer.batch_size,\n shuffle=False,\n num_workers=config.trainer.num_workers,\n worker_init_fn=seed_worker)\n LOG.info(\"ICL sets - Train set: %d samples, validation set: %d samples\", len(train_set), len(valid_set))\n\n # prepare models\n LOG.info(\"Preparing model...\")\n new_model, ssl_model = prepare_model_ssl(config=config, task=task)\n new_model = new_model.to(accelerator.device)\n ssl_model = ssl_model.to(accelerator.device)\n if task.step > 0:\n old_task = Task(dataset=config.dataset,\n name=config.task.name,\n step=task.step - 1,\n add_background=add_background)\n old_model = prepare_model(config=config, task=old_task)\n old_model = old_model.to(accelerator.device)\n else:\n old_model = None\n new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)\n LOG.info(\"Done preparing models\")\n\n # prepare optimizer and scheduler\n parameters = chain(new_model.parameters(), ssl_model.head.parameters())\n optimizer = config.optimizer.instantiate(parameters)\n scheduler = config.scheduler.instantiate(optimizer)\n # prepare losses, including SSL\n segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count())\n distill_loss = config.kd.instantiate()\n pretext_loss = config.ssl_loss()\n # asserts to verify their validity\n if task.step > 0 and config.ce.unbiased:\n seg_loss_name = str(type(segment_loss))\n kdd_loss_name = str(type(distill_loss))\n assert \"Unbiased\" in seg_loss_name, f\"Wrong loss '{seg_loss_name}' for step {task.step}\"\n assert \"Unbiased\" in kdd_loss_name, f\"Wrong loss '{kdd_loss_name}' for step {task.step}\"\n # prepare metrics and logger\n monitored = config.trainer.monitor.name\n train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)\n ssl_metrics = prepare_metrics_ssl(num_classes=config.model.pretext_classes, device=accelerator.device)\n logger = TensorBoardLogger(log_folder=logs_folder,\n filename_suffix=f\"step-{task.step}\",\n icl_step=task.step,\n comment=config.comment)\n # logging configuration to tensorboard\n LOG.debug(\"Logging flattened config. to TensorBoard\")\n logger.log_table(\"config\", flatten_config(config.dict()))\n\n # prepare trainer\n LOG.info(\"Visualize: %s, num. 
batches for visualization: %s\", str(config.visualize), str(config.num_samples))\n num_samples = int(config.visualize) * config.num_samples\n trainer = SSLTrainer(accelerator=accelerator,\n task=task,\n new_model=new_model,\n old_model=old_model,\n ssl_model=ssl_model,\n optimizer=optimizer,\n scheduler=scheduler,\n train_metrics=train_metrics,\n val_metrics=valid_metrics,\n old_classes=train_set.old_categories(),\n new_classes=train_set.new_categories(),\n seg_criterion=segment_loss,\n ssl_criterion=pretext_loss,\n kdd_criterion=distill_loss,\n kde_criterion=None,\n kdd_lambda=config.kd.decoder_factor,\n kde_lambda=config.kd.encoder_factor,\n logger=logger,\n samples=num_samples,\n debug=config.debug)\n trainer.add_metrics(SSLStage.ssl, metrics=ssl_metrics)\n trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,\n criterion=EarlyStoppingCriterion.maximum,\n patience=config.trainer.patience)) \\\n .add_callback(Checkpoint(call_every=1,\n model_folder=model_folder,\n name_format=f\"task{task.name}_step-{task.step}\",\n save_best=True)) \\\n .add_callback(DisplaySamples(inverse_transform=inverse_transform(),\n color_palette=train_set.palette()))\n trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)\n LOG.info(f\"Training completed at epoch {trainer.current_epoch:<2d} \"\n f\"(best {monitored}: {trainer.best_score:.4f})\")\n LOG.info(\"Experiment %s (step %d) completed!\", exp_id, task.step)\n"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
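The saticl entry above restores the previous incremental step by loading one checkpoint twice: with strict=False into the new model (whose extra ICL head has no weights yet) and with strict=True into the frozen old model used for distillation (init_from_previous_step). A minimal sketch of that checkpoint-carryover pattern, using toy torch.nn modules in place of the actual ICLSegmenter (class names and the "step0.pt" path are illustrative only, not the saticl API):

import torch
import torch.nn as nn

class ToyICLSegmenter(nn.Module):
    """Stand-in for an incremental segmenter: shared encoder plus one head per step."""
    def __init__(self, classes_per_step):
        super().__init__()
        self.encoder = nn.Conv2d(3, 8, kernel_size=3, padding=1)
        self.heads = nn.ModuleList(
            nn.Conv2d(8, c, kernel_size=1) for c in classes_per_step
        )

    def forward(self, x):
        feats = self.encoder(x)
        return torch.cat([head(feats) for head in self.heads], dim=1)

# step 0: train (omitted) and save the checkpoint
step0 = ToyICLSegmenter(classes_per_step=[4])
torch.save(step0.state_dict(), "step0.pt")

# step 1: same encoder, plus an extra head for the new classes
new_model = ToyICLSegmenter(classes_per_step=[4, 2])
old_model = ToyICLSegmenter(classes_per_step=[4])

checkpoint = torch.load("step0.pt", map_location="cpu")
# strict=False: the brand-new head (heads.1) is missing from the checkpoint
# and is simply left at its random initialization
new_model.load_state_dict(checkpoint, strict=False)
# the old model matches the checkpoint exactly, so strict=True is safe
old_model.load_state_dict(checkpoint, strict=True)

# freeze the old model; it only provides distillation targets
for p in old_model.parameters():
    p.requires_grad_(False)
old_model.eval()

Note that strict=False only tolerates missing or unexpected keys, not shape mismatches, which is why each incremental step gets its own head module rather than resizing a shared one.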
frank20a/collaborative-sats | [
"9d26d3c8f66cf43bbd514f02434851439e746797",
"9d26d3c8f66cf43bbd514f02434851439e746797"
] | [
"src/slider_experiment/slider_experiment/thruster_pwm_tsl.py",
"src/stereo_cam/stereo_cam/disparity_publisher.py"
] | [
"import rclpy\nfrom rclpy.node import Node\nfrom geometry_msgs.msg import Vector3\nfrom std_msgs.msg import Int16\nfrom rclpy.qos import QoSPresetProfiles\nfrom ament_index_python import get_package_share_directory\n\nimport numpy as np\nimport sys, os\n\nfrom .parameters import force\nfrom .flags import flags\n\n\ndef create_pwm(value, resolution):\n if value < 0.0: \n value = -value\n if value > 1.0:\n value = 1.0\n \n return np.concatenate((np.ones(np.floor(resolution * value).astype(np.int32)), np.zeros(np.ceil(resolution * (1 - value)).astype(np.int32))))\n\n\nclass ThrustController(Node):\n def __init__(self):\n super().__init__('thrust_controller')\n \n self.declare_parameter('verbose', 0)\n self.declare_parameter('frequency', 10)\n self.declare_parameter('resolution', 100)\n\n self.verbose = self.get_parameter('verbose').get_parameter_value().integer_value\n self.frequency = self.get_parameter('frequency').get_parameter_value().integer_value\n self.resolution = self.get_parameter('resolution').get_parameter_value().integer_value\n\n sys.path.insert(1, os.path.join(get_package_share_directory('slider_experiment'), 'python_build/tsl_optimizer'))\n import tsl_optimizer as optimizer\n self.solver = optimizer.solver()\n \n self.signals = [create_pwm(0, self.resolution) for i in range(8)]\n self.i = 0\n \n self.create_subscription(Vector3, 'thrust_cmd', self.callback, QoSPresetProfiles.get_from_short_key('system_default'))\n self.pub = self.create_publisher(Int16, 'thruster_flags', QoSPresetProfiles.get_from_short_key('sensor_data'))\n\n self.create_timer(1/(self.frequency * self.resolution), self.send_signals)\n \n def callback(self, msg: Vector3):\n \n T = self.solver.run(p = [msg.x, msg.y, msg.z]).solution\n\n if self.verbose > 0: \n self.get_logger().info(f'\\n Fx = {msg.x: 2.2f}\\n Fy = {msg.y: 2.2f}\\ntau = {msg.z: 2.2f}')\n self.get_logger().info(f'cmd: {T}')\n\n self.signals = [create_pwm(T[i] / force, self.resolution) for i in range(8)]\n \n def send_signals(self):\n req = Int16()\n \n tmp = 0\n for i in range(8):\n if self.signals[i][self.i] == 1:\n tmp ^= flags[i]\n try:\n req.data = tmp\n except AssertionError:\n print(tmp)\n\n \n self.i += 1\n self.i %= self.resolution\n \n self.pub.publish(req)\n \n \ndef main(args=None):\n rclpy.init(args=args)\n node = ThrustController()\n rclpy.spin(node) \n node.destroy_node()\n rclpy.shutdown()\n \n\nif __name__ == '__main__':\n main()",
"from sympy import diag\nimport rclpy\nfrom rclpy.node import Node\nfrom sensor_msgs.msg import Image\nfrom rclpy.qos import QoSPresetProfiles\nfrom cv_bridge import CvBridge\nfrom stereo_msgs.msg import DisparityImage\n\nimport cv2\nfrom time import time\nimport numpy as np\n\nbridge = CvBridge()\nstereo = cv2.StereoBM_create()\n\n# Stereo parameters\nnumDisparities = 16 * 4\nminDisparity = -10\n\nstereo.setNumDisparities(numDisparities)\nstereo.setBlockSize(25)\nstereo.setPreFilterSize(7)\nstereo.setPreFilterCap(20)\nstereo.setUniquenessRatio(15)\nstereo.setSpeckleRange(3)\nstereo.setMinDisparity(minDisparity)\n\nclass DisparityPublisher(Node):\n def __init__(self):\n super().__init__(\"stereo_image_viewer\")\n\n self.img_r = None\n self.updated_r = False\n self.img_l = None\n self.updated_l = False\n\n max_fps = 60\n \n self.create_subscription(\n Image,\n '/stereo/left/image_raw',\n self.l_callback,\n QoSPresetProfiles.get_from_short_key('sensor_data')\n )\n \n self.create_subscription(\n Image,\n '/stereo/right/image_raw',\n self.r_callback,\n QoSPresetProfiles.get_from_short_key('sensor_data')\n )\n \n self.publisher_disp = self.create_publisher(\n DisparityImage, \n 'stereo/disparity', \n QoSPresetProfiles.get_from_short_key('sensor_data')\n )\n\n \n self.create_timer(1/max_fps, self.disparity)\n\n def l_callback(self, msg):\n self.img_l = cv2.cvtColor(bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough'), cv2.COLOR_BGR2GRAY)\n self.updated_l = True\n\n def r_callback(self, msg):\n self.img_r = cv2.cvtColor(bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough'), cv2.COLOR_BGR2GRAY)\n self.updated_r = True\n\n def disparity(self):\n # ----> Check if both images are fresh\n if not (self.updated_r and self.updated_l): return\n self.updated_l = False\n self.updated_r = False\n\n\n disparity = stereo.compute(self.img_l, self.img_r).astype(np.float32)\n # disparity = (disparity/16.0 - (minDisparity-1))/numDisparities\n # disparity = disparity - 16.0 * (minDisparity - 1)\n\n # ----> Send disparsity image message\n self.publisher_disp.publish(\n DisparityImage(\n max_disparity = np.float64(numDisparities - 16.0 * (minDisparity - 1)),\n min_disparity = np.float64(minDisparity),\n delta_d = 1.0 / numDisparities,\n image = bridge.cv2_to_imgmsg(disparity),\n t = 0.065,\n f = (720 / 2) / np.tan(1.04699999 / 2)\n )\n )\n\n # print(np.max(disparity), np.min(disparity))\n\n\ndef main(args=None):\n rclpy.init(args=args)\n viewer = DisparityPublisher()\n rclpy.spin(viewer)\n viewer.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.ceil",
"numpy.floor"
],
[
"numpy.tan",
"numpy.float64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
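The thruster_pwm_tsl.py entry above turns a commanded thrust fraction into an on/off sample train with numpy.floor and numpy.ceil, then combines the active thrusters into a bitmask on every timer tick. A short, self-contained sketch of that duty-cycle construction (the flag values below are hypothetical placeholders; the node XORs its one-hot flags, which is equivalent to OR here):

import numpy as np

def create_pwm(value: float, resolution: int) -> np.ndarray:
    """Spread |value| (clamped to [0, 1]) over `resolution` on/off samples."""
    value = min(abs(value), 1.0)
    on = np.ones(np.floor(resolution * value).astype(np.int32))
    off = np.zeros(np.ceil(resolution * (1 - value)).astype(np.int32))
    return np.concatenate((on, off))

signal = create_pwm(0.25, resolution=8)
print(signal)                       # [1. 1. 0. 0. 0. 0. 0. 0.]
print(signal.sum() / signal.size)   # duty cycle = 0.25

# each timer tick, OR the thrusters whose PWM sample is high into one bitmask
flags = [1 << i for i in range(8)]  # hypothetical per-thruster bit flags
signals = [create_pwm(t, 8) for t in (0.25, 0.0, 1.0, 0.5, 0, 0, 0, 0)]
tick = 0
mask = 0
for i in range(8):
    if signals[i][tick] == 1:
        mask |= flags[i]
print(bin(mask))                    # thrusters 0, 2 and 3 fire on this tick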
lucaskolson/ddd | [
"f273c61856bca27a40b9691b2a9842d8705a3503"
] | [
"app.py"
] | [
"import dash\nfrom dash import dcc\nfrom dash import html\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\nimport pandas as pd\nfrom dash import callback_context\n\ndf = px.data.election()\ngeojson = px.data.election_geojson()\ncandidates = df.winner.unique()\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\n\napp.title = \"ICE Detention Data Dashboard\"\n\nfy = ['2015-10-01', '2016-10-01', '2017-10-01', '2018-10-01']\n\nloc = [\"East Coast\", \"West Coast\", \"Southwest\", \"Midwest\", \"All\"]\n\napp.layout = html.Div(\n children=[\n html.Div(\n children=[\n html.H1(\n children=\"ICE Detention Analytics\", className=\"header-title\"\n ),\n html.P(\n children=\"A dashboard and data repository of\"\n \" ICE detention trends and facilities across the US\"\n \" between 2010 and 2020\",\n className=\"header-description\",\n ),\n ],\n className=\"header\",\n ),\n html.Div(\n children=[\n dcc.RadioItems(\n id='candidate', \n options=[{'value': x, 'label': x} \n for x in candidates],\n value=candidates[0],\n labelStyle={'display': 'inline-block'}\n ),\n html.Div(\n children=[dcc.Graph(\n id=\"choropleth\", config={\"displayModeBar\": False},\n ),\n html.Button(\"Download CSV\", id=\"btn_csv\"),\n dcc.Download(id=\"download-dataframe-csv\"),\n html.Button(\"Download Image\", id=\"btn_image\"),\n dcc.Download(id=\"download-image\")],\n className=\"card\",\n ),\n dcc.RadioItems(\n id='us_loc', \n options=[{'value': x, 'label': x} \n for x in loc],\n value=loc[0],\n labelStyle={'display': 'inline-block'}\n ),\n html.Div(\n children=dcc.Graph(\n id=\"fy_arrests\", config={\"displayModeBar\": False},\n ),\n className=\"card\",\n ),\n ],\n className=\"wrapper\",\n ),\n ]\n)\n\n\[email protected](\n Output(\"choropleth\", \"figure\"), \n [Input(\"candidate\", \"value\")])\n\ndef display_choropleth(candidate):\n fig = px.choropleth(\n df, geojson=geojson, color=candidate,\n locations=\"district\", featureidkey=\"properties.district\",\n projection=\"mercator\", range_color=[0, 6500])\n fig.update_geos(fitbounds=\"locations\", visible=False)\n fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\n\n return fig\n\[email protected](\n Output(\"download-dataframe-csv\", \"data\"),\n Input(\"btn_csv\", \"n_clicks\"),\n prevent_initial_call=True,\n)\ndef func(n_clicks):\n return dcc.send_data_frame(df.to_csv, \"mydf.csv\")\n\[email protected](\n Output(\"download-image\", \"data\"),\n Input(\"btn_image\", \"n_clicks\"),\n prevent_initial_call=True,\n)\ndef func(n_clicks):\n return dcc.send_file(\n \"./plot_downloads/test.png\"\n )\n\[email protected](\n Output(\"fy_arrests\", \"figure\"),\n [Input(\"us_loc\", \"value\")])\n\ndef display_arrest_fy(us_loc):\n arrests_by_fy = pd.read_csv(\"./data/arrests_by_fy.csv\")\n if us_loc == \"West Coast\":\n aor = ['LOS', 'SEA', 'SFR', 'SND']\n elif us_loc == \"East Coast\":\n aor = ['ATL', 'BAL', 'BOS', 'BUF', 'DET', 'MIA', 'NEW', 'NOL', 'NYC', 'PHI', 'WAS', 'HQ']\n elif us_loc == \"Midwest\":\n aor = ['CHI', 'SPM']\n elif us_loc == \"Southwest\":\n aor = ['DAL', 'DEN', 'ELP', 'HOU', 'PHO', 'SLC', 'SNA']\n elif us_loc == \"All\":\n aor = ['ATL', 'BAL', 'BOS', 'BUF', 'CHI', 'DAL', 'DEN', 'DET', 'ELP', 'HOU', 'HQ', 'LOS', 'MIA', 'NEW', 'NOL','NYC', 'PHI', 'PHO', 'SEA', 'SFR', 'SLC', 'SNA', 'SND', 'SPM', 'WAS']\n else:\n aor = ['ATL', 'BAL', 'BOS', 'BUF', 'CHI', 'DAL', 'DEN', 'DET', 'ELP', 'HOU', 'HQ', 'LOS', 'MIA', 'NEW', 
'NOL','NYC', 'PHI', 'PHO', 'SEA', 'SFR', 'SLC', 'SNA', 'SND', 'SPM', 'WAS']\n\n\n\n fig = px.line(arrests_by_fy, x=fy, \n y=aor, \n title = \"Arrests in AOR per FY\",\n labels=dict(x=\"Fiscal Year\", y=\"Number of Arrests\"))\n fig.update_xaxes(title=\"Fiscal Year\", nticks = 4)\n fig.update_yaxes(title=\"Number of Arrests\")\n fig.update_layout(legend_title_text='AOR')\n\n return fig\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
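The app.py entry above maps a selected region to a list of AOR column names in a frame loaded with pandas.read_csv and plots one line per column. A minimal sketch of that wide-form plotting pattern, using a hypothetical stand-in DataFrame with placeholder numbers instead of ./data/arrests_by_fy.csv (the app defines similar AOR lists for the other regions):

import pandas as pd
import plotly.express as px

# stand-in for arrests_by_fy.csv: one row per fiscal year, one column per AOR
arrests_by_fy = pd.DataFrame({
    "fy":  ["2015-10-01", "2016-10-01", "2017-10-01", "2018-10-01"],
    "LOS": [10, 12, 15, 14],
    "SEA": [3, 4, 6, 5],
    "SFR": [4, 4, 5, 6],
    "SND": [6, 7, 9, 8],
})

region_to_aor = {"West Coast": ["LOS", "SEA", "SFR", "SND"]}

aor = region_to_aor["West Coast"]
fig = px.line(arrests_by_fy, x="fy", y=aor, title="Arrests in AOR per FY")
fig.update_xaxes(title="Fiscal Year", nticks=4)
fig.update_yaxes(title="Number of Arrests")
fig.update_layout(legend_title_text="AOR")
# fig.show()  # uncomment to render locally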
Roshan-Thomas/qiskit-terra | [
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f"
] | [
"qiskit/visualization/timeline/plotters/matplotlib.py",
"qiskit/algorithms/optimizers/qnspsa.py",
"qiskit/algorithms/amplitude_estimators/fae.py",
"qiskit/circuit/library/standard_gates/ryy.py",
"test/python/pulse/test_calibrationbuilder.py",
"test/python/quantum_info/states/test_random.py",
"test/python/quantum_info/operators/symplectic/test_pauli_table.py",
"qiskit/quantum_info/operators/random.py",
"qiskit/result/mitigation/utils.py",
"qiskit/circuit/library/standard_gates/x.py",
"qiskit/algorithms/optimizers/nft.py",
"qiskit/tools/jupyter/backend_monitor.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name\n\n\"\"\"Matplotlib plotter API.\"\"\"\n\nfrom typing import Optional, Tuple\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Rectangle\n\nfrom qiskit.visualization.exceptions import VisualizationError\nfrom qiskit.visualization.timeline import core, types, drawings\nfrom qiskit.visualization.timeline.plotters.base_plotter import BasePlotter\nfrom qiskit.visualization.utils import matplotlib_close_if_inline\n\n\nclass MplPlotter(BasePlotter):\n \"\"\"Matplotlib API for pulse drawer.\n\n This plotter arranges bits along y axis of 2D canvas with vertical offset.\n \"\"\"\n\n def __init__(self, canvas: core.DrawerCanvas, axis: Optional[plt.Axes] = None):\n \"\"\"Create new plotter.\n\n Args:\n canvas: Configured drawer canvas object. Canvas object should be updated\n with `.update` method before initializing the plotter.\n axis: Matplotlib axis object. When `axis` is provided, the plotter updates\n given axis instead of creating and returning new matplotlib figure.\n \"\"\"\n super().__init__(canvas=canvas)\n\n if axis is None:\n fig_height = self.canvas.vmax - self.canvas.vmin\n fig_h = self.canvas.formatter[\"general.fig_unit_height\"] * fig_height\n fig_w = self.canvas.formatter[\"general.fig_width\"]\n\n self.figure = plt.figure(figsize=(fig_w, fig_h))\n self.ax = self.figure.add_subplot(1, 1, 1)\n else:\n self.figure = axis.figure\n self.ax = axis\n\n self.initialize_canvas()\n\n def initialize_canvas(self):\n \"\"\"Format appearance of matplotlib canvas.\"\"\"\n self.ax.set_facecolor(self.canvas.formatter[\"color.background\"])\n\n # axis lines\n self.ax.spines[\"right\"].set_color(\"none\")\n self.ax.spines[\"left\"].set_color(\"none\")\n self.ax.spines[\"top\"].set_color(\"none\")\n\n # axis labels\n self.ax.set_yticks([])\n axis_config = self.canvas.layout[\"time_axis_map\"](time_window=self.canvas.time_range)\n\n self.ax.set_xticks(list(axis_config.axis_map.keys()))\n self.ax.set_xticklabels(\n list(axis_config.axis_map.values()),\n fontsize=self.canvas.formatter[\"text_size.axis_label\"],\n )\n self.ax.set_xlabel(\n axis_config.label, fontsize=self.canvas.formatter[\"text_size.axis_label\"]\n )\n\n # boundary\n self.ax.set_xlim(*self.canvas.time_range)\n self.ax.set_ylim(self.canvas.vmin, self.canvas.vmax)\n\n def draw(self):\n \"\"\"Output drawings stored in canvas object.\"\"\"\n\n for _, data in self.canvas.collections:\n xvals = np.asarray(data.xvals, dtype=float)\n yvals = np.asarray(data.yvals, dtype=float)\n offsets = [self.canvas.assigned_coordinates[bit] for bit in data.bits]\n\n if isinstance(data, drawings.BoxData):\n # box data\n if data.data_type in [\n str(types.BoxType.SCHED_GATE.value),\n str(types.BoxType.DELAY.value),\n ]:\n # draw a smoothly rounded rectangle\n xs, ys1, ys2 = self._time_bucket_outline(xvals, yvals)\n self.ax.fill_between(\n x=xs, y1=ys1 + offsets[0], y2=ys2 + offsets[0], **data.styles\n )\n\n else:\n # draw a rectangle\n x0, x1 
= xvals\n y0, y1 = yvals + offsets[0]\n\n rect = Rectangle(xy=(x0, y0), width=x1 - x0, height=y1 - y0)\n pc = PatchCollection([rect], **data.styles)\n self.ax.add_collection(pc)\n\n elif isinstance(data, drawings.LineData):\n # line data\n self.ax.plot(xvals, yvals + offsets[0], **data.styles)\n\n elif isinstance(data, drawings.TextData):\n # text data\n if data.latex is not None:\n s = rf\"${data.latex}$\"\n else:\n s = data.text\n\n self.ax.text(x=xvals[0], y=yvals[0] + offsets[0], s=s, **data.styles)\n\n elif isinstance(data, drawings.GateLinkData):\n # gate link data\n self.ax.plot(xvals.repeat(len(offsets)), offsets, **data.styles)\n\n else:\n VisualizationError(\n \"Data {name} is not supported by {plotter}\"\n \"\".format(name=data, plotter=self.__class__.__name__)\n )\n\n def _time_bucket_outline(\n self, xvals: np.ndarray, yvals: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Generate outline of time bucket. Edges are smoothly faded.\n\n Args:\n xvals: Left and right point coordinates.\n yvals: Bottom and top point coordinates.\n\n Returns:\n Coordinate vectors of time bucket fringe.\n \"\"\"\n x0, x1 = xvals\n y0, y1 = yvals\n\n width = x1 - x0\n y_mid = 0.5 * (y0 + y1)\n\n risefall = int(min(self.canvas.formatter[\"time_bucket.edge_dt\"], max(width / 2 - 2, 0)))\n edge = np.sin(np.pi / 2 * np.arange(0, risefall) / risefall)\n\n xs = np.concatenate(\n [\n np.arange(x0, x0 + risefall),\n [x0 + risefall, x1 - risefall],\n np.arange(x1 - risefall + 1, x1 + 1),\n ]\n )\n\n l1 = (y1 - y_mid) * np.concatenate([edge, [1, 1], edge[::-1]])\n l2 = (y0 - y_mid) * np.concatenate([edge, [1, 1], edge[::-1]])\n\n return xs, l1, l2\n\n def save_file(self, filename: str):\n \"\"\"Save image to file.\n Args:\n filename: File path to output image data.\n \"\"\"\n plt.savefig(filename, bbox_inches=\"tight\", dpi=self.canvas.formatter[\"general.dpi\"])\n\n def get_image(self, interactive: bool = False) -> matplotlib.pyplot.Figure:\n \"\"\"Get image data to return.\n Args:\n interactive: When set `True` show the circuit in a new window.\n This depends on the matplotlib backend being used supporting this.\n Returns:\n Matplotlib figure data.\n \"\"\"\n matplotlib_close_if_inline(self.figure)\n\n if self.figure and interactive:\n self.figure.show()\n\n return self.figure\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"The QN-SPSA optimizer.\"\"\"\n\nfrom typing import Any, Iterator, Optional, Union, Callable, Dict\n\nimport numpy as np\nfrom qiskit.providers import Backend\nfrom qiskit.circuit import ParameterVector, QuantumCircuit\nfrom qiskit.opflow import StateFn, CircuitSampler, ExpectationBase\nfrom qiskit.utils import QuantumInstance\n\nfrom .spsa import SPSA, CALLBACK, TERMINATIONCHECKER, _batch_evaluate\n\n# the function to compute the fidelity\nFIDELITY = Callable[[np.ndarray, np.ndarray], float]\n\n\nclass QNSPSA(SPSA):\n r\"\"\"The Quantum Natural SPSA (QN-SPSA) optimizer.\n\n The QN-SPSA optimizer [1] is a stochastic optimizer that belongs to the family of gradient\n descent methods. This optimizer is based on SPSA but attempts to improve the convergence by\n sampling the **natural gradient** instead of the vanilla, first-order gradient. It achieves\n this by approximating Hessian of the ``fidelity`` of the ansatz circuit.\n\n Compared to natural gradients, which require :math:`\\mathcal{O}(d^2)` expectation value\n evaluations for a circuit with :math:`d` parameters, QN-SPSA only requires\n :math:`\\mathcal{O}(1)` and can therefore significantly speed up the natural gradient calculation\n by sacrificing some accuracy. Compared to SPSA, QN-SPSA requires 4 additional function\n evaluations of the fidelity.\n\n The stochastic approximation of the natural gradient can be systematically improved by\n increasing the number of ``resamplings``. This leads to a Monte Carlo-style convergence to\n the exact, analytic value.\n\n .. note::\n\n This component has some function that is normally random. If you want to reproduce behavior\n then you should set the random number generator seed in the algorithm_globals\n (``qiskit.utils.algorithm_globals.random_seed = seed``).\n\n Examples:\n\n This short example runs QN-SPSA for the ground state calculation of the ``Z ^ Z``\n observable where the ansatz is a ``PauliTwoDesign`` circuit.\n\n .. code-block:: python\n\n import numpy as np\n from qiskit.algorithms.optimizers import QNSPSA\n from qiskit.circuit.library import PauliTwoDesign\n from qiskit.opflow import Z, StateFn\n\n ansatz = PauliTwoDesign(2, reps=1, seed=2)\n observable = Z ^ Z\n initial_point = np.random.random(ansatz.num_parameters)\n\n def loss(x):\n bound = ansatz.bind_parameters(x)\n return np.real((StateFn(observable, is_measurement=True) @ StateFn(bound)).eval())\n\n fidelity = QNSPSA.get_fidelity(ansatz)\n qnspsa = QNSPSA(fidelity, maxiter=300)\n result = qnspsa.optimize(ansatz.num_parameters, loss, initial_point=initial_point)\n\n\n References:\n\n [1] J. 
Gacon et al, \"Simultaneous Perturbation Stochastic Approximation of the Quantum\n Fisher Information\", `arXiv:2103.09232 <https://arxiv.org/abs/2103.09232>`_\n\n \"\"\"\n\n def __init__(\n self,\n fidelity: FIDELITY,\n maxiter: int = 100,\n blocking: bool = True,\n allowed_increase: Optional[float] = None,\n learning_rate: Optional[Union[float, Callable[[], Iterator]]] = None,\n perturbation: Optional[Union[float, Callable[[], Iterator]]] = None,\n last_avg: int = 1,\n resamplings: Union[int, Dict[int, int]] = 1,\n perturbation_dims: Optional[int] = None,\n regularization: Optional[float] = None,\n hessian_delay: int = 0,\n lse_solver: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None,\n initial_hessian: Optional[np.ndarray] = None,\n callback: Optional[CALLBACK] = None,\n termination_checker: Optional[TERMINATIONCHECKER] = None,\n ) -> None:\n r\"\"\"\n Args:\n fidelity: A function to compute the fidelity of the ansatz state with itself for\n two different sets of parameters.\n maxiter: The maximum number of iterations. Note that this is not the maximal number\n of function evaluations.\n blocking: If True, only accepts updates that improve the loss (up to some allowed\n increase, see next argument).\n allowed_increase: If ``blocking`` is ``True``, this argument determines by how much\n the loss can increase with the proposed parameters and still be accepted.\n If ``None``, the allowed increases is calibrated automatically to be twice the\n approximated standard deviation of the loss function.\n learning_rate: The update step is the learning rate is multiplied with the gradient.\n If the learning rate is a float, it remains constant over the course of the\n optimization. It can also be a callable returning an iterator which yields the\n learning rates for each optimization step.\n If ``learning_rate`` is set ``perturbation`` must also be provided.\n perturbation: Specifies the magnitude of the perturbation for the finite difference\n approximation of the gradients. Can be either a float or a generator yielding\n the perturbation magnitudes per step.\n If ``perturbation`` is set ``learning_rate`` must also be provided.\n last_avg: Return the average of the ``last_avg`` parameters instead of just the\n last parameter values.\n resamplings: The number of times the gradient (and Hessian) is sampled using a random\n direction to construct a gradient estimate. Per default the gradient is estimated\n using only one random direction. If an integer, all iterations use the same number\n of resamplings. If a dictionary, this is interpreted as\n ``{iteration: number of resamplings per iteration}``.\n perturbation_dims: The number of perturbed dimensions. Per default, all dimensions\n are perturbed, but a smaller, fixed number can be perturbed. If set, the perturbed\n dimensions are chosen uniformly at random.\n regularization: To ensure the preconditioner is symmetric and positive definite, the\n identity times a small coefficient is added to it. This generator yields that\n coefficient.\n hessian_delay: Start multiplying the gradient with the inverse Hessian only after a\n certain number of iterations. The Hessian is still evaluated and therefore this\n argument can be useful to first get a stable average over the last iterations before\n using it as preconditioner.\n lse_solver: The method to solve for the inverse of the Hessian. Per default an\n exact LSE solver is used, but can e.g. be overwritten by a minimization routine.\n initial_hessian: The initial guess for the Hessian. 
By default the identity matrix\n is used.\n callback: A callback function passed information in each iteration step. The\n information is, in this order: the parameters, the function value, the number\n of function evaluations, the stepsize, whether the step was accepted.\n termination_checker: A callback function executed at the end of each iteration step. The\n arguments are, in this order: the parameters, the function value, the number\n of function evaluations, the stepsize, whether the step was accepted. If the callback\n returns True, the optimization is terminated.\n To prevent additional evaluations of the objective method, if the objective has not yet\n been evaluated, the objective is estimated by taking the mean of the objective\n evaluations used in the estimate of the gradient.\n\n\n \"\"\"\n super().__init__(\n maxiter,\n blocking,\n allowed_increase,\n # trust region *must* be false for natural gradients to work\n trust_region=False,\n learning_rate=learning_rate,\n perturbation=perturbation,\n resamplings=resamplings,\n callback=callback,\n second_order=True,\n hessian_delay=hessian_delay,\n lse_solver=lse_solver,\n regularization=regularization,\n perturbation_dims=perturbation_dims,\n initial_hessian=initial_hessian,\n termination_checker=termination_checker,\n )\n\n self.fidelity = fidelity\n\n def _point_sample(self, loss, x, eps, delta1, delta2):\n loss_points = [x + eps * delta1, x - eps * delta1]\n fidelity_points = [\n (x, x + eps * delta1),\n (x, x - eps * delta1),\n (x, x + eps * (delta1 + delta2)),\n (x, x + eps * (-delta1 + delta2)),\n ]\n self._nfev += 6\n\n loss_values = _batch_evaluate(loss, loss_points, self._max_evals_grouped)\n fidelity_values = _batch_evaluate(self.fidelity, fidelity_points, self._max_evals_grouped)\n\n # compute the gradient approximation and additionally return the loss function evaluations\n gradient_estimate = (loss_values[0] - loss_values[1]) / (2 * eps) * delta1\n\n # compute the preconditioner point estimate\n diff = fidelity_values[2] - fidelity_values[0]\n diff -= fidelity_values[3] - fidelity_values[1]\n diff /= 2 * eps**2\n\n rank_one = np.outer(delta1, delta2)\n # -0.5 factor comes from the fact that we need -0.5 * fidelity\n hessian_estimate = -0.5 * diff * (rank_one + rank_one.T) / 2\n\n return np.mean(loss_values), gradient_estimate, hessian_estimate\n\n @property\n def settings(self) -> Dict[str, Any]:\n \"\"\"The optimizer settings in a dictionary format.\"\"\"\n # re-use serialization from SPSA\n settings = super().settings\n settings.update({\"fidelity\": self.fidelity})\n\n # remove SPSA-specific arguments not in QNSPSA\n settings.pop(\"trust_region\")\n settings.pop(\"second_order\")\n\n return settings\n\n @staticmethod\n def get_fidelity(\n circuit: QuantumCircuit,\n backend: Optional[Union[Backend, QuantumInstance]] = None,\n expectation: Optional[ExpectationBase] = None,\n ) -> Callable[[np.ndarray, np.ndarray], float]:\n r\"\"\"Get a function to compute the fidelity of ``circuit`` with itself.\n\n Let ``circuit`` be a parameterized quantum circuit performing the operation\n :math:`U(\\theta)` given a set of parameters :math:`\\theta`. Then this method returns\n a function to evaluate\n\n .. 
math::\n\n F(\\theta, \\phi) = \\big|\\langle 0 | U^\\dagger(\\theta) U(\\phi) |0\\rangle \\big|^2.\n\n The output of this function can be used as input for the ``fidelity`` to the\n :class:~`qiskit.algorithms.optimizers.QNSPSA` optimizer.\n\n Args:\n circuit: The circuit preparing the parameterized ansatz.\n backend: A backend of quantum instance to evaluate the circuits. If None, plain\n matrix multiplication will be used.\n expectation: An expectation converter to specify how the expected value is computed.\n If a shot-based readout is used this should be set to ``PauliExpectation``.\n\n Returns:\n A handle to the function :math:`F`.\n\n \"\"\"\n params_x = ParameterVector(\"x\", circuit.num_parameters)\n params_y = ParameterVector(\"y\", circuit.num_parameters)\n\n expression = ~StateFn(circuit.assign_parameters(params_x)) @ StateFn(\n circuit.assign_parameters(params_y)\n )\n\n if expectation is not None:\n expression = expectation.convert(expression)\n\n if backend is None:\n\n def fidelity(values_x, values_y):\n value_dict = dict(\n zip(params_x[:] + params_y[:], values_x.tolist() + values_y.tolist())\n )\n return np.abs(expression.bind_parameters(value_dict).eval()) ** 2\n\n else:\n sampler = CircuitSampler(backend)\n\n def fidelity(values_x, values_y=None):\n if values_y is not None: # no batches\n value_dict = dict(\n zip(params_x[:] + params_y[:], values_x.tolist() + values_y.tolist())\n )\n else:\n value_dict = {p: [] for p in params_x[:] + params_y[:]}\n for values_xy in values_x:\n for value_x, param_x in zip(values_xy[0, :], params_x):\n value_dict[param_x].append(value_x)\n\n for value_y, param_y in zip(values_xy[1, :], params_y):\n value_dict[param_y].append(value_y)\n\n return np.abs(sampler.convert(expression, params=value_dict).eval()) ** 2\n\n return fidelity\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Faster Amplitude Estimation.\"\"\"\n\nfrom typing import Optional, Union, List, Tuple\nimport numpy as np\n\nfrom qiskit.circuit import QuantumCircuit, ClassicalRegister\nfrom qiskit.providers import BaseBackend, Backend\nfrom qiskit.utils import QuantumInstance\nfrom qiskit.algorithms.exceptions import AlgorithmError\n\nfrom .amplitude_estimator import AmplitudeEstimator, AmplitudeEstimatorResult\nfrom .estimation_problem import EstimationProblem\n\n\nclass FasterAmplitudeEstimation(AmplitudeEstimator):\n \"\"\"The Faster Amplitude Estimation algorithm.\n\n The Faster Amplitude Estimation (FAE) [1] algorithm is a variant of Quantum Amplitude\n Estimation (QAE), where the Quantum Phase Estimation (QPE) by an iterative Grover search,\n similar to [2].\n\n Due to the iterative version of the QPE, this algorithm does not require any additional\n qubits, as the originally proposed QAE [3] and thus the resulting circuits are less complex.\n\n References:\n\n [1]: K. Nakaji. Faster Amplitude Estimation, 2020;\n `arXiv:2002.02417 <https://arxiv.org/pdf/2003.02417.pdf>`_\n [2]: D. Grinko et al. Iterative Amplitude Estimation, 2019;\n `arXiv:1912.05559 <http://arxiv.org/abs/1912.05559>`_\n [3]: G. Brassard et al. Quantum Amplitude Amplification and Estimation, 2000;\n `arXiv:quant-ph/0005055 <http://arxiv.org/abs/quant-ph/0005055>`_\n\n \"\"\"\n\n def __init__(\n self,\n delta: float,\n maxiter: int,\n rescale: bool = True,\n quantum_instance: Optional[Union[QuantumInstance, BaseBackend, Backend]] = None,\n ) -> None:\n r\"\"\"\n Args:\n delta: The probability that the true value is outside of the final confidence interval.\n maxiter: The number of iterations, the maximal power of Q is `2 ** (maxiter - 1)`.\n rescale: Whether to rescale the problem passed to `estimate`.\n quantum_instance: The quantum instance or backend to run the circuits.\n\n .. 
note::\n\n This algorithm overwrites the number of shots set in the ``quantum_instance``\n argument, but will reset them to the initial number after running.\n\n \"\"\"\n super().__init__()\n self.quantum_instance = quantum_instance\n self._shots = (int(1944 * np.log(2 / delta)), int(972 * np.log(2 / delta)))\n self._rescale = rescale\n self._delta = delta\n self._maxiter = maxiter\n self._num_oracle_calls = 0\n\n @property\n def quantum_instance(self) -> Optional[QuantumInstance]:\n \"\"\"Get the quantum instance.\n\n Returns:\n The quantum instance used to run this algorithm.\n \"\"\"\n return self._quantum_instance\n\n @quantum_instance.setter\n def quantum_instance(\n self, quantum_instance: Union[QuantumInstance, BaseBackend, Backend]\n ) -> None:\n \"\"\"Set quantum instance.\n\n Args:\n quantum_instance: The quantum instance used to run this algorithm.\n \"\"\"\n if isinstance(quantum_instance, (BaseBackend, Backend)):\n quantum_instance = QuantumInstance(quantum_instance)\n self._quantum_instance = quantum_instance\n\n def _cos_estimate(self, estimation_problem, k, shots):\n if self._quantum_instance is None:\n raise AlgorithmError(\"Quantum instance must be set.\")\n\n if self._quantum_instance.is_statevector:\n circuit = self.construct_circuit(estimation_problem, k, measurement=False)\n statevector = self._quantum_instance.execute(circuit).get_statevector()\n\n # sum over all amplitudes where the objective qubits are 1\n prob = 0\n for i, amplitude in enumerate(statevector):\n # get bitstring of objective qubits\n full_state = bin(i)[2:].zfill(circuit.num_qubits)[::-1]\n state = \"\".join([full_state[i] for i in estimation_problem.objective_qubits])\n\n # check if it is a good state\n if estimation_problem.is_good_state(state[::-1]):\n prob = prob + np.abs(amplitude) ** 2\n\n cos_estimate = 1 - 2 * prob\n else:\n circuit = self.construct_circuit(estimation_problem, k, measurement=True)\n\n self._quantum_instance.run_config.shots = shots\n counts = self._quantum_instance.execute(circuit).get_counts()\n self._num_oracle_calls += (2 * k + 1) * shots\n\n good_counts = 0\n for state, count in counts.items():\n if estimation_problem.is_good_state(state):\n good_counts += count\n\n cos_estimate = 1 - 2 * good_counts / shots\n\n return cos_estimate\n\n def _chernoff(self, cos, shots):\n width = np.sqrt(np.log(2 / self._delta) * 12 / shots)\n confint = [np.maximum(-1, cos - width), np.minimum(1, cos + width)]\n return confint\n\n def construct_circuit(\n self, estimation_problem: EstimationProblem, k: int, measurement: bool = False\n ) -> Union[QuantumCircuit, Tuple[QuantumCircuit, List[int]]]:\n r\"\"\"Construct the circuit :math:`Q^k X |0\\rangle>`.\n\n The A operator is the unitary specifying the QAE problem and Q the associated Grover\n operator.\n\n Args:\n estimation_problem: The estimation problem for which to construct the circuit.\n k: The power of the Q operator.\n measurement: Boolean flag to indicate if measurements should be included in the\n circuits.\n\n Returns:\n The circuit :math:`Q^k X |0\\rangle`.\n \"\"\"\n num_qubits = max(\n estimation_problem.state_preparation.num_qubits,\n estimation_problem.grover_operator.num_qubits,\n )\n circuit = QuantumCircuit(num_qubits, name=\"circuit\")\n\n # add classical register if needed\n if measurement:\n c = ClassicalRegister(len(estimation_problem.objective_qubits))\n circuit.add_register(c)\n\n # add A operator\n circuit.compose(estimation_problem.state_preparation, inplace=True)\n\n # add Q^k\n if k != 0:\n 
circuit.compose(estimation_problem.grover_operator.power(k), inplace=True)\n\n # add optional measurement\n if measurement:\n # real hardware can currently not handle operations after measurements, which might\n # happen if the circuit gets transpiled, hence we're adding a safeguard-barrier\n circuit.barrier()\n circuit.measure(estimation_problem.objective_qubits, c[:])\n\n return circuit\n\n def estimate(self, estimation_problem: EstimationProblem) -> \"FasterAmplitudeEstimationResult\":\n self._num_oracle_calls = 0\n user_defined_shots = self.quantum_instance._run_config.shots\n\n if self._rescale:\n problem = estimation_problem.rescale(0.25)\n else:\n problem = estimation_problem\n\n if self._quantum_instance.is_statevector:\n cos = self._cos_estimate(problem, k=0, shots=1)\n theta = np.arccos(cos) / 2\n theta_ci = [theta, theta]\n theta_cis = [theta_ci]\n num_steps = num_first_stage_steps = 1\n\n else:\n theta_ci = [0, np.arcsin(0.25)]\n first_stage = True\n j_0 = self._maxiter\n\n theta_cis = [theta_ci]\n num_first_stage_steps = 0\n num_steps = 0\n\n def cos_estimate(power, shots):\n return self._cos_estimate(problem, power, shots)\n\n for j in range(1, self._maxiter + 1):\n num_steps += 1\n if first_stage:\n num_first_stage_steps += 1\n c = cos_estimate(2 ** (j - 1), self._shots[0])\n chernoff_ci = self._chernoff(c, self._shots[0])\n theta_ci = [np.arccos(x) / (2 ** (j + 1) + 2) for x in chernoff_ci[::-1]]\n\n if 2 ** (j + 1) * theta_ci[1] >= 3 * np.pi / 8 and j < self._maxiter:\n j_0 = j\n v = 2**j * np.sum(theta_ci)\n first_stage = False\n else:\n cos = cos_estimate(2 ** (j - 1), self._shots[1])\n cos_2 = cos_estimate(2 ** (j - 1) + 2 ** (j_0 - 1), self._shots[1])\n sin = (cos * np.cos(v) - cos_2) / np.sin(v)\n rho = np.arctan2(sin, cos)\n n = int(((2 ** (j + 1) + 2) * theta_ci[1] - rho + np.pi / 3) / (2 * np.pi))\n\n theta_ci = [\n (2 * np.pi * n + rho + sign * np.pi / 3) / (2 ** (j + 1) + 2)\n for sign in [-1, 1]\n ]\n theta_cis.append(theta_ci)\n\n theta = np.mean(theta_ci)\n rescaling = 4 if self._rescale else 1\n value = (rescaling * np.sin(theta)) ** 2\n value_ci = [(rescaling * np.sin(x)) ** 2 for x in theta_ci]\n\n result = FasterAmplitudeEstimationResult()\n result.num_oracle_queries = self._num_oracle_calls\n result.num_steps = num_steps\n result.num_first_state_steps = num_first_stage_steps\n if self._quantum_instance.is_statevector:\n result.success_probability = 1\n else:\n result.success_probability = 1 - (2 * self._maxiter - j_0) * self._delta\n\n result.estimation = value\n result.estimation_processed = problem.post_processing(value)\n result.confidence_interval = value_ci\n result.confidence_interval_processed = tuple(problem.post_processing(x) for x in value_ci)\n result.theta_intervals = theta_cis\n\n # reset shots to what the user had defined\n self.quantum_instance._run_config.shots = user_defined_shots\n return result\n\n\nclass FasterAmplitudeEstimationResult(AmplitudeEstimatorResult):\n \"\"\"The result object for the Faster Amplitude Estimation algorithm.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._success_probability = None\n self._num_steps = None\n self._num_first_state_steps = None\n self._theta_intervals = None\n\n @property\n def success_probability(self) -> int:\n \"\"\"Return the success probability of the algorithm.\"\"\"\n return self._success_probability\n\n @success_probability.setter\n def success_probability(self, probability: int) -> None:\n \"\"\"Set the success probability of the algorithm.\"\"\"\n 
self._success_probability = probability\n\n @property\n def num_steps(self) -> int:\n \"\"\"Return the total number of steps taken in the algorithm.\"\"\"\n return self._num_steps\n\n @num_steps.setter\n def num_steps(self, num_steps: int) -> None:\n \"\"\"Set the total number of steps taken in the algorithm.\"\"\"\n self._num_steps = num_steps\n\n @property\n def num_first_state_steps(self) -> int:\n \"\"\"Return the number of steps taken in the first step of algorithm.\"\"\"\n return self._num_first_state_steps\n\n @num_first_state_steps.setter\n def num_first_state_steps(self, num_steps: int) -> None:\n \"\"\"Set the number of steps taken in the first step of algorithm.\"\"\"\n self._num_first_state_steps = num_steps\n\n @property\n def theta_intervals(self) -> List[List[float]]:\n \"\"\"Return the confidence intervals for the angles in each iteration.\"\"\"\n return self._theta_intervals\n\n @theta_intervals.setter\n def theta_intervals(self, value: List[List[float]]) -> None:\n \"\"\"Set the confidence intervals for the angles in each iteration.\"\"\"\n self._theta_intervals = value\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Two-qubit YY-rotation gate.\"\"\"\n\nfrom typing import Optional\nimport numpy as np\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister\nfrom qiskit.circuit.parameterexpression import ParameterValueType\n\n\nclass RYYGate(Gate):\n r\"\"\"A parametric 2-qubit :math:`Y \\otimes Y` interaction (rotation about YY).\n\n This gate is symmetric, and is maximally entangling at :math:`\\theta = \\pi/2`.\n\n **Circuit Symbol:**\n\n .. parsed-literal::\n\n ┌─────────┐\n q_0: ┤1 ├\n │ Ryy(ϴ) │\n q_1: ┤0 ├\n └─────────┘\n\n **Matrix Representation:**\n\n .. math::\n\n \\newcommand{\\th}{\\frac{\\theta}{2}}\n\n R_{YY}(\\theta) = exp(-i \\th Y{\\otimes}Y) =\n \\begin{pmatrix}\n \\cos(\\th) & 0 & 0 & i\\sin(\\th) \\\\\n 0 & \\cos(\\th) & -i\\sin(\\th) & 0 \\\\\n 0 & -i\\sin(\\th) & \\cos(\\th) & 0 \\\\\n i\\sin(\\th) & 0 & 0 & \\cos(\\th)\n \\end{pmatrix}\n\n **Examples:**\n\n .. math::\n\n R_{YY}(\\theta = 0) = I\n\n .. math::\n\n R_{YY}(\\theta = \\pi) = i Y \\otimes Y\n\n .. math::\n\n R_{YY}(\\theta = \\frac{\\pi}{2}) = \\frac{1}{\\sqrt{2}}\n \\begin{pmatrix}\n 1 & 0 & 0 & i \\\\\n 0 & 1 & -i & 0 \\\\\n 0 & -i & 1 & 0 \\\\\n i & 0 & 0 & 1\n \\end{pmatrix}\n \"\"\"\n\n def __init__(self, theta: ParameterValueType, label: Optional[str] = None):\n \"\"\"Create new RYY gate.\"\"\"\n super().__init__(\"ryy\", 2, [theta], label=label)\n\n def _define(self):\n \"\"\"Calculate a subcircuit that implements this unitary.\"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .x import CXGate\n from .rx import RXGate\n from .rz import RZGate\n\n # ┌─────────┐ ┌──────────┐\n # q_0: ┤ Rx(π/2) ├──■─────────────■──┤ Rx(-π/2) ├\n # ├─────────┤┌─┴─┐┌───────┐┌─┴─┐├──────────┤\n # q_1: ┤ Rx(π/2) ├┤ X ├┤ Rz(0) ├┤ X ├┤ Rx(-π/2) ├\n # └─────────┘└───┘└───────┘└───┘└──────────┘\n q = QuantumRegister(2, \"q\")\n theta = self.params[0]\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (RXGate(np.pi / 2), [q[0]], []),\n (RXGate(np.pi / 2), [q[1]], []),\n (CXGate(), [q[0], q[1]], []),\n (RZGate(theta), [q[1]], []),\n (CXGate(), [q[0], q[1]], []),\n (RXGate(-np.pi / 2), [q[0]], []),\n (RXGate(-np.pi / 2), [q[1]], []),\n ]\n for instr, qargs, cargs in rules:\n qc._append(instr, qargs, cargs)\n\n self.definition = qc\n\n def inverse(self):\n \"\"\"Return inverse RYY gate (i.e. with the negative rotation angle).\"\"\"\n return RYYGate(-self.params[0])\n\n def __array__(self, dtype=None):\n \"\"\"Return a numpy.array for the RYY gate.\"\"\"\n theta = float(self.params[0])\n cos = np.cos(theta / 2)\n isin = 1j * np.sin(theta / 2)\n return np.array(\n [[cos, 0, 0, isin], [0, cos, -isin, 0], [0, -isin, cos, 0], [isin, 0, 0, cos]],\n dtype=dtype,\n )\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test the RZXCalibrationBuilderNoEcho.\"\"\"\n\nfrom math import pi, erf, ceil\n\nimport numpy as np\n\nfrom qiskit import circuit, schedule\nfrom qiskit.transpiler import PassManager\nfrom qiskit.test import QiskitTestCase\nfrom qiskit.pulse import (\n Play,\n Delay,\n ShiftPhase,\n ControlChannel,\n DriveChannel,\n GaussianSquare,\n)\nfrom qiskit.transpiler.passes.calibration.builders import (\n RZXCalibrationBuilderNoEcho,\n)\nfrom qiskit.test.mock import FakeAthens\n\n\nclass TestCalibrationBuilder(QiskitTestCase):\n \"\"\"Test the Calibration Builder.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.backend = FakeAthens()\n self.inst_map = self.backend.defaults().instruction_schedule_map\n\n\nclass TestRZXCalibrationBuilderNoEcho(TestCalibrationBuilder):\n \"\"\"Test RZXCalibrationBuilderNoEcho.\"\"\"\n\n def test_rzx_calibration_builder(self):\n \"\"\"Test whether RZXCalibrationBuilderNoEcho scales pulses correctly.\"\"\"\n\n # Define a circuit with one RZX gate and an angle theta.\n theta = pi / 3\n rzx_qc = circuit.QuantumCircuit(2)\n rzx_qc.rzx(theta / 2, 1, 0)\n\n # Verify that there are no calibrations for this circuit yet.\n self.assertEqual(rzx_qc.calibrations, {})\n\n # apply the RZXCalibrationBuilderNoEcho.\n pass_ = RZXCalibrationBuilderNoEcho(\n instruction_schedule_map=self.backend.defaults().instruction_schedule_map,\n qubit_channel_mapping=self.backend.configuration().qubit_channel_mapping,\n )\n cal_qc = PassManager(pass_).run(rzx_qc)\n rzx_qc_duration = schedule(cal_qc, self.backend).duration\n\n # Check that the calibrations contain the correct instructions\n # and pulses on the correct channels.\n rzx_qc_instructions = cal_qc.calibrations[\"rzx\"][((1, 0), (theta / 2,))].instructions\n self.assertEqual(rzx_qc_instructions[0][1].channel, DriveChannel(0))\n self.assertTrue(isinstance(rzx_qc_instructions[0][1], Play))\n self.assertTrue(isinstance(rzx_qc_instructions[0][1].pulse, GaussianSquare))\n self.assertEqual(rzx_qc_instructions[1][1].channel, DriveChannel(1))\n self.assertTrue(isinstance(rzx_qc_instructions[1][1], Delay))\n self.assertEqual(rzx_qc_instructions[2][1].channel, ControlChannel(1))\n self.assertTrue(isinstance(rzx_qc_instructions[2][1], Play))\n self.assertTrue(isinstance(rzx_qc_instructions[2][1].pulse, GaussianSquare))\n\n # Calculate the duration of one scaled Gaussian square pulse from the CX gate.\n cx_sched = self.inst_map.get(\"cx\", qubits=(1, 0))\n\n crs = []\n for time, inst in cx_sched.instructions:\n\n # Identify the CR pulses.\n if isinstance(inst, Play) and not isinstance(inst, ShiftPhase):\n if isinstance(inst.channel, ControlChannel):\n crs.append((time, inst))\n\n pulse_ = crs[0][1].pulse\n amp = pulse_.amp\n width = pulse_.width\n sigma = pulse_.sigma\n n_sigmas = (pulse_.duration - width) / sigma\n sample_mult = 16\n\n gaussian_area = abs(amp) * sigma * np.sqrt(2 * np.pi) * erf(n_sigmas)\n area = gaussian_area + abs(amp) * width\n target_area = abs(theta) / (np.pi / 2.0) * area\n width = (target_area - gaussian_area) / abs(amp)\n duration = 
ceil((width + n_sigmas * sigma) / sample_mult) * sample_mult\n\n # Check whether the durations of the RZX pulse and\n # the scaled CR pulse from the CX gate match.\n self.assertEqual(rzx_qc_duration, duration)\n",
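The area-scaling arithmetic this test re-derives can be written as a small standalone helper. This is a sketch with illustrative names and made-up sample values, not the RZXCalibrationBuilderNoEcho API:

```python
# Sketch: stretch a GaussianSquare cross-resonance pulse so its area scales
# linearly with the target RZX angle, then round up to the sample multiple.
from math import pi, erf, ceil, sqrt

def scaled_rzx_duration(amp, width, sigma, duration, theta, sample_mult=16):
    n_sigmas = (duration - width) / sigma
    gaussian_area = abs(amp) * sigma * sqrt(2 * pi) * erf(n_sigmas)
    area = gaussian_area + abs(amp) * width          # area of the CX-calibrated pulse
    target_area = abs(theta) / (pi / 2) * area       # RZX(theta) needs theta/(pi/2) of it
    new_width = (target_area - gaussian_area) / abs(amp)
    return ceil((new_width + n_sigmas * sigma) / sample_mult) * sample_mult

# Hypothetical pulse parameters, for illustration only.
print(scaled_rzx_duration(amp=0.2, width=400, sigma=64, duration=656, theta=pi / 3))
```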
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test random operator functions.\"\"\"\n\nimport unittest\nfrom test import combine\nfrom ddt import ddt\nimport numpy as np\n\nfrom qiskit.test import QiskitTestCase\nfrom qiskit.quantum_info import Statevector, DensityMatrix\nfrom qiskit.quantum_info.random import random_statevector\nfrom qiskit.quantum_info.random import random_density_matrix\n\n\n@ddt\nclass TestRandomStatevector(QiskitTestCase):\n \"\"\"Testing random_unitary function.\"\"\"\n\n @combine(dims=[(2,), (3,), (2, 2), (2, 3)])\n def test_tuple_dims(self, dims):\n \"\"\"Test random_statevector is valid with dims {dims}.\"\"\"\n value = random_statevector(dims)\n self.assertIsInstance(value, Statevector)\n self.assertTrue(value.is_valid())\n self.assertEqual(value.dims(), dims)\n\n @combine(dim=[2, 3, 4, 5])\n def test_int_dims(self, dim):\n \"\"\"Test random_statevector is valid with dims {dim}.\"\"\"\n value = random_statevector(dim)\n self.assertIsInstance(value, Statevector)\n self.assertTrue(value.is_valid())\n self.assertEqual(np.product(value.dims()), dim)\n\n def test_fixed_seed(self):\n \"\"\"Test fixing seed fixes output\"\"\"\n seed = 1532\n value1 = random_statevector(4, seed=seed)\n value2 = random_statevector(4, seed=seed)\n self.assertEqual(value1, value2)\n\n def test_not_global_seed(self):\n \"\"\"Test fixing seed is locally scoped.\"\"\"\n seed = 314159\n test_cases = 100\n random_statevector(2, seed=seed)\n rng_before = np.random.randint(1000, size=test_cases)\n random_statevector(2, seed=seed)\n rng_after = np.random.randint(1000, size=test_cases)\n self.assertFalse(np.all(rng_before == rng_after))\n\n\n@ddt\nclass TestRandomDensityMatrix(QiskitTestCase):\n \"\"\"Testing random_density_matrix function.\"\"\"\n\n @combine(dims=[(2,), (3,), (2, 2), (2, 3)], method=[\"Hilbert-Schmidt\", \"Bures\"])\n def test_tuple_dims(self, dims, method):\n \"\"\"Test random_density_matrix {method} method is valid with dims {dims}.\"\"\"\n value = random_density_matrix(dims, method=method)\n self.assertIsInstance(value, DensityMatrix)\n self.assertTrue(value.is_valid())\n self.assertEqual(value.dims(), dims)\n\n @combine(dim=[2, 3, 4, 5], method=[\"Hilbert-Schmidt\", \"Bures\"])\n def test_int_dims(self, dim, method):\n \"\"\"Test random_density_matrix {method} method is valid with dims {dim}.\"\"\"\n value = random_density_matrix(dim, method=method)\n self.assertIsInstance(value, DensityMatrix)\n self.assertTrue(value.is_valid())\n self.assertEqual(np.product(value.dims()), dim)\n\n @combine(method=[\"Hilbert-Schmidt\", \"Bures\"])\n def test_fixed_seed(self, method):\n \"\"\"Test fixing seed fixes output ({method} method)\"\"\"\n seed = 1532\n value1 = random_density_matrix(4, method=method, seed=seed)\n value2 = random_density_matrix(4, method=method, seed=seed)\n self.assertEqual(value1, value2)\n\n @combine(method=[\"Hilbert-Schmidt\", \"Bures\"])\n def test_not_global_seed(self, method):\n \"\"\"Test fixing seed is locally scoped ({method} method).\"\"\"\n seed = 314159\n test_cases = 100\n random_density_matrix(2, 
method=method, seed=seed)\n rng_before = np.random.randint(1000, size=test_cases)\n random_density_matrix(2, method=method, seed=seed)\n rng_after = np.random.randint(1000, size=test_cases)\n self.assertFalse(np.all(rng_before == rng_after))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
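The seeding behaviour these tests pin down (same seed gives the same object, and per-call seeding never touches NumPy's global RNG) can be illustrated without Qiskit. This is a generic sketch using a local default_rng, not the random_statevector implementation:

```python
import numpy as np

def random_probability_vector(dim, seed=None):
    rng = np.random.default_rng(seed)      # local Generator, not np.random.*
    p = rng.random(dim)
    return p / p.sum()

a = random_probability_vector(4, seed=1532)
b = random_probability_vector(4, seed=1532)
assert np.allclose(a, b)                   # fixed seed -> reproducible output

before = np.random.get_state()[1].copy()
random_probability_vector(4, seed=314159)
after = np.random.get_state()[1]
assert np.all(before == after)             # global RNG state is untouched
```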
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Tests for PauliTable class.\"\"\"\n\nimport unittest\nfrom test import combine\nfrom ddt import ddt\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\nfrom qiskit import QiskitError\nfrom qiskit.test import QiskitTestCase\nfrom qiskit.quantum_info.operators.symplectic import PauliTable\n\n\ndef pauli_mat(label):\n \"\"\"Return Pauli matrix from a Pauli label\"\"\"\n mat = np.eye(1, dtype=complex)\n for i in label:\n if i == \"I\":\n mat = np.kron(mat, np.eye(2, dtype=complex))\n elif i == \"X\":\n mat = np.kron(mat, np.array([[0, 1], [1, 0]], dtype=complex))\n elif i == \"Y\":\n mat = np.kron(mat, np.array([[0, -1j], [1j, 0]], dtype=complex))\n elif i == \"Z\":\n mat = np.kron(mat, np.array([[1, 0], [0, -1]], dtype=complex))\n else:\n raise QiskitError(f\"Invalid Pauli string {i}\")\n return mat\n\n\nclass TestPauliTableInit(QiskitTestCase):\n \"\"\"Tests for PauliTable initialization.\"\"\"\n\n def test_array_init(self):\n \"\"\"Test array initialization.\"\"\"\n # Matrix array initialization\n with self.subTest(msg=\"bool array\"):\n target = np.array([[False, False], [True, True]])\n value = PauliTable(target)._array\n self.assertTrue(np.all(value == target))\n\n with self.subTest(msg=\"bool array no copy\"):\n target = np.array([[False, True], [True, True]])\n value = PauliTable(target)._array\n value[0, 0] = not value[0, 0]\n self.assertTrue(np.all(value == target))\n\n with self.subTest(msg=\"bool array raises\"):\n array = np.array([[False, False, False], [True, True, True]])\n self.assertRaises(QiskitError, PauliTable, array)\n\n def test_vector_init(self):\n \"\"\"Test vector initialization.\"\"\"\n # Vector array initialization\n with self.subTest(msg=\"bool vector\"):\n target = np.array([False, False, False, False])\n value = PauliTable(target)._array\n self.assertTrue(np.all(value == target))\n\n with self.subTest(msg=\"bool vector no copy\"):\n target = np.array([False, True, True, False])\n value = PauliTable(target)._array\n value[0, 0] = not value[0, 0]\n self.assertTrue(np.all(value == target))\n\n def test_string_init(self):\n \"\"\"Test string initialization.\"\"\"\n # String initialization\n with self.subTest(msg='str init \"I\"'):\n value = PauliTable(\"I\")._array\n target = np.array([[False, False]], dtype=bool)\n self.assertTrue(np.all(np.array(value == target)))\n\n with self.subTest(msg='str init \"X\"'):\n value = PauliTable(\"X\")._array\n target = np.array([[True, False]], dtype=bool)\n self.assertTrue(np.all(np.array(value == target)))\n\n with self.subTest(msg='str init \"Y\"'):\n value = PauliTable(\"Y\")._array\n target = np.array([[True, True]], dtype=bool)\n self.assertTrue(np.all(np.array(value == target)))\n\n with self.subTest(msg='str init \"Z\"'):\n value = PauliTable(\"Z\")._array\n target = np.array([[False, True]], dtype=bool)\n self.assertTrue(np.all(np.array(value == target)))\n\n with self.subTest(msg='str init \"IX\"'):\n value = PauliTable(\"IX\")._array\n target = np.array([[True, False, False, False]], dtype=bool)\n 
self.assertTrue(np.all(np.array(value == target)))\n\n with self.subTest(msg='str init \"XI\"'):\n value = PauliTable(\"XI\")._array\n target = np.array([[False, True, False, False]], dtype=bool)\n self.assertTrue(np.all(np.array(value == target)))\n\n with self.subTest(msg='str init \"YZ\"'):\n value = PauliTable(\"YZ\")._array\n target = np.array([[False, True, True, True]], dtype=bool)\n self.assertTrue(np.all(np.array(value == target)))\n\n with self.subTest(msg='str init \"XIZ\"'):\n value = PauliTable(\"XIZ\")._array\n target = np.array([[False, False, True, True, False, False]], dtype=bool)\n self.assertTrue(np.all(np.array(value == target)))\n\n def test_table_init(self):\n \"\"\"Test table initialization.\"\"\"\n # Pauli Table initialization\n with self.subTest(msg=\"PauliTable\"):\n target = PauliTable.from_labels([\"XI\", \"IX\", \"IZ\"])\n value = PauliTable(target)\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"PauliTable no copy\"):\n target = PauliTable.from_labels([\"XI\", \"IX\", \"IZ\"])\n value = PauliTable(target)\n value[0] = \"II\"\n self.assertEqual(value, target)\n\n\nclass TestPauliTableProperties(QiskitTestCase):\n \"\"\"Tests for PauliTable properties.\"\"\"\n\n def test_array_propertiy(self):\n \"\"\"Test array property\"\"\"\n\n with self.subTest(msg=\"array\"):\n pauli = PauliTable(\"II\")\n array = np.zeros([2, 4], dtype=bool)\n self.assertTrue(np.all(pauli.array == array))\n\n with self.subTest(msg=\"set array\"):\n pauli = PauliTable(\"XX\")\n array = np.zeros([1, 4], dtype=bool)\n pauli.array = array\n self.assertTrue(np.all(pauli.array == array))\n\n with self.subTest(msg=\"set array raises\"):\n\n def set_array_raise():\n pauli = PauliTable(\"XXX\")\n pauli.array = np.eye(4)\n return pauli\n\n self.assertRaises(ValueError, set_array_raise)\n\n def test_x_propertiy(self):\n \"\"\"Test X property\"\"\"\n with self.subTest(msg=\"X\"):\n pauli = PauliTable.from_labels([\"XI\", \"IZ\", \"YY\"])\n array = np.array([[False, True], [False, False], [True, True]], dtype=bool)\n self.assertTrue(np.all(pauli.X == array))\n\n with self.subTest(msg=\"set X\"):\n pauli = PauliTable.from_labels([\"XI\", \"IZ\"])\n val = np.array([[False, False], [True, True]], dtype=bool)\n pauli.X = val\n self.assertEqual(pauli, PauliTable.from_labels([\"II\", \"XY\"]))\n\n with self.subTest(msg=\"set X raises\"):\n\n def set_x():\n pauli = PauliTable.from_labels([\"XI\", \"IZ\"])\n val = np.array([[False, False, False], [True, True, True]], dtype=bool)\n pauli.X = val\n return pauli\n\n self.assertRaises(Exception, set_x)\n\n def test_z_propertiy(self):\n \"\"\"Test Z property\"\"\"\n with self.subTest(msg=\"Z\"):\n pauli = PauliTable.from_labels([\"XI\", \"IZ\", \"YY\"])\n array = np.array([[False, False], [True, False], [True, True]], dtype=bool)\n self.assertTrue(np.all(pauli.Z == array))\n\n with self.subTest(msg=\"set Z\"):\n pauli = PauliTable.from_labels([\"XI\", \"IZ\"])\n val = np.array([[False, False], [True, True]], dtype=bool)\n pauli.Z = val\n self.assertEqual(pauli, PauliTable.from_labels([\"XI\", \"ZZ\"]))\n\n with self.subTest(msg=\"set Z raises\"):\n\n def set_z():\n pauli = PauliTable.from_labels([\"XI\", \"IZ\"])\n val = np.array([[False, False, False], [True, True, True]], dtype=bool)\n pauli.Z = val\n return pauli\n\n self.assertRaises(Exception, set_z)\n\n def test_shape_propertiy(self):\n \"\"\"Test shape property\"\"\"\n shape = (3, 8)\n pauli = PauliTable(np.zeros(shape))\n self.assertEqual(pauli.shape, shape)\n\n def 
test_size_propertiy(self):\n \"\"\"Test size property\"\"\"\n with self.subTest(msg=\"size\"):\n for j in range(1, 10):\n shape = (j, 8)\n pauli = PauliTable(np.zeros(shape))\n self.assertEqual(pauli.size, j)\n\n def test_n_qubit_propertiy(self):\n \"\"\"Test n_qubit property\"\"\"\n with self.subTest(msg=\"num_qubits\"):\n for j in range(1, 10):\n shape = (5, 2 * j)\n pauli = PauliTable(np.zeros(shape))\n self.assertEqual(pauli.num_qubits, j)\n\n def test_eq(self):\n \"\"\"Test __eq__ method.\"\"\"\n pauli1 = PauliTable.from_labels([\"II\", \"XI\"])\n pauli2 = PauliTable.from_labels([\"XI\", \"II\"])\n self.assertEqual(pauli1, pauli1)\n self.assertNotEqual(pauli1, pauli2)\n\n def test_len_methods(self):\n \"\"\"Test __len__ method.\"\"\"\n for j in range(1, 10):\n labels = j * [\"XX\"]\n pauli = PauliTable.from_labels(labels)\n self.assertEqual(len(pauli), j)\n\n def test_add_methods(self):\n \"\"\"Test __add__ method.\"\"\"\n labels1 = [\"XXI\", \"IXX\"]\n labels2 = [\"XXI\", \"ZZI\", \"ZYZ\"]\n pauli1 = PauliTable.from_labels(labels1)\n pauli2 = PauliTable.from_labels(labels2)\n target = PauliTable.from_labels(labels1 + labels2)\n self.assertEqual(target, pauli1 + pauli2)\n\n def test_add_qargs(self):\n \"\"\"Test add method with qargs.\"\"\"\n pauli1 = PauliTable.from_labels([\"IIII\", \"YYYY\"])\n pauli2 = PauliTable.from_labels([\"XY\", \"YZ\"])\n\n with self.subTest(msg=\"qargs=[0, 1]\"):\n target = PauliTable.from_labels([\"IIII\", \"YYYY\", \"IIXY\", \"IIYZ\"])\n self.assertEqual(pauli1 + pauli2([0, 1]), target)\n\n with self.subTest(msg=\"qargs=[0, 3]\"):\n target = PauliTable.from_labels([\"IIII\", \"YYYY\", \"XIIY\", \"YIIZ\"])\n self.assertEqual(pauli1 + pauli2([0, 3]), target)\n\n with self.subTest(msg=\"qargs=[2, 1]\"):\n target = PauliTable.from_labels([\"IIII\", \"YYYY\", \"IYXI\", \"IZYI\"])\n self.assertEqual(pauli1 + pauli2([2, 1]), target)\n\n with self.subTest(msg=\"qargs=[3, 1]\"):\n target = PauliTable.from_labels([\"IIII\", \"YYYY\", \"YIXI\", \"ZIYI\"])\n self.assertEqual(pauli1 + pauli2([3, 1]), target)\n\n def test_getitem_methods(self):\n \"\"\"Test __getitem__ method.\"\"\"\n with self.subTest(msg=\"__getitem__ single\"):\n labels = [\"XI\", \"IY\"]\n pauli = PauliTable.from_labels(labels)\n self.assertEqual(pauli[0], PauliTable(labels[0]))\n self.assertEqual(pauli[1], PauliTable(labels[1]))\n\n with self.subTest(msg=\"__getitem__ array\"):\n labels = np.array([\"XI\", \"IY\", \"IZ\", \"XY\", \"ZX\"])\n pauli = PauliTable.from_labels(labels)\n inds = [0, 3]\n self.assertEqual(pauli[inds], PauliTable.from_labels(labels[inds]))\n inds = np.array([4, 1])\n self.assertEqual(pauli[inds], PauliTable.from_labels(labels[inds]))\n\n with self.subTest(msg=\"__getitem__ slice\"):\n labels = np.array([\"XI\", \"IY\", \"IZ\", \"XY\", \"ZX\"])\n pauli = PauliTable.from_labels(labels)\n self.assertEqual(pauli[:], pauli)\n self.assertEqual(pauli[1:3], PauliTable.from_labels(labels[1:3]))\n\n def test_setitem_methods(self):\n \"\"\"Test __setitem__ method.\"\"\"\n with self.subTest(msg=\"__setitem__ single\"):\n labels = [\"XI\", \"IY\"]\n pauli = PauliTable.from_labels([\"XI\", \"IY\"])\n pauli[0] = \"II\"\n self.assertEqual(pauli[0], PauliTable(\"II\"))\n pauli[1] = \"XX\"\n self.assertEqual(pauli[1], PauliTable(\"XX\"))\n\n def raises_single():\n # Wrong size Pauli\n pauli[0] = \"XXX\"\n\n self.assertRaises(Exception, raises_single)\n\n with self.subTest(msg=\"__setitem__ array\"):\n labels = np.array([\"XI\", \"IY\", \"IZ\"])\n pauli = PauliTable.from_labels(labels)\n 
target = PauliTable.from_labels([\"II\", \"ZZ\"])\n inds = [2, 0]\n pauli[inds] = target\n self.assertEqual(pauli[inds], target)\n\n def raises_array():\n pauli[inds] = PauliTable.from_labels([\"YY\", \"ZZ\", \"XX\"])\n\n self.assertRaises(Exception, raises_array)\n\n with self.subTest(msg=\"__setitem__ slice\"):\n labels = np.array(5 * [\"III\"])\n pauli = PauliTable.from_labels(labels)\n target = PauliTable.from_labels(5 * [\"XXX\"])\n pauli[:] = target\n self.assertEqual(pauli[:], target)\n target = PauliTable.from_labels(2 * [\"ZZZ\"])\n pauli[1:3] = target\n self.assertEqual(pauli[1:3], target)\n\n\nclass TestPauliTableLabels(QiskitTestCase):\n \"\"\"Tests PauliTable label representation conversions.\"\"\"\n\n def test_from_labels_1q(self):\n \"\"\"Test 1-qubit from_labels method.\"\"\"\n labels = [\"I\", \"Z\", \"Z\", \"X\", \"Y\"]\n array = np.array(\n [[False, False], [False, True], [False, True], [True, False], [True, True]], dtype=bool\n )\n target = PauliTable(array)\n value = PauliTable.from_labels(labels)\n self.assertEqual(target, value)\n\n def test_from_labels_2q(self):\n \"\"\"Test 2-qubit from_labels method.\"\"\"\n labels = [\"II\", \"YY\", \"XZ\"]\n array = np.array(\n [[False, False, False, False], [True, True, True, True], [False, True, True, False]],\n dtype=bool,\n )\n target = PauliTable(array)\n value = PauliTable.from_labels(labels)\n self.assertEqual(target, value)\n\n def test_from_labels_5q(self):\n \"\"\"Test 5-qubit from_labels method.\"\"\"\n labels = [5 * \"I\", 5 * \"X\", 5 * \"Y\", 5 * \"Z\"]\n array = np.array(\n [10 * [False], 5 * [True] + 5 * [False], 10 * [True], 5 * [False] + 5 * [True]],\n dtype=bool,\n )\n target = PauliTable(array)\n value = PauliTable.from_labels(labels)\n self.assertEqual(target, value)\n\n def test_to_labels_1q(self):\n \"\"\"Test 1-qubit to_labels method.\"\"\"\n pauli = PauliTable(\n np.array(\n [[False, False], [False, True], [False, True], [True, False], [True, True]],\n dtype=bool,\n )\n )\n target = [\"I\", \"Z\", \"Z\", \"X\", \"Y\"]\n value = pauli.to_labels()\n self.assertEqual(value, target)\n\n def test_to_labels_1q_array(self):\n \"\"\"Test 1-qubit to_labels method w/ array=True.\"\"\"\n pauli = PauliTable(\n np.array(\n [[False, False], [False, True], [False, True], [True, False], [True, True]],\n dtype=bool,\n )\n )\n target = np.array([\"I\", \"Z\", \"Z\", \"X\", \"Y\"])\n value = pauli.to_labels(array=True)\n self.assertTrue(np.all(value == target))\n\n def test_labels_round_trip(self):\n \"\"\"Test from_labels and to_labels round trip.\"\"\"\n target = [\"III\", \"IXZ\", \"XYI\", \"ZZZ\"]\n value = PauliTable.from_labels(target).to_labels()\n self.assertEqual(value, target)\n\n def test_labels_round_trip_array(self):\n \"\"\"Test from_labels and to_labels round trip w/ array=True.\"\"\"\n labels = [\"III\", \"IXZ\", \"XYI\", \"ZZZ\"]\n target = np.array(labels)\n value = PauliTable.from_labels(labels).to_labels(array=True)\n self.assertTrue(np.all(value == target))\n\n\nclass TestPauliTableMatrix(QiskitTestCase):\n \"\"\"Tests PauliTable matrix representation conversions.\"\"\"\n\n def test_to_matrix_1q(self):\n \"\"\"Test 1-qubit to_matrix method.\"\"\"\n labels = [\"X\", \"I\", \"Z\", \"Y\"]\n targets = [pauli_mat(i) for i in labels]\n values = PauliTable.from_labels(labels).to_matrix()\n self.assertTrue(isinstance(values, list))\n for target, value in zip(targets, values):\n self.assertTrue(np.all(value == target))\n\n def test_to_matrix_1q_array(self):\n \"\"\"Test 1-qubit to_matrix method w/ 
array=True.\"\"\"\n labels = [\"Z\", \"I\", \"Y\", \"X\"]\n target = np.array([pauli_mat(i) for i in labels])\n value = PauliTable.from_labels(labels).to_matrix(array=True)\n self.assertTrue(isinstance(value, np.ndarray))\n self.assertTrue(np.all(value == target))\n\n def test_to_matrix_1q_sparse(self):\n \"\"\"Test 1-qubit to_matrix method w/ sparse=True.\"\"\"\n labels = [\"X\", \"I\", \"Z\", \"Y\"]\n targets = [pauli_mat(i) for i in labels]\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))\n\n def test_to_matrix_2q(self):\n \"\"\"Test 2-qubit to_matrix method.\"\"\"\n labels = [\"IX\", \"YI\", \"II\", \"ZZ\"]\n targets = [pauli_mat(i) for i in labels]\n values = PauliTable.from_labels(labels).to_matrix()\n self.assertTrue(isinstance(values, list))\n for target, value in zip(targets, values):\n self.assertTrue(np.all(value == target))\n\n def test_to_matrix_2q_array(self):\n \"\"\"Test 2-qubit to_matrix method w/ array=True.\"\"\"\n labels = [\"ZZ\", \"XY\", \"YX\", \"IZ\"]\n target = np.array([pauli_mat(i) for i in labels])\n value = PauliTable.from_labels(labels).to_matrix(array=True)\n self.assertTrue(isinstance(value, np.ndarray))\n self.assertTrue(np.all(value == target))\n\n def test_to_matrix_2q_sparse(self):\n \"\"\"Test 2-qubit to_matrix method w/ sparse=True.\"\"\"\n labels = [\"IX\", \"II\", \"ZY\", \"YZ\"]\n targets = [pauli_mat(i) for i in labels]\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))\n\n def test_to_matrix_5q(self):\n \"\"\"Test 5-qubit to_matrix method.\"\"\"\n labels = [\"IXIXI\", \"YZIXI\", \"IIXYZ\"]\n targets = [pauli_mat(i) for i in labels]\n values = PauliTable.from_labels(labels).to_matrix()\n self.assertTrue(isinstance(values, list))\n for target, value in zip(targets, values):\n self.assertTrue(np.all(value == target))\n\n def test_to_matrix_5q_sparse(self):\n \"\"\"Test 5-qubit to_matrix method w/ sparse=True.\"\"\"\n labels = [\"XXXYY\", \"IXIZY\", \"ZYXIX\"]\n targets = [pauli_mat(i) for i in labels]\n values = PauliTable.from_labels(labels).to_matrix(sparse=True)\n for mat, targ in zip(values, targets):\n self.assertTrue(isinstance(mat, csr_matrix))\n self.assertTrue(np.all(targ == mat.toarray()))\n\n\nclass TestPauliTableIteration(QiskitTestCase):\n \"\"\"Tests for PauliTable iterators class.\"\"\"\n\n def test_enumerate(self):\n \"\"\"Test enumerate with PauliTable.\"\"\"\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(pauli):\n self.assertEqual(i, PauliTable(labels[idx]))\n\n def test_iter(self):\n \"\"\"Test iter with PauliTable.\"\"\"\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(iter(pauli)):\n self.assertEqual(i, PauliTable(labels[idx]))\n\n def test_zip(self):\n \"\"\"Test zip with PauliTable.\"\"\"\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n pauli = PauliTable.from_labels(labels)\n for label, i in zip(labels, pauli):\n self.assertEqual(i, PauliTable(label))\n\n def test_label_iter(self):\n \"\"\"Test PauliTable label_iter method.\"\"\"\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n pauli = PauliTable.from_labels(labels)\n for 
idx, i in enumerate(pauli.label_iter()):\n self.assertEqual(i, labels[idx])\n\n def test_matrix_iter(self):\n \"\"\"Test PauliTable dense matrix_iter method.\"\"\"\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(pauli.matrix_iter()):\n self.assertTrue(np.all(i == pauli_mat(labels[idx])))\n\n def test_matrix_iter_sparse(self):\n \"\"\"Test PauliTable sparse matrix_iter method.\"\"\"\n labels = [\"III\", \"IXI\", \"IYY\", \"YIZ\", \"XYZ\", \"III\"]\n pauli = PauliTable.from_labels(labels)\n for idx, i in enumerate(pauli.matrix_iter(sparse=True)):\n self.assertTrue(isinstance(i, csr_matrix))\n self.assertTrue(np.all(i.toarray() == pauli_mat(labels[idx])))\n\n\n@ddt\nclass TestPauliTableOperator(QiskitTestCase):\n \"\"\"Tests for PauliTable base operator methods.\"\"\"\n\n @combine(j=range(1, 10))\n def test_tensor(self, j):\n \"\"\"Test tensor method j={j}.\"\"\"\n labels1 = [\"XX\", \"YY\"]\n labels2 = [j * \"I\", j * \"Z\"]\n pauli1 = PauliTable.from_labels(labels1)\n pauli2 = PauliTable.from_labels(labels2)\n\n value = pauli1.tensor(pauli2)\n target = PauliTable.from_labels([i + j for i in labels1 for j in labels2])\n self.assertEqual(value, target)\n\n @combine(j=range(1, 10))\n def test_expand(self, j):\n \"\"\"Test expand method j={j}.\"\"\"\n labels1 = [\"XX\", \"YY\"]\n labels2 = [j * \"I\", j * \"Z\"]\n pauli1 = PauliTable.from_labels(labels1)\n pauli2 = PauliTable.from_labels(labels2)\n\n value = pauli1.expand(pauli2)\n target = PauliTable.from_labels([j + i for j in labels2 for i in labels1])\n self.assertEqual(value, target)\n\n def test_compose_1q(self):\n \"\"\"Test 1-qubit compose methods.\"\"\"\n # Test single qubit Pauli dot products\n pauli = PauliTable.from_labels([\"I\", \"X\", \"Y\", \"Z\"])\n\n with self.subTest(msg=\"compose single I\"):\n target = PauliTable.from_labels([\"I\", \"X\", \"Y\", \"Z\"])\n value = pauli.compose(\"I\")\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"compose single X\"):\n target = PauliTable.from_labels([\"X\", \"I\", \"Z\", \"Y\"])\n value = pauli.compose(\"X\")\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"compose single Y\"):\n target = PauliTable.from_labels([\"Y\", \"Z\", \"I\", \"X\"])\n value = pauli.compose(\"Y\")\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"compose single Z\"):\n target = PauliTable.from_labels([\"Z\", \"Y\", \"X\", \"I\"])\n value = pauli.compose(\"Z\")\n self.assertEqual(target, value)\n\n def test_dot_1q(self):\n \"\"\"Test 1-qubit dot method.\"\"\"\n # Test single qubit Pauli dot products\n pauli = PauliTable.from_labels([\"I\", \"X\", \"Y\", \"Z\"])\n\n with self.subTest(msg=\"dot single I\"):\n target = PauliTable.from_labels([\"I\", \"X\", \"Y\", \"Z\"])\n value = pauli.dot(\"I\")\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single X\"):\n target = PauliTable.from_labels([\"X\", \"I\", \"Z\", \"Y\"])\n value = pauli.dot(\"X\")\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single Y\"):\n target = PauliTable.from_labels([\"Y\", \"Z\", \"I\", \"X\"])\n value = pauli.dot(\"Y\")\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"dot single Z\"):\n target = PauliTable.from_labels([\"Z\", \"Y\", \"X\", \"I\"])\n value = pauli.dot(\"Z\")\n self.assertEqual(target, value)\n\n def test_qargs_compose_1q(self):\n \"\"\"Test 1-qubit compose method with qargs.\"\"\"\n\n pauli1 = PauliTable.from_labels([\"III\", \"XXX\"])\n pauli2 = 
PauliTable(\"Z\")\n\n with self.subTest(msg=\"compose 1-qubit qargs=[0]\"):\n target = PauliTable.from_labels([\"IIZ\", \"XXY\"])\n value = pauli1.compose(pauli2, qargs=[0])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"compose 1-qubit qargs=[1]\"):\n target = PauliTable.from_labels([\"IZI\", \"XYX\"])\n value = pauli1.compose(pauli2, qargs=[1])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"compose 1-qubit qargs=[2]\"):\n target = PauliTable.from_labels([\"ZII\", \"YXX\"])\n value = pauli1.compose(pauli2, qargs=[2])\n self.assertEqual(value, target)\n\n def test_qargs_dot_1q(self):\n \"\"\"Test 1-qubit dot method with qargs.\"\"\"\n\n pauli1 = PauliTable.from_labels([\"III\", \"XXX\"])\n pauli2 = PauliTable(\"Z\")\n\n with self.subTest(msg=\"dot 1-qubit qargs=[0]\"):\n target = PauliTable.from_labels([\"IIZ\", \"XXY\"])\n value = pauli1.dot(pauli2, qargs=[0])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"dot 1-qubit qargs=[1]\"):\n target = PauliTable.from_labels([\"IZI\", \"XYX\"])\n value = pauli1.dot(pauli2, qargs=[1])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"dot 1-qubit qargs=[2]\"):\n target = PauliTable.from_labels([\"ZII\", \"YXX\"])\n value = pauli1.dot(pauli2, qargs=[2])\n self.assertEqual(value, target)\n\n def test_qargs_compose_2q(self):\n \"\"\"Test 2-qubit compose method with qargs.\"\"\"\n\n pauli1 = PauliTable.from_labels([\"III\", \"XXX\"])\n pauli2 = PauliTable(\"ZY\")\n\n with self.subTest(msg=\"compose 2-qubit qargs=[0, 1]\"):\n target = PauliTable.from_labels([\"IZY\", \"XYZ\"])\n value = pauli1.compose(pauli2, qargs=[0, 1])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"compose 2-qubit qargs=[1, 0]\"):\n target = PauliTable.from_labels([\"IYZ\", \"XZY\"])\n value = pauli1.compose(pauli2, qargs=[1, 0])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"compose 2-qubit qargs=[0, 2]\"):\n target = PauliTable.from_labels([\"ZIY\", \"YXZ\"])\n value = pauli1.compose(pauli2, qargs=[0, 2])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"compose 2-qubit qargs=[2, 0]\"):\n target = PauliTable.from_labels([\"YIZ\", \"ZXY\"])\n value = pauli1.compose(pauli2, qargs=[2, 0])\n self.assertEqual(value, target)\n\n def test_qargs_dot_2q(self):\n \"\"\"Test 2-qubit dot method with qargs.\"\"\"\n\n pauli1 = PauliTable.from_labels([\"III\", \"XXX\"])\n pauli2 = PauliTable(\"ZY\")\n\n with self.subTest(msg=\"dot 2-qubit qargs=[0, 1]\"):\n target = PauliTable.from_labels([\"IZY\", \"XYZ\"])\n value = pauli1.dot(pauli2, qargs=[0, 1])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"dot 2-qubit qargs=[1, 0]\"):\n target = PauliTable.from_labels([\"IYZ\", \"XZY\"])\n value = pauli1.dot(pauli2, qargs=[1, 0])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"dot 2-qubit qargs=[0, 2]\"):\n target = PauliTable.from_labels([\"ZIY\", \"YXZ\"])\n value = pauli1.dot(pauli2, qargs=[0, 2])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"dot 2-qubit qargs=[2, 0]\"):\n target = PauliTable.from_labels([\"YIZ\", \"ZXY\"])\n value = pauli1.dot(pauli2, qargs=[2, 0])\n self.assertEqual(value, target)\n\n def test_qargs_compose_3q(self):\n \"\"\"Test 3-qubit compose method with qargs.\"\"\"\n\n pauli1 = PauliTable.from_labels([\"III\", \"XXX\"])\n pauli2 = PauliTable(\"XYZ\")\n\n with self.subTest(msg=\"compose 3-qubit qargs=None\"):\n target = PauliTable.from_labels([\"XYZ\", \"IZY\"])\n value = pauli1.compose(pauli2)\n self.assertEqual(value, target)\n\n with 
self.subTest(msg=\"compose 3-qubit qargs=[0, 1, 2]\"):\n target = PauliTable.from_labels([\"XYZ\", \"IZY\"])\n value = pauli1.compose(pauli2, qargs=[0, 1, 2])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"compose 3-qubit qargs=[2, 1, 0]\"):\n target = PauliTable.from_labels([\"ZYX\", \"YZI\"])\n value = pauli1.compose(pauli2, qargs=[2, 1, 0])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"compose 3-qubit qargs=[1, 0, 2]\"):\n target = PauliTable.from_labels([\"XZY\", \"IYZ\"])\n value = pauli1.compose(pauli2, qargs=[1, 0, 2])\n self.assertEqual(value, target)\n\n def test_qargs_dot_3q(self):\n \"\"\"Test 3-qubit dot method with qargs.\"\"\"\n\n pauli1 = PauliTable.from_labels([\"III\", \"XXX\"])\n pauli2 = PauliTable(\"XYZ\")\n\n with self.subTest(msg=\"dot 3-qubit qargs=None\"):\n target = PauliTable.from_labels([\"XYZ\", \"IZY\"])\n value = pauli1.dot(pauli2, qargs=[0, 1, 2])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"dot 3-qubit qargs=[0, 1, 2]\"):\n target = PauliTable.from_labels([\"XYZ\", \"IZY\"])\n value = pauli1.dot(pauli2, qargs=[0, 1, 2])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"dot 3-qubit qargs=[2, 1, 0]\"):\n target = PauliTable.from_labels([\"ZYX\", \"YZI\"])\n value = pauli1.dot(pauli2, qargs=[2, 1, 0])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"dot 3-qubit qargs=[1, 0, 2]\"):\n target = PauliTable.from_labels([\"XZY\", \"IYZ\"])\n value = pauli1.dot(pauli2, qargs=[1, 0, 2])\n self.assertEqual(value, target)\n\n\nclass TestPauliTableMethods(QiskitTestCase):\n \"\"\"Tests for PauliTable utility methods class.\"\"\"\n\n def test_sort(self):\n \"\"\"Test sort method.\"\"\"\n with self.subTest(msg=\"1 qubit standard order\"):\n unsrt = [\"X\", \"Z\", \"I\", \"Y\", \"X\", \"Z\"]\n srt = [\"I\", \"X\", \"X\", \"Y\", \"Z\", \"Z\"]\n target = PauliTable.from_labels(srt)\n value = PauliTable.from_labels(unsrt).sort()\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"1 qubit weight order\"):\n unsrt = [\"X\", \"Z\", \"I\", \"Y\", \"X\", \"Z\"]\n srt = [\"I\", \"X\", \"X\", \"Y\", \"Z\", \"Z\"]\n target = PauliTable.from_labels(srt)\n value = PauliTable.from_labels(unsrt).sort(weight=True)\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"2 qubit standard order\"):\n srt = [\n \"II\",\n \"IX\",\n \"IY\",\n \"IY\",\n \"XI\",\n \"XX\",\n \"XY\",\n \"XZ\",\n \"YI\",\n \"YX\",\n \"YY\",\n \"YZ\",\n \"ZI\",\n \"ZI\",\n \"ZX\",\n \"ZY\",\n \"ZZ\",\n \"ZZ\",\n ]\n unsrt = srt.copy()\n np.random.shuffle(unsrt)\n target = PauliTable.from_labels(srt)\n value = PauliTable.from_labels(unsrt).sort()\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"2 qubit weight order\"):\n srt = [\n \"II\",\n \"IX\",\n \"IX\",\n \"IY\",\n \"IZ\",\n \"XI\",\n \"YI\",\n \"YI\",\n \"ZI\",\n \"XX\",\n \"XX\",\n \"XY\",\n \"XZ\",\n \"YX\",\n \"YY\",\n \"YY\",\n \"YZ\",\n \"ZX\",\n \"ZX\",\n \"ZY\",\n \"ZZ\",\n ]\n unsrt = srt.copy()\n np.random.shuffle(unsrt)\n target = PauliTable.from_labels(srt)\n value = PauliTable.from_labels(unsrt).sort(weight=True)\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"3 qubit standard order\"):\n srt = [\n \"III\",\n \"III\",\n \"IIX\",\n \"IIY\",\n \"IIZ\",\n \"IXI\",\n \"IXX\",\n \"IXY\",\n \"IXZ\",\n \"IYI\",\n \"IYX\",\n \"IYY\",\n \"IYZ\",\n \"IZI\",\n \"IZX\",\n \"IZY\",\n \"IZY\",\n \"IZZ\",\n \"XII\",\n \"XII\",\n \"XIX\",\n \"XIY\",\n \"XIZ\",\n \"XXI\",\n \"XXX\",\n \"XXY\",\n \"XXZ\",\n \"XYI\",\n \"XYX\",\n \"XYY\",\n \"XYZ\",\n 
\"XYZ\",\n \"XZI\",\n \"XZX\",\n \"XZY\",\n \"XZZ\",\n \"YII\",\n \"YIX\",\n \"YIY\",\n \"YIZ\",\n \"YXI\",\n \"YXX\",\n \"YXY\",\n \"YXZ\",\n \"YXZ\",\n \"YYI\",\n \"YYX\",\n \"YYX\",\n \"YYY\",\n \"YYZ\",\n \"YZI\",\n \"YZX\",\n \"YZY\",\n \"YZZ\",\n \"ZII\",\n \"ZIX\",\n \"ZIY\",\n \"ZIZ\",\n \"ZXI\",\n \"ZXX\",\n \"ZXX\",\n \"ZXY\",\n \"ZXZ\",\n \"ZYI\",\n \"ZYI\",\n \"ZYX\",\n \"ZYY\",\n \"ZYZ\",\n \"ZZI\",\n \"ZZX\",\n \"ZZY\",\n \"ZZZ\",\n ]\n unsrt = srt.copy()\n np.random.shuffle(unsrt)\n target = PauliTable.from_labels(srt)\n value = PauliTable.from_labels(unsrt).sort()\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"3 qubit weight order\"):\n srt = [\n \"III\",\n \"IIX\",\n \"IIY\",\n \"IIZ\",\n \"IXI\",\n \"IYI\",\n \"IZI\",\n \"XII\",\n \"YII\",\n \"ZII\",\n \"IXX\",\n \"IXY\",\n \"IXZ\",\n \"IYX\",\n \"IYY\",\n \"IYZ\",\n \"IZX\",\n \"IZY\",\n \"IZZ\",\n \"XIX\",\n \"XIY\",\n \"XIZ\",\n \"XXI\",\n \"XYI\",\n \"XZI\",\n \"XZI\",\n \"YIX\",\n \"YIY\",\n \"YIZ\",\n \"YXI\",\n \"YYI\",\n \"YZI\",\n \"YZI\",\n \"ZIX\",\n \"ZIY\",\n \"ZIZ\",\n \"ZXI\",\n \"ZYI\",\n \"ZZI\",\n \"ZZI\",\n \"XXX\",\n \"XXY\",\n \"XXZ\",\n \"XYX\",\n \"XYY\",\n \"XYZ\",\n \"XZX\",\n \"XZY\",\n \"XZZ\",\n \"YXX\",\n \"YXY\",\n \"YXZ\",\n \"YYX\",\n \"YYY\",\n \"YYZ\",\n \"YZX\",\n \"YZY\",\n \"YZZ\",\n \"ZXX\",\n \"ZXY\",\n \"ZXZ\",\n \"ZYX\",\n \"ZYY\",\n \"ZYZ\",\n \"ZZX\",\n \"ZZY\",\n \"ZZZ\",\n ]\n unsrt = srt.copy()\n np.random.shuffle(unsrt)\n target = PauliTable.from_labels(srt)\n value = PauliTable.from_labels(unsrt).sort(weight=True)\n self.assertEqual(target, value)\n\n def test_unique(self):\n \"\"\"Test unique method.\"\"\"\n with self.subTest(msg=\"1 qubit\"):\n labels = [\"X\", \"Z\", \"X\", \"X\", \"I\", \"Y\", \"I\", \"X\", \"Z\", \"Z\", \"X\", \"I\"]\n unique = [\"X\", \"Z\", \"I\", \"Y\"]\n target = PauliTable.from_labels(unique)\n value = PauliTable.from_labels(labels).unique()\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"2 qubit\"):\n labels = [\"XX\", \"IX\", \"XX\", \"II\", \"IZ\", \"ZI\", \"YX\", \"YX\", \"ZZ\", \"IX\", \"XI\"]\n unique = [\"XX\", \"IX\", \"II\", \"IZ\", \"ZI\", \"YX\", \"ZZ\", \"XI\"]\n target = PauliTable.from_labels(unique)\n value = PauliTable.from_labels(labels).unique()\n self.assertEqual(target, value)\n\n with self.subTest(msg=\"10 qubit\"):\n labels = [10 * \"X\", 10 * \"I\", 10 * \"X\"]\n unique = [10 * \"X\", 10 * \"I\"]\n target = PauliTable.from_labels(unique)\n value = PauliTable.from_labels(labels).unique()\n self.assertEqual(target, value)\n\n def test_delete(self):\n \"\"\"Test delete method.\"\"\"\n with self.subTest(msg=\"single row\"):\n for j in range(1, 6):\n pauli = PauliTable.from_labels([j * \"X\", j * \"Y\"])\n self.assertEqual(pauli.delete(0), PauliTable(j * \"Y\"))\n self.assertEqual(pauli.delete(1), PauliTable(j * \"X\"))\n\n with self.subTest(msg=\"multiple rows\"):\n for j in range(1, 6):\n pauli = PauliTable.from_labels([j * \"X\", j * \"Y\", j * \"Z\"])\n self.assertEqual(pauli.delete([0, 2]), PauliTable(j * \"Y\"))\n self.assertEqual(pauli.delete([1, 2]), PauliTable(j * \"X\"))\n self.assertEqual(pauli.delete([0, 1]), PauliTable(j * \"Z\"))\n\n with self.subTest(msg=\"single qubit\"):\n pauli = PauliTable.from_labels([\"IIX\", \"IYI\", \"ZII\"])\n value = pauli.delete(0, qubit=True)\n target = PauliTable.from_labels([\"II\", \"IY\", \"ZI\"])\n self.assertEqual(value, target)\n value = pauli.delete(1, qubit=True)\n target = PauliTable.from_labels([\"IX\", \"II\", \"ZI\"])\n self.assertEqual(value, 
target)\n value = pauli.delete(2, qubit=True)\n target = PauliTable.from_labels([\"IX\", \"YI\", \"II\"])\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"multiple qubits\"):\n pauli = PauliTable.from_labels([\"IIX\", \"IYI\", \"ZII\"])\n value = pauli.delete([0, 1], qubit=True)\n target = PauliTable.from_labels([\"I\", \"I\", \"Z\"])\n self.assertEqual(value, target)\n value = pauli.delete([1, 2], qubit=True)\n target = PauliTable.from_labels([\"X\", \"I\", \"I\"])\n self.assertEqual(value, target)\n value = pauli.delete([0, 2], qubit=True)\n target = PauliTable.from_labels([\"I\", \"Y\", \"I\"])\n self.assertEqual(value, target)\n\n def test_insert(self):\n \"\"\"Test insert method.\"\"\"\n # Insert single row\n for j in range(1, 10):\n pauli = PauliTable(j * \"X\")\n target0 = PauliTable.from_labels([j * \"I\", j * \"X\"])\n target1 = PauliTable.from_labels([j * \"X\", j * \"I\"])\n\n with self.subTest(msg=f\"single row from str ({j})\"):\n value0 = pauli.insert(0, j * \"I\")\n self.assertEqual(value0, target0)\n value1 = pauli.insert(1, j * \"I\")\n self.assertEqual(value1, target1)\n\n with self.subTest(msg=f\"single row from PauliTable ({j})\"):\n value0 = pauli.insert(0, PauliTable(j * \"I\"))\n self.assertEqual(value0, target0)\n value1 = pauli.insert(1, PauliTable(j * \"I\"))\n self.assertEqual(value1, target1)\n\n with self.subTest(msg=f\"single row from array ({j})\"):\n value0 = pauli.insert(0, PauliTable(j * \"I\").array)\n self.assertEqual(value0, target0)\n value1 = pauli.insert(1, PauliTable(j * \"I\").array)\n self.assertEqual(value1, target1)\n\n # Insert multiple rows\n for j in range(1, 10):\n pauli = PauliTable(j * \"X\")\n insert = PauliTable.from_labels([j * \"I\", j * \"Y\", j * \"Z\"])\n target0 = insert + pauli\n target1 = pauli + insert\n\n with self.subTest(msg=f\"multiple-rows from PauliTable ({j})\"):\n value0 = pauli.insert(0, insert)\n self.assertEqual(value0, target0)\n value1 = pauli.insert(1, insert)\n self.assertEqual(value1, target1)\n\n with self.subTest(msg=f\"multiple-rows from array ({j})\"):\n value0 = pauli.insert(0, insert.array)\n self.assertEqual(value0, target0)\n value1 = pauli.insert(1, insert.array)\n self.assertEqual(value1, target1)\n\n # Insert single column\n pauli = PauliTable.from_labels([\"X\", \"Y\", \"Z\"])\n for i in [\"I\", \"X\", \"Y\", \"Z\"]:\n target0 = PauliTable.from_labels([\"X\" + i, \"Y\" + i, \"Z\" + i])\n target1 = PauliTable.from_labels([i + \"X\", i + \"Y\", i + \"Z\"])\n\n with self.subTest(msg=\"single-column single-val from str\"):\n value = pauli.insert(0, i, qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, i, qubit=True)\n self.assertEqual(value, target1)\n\n with self.subTest(msg=\"single-column single-val from PauliTable\"):\n value = pauli.insert(0, PauliTable(i), qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, PauliTable(i), qubit=True)\n self.assertEqual(value, target1)\n\n with self.subTest(msg=\"single-column single-val from array\"):\n value = pauli.insert(0, PauliTable(i).array, qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, PauliTable(i).array, qubit=True)\n self.assertEqual(value, target1)\n\n # Insert single column with multiple values\n pauli = PauliTable.from_labels([\"X\", \"Y\", \"Z\"])\n for i in [(\"I\", \"X\", \"Y\"), (\"X\", \"Y\", \"Z\"), (\"Y\", \"Z\", \"I\")]:\n target0 = PauliTable.from_labels([\"X\" + i[0], \"Y\" + i[1], \"Z\" + i[2]])\n target1 = PauliTable.from_labels([i[0] + \"X\", i[1] + \"Y\", 
i[2] + \"Z\"])\n\n with self.subTest(msg=\"single-column multiple-vals from PauliTable\"):\n value = pauli.insert(0, PauliTable.from_labels(i), qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, PauliTable.from_labels(i), qubit=True)\n self.assertEqual(value, target1)\n\n with self.subTest(msg=\"single-column multiple-vals from array\"):\n value = pauli.insert(0, PauliTable.from_labels(i).array, qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, PauliTable.from_labels(i).array, qubit=True)\n self.assertEqual(value, target1)\n\n # Insert multiple columns from single\n pauli = PauliTable.from_labels([\"X\", \"Y\", \"Z\"])\n for j in range(1, 5):\n for i in [j * \"I\", j * \"X\", j * \"Y\", j * \"Z\"]:\n target0 = PauliTable.from_labels([\"X\" + i, \"Y\" + i, \"Z\" + i])\n target1 = PauliTable.from_labels([i + \"X\", i + \"Y\", i + \"Z\"])\n\n with self.subTest(msg=\"multiple-columns single-val from str\"):\n value = pauli.insert(0, i, qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, i, qubit=True)\n self.assertEqual(value, target1)\n\n with self.subTest(msg=\"multiple-columns single-val from PauliTable\"):\n value = pauli.insert(0, PauliTable(i), qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, PauliTable(i), qubit=True)\n self.assertEqual(value, target1)\n\n with self.subTest(msg=\"multiple-columns single-val from array\"):\n value = pauli.insert(0, PauliTable(i).array, qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, PauliTable(i).array, qubit=True)\n self.assertEqual(value, target1)\n\n # Insert multiple columns multiple row values\n pauli = PauliTable.from_labels([\"X\", \"Y\", \"Z\"])\n for j in range(1, 5):\n for i in [\n (j * \"I\", j * \"X\", j * \"Y\"),\n (j * \"X\", j * \"Z\", j * \"Y\"),\n (j * \"Y\", j * \"Z\", j * \"I\"),\n ]:\n target0 = PauliTable.from_labels([\"X\" + i[0], \"Y\" + i[1], \"Z\" + i[2]])\n target1 = PauliTable.from_labels([i[0] + \"X\", i[1] + \"Y\", i[2] + \"Z\"])\n\n with self.subTest(msg=\"multiple-column multiple-vals from PauliTable\"):\n value = pauli.insert(0, PauliTable.from_labels(i), qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, PauliTable.from_labels(i), qubit=True)\n self.assertEqual(value, target1)\n\n with self.subTest(msg=\"multiple-column multiple-vals from array\"):\n value = pauli.insert(0, PauliTable.from_labels(i).array, qubit=True)\n self.assertEqual(value, target0)\n value = pauli.insert(1, PauliTable.from_labels(i).array, qubit=True)\n self.assertEqual(value, target1)\n\n def test_commutes(self):\n \"\"\"Test commutes method.\"\"\"\n # Single qubit Pauli\n pauli = PauliTable.from_labels([\"I\", \"X\", \"Y\", \"Z\"])\n with self.subTest(msg=\"commutes single-Pauli I\"):\n value = list(pauli.commutes(\"I\"))\n target = [True, True, True, True]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli X\"):\n value = list(pauli.commutes(\"X\"))\n target = [True, True, False, False]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli Y\"):\n value = list(pauli.commutes(\"Y\"))\n target = [True, False, True, False]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli Z\"):\n value = list(pauli.commutes(\"Z\"))\n target = [True, False, False, True]\n self.assertEqual(value, target)\n\n # 2-qubit Pauli\n pauli = PauliTable.from_labels([\"II\", \"IX\", \"YI\", \"XY\", \"ZZ\"])\n with self.subTest(msg=\"commutes 
single-Pauli II\"):\n value = list(pauli.commutes(\"II\"))\n target = [True, True, True, True, True]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli IX\"):\n value = list(pauli.commutes(\"IX\"))\n target = [True, True, True, False, False]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli XI\"):\n value = list(pauli.commutes(\"XI\"))\n target = [True, True, False, True, False]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli YI\"):\n value = list(pauli.commutes(\"YI\"))\n target = [True, True, True, False, False]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli IY\"):\n value = list(pauli.commutes(\"IY\"))\n target = [True, False, True, True, False]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli XY\"):\n value = list(pauli.commutes(\"XY\"))\n target = [True, False, False, True, True]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli YX\"):\n value = list(pauli.commutes(\"YX\"))\n target = [True, True, True, True, True]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes single-Pauli ZZ\"):\n value = list(pauli.commutes(\"ZZ\"))\n target = [True, False, False, True, True]\n self.assertEqual(value, target)\n\n def test_commutes_with_all(self):\n \"\"\"Test commutes_with_all method.\"\"\"\n # 1-qubit\n pauli = PauliTable.from_labels([\"I\", \"X\", \"Y\", \"Z\"])\n with self.subTest(msg=\"commutes_with_all [I]\"):\n value = list(pauli.commutes_with_all(\"I\"))\n target = [0, 1, 2, 3]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes_with_all [X]\"):\n value = list(pauli.commutes_with_all(\"X\"))\n target = [0, 1]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes_with_all [Y]\"):\n value = list(pauli.commutes_with_all(\"Y\"))\n target = [0, 2]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes_with_all [Z]\"):\n value = list(pauli.commutes_with_all(\"Z\"))\n target = [0, 3]\n self.assertEqual(value, target)\n\n # 2-qubit Pauli\n pauli = PauliTable.from_labels([\"II\", \"IX\", \"YI\", \"XY\", \"ZZ\"])\n\n with self.subTest(msg=\"commutes_with_all [IX, YI]\"):\n other = PauliTable.from_labels([\"IX\", \"YI\"])\n value = list(pauli.commutes_with_all(other))\n target = [0, 1, 2]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes_with_all [XY, ZZ]\"):\n other = PauliTable.from_labels([\"XY\", \"ZZ\"])\n value = list(pauli.commutes_with_all(other))\n target = [0, 3, 4]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes_with_all [YX, ZZ]\"):\n other = PauliTable.from_labels([\"YX\", \"ZZ\"])\n value = list(pauli.commutes_with_all(other))\n target = [0, 3, 4]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes_with_all [XY, YX]\"):\n other = PauliTable.from_labels([\"XY\", \"YX\"])\n value = list(pauli.commutes_with_all(other))\n target = [0, 3, 4]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes_with_all [XY, IX]\"):\n other = PauliTable.from_labels([\"XY\", \"IX\"])\n value = list(pauli.commutes_with_all(other))\n target = [0]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"commutes_with_all [YX, IX]\"):\n other = PauliTable.from_labels([\"YX\", \"IX\"])\n value = list(pauli.commutes_with_all(other))\n target = [0, 1, 2]\n self.assertEqual(value, target)\n\n def test_anticommutes_with_all(self):\n \"\"\"Test 
anticommutes_with_all method.\"\"\"\n # 1-qubit\n pauli = PauliTable.from_labels([\"I\", \"X\", \"Y\", \"Z\"])\n with self.subTest(msg=\"anticommutes_with_all [I]\"):\n value = list(pauli.anticommutes_with_all(\"I\"))\n target = []\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"antianticommutes_with_all [X]\"):\n value = list(pauli.anticommutes_with_all(\"X\"))\n target = [2, 3]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"anticommutes_with_all [Y]\"):\n value = list(pauli.anticommutes_with_all(\"Y\"))\n target = [1, 3]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"anticommutes_with_all [Z]\"):\n value = list(pauli.anticommutes_with_all(\"Z\"))\n target = [1, 2]\n self.assertEqual(value, target)\n\n # 2-qubit Pauli\n pauli = PauliTable.from_labels([\"II\", \"IX\", \"YI\", \"XY\", \"ZZ\"])\n\n with self.subTest(msg=\"anticommutes_with_all [IX, YI]\"):\n other = PauliTable.from_labels([\"IX\", \"YI\"])\n value = list(pauli.anticommutes_with_all(other))\n target = [3, 4]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"anticommutes_with_all [XY, ZZ]\"):\n other = PauliTable.from_labels([\"XY\", \"ZZ\"])\n value = list(pauli.anticommutes_with_all(other))\n target = [1, 2]\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"anticommutes_with_all [YX, ZZ]\"):\n other = PauliTable.from_labels([\"YX\", \"ZZ\"])\n value = list(pauli.anticommutes_with_all(other))\n target = []\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"anticommutes_with_all [XY, YX]\"):\n other = PauliTable.from_labels([\"XY\", \"YX\"])\n value = list(pauli.anticommutes_with_all(other))\n target = []\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"anticommutes_with_all [XY, IX]\"):\n other = PauliTable.from_labels([\"XY\", \"IX\"])\n value = list(pauli.anticommutes_with_all(other))\n target = []\n self.assertEqual(value, target)\n\n with self.subTest(msg=\"anticommutes_with_all [YX, IX]\"):\n other = PauliTable.from_labels([\"YX\", \"IX\"])\n value = list(pauli.anticommutes_with_all(other))\n target = []\n self.assertEqual(value, target)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
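The label and commutation conventions these tests encode can be reproduced with a few lines of plain NumPy. This sketch is not the PauliTable API, just the boolean symplectic representation (row = [x_0..x_{n-1} | z_0..z_{n-1}], with qubit 0 the rightmost label character) that the expected targets above imply:

```python
import numpy as np

ENCODE = {"I": (0, 0), "X": (1, 0), "Y": (1, 1), "Z": (0, 1)}

def label_to_row(label):
    bits = [ENCODE[c] for c in reversed(label)]      # qubit 0 = last character
    x = np.array([b[0] for b in bits], dtype=bool)
    z = np.array([b[1] for b in bits], dtype=bool)
    return np.concatenate([x, z])

def commutes(label_a, label_b):
    n = len(label_a)
    a, b = label_to_row(label_a), label_to_row(label_b)
    # Symplectic inner product: two Paulis commute iff x_a.z_b + z_a.x_b is even.
    return (np.sum(a[:n] & b[n:]) + np.sum(a[n:] & b[:n])) % 2 == 0

assert np.array_equal(label_to_row("IX"), [True, False, False, False])
assert commutes("XX", "ZZ") and not commutes("XY", "IX")
```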
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nMethods to create random operators.\n\"\"\"\n\nimport numpy as np\nfrom numpy.random import default_rng\n\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.quantum_info.operators import Operator, Stinespring\n\n# pylint: disable=unused-import\nfrom .dihedral.random import random_cnotdihedral\nfrom .symplectic.random import (\n random_clifford,\n random_pauli,\n random_pauli_list,\n random_pauli_table,\n random_stabilizer_table,\n)\n\nDEFAULT_RNG = default_rng()\n\n\ndef random_unitary(dims, seed=None):\n \"\"\"Return a random unitary Operator.\n\n The operator is sampled from the unitary Haar measure.\n\n Args:\n dims (int or tuple): the input dimensions of the Operator.\n seed (int or np.random.Generator): Optional. Set a fixed seed or\n generator for RNG.\n\n Returns:\n Operator: a unitary operator.\n \"\"\"\n if seed is None:\n random_state = DEFAULT_RNG\n elif isinstance(seed, np.random.Generator):\n random_state = seed\n else:\n random_state = default_rng(seed)\n\n dim = np.product(dims)\n from scipy import stats\n\n mat = stats.unitary_group.rvs(dim, random_state=random_state)\n return Operator(mat, input_dims=dims, output_dims=dims)\n\n\ndef random_hermitian(dims, traceless=False, seed=None):\n \"\"\"Return a random hermitian Operator.\n\n The operator is sampled from Gaussian Unitary Ensemble.\n\n Args:\n dims (int or tuple): the input dimension of the Operator.\n traceless (bool): Optional. If True subtract diagonal entries to\n return a traceless hermitian operator\n (Default: False).\n seed (int or np.random.Generator): Optional. Set a fixed seed or\n generator for RNG.\n\n Returns:\n Operator: a Hermitian operator.\n \"\"\"\n if seed is None:\n rng = DEFAULT_RNG\n elif isinstance(seed, np.random.Generator):\n rng = seed\n else:\n rng = default_rng(seed)\n\n # Total dimension\n dim = np.product(dims)\n from scipy import stats\n\n if traceless:\n mat = np.zeros((dim, dim), dtype=complex)\n else:\n # Generate diagonal part of matrix for Gaussian N(0, 1)\n mat = np.diag(stats.norm.rvs(scale=1, size=dim, random_state=rng).astype(complex))\n\n # Generate lower triangular values from Gaussian N(0, 0.5)\n num_tril = (dim * (dim - 1)) // 2\n real_tril = stats.norm.rvs(scale=0.5, size=num_tril, random_state=rng)\n imag_tril = stats.norm.rvs(scale=0.5, size=num_tril, random_state=rng)\n # Get lower triangular indices\n rows, cols = np.tril_indices(dim, -1)\n mat[(rows, cols)] = real_tril + 1j * imag_tril\n mat[(cols, rows)] = real_tril - 1j * imag_tril\n return Operator(mat, input_dims=dims, output_dims=dims)\n\n\ndef random_quantum_channel(input_dims=None, output_dims=None, rank=None, seed=None):\n \"\"\"Return a random CPTP quantum channel.\n\n This constructs the Stinespring operator for the quantum channel by\n sampling a random isometry from the unitary Haar measure.\n\n Args:\n input_dims (int or tuple): the input dimension of the channel.\n output_dims (int or tuple): the input dimension of the channel.\n rank (int): Optional. 
The rank of the quantum channel Choi-matrix.\n seed (int or np.random.Generator): Optional. Set a fixed seed or\n generator for RNG.\n\n Returns:\n Stinespring: a quantum channel operator.\n\n Raises:\n QiskitError: if rank or dimensions are invalid.\n \"\"\"\n # Determine total input and output dimensions\n if input_dims is None and output_dims is None:\n raise QiskitError(\n \"No dimensions specified: input_dims and output_dims cannot both be None.\"\n )\n if input_dims is None:\n input_dims = output_dims\n elif output_dims is None:\n output_dims = input_dims\n\n d_in = np.product(input_dims)\n d_out = np.product(output_dims)\n\n # If rank is not specified set to the maximum rank for the\n # Choi matrix (input_dim * output_dim)\n if rank is None or rank > d_in * d_out:\n rank = d_in * d_out\n if rank < 1:\n raise QiskitError(f\"Rank {rank} must be greater than 0.\")\n from scipy import stats\n\n # Generate a random unitary matrix\n unitary = stats.unitary_group.rvs(max(rank * d_out, d_in), random_state=seed)\n\n # Truncate columns to produce an isometry\n return Stinespring(unitary[:, :d_in], input_dims=input_dims, output_dims=output_dims)\n",
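For reference, the Gaussian-unitary-ensemble construction used by random_hermitian above can be checked in isolation; this sketch mirrors the module's own sampling steps with NumPy/SciPy only:

```python
import numpy as np
from scipy import stats

def gue_sample(dim, seed=None):
    rng = np.random.default_rng(seed)
    # Real N(0, 1) diagonal, then N(0, 0.5) +/- i*N(0, 0.5) off-diagonal pairs.
    mat = np.diag(stats.norm.rvs(scale=1, size=dim, random_state=rng).astype(complex))
    num_tril = dim * (dim - 1) // 2
    real = stats.norm.rvs(scale=0.5, size=num_tril, random_state=rng)
    imag = stats.norm.rvs(scale=0.5, size=num_tril, random_state=rng)
    rows, cols = np.tril_indices(dim, -1)
    mat[rows, cols] = real + 1j * imag
    mat[cols, rows] = real - 1j * imag
    return mat

h = gue_sample(4, seed=42)
assert np.allclose(h, h.conj().T)          # Hermitian by construction
```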
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nReadout mitigation data handling utils\n\"\"\"\n\nimport logging\nfrom typing import Optional, List, Tuple, Dict\nimport numpy as np\n\nfrom qiskit.exceptions import QiskitError\nfrom ..utils import marginal_counts\nfrom ..counts import Counts\n\nlogger = logging.getLogger(__name__)\n\n\ndef z_diagonal(dim, dtype=float):\n r\"\"\"Return the diagonal for the operator :math:`Z^\\otimes n`\"\"\"\n parity = np.zeros(dim, dtype=dtype)\n for i in range(dim):\n parity[i] = bin(i)[2:].count(\"1\")\n return (-1) ** np.mod(parity, 2)\n\n\ndef expval_with_stddev(coeffs: np.ndarray, probs: np.ndarray, shots: int) -> Tuple[float, float]:\n \"\"\"Compute expectation value and standard deviation.\n Args:\n coeffs: array of diagonal operator coefficients.\n probs: array of measurement probabilities.\n shots: total number of shots to obtain probabilities.\n Returns:\n tuple: (expval, stddev) expectation value and standard deviation.\n \"\"\"\n # Compute expval\n expval = coeffs.dot(probs)\n\n # Compute variance\n sq_expval = (coeffs**2).dot(probs)\n variance = (sq_expval - expval**2) / shots\n\n # Compute standard deviation\n if variance < 0 and not np.isclose(variance, 0):\n logger.warning(\n \"Encountered a negative variance in expectation value calculation.\"\n \"(%f). Setting standard deviation of result to 0.\",\n variance,\n )\n calc_stddev = np.sqrt(variance) if variance > 0 else 0.0\n return [expval, calc_stddev]\n\n\ndef stddev(probs, shots):\n \"\"\"Calculate stddev dict\"\"\"\n ret = {}\n for key, prob in probs.items():\n std_err = np.sqrt(prob * (1 - prob) / shots)\n ret[key] = std_err\n return ret\n\n\ndef str2diag(string):\n \"\"\"Transform diagonal from a string to a numpy array\"\"\"\n chars = {\n \"I\": np.array([1, 1], dtype=float),\n \"Z\": np.array([1, -1], dtype=float),\n \"0\": np.array([1, 0], dtype=float),\n \"1\": np.array([0, 1], dtype=float),\n }\n ret = np.array([1], dtype=float)\n for i in reversed(string):\n if i not in chars:\n raise QiskitError(f\"Invalid diagonal string character {i}\")\n ret = np.kron(chars[i], ret)\n return ret\n\n\ndef counts_to_vector(counts: Counts, num_qubits: int) -> Tuple[np.ndarray, int]:\n \"\"\"Transforms Counts to a probability vector\"\"\"\n vec = np.zeros(2**num_qubits, dtype=float)\n shots = 0\n for key, val in counts.items():\n shots += val\n vec[int(key, 2)] = val\n vec /= shots\n return vec, shots\n\n\ndef remap_qubits(\n vec: np.ndarray, num_qubits: int, qubits: Optional[List[int]] = None\n) -> np.ndarray:\n \"\"\"Remapping the qubits\"\"\"\n if qubits is not None:\n if len(qubits) != num_qubits:\n raise QiskitError(\"Num qubits does not match vector length.\")\n axes = [num_qubits - 1 - i for i in reversed(np.argsort(qubits))]\n vec = np.reshape(vec, num_qubits * [2]).transpose(axes).reshape(vec.shape)\n return vec\n\n\ndef marganalize_counts(\n counts: Counts,\n qubit_index: Dict[int, int],\n qubits: Optional[List[int]] = None,\n clbits: Optional[List[int]] = None,\n) -> np.ndarray:\n \"\"\"Marginalization of the Counts. 
Verify that number of clbits equals to the number of qubits.\"\"\"\n if clbits is not None:\n qubits_len = len(qubits) if not qubits is None else 0\n clbits_len = len(clbits) if not clbits is None else 0\n if clbits_len not in (0, qubits_len):\n raise QiskitError(\n \"Num qubits ({}) does not match number of clbits ({}).\".format(\n qubits_len, clbits_len\n )\n )\n counts = marginal_counts(counts, clbits)\n if clbits is None and qubits is not None:\n clbits = [qubit_index[qubit] for qubit in qubits]\n counts = marginal_counts(counts, clbits)\n return counts\n\n\ndef counts_probability_vector(\n counts: Counts,\n qubit_index: Dict[int, int],\n qubits: Optional[List[int]] = None,\n clbits: Optional[List[int]] = None,\n) -> Tuple[np.ndarray, int]:\n \"\"\"Compute a probability vector for all count outcomes.\n\n Args:\n counts: counts object\n qubit_index: For each qubit, its index in the mitigator qubits list\n qubits: qubits the count bitstrings correspond to.\n clbits: Optional, marginalize counts to just these bits.\n\n Raises:\n QiskitError: if qubits and clbits kwargs are not valid.\n\n Returns:\n np.ndarray: a probability vector for all count outcomes.\n \"\"\"\n counts = marganalize_counts(counts, qubit_index, qubits, clbits)\n if qubits is not None:\n num_qubits = len(qubits)\n else:\n num_qubits = len(qubit_index.keys())\n vec, shots = counts_to_vector(counts, num_qubits)\n vec = remap_qubits(vec, num_qubits, qubits)\n return vec, shots\n",
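The mitigation utilities in the row above turn a Counts dictionary into a probability vector and then take a diagonal-operator expectation value with a shot-noise error bar. Below is a self-contained sketch of that pipeline on a toy two-qubit counts dictionary; the counts are invented for illustration.

# Sketch: diagonal expectation value with stddev from a toy counts dict.
import numpy as np

counts = {"00": 480, "01": 20, "10": 30, "11": 470}   # illustrative only
num_qubits = 2
shots = sum(counts.values())

# Probability vector indexed by the integer value of each bitstring.
probs = np.zeros(2**num_qubits)
for key, val in counts.items():
    probs[int(key, 2)] = val / shots

# Diagonal of Z (x) Z: +1 for even parity, -1 for odd parity.
parity = np.array([bin(i).count("1") for i in range(2**num_qubits)])
zz_diag = (-1.0) ** (parity % 2)

expval = zz_diag.dot(probs)
variance = (zz_diag**2).dot(probs) - expval**2
stddev = np.sqrt(max(variance, 0.0) / shots)
print(expval, stddev)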
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"X, CX, CCX and multi-controlled X gates.\"\"\"\n\nfrom typing import Optional, Union\nimport warnings\nfrom math import ceil\nimport numpy\nfrom qiskit.circuit.controlledgate import ControlledGate\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.parameterexpression import ParameterValueType\nfrom qiskit.circuit.quantumregister import QuantumRegister\nfrom qiskit.circuit._utils import _compute_control_matrix, _ctrl_state_to_int\nfrom qiskit.qasm import pi\nfrom .h import HGate\nfrom .t import TGate, TdgGate\nfrom .u1 import U1Gate\nfrom .u2 import U2Gate\nfrom .sx import SXGate\n\n\nclass XGate(Gate):\n r\"\"\"The single-qubit Pauli-X gate (:math:`\\sigma_x`).\n\n **Matrix Representation:**\n\n .. math::\n\n X = \\begin{pmatrix}\n 0 & 1 \\\\\n 1 & 0\n \\end{pmatrix}\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n ┌───┐\n q_0: ┤ X ├\n └───┘\n\n Equivalent to a :math:`\\pi` radian rotation about the X axis.\n\n .. note::\n\n A global phase difference exists between the definitions of\n :math:`RX(\\pi)` and :math:`X`.\n\n .. math::\n\n RX(\\pi) = \\begin{pmatrix}\n 0 & -i \\\\\n -i & 0\n \\end{pmatrix}\n = -i X\n\n The gate is equivalent to a classical bit flip.\n\n .. math::\n\n |0\\rangle \\rightarrow |1\\rangle \\\\\n |1\\rangle \\rightarrow |0\\rangle\n \"\"\"\n\n def __init__(self, label: Optional[str] = None):\n \"\"\"Create new X gate.\"\"\"\n super().__init__(\"x\", 1, [], label=label)\n\n def _define(self):\n \"\"\"\n gate x a { u3(pi,0,pi) a; }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .u3 import U3Gate\n\n q = QuantumRegister(1, \"q\")\n qc = QuantumCircuit(q, name=self.name)\n rules = [(U3Gate(pi, 0, pi), [q[0]], [])]\n for instr, qargs, cargs in rules:\n qc._append(instr, qargs, cargs)\n\n self.definition = qc\n\n def control(\n self,\n num_ctrl_qubits: int = 1,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Return a (multi-)controlled-X gate.\n\n One control returns a CX gate. Two controls returns a CCX gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n gate = MCXGate(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state)\n gate.base_gate.label = self.label\n return gate\n\n def inverse(self):\n r\"\"\"Return inverted X gate (itself).\"\"\"\n return XGate() # self-inverse\n\n def __array__(self, dtype=None):\n \"\"\"Return a numpy.array for the X gate.\"\"\"\n return numpy.array([[0, 1], [1, 0]], dtype=dtype)\n\n\nclass CXGate(ControlledGate):\n r\"\"\"Controlled-X gate.\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n q_0: ──■──\n ┌─┴─┐\n q_1: ┤ X ├\n └───┘\n\n **Matrix representation:**\n\n .. 
math::\n\n CX\\ q_0, q_1 =\n I \\otimes |0\\rangle\\langle0| + X \\otimes |1\\rangle\\langle1| =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 1 \\\\\n 0 & 0 & 1 & 0 \\\\\n 0 & 1 & 0 & 0\n \\end{pmatrix}\n\n .. note::\n\n In Qiskit's convention, higher qubit indices are more significant\n (little endian convention). In many textbooks, controlled gates are\n presented with the assumption of more significant qubits as control,\n which in our case would be q_1. Thus a textbook matrix for this\n gate will be:\n\n .. parsed-literal::\n ┌───┐\n q_0: ┤ X ├\n └─┬─┘\n q_1: ──■──\n\n .. math::\n\n CX\\ q_1, q_0 =\n |0 \\rangle\\langle 0| \\otimes I + |1 \\rangle\\langle 1| \\otimes X =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 \\\\\n 0 & 0 & 0 & 1 \\\\\n 0 & 0 & 1 & 0\n \\end{pmatrix}\n\n\n In the computational basis, this gate flips the target qubit\n if the control qubit is in the :math:`|1\\rangle` state.\n In this sense it is similar to a classical XOR gate.\n\n .. math::\n `|a, b\\rangle \\rightarrow |a, a \\oplus b\\rangle`\n \"\"\"\n\n def __init__(self, label: Optional[str] = None, ctrl_state: Optional[Union[str, int]] = None):\n \"\"\"Create new CX gate.\"\"\"\n super().__init__(\n \"cx\", 2, [], num_ctrl_qubits=1, label=label, ctrl_state=ctrl_state, base_gate=XGate()\n )\n\n def _define_qasm3(self):\n from qiskit.qasm3.ast import (\n Constant,\n Identifier,\n Integer,\n QuantumBlock,\n QuantumGateModifier,\n QuantumGateModifierName,\n QuantumGateSignature,\n QuantumGateDefinition,\n QuantumGateCall,\n )\n\n control, target = Identifier(\"c\"), Identifier(\"t\")\n call = QuantumGateCall(\n Identifier(\"U\"),\n [control, target],\n parameters=[Constant.PI, Integer(0), Constant.PI],\n modifiers=[QuantumGateModifier(QuantumGateModifierName.CTRL)],\n )\n return QuantumGateDefinition(\n QuantumGateSignature(Identifier(\"cx\"), [control, target]),\n QuantumBlock([call]),\n )\n\n def control(\n self,\n num_ctrl_qubits: int = 1,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Return a controlled-X gate with more control lines.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n ctrl_state = _ctrl_state_to_int(ctrl_state, num_ctrl_qubits)\n new_ctrl_state = (self.ctrl_state << num_ctrl_qubits) | ctrl_state\n gate = MCXGate(num_ctrl_qubits=num_ctrl_qubits + 1, label=label, ctrl_state=new_ctrl_state)\n gate.base_gate.label = self.label\n return gate\n\n def inverse(self):\n \"\"\"Return inverted CX gate (itself).\"\"\"\n return CXGate(ctrl_state=self.ctrl_state) # self-inverse\n\n def __array__(self, dtype=None):\n \"\"\"Return a numpy.array for the CX gate.\"\"\"\n if self.ctrl_state:\n return numpy.array(\n [[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]], dtype=dtype\n )\n else:\n return numpy.array(\n [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]], dtype=dtype\n )\n\n\nclass CCXGate(ControlledGate):\n r\"\"\"CCX gate, also known as Toffoli gate.\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n q_0: ──■──\n │\n q_1: ──■──\n ┌─┴─┐\n q_2: ┤ X ├\n └───┘\n\n **Matrix representation:**\n\n .. 
math::\n\n CCX q_0, q_1, q_2 =\n I \\otimes I \\otimes |0 \\rangle \\langle 0| + CX \\otimes |1 \\rangle \\langle 1| =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\\\\n 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0\\\\\n 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0\\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1\\\\\n 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0\\\\\n 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0\\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0\\\\\n 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0\n \\end{pmatrix}\n\n .. note::\n\n In Qiskit's convention, higher qubit indices are more significant\n (little endian convention). In many textbooks, controlled gates are\n presented with the assumption of more significant qubits as control,\n which in our case would be q_2 and q_1. Thus a textbook matrix for this\n gate will be:\n\n .. parsed-literal::\n ┌───┐\n q_0: ┤ X ├\n └─┬─┘\n q_1: ──■──\n │\n q_2: ──■──\n\n .. math::\n\n CCX\\ q_2, q_1, q_0 =\n |0 \\rangle \\langle 0| \\otimes I \\otimes I + |1 \\rangle \\langle 1| \\otimes CX =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\\\\n 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0\\\\\n 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0\\\\\n 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0\\\\\n 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0\\\\\n 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0\\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1\\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0\n \\end{pmatrix}\n\n \"\"\"\n\n def __init__(self, label: Optional[str] = None, ctrl_state: Optional[Union[str, int]] = None):\n \"\"\"Create new CCX gate.\"\"\"\n super().__init__(\n \"ccx\", 3, [], num_ctrl_qubits=2, label=label, ctrl_state=ctrl_state, base_gate=XGate()\n )\n\n def _define(self):\n \"\"\"\n gate ccx a,b,c\n {\n h c; cx b,c; tdg c; cx a,c;\n t c; cx b,c; tdg c; cx a,c;\n t b; t c; h c; cx a,b;\n t a; tdg b; cx a,b;}\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n\n # ┌───┐\n # q_0: ───────────────────■─────────────────────■────■───┤ T ├───■──\n # │ ┌───┐ │ ┌─┴─┐┌┴───┴┐┌─┴─┐\n # q_1: ───────■───────────┼─────────■───┤ T ├───┼──┤ X ├┤ Tdg ├┤ X ├\n # ┌───┐┌─┴─┐┌─────┐┌─┴─┐┌───┐┌─┴─┐┌┴───┴┐┌─┴─┐├───┤└┬───┬┘└───┘\n # q_2: ┤ H ├┤ X ├┤ Tdg ├┤ X ├┤ T ├┤ X ├┤ Tdg ├┤ X ├┤ T ├─┤ H ├──────\n # └───┘└───┘└─────┘└───┘└───┘└───┘└─────┘└───┘└───┘ └───┘\n q = QuantumRegister(3, \"q\")\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (HGate(), [q[2]], []),\n (CXGate(), [q[1], q[2]], []),\n (TdgGate(), [q[2]], []),\n (CXGate(), [q[0], q[2]], []),\n (TGate(), [q[2]], []),\n (CXGate(), [q[1], q[2]], []),\n (TdgGate(), [q[2]], []),\n (CXGate(), [q[0], q[2]], []),\n (TGate(), [q[1]], []),\n (TGate(), [q[2]], []),\n (HGate(), [q[2]], []),\n (CXGate(), [q[0], q[1]], []),\n (TGate(), [q[0]], []),\n (TdgGate(), [q[1]], []),\n (CXGate(), [q[0], q[1]], []),\n ]\n for instr, qargs, cargs in rules:\n qc._append(instr, qargs, cargs)\n\n self.definition = qc\n\n def control(\n self,\n num_ctrl_qubits: int = 1,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Controlled version of this gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. 
If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n ctrl_state = _ctrl_state_to_int(ctrl_state, num_ctrl_qubits)\n new_ctrl_state = (self.ctrl_state << num_ctrl_qubits) | ctrl_state\n gate = MCXGate(num_ctrl_qubits=num_ctrl_qubits + 2, label=label, ctrl_state=new_ctrl_state)\n gate.base_gate.label = self.label\n return gate\n\n def inverse(self):\n \"\"\"Return an inverted CCX gate (also a CCX).\"\"\"\n return CCXGate(ctrl_state=self.ctrl_state) # self-inverse\n\n def __array__(self, dtype=None):\n \"\"\"Return a numpy.array for the CCX gate.\"\"\"\n mat = _compute_control_matrix(\n self.base_gate.to_matrix(), self.num_ctrl_qubits, ctrl_state=self.ctrl_state\n )\n if dtype:\n return numpy.asarray(mat, dtype=dtype)\n return mat\n\n\nclass RCCXGate(Gate):\n \"\"\"The simplified Toffoli gate, also referred to as Margolus gate.\n\n The simplified Toffoli gate implements the Toffoli gate up to relative phases.\n This implementation requires three CX gates which is the minimal amount possible,\n as shown in https://arxiv.org/abs/quant-ph/0312225.\n Note, that the simplified Toffoli is not equivalent to the Toffoli. But can be used in places\n where the Toffoli gate is uncomputed again.\n\n This concrete implementation is from https://arxiv.org/abs/1508.03273, the dashed box\n of Fig. 3.\n \"\"\"\n\n def __init__(self, label: Optional[str] = None):\n \"\"\"Create a new simplified CCX gate.\"\"\"\n super().__init__(\"rccx\", 3, [], label=label)\n\n def _define(self):\n \"\"\"\n gate rccx a,b,c\n { u2(0,pi) c;\n u1(pi/4) c;\n cx b, c;\n u1(-pi/4) c;\n cx a, c;\n u1(pi/4) c;\n cx b, c;\n u1(-pi/4) c;\n u2(0,pi) c;\n }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n\n q = QuantumRegister(3, \"q\")\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (U2Gate(0, pi), [q[2]], []), # H gate\n (U1Gate(pi / 4), [q[2]], []), # T gate\n (CXGate(), [q[1], q[2]], []),\n (U1Gate(-pi / 4), [q[2]], []), # inverse T gate\n (CXGate(), [q[0], q[2]], []),\n (U1Gate(pi / 4), [q[2]], []),\n (CXGate(), [q[1], q[2]], []),\n (U1Gate(-pi / 4), [q[2]], []), # inverse T gate\n (U2Gate(0, pi), [q[2]], []), # H gate\n ]\n for instr, qargs, cargs in rules:\n qc._append(instr, qargs, cargs)\n\n self.definition = qc\n\n def __array__(self, dtype=None):\n \"\"\"Return a numpy.array for the simplified CCX gate.\"\"\"\n return numpy.array(\n [\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, -1j],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, -1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 1j, 0, 0, 0, 0],\n ],\n dtype=dtype,\n )\n\n\nclass C3SXGate(ControlledGate):\n \"\"\"The 3-qubit controlled sqrt-X gate.\n\n This implementation is based on Page 17 of [1].\n\n References:\n [1] Barenco et al., 1995. https://arxiv.org/pdf/quant-ph/9503016.pdf\n \"\"\"\n\n def __init__(\n self,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n *,\n angle: Optional[ParameterValueType] = None,\n ):\n \"\"\"Create a new 3-qubit controlled sqrt-X gate.\n\n Args:\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. If None, use all 1s.\n angle (float): DEPRECATED. The angle used in the controlled-U1 gates. 
An angle of π/8\n yields the sqrt(X) gates, an angle of π/4 the 3-qubit controlled X gate.\n \"\"\"\n super().__init__(\n \"c3sx\", 4, [], num_ctrl_qubits=3, label=label, ctrl_state=ctrl_state, base_gate=SXGate()\n )\n\n if angle is not None:\n warnings.warn(\n \"The angle argument is deprecated as of Qiskit Terra 0.17.0 and will \"\n \"be removed no earlier than 3 months after the release date.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n if angle is None:\n angle = numpy.pi / 8\n\n self._angle = angle\n\n def _define(self):\n \"\"\"\n gate c3sqrtx a,b,c,d\n {\n h d; cu1(pi/8) a,d; h d;\n cx a,b;\n h d; cu1(-pi/8) b,d; h d;\n cx a,b;\n h d; cu1(pi/8) b,d; h d;\n cx b,c;\n h d; cu1(-pi/8) c,d; h d;\n cx a,c;\n h d; cu1(pi/8) c,d; h d;\n cx b,c;\n h d; cu1(-pi/8) c,d; h d;\n cx a,c;\n h d; cu1(pi/8) c,d; h d;\n }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .u1 import CU1Gate\n\n q = QuantumRegister(4, name=\"q\")\n # pylint: disable=invalid-unary-operand-type\n rules = [\n (HGate(), [q[3]], []),\n (CU1Gate(self._angle), [q[0], q[3]], []),\n (HGate(), [q[3]], []),\n (CXGate(), [q[0], q[1]], []),\n (HGate(), [q[3]], []),\n (CU1Gate(-self._angle), [q[1], q[3]], []),\n (HGate(), [q[3]], []),\n (CXGate(), [q[0], q[1]], []),\n (HGate(), [q[3]], []),\n (CU1Gate(self._angle), [q[1], q[3]], []),\n (HGate(), [q[3]], []),\n (CXGate(), [q[1], q[2]], []),\n (HGate(), [q[3]], []),\n (CU1Gate(-self._angle), [q[2], q[3]], []),\n (HGate(), [q[3]], []),\n (CXGate(), [q[0], q[2]], []),\n (HGate(), [q[3]], []),\n (CU1Gate(self._angle), [q[2], q[3]], []),\n (HGate(), [q[3]], []),\n (CXGate(), [q[1], q[2]], []),\n (HGate(), [q[3]], []),\n (CU1Gate(-self._angle), [q[2], q[3]], []),\n (HGate(), [q[3]], []),\n (CXGate(), [q[0], q[2]], []),\n (HGate(), [q[3]], []),\n (CU1Gate(self._angle), [q[2], q[3]], []),\n (HGate(), [q[3]], []),\n ]\n qc = QuantumCircuit(q)\n for instr, qargs, cargs in rules:\n qc._append(instr, qargs, cargs)\n\n self.definition = qc\n\n def inverse(self):\n \"\"\"Invert this gate. 
The C3X is its own inverse.\"\"\"\n # pylint: disable=invalid-unary-operand-type\n if self._angle is not None:\n angle = -self._angle\n else:\n angle = None\n\n return C3SXGate(angle=angle, ctrl_state=self.ctrl_state)\n\n\nclass C3XGate(ControlledGate):\n r\"\"\"The X gate controlled on 3 qubits.\n\n This implementation uses :math:`\\sqrt{T}` and 14 CNOT gates.\n \"\"\"\n\n def __new__(\n cls,\n angle: Optional[ParameterValueType] = None,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n if angle is not None:\n return C3SXGate(label, ctrl_state, angle=angle)\n\n instance = super().__new__(cls)\n instance.__init__(None, label, ctrl_state)\n return instance\n\n # pylint: disable=unused-argument\n def __init__(\n self,\n angle: Optional[ParameterValueType] = None,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Create a new 3-qubit controlled X gate.\"\"\"\n super().__init__(\n \"mcx\", 4, [], num_ctrl_qubits=3, label=label, ctrl_state=ctrl_state, base_gate=XGate()\n )\n\n # seems like open controls not hapening?\n def _define(self):\n \"\"\"\n gate c3x a,b,c,d\n {\n h d;\n p(pi/8) a;\n p(pi/8) b;\n p(pi/8) c;\n p(pi/8) d;\n cx a, b;\n p(-pi/8) b;\n cx a, b;\n cx b, c;\n p(-pi/8) c;\n cx a, c;\n p(pi/8) c;\n cx b, c;\n p(-pi/8) c;\n cx a, c;\n cx c, d;\n p(-pi/8) d;\n cx b, d;\n p(pi/8) d;\n cx c, d;\n p(-pi/8) d;\n cx a, d;\n p(pi/8) d;\n cx c, d;\n p(-pi/8) d;\n cx b, d;\n p(pi/8) d;\n cx c, d;\n p(-pi/8) d;\n cx a, d;\n h d;\n }\n \"\"\"\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n\n q = QuantumRegister(4, name=\"q\")\n qc = QuantumCircuit(q, name=self.name)\n qc.h(3)\n qc.p(pi / 8, [0, 1, 2, 3])\n qc.cx(0, 1)\n qc.p(-pi / 8, 1)\n qc.cx(0, 1)\n qc.cx(1, 2)\n qc.p(-pi / 8, 2)\n qc.cx(0, 2)\n qc.p(pi / 8, 2)\n qc.cx(1, 2)\n qc.p(-pi / 8, 2)\n qc.cx(0, 2)\n qc.cx(2, 3)\n qc.p(-pi / 8, 3)\n qc.cx(1, 3)\n qc.p(pi / 8, 3)\n qc.cx(2, 3)\n qc.p(-pi / 8, 3)\n qc.cx(0, 3)\n qc.p(pi / 8, 3)\n qc.cx(2, 3)\n qc.p(-pi / 8, 3)\n qc.cx(1, 3)\n qc.p(pi / 8, 3)\n qc.cx(2, 3)\n qc.p(-pi / 8, 3)\n qc.cx(0, 3)\n qc.h(3)\n\n self.definition = qc\n\n def control(\n self,\n num_ctrl_qubits: int = 1,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Controlled version of this gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n ctrl_state = _ctrl_state_to_int(ctrl_state, num_ctrl_qubits)\n new_ctrl_state = (self.ctrl_state << num_ctrl_qubits) | ctrl_state\n gate = MCXGate(num_ctrl_qubits=num_ctrl_qubits + 3, label=label, ctrl_state=new_ctrl_state)\n gate.base_gate.label = self.label\n return gate\n\n def inverse(self):\n \"\"\"Invert this gate. 
The C4X is its own inverse.\"\"\"\n return C3XGate(ctrl_state=self.ctrl_state)\n\n def __array__(self, dtype=None):\n \"\"\"Return a numpy.array for the C4X gate.\"\"\"\n mat = _compute_control_matrix(\n self.base_gate.to_matrix(), self.num_ctrl_qubits, ctrl_state=self.ctrl_state\n )\n if dtype:\n return numpy.asarray(mat, dtype=dtype)\n return mat\n\n\nclass RC3XGate(Gate):\n \"\"\"The simplified 3-controlled Toffoli gate.\n\n The simplified Toffoli gate implements the Toffoli gate up to relative phases.\n Note, that the simplified Toffoli is not equivalent to the Toffoli. But can be used in places\n where the Toffoli gate is uncomputed again.\n\n This concrete implementation is from https://arxiv.org/abs/1508.03273, the complete circuit\n of Fig. 4.\n \"\"\"\n\n def __init__(self, label: Optional[str] = None):\n \"\"\"Create a new RC3X gate.\"\"\"\n super().__init__(\"rcccx\", 4, [], label=label)\n\n def _define(self):\n \"\"\"\n gate rc3x a,b,c,d\n { u2(0,pi) d;\n u1(pi/4) d;\n cx c,d;\n u1(-pi/4) d;\n u2(0,pi) d;\n cx a,d;\n u1(pi/4) d;\n cx b,d;\n u1(-pi/4) d;\n cx a,d;\n u1(pi/4) d;\n cx b,d;\n u1(-pi/4) d;\n u2(0,pi) d;\n u1(pi/4) d;\n cx c,d;\n u1(-pi/4) d;\n u2(0,pi) d;\n }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n\n q = QuantumRegister(4, \"q\")\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (U2Gate(0, pi), [q[3]], []), # H gate\n (U1Gate(pi / 4), [q[3]], []), # T gate\n (CXGate(), [q[2], q[3]], []),\n (U1Gate(-pi / 4), [q[3]], []), # inverse T gate\n (U2Gate(0, pi), [q[3]], []),\n (CXGate(), [q[0], q[3]], []),\n (U1Gate(pi / 4), [q[3]], []),\n (CXGate(), [q[1], q[3]], []),\n (U1Gate(-pi / 4), [q[3]], []),\n (CXGate(), [q[0], q[3]], []),\n (U1Gate(pi / 4), [q[3]], []),\n (CXGate(), [q[1], q[3]], []),\n (U1Gate(-pi / 4), [q[3]], []),\n (U2Gate(0, pi), [q[3]], []),\n (U1Gate(pi / 4), [q[3]], []),\n (CXGate(), [q[2], q[3]], []),\n (U1Gate(-pi / 4), [q[3]], []),\n (U2Gate(0, pi), [q[3]], []),\n ]\n for instr, qargs, cargs in rules:\n qc._append(instr, qargs, cargs)\n\n self.definition = qc\n\n def __array__(self, dtype=None):\n \"\"\"Return a numpy.array for the RC3X gate.\"\"\"\n return numpy.array(\n [\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1j, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1j, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0],\n ],\n dtype=dtype,\n )\n\n\nclass C4XGate(ControlledGate):\n \"\"\"The 4-qubit controlled X gate.\n\n This implementation is based on Page 21, Lemma 7.5, of [1], with the use\n of the relative phase version of c3x, the rc3x [2].\n\n References:\n [1] Barenco et al., 1995. https://arxiv.org/pdf/quant-ph/9503016.pdf\n [2] Maslov, 2015. 
https://arxiv.org/abs/1508.03273\n \"\"\"\n\n def __init__(self, label: Optional[str] = None, ctrl_state: Optional[Union[str, int]] = None):\n \"\"\"Create a new 4-qubit controlled X gate.\"\"\"\n super().__init__(\n \"mcx\", 5, [], num_ctrl_qubits=4, label=label, ctrl_state=ctrl_state, base_gate=XGate()\n )\n\n # seems like open controls not hapening?\n def _define(self):\n \"\"\"\n gate c3sqrtx a,b,c,d\n {\n h d; cu1(pi/8) a,d; h d;\n cx a,b;\n h d; cu1(-pi/8) b,d; h d;\n cx a,b;\n h d; cu1(pi/8) b,d; h d;\n cx b,c;\n h d; cu1(-pi/8) c,d; h d;\n cx a,c;\n h d; cu1(pi/8) c,d; h d;\n cx b,c;\n h d; cu1(-pi/8) c,d; h d;\n cx a,c;\n h d; cu1(pi/8) c,d; h d;\n }\n gate c4x a,b,c,d,e\n {\n h e; cu1(pi/2) d,e; h e;\n rc3x a,b,c,d;\n h e; cu1(-pi/2) d,e; h e;\n rc3x a,b,c,d;\n c3sqrtx a,b,c,e;\n }\n \"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .u1 import CU1Gate\n\n q = QuantumRegister(5, name=\"q\")\n qc = QuantumCircuit(q, name=self.name)\n rules = [\n (HGate(), [q[4]], []),\n (CU1Gate(numpy.pi / 2), [q[3], q[4]], []),\n (HGate(), [q[4]], []),\n (RC3XGate(), [q[0], q[1], q[2], q[3]], []),\n (HGate(), [q[4]], []),\n (CU1Gate(-numpy.pi / 2), [q[3], q[4]], []),\n (HGate(), [q[4]], []),\n (RC3XGate().inverse(), [q[0], q[1], q[2], q[3]], []),\n (C3SXGate(), [q[0], q[1], q[2], q[4]], []),\n ]\n for instr, qargs, cargs in rules:\n qc._append(instr, qargs, cargs)\n\n self.definition = qc\n\n def control(\n self,\n num_ctrl_qubits: int = 1,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Controlled version of this gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n ctrl_state = _ctrl_state_to_int(ctrl_state, num_ctrl_qubits)\n new_ctrl_state = (self.ctrl_state << num_ctrl_qubits) | ctrl_state\n gate = MCXGate(num_ctrl_qubits=num_ctrl_qubits + 4, label=label, ctrl_state=new_ctrl_state)\n gate.base_gate.label = self.label\n return gate\n\n def inverse(self):\n \"\"\"Invert this gate. 
The C4X is its own inverse.\"\"\"\n return C4XGate(ctrl_state=self.ctrl_state)\n\n def __array__(self, dtype=None):\n \"\"\"Return a numpy.array for the C4X gate.\"\"\"\n mat = _compute_control_matrix(\n self.base_gate.to_matrix(), self.num_ctrl_qubits, ctrl_state=self.ctrl_state\n )\n if dtype:\n return numpy.asarray(mat, dtype=dtype)\n return mat\n\n\nclass MCXGate(ControlledGate):\n \"\"\"The general, multi-controlled X gate.\"\"\"\n\n def __new__(\n cls,\n num_ctrl_qubits: Optional[int] = None,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Create a new MCX instance.\n\n Depending on the number of controls and which mode of the MCX, this creates an\n explicit CX, CCX, C3X or C4X instance or a generic MCX gate.\n \"\"\"\n # The CXGate and CCXGate will be implemented for all modes of the MCX, and\n # the C3XGate and C4XGate will be implemented in the MCXGrayCode class.\n explicit = {1: CXGate, 2: CCXGate}\n if num_ctrl_qubits in explicit:\n gate_class = explicit[num_ctrl_qubits]\n gate = gate_class.__new__(gate_class, label=label, ctrl_state=ctrl_state)\n # if __new__ does not return the same type as cls, init is not called\n gate.__init__(label=label, ctrl_state=ctrl_state)\n return gate\n return super().__new__(cls)\n\n def __init__(\n self,\n num_ctrl_qubits: int,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n _name=\"mcx\",\n ):\n \"\"\"Create new MCX gate.\"\"\"\n num_ancilla_qubits = self.__class__.get_num_ancilla_qubits(num_ctrl_qubits)\n super().__init__(\n _name,\n num_ctrl_qubits + 1 + num_ancilla_qubits,\n [],\n num_ctrl_qubits=num_ctrl_qubits,\n label=label,\n ctrl_state=ctrl_state,\n base_gate=XGate(),\n )\n\n def inverse(self):\n \"\"\"Invert this gate. The MCX is its own inverse.\"\"\"\n return MCXGate(num_ctrl_qubits=self.num_ctrl_qubits, ctrl_state=self.ctrl_state)\n\n @staticmethod\n def get_num_ancilla_qubits(num_ctrl_qubits: int, mode: str = \"noancilla\") -> int:\n \"\"\"Get the number of required ancilla qubits without instantiating the class.\n\n This staticmethod might be necessary to check the number of ancillas before\n creating the gate, or to use the number of ancillas in the initialization.\n \"\"\"\n if mode == \"noancilla\":\n return 0\n if mode in [\"recursion\", \"advanced\"]:\n return int(num_ctrl_qubits > 4)\n if mode[:7] == \"v-chain\" or mode[:5] == \"basic\":\n return max(0, num_ctrl_qubits - 2)\n raise AttributeError(f\"Unsupported mode ({mode}) specified!\")\n\n def _define(self):\n \"\"\"The standard definition used the Gray code implementation.\"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n\n q = QuantumRegister(self.num_qubits, name=\"q\")\n qc = QuantumCircuit(q)\n qc._append(MCXGrayCode(self.num_ctrl_qubits), q[:], [])\n self.definition = qc\n\n @property\n def num_ancilla_qubits(self):\n \"\"\"The number of ancilla qubits.\"\"\"\n return self.__class__.get_num_ancilla_qubits(self.num_ctrl_qubits)\n\n def control(\n self,\n num_ctrl_qubits: int = 1,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Return a multi-controlled-X gate with more control lines.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. 
If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n if ctrl_state is None:\n # use __class__ so this works for derived classes\n gate = self.__class__(\n self.num_ctrl_qubits + num_ctrl_qubits, label=label, ctrl_state=ctrl_state\n )\n gate.base_gate.label = self.label\n return gate\n return super().control(num_ctrl_qubits, label=label, ctrl_state=ctrl_state)\n\n\nclass MCXGrayCode(MCXGate):\n r\"\"\"Implement the multi-controlled X gate using the Gray code.\n\n This delegates the implementation to the MCU1 gate, since :math:`X = H \\cdot U1(\\pi) \\cdot H`.\n \"\"\"\n\n def __new__(\n cls,\n num_ctrl_qubits: Optional[int] = None,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Create a new MCXGrayCode instance\"\"\"\n # if 1 to 4 control qubits, create explicit gates\n explicit = {1: CXGate, 2: CCXGate, 3: C3XGate, 4: C4XGate}\n if num_ctrl_qubits in explicit:\n gate_class = explicit[num_ctrl_qubits]\n gate = gate_class.__new__(gate_class, label=label, ctrl_state=ctrl_state)\n # if __new__ does not return the same type as cls, init is not called\n gate.__init__(label=label, ctrl_state=ctrl_state)\n return gate\n return super().__new__(cls)\n\n def __init__(\n self,\n num_ctrl_qubits: int,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n super().__init__(num_ctrl_qubits, label=label, ctrl_state=ctrl_state, _name=\"mcx_gray\")\n\n def inverse(self):\n \"\"\"Invert this gate. The MCX is its own inverse.\"\"\"\n return MCXGrayCode(num_ctrl_qubits=self.num_ctrl_qubits, ctrl_state=self.ctrl_state)\n\n def _define(self):\n \"\"\"Define the MCX gate using the Gray code.\"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n from .u1 import MCU1Gate\n\n q = QuantumRegister(self.num_qubits, name=\"q\")\n qc = QuantumCircuit(q, name=self.name)\n qc._append(HGate(), [q[-1]], [])\n qc._append(MCU1Gate(numpy.pi, num_ctrl_qubits=self.num_ctrl_qubits), q[:], [])\n qc._append(HGate(), [q[-1]], [])\n self.definition = qc\n\n\nclass MCXRecursive(MCXGate):\n \"\"\"Implement the multi-controlled X gate using recursion.\n\n Using a single ancilla qubit, the multi-controlled X gate is recursively split onto\n four sub-registers. This is done until we reach the 3- or 4-controlled X gate since\n for these we have a concrete implementation that do not require ancillas.\n \"\"\"\n\n def __init__(\n self,\n num_ctrl_qubits: int,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n super().__init__(num_ctrl_qubits, label=label, ctrl_state=ctrl_state, _name=\"mcx_recursive\")\n\n @staticmethod\n def get_num_ancilla_qubits(num_ctrl_qubits: int, mode: str = \"recursion\"):\n \"\"\"Get the number of required ancilla qubits.\"\"\"\n return MCXGate.get_num_ancilla_qubits(num_ctrl_qubits, mode)\n\n def inverse(self):\n \"\"\"Invert this gate. 
The MCX is its own inverse.\"\"\"\n return MCXRecursive(num_ctrl_qubits=self.num_ctrl_qubits, ctrl_state=self.ctrl_state)\n\n def _define(self):\n \"\"\"Define the MCX gate using recursion.\"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n\n q = QuantumRegister(self.num_qubits, name=\"q\")\n qc = QuantumCircuit(q, name=self.name)\n if self.num_qubits == 4:\n qc._append(C3XGate(), q[:], [])\n self.definition = qc\n elif self.num_qubits == 5:\n qc._append(C4XGate(), q[:], [])\n self.definition = qc\n else:\n for instr, qargs, cargs in self._recurse(q[:-1], q_ancilla=q[-1]):\n qc._append(instr, qargs, cargs)\n self.definition = qc\n\n def _recurse(self, q, q_ancilla=None):\n # recursion stop\n if len(q) == 4:\n return [(C3XGate(), q[:], [])]\n if len(q) == 5:\n return [(C4XGate(), q[:], [])]\n if len(q) < 4:\n raise AttributeError(\"Something went wrong in the recursion, have less than 4 qubits.\")\n\n # recurse\n num_ctrl_qubits = len(q) - 1\n middle = ceil(num_ctrl_qubits / 2)\n first_half = [*q[:middle], q_ancilla]\n second_half = [*q[middle:num_ctrl_qubits], q_ancilla, q[num_ctrl_qubits]]\n\n rule = []\n rule += self._recurse(first_half, q_ancilla=q[middle])\n rule += self._recurse(second_half, q_ancilla=q[middle - 1])\n rule += self._recurse(first_half, q_ancilla=q[middle])\n rule += self._recurse(second_half, q_ancilla=q[middle - 1])\n\n return rule\n\n\nclass MCXVChain(MCXGate):\n \"\"\"Implement the multi-controlled X gate using a V-chain of CX gates.\"\"\"\n\n def __new__(\n cls,\n num_ctrl_qubits: Optional[int] = None,\n dirty_ancillas: bool = False, # pylint: disable=unused-argument\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n \"\"\"Create a new MCX instance.\n\n This must be defined anew to include the additional argument ``dirty_ancillas``.\n \"\"\"\n return super().__new__(cls, num_ctrl_qubits, label=label, ctrl_state=ctrl_state)\n\n def __init__(\n self,\n num_ctrl_qubits: int,\n dirty_ancillas: bool = False,\n label: Optional[str] = None,\n ctrl_state: Optional[Union[str, int]] = None,\n ):\n super().__init__(num_ctrl_qubits, label=label, ctrl_state=ctrl_state, _name=\"mcx_vchain\")\n self._dirty_ancillas = dirty_ancillas\n\n def inverse(self):\n \"\"\"Invert this gate. 
The MCX is its own inverse.\"\"\"\n return MCXVChain(\n num_ctrl_qubits=self.num_ctrl_qubits,\n dirty_ancillas=self._dirty_ancillas,\n ctrl_state=self.ctrl_state,\n )\n\n @staticmethod\n def get_num_ancilla_qubits(num_ctrl_qubits: int, mode: str = \"v-chain\"):\n \"\"\"Get the number of required ancilla qubits.\"\"\"\n return MCXGate.get_num_ancilla_qubits(num_ctrl_qubits, mode)\n\n def _define(self):\n \"\"\"Define the MCX gate using a V-chain of CX gates.\"\"\"\n # pylint: disable=cyclic-import\n from qiskit.circuit.quantumcircuit import QuantumCircuit\n\n q = QuantumRegister(self.num_qubits, name=\"q\")\n qc = QuantumCircuit(q, name=self.name)\n q_controls = q[: self.num_ctrl_qubits]\n q_target = q[self.num_ctrl_qubits]\n q_ancillas = q[self.num_ctrl_qubits + 1 :]\n\n definition = []\n\n if self._dirty_ancillas:\n i = self.num_ctrl_qubits - 3\n ancilla_pre_rule = [\n (U2Gate(0, numpy.pi), [q_target], []),\n (CXGate(), [q_target, q_ancillas[i]], []),\n (U1Gate(-numpy.pi / 4), [q_ancillas[i]], []),\n (CXGate(), [q_controls[-1], q_ancillas[i]], []),\n (U1Gate(numpy.pi / 4), [q_ancillas[i]], []),\n (CXGate(), [q_target, q_ancillas[i]], []),\n (U1Gate(-numpy.pi / 4), [q_ancillas[i]], []),\n (CXGate(), [q_controls[-1], q_ancillas[i]], []),\n (U1Gate(numpy.pi / 4), [q_ancillas[i]], []),\n ]\n for inst in ancilla_pre_rule:\n definition.append(inst)\n\n for j in reversed(range(2, self.num_ctrl_qubits - 1)):\n definition.append(\n (RCCXGate(), [q_controls[j], q_ancillas[i - 1], q_ancillas[i]], [])\n )\n i -= 1\n\n definition.append((RCCXGate(), [q_controls[0], q_controls[1], q_ancillas[0]], []))\n i = 0\n for j in range(2, self.num_ctrl_qubits - 1):\n definition.append((RCCXGate(), [q_controls[j], q_ancillas[i], q_ancillas[i + 1]], []))\n i += 1\n\n if self._dirty_ancillas:\n ancilla_post_rule = [\n (U1Gate(-numpy.pi / 4), [q_ancillas[i]], []),\n (CXGate(), [q_controls[-1], q_ancillas[i]], []),\n (U1Gate(numpy.pi / 4), [q_ancillas[i]], []),\n (CXGate(), [q_target, q_ancillas[i]], []),\n (U1Gate(-numpy.pi / 4), [q_ancillas[i]], []),\n (CXGate(), [q_controls[-1], q_ancillas[i]], []),\n (U1Gate(numpy.pi / 4), [q_ancillas[i]], []),\n (CXGate(), [q_target, q_ancillas[i]], []),\n (U2Gate(0, numpy.pi), [q_target], []),\n ]\n for inst in ancilla_post_rule:\n definition.append(inst)\n else:\n definition.append((CCXGate(), [q_controls[-1], q_ancillas[i], q_target], []))\n\n for j in reversed(range(2, self.num_ctrl_qubits - 1)):\n definition.append((RCCXGate(), [q_controls[j], q_ancillas[i - 1], q_ancillas[i]], []))\n i -= 1\n definition.append((RCCXGate(), [q_controls[0], q_controls[1], q_ancillas[i]], []))\n\n if self._dirty_ancillas:\n for i, j in enumerate(list(range(2, self.num_ctrl_qubits - 1))):\n definition.append(\n (RCCXGate(), [q_controls[j], q_ancillas[i], q_ancillas[i + 1]], [])\n )\n\n for instr, qargs, cargs in definition:\n qc._append(instr, qargs, cargs)\n self.definition = qc\n",
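The docstrings in the file above stress Qiskit's little-endian convention: with q_0 as control, CX is I (x) |0><0| + X (x) |1><1|. The sketch below (pure NumPy, independent of Qiskit) rebuilds the CX and CCX matrices from those projector formulas and checks them against the matrices printed in the docstrings.

# Sketch: reconstruct CX and CCX from projector formulas, control on the lowest
# qubit index (little-endian). Used only to illustrate the documented convention.
import numpy as np

I2 = np.eye(2, dtype=int)
X  = np.array([[0, 1], [1, 0]])
P0 = np.array([[1, 0], [0, 0]])   # |0><0|
P1 = np.array([[0, 0], [0, 1]])   # |1><1|

# CX with q_0 as control, q_1 as target (leftmost kron factor acts on q_1).
cx = np.kron(I2, P0) + np.kron(X, P1)
assert np.array_equal(cx, np.array([[1, 0, 0, 0],
                                    [0, 0, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 1, 0, 0]]))

# CCX with q_0, q_1 as controls, q_2 as target: identity unless both controls are 1.
ccx = np.kron(np.kron(I2, I2), P0) + np.kron(cx, P1)
expected = np.eye(8)
expected[[3, 7]] = expected[[7, 3]]   # flip q_2 exactly when q_0 = q_1 = 1
assert np.array_equal(ccx, expected)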
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Nakanishi-Fujii-Todo algorithm.\"\"\"\n\nfrom typing import Optional\n\nimport numpy as np\nfrom scipy.optimize import OptimizeResult\n\nfrom .scipy_optimizer import SciPyOptimizer\n\n\nclass NFT(SciPyOptimizer):\n \"\"\"\n Nakanishi-Fujii-Todo algorithm.\n\n See https://arxiv.org/abs/1903.12166\n \"\"\"\n\n _OPTIONS = [\"maxiter\", \"maxfev\", \"disp\", \"reset_interval\"]\n\n # pylint: disable=unused-argument\n def __init__(\n self,\n maxiter: Optional[int] = None,\n maxfev: int = 1024,\n disp: bool = False,\n reset_interval: int = 32,\n options: Optional[dict] = None,\n **kwargs,\n ) -> None:\n \"\"\"\n Built out using scipy framework, for details, please refer to\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.\n\n Args:\n maxiter: Maximum number of iterations to perform.\n maxfev: Maximum number of function evaluations to perform.\n disp: disp\n reset_interval: The minimum estimates directly once\n in ``reset_interval`` times.\n options: A dictionary of solver options.\n kwargs: additional kwargs for scipy.optimize.minimize.\n\n Notes:\n In this optimization method, the optimization function have to satisfy\n three conditions written in [1]_.\n\n References:\n .. [1] K. M. Nakanishi, K. Fujii, and S. Todo. 2019.\n Sequential minimal optimization for quantum-classical hybrid algorithms.\n arXiv preprint arXiv:1903.12166.\n \"\"\"\n if options is None:\n options = {}\n for k, v in list(locals().items()):\n if k in self._OPTIONS:\n options[k] = v\n super().__init__(method=nakanishi_fujii_todo, options=options, **kwargs)\n\n\n# pylint: disable=invalid-name\ndef nakanishi_fujii_todo(\n fun, x0, args=(), maxiter=None, maxfev=1024, reset_interval=32, eps=1e-32, callback=None, **_\n):\n \"\"\"\n Find the global minimum of a function using the nakanishi_fujii_todo\n algorithm [1].\n Args:\n fun (callable): ``f(x, *args)``\n Function to be optimized. ``args`` can be passed as an optional item\n in the dict ``minimizer_kwargs``.\n This function must satisfy the three condition written in Ref. [1].\n x0 (ndarray): shape (n,)\n Initial guess. Array of real elements of size (n,),\n where 'n' is the number of independent variables.\n args (tuple, optional):\n Extra arguments passed to the objective function.\n maxiter (int):\n Maximum number of iterations to perform.\n Default: None.\n maxfev (int):\n Maximum number of function evaluations to perform.\n Default: 1024.\n reset_interval (int):\n The minimum estimates directly once in ``reset_interval`` times.\n Default: 32.\n eps (float): eps\n **_ : additional options\n callback (callable, optional):\n Called after each iteration.\n Returns:\n OptimizeResult:\n The optimization result represented as a ``OptimizeResult`` object.\n Important attributes are: ``x`` the solution array. See\n `OptimizeResult` for a description of other attributes.\n Notes:\n In this optimization method, the optimization function have to satisfy\n three conditions written in [1].\n References:\n .. [1] K. M. Nakanishi, K. Fujii, and S. Todo. 
2019.\n Sequential minimal optimization for quantum-classical hybrid algorithms.\n arXiv preprint arXiv:1903.12166.\n \"\"\"\n\n x0 = np.asarray(x0)\n recycle_z0 = None\n niter = 0\n funcalls = 0\n\n while True:\n\n idx = niter % x0.size\n\n if reset_interval > 0:\n if niter % reset_interval == 0:\n recycle_z0 = None\n\n if recycle_z0 is None:\n z0 = fun(np.copy(x0), *args)\n funcalls += 1\n else:\n z0 = recycle_z0\n\n p = np.copy(x0)\n p[idx] = x0[idx] + np.pi / 2\n z1 = fun(p, *args)\n funcalls += 1\n\n p = np.copy(x0)\n p[idx] = x0[idx] - np.pi / 2\n z3 = fun(p, *args)\n funcalls += 1\n\n z2 = z1 + z3 - z0\n c = (z1 + z3) / 2\n a = np.sqrt((z0 - z2) ** 2 + (z1 - z3) ** 2) / 2\n b = np.arctan((z1 - z3) / ((z0 - z2) + eps * (z0 == z2))) + x0[idx]\n b += 0.5 * np.pi + 0.5 * np.pi * np.sign((z0 - z2) + eps * (z0 == z2))\n\n x0[idx] = b\n recycle_z0 = c - a\n\n niter += 1\n\n if callback is not None:\n callback(np.copy(x0))\n\n if maxfev is not None:\n if funcalls >= maxfev:\n break\n\n if maxiter is not None:\n if niter >= maxiter:\n break\n\n return OptimizeResult(\n fun=fun(np.copy(x0), *args), x=x0, nit=niter, nfev=funcalls, success=(niter > 1)\n )\n",
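The nakanishi_fujii_todo routine in the row above assumes the cost is a sinusoid in each single parameter, so every coordinate sweep can jump to that coordinate's minimum from three evaluations. The sketch below drives it on a toy two-parameter cost; it assumes the nakanishi_fujii_todo function defined in the listing is in scope (for example, copied into a local module), and the cost function itself is invented for illustration.

# Sketch: minimize a cost that is sinusoidal in each parameter, the setting
# assumed by the Nakanishi-Fujii-Todo update rule.
import numpy as np

def cost(x):
    # Each coordinate enters only through cos(x_i - shift_i), as required.
    return 1.5 * np.cos(x[0] - 0.3) + 0.7 * np.cos(x[1] + 1.1) + 2.0

x0 = np.zeros(2)
# nakanishi_fujii_todo is assumed to be the function defined in the file above.
result = nakanishi_fujii_todo(cost, x0, maxiter=20, maxfev=200, reset_interval=8)
print(result.x, result.fun, result.nfev)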
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"A module for monitoring backends.\"\"\"\n\nimport types\nimport math\nimport datetime\nfrom IPython.display import display\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle\nimport ipywidgets as widgets\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.visualization.gate_map import plot_gate_map, plot_error_map\nfrom qiskit.test.mock import FakeBackend\n\ntry:\n from qiskit.providers.ibmq import IBMQBackend\nexcept ImportError:\n pass\n\nMONTH_NAMES = {\n 1: \"Jan.\",\n 2: \"Feb.\",\n 3: \"Mar.\",\n 4: \"Apr.\",\n 5: \"May\",\n 6: \"June\",\n 7: \"July\",\n 8: \"Aug.\",\n 9: \"Sept.\",\n 10: \"Oct.\",\n 11: \"Nov.\",\n 12: \"Dec.\",\n}\n\n\ndef _load_jobs_data(self, change):\n \"\"\"Loads backend jobs data\"\"\"\n if change[\"new\"] == 4 and not self._did_jobs:\n self._did_jobs = True\n year = widgets.Output(\n layout=widgets.Layout(display=\"flex-inline\", align_items=\"center\", min_height=\"400px\")\n )\n\n month = widgets.Output(\n layout=widgets.Layout(display=\"flex-inline\", align_items=\"center\", min_height=\"400px\")\n )\n\n week = widgets.Output(\n layout=widgets.Layout(display=\"flex-inline\", align_items=\"center\", min_height=\"400px\")\n )\n\n self.children[4].children = [year, month, week]\n self.children[4].set_title(0, \"Year\")\n self.children[4].set_title(1, \"Month\")\n self.children[4].set_title(2, \"Week\")\n self.children[4].selected_index = 1\n _build_job_history(self.children[4], self._backend)\n\n\ndef _backend_monitor(backend):\n \"\"\"A private function to generate a monitor widget\n for a IBMQ backend repr.\n\n Args:\n backend (IBMQBackend | FakeBackend): The backend.\n\n Raises:\n QiskitError: Input is not an IBMQBackend\n \"\"\"\n if not isinstance(backend, IBMQBackend) and not isinstance(backend, FakeBackend):\n raise QiskitError(\"Input variable is not of type IBMQBackend.\")\n title_style = \"style='color:#ffffff;background-color:#000000;padding-top: 1%;\"\n title_style += \"padding-bottom: 1%;padding-left: 1%; margin-top: 0px'\"\n title_html = f\"<h1 {title_style}>{backend.name()}</h1>\"\n\n details = [config_tab(backend)]\n\n tab_contents = [\"Configuration\"]\n\n # Empty jobs tab widget\n jobs = widgets.Tab(layout=widgets.Layout(max_height=\"620px\"))\n\n if not backend.configuration().simulator:\n tab_contents.extend([\"Qubit Properties\", \"Multi-Qubit Gates\", \"Error Map\", \"Job History\"])\n\n details.extend([qubits_tab(backend), gates_tab(backend), detailed_map(backend), jobs])\n\n tabs = widgets.Tab(layout=widgets.Layout(overflow_y=\"scroll\"))\n tabs.children = details\n for i in range(len(details)):\n tabs.set_title(i, tab_contents[i])\n\n # Make backend accessible to tabs widget\n tabs._backend = backend\n tabs._did_jobs = False\n tabs._update = types.MethodType(_load_jobs_data, tabs)\n\n tabs.observe(tabs._update, names=\"selected_index\")\n\n title_widget = widgets.HTML(value=title_html, layout=widgets.Layout(margin=\"0px 0px 0px 0px\"))\n\n bmonitor = widgets.VBox(\n [title_widget, tabs],\n layout=widgets.Layout(\n 
border=\"4px solid #000000\", max_height=\"650px\", min_height=\"650px\", overflow_y=\"hidden\"\n ),\n )\n display(bmonitor)\n\n\ndef config_tab(backend):\n \"\"\"The backend configuration widget.\n\n Args:\n backend (IBMQBackend | FakeBackend): The backend.\n\n Returns:\n grid: A GridBox widget.\n \"\"\"\n status = backend.status().to_dict()\n config = backend.configuration().to_dict()\n\n config_dict = {**status, **config}\n\n upper_list = [\"n_qubits\"]\n\n if \"quantum_volume\" in config.keys():\n if config[\"quantum_volume\"]:\n upper_list.append(\"quantum_volume\")\n\n upper_list.extend(\n [\n \"operational\",\n \"status_msg\",\n \"pending_jobs\",\n \"backend_version\",\n \"basis_gates\",\n \"max_shots\",\n \"max_experiments\",\n ]\n )\n\n lower_list = list(set(config_dict.keys()).difference(upper_list))\n # Remove gates because they are in a different tab\n lower_list.remove(\"gates\")\n # Look for hamiltonian\n if \"hamiltonian\" in lower_list:\n htex = config_dict[\"hamiltonian\"][\"h_latex\"]\n config_dict[\"hamiltonian\"] = \"$$%s$$\" % htex\n\n upper_str = \"<table>\"\n upper_str += \"\"\"<style>\ntable {\n border-collapse: collapse;\n width: auto;\n}\n\nth, td {\n text-align: left;\n padding: 8px;\n}\n\ntr:nth-child(even) {background-color: #f6f6f6;}\n</style>\"\"\"\n\n footer = \"</table>\"\n\n # Upper HBox widget data\n\n upper_str += \"<tr><th>Property</th><th>Value</th></tr>\"\n for key in upper_list:\n upper_str += \"<tr><td><font style='font-weight:bold'>{}</font></td><td>{}</td></tr>\".format(\n key,\n config_dict[key],\n )\n upper_str += footer\n\n upper_table = widgets.HTMLMath(\n value=upper_str, layout=widgets.Layout(width=\"100%\", grid_area=\"left\")\n )\n\n image_widget = widgets.Output(\n layout=widgets.Layout(\n display=\"flex-inline\",\n grid_area=\"right\",\n padding=\"10px 10px 10px 10px\",\n width=\"auto\",\n max_height=\"325px\",\n align_items=\"center\",\n )\n )\n\n if not config[\"simulator\"]:\n with image_widget:\n qubit_size = 24\n if config[\"n_qubits\"] > 20:\n qubit_size = 34\n gate_map = plot_gate_map(backend, qubit_size=qubit_size)\n display(gate_map)\n plt.close(gate_map)\n\n lower_str = \"<table>\"\n lower_str += \"\"\"<style>\ntable {\n border-collapse: collapse;\n width: auto;\n}\n\nth, td {\n text-align: left;\n padding: 8px;\n}\n\ntr:nth-child(even) {background-color: #f6f6f6;}\n</style>\"\"\"\n lower_str += \"<tr><th></th><th></th></tr>\"\n for key in lower_list:\n if key != \"name\":\n lower_str += f\"<tr><td>{key}</td><td>{config_dict[key]}</td></tr>\"\n lower_str += footer\n\n lower_table = widgets.HTMLMath(\n value=lower_str, layout=widgets.Layout(width=\"auto\", grid_area=\"bottom\")\n )\n\n grid = widgets.GridBox(\n children=[upper_table, image_widget, lower_table],\n layout=widgets.Layout(\n grid_template_rows=\"auto auto\",\n grid_template_columns=\"31% 23% 23% 23%\",\n grid_template_areas=\"\"\"\n \"left right right right\"\n \"bottom bottom bottom bottom\"\n \"\"\",\n grid_gap=\"0px 0px\",\n ),\n )\n\n return grid\n\n\ndef qubits_tab(backend):\n \"\"\"The qubits properties widget\n\n Args:\n backend (IBMQBackend | FakeBackend): The backend.\n\n Returns:\n VBox: A VBox widget.\n \"\"\"\n props = backend.properties()\n\n header_html = \"<div><font style='font-weight:bold'>{key}</font>: {value}</div>\"\n update_date = props.last_update_date.strftime(\"%a %d %B %Y at %H:%M %Z\")\n header_html = header_html.format(key=\"last_update_date\", value=update_date)\n\n update_date_widget = widgets.HTML(value=header_html)\n\n qubit_html = 
\"<table>\"\n qubit_html += \"\"\"<style>\ntable {\n border-collapse: collapse;\n width: auto;\n}\n\nth, td {\n text-align: left;\n padding: 8px;\n}\n\ntr:nth-child(even) {background-color: #f6f6f6;}\n</style>\"\"\"\n\n qubit_html += \"<tr><th></th><th>Frequency</th><th>T1</th><th>T2</th>\"\n qubit_footer = \"</table>\"\n\n gate_error_title = \"\"\n\n for index, qubit_data in enumerate(props.qubits):\n name = \"Q%s\" % index\n gate_data = [gate for gate in props.gates if gate.qubits == [index]]\n\n cal_data = dict.fromkeys([\"T1\", \"T2\", \"frequency\", \"readout_error\"], \"Unknown\")\n for nduv in qubit_data:\n if nduv.name in cal_data:\n cal_data[nduv.name] = str(round(nduv.value, 5)) + \" \" + nduv.unit\n\n gate_names = []\n gate_error = []\n for gd in gate_data:\n if gd.gate in [\"id\"]:\n continue\n try:\n gate_error.append(str(round(props.gate_error(gd.gate, index), 5)))\n gate_names.append(gd.gate.upper())\n except QiskitError:\n pass\n\n if not gate_error_title:\n for gname in gate_names:\n gate_error_title += f\"<th>{gname}</th>\"\n qubit_html += gate_error_title + \"<th>Readout error</th></tr>\"\n\n qubit_html += f\"<tr><td><font style='font-weight:bold'>{name}</font></td>\"\n qubit_html += (\n f\"<td>{cal_data['frequency']}</td><td>{cal_data['T1']}</td><td>{cal_data['T2']}</td>\"\n )\n for gerror in gate_error:\n qubit_html += f\"<td>{gerror}</td>\"\n qubit_html += f\"<td>{cal_data['readout_error']}</td>\"\n\n qubit_html += qubit_footer\n\n qubit_widget = widgets.HTML(value=qubit_html)\n\n out = widgets.VBox([update_date_widget, qubit_widget])\n\n return out\n\n\ndef gates_tab(backend):\n \"\"\"The multiple qubit gate error widget.\n\n Args:\n backend (IBMQBackend | FakeBackend): The backend.\n\n Returns:\n VBox: A VBox widget.\n \"\"\"\n props = backend.properties()\n\n multi_qubit_gates = [g for g in props.gates if len(g.qubits) > 1]\n\n header_html = \"<div><font style='font-weight:bold'>{key}</font>: {value}</div>\"\n header_html = header_html.format(key=\"last_update_date\", value=props.last_update_date)\n\n update_date_widget = widgets.HTML(value=header_html, layout=widgets.Layout(grid_area=\"top\"))\n\n gate_html = \"<table>\"\n gate_html += \"\"\"<style>\ntable {\n border-collapse: collapse;\n width: auto;\n}\n\nth, td {\n text-align: left;\n padding: 8px;\n}\n\ntr:nth-child(even) {background-color: #f6f6f6;};\n</style>\"\"\"\n\n gate_html += \"<tr><th></th><th>Type</th><th>Gate error</th></tr>\"\n gate_footer = \"</table>\"\n\n # Split gates into two columns\n left_num = math.ceil(len(multi_qubit_gates) / 3)\n mid_num = math.ceil((len(multi_qubit_gates) - left_num) / 2)\n\n left_table = gate_html\n\n for qub in range(left_num):\n gate = multi_qubit_gates[qub]\n qubits = gate.qubits\n ttype = gate.gate\n error = round(props.gate_error(gate.gate, qubits), 5)\n\n left_table += \"<tr><td><font style='font-weight:bold'>%s</font>\"\n left_table += \"</td><td>%s</td><td>%s</td></tr>\"\n left_table = left_table % (f\"{ttype}{qubits[0]}_{qubits[1]}\", ttype, error)\n left_table += gate_footer\n\n middle_table = gate_html\n\n for qub in range(left_num, left_num + mid_num):\n gate = multi_qubit_gates[qub]\n qubits = gate.qubits\n ttype = gate.gate\n error = round(props.gate_error(gate.gate, qubits), 5)\n\n middle_table += \"<tr><td><font style='font-weight:bold'>%s</font>\"\n middle_table += \"</td><td>%s</td><td>%s</td></tr>\"\n middle_table = middle_table % (f\"{ttype}{qubits[0]}_{qubits[1]}\", ttype, error)\n middle_table += gate_footer\n\n right_table = gate_html\n\n for qub 
in range(left_num + mid_num, len(multi_qubit_gates)):\n gate = multi_qubit_gates[qub]\n qubits = gate.qubits\n ttype = gate.gate\n error = round(props.gate_error(gate.gate, qubits), 5)\n\n right_table += \"<tr><td><font style='font-weight:bold'>%s</font>\"\n right_table += \"</td><td>%s</td><td>%s</td></tr>\"\n right_table = right_table % (f\"{ttype}{qubits[0]}_{qubits[1]}\", ttype, error)\n right_table += gate_footer\n\n left_table_widget = widgets.HTML(value=left_table, layout=widgets.Layout(grid_area=\"left\"))\n middle_table_widget = widgets.HTML(\n value=middle_table, layout=widgets.Layout(grid_area=\"middle\")\n )\n right_table_widget = widgets.HTML(value=right_table, layout=widgets.Layout(grid_area=\"right\"))\n\n grid = widgets.GridBox(\n children=[update_date_widget, left_table_widget, middle_table_widget, right_table_widget],\n layout=widgets.Layout(\n grid_template_rows=\"auto auto\",\n grid_template_columns=\"33% 33% 33%\",\n grid_template_areas=\"\"\"\n \"top top top\"\n \"left middle right\"\n \"\"\",\n grid_gap=\"0px 0px\",\n ),\n )\n\n return grid\n\n\ndef detailed_map(backend):\n \"\"\"Widget for displaying detailed noise map.\n\n Args:\n backend (IBMQBackend | FakeBackend): The backend.\n\n Returns:\n GridBox: Widget holding noise map images.\n \"\"\"\n error_widget = widgets.Output(\n layout=widgets.Layout(display=\"flex-inline\", align_items=\"center\")\n )\n with error_widget:\n display(plot_error_map(backend, figsize=(11, 9), show_title=False))\n return error_widget\n\n\ndef job_history(backend):\n \"\"\"Widget for displaying job history\n\n Args:\n backend (IBMQBackend | FakeBackend): The backend.\n\n Returns:\n Tab: A tab widget for history images.\n \"\"\"\n year = widgets.Output(\n layout=widgets.Layout(display=\"flex-inline\", align_items=\"center\", min_height=\"400px\")\n )\n\n month = widgets.Output(\n layout=widgets.Layout(display=\"flex-inline\", align_items=\"center\", min_height=\"400px\")\n )\n\n week = widgets.Output(\n layout=widgets.Layout(display=\"flex-inline\", align_items=\"center\", min_height=\"400px\")\n )\n\n tabs = widgets.Tab(layout=widgets.Layout(max_height=\"620px\"))\n tabs.children = [year, month, week]\n tabs.set_title(0, \"Year\")\n tabs.set_title(1, \"Month\")\n tabs.set_title(2, \"Week\")\n tabs.selected_index = 1\n\n _build_job_history(tabs, backend)\n return tabs\n\n\ndef _build_job_history(tabs, backend):\n\n past_year_date = datetime.datetime.now() - datetime.timedelta(days=365)\n date_filter = {\"creationDate\": {\"gt\": past_year_date.isoformat()}}\n jobs = backend.jobs(limit=None, db_filter=date_filter)\n\n with tabs.children[0]:\n year_plot = plot_job_history(jobs, interval=\"year\")\n display(year_plot)\n plt.close(year_plot)\n\n with tabs.children[1]:\n month_plot = plot_job_history(jobs, interval=\"month\")\n display(month_plot)\n plt.close(month_plot)\n\n with tabs.children[2]:\n week_plot = plot_job_history(jobs, interval=\"week\")\n display(week_plot)\n plt.close(week_plot)\n\n\ndef plot_job_history(jobs, interval=\"year\"):\n \"\"\"Plots the job history of the user from the given list of jobs.\n\n Args:\n jobs (list): A list of jobs with type IBMQjob.\n interval (str): Interval over which to examine.\n\n Returns:\n fig: A Matplotlib figure instance.\n \"\"\"\n\n def get_date(job):\n \"\"\"Returns a datetime object from a IBMQJob instance.\n\n Args:\n job (IBMQJob): A job.\n\n Returns:\n dt: A datetime object.\n \"\"\"\n creation_date = job.creation_date()\n\n if isinstance(creation_date, datetime.datetime):\n return 
creation_date\n\n return datetime.datetime.strptime(creation_date, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n current_time = datetime.datetime.now()\n\n if interval == \"year\":\n bins = [(current_time - datetime.timedelta(days=k * 365 / 12)) for k in range(12)]\n elif interval == \"month\":\n bins = [(current_time - datetime.timedelta(days=k)) for k in range(30)]\n elif interval == \"week\":\n bins = [(current_time - datetime.timedelta(days=k)) for k in range(7)]\n\n binned_jobs = [0] * len(bins)\n\n if interval == \"year\":\n for job in jobs:\n for ind, dat in enumerate(bins):\n date = get_date(job)\n if date.month == dat.month:\n binned_jobs[ind] += 1\n break\n else:\n continue\n else:\n for job in jobs:\n for ind, dat in enumerate(bins):\n date = get_date(job)\n if date.day == dat.day and date.month == dat.month:\n binned_jobs[ind] += 1\n break\n else:\n continue\n\n nz_bins = []\n nz_idx = []\n for ind, val in enumerate(binned_jobs):\n if val != 0:\n nz_idx.append(ind)\n nz_bins.append(val)\n\n total_jobs = sum(binned_jobs)\n\n colors = [\"#003f5c\", \"#ffa600\", \"#374c80\", \"#ff764a\", \"#7a5195\", \"#ef5675\", \"#bc5090\"]\n\n if interval == \"year\":\n labels = [f\"{str(bins[b].year)[2:]}-{MONTH_NAMES[bins[b].month]}\" for b in nz_idx]\n else:\n labels = [f\"{MONTH_NAMES[bins[b].month]}-{bins[b].day}\" for b in nz_idx]\n fig, ax = plt.subplots(1, 1, figsize=(5.5, 5.5))\n ax.pie(\n nz_bins[::-1],\n labels=labels,\n colors=colors,\n textprops={\"fontsize\": 14},\n rotatelabels=True,\n counterclock=False,\n radius=1,\n )\n ax.add_artist(Circle((0, 0), 0.7, color=\"white\", zorder=1))\n ax.text(0, 0, total_jobs, horizontalalignment=\"center\", verticalalignment=\"center\", fontsize=26)\n return fig\n"
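plot_job_history in the row above draws a donut chart by stacking a white circle and a centered total on top of a pie. A minimal Matplotlib sketch of that pattern follows; the bin counts and labels are invented for illustration.

# Sketch: donut-style job-history chart, mimicking plot_job_history above.
import matplotlib.pyplot as plt
from matplotlib.patches import Circle

bins = [12, 7, 3, 9]                               # jobs per period (illustrative)
labels = ["May-5", "May-12", "May-19", "May-26"]   # illustrative bin labels

fig, ax = plt.subplots(figsize=(5.5, 5.5))
ax.pie(bins[::-1], labels=labels, rotatelabels=True, counterclock=False, radius=1)
ax.add_artist(Circle((0, 0), 0.7, color="white", zorder=1))   # punch out the center
ax.text(0, 0, sum(bins), ha="center", va="center", fontsize=26)
plt.show()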
] | [
[
"matplotlib.collections.PatchCollection",
"numpy.asarray",
"numpy.arange",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"matplotlib.pyplot.figure"
],
[
"numpy.outer",
"numpy.mean"
],
[
"numpy.log",
"numpy.maximum",
"numpy.minimum",
"numpy.abs",
"numpy.arcsin",
"numpy.arccos",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"numpy.mean",
"numpy.sum"
],
[
"numpy.array",
"numpy.cos",
"numpy.sin"
],
[
"numpy.sqrt"
],
[
"numpy.all",
"numpy.random.randint"
],
[
"numpy.eye",
"numpy.random.shuffle",
"numpy.all",
"numpy.array",
"numpy.zeros"
],
[
"numpy.product",
"numpy.tril_indices",
"scipy.stats.norm.rvs",
"numpy.zeros",
"scipy.stats.unitary_group.rvs",
"numpy.random.default_rng"
],
[
"numpy.sqrt",
"numpy.reshape",
"numpy.kron",
"numpy.mod",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.isclose"
],
[
"numpy.asarray",
"numpy.array"
],
[
"numpy.sqrt",
"numpy.arctan",
"numpy.asarray",
"numpy.sign",
"numpy.copy"
],
[
"matplotlib.patches.Circle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
limberc/mindspore | [
"59a277756eb4faad9ac9afcc7fd526e8277d4994",
"e294acdffc9246cb6d77ea18ea00d08244d30c59"
] | [
"tests/ut/python/dataset/test_compose.py",
"mindspore/train/summary/_summary_adapter.py"
] | [
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\nimport pytest\nimport mindspore.common.dtype as mstype\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.c_transforms as c_transforms\nimport mindspore.dataset.transforms.py_transforms as py_transforms\n\nimport mindspore.dataset.vision.c_transforms as c_vision\nimport mindspore.dataset.vision.py_transforms as py_vision\n\nfrom util import visualize_list, save_and_check_md5, config_get_set_seed, config_get_set_num_parallel_workers\n\nGENERATE_GOLDEN = False\n\n\ndef test_compose():\n \"\"\"\n Test C++ and Python Compose Op\n \"\"\"\n ds.config.set_seed(0)\n\n def test_config(arr, op_list):\n try:\n data = ds.NumpySlicesDataset(arr, column_names=\"col\", shuffle=False)\n data = data.map(input_columns=[\"col\"], operations=op_list)\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n res.append(i[\"col\"].tolist())\n return res\n except (TypeError, ValueError) as e:\n return str(e)\n\n # Test simple compose with only 1 op, this would generate a warning\n assert test_config([[1, 0], [3, 4]], c_transforms.Compose([c_transforms.Fill(2)])) == [[2, 2], [2, 2]]\n\n # Test 1 column -> 2 columns -> 1 -> 2 -> 1\n assert test_config([[1, 0]],\n c_transforms.Compose(\n [c_transforms.Duplicate(), c_transforms.Concatenate(), c_transforms.Duplicate(),\n c_transforms.Concatenate()])) \\\n == [[1, 0] * 4]\n\n # Test one Python transform followed by a C++ transform. 
Type after OneHot is a float (mixed use-case)\n assert test_config([1, 0],\n c_transforms.Compose([py_transforms.OneHotOp(2), c_transforms.TypeCast(mstype.int32)])) \\\n == [[[0, 1]], [[1, 0]]]\n\n # Test exceptions.\n with pytest.raises(TypeError) as error_info:\n c_transforms.Compose([1, c_transforms.TypeCast(mstype.int32)])\n assert \"op_list[0] is neither a c_transform op (TensorOperation) nor a callable pyfunc.\" in str(error_info.value)\n\n # Test empty op list\n with pytest.raises(ValueError) as error_info:\n test_config([1, 0], c_transforms.Compose([]))\n assert \"op_list can not be empty.\" in str(error_info.value)\n\n # Test Python compose op\n assert test_config([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2)])) == [[[0, 1]], [[1, 0]]]\n assert test_config([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2), (lambda x: x + x)])) == [[[0, 2]],\n [[2, 0]]]\n\n # Test nested Python compose op\n assert test_config([1, 0],\n py_transforms.Compose([py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)])) \\\n == [[[0, 2]], [[2, 0]]]\n\n # Test passing a list of Python ops without Compose wrapper\n assert test_config([1, 0],\n [py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)]) \\\n == [[[0, 2]], [[2, 0]]]\n assert test_config([1, 0], [py_transforms.OneHotOp(2), (lambda x: x + x)]) == [[[0, 2]], [[2, 0]]]\n\n # Test a non callable function\n with pytest.raises(ValueError) as error_info:\n py_transforms.Compose([1])\n assert \"transforms[0] is not callable.\" in str(error_info.value)\n\n # Test empty Python op list\n with pytest.raises(ValueError) as error_info:\n test_config([1, 0], py_transforms.Compose([]))\n assert \"transforms list is empty.\" in str(error_info.value)\n\n # Pass in extra brackets\n with pytest.raises(TypeError) as error_info:\n py_transforms.Compose([(lambda x: x + x)])()\n assert \"Compose was called without an image. 
Fix invocation (avoid it being invoked as Compose([...])()).\" in str(\n error_info.value)\n\n\ndef test_lambdas():\n \"\"\"\n Test Multi Column Python Compose Op\n \"\"\"\n ds.config.set_seed(0)\n\n def test_config(arr, input_columns, output_cols, op_list):\n data = ds.NumpySlicesDataset(arr, column_names=input_columns, shuffle=False)\n data = data.map(operations=op_list, input_columns=input_columns, output_columns=output_cols,\n column_order=output_cols)\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n for col_name in output_cols:\n res.append(i[col_name].tolist())\n return res\n\n arr = ([[1]], [[3]])\n\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\"], py_transforms.Compose([(lambda x, y: x)])) == [[1]]\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\"], py_transforms.Compose([lambda x, y: x, lambda x: x])) == [[1]]\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\", \"b\"],\n py_transforms.Compose([lambda x, y: x, lambda x: (x, x * 2)])) == \\\n [[1], [2]]\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\", \"b\"],\n [lambda x, y: (x, x + y), lambda x, y: (x, y * 2)]) == [[1], [8]]\n\n\ndef test_c_py_compose_transforms_module():\n \"\"\"\n Test combining Python and C++ transforms\n \"\"\"\n ds.config.set_seed(0)\n\n def test_config(arr, input_columns, output_cols, op_list):\n data = ds.NumpySlicesDataset(arr, column_names=input_columns, shuffle=False)\n data = data.map(operations=op_list, input_columns=input_columns, output_columns=output_cols,\n column_order=output_cols)\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n for col_name in output_cols:\n res.append(i[col_name].tolist())\n return res\n\n arr = [1, 0]\n assert test_config(arr, [\"cols\"], [\"cols\"],\n [py_transforms.OneHotOp(2), c_transforms.Mask(c_transforms.Relational.EQ, 1)]) == \\\n [[[False, True]],\n [[True, False]]]\n assert test_config(arr, [\"cols\"], [\"cols\"],\n [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1)]) \\\n == [[[1, 1]], [[1, 1]]]\n assert test_config(arr, [\"cols\"], [\"cols\"],\n [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1), (lambda x: x + x)]) \\\n == [[[2, 2]], [[2, 2]]]\n assert test_config([[1, 3]], [\"cols\"], [\"cols\"],\n [c_transforms.PadEnd([3], -1), (lambda x: x + x)]) \\\n == [[2, 6, -2]]\n\n arr = ([[1]], [[3]])\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\"], [(lambda x, y: x + y), c_transforms.PadEnd([2], -1)]) == [[4, -1]]\n\n\ndef test_c_py_compose_vision_module(plot=False, run_golden=True):\n \"\"\"\n Test combining Python and C++ vision transforms\n \"\"\"\n original_seed = config_get_set_seed(10)\n original_num_parallel_workers = config_get_set_num_parallel_workers(1)\n\n def test_config(plot, file_name, op_list):\n data_dir = \"../data/dataset/testImageNetData/train/\"\n data1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data1 = data1.map(operations=op_list, input_columns=[\"image\"])\n data2 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data2 = data2.map(operations=c_vision.Decode(), input_columns=[\"image\"])\n original_images = []\n transformed_images = []\n\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):\n transformed_images.append(item[\"image\"])\n for item in data2.create_dict_iterator(num_epochs=1, output_numpy=True):\n original_images.append(item[\"image\"])\n\n if run_golden:\n # Compare with expected md5 from images\n save_and_check_md5(data1, file_name, generate_golden=GENERATE_GOLDEN)\n\n if 
plot:\n visualize_list(original_images, transformed_images)\n\n test_config(op_list=[c_vision.Decode(),\n py_vision.ToPIL(),\n py_vision.Resize((224, 224)),\n np.array],\n plot=plot, file_name=\"compose_c_py_1.npz\")\n\n test_config(op_list=[c_vision.Decode(),\n c_vision.Resize((224, 244)),\n py_vision.ToPIL(),\n np.array,\n c_vision.Resize((24, 24))],\n plot=plot, file_name=\"compose_c_py_2.npz\")\n\n test_config(op_list=[py_vision.Decode(),\n py_vision.Resize((224, 224)),\n np.array,\n c_vision.RandomColor()],\n plot=plot, file_name=\"compose_c_py_3.npz\")\n\n # Restore configuration\n ds.config.set_seed(original_seed)\n ds.config.set_num_parallel_workers((original_num_parallel_workers))\n\n\ndef test_py_transforms_with_c_vision():\n \"\"\"\n These examples will fail, as py_transforms.Random(Apply/Choice/Order) expect callable functions\n \"\"\"\n\n ds.config.set_seed(0)\n\n def test_config(op_list):\n data_dir = \"../data/dataset/testImageNetData/train/\"\n data = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data = data.map(operations=op_list)\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n for col_name in output_cols:\n res.append(i[col_name].tolist())\n return res\n\n with pytest.raises(ValueError) as error_info:\n test_config(py_transforms.RandomApply([c_vision.RandomResizedCrop(200)]))\n assert \"transforms[0] is not callable.\" in str(error_info.value)\n\n with pytest.raises(ValueError) as error_info:\n test_config(py_transforms.RandomChoice([c_vision.RandomResizedCrop(200)]))\n assert \"transforms[0] is not callable.\" in str(error_info.value)\n\n with pytest.raises(ValueError) as error_info:\n test_config(py_transforms.RandomOrder([np.array, c_vision.RandomResizedCrop(200)]))\n assert \"transforms[1] is not callable.\" in str(error_info.value)\n\n with pytest.raises(RuntimeError) as error_info:\n test_config([py_transforms.OneHotOp(20, 0.1)])\n assert \"The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()\" in str(\n error_info.value)\n\n\ndef test_py_vision_with_c_transforms():\n \"\"\"\n Test combining Python vision operations with C++ transforms operations\n \"\"\"\n\n ds.config.set_seed(0)\n\n def test_config(op_list):\n data_dir = \"../data/dataset/testImageNetData/train/\"\n data1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data1 = data1.map(operations=op_list, input_columns=[\"image\"])\n transformed_images = []\n\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):\n transformed_images.append(item[\"image\"])\n return transformed_images\n\n # Test with Mask Op\n output_arr = test_config([py_vision.Decode(),\n py_vision.CenterCrop((2)), np.array,\n c_transforms.Mask(c_transforms.Relational.GE, 100)])\n\n exp_arr = [np.array([[[True, False, False],\n [True, False, False]],\n [[True, False, False],\n [True, False, False]]]),\n np.array([[[True, False, False],\n [True, False, False]],\n [[True, False, False],\n [True, False, False]]])]\n\n for exp_a, output in zip(exp_arr, output_arr):\n np.testing.assert_array_equal(exp_a, output)\n\n # Test with Fill Op\n output_arr = test_config([py_vision.Decode(),\n py_vision.CenterCrop((4)), np.array,\n c_transforms.Fill(10)])\n\n exp_arr = [np.ones((4, 4, 3)) * 10] * 2\n for exp_a, output in zip(exp_arr, output_arr):\n np.testing.assert_array_equal(exp_a, output)\n\n # Test with Concatenate Op, which will raise an error since ConcatenateOp only supports rank 1 tensors.\n with pytest.raises(RuntimeError) as error_info:\n test_config([py_vision.Decode(),\n py_vision.CenterCrop((2)), np.array,\n c_transforms.Concatenate(0)])\n assert \"Only 1D tensors supported\" in str(error_info.value)\n\n\ndef test_compose_with_custom_function():\n \"\"\"\n Test Python Compose with custom function\n \"\"\"\n\n def custom_function(x):\n return (x, x * x)\n\n # First dataset\n op_list = [\n lambda x: x * 3,\n custom_function,\n # convert two column output to one\n lambda *images: np.stack(images)\n ]\n\n data = ds.NumpySlicesDataset([[1, 2]], column_names=[\"col0\"], shuffle=False)\n data = data.map(input_columns=[\"col0\"], operations=op_list)\n #\n\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n res.append(i[\"col0\"].tolist())\n assert res == [[[3, 6], [9, 36]]]\n\n\nif __name__ == \"__main__\":\n test_compose()\n test_lambdas()\n test_c_py_compose_transforms_module()\n test_c_py_compose_vision_module(plot=True)\n test_py_transforms_with_c_vision()\n test_py_vision_with_c_transforms()\n test_compose_with_custom_function()\n",
"# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Generate the summary event which conform to proto format.\"\"\"\nimport platform\nimport time\n\nimport numpy as np\nfrom PIL import Image\n\nfrom mindspore import log as logger\n\nfrom ..._checkparam import Validator\nfrom ..anf_ir_pb2 import DataType, ModelProto\nfrom ..summary_pb2 import Event\n\n# define the MindSpore image format\nMS_IMAGE_TENSOR_FORMAT = 'NCHW'\n# Set the Event mark\nEVENT_FILE_NAME_MARK = \".out.events.summary.\"\n# Set the init event of version and mark\nEVENT_FILE_INIT_VERSION_MARK = \"MindSpore.Event:\"\nEVENT_FILE_INIT_VERSION = 1\n\nF32_MIN, F32_MAX = np.finfo(np.float32).min, np.finfo(np.float32).max\n\n\ndef get_event_file_name(prefix, suffix, seconds=None):\n \"\"\"\n Create file name: file_prefix + EVENT_FILE_NAME_MARK + time(seconds) + \".\" + Hostname + file_suffix.\n\n Args:\n prefix (str): The prefix of file name.\n suffix (str): The suffix of file name.\n seconds (str): The time stamp of file name.\n\n Returns:\n String, the name of event log file.\n \"\"\"\n Validator.check_str_by_regular(prefix)\n Validator.check_str_by_regular(suffix)\n file_name = \"\"\n time_second = str(int(time.time()))\n if seconds is not None:\n time_second = seconds\n hostname = platform.node()\n\n if prefix is not None:\n file_name = file_name + prefix\n\n file_name = file_name + EVENT_FILE_NAME_MARK + time_second + \".\" + hostname\n\n if suffix is not None:\n file_name = file_name + suffix\n\n return file_name\n\n\ndef package_init_event():\n \"\"\"Package the summary init event.\"\"\"\n init_event = Event()\n init_event.wall_time = time.time()\n version = EVENT_FILE_INIT_VERSION_MARK + str(EVENT_FILE_INIT_VERSION)\n init_event.version = version\n return init_event\n\n\ndef package_graph_event(data):\n \"\"\"\n Package the summary graph event.\n\n Args:\n data (Bytes): Graph bytes string.\n\n Returns:\n Event, event log object.\n \"\"\"\n graph_event = Event()\n graph_event.wall_time = time.time()\n modelp = ModelProto()\n modelp.ParseFromString(data)\n graph_event.graph_def.CopyFrom(modelp.graph)\n return graph_event\n\n\ndef package_summary_event(data_list, step, wall_time):\n \"\"\"\n Package the summary to event protobuffer.\n\n Args:\n data_list (list): Summary data list.\n step (Number): The recode step index.\n wall_time (float): The wall time.\n\n Returns:\n Summary, the summary event.\n \"\"\"\n # create the event of summary\n summary_event = Event()\n summary = summary_event.summary\n summary_event.wall_time = wall_time\n summary_event.step = int(step)\n\n for value in data_list:\n summary_type = value[\"_type\"]\n data = value[\"data\"]\n tag = value[\"name\"]\n\n logger.debug(f\"Now process {summary_type} summary, tag = {tag}\")\n\n summary_value = summary.value.add()\n summary_value.tag = tag\n # get the summary type and parse the tag\n if summary_type == 'Scalar':\n if not 
_fill_scalar_summary(tag, data, summary_value):\n del summary.value[-1]\n elif summary_type == 'Tensor':\n _fill_tensor_summary(tag, data, summary_value.tensor)\n elif summary_type == 'Image':\n if not _fill_image_summary(tag, data, summary_value.image, MS_IMAGE_TENSOR_FORMAT):\n del summary.value[-1]\n elif summary_type == 'Histogram':\n _fill_histogram_summary(tag, data, summary_value.histogram)\n else:\n # The data is invalid ,jump the data\n logger.error(f\"Summary type({summary_type}) is error, tag = {tag}\")\n del summary.value[-1]\n\n return summary_event\n\n\ndef _nptype_to_prototype(np_value):\n \"\"\"\n Transform the np type to proto type.\n\n Args:\n np_value (Type): Numpy data type.\n\n Returns:\n Type, proto data type.\n \"\"\"\n np2pt_tbl = {\n np.bool_: 'DT_BOOL',\n np.int8: 'DT_INT8',\n np.int16: 'DT_INT16',\n np.int32: 'DT_INT32',\n np.int64: 'DT_INT64',\n np.uint8: 'DT_UINT8',\n np.uint16: 'DT_UINT16',\n np.uint32: 'DT_UINT32',\n np.uint64: 'DT_UINT64',\n np.float16: 'DT_FLOAT16',\n np.float: 'DT_FLOAT64',\n np.float32: 'DT_FLOAT32',\n np.float64: 'DT_FLOAT64',\n None: 'DT_UNDEFINED'\n }\n np_type = None\n if np_value is None:\n logger.error(\"The numpy value is none\")\n else:\n np_type = np_value.dtype.type\n\n proto = np2pt_tbl.get(np_type, None)\n if proto is None:\n raise TypeError(\"No match for proto data type.\")\n\n return proto\n\n\ndef _fill_scalar_summary(tag: str, np_value, summary):\n \"\"\"\n Package the scalar summary.\n\n Args:\n tag (str): Summary tag describe.\n np_value (Object): Scalary object.\n\n Returns:\n Summary, return scalar summary content.\n \"\"\"\n logger.debug(f\"Set({tag}) the scalar summary value\")\n if np_value.size == 1:\n # is scalar\n summary.scalar_value = np_value.item()\n return True\n if np_value.size > 1:\n logger.warning(\n f\"The tensor is not a single scalar, tag = {tag}, ndim = {np_value.ndim}, shape = {np_value.shape}\")\n summary.scalar_value = next(np_value.flat).item()\n return True\n logger.error(f\"There no values inside tensor, tag = {tag}, size = {np_value.size}\")\n return False\n\n\ndef _fill_tensor_summary(tag: str, np_value, summary_tensor):\n \"\"\"\n Package the tensor summary.\n\n Args:\n tag (str): Summary tag describe.\n np_value (Type): Summary data type.\n summary_tensor (Tensor): The tensor of summary.\n\n Returns:\n Summary, return tensor summary content.\n \"\"\"\n logger.debug(f\"Set({tag}) the tensor summary value\")\n # get tensor dtype\n tensor_dtype = _nptype_to_prototype(np_value)\n summary_tensor.data_type = DataType.Value(tensor_dtype)\n\n # get the value list\n tensor_value_list = np_value.reshape(-1).tolist()\n summary_tensor.float_data.extend(tensor_value_list)\n\n # get the tensor dim\n for v in np_value.shape:\n summary_tensor.dims.append(v)\n\n return summary_tensor\n\n\ndef _calc_histogram_bins(count):\n \"\"\"\n Calculates experience-based optimal bins number for histogram.\n\n There should be enough number in each bin. So we calc bin numbers according to count. For very small count(1 -\n 10), we assign carefully chosen number. For large count, we tried to make sure there are 9-10 numbers in each\n bucket on average. 
Too many bins will slow down performance, so we set max number of bins to 90.\n\n Args:\n count (int): Valid number count for the tensor.\n\n Returns:\n int, number of histogram bins.\n \"\"\"\n max_bins, max_per_bin = 90, 10\n\n if not count:\n return 1\n if count <= 5:\n return 2\n if count <= 10:\n return 3\n if count <= 880:\n # note that math.ceil(881/10) + 1 equals 90\n return count // max_per_bin + 1\n\n return max_bins\n\n\ndef _fill_histogram_summary(tag: str, np_value: np.ndarray, summary) -> None:\n \"\"\"\n Package the histogram summary.\n\n Args:\n tag (str): Summary tag describe.\n np_value (np.ndarray): Summary data.\n summary (summary_pb2.Summary.Histogram): Summary histogram data.\n \"\"\"\n logger.debug(f\"Set({tag}) the histogram summary value\")\n # Default bucket for tensor with no valid data.\n ma_value = np.ma.masked_invalid(np_value)\n total, valid = np_value.size, ma_value.count()\n invalids = []\n for isfn in np.isnan, np.isposinf, np.isneginf:\n if total - valid > sum(invalids):\n count = np.count_nonzero(isfn(np_value))\n invalids.append(count)\n else:\n invalids.append(0)\n\n summary.count = total\n summary.nan_count, summary.pos_inf_count, summary.neg_inf_count = invalids\n if not valid:\n logger.warning(f'There are no valid values in the ndarray(size={total}, shape={np_value.shape})')\n # summary.{min, max, sum} are 0s by default, no need to explicitly set\n else:\n # BUG: max of a masked array with dtype np.float16 returns inf\n # See numpy issue#15077\n if issubclass(np_value.dtype.type, np.floating):\n summary.min = ma_value.min(fill_value=np.PINF)\n summary.max = ma_value.max(fill_value=np.NINF)\n if summary.min < F32_MIN or summary.max > F32_MAX:\n logger.warning(f'Values({summary.min}, {summary.max}) are too large, '\n f'you may encounter some undefined behaviours hereafter.')\n else:\n summary.min = ma_value.min()\n summary.max = ma_value.max()\n summary.sum = ma_value.sum(dtype=np.float64)\n bins = _calc_histogram_bins(valid)\n first_edge, last_edge = summary.min, summary.max\n\n if not first_edge < last_edge:\n first_edge -= 0.5\n last_edge += 0.5\n\n bins = np.linspace(first_edge, last_edge, bins + 1, dtype=np_value.dtype)\n hists, edges = np.histogram(np_value, bins=bins)\n\n for hist, edge1, edge2 in zip(hists, edges, edges[1:]):\n bucket = summary.buckets.add()\n bucket.width = edge2 - edge1\n bucket.count = hist\n bucket.left = edge1\n\n\ndef _fill_image_summary(tag: str, np_value, summary_image, input_format='NCHW'):\n \"\"\"\n Package the image summary.\n\n Args:\n tag (str): Summary tag describe.\n np_value (Type): Summary data type.\n summary_image (Tensor): The tensor of summary.\n input_format (str): Data sort order index. 
Default: 'NCHW'.\n\n Returns:\n Summary, return image summary content.\n \"\"\"\n logger.debug(f\"Set({tag}) the image summary value\")\n if np_value.ndim != 4 or np_value.shape[1] not in (1, 3):\n logger.error(f\"The value is not Image, tag = {tag}, ndim = {np_value.ndim}, shape={np_value.shape}\")\n return False\n\n if np_value.ndim != len(input_format):\n logger.error(\n f\"The tensor with dim({np_value.ndim}) can't convert the format({input_format}) because dim not same\")\n return False\n\n # convert the tensor format\n tensor = _convert_image_format(np_value, input_format)\n\n # convert the tensor dtype\n # Do not assume that user passes in values in [0, 255], use data type to detect\n scale_factor = 1\n if tensor.dtype == np.uint8:\n scale_factor = 1\n elif np.max(tensor) <= 1 and np.min(tensor) >= 0:\n scale_factor = 255\n tensor = tensor.astype(np.float32)\n tensor = (tensor * scale_factor).astype(np.uint8)\n\n # create the image summary\n height, width, channel, image_string = _make_image(tensor)\n summary_image.height = height\n summary_image.width = width\n summary_image.colorspace = channel\n summary_image.encoded_image = image_string\n return True\n\n\ndef _make_image(tensor, rescale=1):\n \"\"\"\n Convert a numpy representation of an image to Image protobuf.\n\n Args:\n tensor (Tensor): The image data.\n rescale (Number): The rescale value. Default: 1.\n\n Returns:\n (Number, Number, Number, Bytes), return the height, width, channel, image string .\n \"\"\"\n height, width, channel = tensor.shape\n scaled_height = int(height * rescale)\n scaled_width = int(width * rescale)\n image = Image.fromarray(tensor)\n image = image.resize((scaled_width, scaled_height), Image.ANTIALIAS)\n import io\n output = io.BytesIO()\n image.save(output, format='PNG')\n image_string = output.getvalue()\n output.close()\n return height, width, channel, image_string\n\n\ndef _convert_image_format(np_tensor, input_format, out_format='HWC'):\n \"\"\"\n Convert the image format.\n\n Args:\n np_tensor (Tensor): The image data.\n input_format (str): Input data format.\n out_format (str): The output data format. Default: 'HWC'.\n\n Returns:\n Tensor, return format image.\n \"\"\"\n input_format = input_format.upper()\n\n # convert the NCHW\n if input_format != 'NCHW':\n index = [input_format.find(c) for c in 'NCHW']\n tensor_nchw = np_tensor.transpose(index)\n else:\n tensor_nchw = np_tensor\n\n # make grid to expand N\n tensor_chw = _make_canvas_for_imgs(tensor_nchw)\n\n # convert to out format\n out_index = ['CHW'.find(c) for c in out_format]\n out_tensor = tensor_chw.transpose(out_index)\n return out_tensor\n\n\ndef _make_canvas_for_imgs(tensor, col_imgs=8):\n \"\"\"\n Expand the N, show imgs on a canvs.\n\n Args:\n tensor (Tensor): The canvas value.\n col_imgs (Number): The image colume number. Default: 8.\n\n Returns:\n Tensor, retrun canvas of image.\n \"\"\"\n # expand the N1HW to N3HW\n if tensor.shape[1] == 1:\n tensor = np.concatenate([tensor, tensor, tensor], 1)\n\n # expand the N\n n = tensor.shape[0]\n h = tensor.shape[2]\n w = tensor.shape[3]\n cols = min(n, col_imgs)\n rows = int(np.ceil(float(n) / cols))\n\n # creat the canvas: expand the n\n out_canvas = np.zeros((3, h * rows, w * cols))\n i = 0\n for y in range(rows):\n for x in range(cols):\n if i >= n:\n break\n out_canvas[:, y * h:(y + 1) * h, x * w:(x + 1) * w] = tensor[i]\n i = i + 1\n return out_canvas\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.stack",
"numpy.ones"
],
[
"numpy.histogram",
"numpy.linspace",
"numpy.min",
"numpy.finfo",
"numpy.concatenate",
"numpy.max",
"numpy.ma.masked_invalid",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vbelissen/packnet-sfm | [
"dfba692596b08ccff17abb9423c1958cecc75b0f"
] | [
"packnet_sfm/models/SemiSupModel_fisheye.py"
] | [
"# Copyright 2020 Toyota Research Institute. All rights reserved.\n\nimport torch\n\nfrom packnet_sfm.models.SelfSupModel_fisheye import SfmModel, SelfSupModel_fisheye\nfrom packnet_sfm.losses.supervised_loss_valeo import SupervisedLoss\nfrom packnet_sfm.models.model_utils import merge_outputs\nfrom packnet_sfm.utils.depth import depth2inv\n\n\nclass SemiSupModel_fisheye(SelfSupModel_fisheye):\n \"\"\"\n Model that inherits a depth and pose networks, plus the self-supervised loss from\n SelfSupModel and includes a supervised loss for semi-supervision.\n\n Parameters\n ----------\n supervised_loss_weight : float\n Weight for the supervised loss\n kwargs : dict\n Extra parameters\n \"\"\"\n def __init__(self, supervised_loss_weight=0.9, **kwargs):\n # Initializes SelfSupModel\n super().__init__(**kwargs)\n # If supervision weight is 0.0, use SelfSupModel directly\n assert 0. < supervised_loss_weight <= 1., \"Model requires (0, 1] supervision\"\n # Store weight and initializes supervised loss\n self.supervised_loss_weight = supervised_loss_weight\n self._supervised_loss = SupervisedLoss(**kwargs)\n\n # Pose network is only required if there is self-supervision\n self._network_requirements['pose_net'] = self.supervised_loss_weight < 1\n # GT depth is only required if there is supervision\n self._train_requirements['gt_depth'] = self.supervised_loss_weight > 0\n\n @property\n def logs(self):\n \"\"\"Return logs.\"\"\"\n return {\n **super().logs,\n **self._supervised_loss.logs\n }\n\n def supervised_loss(self, inv_depths, gt_inv_depths,\n path_to_ego_mask,\n return_logs=False, progress=0.0):\n \"\"\"\n Calculates the supervised loss.\n\n Parameters\n ----------\n inv_depths : torch.Tensor [B,1,H,W]\n Predicted inverse depth maps from the original image\n gt_inv_depths : torch.Tensor [B,1,H,W]\n Ground-truth inverse depth maps from the original image\n return_logs : bool\n True if logs are stored\n progress :\n Training progress percentage\n\n Returns\n -------\n output : dict\n Dictionary containing a \"loss\" scalar a \"metrics\" dictionary\n \"\"\"\n return self._supervised_loss(\n inv_depths, gt_inv_depths,\n path_to_ego_mask,\n return_logs=return_logs, progress=progress)\n\n def forward(self, batch, return_logs=False, progress=0.0):\n \"\"\"\n Processes a batch.\n\n Parameters\n ----------\n batch : dict\n Input batch\n return_logs : bool\n True if logs are stored\n progress :\n Training progress percentage\n\n Returns\n -------\n output : dict\n Dictionary containing a \"loss\" scalar and different metrics and predictions\n for logging and downstream usage.\n \"\"\"\n if not self.training:\n # If not training, no need for self-supervised loss\n return SfmModel.forward(self, batch)\n else:\n if self.supervised_loss_weight == 1.:\n # If no self-supervision, no need to calculate loss\n self_sup_output = SfmModel.forward(self, batch)\n loss = torch.tensor([0.]).type_as(batch['rgb'])\n else:\n # Otherwise, calculate and weight self-supervised loss\n self_sup_output = SelfSupModel_fisheye.forward(self, batch)\n loss = (1.0 - self.supervised_loss_weight) * self_sup_output['loss']\n # Calculate and weight supervised loss\n sup_output = self.supervised_loss(\n self_sup_output['inv_depths'], depth2inv(batch['depth']),\n batch['path_to_ego_mask'],\n return_logs=return_logs, progress=progress)\n loss += self.supervised_loss_weight * sup_output['loss']\n # Merge and return outputs\n return {\n 'loss': loss,\n **merge_outputs(self_sup_output, sup_output),\n }\n"
] | [
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
terasakisatoshi/pythonCodes | [
"baee095ecee96f6b5ec6431267cdc6c40512a542",
"baee095ecee96f6b5ec6431267cdc6c40512a542",
"baee095ecee96f6b5ec6431267cdc6c40512a542",
"baee095ecee96f6b5ec6431267cdc6c40512a542",
"baee095ecee96f6b5ec6431267cdc6c40512a542",
"baee095ecee96f6b5ec6431267cdc6c40512a542",
"baee095ecee96f6b5ec6431267cdc6c40512a542"
] | [
"cythonTest/onlineSample/combination/jit_combination.py",
"ExcelHander/exceltest.py",
"matplotlib_exercise/officialExample/eventHandlingExamples/test_mouseclicks.py",
"matplotlib_exercise/officialExample/widgetsExamples/menu.py",
"matplotlib_exercise/surround_pt.py",
"bokehExer/scipyTest/sharpe.py",
"solveLinearEquation/gauss.py"
] | [
"import time\nfrom numba import jit\nimport numpy as np \n\n@jit()\ndef jit_sum_conbination(N):\n xs = [i for i in range(N)]\n ys = [i for i in range(N)]\n total = 0\n for x in xs:\n for y in ys:\n total += x+y\n return total\n\ndef py_sum_conbination(N):\n xs = np.arange(N)\n ys = np.arange(N)\n total = 0\n for x in xs:\n for y in ys:\n total += x+y\n return total\n\ndef main():\n N = 10000\n start = time.time()\n total = jit_sum_conbination(N)\n end = time.time()\n print(total)\n print('elapsed time=', end-start)\n\nif __name__ == '__main__':\n main()\n",
"# -*- coding: utf-8 -*-\nimport xlwt as xw\nimport matplotlib.pyplot as plt\n\n# Excel New Bookを作成\nwb = xw.Workbook()\n\n# セルに値をセットする\nxw.Range('A1').value = 'Foo 1'\n\n# 値を取得する\nstr = xw.Range('A1').value\nprint(str)\n\n# 指定したセルを基準に表データをセットする\nxw.Range('A1').value = [['Foo1','Foo2', 'Foo3'], [10, 20, 30]]\n\n# 指定したセルを基準に表データを取得する\ntable = xw.Range('A1').table.value\nprint(table)\n\n# 指定した範囲のデータを取得する\ntable2 = xw.Range('A1:C2').value\nprint(table2)\n\n# ワークブックやシートを指定する\ntable3 = xw.Range('Shett1', 'A1:C2', wkb=wb).value\nprint(table3)\n\n# matplotlibのグラフを追加する(エクセルのグラフを作成できるけど)\nfig = plt.figure()\nplt.plot([1,2,3,4,5])\nplot = xw.Plot(fig)\nplot.show('Plot1', left=xw.Range('D3').left, top=xw.Range('D3').top)\n\n# 保存\nfile_name = \"xlwings_sample.xlsx\"\nwb.save(file_name)",
"from __future__ import print_function\n\nimport matplotlib\n#matplotlib.use(\"WxAgg\")\n#matplotlib.use(\"TkAgg\")\n#matplotlib.use(\"GTKAgg\")\n#matplotlib.use(\"Qt4Agg\")\n#matplotlib.use(\"MacOSX\")\nimport matplotlib.pyplot as plt\n\n#print(\"***** TESTING WITH BACKEND: %s\"%matplotlib.get_backend() + \" *****\")\n\n\ndef OnClick(event):\n if event.dblclick:\n print(\"DBLCLICK\", event)\n else:\n print(\"DOWN \", event)\n\n\ndef OnRelease(event):\n print(\"UP \", event)\n\n\nfig = plt.gcf()\ncid_up = fig.canvas.mpl_connect('button_press_event', OnClick)\ncid_down = fig.canvas.mpl_connect('button_release_event', OnRelease)\n\nplt.gca().text(0.5, 0.5, \"Click on the canvas to test mouse events.\",\n ha=\"center\", va=\"center\")\n\nplt.show()",
"from __future__ import division, print_function\nimport numpy as np\nimport matplotlib\nimport matplotlib.colors as colors\nimport matplotlib.patches as patches\nimport matplotlib.mathtext as mathtext\nimport matplotlib.pyplot as plt\nimport matplotlib.artist as artist\nimport matplotlib.image as image\n\n\nclass ItemProperties(object):\n def __init__(self, fontsize=14, labelcolor='black', bgcolor='yellow',\n alpha=1.0):\n self.fontsize = fontsize\n self.labelcolor = labelcolor\n self.bgcolor = bgcolor\n self.alpha = alpha\n\n self.labelcolor_rgb = colors.to_rgba(labelcolor)[:3]\n self.bgcolor_rgb = colors.to_rgba(bgcolor)[:3]\n\n\nclass MenuItem(artist.Artist):\n parser = mathtext.MathTextParser(\"Bitmap\")\n padx = 5\n pady = 5\n\n def __init__(self, fig, labelstr, props=None, hoverprops=None,\n on_select=None):\n artist.Artist.__init__(self)\n\n self.set_figure(fig)\n self.labelstr = labelstr\n\n if props is None:\n props = ItemProperties()\n\n if hoverprops is None:\n hoverprops = ItemProperties()\n\n self.props = props\n self.hoverprops = hoverprops\n\n self.on_select = on_select\n\n x, self.depth = self.parser.to_mask(\n labelstr, fontsize=props.fontsize, dpi=fig.dpi)\n\n if props.fontsize != hoverprops.fontsize:\n raise NotImplementedError(\n 'support for different font sizes not implemented')\n\n self.labelwidth = x.shape[1]\n self.labelheight = x.shape[0]\n\n self.labelArray = np.zeros((x.shape[0], x.shape[1], 4))\n self.labelArray[:, :, -1] = x/255.\n\n self.label = image.FigureImage(fig, origin='upper')\n self.label.set_array(self.labelArray)\n\n # we'll update these later\n self.rect = patches.Rectangle((0, 0), 1, 1)\n\n self.set_hover_props(False)\n\n fig.canvas.mpl_connect('button_release_event', self.check_select)\n\n def check_select(self, event):\n over, junk = self.rect.contains(event)\n if not over:\n return\n\n if self.on_select is not None:\n self.on_select(self)\n\n def set_extent(self, x, y, w, h):\n print(x, y, w, h)\n self.rect.set_x(x)\n self.rect.set_y(y)\n self.rect.set_width(w)\n self.rect.set_height(h)\n\n self.label.ox = x + self.padx\n self.label.oy = y - self.depth + self.pady/2.\n\n self.rect._update_patch_transform()\n self.hover = False\n\n def draw(self, renderer):\n self.rect.draw(renderer)\n self.label.draw(renderer)\n\n def set_hover_props(self, b):\n if b:\n props = self.hoverprops\n else:\n props = self.props\n\n r, g, b = props.labelcolor_rgb\n self.labelArray[:, :, 0] = r\n self.labelArray[:, :, 1] = g\n self.labelArray[:, :, 2] = b\n self.label.set_array(self.labelArray)\n self.rect.set(facecolor=props.bgcolor, alpha=props.alpha)\n\n def set_hover(self, event):\n 'check the hover status of event and return true if status is changed'\n b, junk = self.rect.contains(event)\n\n changed = (b != self.hover)\n\n if changed:\n self.set_hover_props(b)\n\n self.hover = b\n return changed\n\n\nclass Menu(object):\n def __init__(self, fig, menuitems):\n self.figure = fig\n fig.suppressComposite = True\n\n self.menuitems = menuitems\n self.numitems = len(menuitems)\n\n maxw = max([item.labelwidth for item in menuitems])\n maxh = max([item.labelheight for item in menuitems])\n\n totalh = self.numitems*maxh + (self.numitems + 1)*2*MenuItem.pady\n\n x0 = 100\n y0 = 400\n\n width = maxw + 2*MenuItem.padx\n height = maxh + MenuItem.pady\n\n for item in menuitems:\n left = x0\n bottom = y0 - maxh - MenuItem.pady\n\n item.set_extent(left, bottom, width, height)\n\n fig.artists.append(item)\n y0 -= maxh + MenuItem.pady\n\n 
fig.canvas.mpl_connect('motion_notify_event', self.on_move)\n\n def on_move(self, event):\n draw = False\n for item in self.menuitems:\n draw = item.set_hover(event)\n if draw:\n self.figure.canvas.draw()\n break\n\n\nfig = plt.figure()\nfig.subplots_adjust(left=0.3)\nprops = ItemProperties(labelcolor='black', bgcolor='yellow',\n fontsize=15, alpha=0.2)\nhoverprops = ItemProperties(labelcolor='white', bgcolor='blue',\n fontsize=15, alpha=0.2)\n\nmenuitems = []\nfor label in ('open', 'close', 'save', 'save as', 'quit'):\n def on_select(item):\n print('you selected %s' % item.labelstr)\n item = MenuItem(fig, label, props=props, hoverprops=hoverprops,\n on_select=on_select)\n menuitems.append(item)\n\nmenu = Menu(fig, menuitems)\nplt.show()",
"from __future__ import print_function\n\nfrom six.moves import input\n\nimport numpy as np\n\nfrom matplotlib.widgets import LassoSelector\nfrom matplotlib.path import Path\n\n\nclass SelectFromCollection(object):\n \"\"\"Select indices from a matplotlib collection using `LassoSelector`.\n\n Selected indices are saved in the `ind` attribute. This tool highlights\n selected points by fading them out (i.e., reducing their alpha values).\n If your collection has alpha < 1, this tool will permanently alter them.\n\n Note that this tool selects collection objects based on their *origins*\n (i.e., `offsets`).\n\n Parameters\n ----------\n ax : :class:`~matplotlib.axes.Axes`\n Axes to interact with.\n\n collection : :class:`matplotlib.collections.Collection` subclass\n Collection you want to select from.\n\n alpha_other : 0 <= float <= 1\n To highlight a selection, this tool sets all selected points to an\n alpha value of 1 and non-selected points to `alpha_other`.\n \"\"\"\n\n def __init__(self, ax, collection, alpha_other=0.3):\n self.canvas = ax.figure.canvas\n self.collection = collection\n self.alpha_other = alpha_other\n\n self.xys = collection.get_offsets()\n self.Npts = len(self.xys)\n\n # Ensure that we have separate colors for each object\n self.fc = collection.get_facecolors()\n if len(self.fc) == 0:\n raise ValueError('Collection must have a facecolor')\n elif len(self.fc) == 1:\n self.fc = np.tile(self.fc, self.Npts).reshape(self.Npts, -1)\n\n self.lasso = LassoSelector(ax, onselect=self.onselect)\n self.ind = []\n\n def onselect(self, verts):\n path = Path(verts)\n self.ind = np.nonzero([path.contains_point(xy) for xy in self.xys])[0]\n self.fc[:, -1] = self.alpha_other\n self.fc[self.ind, -1] = 1\n self.collection.set_facecolors(self.fc)\n self.canvas.draw_idle()\n\n def disconnect(self):\n self.lasso.disconnect_events()\n self.fc[:, -1] = 1\n self.collection.set_facecolors(self.fc)\n self.canvas.draw_idle()\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n\n plt.ion()\n data = np.random.rand(100, 2)\n\n subplot_kw = dict(xlim=(0, 1), ylim=(0, 1), autoscale_on=False)\n fig, ax = plt.subplots(subplot_kw=subplot_kw)\n\n pts = ax.scatter(data[:, 0], data[:, 1], s=80)\n selector = SelectFromCollection(ax, pts)\n\n plt.draw()\n input('Press Enter to accept selected points')\n print(\"Selected points:\")\n print(selector.xys[selector.ind])\n selector.disconnect()\n\n # Block end of script so you can check that the lasso is disconnected.\n input('Press Enter to quit')",
"from scipy import ndimage as ndi\nimport numpy as np\nimport scipy\nfrom bokeh.io import curdoc\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import Slider, TextInput\nfrom bokeh.layouts import row, widgetbox\nfrom bokeh.layouts import gridplot\nfrom bokeh.client import push_session\n\n\nclass ImageViewer():\n\n def __init__(self, target):\n self.target = target[::-1]\n self.source1 = ColumnDataSource(data=dict(image=[self.target]))\n self.alpha = Slider(title=\"alpha\", value=30,\n start=10, end=50, step=1)\n self.sigma = Slider(title=\"sigma\", value=3,\n start=1, end=20, step=1)\n\n self.fig1 = self.define_figure('image')\n self.regist_image(self.fig1,self.source1)\n\n blurred = ndi.gaussian_filter(self.target, sigma=self.sigma.value)\n self.source2 = ColumnDataSource(data=dict(image=[blurred]))\n self.fig2 = self.define_figure('blurred')\n self.regist_image(self.fig2,self.source2)\n\n\n filtered = ndi.gaussian_filter(blurred, sigma=1)\n sharped = blurred+self.alpha.value*(blurred-filtered)\n sharped = sharped.astype(np.uint8)\n self.source3 = ColumnDataSource(data=dict(image=[sharped]))\n self.fig3 = self.define_figure('sharped')\n self.regist_image(self.fig3,self.source3)\n\n widget_list = [self.alpha, self.sigma]\n for widget in widget_list:\n widget.on_change('value', self.update_data)\n inputs = widgetbox(*[widget_list])\n self.plot = row(inputs, gridplot(\n [[self.fig1, self.fig2, self.fig3]]), width=600)\n\n def define_figure(self, title):\n return figure(title=title,\n plot_width=self.target.shape[1]//2,\n x_range=[0, self.target.shape[1]],\n plot_height=self.target.shape[0]//2,\n y_range=[0, self.target.shape[0]])\n\n def regist_image(self, fig, source):\n fig.image('image',\n x=0, y=0,\n dh=self.target.shape[0], dw=self.target.shape[1],\n source=source, palette='Greys256')\n\n def update_data(self, attr, old, new):\n blurred = ndi.gaussian_filter(self.target, sigma=int(self.sigma.value))\n filtered = ndi.gaussian_filter(blurred, sigma=1)\n sharped = blurred+self.alpha.value*(blurred-filtered)\n sharped = sharped.astype(np.uint8)\n self.source2.data = dict(image=[blurred])\n self.source3.data = dict(image=[sharped])\n self.fig1.title.text = '{} {}'.format(\n self.sigma.value, self.alpha.value)\n\n\ndef main():\n target = scipy.misc.face(gray=True)\n viewer = ImageViewer(target)\n document = curdoc()\n document.add_root(viewer.plot)\n\nmain()\n",
"import numpy as np\nimport sympy as sy\n\n\ndef solveeq_gauss(a, b, x):\n \"\"\"\n solve equation ax=b using simple gauss method\n \"\"\"\n # define extended coefficient matrix as 'a'\n a = np.concatenate([a, b], axis=1)\n # push forward\n (row, col) = a.shape\n for j in range(row):\n print(\"a=\\n\", a)\n for i in range(j+1, row):\n p = a[i, j] / a[j, j]\n a[i] -= p*a[j]\n # back forward\n for i in range(row)[::-1]: # reverse iteration\n x[i] = (a[i, col-1]-np.dot(a[i, i+1:row], x[i+1:row]))/a[i, i]\n\n\ndef confirm_numpy(a, b, x):\n x = np.linalg.inv(a)@b\n\n\ndef confirm_sympy(a, b, x):\n a = sy.Matrix(a)\n x = a.solve(b)\n\n\ndef verify(a, b, x):\n solveeq_gauss(a, b, x)\n print(\"gauss x:\\n{}\".format(x))\n confirm_numpy(a, b, x)\n print(\"numpy x:\\n{}\".format(x))\n confirm_sympy(a, b, x)\n print(\"sympy.solve x:\\n{}\".format(x))\n\n\ndef test1():\n a = np.matrix([[2, 4, 6],\n [1, -1, 5],\n [4, 1, -2]], dtype='float64')\n b = np.matrix([28, 7, 21], 'float64').T\n x = np.empty(b.shape)\n verify(a, b, x)\n\n\ndef test2():\n\n a = np.matrix([[2, 4, 5, 2],\n [1, -8, 2, -6],\n [4, 1, -10, -2],\n [1, 7, 1, -2]], dtype='float64')\n b = np.matrix([9, -3, 1, -3], 'float64').T\n x = np.empty(b.shape)\n verify(a, b, x)\n\n\ndef main():\n test1()\n test2()\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.arange"
],
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gcf"
],
[
"matplotlib.artist.Artist.__init__",
"matplotlib.colors.to_rgba",
"matplotlib.image.FigureImage",
"matplotlib.patches.Rectangle",
"matplotlib.mathtext.MathTextParser",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"matplotlib.path.Path",
"matplotlib.pyplot.subplots",
"numpy.tile",
"matplotlib.pyplot.draw",
"numpy.random.rand",
"matplotlib.pyplot.ion",
"matplotlib.widgets.LassoSelector"
],
[
"scipy.ndimage.gaussian_filter",
"scipy.misc.face"
],
[
"numpy.matrix",
"numpy.dot",
"numpy.linalg.inv",
"numpy.concatenate",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alexshuang/onnxruntime | [
"771a6d235b8495d05bcf6a906107df1bd6e81744"
] | [
"orttraining/orttraining/python/training/_ortmodule_utils.py"
] | [
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\nfrom . import _utils\n\nfrom onnxruntime.capi.onnxruntime_inference_collection import OrtValue\nfrom onnxruntime.capi import _pybind_state as C\n\nimport torch\nfrom torch.utils.dlpack import from_dlpack, to_dlpack\nfrom torch.utils.cpp_extension import load_inline\n\n\ndef _ortvalue_to_torch_tensor(ortvalue):\n # PyTorch's to_dlpack() uses same config for both torch.bool and torch.uint8,\n # and convert the config to torch.uint8 tensor duing from_dlpack().\n # So we need to convert the torch tensor to torch.bool type if OrtValue is bool tensor.\n torch_tensor = from_dlpack(ortvalue._ortvalue.to_dlpack())\n return torch_tensor.to(torch.bool) if ortvalue.data_type() == 'tensor(bool)' else torch_tensor\n\n\ndef _ortvalue_from_torch_tensor(torch_tensor):\n return OrtValue(C.OrtValue.from_dlpack(to_dlpack(torch_tensor), torch_tensor.dtype == torch.bool))\n\n\ndef _load_torch_gpu_allocator_cpp_extension(verbosity, is_rocm_pytorch):\n gpu_identifier = \"hip\" if is_rocm_pytorch else \"cuda\"\n gpu_allocator_header = \"HIPCachingAllocator\" if is_rocm_pytorch else \"CUDACachingAllocator\"\n torch_gpu_allocator_addresses_cpp_source = f'''\n #include <torch/extension.h>\n #include <c10/{gpu_identifier}/{gpu_allocator_header}.h>\n\n size_t gpu_caching_allocator_raw_alloc_address() {{\n return reinterpret_cast<size_t>(&c10::{gpu_identifier}::{gpu_allocator_header}::raw_alloc);\n }}\n\n size_t gpu_caching_allocator_raw_delete_address() {{\n return reinterpret_cast<size_t>(&c10::{gpu_identifier}::{gpu_allocator_header}::raw_delete);\n }}\n '''\n\n return load_inline(name='inline_extension',\n cpp_sources=[torch_gpu_allocator_addresses_cpp_source],\n extra_cflags=['-D__HIP_PLATFORM_HCC__=1' if is_rocm_pytorch else ''],\n functions=['gpu_caching_allocator_raw_alloc_address',\n 'gpu_caching_allocator_raw_delete_address'],\n verbose=verbosity,\n with_cuda=True)\n\n\ndef _check_same_device(device, argument_str, *args):\n '''Check that all tensor arguments in *args reside on the same device as the input device'''\n\n assert isinstance(device, torch.device), '`device` must be a valid `torch.device` object'\n for arg in args:\n if arg is not None and isinstance(arg, torch.Tensor):\n arg_device = torch.device(arg.device)\n if arg_device != device:\n raise RuntimeError(\n f\"{argument_str} found on device {arg_device}, but expected it to be on module device {device}.\")\n\n\ndef get_device_from_module(module):\n '''Returns the first device found in the `module`'s parameters or None'''\n device = None\n try:\n device = next(module.parameters()).device\n for param in module.parameters():\n if param.device != device:\n raise RuntimeError('ORTModule supports a single device per model for now')\n except StopIteration:\n # Model doesn't have a device set to any of the model parameters\n pass\n return device\n\n\ndef _create_iobinding(io_binding, inputs, model, device):\n '''Creates IO binding for a `model` inputs and output'''\n for idx, value_info in enumerate(model.graph.input):\n io_binding.bind_ortvalue_input(value_info.name, _ortvalue_from_torch_tensor(inputs[idx]))\n\n for value_info in model.graph.output:\n io_binding.bind_output(value_info.name, device.type, device_id=_utils.get_device_index(device))\n"
] | [
[
"torch.device",
"torch.utils.cpp_extension.load_inline",
"torch.utils.dlpack.to_dlpack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Paul-31415/soundplay | [
"0e7ea27c6d4bdf5f94e5034c7775a10c62d1583e",
"0e7ea27c6d4bdf5f94e5034c7775a10c62d1583e"
] | [
"fmsynth.py",
"Clockwork_RNN.py"
] | [
"\n\nfrom itools import lmbdWr,lm\n\nimport itertools\n\nfrom bisect import bisect_right\n\nimport brailleG as gr\n\ndef abs2(n):\n return (n*n.conjugate()).real\n \n\ndef fsample(buf,m=1,b=0):\n index = 0\n y = 0\n while 1:\n index = (index+b+m*y)%len(buf)\n y = yield buf[(int(index)+1)%len(buf)]*(index%1)+buf[int(index)]*(1-(index%1))\n\ndef fsine(a=1,m=1/48000,b=0):\n s = 0\n c = a\n y = 0\n while 1:\n amt = b+m*y\n s += c*amt\n c -= s*amt\n y = yield s\n\nimport math\npi = math.pi\neone = math.exp(2*pi)\nbuffer_size = 8192\n\nsinBuffer = [math.sin(i*2*math.pi/4/buffer_size) for i in range(buffer_size+1)]\n \ndef nsin(a):\n a = 4*buffer_size*(a%1)\n if a<=buffer_size:\n return sinBuffer[math.floor(a)]\n elif a<=buffer_size*2:\n return sinBuffer[math.floor(buffer_size-a)-1]\n elif a<=buffer_size*3:\n return -sinBuffer[math.floor(a-buffer_size*2)]\n else:\n return -sinBuffer[math.floor(buffer_size*3-a)-1]\ndef nsaw(a):\n return (a%1)*2-1\ndef ntri(a):\n return abs((a%1)-.5)*4-1\ndef nsquare(a,p=.5):\n return ((a%1)<p)*2-1\n\nlsin = lm(nsin)\nlsaw = lm(nsaw)\nltri = lm(ntri)\nlsqr = lm(nsquare)\n\ndef pulse(w=.5):\n def p(v):\n return ((v%1)<w)*2-1\n return p\ndef tri(w=.5):\n def t(v):\n v %= 1\n return 2*v/w-1 if v < w else 2*(w-v)/(1-w)+1\n return t\n\n_reserved = []\ndef polyvars(varstr):\n return [Polynomial(i) for i in varstr]\nclass Polynomial:\n def __init__(self,coef,var=\"x\"):\n if type(coef) == str:\n var = coef\n coef = [0,1]\n self.a = coef\n self.var = var\n self.trim()\n def trim(self):\n while len(self.a):\n if self.a[-1] == 0:\n self.a = self.a[:-1]\n else:\n break\n def simplified(self,tv=None):\n if tv == None:\n tv = self.var\n if tv == self.var:\n r = Polynomial([],tv)\n x = Polynomial(tv)\n xa = 1\n for t in self.a:\n if type(t) == Polynomial:\n r += t.simplified(tv)*xa\n else:\n r += t*xa\n xa *= x\n else:\n r = Polynomial([],tv)\n x = Polynomial(self.var)\n xa = 1\n for t in self.a:\n if type(t) == Polynomial:\n r += t.simplified(tv).__mul__(xa)\n else:\n r += t*xa\n xa *= x\n return r\n def __call__(self,vrs):\n if type(vrs) != dict:\n vrs = {self.var:vrs}\n if self.var in vrs:\n x = vrs[self.var]\n v = 0\n xa = 1\n for t in self.a:\n if type(t) == Polynomial:\n t = t(vrs)\n v += xa*t\n xa *= x\n return v\n return Polynomial([t(vrs) if type(t) == Polynomial else t for t in self.a],self.var)\n def __getitem__(self,i):\n if i>=len(self):\n return 0\n return self.a[i]\n def __setitem__(self,i,v):\n if i>=len(self):\n self.a += [0]*(i-len(self))+[v]\n else:\n self.a[i] = v\n self.trim()\n def __neg__(self):\n return Polynomial([-i for i in self.a],self.var)\n def __radd__(self,o):\n return self.__add__(o)\n def __add__(self,o):\n if type(o) == Polynomial and o.var == self.var:\n return self.padd(o)\n return self.npadd(o)\n def padd(self,o):\n return Polynomial(sumPolyn(self.a,o.a),self.var)\n def npadd(self,o):\n if len(self.a):\n return Polynomial([self.a[0]+o]+self.a[1:],self.var)\n return Polynomial([o],self.var)\n def __rsub__(self,o):\n return -self.__sub__(o)\n def __sub__(self,o):\n if type(o) == Polynomial and o.var == self.var:\n return self.psub(o)\n return self.npsub(o)\n def psub(self,o):\n return self.padd(-o)\n def npsub(self,o):\n if len(self.a):\n return Polynomial([self.a[0]-o]+self.a[1:],self.var)\n return Polynomial([-o],self.var)\n def __rmul__(self,o):\n return self.__mul__(o)\n def __mul__(self,o):\n if type(o) == Polynomial and o.var == self.var:\n return self.pmul(o)\n return self.npmul(o)\n def pmul(self,o):\n return 
Polynomial(prodPolyn(self.a,o.a),self.var)\n def npmul(self,o):\n if len(self.a):\n return Polynomial([e*o for e in self.a],self.var)\n return Polynomial([],self.var)\n #def __divmod__(self,o):\n #def __repr__(self,var=None):\n # if var == None:\n # var = self.var\n # return f\"polyn({var}) = \"+\" + \".join((f\"({self.a[i]})\"+[\"\",f\"{var}\"][i>0]+[\"\",f\"**{i}\"][i>1] for i in range(len(self.a))))\n def __repr__(self,var=None):\n if var == None:\n var = self.var\n return f\"p({var})=\"*0+\" + \".join(((f\"({self.a[i]})\" if self.a[i] != 1 else [\"1\",\"\"][i>0])+[\"\",f\"{var}\"][i>0]+[\"\",f\"**{i}\"][i>1] for i in range(len(self.a)) if self.a[i] != 0))\n def deriv(self):\n return Polynomial([self.a[i+1]*(i+1) for i in range(len(self.a)-1)],self.var)\n def integ(self,k=0):\n return Polynomial([k]+[self.a[i]*(1/(i+1)) for i in range(len(self.a))],self.var)\n def convolve(self,o):\n #integ of self(t-x)o(t) dt\n #want first arg to be x, second to be bounds\n #so, \n x = Polynomial('x')\n t = Polynomial('t')\n integrand = Polynomial([self(t-x)*o(t)],'t')\n return integrand.simplified('t').integ()\n def __len__(self):\n return len(self.a)\n def __eq__(self,o):\n if type(o) == Polynomial:\n if len(o) != len(self) or o.var != self.var:\n return False\n for i in range(len(self)):\n if self.a[i] != o.a[i]:\n return False\n return True\n if type(o) == float or type(o) == int or type(o) == complex:\n return len(self) <= 1 and (self.a+[0])[0] == o\n def __matmul__(self,o):\n return self.convolve(o)\n def plot(self,*args):\n plotPoly(self.a,*args)\n \ndef evalPolyn(polyn,x):\n v = 0\n xa = 1\n for t in polyn:\n v += xa*t\n xa *= x\n return v\ndef sumPolyn(p1,p2):\n res = [0 for i in range(max(len(p1),len(p2)))]\n for i in range(len(res)):\n if i < len(p1):\n if i < len(p2):\n res[i] = p1[i] + p2[i]\n else:\n res[i] = p1[i]\n else:\n res[i] = p2[i]\n return res\ndef prodPolyn(p1,p2):\n if len(p1) == 0 or len(p2) == 0:\n return []\n res = [0 for i in range(len(p1)+len(p2)-1)]\n for i in range(len(p1)):\n for j in range(len(p2)):\n res[i+j] += p1[i]*p2[j]\n return res\ndef composePolyn(p1,p2): #retuns p1(p2(x))\n px = [1]\n pr = []\n for i in p1:\n pr = sumPolyn(pr,prodPolyn(px,[i]))\n px = prodPolyn(px,p2)\n return pr\n\ndef fourierPolyn(p,freq):\n factor = 1/(2j*math.pi*freq)\n mask = [factor]\n result = [0 for i in p]\n for i in range(len(p)):\n facacc = factor\n for j in range(i,-1,-1):\n result[j] += facacc*p[i]\n facacc *= -factor*j\n return result\ndef evalFourierPolyn(p,freq,phase,low,high):\n l = evalPolyn(p,low)\n h = evalPolyn(p,high)\n return h*(eone**(1j*(freq*high+phase)))-l*(eone**(1j*(freq*low+phase)))\n \n\ndef convolvePolyn(p1,p2):\n pass\n\n\n\ndef softGCD(a,b,f=.01):\n if abs(b)<=f:\n return a\n return softGCD(b,a%b,f)\n\ndef convPolyFrags(p0,p1,l0,l1):\n #convolves 2 polynomial fragments\n if l0 > l1:\n return convPolyFrags(p1,p0,l1,l0)\n #l0 ≤ l1\n times = [-l0,0,l1-l0]\n #moving = composePolyn(p0,[t-x])\n\n def xify(v):\n return Polynomial([v],'x').simplified('x')\n \n p_0 = Polynomial(p0,'x')\n p_1 = Polynomial(p1,'x')\n x = Polynomial('x')\n conv = p_0@p_1\n a,b,c = conv(l0+x)-conv(0),conv(l0+x)-conv(x),conv(l1)-conv(x)\n a,b,c = [xify(xify((a,b,c)[i])(x+times[i])) for i in range(3)]\n if l1 != l0:\n return PiecewizePoly([[],a.a,b.a,c.a,[]],[-math.inf]+times+[l1],0)\n return PiecewizePoly([[],a.a,c.a,[]],[-math.inf]+times[:2]+[l1],0)\n \n \n\ndef plotPoly(p,t0=0,t1=1,res=50):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(nrows=1, ncols=1)\n st = p[0]\n end = 
evalPolyn(p,t1-t0)\n mid = []\n ts = []\n if len(p) > 2:\n for j in range(1,res):\n t = (t1-t0)*j/res\n ts += [t+t0]\n mid += [evalPolyn(p,t)]\n ts = [t0]+ts+[t1]\n ys = [st]+mid+[end]\n plt.plot(ts,[i.real for i in ys],linestyle='-',color=(.3,.3,1), linewidth=2)\n plt.plot(ts,[i.imag for i in ys],linestyle='-',color=(1,.3,.3), linewidth=2)\n plt.show(block=0)\n\n#todo: closed form convolution\n# perhaps use @ (__matmul__)\n#todo: closed form composition? possible? not always: requires root finding\nclass PiecewizePoly:\n def __init__(self,polys = [[]],times=[0],mod=1):\n self.times = times\n self.polys = polys\n self.mod = mod\n def __call__(self,x):\n if self.mod != 0:\n x %= self.mod\n #binary search for correct polyn\n l = bisect_right(self.times,x)-1\n #eval polyn\n return evalPolyn(self.polys[l],x-self.times[l])\n def deriv(self):\n #do derivitive on self\n res_t = []\n res_p = []\n for p in range(len(self.polys)):\n res_t += [self.times[p]]\n res_p += [[]]\n for i in range(len(self.polys[p])-1):\n res_p[-1] += [self.polys[p][i+1]*(i+1)]\n return PiecewizePoly(res_p,res_t,self.mod)\n def integ(self,start=0,scale=1):\n #do integral on self\n res_t = []\n res_p = []\n for p in range(len(self.polys)):\n res_t += [self.times[p]]\n res_p += [[start]]\n for i in range(len(self.polys[p])):\n res_p[-1] += [self.polys[p][i]/(i+1)*scale]\n #continuize segments after first\n for i in range(1,len(res_t)):\n val = evalPolyn(res_p[i-1],res_t[i]-res_t[i-1])\n res_p[i][0] = val#-evalPolyn(res[i][1],res[i][0]) #not needed with new def\n return PiecewizePoly(res_p,res_t,self.mod)\n def timeshift(self,s):\n assert self.mod==0\n for i in range(len(self.times)):\n self.times[i] -= s\n return self\n def timescale(self,s):\n self.mod *= s\n for i in range(len(self.times)):\n self.times[i] *= s\n self.polys[i] = composePolyn(self.polys[i],[0,1/s])\n return self\n def convolve(self,o,fudgefactor = .001):\n ts = self.times + [self.mod if self.mod else math.inf]\n to = o.times + [o.mod if o.mod else math.inf]\n result = PiecewizePoly([[]],[-math.inf],0)\n for i in range(len(self.polys)):\n for j in range(len(o.polys)):\n pc = convPolyFrags(self.polys[i],o.polys[j],ts[i+1]-ts[i],to[j+1]-to[j])\n result += pc.timeshift(ts[i]-to[j])\n #now do moddy stuff\n return result\n \n \n def __matmul__(self,o,fudgefactor = .001):\n return self.convolve(o,fudgefactor)\n def __lmbdWr__(self):\n return lmbdWr(self)\n def __iterWr__(self):\n return iterWr(iter(lmbdWr(self)))\n def bias(self):\n intg = self.integ()\n return (intg.end()-intg(0))/self.mod\n def unbiased(self):\n #self shifted to have 0 dc bias\n bias = self.bias()\n res_t = []\n res_p = []\n for p in range(len(self.polys)):\n res_t += [self.times[p]]\n res_p += [sumPolyn([-bias],self.polys[p])]\n return PiecewizePoly(res_p,res_t,self.mod)\n def graph(self,w=40,h=20,lo=-2,hi=2):\n gr.graph(self,0,self.mod,lo,hi,w,h)\n def plot(self,res=50):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(nrows=1, ncols=1)\n dash = 0\n for i in range(len(self.polys)):\n dash = 1-dash\n t0 = self.times[i]\n if t0 == -math.inf:\n t0 = self.times[i+1]-1\n t1 = (self.times+[self.mod if self.mod != 0 else self.times[-2]+1])[i+1]\n p = self.polys[i]\n st = (p+[0])[0]\n end = evalPolyn(p,t1-t0)\n mid = []\n ts = []\n if len(p) > 2:\n for j in range(1,res):\n t = (t1-t0)*j/res\n ts += [t+t0]\n mid += [evalPolyn(p,t)]\n ts = [t0]+ts+[t1]\n ys = [st]+mid+[end]\n plt.plot(ts,[i.real for i in ys],linestyle='-',color=(.3*dash,.3*dash,1), linewidth=2)\n plt.plot(ts,[i.imag for i in 
ys],linestyle='-',color=(1,.3*dash,.3*dash), linewidth=2)\n plt.show(block=0)\n \n def mag2(self):\n sqd = PiecewizePoly([prodPolyn(p,p) for p in self.polys],[t for t in self.times],self.mod+1).integ()\n return (sqd(self.mod)-sqd(0))/self.mod\n def norm(self,v=.5):\n #normalizes it so that integ(0,mod, of self^2) = v*mod\n target = v\n factor = target/self.mag2()**.5\n return PiecewizePoly([[i*factor for i in p] for p in self.polys],[t for t in self.times],self.mod)\n def __add__(self,o,fudgefactor = .001):\n if type(o) == PiecewizePoly:\n if self.mod == 0:\n assert o.mod == 0\n res_t = [-math.inf]\n res_p = [sumPolyn(self.polys[0],o.polys[0])]\n si = 0\n oi = 0\n sts = self.times + [math.inf]\n ots = o.times + [math.inf]\n sp = self.polys + [[]]\n op = o.polys + [[]]\n while si < len(self.times) and oi < len(o.times):\n st,ot = sts[si+1],ots[oi+1]\n if st < ot:\n si += 1\n res_t += [st]\n res_p += [sumPolyn(sp[si],\n composePolyn(op[oi],[st-ot,1]))]\n elif st > ot:\n oi += 1\n res_t += [ot]\n res_p += [sumPolyn(composePolyn(sp[si],[ot-st,1]),\n op[oi])]\n else:\n si += 1\n oi += 1\n res_t += [st]\n res_p += [sumPolyn(sp[si],op[oi])]\n return PiecewizePoly(res_p,res_t,0)\n \n else:\n assert o.mod != 0\n gcd = softGCD(self.mod,o.mod,fudgefactor*(self.mod*o.mod)**.5) \n lcm = self.mod*o.mod/gcd \n t = 0 \n res_t = []\n res_p = []\n sto = 0\n oto = 0\n si = 0\n oi = 0\n while t < lcm:\n res_t += [t]\n res_p += [sumPolyn(composePolyn(self.polys[si],[t-(self.times[si]+sto),1]),\n composePolyn(o.polys[oi],[t-(o.times[oi]+oto),1]))]\n \n st = sto+(self.times+[self.times[0]+self.mod])[si+1]\n ot = oto+(o.times+[o.times[0]+o.mod])[oi+1]\n t = min(st,ot)\n if st <= t:\n si += 1\n if si >= len(self.polys):\n si = 0\n sto += self.mod\n if ot <= t:\n oi += 1\n if oi >= len(o.polys):\n oi = 0\n oto += o.mod\n return PiecewizePoly(res_p,res_t,lcm)\n else:\n return PiecewizePoly([sumPolyn(p,[o]) for p in self.polys],[t for t in self.times],self.mod)\n def __mul__(self,o,fudgefactor = .001):\n if type(o) == PiecewizePoly:\n gcd = softGCD(self.mod,o.mod,fudgefactor*(self.mod*o.mod)**.5) \n lcm = self.mod*o.mod/gcd \n t = 0 \n res_t = []\n res_p = []\n sto = 0\n oto = 0\n si = 0\n oi = 0\n while t < lcm:\n res_t += [t]\n res_p += [prodPolyn(composePolyn(self.polys[si],[t-(self.times[si]+sto),1]),\n composePolyn(o.polys[oi],[t-(o.times[oi]+oto),1]))]\n \n st = sto+(self.times+[self.times[0]+self.mod])[si+1]\n ot = oto+(o.times+[o.times[0]+o.mod])[oi+1]\n t = min(st,ot)\n if st <= t:\n si += 1\n if si >= len(self.polys):\n si = 0\n sto += self.mod\n if ot <= t:\n oi += 1\n if oi >= len(o.polys):\n oi = 0\n oto += o.mod\n return PiecewizePoly(res_p,res_t,lcm)\n else:\n return PiecewizePoly([prodPolyn(p,[o]) for p in self.polys],[t for t in self.times],self.mod)\n \n def __radd__(self,o):\n return self.__add__(o)\n def __rmul__(self,o):\n return self.__mul__(o)\n def __sub__(self,o):\n return self.__add__(o.__mul__(-1))\n def __rsub__(self,o):\n return self.__mul__(-1).__add__(o)\n \n def t(self,v=1):\n return PiecewizePoly([[p[i]/(v**i) for i in range(len(p))] for p in self.polys],[t*v for t in self.times],self.mod*v)\n def isZero(self):\n for i in self.polys:\n for j in i:\n if j != 0:\n return False\n return True\n def end(self):\n x = self.mod\n l = -1\n #eval polyn\n return evalPolyn(self.polys[l],x-self.times[l])\n \n def freqComponent(self,f):\n if f == 0:\n return self.bias()\n result = 0\n f /= self.mod\n for i in range(len(self.polys)):\n p = fourierPolyn(self.polys[i],f)\n result += 
evalFourierPolyn(p,f,f*self.times[i],0,(self.times+[self.mod])[i+1]-self.times[i])\n return result\n def graphSpectrum(self,w=20,h=10,both=True):\n gr.graphLT(lambda x:abs(self.freqComponent(x)),both-h*2*both,h*(4-2*both)+both,0,1,w,h)\n def graphSpectrumLog(self,w=20,h=10,both = True,low=-10,hi=1):\n gr.graphLT(lambda x: (lambda v: (math.log(v) if v!=0 else -1e300))(abs(self.freqComponent(x))),both-h*2*both,h*(4-2*both)+both,low,hi,w,h)\n\n def bandLimit(self,t,bl=5,neg=False):\n tot = 0\n for i in range(neg*(1-bl),bl):\n tot += eone**(1j*i*t)*self.freqComponent(i)\n return tot\n\n def getBandlimitedBuffer(self,denominator,numerator = 1,ff=0,fnd=2,neg=False):\n #f_nyquist = .5\n # f_n = n*(num/den) < f_nyquist\n # n < .5*den/num\n d = softGCD(numerator,denominator,ff)\n numerator=int(round(numerator/d))\n denominator=int(round(denominator/d))\n return [self.bandLimit(numerator*i*self.mod/denominator,int(denominator/numerator/fnd),neg) for i in range(numerator*denominator)]\n \n def bandConj(self,t,bl=5):\n tot = 0\n re = self.real()\n im = self.imag()\n for i in range(0,bl):\n f = eone**(1j*i*t)\n tot += (f*re.freqComponent(i)).imag+(f*im.freqComponent(i)).imag*1j\n return tot\n \n \n \n def real(self):\n return PiecewizePoly([[i.real for i in j]for j in self.polys],[t for t in self.times],self.mod)\n def imag(self):\n return PiecewizePoly([[i.imag for i in j]for j in self.polys],[t for t in self.times],self.mod)\n def oscope(self,w=40,h=20,s=.5+.5j,m=.5,n=256):\n scrn = gr.brailleScreen(w*2,h*4)\n for i in range(n):\n t = i*self.mod/n\n v = self(t).conjugate()*m+s\n if 0<=int(v.real*w*2)<w*2 and 0<=int(v.imag*h*4) < h*4:\n gr.brailleScreenSet(scrn,int(v.real*w*2),int(v.imag*h*4))\n gr.brailleScreenPrint(scrn)\n\ndef forever(v):\n while 1:\n yield v\n#NEW BWLSYNTH IDEA:\n# sample the nth integral then derivitate the signal n times\n# the high harmonics are suppressed in the integrals which means\n# when they alias they are tiny\n# but the reconstruction filter doesn't amplify them a ton because they were aliased\n# thus cheap and easy bwl synthesis\ndef idbwlPoly(p,rate=440/48000,q=1,d=1):\n try:\n rate.__next__\n except:\n rate = forever(rate)\n ds = [[0]*d for i in range(q)]\n rates = [0]*d\n trate = 0\n for i in range(q):\n p = p.unbiased().integ()\n t = 0\n di = 0\n for i in range(q*d):\n di = (di+1)%d\n t += rate\n t %= 1\n r = p(t)\n trate -= rates[di]\n rates[di] = next(rate)\n trate += rates[di]\n for i in range(q):\n r,ds[i][di] = (r-ds[i][di]) / trate,r\n while 1:\n di = (di+1)%d\n t += rate\n t %= 1\n r = p(t)\n trate -= rates[di]\n rates[di] = next(rate)\n trate += rates[di]\n for i in range(q):\n r,ds[i][di] = (r-ds[i][di]) / trate,r\n yield r\n \n\ndef ditherPoly(p,rate=440/48000,dd=1):\n from random import random\n t = 0\n while 1:\n t += rate\n yield p(t+dd*rate*random())\n\n \ndef gaussApprox(mean=0,spread=1,iters=3):\n s = spread/iters\n blip = PiecewizePoly([[],[1/s],[]],[-math.inf,0,s],0)\n acc = blip\n for b in bin(iters)[3:]:\n acc.plot()\n acc @= acc\n if b == '1':\n acc @= blip\n return acc.timeshift(mean)\n \n\ndef plinConnectDots(dat,speed=1):\n polys = []\n times = []\n t = 0\n for i in range(len(dat)):\n leng = abs(dat[i-1]-dat[i])\n polys += [[dat[i-1],(dat[i]-dat[i-1])/leng]]\n times += [t]\n t += leng\n return PiecewizePoly(polys,times,t)\ndef pnlinConnectDots(dat,speed=1):\n r = plinConnectDots(dat,speed)\n return r.t(1/r.mod)\n \n \ndef papprox(dat,integ=2):\n #derivitive the freqs integ times\n dcs = []\n for intg in range(integ):\n dcs += 
[dat[-1]/(intg+1)]\n ddat = [(-dat[i-1]+dat[i])/(intg+1) for i in range(len(dat))]\n dat = ddat\n res = PiecewizePoly([[i] for i in dat],[i for i in range(len(dat))],len(dat))\n for i in range(integ):\n res = res.integ(dcs[-i-1])\n return res\n \"\"\"bl = len(dat)//2\n guess1 = PiecewizePoly([[i] for i in dat],[i/len(dat) for i in range(len(dat))],1)\n freqs = [guess1.freqComponent(i) for i in range(1-bl,bl)]\n dc = guess1.bias()\n #derivitive the freqs integ times\n for i in range(integ):\n for f in range(len(freqs)):\n freqs[f] *= (f+1-bl//2)*1j\n #come up with new samples to integrate repeatedly\n samps = []\n for t in range(len(dat)):\n tot = 0\n for i in range(1-bl,bl):\n tot += eone**(1j*i*t/len(dat))*freqs[i]\n samps += [tot]\n res = PiecewizePoly([[i] for i in samps],[i/len(samps) for i in range(len(samps))],1)\n for i in range(integ):\n res = res.unbiased().integ(0,1).unbiased()\n return res + dc\n \"\"\"\n \n\ndef ppulse(width=.5,amplitude=1):\n return PiecewizePoly([[0,[-1]],[width,[1]]]).unbiased()\n\n\npsqr = PiecewizePoly([[-1],[1]],[0,.5])\n\n#.5 -> 2\nptri = psqr.integ(0,4).unbiased()\n\n#.25*.5=1/8\nppar = ptri.integ(0,8)\n\n\npsaw = PiecewizePoly([[1,-2]],[0])\n\n\ncf = pnlinConnectDots([-.75+1.5j,-.5+1j,.5+1j,.75+1.5j,1+1j,1-1j,-1j-1,1j-1])*.5\n\ncfi = plinConnectDots([-.75+1.5j,-.5+1j,.5+1j,.75+1.5j,1+1j,1-1j,-1j-1,1j-1])\ncfi.polys += [[-1/3+.5j,-1j],[1/3+.5j,-1j]]\ncfi.times += [cfi.mod,cfi.mod+.75]\ncfi.mod += 1.5\ncfi = cfi.t(1/cfi.mod)*.5\n\n\n\ndef reorderTimes(times,order,top):\n newTimes = []\n t = 0\n for i in order:\n if i == len(times)-1:\n l = top-times[i]\n else:\n l = times[i+1]-times[i]\n newTimes += [t]\n t += l\n return newTimes\n\ndef reorder(wave,goal,fs=20,wfd = lambda f,a,b: abs(abs2(a)-abs2(b))):\n l = [i for i in range(len(wave.polys))]\n goalF = [goal.freqComponent(i) for i in range(1-fs,fs)]\n best = wave\n bestD = 1e300\n for p in itertools.permutations(l):\n guess = PiecewizePoly([wave.polys[i] for i in p],reorderTimes(wave.times,p,wave.mod),wave.mod)\n guessF = [guess.freqComponent(i) for i in range(1-fs,fs)]\n d = 0\n for i in range(len(goalF)):\n d += wfd(1-fs+i,goalF[i],guessF[i])\n if d < bestD:\n best = guess\n bestD = d\n return best\n\n\ndef quickStar(n,s=2):\n return pnlinConnectDots([eone**(1j*i*s/n) for i in range(n)])*.5\n\ndef prettyStar(n,rl=.5):\n return pnlinConnectDots([eone**(1j*(i+.5*j)/n)*[1,rl][j] for i in range(n) for j in range(2)])*.5\n\n\n\ndef getPSineApprox(sects=2,integs=12):\n offs = integs%4\n guess = PiecewizePoly([[math.sin(((i+.5)/sects+offs/4)*2*math.pi)] for i in range(sects)],[i/sects for i in range(sects)]).unbiased()\n for i in range(integs):\n guess = guess.integ(0,1).unbiased().norm()\n return guess\n\n\n\n\n\ndef c(f,g):\n for i in g:\n yield f(i)\n\ndef x(n,g):\n for i in g:\n yield n*i\ndef p(n,g):\n for i in g:\n yield n+i\ndef const(n):\n while 1:\n yield n\ndef integ(g,a=0):\n for i in g:\n a += i\n yield a\ndef deriv(g):\n p = next(g)\n for i in g:\n yield i-p\n p = i\ndef clamp(n,v=1):\n return min(max(n,-v),v)\ndef bderiv(g,b=1):\n p = next(g)\n d = 0\n for i in g:\n d += i-p\n p = i\n v = clamp(d,b)\n yield v\n d -= v\n\n\n\n\n \n \ndef send(g1,g2):\n next(g1)\n while 1:\n yield g1.send(next(g2))\n \nclass passFilter:\n def __init__(self):\n self.value = 0\n def send(self,val,time=1):\n self.value = val\n return val\nclass contRAvgFilt(passFilter):\n def __init__(self,a):\n self.alpha = math.log(a)\n self.value = 0\n def send(self,val,time=1):\n self.value = 
val+(self.value-val)*math.exp(self.alpha*time)\n return self.value\n\ndef getPerfSquareBuff(n,d=1):\n w = 1\n outbuf = [0 for i in range(n)]\n while w < n/d/2:\n for i in range(n):\n outbuf[i] += math.sin(i*2*pi*d/n*w)/w\n w += 2\n return outbuf\n\n\ndef nearestDownSample(g,r=1):\n a = 0\n for i in g:\n while a < 1:\n yield i\n a += r\n a -= 1\n \ndef linearDownSample(g,r=1):\n p = 0\n a = 0\n for i in g:\n while a < 1:\n yield a*i+(1-a)*p\n a += r\n p = i\n a -= 1\n \ndef fsamp(f,s=[(-1,.5),(1,.5)],filt=None,r=48000):\n if filt == None:\n filt = contRAvgFilt(1/r)\n a = 0\n i = 0\n if type(f)==int or type(f)==float:\n def g(v):\n while 1:\n yield v\n f = g(f)\n filtered = 0\n while 1:\n t = next(f)/r\n while t > 0:\n dt = min(t,s[i][1]-a)\n \n a += dt\n t -= dt\n filt.send(s[i][0],dt)\n\n if a>=s[i][1]:\n a -= s[i][1]\n i = (i+1)%len(s)\n\n \n yield filt.value\n \n \n\n\n\n\n\n#actual fm stuff\nfrom filters import IIR\nimport numpy as np\n\n\ndef phaseModulate(g,d=.1,f=10000,sr=48000):\n t = 0\n for i in g:\n t += f/sr\n yield nsin(t+i.real*d)+1j*(nsin(t+.25+i.imag*d))\ndef modulate(g,d=0.01,f=10000,sr=48000):\n t = .25\n for i in g:\n t += (d*i+1+1j)*f/sr\n yield (nsin(t.real)+1j*nsin(t.imag))\n\n\"\"\"def stereoEncode(g,c=10000,sr=48000):\n t = 0\n flt = IIR()\n flt.setPolys([1],\n for i in g:\n t += (c+i.imag)/sr\n yield nsin(t)+i.real\n\n\ndef stereoDecode(g,c=15000,sr=48000):\n for i in g:\n r = \n\"\"\"\n\n\n\n\n\n#def fm(\n",
"\"\"\"OA\n\n\"\"\"\nimport numpy as np\nfrom NNBlocks import tanh,sigm,relu,iden,isNone,expo,lrelu\n\n\ndef boxcars(periods):\n periods = np.array(periods,dtype=int)\n phases = np.zeros(periods.shape,dtype=int)\n def filt(v,ph=phases,pd=periods,stored=[None]):\n if isNone(stored[0]):\n stored[0] = np.zeros(periods.shape+v.shape)\n stored[0][ph==0] = 0\n stored[0] += v\n ph += 1\n f = ph>=pd\n ph[f] = 0\n return stored[0],f\n return filt\n\nREGULARIZATION_FACTOR = 0.01\nMAX_GRADIENT_STEP = 0.1\nclass NN:\n one = np.array([1])\n def __init__(self,shape=[1,8],af=tanh,history=4):\n self.hist = history\n self.hist_ind = 0\n try:\n len(af)\n self.af = af\n except:\n self.af = [af]*(len(shape)-1)\n self.shape = shape\n self.weights = [np.zeros((shape[i+1],shape[i]+1),dtype=float) for i in range(len(shape)-1)]\n self.gd_deltas = [np.copy(w) for w in self.weights]\n self.vals = [np.zeros((history,s),dtype=float) for s in shape]\n self.fvals = [np.zeros((history,s),dtype=float) for s in shape]\n def reset_mem(self):\n for i in range(len(self.vals)):\n self.vals[i].fill(0)\n self.fvals[i].fill(0)\n \n def scramble(self,keep=0,mag=1):\n for w in self.weights:\n w *= keep\n w += np.random.normal(w)*mag\n def len(self):\n return self.shape[-1]\n def hi(self,o=0):\n return (self.hist_ind+o)%self.hist\n def __call__(self,inp=None):\n if not isNone(inp):\n h = self.hist_ind = self.hi(1)\n self.fvals[0][h][:]= inp\n for i in range(len(self.weights)):\n self.vals[i+1][h][:]=self.weights[i]@np.concatenate((self.fvals[i][h],NN.one))\n self.fvals[i+1][h][:]=self.af[i](self.vals[i+1][h][:])\n return self.fvals[len(self.weights)][self.hi()]\n def grad_desc(self,prop,a=0.001,to=0):\n assert to < self.hist\n assert to >= 0\n h = self.hi(-to)\n for i in range(len(self.weights)-1,-1,-1):\n #print(\"1:\",\"prop:\",prop,\"vals[i+1]:\",self.vals[i+1][h],\"weights[i]:\",self.weights[i])\n prop = self.af[i].gradient(self.vals[i+1][h],self.fvals[i+1][h])*prop\n d = np.outer(prop,np.concatenate((self.fvals[i][h],NN.one)))\n #print(\"2:\",\"prop:\",prop,\"fvals[i]:\",self.fvals[i][h],\"outer:\",d)\n self.gd_deltas[i] -= d*a + self.weights[i]*REGULARIZATION_FACTOR*a\n prop = self.weights[i].transpose()[:-1]@prop\n #print(\"3:\",\"prop:\",prop)\n #print(\" \")\n return prop\n def grad_apply(self,vel=0):\n for i in range(len(self.weights)):\n self.weights[i] += np.clip(self.gd_deltas[i],-MAX_GRADIENT_STEP,MAX_GRADIENT_STEP)\n self.gd_deltas[i] = np.clip(self.gd_deltas[i]*vel,-MAX_GRADIENT_STEP,MAX_GRADIENT_STEP)\n #gradient ascent\n def grad(self,prop,a=0.001,to=0):\n assert to < self.hist\n assert to >= 0\n h = self.hi(-to)\n for i in range(len(self.weights)-1,-1,-1):\n prop = self.af[i].gradient(self.vals[i+1][h],self.fvals[i+1][h])*prop\n d = np.outer(prop,np.concatenate((self.fvals[i][h],NN.one)))\n self.gd_deltas[i] += d*a #- self.weights[i]*REGULARIZATION_FACTOR*a\n prop = self.weights[i].transpose()[:-1]@prop\n return prop\n \ndef graph(func):\n from matplotlib import pyplot as plt\n plt.plot([i/100 for i in range(-2000,2000)],[func(inp=i/100)[0] for i in range(-2000,2000)])\n return plt\n plt.show(block=0)\n\n\ndef grad_test_nn():\n print(\"making (1,2,1) relu net:\")\n print(\" -3 \")\n print(\" 1 -> @ 2 \\ \")\n print(\"->@ -> @ \")\n print(\" -1 -> @ 3 / -1 \")\n print(\" +2 \")\n n = NN((1,2,1),relu)\n n.weights[0] = np.array([[1.,-3],[-1,2]])\n n.weights[1] = np.array([[2.,3,-1]])\n print(\"expect n(0) = 5\")\n print(\" -3 | 0 \")\n print(\" 0 5 | 5 \")\n print(\" 2 | 2 \")\n print(\"got\",n(0))\n 
print(\"====\")\n print(\"expect n(1) = 2\")\n print(\" -2 | 0 \")\n print(\" 1 2 | 2 \")\n print(\" 1 | 1 \")\n print(\"got\",n(1))\n print(\"====\")\n print(\"gradient step size 0.01\")\n print(\"teaching n(1) = 0\")\n print(\"expect this:\")\n print(\" [ 1,-3] f [2 3 -1] f -> 2 \")\n print(\" [-1, 2]^ ^ ^ ^ \")\n print(\"gradients: ^ [0 2 2] 2 2 \")\n print(\" ^ ^ 4,6 \")\n print(\" ^ 0,6\")\n print(\" [ 0, 0] \")\n print(\" [ 6, 6] \")\n print(\"^< -6 \")\n g = n.grad_desc(n(1),0.01)\n print(\"got:\",g,n.gd_deltas)\n n.grad_apply()\n print(\"now n(1) = \",n(1))\n\n \n\n \ndef test_nn(a=0.1,vel=0,shape=(1,2,1),t=sigm):\n #print(\"Making\",shape,\"sigm net\")\n n = NN(shape,t)\n n.scramble()\n from matplotlib import pyplot as plt\n \n fig, ax = plt.subplots()\n ax.set_ylim((-2,2))\n \n p, = ax.plot([i/10 for i in range(-200,200)],[n(inp=i/10)[0] for i in range(-200,200)])\n\n def anim(i):\n for j in range(100):\n for x,y in [(0,1),(1,0),(-1,0)]:\n v = n(x)\n n.grad_desc(v-y,a)\n n.grad_apply(vel)\n p.set_ydata([n(inp=i/10)[0] for i in range(-200,200)])\n return p,\n import matplotlib.animation as animation\n ani = animation.FuncAnimation(fig,anim,interval=1)\n \n plt.show(block=0)\n return ani,anim,fig,ax,p\n\ndef test_nn_stack(a=0.1,vel=0,shape=[(1,2),(2,1)],t=[sigm,sigm],xr=[-1,0,1],yr=[0,1,0]):\n #print(\"Making\",shape,\"sigm net\")\n ns = [NN(shape[i],t[i%len(t)]) for i in range(len(shape))]\n for n in ns:\n n.scramble()\n from matplotlib import pyplot as plt\n \n fig, ax = plt.subplots()\n ax.set_ylim((-2,2))\n\n def f(i):\n v = i\n for n in ns:\n v = n(v)\n return v\n\n def gd(g,a):\n for n in ns[::-1]:\n g = n.grad(g,a)\n return g\n \n p, = ax.plot([i/10 for i in range(-200,200)],[f(i/10)[0] for i in range(-200,200)])\n ax.plot(xr,yr,'o')\n def anim(i,a=a,vel=vel):\n for i in range(len(xr)):\n v = f(xr[i])\n gd(yr[i]-v,a)\n for n in ns:\n n.grad_apply(vel)\n p.set_ydata([f(i/10)[0] for i in range(-200,200)])\n return p,\n import matplotlib.animation as animation\n ani = animation.FuncAnimation(fig,anim,interval=1)\n \n plt.show(block=0)\n return ani,anim,fig,ax,p\n\n\nclass NoGC:\n def __init__(self,*stuff):\n self.stuff = stuff\n def __repr__(self):\n return \"NoGC()\"\nimport random\ndef test_rnn(a=0.1,vel=0,l=2,h=5,shape=(2,2),t=sigm,noise=0):\n #print(\"Making\",shape,\"sigm net\")\n n = NN(shape,t,h)\n n.scramble()\n from matplotlib import pyplot as plt\n if type(l) == int:\n l = lambda i,l=l: i == l\n if type(a) != type(l):\n a = lambda i,l,v=a : v\n if type(vel) != type(l):\n vel = lambda i,l,v=vel : v\n\n def f(l,res=[n()]):\n n.reset_mem()\n r = []\n n(np.zeros(n.len()))\n #n(res[0])\n #for i in range(int((random.random()*10+1)*h)):\n # n(np.concatenate((np.zeros(1),n()[1:])))\n #res[0] = np.concatenate((np.zeros(1),n()[1:]))\n for i in range(h-l):\n r += [np.copy(n(np.concatenate((np.zeros(1),n()[1:]))))]\n #r += [np.zeros(n.len())]*(h-l)\n for i in range(l):\n r += [np.copy(n(np.concatenate((np.ones(1),n()[1:]))))]\n r += [np.copy(n(np.concatenate((np.zeros(1),n()[1:]))))]\n return r\n \n\n def teach(p,a):\n prop = np.zeros(n.len(),dtype=float)\n prop[0] = p\n for i in range(h):\n prop[1:] = n.grad(np.tanh(prop),a,i)[1:]\n #print(i,prop[1:])\n prop[0] = 0\n \n fig, ax = plt.subplots()\n ax.set_ylim((-2,2))\n d = [f(i) for i in range(h)]\n def dat(i,j,d):\n return [e[j] for e in d[i]]\n p = [[ax.plot([i-1+(o+1)/(h+1) for o in range(h+1)],dat(i,j,d))[0] for j in range(n.len())] for i in range(h)]\n r, = ax.plot(range(h),[p[-1][0] for p in d],'o')\n g, = 
ax.plot(range(h),[0 for p in d],'1')\n t, = ax.plot(range(h),[l(i) for i in range(h)],'x')\n def anim(ind):\n #for j in range(100):\n d = []\n gd = []\n for i in range(h):\n v = f(i)\n gd += [l(i)-v[-1][0]]\n teach(gd[-1],a(ind,gd[-1]))\n d += [v]\n n.grad_apply(vel(ind,gd[-1]))\n n.scramble(1,noise)\n r.set_ydata([p[-1][0] for p in d])\n for i in range(len(p)):\n for j in range(len(p[i])):\n p[i][j].set_ydata(dat(i,j,d))\n g.set_ydata([-gd[i] for i in range(h)])\n return r,g,*sum(p,[])\n import matplotlib.animation as animation\n ani = animation.FuncAnimation(fig,anim,interval=1)\n plt.show(block=0)\n return n,NoGC(ani,anim,fig,ax,p)\n\n\n\n\n\n\n\n\n\n\n\n\nDEFAULT_SUBNET = [8]\n \nclass Clockwork_RNN:\n def __init__(self,**ka):\n params = {'feedback_activation_function':tanh,\n 'output_activation_function':iden,\n 'resample_filter':boxcars,\n 'clock_periods':[1<<i for i in range(16)],\n 'subnet_sizes':[DEFAULT_SUBNET for i in range(16)],\n 'subnet_activation_function':tanh,\n 'outputs':2,\n 'inputs':1,\n 'feedback_history':3}\n params.update(ka)\n \n netdescs = [(params['clock_periods'][i],params['subnet_sizes'][i]) for i in range(len(params['clock_periods']))]\n netdescs.sort(key=lambda a:a[0])\n self.subnets = []\n chain = 0\n self.inps = params['inputs']\n self.hist = params['feedback_history']\n self.netdescs = netdescs\n self.net_offs = [params['outputs']]\n self.bp_accs = []\n\n\n #separate the output weight matrix into ones for each net\n self.output_weights = []\n self.gd_delta = []\n self.output_af = params['output_activation_function']\n self.output_parts = [np.zeros(params['outputs']) for i in netdescs]\n self.output = np.zeros(params['outputs'])\n self.output_unaf = np.zeros(params['outputs'])\n \n #build from slowest to fastest\n for nd in netdescs[::-1]:\n s = nd[1][-1]\n chain += s #recurrent inputs\n self.output_weights += [np.zeros((params['outputs'],1+s),dtype=float)]\n self.bp_accs += [np.zeros(s)]\n self.net_offs += [chain+params['outputs']]\n subnet = NN([chain+params['inputs']]+nd[1],\n params['subnet_activation_function'],\n self.hist)\n self.subnets += [subnet]\n self.subnets = self.subnets[::-1] #have [0] be fastest\n self.net_offs = self.net_offs[::-1]\n self.output_weights = self.output_weights[::-1]\n self.gd_delta = [np.zeros(w.shape) for w in self.output_weights]\n self.ran = None\n\n\n self.alphas = [1]*len(self.subnets)\n self.decays = [.5]*len(self.subnets)\n \n \n self.decimation_filt = params['resample_filter']([n[0] for n in netdescs])\n self.feedback_af = params['feedback_activation_function']\n\n \n \n \n def _construct_subnet_input(self,netid,inp):\n #input of net is concat(inp, slowest,...,recurrent)\n assert len(inp) == self.inps\n cons = (inp,)\n for i in range(len(self.subnets)-1,netid-1,-1):\n sn = self.subnets[i]\n #nol = self.net_offs[netid]\n #noh = self.net_offs[netid+1]\n cons += (self.feedback_af(sn()),)\n return np.concatenate(cons)\n def _construct_output_layer_input(self):\n cons = (NN.one,)\n for sn in self.subnets:\n cons += (sn(),)\n return np.concatenate(cons)\n def __call__(self,inp=None):\n if not isNone(inp):\n #fiter\n inps,self.ran = self.decimation_filt(inp)\n self.output_unaf.fill(0)\n for i in range(len(self.ran)):\n if self.ran[i]:\n sni = self._construct_subnet_input(i,inps[i])\n self.output_parts[i][:] = self.output_weights[i] @ np.concatenate((self.subnets[i](sni),NN.one))\n self.output_unaf += self.output_parts[i]\n self.output[:] = self.output_af(self.output_unaf)\n return self.output\n def 
grad_desc(self,prop,a=0.001):\n prop = self.output_af.gradient(self.output_unaf,self.output)*prop\n #self.gd_delta += a*np.outer(prop,self._construct_output_layer_input())\n #prop = self.output_weights[1:].transpose()@prop\n for i in range(len(self.bp_accs)):\n self.gd_delta[i] += a*np.outer(prop,np.concatenate((self.subnets[i](),NN.one))) + self.output_weights[i]*REGULARIZATION_FACTOR*a\n self.bp_accs[i] += self.output_weights[i].transpose()[:-1]@prop\n #apply grad descent to the nets that ran\n for i in range(len(self.ran)):\n if self.ran[i]:\n prop = self.bp_accs[i]\n for h in range(self.hist):\n prop = self.subnets[i].grad_desc(prop,a*self.alphas[i])\n #now take apart this to get the gradients for the slower nets\n offs = self.inps\n for si in range(len(self.subnets)-1,i,-1):\n sn = self.subnets[si]\n l = sn.len()\n self.bp_accs[si] += self.feedback_af.gradient(prop[offs:offs+l])*self.alphas[i]\n offs += l\n prop = self.feedback_af.gradient(self.subnets[i]())*prop[offs:]*self.decays[i]\n #recurse\n #reset the backprop gradient accumulator\n self.bp_accs[i].fill(0)\n def grad_apply(self,vel=0):\n for i in range(len(self.ran)):\n if self.ran[i]:\n self.subnets[i].grad_apply(vel)\n self.output_weights[i] -= np.clip(self.gd_delta[i],-MAX_GRADIENT_STEP,MAX_GRADIENT_STEP)\n self.gd_delta[i] *= vel\n def scramble(self,keep=0,mag=1):\n for i in range(len(self.subnets)):\n self.output_weights[i] *= keep\n self.output_weights[i] += np.random.normal(self.output_weights[i])*mag\n for sn in self.subnets:\n sn.scramble(keep,mag)\n\nn = Clockwork_RNN()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
],
[
"numpy.clip",
"matplotlib.pyplot.subplots",
"numpy.ones",
"numpy.concatenate",
"numpy.copy",
"numpy.random.normal",
"matplotlib.animation.FuncAnimation",
"numpy.tanh",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
accordinglyto/dferte | [
"d4b8449c1633973dc538c9e72aca5d37802a4ee4",
"d4b8449c1633973dc538c9e72aca5d37802a4ee4",
"d4b8449c1633973dc538c9e72aca5d37802a4ee4",
"d4b8449c1633973dc538c9e72aca5d37802a4ee4"
] | [
"src/predict-binary.py",
"backup/23.py",
"src/18.py",
"backup/15.py"
] | [
"import os\nimport numpy as np\n#os.environ[\"KERAS_BACKEND\"] = \"plaidml.keras.backend\"\n\nfrom keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\nfrom keras.models import Sequential, load_model\n\n\nimg_width, img_height = 48, 48\nmodel_path = '../src/models/model.h5'\nweights_path = '../src/models/weights'\nmodel = load_model(model_path)\ntest_path = '../data/test'\n\ndef predict(file):\n x = load_img(file, target_size=(img_width,img_height))\n x = img_to_array(x)\n x = np.expand_dims(x, axis=0)\n array = model.predict(x)\n result = array[0]\n if result[0] > result[1]:\n if result[0] > 0.9:\n print(\"Predicted answer: Buy\")\n answer = 'buy'\n print(result)\n print(array)\n else:\n print(\"Predicted answer: Not confident\")\n answer = 'n/a'\n print(result)\n else:\n if result[1] > 0.9:\n print(\"Predicted answer: Sell\")\n answer = 'sell'\n print(result)\n else:\n print(\"Predicted answer: Not confident\")\n answer = 'n/a'\n print(result)\n\n return answer\n\n\ntb = 0\nts = 0\nfb = 0\nfs = 0\nna = 0\n\nfor i, ret in enumerate(os.walk(test_path + '/buy')):\n for i, filename in enumerate(ret[2]):\n if filename.startswith(\".\"):\n continue\n print(\"Label: buy\")\n result = predict(ret[0] + '/' + filename)\n if result == \"buy\":\n tb += 1\n elif result == 'n/a':\n print('no action')\n na += 1\n else:\n fb += 1\n\nfor i, ret in enumerate(os.walk(test_path + '/sell')):\n for i, filename in enumerate(ret[2]):\n if filename.startswith(\".\"):\n continue\n print(\"Label: sell\")\n result = predict(ret[0] + '/' + filename)\n if result == \"sell\":\n ts += 1\n elif result == 'n/a':\n print('no action')\n na += 1\n else:\n fs += 1\n\n\"\"\"\nCheck metrics\n\"\"\"\nprint(\"True buy: \", tb)\nprint(\"True sell: \", ts)\nprint(\"False buy: \", fb) # important\nprint(\"False sell: \", fs)\nprint(\"No action\", na)\n\nprecision = (tb+ts) / (tb + ts + fb + fs)\nrecall = tb / (tb + fs)\nprint(\"Precision: \", precision)\nprint(\"Recall: \", recall)\n\nf_measure = (2 * recall * precision) / (recall + precision)\nprint(\"F-measure: \", f_measure)\n",
"from numpy import genfromtxt\nimport matplotlib.pyplot as plt\nimport mpl_finance\nimport numpy as np\nimport uuid\nimport matplotlib\n\n# Input your csv file here with historical data\n\nad = genfromtxt(f\"../financial_data/RRHI.csv\", delimiter=\",\", dtype=str)\n\n\ndef convolve_sma(array, period):\n return np.convolve(array, np.ones((period,)) / period, mode=\"valid\")\n\n\ndef graphwerk(start, finish):\n open = []\n high = []\n low = []\n close = []\n volume = []\n # decision = []\n date = []\n\n c_open = []\n c_high = []\n c_low = []\n c_close = []\n c_volume = []\n c_date = []\n c_start = start + 12\n\n for x in range(finish - start):\n c_open.append(float(pd[c_start][1]))\n c_high.append(float(pd[c_start][2]))\n c_low.append(float(pd[c_start][3]))\n c_close.append(float(pd[c_start][4]))\n c_volume.append(float(pd[c_start][5]))\n c_date.append(pd[c_start][0])\n c_start = c_start + 1\n\n for x in range(finish - start):\n\n # Below filtering is valid for eurusd.csv file. Other financial data files have different orders so you need to find out\n # what means open, high and close in their respective order.\n open.append(float(pd[start][1]))\n high.append(float(pd[start][2]))\n low.append(float(pd[start][3]))\n close.append(float(pd[start][4]))\n volume.append(float(pd[start][5]))\n # decision.append(str(pd[start][6]))\n date.append(pd[start][0])\n\n start = start + 1\n\n decision = \"sell\"\n min_forecast = min(c_low)\n max_forecast = max(c_high)\n\n if close[-1] * 1.03 < max_forecast:\n decision = \"buy\"\n # for z in all_prices:\n # if close[-1] * 1.03 < z:\n # decision = \"buy\"\n\n sma = convolve_sma(close, 5)\n smb = list(sma)\n diff = sma[-1] - sma[-2]\n\n for x in range(len(close) - len(smb)):\n smb.append(smb[-1] + diff)\n\n fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor=\"w\", edgecolor=\"k\")\n dx = fig.add_subplot(111)\n # mpl_finance.volume_overlay(ax, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=1)\n mpl_finance.candlestick2_ochl(\n dx, open, close, high, low, width=1.5, colorup=\"g\", colordown=\"r\", alpha=0.5\n )\n plt.autoscale()\n # plt.plot(smb, color=\"blue\", linewidth=10, alpha=0.5)\n plt.axis(\"off\")\n\n if decision == \"sell\":\n print(\"last value: \" + str(close[-1]))\n print(\n \"range of values in next 13 bars: \"\n + str(min_forecast)\n + \"-\"\n + str(max_forecast)\n )\n print(\"sell\")\n plt.savefig(sell_dir + str(uuid.uuid4()) + \".jpg\", bbox_inches=\"tight\")\n else:\n print(\"last value: \" + str(close[-1]))\n print(\n \"range of values in next 13 bars: \"\n + str(min_forecast)\n + \"-\"\n + str(max_forecast)\n )\n print(\"buy\")\n plt.savefig(buy_dir + str(uuid.uuid4()) + \".jpg\", bbox_inches=\"tight\")\n # if close[-1] >= close_next:\n # print('previous value is bigger')\n # print('last value: ' + str(close[-1]))\n # print('next value: ' + str(close_next))\n # print('sell')\n # plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight')\n # else:\n # print('previous value is smaller')\n # print('last value: '+ str(close[-1]))\n # print('next value: ' + str(close_next))\n # print('buy')\n # plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight')\n\n # plt.show()\n open.clear()\n close.clear()\n volume.clear()\n high.clear()\n low.clear()\n plt.cla()\n plt.clf()\n\n\n# output = []\n# with open(\"STOCKbluechip.csv\") as f:\n# output = [str(s) for line in f.readlines() for s in line[:-1].split(\",\")]\n\n# for stock in output:\n\npd = ad\n\nbuy_dir = \"../data/train/buy/\"\nsell_dir = 
\"../data/train/sell/\"\n\niter = 0\n\nfor x in range(len(pd)):\n graphwerk(iter, iter + 12)\n iter = iter + 2\n",
"from numpy import genfromtxt\nimport matplotlib.pyplot as plt\nimport mpl_finance\nimport numpy as np\nimport uuid\nimport matplotlib\n\n# Input your csv file here with historical data\n\nad = genfromtxt(f\"../financial_data/MEG.csv\", delimiter=\",\", dtype=str)\nad = ad[1500:]\n\n\ndef convolve_sma(array, period):\n return np.convolve(array, np.ones((period,)) / period, mode=\"valid\")\n\n\ndef graphwerk(start, finish):\n open = []\n high = []\n low = []\n close = []\n volume = []\n # decision = []\n date = []\n\n c_open = []\n c_high = []\n c_low = []\n c_close = []\n c_volume = []\n c_date = []\n c_start = start + 18\n\n for x in range(finish - start - 6):\n c_open.append(float(pd[c_start][1]))\n c_high.append(float(pd[c_start][2]))\n c_low.append(float(pd[c_start][3]))\n c_close.append(float(pd[c_start][4]))\n c_volume.append(float(pd[c_start][5]))\n c_date.append(pd[c_start][0])\n c_start = c_start + 1\n\n for x in range(finish - start):\n\n # Below filtering is valid for eurusd.csv file. Other financial data files have different orders so you need to find out\n # what means open, high and close in their respective order.\n open.append(float(pd[start][1]))\n high.append(float(pd[start][2]))\n low.append(float(pd[start][3]))\n close.append(float(pd[start][4]))\n volume.append(float(pd[start][5]))\n # decision.append(str(pd[start][6]))\n date.append(pd[start][0])\n\n start = start + 1\n\n decision = \"sell\"\n min_forecast = min(c_low)\n max_forecast = max(c_high)\n\n if close[-1] * 1.06 < max_forecast:\n decision = \"buy\"\n # for z in all_prices:\n # if close[-1] * 1.03 < z:\n # decision = \"buy\"\n\n sma = convolve_sma(close, 5)\n smb = list(sma)\n diff = sma[-1] - sma[-2]\n\n for x in range(len(close) - len(smb)):\n smb.append(smb[-1] + diff)\n\n fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor=\"w\", edgecolor=\"k\")\n dx = fig.add_subplot(111)\n # mpl_finance.volume_overlay(ax, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=1)\n mpl_finance.candlestick2_ochl(\n dx, open, close, high, low, width=1.5, colorup=\"g\", colordown=\"r\", alpha=0.5\n )\n plt.autoscale()\n # plt.plot(smb, color=\"blue\", linewidth=10, alpha=0.5)\n plt.axis(\"off\")\n\n if decision == \"sell\":\n print(\"last value: \" + str(close[-1]))\n print(\n \"range of values in next 13 bars: \"\n + str(min_forecast)\n + \"-\"\n + str(max_forecast)\n )\n print(\"sell\")\n plt.savefig(sell_dir + str(uuid.uuid4()) + \".jpg\", bbox_inches=\"tight\")\n else:\n print(\"last value: \" + str(close[-1]))\n print(\n \"range of values in next 13 bars: \"\n + str(min_forecast)\n + \"-\"\n + str(max_forecast)\n )\n print(\"buy\")\n plt.savefig(buy_dir + str(uuid.uuid4()) + \".jpg\", bbox_inches=\"tight\")\n # if close[-1] >= close_next:\n # print('previous value is bigger')\n # print('last value: ' + str(close[-1]))\n # print('next value: ' + str(close_next))\n # print('sell')\n # plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight')\n # else:\n # print('previous value is smaller')\n # print('last value: '+ str(close[-1]))\n # print('next value: ' + str(close_next))\n # print('buy')\n # plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight')\n\n # plt.show()\n open.clear()\n close.clear()\n volume.clear()\n high.clear()\n low.clear()\n plt.cla()\n plt.clf()\n\n\n# output = []\n# with open(\"STOCKbluechip.csv\") as f:\n# output = [str(s) for line in f.readlines() for s in line[:-1].split(\",\")]\n\n# for stock in output:\n\npd = ad\n\nbuy_dir = 
\"../data/train/buy/\"\nsell_dir = \"../data/train/sell/\"\n\niter = 0\n\nfor x in range(len(pd)):\n graphwerk(iter, iter + 18)\n iter = iter + 2\n",
"from numpy import genfromtxt\nimport matplotlib.pyplot as plt\nimport mpl_finance\nimport numpy as np\nimport uuid\nimport matplotlib\n\n# Input your csv file here with historical data\n\nad = genfromtxt(f\"../financial_data/JGS.csv\", delimiter=\",\", dtype=str)\n\n\ndef convolve_sma(array, period):\n return np.convolve(array, np.ones((period,)) / period, mode=\"valid\")\n\n\ndef graphwerk(start, finish):\n open = []\n high = []\n low = []\n close = []\n volume = []\n # decision = []\n date = []\n\n c_open = []\n c_high = []\n c_low = []\n c_close = []\n c_volume = []\n c_date = []\n c_start = start + 12\n\n for x in range(finish - start):\n c_open.append(float(pd[c_start][1]))\n c_high.append(float(pd[c_start][2]))\n c_low.append(float(pd[c_start][3]))\n c_close.append(float(pd[c_start][4]))\n c_volume.append(float(pd[c_start][5]))\n c_date.append(pd[c_start][0])\n c_start = c_start + 1\n\n for x in range(finish - start):\n\n # Below filtering is valid for eurusd.csv file. Other financial data files have different orders so you need to find out\n # what means open, high and close in their respective order.\n open.append(float(pd[start][1]))\n high.append(float(pd[start][2]))\n low.append(float(pd[start][3]))\n close.append(float(pd[start][4]))\n volume.append(float(pd[start][5]))\n # decision.append(str(pd[start][6]))\n date.append(pd[start][0])\n\n start = start + 1\n\n decision = \"sell\"\n min_forecast = min(c_low)\n max_forecast = max(c_high)\n\n if close[-1] * 1.03 < max_forecast:\n decision = \"buy\"\n # for z in all_prices:\n # if close[-1] * 1.03 < z:\n # decision = \"buy\"\n\n sma = convolve_sma(close, 5)\n smb = list(sma)\n diff = sma[-1] - sma[-2]\n\n for x in range(len(close) - len(smb)):\n smb.append(smb[-1] + diff)\n\n fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor=\"w\", edgecolor=\"k\")\n dx = fig.add_subplot(111)\n # mpl_finance.volume_overlay(ax, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=1)\n mpl_finance.candlestick2_ochl(\n dx, open, close, high, low, width=1.5, colorup=\"g\", colordown=\"r\", alpha=0.5\n )\n plt.autoscale()\n # plt.plot(smb, color=\"blue\", linewidth=10, alpha=0.5)\n plt.axis(\"off\")\n\n if decision == \"sell\":\n print(\"last value: \" + str(close[-1]))\n print(\n \"range of values in next 13 bars: \"\n + str(min_forecast)\n + \"-\"\n + str(max_forecast)\n )\n print(\"sell\")\n plt.savefig(sell_dir + str(uuid.uuid4()) + \".jpg\", bbox_inches=\"tight\")\n else:\n print(\"last value: \" + str(close[-1]))\n print(\n \"range of values in next 13 bars: \"\n + str(min_forecast)\n + \"-\"\n + str(max_forecast)\n )\n print(\"buy\")\n plt.savefig(buy_dir + str(uuid.uuid4()) + \".jpg\", bbox_inches=\"tight\")\n # if close[-1] >= close_next:\n # print('previous value is bigger')\n # print('last value: ' + str(close[-1]))\n # print('next value: ' + str(close_next))\n # print('sell')\n # plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight')\n # else:\n # print('previous value is smaller')\n # print('last value: '+ str(close[-1]))\n # print('next value: ' + str(close_next))\n # print('buy')\n # plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight')\n\n # plt.show()\n open.clear()\n close.clear()\n volume.clear()\n high.clear()\n low.clear()\n plt.cla()\n plt.clf()\n\n\n# output = []\n# with open(\"STOCKbluechip.csv\") as f:\n# output = [str(s) for line in f.readlines() for s in line[:-1].split(\",\")]\n\n# for stock in output:\n\npd = ad\n\nbuy_dir = \"../data/train/buy/\"\nsell_dir = 
\"../data/train/sell/\"\n\niter = 0\n\nfor x in range(len(pd)):\n graphwerk(iter, iter + 12)\n iter = iter + 2\n"
] | [
[
"numpy.expand_dims"
],
[
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.cla",
"numpy.genfromtxt",
"numpy.ones",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.cla",
"numpy.genfromtxt",
"numpy.ones",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.cla",
"numpy.genfromtxt",
"numpy.ones",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
T3p/baselines | [
"5623c9160d1e86ebca3e673f142fe6b14a1db06c",
"5623c9160d1e86ebca3e673f142fe6b14a1db06c",
"5623c9160d1e86ebca3e673f142fe6b14a1db06c"
] | [
"sacred_mis/_sources/pomis2_57be95a71b575624c33c6ffe64e50d6e.py",
"baselines/pois2/run.py",
"baselines/common/notebook_utils.py"
] | [
"import numpy as np\nimport warnings\nimport baselines.common.tf_util as U\nimport tensorflow as tf\nimport time\nfrom baselines.common import zipsame, colorize\nfrom contextlib import contextmanager\nfrom collections import deque\nfrom baselines import logger\nfrom baselines.common.cg import cg\nfrom baselines.pomis2.memory import Memory\nfrom baselines.common.centralized_sampler import traj_segment_generator\nfrom baselines.pois.utils import cluster_rewards\n\n@contextmanager\ndef timed(msg):\n print(colorize(msg, color='magenta'))\n tstart = time.time()\n yield\n print(colorize('done in %.3f seconds'%(time.time() - tstart), color='magenta'))\n\ndef update_epsilon(delta_bound, epsilon_old, max_increase=2.):\n if delta_bound > (1. - 1. / (2 * max_increase)) * epsilon_old:\n return epsilon_old * max_increase\n else:\n return epsilon_old ** 2 / (2 * (epsilon_old - delta_bound))\n\ndef line_search_parabola(theta_init, alpha, natural_gradient, set_parameter, evaluate_bound, delta_bound_tol=1e-4, max_line_search_ite=30):\n epsilon = 1.\n epsilon_old = 0.\n delta_bound_old = -np.inf\n bound_init = evaluate_bound()\n theta_old = theta_init\n\n for i in range(max_line_search_ite):\n\n theta = theta_init + epsilon * alpha * natural_gradient\n set_parameter(theta)\n\n bound = evaluate_bound()\n\n if np.isnan(bound):\n warnings.warn('Got NaN bound value: rolling back!')\n return theta_old, epsilon_old, delta_bound_old, i + 1\n\n delta_bound = bound - bound_init\n\n epsilon_old = epsilon\n epsilon = update_epsilon(delta_bound, epsilon_old)\n if delta_bound <= delta_bound_old + delta_bound_tol:\n if delta_bound_old < 0.:\n return theta_init, 0., 0., i+1\n else:\n return theta_old, epsilon_old, delta_bound_old, i+1\n\n delta_bound_old = delta_bound\n theta_old = theta\n\n return theta_old, epsilon_old, delta_bound_old, i+1\n\ndef line_search_binary(theta_init, alpha, natural_gradient, set_parameter, evaluate_loss, delta_bound_tol=1e-4, max_line_search_ite=30):\n low = 0.\n high = None\n bound_init = evaluate_loss()\n delta_bound_old = 0.\n theta_opt = theta_init\n i_opt = 0\n delta_bound_opt = 0.\n epsilon_opt = 0.\n\n epsilon = 1.\n\n for i in range(max_line_search_ite):\n\n theta = theta_init + epsilon * natural_gradient * alpha\n set_parameter(theta)\n\n bound = evaluate_loss()\n delta_bound = bound - bound_init\n\n if np.isnan(bound):\n warnings.warn('Got NaN bound value: rolling back!')\n\n if np.isnan(bound) or delta_bound <= delta_bound_opt:\n high = epsilon\n else:\n low = epsilon\n theta_opt = theta\n delta_bound_opt = delta_bound\n i_opt = i\n epsilon_opt = epsilon\n\n epsilon_old = epsilon\n\n if high is None:\n epsilon *= 2\n else:\n epsilon = (low + high) / 2.\n\n if abs(epsilon_old - epsilon) < 1e-12:\n break\n\n return theta_opt, epsilon_opt, delta_bound_opt, i_opt+1\n\ndef optimize_offline(theta_init, set_parameter, line_search, evaluate_loss, evaluate_gradient, evaluate_natural_gradient=None, gradient_tol=1e-4, bound_tol=1e-4, max_offline_ite=100):\n theta = theta_old = theta_init\n improvement = improvement_old = 0.\n set_parameter(theta)\n\n\n '''\n bound_init = evaluate_loss()\n import scipy.optimize as opt\n\n def func(x):\n set_parameter(x)\n return -evaluate_loss()\n\n def grad(x):\n set_parameter(x)\n return -evaluate_gradient().astype(np.float64)\n\n theta, bound, d = opt.fmin_l_bfgs_b(func=func,\n fprime=grad,\n x0=theta_init.astype(np.float64),\n maxiter=100,\n )\n print(bound_init, bound)\n\n print(d)\n\n set_parameter(theta)\n improvement = bound_init + bound\n return 
theta, improvement\n\n '''\n\n fmtstr = '%6i %10.3g %10.3g %18i %18.3g %18.3g %18.3g'\n titlestr = '%6s %10s %10s %18s %18s %18s %18s'\n print(titlestr % ('iter', 'epsilon', 'step size', 'num line search', 'gradient norm', 'delta bound ite', 'delta bound tot'))\n\n for i in range(max_offline_ite):\n bound = evaluate_loss()\n gradient = evaluate_gradient()\n\n if np.any(np.isnan(gradient)):\n warnings.warn('Got NaN gradient! Stopping!')\n set_parameter(theta_old)\n return theta_old, improvement\n\n if np.isnan(bound):\n warnings.warn('Got NaN bound! Stopping!')\n set_parameter(theta_old)\n return theta_old, improvement_old\n\n if evaluate_natural_gradient is not None:\n natural_gradient = evaluate_natural_gradient(gradient)\n else:\n natural_gradient = gradient\n\n if np.dot(gradient, natural_gradient) < 0:\n warnings.warn('NatGradient dot Gradient < 0! Using vanilla gradient')\n natural_gradient = gradient\n\n gradient_norm = np.sqrt(np.dot(gradient, natural_gradient))\n\n if gradient_norm < gradient_tol:\n print('stopping - gradient norm < gradient_tol')\n return theta, improvement\n\n alpha = 1. / gradient_norm ** 2\n\n theta_old = theta\n improvement_old = improvement\n theta, epsilon, delta_bound, num_line_search = line_search(theta, alpha, natural_gradient, set_parameter, evaluate_loss)\n set_parameter(theta)\n\n improvement += delta_bound\n print(fmtstr % (i+1, epsilon, alpha*epsilon, num_line_search, gradient_norm, delta_bound, improvement))\n\n if delta_bound < bound_tol:\n print('stopping - delta bound < bound_tol')\n return theta, improvement\n\n return theta, improvement\n\ndef learn(env, make_policy, *,\n n_episodes,\n horizon,\n delta,\n gamma,\n max_iters,\n sampler=None,\n use_natural_gradient=False, #can be 'exact', 'approximate'\n fisher_reg=1e-2,\n iw_method='is',\n iw_norm='none',\n bound='J',\n line_search_type='parabola',\n save_weights=0,\n improvement_tol=0.,\n center_return=False,\n render_after=None,\n max_offline_iters=100,\n callback=None,\n clipping=False,\n entropy='none',\n positive_return=False,\n reward_clustering='none',\n capacity=10,\n warm_start=True):\n\n np.set_printoptions(precision=3)\n max_samples = horizon * n_episodes\n\n if line_search_type == 'binary':\n line_search = line_search_binary\n elif line_search_type == 'parabola':\n line_search = line_search_parabola\n else:\n raise ValueError()\n\n # Building the environment\n ob_space = env.observation_space\n ac_space = env.action_space\n\n # Creating the memory buffer\n memory = Memory(capacity=capacity, batch_size=n_episodes, horizon=horizon,\n ob_space=ob_space, ac_space=ac_space)\n\n # Building the target policy and saving its parameters\n pi = make_policy('pi', ob_space, ac_space)\n all_var_list = pi.get_trainable_variables()\n var_list = [v for v in all_var_list if v.name.split('/')[1].startswith('pol')]\n shapes = [U.intprod(var.get_shape().as_list()) for var in var_list]\n n_parameters = sum(shapes)\n\n # Building a set of behavioral policies\n behavioral_policies = memory.build_policies(make_policy, pi)\n\n # Placeholders\n ob_ = ob = U.get_placeholder_cached(name='ob')\n ac_ = pi.pdtype.sample_placeholder([None], name='ac')\n mask_ = tf.placeholder(dtype=tf.float32, shape=(None), name='mask')\n rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='rew')\n disc_rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='disc_rew')\n clustered_rew_ = tf.placeholder(dtype=tf.float32, shape=(None))\n gradient_ = tf.placeholder(dtype=tf.float32, shape=(n_parameters, 1), 
name='gradient')\n iter_number_ = tf.placeholder(dtype=tf.int32, name='iter_number')\n active_policies = tf.placeholder(dtype=tf.float32, shape=(capacity), name='active_policies')\n losses_with_name = []\n\n # Total number of trajectories\n N_total = tf.reduce_sum(active_policies) * n_episodes\n\n # Split operations\n disc_rew_split = tf.reshape(disc_rew_ * mask_, [-1, horizon])\n rew_split = tf.reshape(rew_ * mask_, [-1, horizon])\n mask_split = tf.reshape(mask_, [-1, horizon])\n\n # Policy densities\n target_log_pdf = pi.pd.logp(ac_) * mask_\n target_log_pdf_split = tf.reshape(target_log_pdf, [-1, horizon])\n behavioral_log_pdfs = tf.stack([bpi.pd.logp(ac_) * mask_ for bpi in memory.policies]) # Shape is (capacity, ntraj*horizon)\n behavioral_log_pdfs_split = tf.reshape(behavioral_log_pdfs, [memory.capacity, -1, horizon])\n\n # Compute renyi divergencies and sum over time, then exponentiate\n emp_d2_split = tf.reshape(tf.stack([pi.pd.renyi(bpi.pd, 2) * mask_ for bpi in memory.policies]), [memory.capacity, -1, horizon])\n emp_d2_split_cum = tf.exp(tf.reduce_sum(emp_d2_split, axis=2))\n # Compute arithmetic and harmonic mean of emp_d2\n emp_d2_mean = tf.reduce_mean(emp_d2_split_cum, axis=1)\n emp_d2_arithmetic = tf.reduce_sum(emp_d2_mean * active_policies) / tf.reduce_sum(active_policies)\n emp_d2_harmonic = tf.reduce_sum(active_policies) / tf.reduce_sum(1 / emp_d2_mean)\n\n # Return processing: clipping, centering, discounting\n ep_return = clustered_rew_ #tf.reduce_sum(mask_split * disc_rew_split, axis=1)\n if clipping:\n rew_split = tf.clip_by_value(rew_split, -1, 1)\n if center_return:\n ep_return = ep_return - tf.reduce_mean(ep_return)\n rew_split = rew_split - (tf.reduce_sum(rew_split) / (tf.reduce_sum(mask_split) + 1e-24))\n discounter = [pow(gamma, i) for i in range(0, horizon)] # Decreasing gamma\n discounter_tf = tf.constant(discounter)\n disc_rew_split = rew_split * discounter_tf\n\n # Reward statistics\n return_mean = tf.reduce_mean(ep_return)\n return_std = U.reduce_std(ep_return)\n return_max = tf.reduce_max(ep_return)\n return_min = tf.reduce_min(ep_return)\n return_abs_max = tf.reduce_max(tf.abs(ep_return))\n return_step_max = tf.reduce_max(tf.abs(rew_split)) # Max step reward\n return_step_mean = tf.abs(tf.reduce_mean(rew_split))\n positive_step_return_max = tf.maximum(0.0, tf.reduce_max(rew_split))\n negative_step_return_max = tf.maximum(0.0, tf.reduce_max(-rew_split))\n return_step_maxmin = tf.abs(positive_step_return_max - negative_step_return_max)\n losses_with_name.extend([(return_mean, 'InitialReturnMean'),\n (return_max, 'InitialReturnMax'),\n (return_min, 'InitialReturnMin'),\n (return_std, 'InitialReturnStd'),\n (emp_d2_arithmetic, 'EmpiricalD2Arithmetic'),\n (emp_d2_harmonic, 'EmpiricalD2Harmonic'),\n (return_step_max, 'ReturnStepMax'),\n (return_step_maxmin, 'ReturnStepMaxmin')])\n\n if iw_method == 'is':\n # Sum the log prob over time. 
Shapes: target(Nep, H), behav (Cap, Nep, H)\n target_log_pdf_episode = tf.reduce_sum(target_log_pdf_split, axis=1)\n behavioral_log_pdf_episode = tf.reduce_sum(behavioral_log_pdfs_split, axis=2)\n # To avoid numerical instability, compute the inversed ratio\n log_ratio = target_log_pdf_split - behavioral_log_pdfs_split\n inverse_log_ratio_episode = - tf.reduce_sum(log_ratio, axis=2)\n\n iw = 1 / tf.reduce_sum(tf.exp(inverse_log_ratio_episode) * tf.expand_dims(active_policies, -1), axis=0)\n\n # Compute also the balance-heuristic weights\n iw_split = tf.reshape(iw, (memory.capacity, -1))\n iw_by_behavioral = tf.reduce_mean(iw_split, axis=1)\n losses_with_name.append((iw_by_behavioral[0] / tf.reduce_sum(iw_by_behavioral), 'MultiIWFirstRatio'))\n losses_with_name.append((tf.reduce_max(iw_by_behavioral), 'MultiIWMax'))\n losses_with_name.append((tf.reduce_sum(iw_by_behavioral), 'MultiIWSum'))\n losses_with_name.append((tf.reduce_min(iw_by_behavioral), 'MultiIWMin'))\n\n # Get the probability by exponentiation\n #target_pdf_episode = tf.exp(target_log_pdf_episode)\n #behavioral_pdf_episode = tf.exp(behavioral_log_pdf_episode)\n # Get the denominator by averaging over behavioral policies\n #behavioral_pdf_mixture = tf.reduce_mean(behavioral_pdf_episode, axis=0) + 1e-24\n #iw = target_pdf_episode / behavioral_pdf_mixture\n iwn = iw / n_episodes\n\n # Compute the J\n w_return_mean = tf.reduce_sum(ep_return * iwn)\n # Empirical D2 of the mixture and relative ESS\n ess_renyi_arithmetic = N_total / emp_d2_arithmetic\n ess_renyi_harmonic = N_total / emp_d2_harmonic\n # Log quantities\n losses_with_name.extend([(tf.reduce_max(iw), 'MaxIW'),\n (tf.reduce_min(iw), 'MinIW'),\n (tf.reduce_mean(iw), 'MeanIW'),\n (U.reduce_std(iw), 'StdIW'),\n (tf.reduce_min(target_log_pdf_episode), 'MinTargetPdf'),\n (tf.reduce_min(behavioral_log_pdf_episode), 'MinBehavPdf'),\n (ess_renyi_arithmetic, 'ESSRenyiArithmetic'),\n (ess_renyi_harmonic, 'ESSRenyiHarmonic')])\n else:\n raise NotImplementedError()\n\n if bound == 'J':\n bound_ = w_return_mean\n elif bound == 'max-d2-harmonic':\n bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_harmonic)) * return_abs_max\n elif bound == 'max-d2-arithmetic':\n bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_arithmetic)) * return_abs_max\n else:\n raise NotImplementedError()\n\n # Policy entropy for exploration\n ent = pi.pd.entropy()\n meanent = tf.reduce_mean(ent)\n losses_with_name.append((meanent, 'MeanEntropy'))\n # Add policy entropy bonus\n if entropy != 'none':\n scheme, v1, v2 = entropy.split(':')\n if scheme == 'step':\n entcoeff = tf.cond(iter_number_ < int(v2), lambda: float(v1), lambda: float(0.0))\n losses_with_name.append((entcoeff, 'EntropyCoefficient'))\n entbonus = entcoeff * meanent\n bound_ = bound_ + entbonus\n elif scheme == 'lin':\n ip = tf.cast(iter_number_ / max_iters, tf.float32)\n entcoeff_decay = tf.maximum(0.0, float(v2) + (float(v1) - float(v2)) * (1.0 - ip))\n losses_with_name.append((entcoeff_decay, 'EntropyCoefficient'))\n entbonus = entcoeff_decay * meanent\n bound_ = bound_ + entbonus\n elif scheme == 'exp':\n ent_f = tf.exp(-tf.abs(tf.reduce_mean(iw) - 1) * float(v2)) * float(v1)\n losses_with_name.append((ent_f, 'EntropyCoefficient'))\n bound_ = bound_ + ent_f * meanent\n else:\n raise Exception('Unrecognized entropy scheme.')\n\n losses_with_name.append((w_return_mean, 'ReturnMeanIW'))\n losses_with_name.append((bound_, 'Bound'))\n losses, loss_names = map(list, zip(*losses_with_name))\n\n '''\n if 
use_natural_gradient:\n p = tf.placeholder(dtype=tf.float32, shape=[None])\n target_logpdf_episode = tf.reduce_sum(target_log_pdf_split * mask_split, axis=1)\n grad_logprob = U.flatgrad(tf.stop_gradient(iwn) * target_logpdf_episode, var_list)\n dot_product = tf.reduce_sum(grad_logprob * p)\n hess_logprob = U.flatgrad(dot_product, var_list)\n compute_linear_operator = U.function([p, ob_, ac_, disc_rew_, mask_], [-hess_logprob])\n '''\n\n assert_ops = tf.group(*tf.get_collection('asserts'))\n print_ops = tf.group(*tf.get_collection('prints'))\n\n compute_lossandgrad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses + [U.flatgrad(bound_, var_list), assert_ops, print_ops])\n compute_grad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [U.flatgrad(bound_, var_list), assert_ops, print_ops])\n compute_bound = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [bound_, assert_ops, print_ops])\n compute_losses = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses)\n #compute_temp = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [log_inverse_ratio, abc, iw])\n\n set_parameter = U.SetFromFlat(var_list)\n get_parameter = U.GetFlat(var_list)\n policy_reinit = tf.variables_initializer(var_list)\n\n if sampler is None:\n seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=gamma)\n sampler = type(\"SequentialSampler\", (object,), {\"collect\": lambda self, _: seg_gen.__next__()})()\n\n U.initialize()\n\n # Starting optimizing\n episodes_so_far = 0\n timesteps_so_far = 0\n iters_so_far = 0\n tstart = time.time()\n lenbuffer = deque(maxlen=n_episodes)\n rewbuffer = deque(maxlen=n_episodes)\n\n while True:\n\n iters_so_far += 1\n if iters_so_far == 50:\n print('=== CHANGED GAMMA TO 1.0')\n seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=1.0)\n sampler = type(\"SequentialSampler\", (object,), {\"collect\": lambda self, _: seg_gen.__next__()})()\n\n if render_after is not None and iters_so_far % render_after == 0:\n if hasattr(env, 'render'):\n render(env, pi, horizon)\n\n if callback:\n callback(locals(), globals())\n\n if iters_so_far >= max_iters:\n print('Finished...')\n break\n\n logger.log('********** Iteration %i ************' % iters_so_far)\n\n theta = get_parameter()\n\n with timed('sampling'):\n seg = sampler.collect(theta)\n\n lens, rets = seg['ep_lens'], seg['ep_rets']\n lenbuffer.extend(lens)\n rewbuffer.extend(rets)\n episodes_so_far += len(lens)\n timesteps_so_far += sum(lens)\n\n # Adding batch of trajectories to memory\n memory.add_trajectory_batch(seg)\n\n # Get multiple batches from memory\n seg_with_memory = memory.get_trajectories()\n\n # Get clustered reward\n reward_matrix = np.reshape(seg_with_memory['disc_rew'] * seg_with_memory['mask'], (-1, horizon))\n ep_reward = np.sum(reward_matrix, axis=1)\n ep_reward = cluster_rewards(ep_reward, reward_clustering)\n\n args = ob, ac, rew, disc_rew, clustered_rew, mask, iter_number, active_policies = (seg_with_memory['ob'],\n seg_with_memory['ac'],\n seg_with_memory['rew'],\n seg_with_memory['disc_rew'],\n ep_reward,\n seg_with_memory['mask'],\n iters_so_far,\n memory.get_active_policies_mask())\n\n def evaluate_loss():\n loss = compute_bound(*args)\n return loss[0]\n\n def evaluate_gradient():\n gradient = compute_grad(*args)\n return 
gradient[0]\n\n if use_natural_gradient:\n def evaluate_fisher_vector_prod(x):\n return compute_linear_operator(x, *args)[0] + fisher_reg * x\n\n def evaluate_natural_gradient(g):\n return cg(evaluate_fisher_vector_prod, g, cg_iters=10, verbose=0)\n else:\n evaluate_natural_gradient = None\n\n with timed('summaries before'):\n logger.record_tabular(\"Iteration\", iters_so_far)\n logger.record_tabular(\"InitialBound\", evaluate_loss())\n logger.record_tabular(\"EpLenMean\", np.mean(lenbuffer))\n logger.record_tabular(\"EpRewMean\", np.mean(rewbuffer))\n logger.record_tabular(\"EpThisIter\", len(lens))\n logger.record_tabular(\"EpisodesSoFar\", episodes_so_far)\n logger.record_tabular(\"TimestepsSoFar\", timesteps_so_far)\n logger.record_tabular(\"TimeElapsed\", time.time() - tstart)\n\n if save_weights > 0 and iters_so_far % save_weights == 0:\n logger.record_tabular('Weights', str(get_parameter()))\n import pickle\n file = open('checkpoint' + str(iters_so_far) + '.pkl', 'wb')\n pickle.dump(theta, file)\n\n if not warm_start or memory.get_current_load() == capacity:\n # Optimize\n with timed(\"offline optimization\"):\n theta, improvement = optimize_offline(theta,\n set_parameter,\n line_search,\n evaluate_loss,\n evaluate_gradient,\n evaluate_natural_gradient,\n max_offline_ite=max_offline_iters)\n\n set_parameter(theta)\n print(theta)\n\n with timed('summaries after'):\n meanlosses = np.array(compute_losses(*args))\n for (lossname, lossval) in zip(loss_names, meanlosses):\n logger.record_tabular(lossname, lossval)\n else:\n # Reinitialize the policy\n tf.get_default_session().run(policy_reinit)\n\n logger.dump_tabular()\n\n env.close()\n",
"#!/usr/bin/env python3\n'''\n This script runs rllab or gym environments. To run RLLAB, use the format\n rllab.<env_name> as env name, otherwise gym will be used.\n'''\n# Common imports\nimport sys, re, os, time, logging\nfrom collections import defaultdict\n\n# Framework imports\nimport gym\nimport tensorflow as tf\n\n# Self imports: utils\nfrom baselines.common import set_global_seeds\nfrom baselines import logger\nimport baselines.common.tf_util as U\nfrom baselines.common.rllab_utils import Rllab2GymWrapper, rllab_env_from_name\nfrom baselines.common.atari_wrappers import make_atari, wrap_deepmind\nfrom baselines.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom baselines.common.vec_env.vec_frame_stack import VecFrameStack\nfrom baselines.common.cmd_util import get_env_type\nfrom baselines.common import set_global_seeds as set_all_seeds\n# Self imports: algorithm\nfrom baselines.policy.mlp_policy import MlpPolicy\nfrom baselines.policy.cnn_policy import CnnPolicy\nfrom baselines.pois2 import pois2\n\ndef train(env, policy, policy_init, seed, njobs=1, **alg_args):\n\n if env.startswith('rllab.'):\n # Get env name and class\n env_name = re.match('rllab.(\\S+)', env).group(1)\n env_rllab_class = rllab_env_from_name(env_name)\n # Define env maker\n def make_env(seed=0):\n def _thunk():\n env_rllab = Rllab2GymWrapper(env_rllab_class())\n env_rllab.seed(seed)\n return env_rllab\n return _thunk\n parallel_env = SubprocVecEnv([make_env(seed + i*100) for i in range(njobs)])\n # Used later\n env_type = 'rllab'\n else:\n # Normal gym, get if Atari or not.\n env_type = get_env_type(env)\n assert env_type is not None, \"Env not recognized.\"\n # Define the correct env maker\n if env_type == 'atari':\n # Atari, custom env creation\n def make_env(seed=0):\n def _thunk():\n _env = make_atari(env)\n _env.seed(seed)\n return wrap_deepmind(_env)\n return _thunk\n parallel_env = VecFrameStack(SubprocVecEnv([make_env(seed + i*100) for i in range(njobs)]), 4)\n else:\n # Not atari, standard env creation\n def make_env(seed=0):\n def _thunk():\n _env = gym.make(env)\n _env.seed(seed)\n return _env\n return _thunk\n parallel_env = SubprocVecEnv([make_env(seed + i*100) for i in range(njobs)])\n\n if policy == 'linear':\n hid_size = num_hid_layers = 0\n use_bias = False\n elif policy == 'simple-nn':\n hid_size = [16]\n num_hid_layers = 1\n use_bias = True\n elif policy == 'nn':\n hid_size = [100, 50, 25]\n num_hid_layers = 3\n use_bias = True\n\n if policy_init == 'xavier':\n policy_initializer = tf.contrib.layers.xavier_initializer()\n elif policy_init == 'zeros':\n policy_initializer = U.normc_initializer(0.0)\n elif policy_init == 'small-weights':\n policy_initializer = U.normc_initializer(0.1)\n else:\n raise Exception('Unrecognized policy initializer.')\n\n if policy == 'linear' or policy == 'nn' or policy == 'simple-nn':\n def make_policy(name, ob_space, ac_space):\n return MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,\n hid_size=hid_size, num_hid_layers=num_hid_layers, gaussian_fixed_var=True, use_bias=use_bias, use_critic=False,\n hidden_W_init=policy_initializer, output_W_init=policy_initializer)\n elif policy == 'cnn':\n def make_policy(name, ob_space, ac_space):\n return CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,\n gaussian_fixed_var=True, use_bias=False, use_critic=False,\n hidden_W_init=policy_initializer,\n output_W_init=policy_initializer)\n else:\n raise Exception('Unrecognized policy type.')\n\n try:\n affinity = len(os.sched_getaffinity(0))\n except:\n 
affinity = njobs\n sess = U.make_session(affinity)\n sess.__enter__()\n\n set_global_seeds(seed)\n\n gym.logger.setLevel(logging.WARN)\n\n pois2.learn(parallel_env, make_policy, **alg_args)\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--seed', help='RNG seed', type=int, default=0)\n parser.add_argument('--env', type=str, default='cartpole')\n parser.add_argument('--num_episodes', type=int, default=100)\n parser.add_argument('--horizon', type=int, default=500)\n parser.add_argument('--iw_method', type=str, default='is')\n parser.add_argument('--iw_norm', type=str, default='none')\n parser.add_argument('--natural', type=bool, default=False)\n parser.add_argument('--file_name', type=str, default='progress')\n parser.add_argument('--logdir', type=str, default='logs')\n parser.add_argument('--bound', type=str, default='max-d2')\n parser.add_argument('--delta', type=float, default=0.99)\n parser.add_argument('--njobs', type=int, default=-1)\n parser.add_argument('--policy', type=str, default='nn')\n parser.add_argument('--policy_init', type=str, default='xavier')\n parser.add_argument('--max_offline_iters', type=int, default=10)\n parser.add_argument('--max_iters', type=int, default=500)\n parser.add_argument('--gamma', type=float, default=1.0)\n parser.add_argument('--center', type=bool, default=False)\n parser.add_argument('--clipping', type=bool, default=False)\n parser.add_argument('--entropy', type=str, default='none')\n parser.add_argument('--reward_clustering', type=str, default='none')\n parser.add_argument('--experiment_name', type=str, default='none')\n parser.add_argument('--save_weights', type=int, default=0)\n args = parser.parse_args()\n if args.file_name == 'progress':\n file_name = '%s_delta=%s_seed=%s_%s' % (args.env.upper(), args.delta, args.seed, time.time())\n else:\n file_name = args.file_name\n logger.configure(dir=args.logdir, format_strs=['stdout', 'csv', 'tensorboard'], file_name=file_name)\n train(env=args.env,\n policy=args.policy,\n policy_init=args.policy_init,\n n_episodes=args.num_episodes,\n horizon=args.horizon,\n seed=args.seed,\n njobs=args.njobs,\n save_weights=args.save_weights,\n max_iters=args.max_iters,\n iw_method=args.iw_method,\n iw_norm=args.iw_norm,\n use_natural_gradient=args.natural,\n bound=args.bound,\n delta=args.delta,\n gamma=args.gamma,\n max_offline_iters=args.max_offline_iters,\n center_return=args.center,\n clipping=args.clipping,\n entropy=args.entropy,\n reward_clustering=args.reward_clustering,)\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 1 11:57:26 2018\n\n@author: matteo\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.stats as sts\nimport math\n\ndef bootstrap_ci(x, conf=0.95, resamples=10000):\n means = [np.mean(x[np.random.choice(x.shape[0], size=x.shape[0], replace=True), :], axis=0) for _ in range(resamples)]\n low = np.percentile(means, (1-conf)/2 * 100, axis=0)\n high = np.percentile(means, (1 - (1-conf)/2) * 100, axis=0)\n return low, high\n\ndef nonparam_ti(x, conf=0.95, prop=0.95):\n srt = np.sort(x, axis=0)\n n = x.shape[0]\n nu = n - sts.binom.ppf(conf, n, prop)\n if nu <= 1:\n raise ValueError('T.I. does not exist')\n if nu % 2 == 0:\n nu_1 = nu_2 = int(nu / 2)\n else:\n nu_1 = int(nu / 2 - 1 / 2)\n nu_2 = nu_1 + 1\n low = srt[nu_1 - 1, :]\n high = srt[n - nu_2, :]\n return low, high\n\ndef read_data(path, iters=None, default_batchsize=100, scale='Eps'):\n df = pd.read_csv(path, encoding='utf-8')\n if iters: df = df.loc[:iters, :]\n if not 'AvgRet' in df: df['AvgRet'] = df['EpRewMean']\n if not 'EpsThisIter' in df: df['EpsThisIter'] = df['BatchSize'] \n df['EpsSoFar'] = np.cumsum(df['EpsThisIter'])\n if 'SamplesThisIter' in df: df['SamplesSoFar'] = np.cumsum(df['SamplesThisIter'])\n df['CumAvgRet'] = np.cumsum(df['AvgRet']*df[scale+'ThisIter'])/np.sum(df[scale+'ThisIter'])\n return df\n\ndef moments(dfs):\n concat_df = pd.concat(dfs, axis=1)\n mean_df = pd.concat(dfs, axis=1).groupby(by=concat_df.columns, axis=1).mean()\n std_df = pd.concat(dfs, axis=1).groupby(by=concat_df.columns, axis=1).std()\n return mean_df, std_df\n\ndef plot_all(dfs, key='AvgRet', ylim=None, scale='Samples'):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for df in dfs:\n value = df[key]\n ax.plot(df[scale+'SoFar'], value)\n return fig\n\ndef plot_ci(dfs, conf=0.95, key='AvgRet', ylim=None, scale='Eps', bootstrap=False, resamples=10000):\n n_runs = len(dfs)\n mean_df, std_df = moments(dfs)\n mean = mean_df[key]\n std = std_df[key]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(mean_df[scale+'SoFar'], mean)\n if bootstrap:\n x = np.array([df[key] for df in dfs])\n interval = bootstrap_ci(x, conf, resamples)\n else:\n interval = sts.t.interval(conf, n_runs-1,loc=mean,scale=std/np.sqrt(n_runs))\n ax.fill_between(mean_df[scale+'SoFar'], interval[0], interval[1], alpha=0.3)\n if ylim: ax.set_ylim(ylim)\n return fig\n\ndef compare(candidates, conf=0.95, key='AvgRet', ylim=None, xlim=None, scale='Episodes', bootstrap=False, resamples=10000, roll=1, separate=False, opacity=1, tolerance=False, prop=0.95):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n prop_cycle = plt.rcParams['axes.prop_cycle']\n colors = prop_cycle.by_key()['color']\n entries = []\n if type(roll) is int:\n roll = [roll]*len(candidates)\n for i, candidate_name in enumerate(candidates):\n entries.append(candidate_name)\n dfs = candidates[candidate_name]\n dfs = [dfs[j].rolling(roll[i]).mean() for j in range(len(dfs))]\n n_runs = len(dfs)\n mean_df, std_df = moments(dfs)\n mean = mean_df[key]\n std = std_df[key]\n if not separate:\n ax.plot(mean_df[scale+'SoFar'], mean) \n if bootstrap:\n x = np.array([df[key] for df in dfs])\n interval = bootstrap_ci(x, conf, resamples)\n elif tolerance:\n x = np.array([df[key] for df in dfs])\n interval = nonparam_ti(x, conf, prop)\n else:\n interval = sts.t.interval(conf, n_runs-1,loc=mean,scale=std/np.sqrt(n_runs))\n print(candidate_name, end=': ')\n print_ci(dfs, conf)\n 
ax.fill_between(mean_df[scale+'SoFar'], interval[0], interval[1], alpha=0.3)\n else:\n for d in dfs:\n ax.plot(d[scale+'SoFar'], d[key], color=colors[i], alpha=opacity)\n ax.legend(entries)\n leg = ax.get_legend()\n if separate:\n for i in range(len(entries)):\n leg.legendHandles[i].set_color(colors[i])\n leg.legendHandles[i].set_alpha(1)\n if ylim: ax.set_ylim(None,ylim)\n if xlim: ax.set_xlim(0,xlim)\n return fig\n\ndef plot_data(path, key='VanillaAvgRet'):\n df = pd.read_csv(path)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n mean = df[key]\n ax.plot(df['EpsSoFar'], mean)\n return fig\n\ndef print_ci(dfs, conf=0.95, key='CumAvgRet'):\n n_runs = len(dfs)\n mean_df, std_df = moments(dfs)\n total_horizon = np.sum(mean_df['EpLenMean'])\n mean = mean_df[key][len(mean_df)-1]\n std = std_df[key][len(mean_df)-1]\n interval = sts.t.interval(conf, n_runs-1,loc=mean,scale=std/np.sqrt(n_runs))\n print('%f \\u00B1 %f\\t[%f, %f]\\t total horizon: %d' % (mean, std, interval[0], interval[1], int(total_horizon)))\n\ndef save_ci(dfs, key, name='foo', conf=0.95, path='.', rows=501, xkey='EpisodesSoFar', bootstrap=False, resamples=10000, mult=1., header=True):\n n_runs = len(dfs)\n mean_df, std_df = moments(dfs)\n mean = mean_df[key].values * mult\n std = std_df[key].values * mult + 1e-24\n if bootstrap:\n data = np.array([df[key] * mult for df in dfs])\n interval = bootstrap_ci(data, conf, resamples) \n else:\n interval = sts.t.interval(conf, n_runs-1,loc=mean,scale=std/math.sqrt(n_runs))\n low, high = interval\n if rows is not None:\n mean = mean[:rows]\n low = low[:rows]\n high = high[:rows]\n xx = range(1,len(mean)+1) if xkey is None else mean_df[xkey]\n plotdf = pd.DataFrame({\"iteration\": xx, \"mean\" : mean, \"low\" : low, \"up\": high})\n plotdf = plotdf.iloc[0:-1:1]\n plotdf.to_csv(name + '.csv', index=False, header=header)\n \ndef save_ti(dfs, key, name='foo', conf=0.95, prop=0.95, path='.', rows=501, xkey='EpisodesSoFar', mult=1., header=True):\n mean_df, std_df = moments(dfs)\n mean = mean_df[key].values * mult\n x = np.array([df[key] for df in dfs])\n interval = nonparam_ti(x, conf=conf, prop=prop)\n low, high = interval\n if rows is not None:\n mean = mean[:rows]\n low = low[:rows]\n high = high[:rows]\n xx = range(1,len(mean)+1) if xkey is None else mean_df[xkey]\n plotdf = pd.DataFrame({\"iteration\": xx, \"mean\" : mean, \"low\" : low, \"up\": high})\n plotdf = plotdf.iloc[0:-1:1]\n plotdf.to_csv(name + '.csv', index=False, header=header)"
] | [
[
"numpy.dot",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.variables_initializer",
"numpy.mean",
"numpy.reshape",
"tensorflow.get_collection",
"numpy.isnan",
"tensorflow.placeholder",
"tensorflow.exp",
"numpy.sum",
"tensorflow.clip_by_value",
"tensorflow.get_default_session",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.reduce_mean",
"numpy.set_printoptions",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.reduce_min",
"tensorflow.sqrt",
"tensorflow.abs"
],
[
"tensorflow.contrib.layers.xavier_initializer"
],
[
"pandas.concat",
"pandas.read_csv",
"numpy.sqrt",
"numpy.random.choice",
"numpy.cumsum",
"numpy.sort",
"numpy.percentile",
"pandas.DataFrame",
"scipy.stats.binom.ppf",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
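The row above ends with the pois2 multiple-importance-sampling code. As a reading aid only (not part of the dataset), here is a small NumPy sketch of the per-episode weight that code builds with its "inverse ratio" trick: rather than exponentiating target and behavioral densities separately, it sums per-step log-ratios over the horizon and inverts the exponentiated sum across the active behavioral policies. All shapes and values below are hypothetical placeholders for the real policy log-densities.

```python
# Illustrative sketch of the balance-heuristic importance weight in the pois2 code above.
import numpy as np

rng = np.random.default_rng(0)
n_behavioral, n_episodes, horizon = 3, 5, 10        # hypothetical sizes (Cap, Nep, H)

# Hypothetical per-step log-densities; in the real code these come from the policies.
target_log_pdf = rng.normal(size=(n_episodes, horizon))
behavioral_log_pdf = rng.normal(size=(n_behavioral, n_episodes, horizon))
active_policies = np.ones(n_behavioral)              # mask of active behavioral policies

log_ratio = target_log_pdf[None, :, :] - behavioral_log_pdf      # (Cap, Nep, H)
inverse_log_ratio_episode = -log_ratio.sum(axis=2)               # (Cap, Nep)
iw = 1.0 / np.sum(np.exp(inverse_log_ratio_episode) * active_policies[:, None], axis=0)
print(iw.shape)   # (n_episodes,): one importance weight per episode
```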
NicolasHug/pyts | [
"29659fb09f568df2e7f8190f2d5a1c383dc7e9fa"
] | [
"pyts/transformation/boss.py"
] | [
"\"\"\"Code for Bag-of-SFA Symbols.\"\"\"\n\n# Author: Johann Faouzi <[email protected]>\n# License: BSD-3-Clause\n\nimport numpy as np\nfrom math import ceil\nfrom scipy.sparse import csr_matrix\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.utils.validation import check_array, check_is_fitted\nfrom sklearn.utils.multiclass import check_classification_targets\nfrom ..approximation import SymbolicFourierApproximation\nfrom ..utils import windowed_view\n\n\nclass BOSS(BaseEstimator, TransformerMixin):\n \"\"\"Bag of Symbolic Fourier Approximation Symbols.\n\n For each time series, subseries are extracted using a slidind window.\n Then the subseries are transformed into a word using the Symbolic\n Fourier Approximation (SFA) algorithm. For each time series, the words\n are grouped together and a histogram counting the occurences of each\n word is created.\n\n Parameters\n ----------\n word_size : int (default = 4)\n Size of each word.\n\n n_bins : int (default = 4)\n The number of bins to produce. It must be between 2 and 26.\n\n strategy : str (default = 'quantile')\n Strategy used to define the widths of the bins:\n\n - 'uniform': All bins in each sample have identical widths\n - 'quantile': All bins in each sample have the same number of points\n - 'normal': Bin edges are quantiles from a standard normal distribution\n - 'entropy': Bin edges are computed using information gain\n\n window_size : int or float (default = 10)\n Size of the sliding window. If float, it represents the percentage of\n the size of each time series and must be between 0 and 1. The window\n size will be computed as ``ceil(window_size * n_timestamps)``.\n\n window_step : int or float (default = 1)\n Step of the sliding window. If float, it represents the percentage of\n the size of each time series and must be between 0 and 1. The window\n size will be computed as ``ceil(window_step * n_timestamps)``.\n\n anova : bool (default = False)\n If True, the Fourier coefficient selection is done via a one-way\n ANOVA test. If False, the first Fourier coefficients are selected.\n\n drop_sum : bool (default = False)\n If True, the first Fourier coefficient (i.e. the sum of the subseries)\n is dropped. Otherwise, it is kept.\n\n norm_mean : bool (default = False)\n If True, center each subseries before scaling.\n\n norm_std : bool (default = False)\n If True, scale each subseries to unit variance.\n\n numerosity_reduction : bool (default = True)\n If True, delete sample-wise all but one occurence of back to back\n identical occurences of the same words.\n\n sparse : bool (default = True)\n Return a sparse matrix if True, else return an array.\n\n alphabet : None, 'ordinal' or array-like, shape = (n_bins,)\n Alphabet to use. If None, the first `n_bins` letters of the Latin\n alphabet are used.\n\n Attributes\n ----------\n vocabulary_ : dict\n A mapping of feature indices to terms.\n\n References\n ----------\n .. [1] P. Schäfer, \"The BOSS is concerned with time series classification\n in the presence of noise\". 
Data Mining and Knowledge Discovery,\n 29(6), 1505-1530 (2015).\n\n Examples\n --------\n >>> from pyts.datasets import load_gunpoint\n >>> from pyts.transformation import BOSS\n >>> X_train, X_test, _, _ = load_gunpoint(return_X_y=True)\n >>> boss = BOSS(word_size=2, n_bins=2, sparse=False)\n >>> boss.fit(X_train) # doctest: +ELLIPSIS\n BOSS(...)\n >>> sorted(boss.vocabulary_.values())\n ['aa', 'ab', 'ba', 'bb']\n >>> boss.transform(X_test) # doctest: +ELLIPSIS\n array(...)\n\n \"\"\"\n\n def __init__(self, word_size=4, n_bins=4, strategy='quantile',\n window_size=10, window_step=1, anova=False, drop_sum=False,\n norm_mean=False, norm_std=False, numerosity_reduction=True,\n sparse=True, alphabet=None):\n self.word_size = word_size\n self.n_bins = n_bins\n self.strategy = strategy\n self.window_size = window_size\n self.window_step = window_step\n self.anova = anova\n self.drop_sum = drop_sum\n self.norm_mean = norm_mean\n self.norm_std = norm_std\n self.numerosity_reduction = numerosity_reduction\n self.sparse = sparse\n self.alphabet = alphabet\n\n def fit(self, X, y=None):\n \"\"\"Fit the model according to the given training data.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_timestamps)\n Training vector.\n\n y : None or array-like, shape = (n_samples,)\n Class labels for each data sample.\n\n Returns\n -------\n self : object\n\n \"\"\"\n X = check_array(X)\n n_samples, n_timestamps = X.shape\n if y is not None:\n check_classification_targets(y)\n\n window_size, window_step = self._check_params(n_timestamps)\n n_windows = (n_timestamps - window_size + window_step) // window_step\n\n X_windowed = windowed_view(\n X, window_size=window_size, window_step=window_step\n )\n X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)\n\n sfa = SymbolicFourierApproximation(\n n_coefs=self.word_size, drop_sum=self.drop_sum, anova=self.anova,\n norm_mean=self.norm_mean, norm_std=self.norm_std,\n n_bins=self.n_bins, strategy=self.strategy, alphabet=self.alphabet\n )\n if y is None:\n y_repeated = None\n else:\n y_repeated = np.repeat(y, n_windows)\n X_sfa = sfa.fit_transform(X_windowed, y_repeated)\n\n X_word = np.asarray([''.join(X_sfa[i])\n for i in range(n_samples * n_windows)])\n X_word = X_word.reshape(n_samples, n_windows)\n\n if self.numerosity_reduction:\n not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],\n np.full(n_samples, True)]\n X_bow = np.asarray([' '.join(X_word[i, not_equal[i]])\n for i in range(n_samples)])\n else:\n X_bow = np.asarray([' '.join(X_word[i]) for i in range(n_samples)])\n\n vectorizer = CountVectorizer()\n vectorizer.fit(X_bow)\n self.vocabulary_ = {value: key for key, value in\n vectorizer.vocabulary_.items()}\n self._window_size = window_size\n self._window_step = window_step\n self._n_windows = n_windows\n self._sfa = sfa\n self._vectorizer = vectorizer\n return self\n\n def transform(self, X):\n \"\"\"Transform the provided data.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_timestamps)\n Test samples.\n\n Returns\n -------\n X_new : sparse matrix, shape = (n_samples, n_words)\n Document-term matrix.\n\n \"\"\"\n check_is_fitted(self, ['_sfa', '_vectorizer', 'vocabulary_'])\n X = check_array(X)\n n_samples, n_timestamps = X.shape\n\n X_windowed = windowed_view(\n X, window_size=self._window_size, window_step=self._window_step\n )\n X_windowed = X_windowed.reshape(-1, self._window_size)\n\n X_sfa = self._sfa.transform(X_windowed)\n X_word = np.asarray([''.join(X_sfa[i]) for i in range(X_sfa.shape[0])])\n 
X_word = X_word.reshape(n_samples, self._n_windows)\n\n if self.numerosity_reduction:\n not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],\n np.full(n_samples, True)]\n X_bow = np.asarray([' '.join(X_word[i, not_equal[i]])\n for i in range(n_samples)])\n else:\n X_bow = np.asarray([' '.join(X_word[i]) for i in range(n_samples)])\n\n X_boss = self._vectorizer.transform(X_bow)\n if not self.sparse:\n return X_boss.A\n return csr_matrix(X_boss)\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the data then transform it.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_timestamps)\n Training vector.\n\n y : None or array-like, shape = (n_samples,)\n Class labels for each data sample.\n\n Returns\n -------\n X_new : sparse matrix, shape = (n_samples, n_words)\n Document-term matrix.\n\n \"\"\"\n X = check_array(X)\n n_samples, n_timestamps = X.shape\n if y is not None:\n check_classification_targets(y)\n\n window_size, window_step = self._check_params(n_timestamps)\n n_windows = (n_timestamps - window_size + window_step) // window_step\n\n X_windowed = windowed_view(\n X, window_size=window_size, window_step=window_step\n )\n X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)\n\n sfa = SymbolicFourierApproximation(\n n_coefs=self.word_size, drop_sum=self.drop_sum, anova=self.anova,\n norm_mean=self.norm_mean, norm_std=self.norm_std,\n n_bins=self.n_bins, strategy=self.strategy, alphabet=self.alphabet\n )\n if y is None:\n y_repeated = None\n else:\n y_repeated = np.repeat(y, n_windows)\n X_sfa = sfa.fit_transform(X_windowed, y_repeated)\n\n X_word = np.asarray([''.join(X_sfa[i])\n for i in range(n_samples * n_windows)])\n X_word = X_word.reshape(n_samples, n_windows)\n\n if self.numerosity_reduction:\n not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],\n np.full(n_samples, True)]\n X_bow = np.asarray([' '.join(X_word[i, not_equal[i]])\n for i in range(n_samples)])\n else:\n X_bow = np.asarray([' '.join(X_word[i]) for i in range(n_samples)])\n\n vectorizer = CountVectorizer()\n X_boss = vectorizer.fit_transform(X_bow)\n self.vocabulary_ = {value: key for key, value in\n vectorizer.vocabulary_.items()}\n self._window_size = window_size\n self._window_step = window_step\n self._n_windows = n_windows\n self._sfa = sfa\n self._vectorizer = vectorizer\n if not self.sparse:\n return X_boss.A\n return csr_matrix(X_boss)\n\n def _check_params(self, n_timestamps):\n if not isinstance(self.word_size, (int, np.integer)):\n raise TypeError(\"'word_size' must be an integer.\")\n if not self.word_size >= 1:\n raise ValueError(\"'word_size' must be a positive integer.\")\n\n if not isinstance(self.window_size,\n (int, np.integer, float, np.floating)):\n raise TypeError(\"'window_size' must be an integer or a float.\")\n if isinstance(self.window_size, (int, np.integer)):\n if self.drop_sum:\n if not 1 <= self.window_size <= (n_timestamps - 1):\n raise ValueError(\n \"If 'window_size' is an integer, it must be greater \"\n \"than or equal to 1 and lower than or equal to \"\n \"(n_timestamps - 1) if 'drop_sum=True'.\"\n )\n else:\n if not 1 <= self.window_size <= n_timestamps:\n raise ValueError(\n \"If 'window_size' is an integer, it must be greater \"\n \"than or equal to 1 and lower than or equal to \"\n \"n_timestamps if 'drop_sum=False'.\"\n )\n window_size = self.window_size\n else:\n if not 0 < self.window_size <= 1:\n raise ValueError(\n \"If 'window_size' is a float, it must be greater \"\n \"than 0 and lower than or equal to 1.\"\n )\n window_size = 
ceil(self.window_size * n_timestamps)\n\n if not isinstance(self.window_step,\n (int, np.integer, float, np.floating)):\n raise TypeError(\"'window_step' must be an integer or a float.\")\n if isinstance(self.window_step, (int, np.integer)):\n if not 1 <= self.window_step <= n_timestamps:\n raise ValueError(\n \"If 'window_step' is an integer, it must be greater \"\n \"than or equal to 1 and lower than or equal to \"\n \"n_timestamps.\"\n )\n window_step = self.window_step\n else:\n if not 0 < self.window_step <= 1:\n raise ValueError(\n \"If 'window_step' is a float, it must be greater \"\n \"than 0 and lower than or equal to 1.\"\n )\n window_step = ceil(self.window_step * n_timestamps)\n if self.drop_sum:\n if not self.word_size <= (window_size - 1):\n raise ValueError(\n \"'word_size' must be lower than or equal to \"\n \"(window_size - 1) if 'drop_sum=True'.\"\n )\n else:\n if not self.word_size <= window_size:\n raise ValueError(\n \"'word_size' must be lower than or equal to \"\n \"window_size if 'drop_sum=False'.\"\n )\n return window_size, window_step\n"
] | [
[
"sklearn.utils.validation.check_is_fitted",
"sklearn.utils.validation.check_array",
"sklearn.utils.multiclass.check_classification_targets",
"scipy.sparse.csr_matrix",
"numpy.full",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.repeat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
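Two steps of the BOSS transformer in the row above are easy to miss in the long source string: the sliding-window count `(n_timestamps - window_size + window_step) // window_step` and the numerosity-reduction mask that keeps a single representative of each run of identical consecutive words. The following standalone NumPy sketch (illustrative, with made-up words, not dataset content) reproduces just those two pieces.

```python
# Window count and numerosity reduction as used in the BOSS code above (toy data).
import numpy as np

n_timestamps, window_size, window_step = 24, 10, 1
n_windows = (n_timestamps - window_size + window_step) // window_step
print(n_windows)                                      # 15 windows of length 10, step 1

X_word = np.array([['ab', 'ab', 'ba', 'ba', 'ba', 'bb']])         # one sample's SFA words
not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1], np.full(1, True)]
X_bow = np.asarray([' '.join(X_word[i, not_equal[i]]) for i in range(len(X_word))])
print(X_bow)                                          # ['ab ba bb'] -- runs collapsed
```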
fishjojo/pydmfe | [
"93cfc655314933d3531b5733521a1f95a044f6cb",
"93cfc655314933d3531b5733521a1f95a044f6cb"
] | [
"examples/research/proj/C3H6.py",
"examples/research/read_umat.py"
] | [
"from pydmfet import proj_ao\nfrom pydmfet.qcwrap.pyscf_rks_ao import rks_ao\nfrom pyscf import gto,scf\nimport numpy as np\nfrom pyscf.tools import molden\nfrom pyscf import lo\nfrom pyscf.lo import iao,orth\nfrom functools import reduce\nimport math\n\nbas ='ccpvdz'\ntemp = 0.01\n\nmol = gto.Mole()\nmol.atom = open('C3H6.xyz').read()\nmol.basis = bas\nmol.charge = 0\nmol.build(max_memory = 4000, verbose=4)\n\n\n#mf = scf.RKS(mol)\nmf = rks_ao(mol,smear_sigma = temp)\nmf.xc = \"pbe,pbe\"\nmf.max_cycle = 50\n\nDMguess = None\nmf.scf(dm0=DMguess)\n\n\nnatoms = mol.natm\nimpAtom = np.zeros([natoms], dtype=int)\nfor i in range(5):\n impAtom[i] = 1\n\n\nembed = proj_ao.proj_embed(mf,impAtom, Ne_env = 8)\nembed.pop_method = 'meta_lowdin'\nembed.make_frozen_orbs(norb = 11)\n#embed.embedding_potential()\n",
"import numpy as np\nfrom argparse import ArgumentParser\n\ndef read_umat(nrow,filename):\n\n f = open(filename, \"r\")\n\n mat = np.ndarray((nrow,nrow),dtype = np.double)\n\n vec = []\n index = 0\n nline = 0\n for line in f:\n nline += 1\n elemts = line.split()\n if(nline%(nrow+1) == 1):\n if len(vec)>0:\n x = np.array(vec)\n x = x.astype(np.double)\n _nrow = x.size/6\n assert(_nrow==nrow)\n x = np.reshape(x,(nrow,-1))\n mat[:,index:index+6] = x.copy()\n index += 6\n vec = []\n continue\n else:\n for i in elemts[1:]:\n vec.append(i)\n\n x = np.array(vec)\n x = x.astype(np.double)\n x = np.reshape(x,(nrow,-1))\n mat[:,index:] = x.copy()\n\n\n f.close()\n\n return mat\n\nif __name__ == \"__main__\":\n\n parser = ArgumentParser()\n parser.add_argument(\"n\", help=\"nrow\", type=int)\n parser.add_argument(\"-f\", \"--filename\", dest=\"filename\")\n args = parser.parse_args()\n\n read_umat(args.n, args.filename)\n"
] | [
[
"numpy.zeros"
],
[
"numpy.reshape",
"numpy.array",
"numpy.ndarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lilgabz/Quantum-Algorithm-Implementations | [
"2bb5df522d76e94b300275dfefff2869ff31bc2c"
] | [
"Quantum Key Distribution/Mutation Testing/QKD Mutation Testing Cirq/Remove_mutant_2.py"
] | [
"import unittest\n\nimport cirq\nfrom cirq.ops import H, X, I\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.random import randint\n\nimport hypothesis.strategies as st\nfrom hypothesis import given, settings\n\ndef generate_binary(len):\n return randint(2, size=len)\n\ndef encode_message(bits, bases, messageLen):\n message = []\n for i in range(messageLen):\n qubits = cirq.LineQubit.range(1)\n qc = cirq.Circuit()\n if bases[i] == 0: # Prepare qubit in Z-basis\n if bits[i] == 0:\n qc.append(cirq.I(qubits[0]))\n else:\n qc.append(cirq.X(qubits[0]))\n else: # Prepare qubit in X-basis\n if bits[i] == 0:\n ### mutant - remove ###\n qc.append(cirq.I(qubits[0]))\n else:\n qc.append(cirq.X(qubits[0]))\n qc.append(cirq.H(qubits[0]))\n message.append(qc)\n return message\n\ndef measure_message(message, bases, messageLen):\n measurements = []\n for q in range(messageLen):\n if bases[q] == 0: # measuring in Z-basis\n if (not message[q].has_measurements()):\n for qubit in message[q].all_qubits():\n message[q].append(cirq.measure(qubit))\n if bases[q] == 1: # measuring in X-basis\n if (not message[q].has_measurements()):\n for qubit in message[q].all_qubits():\n message[q].append(cirq.H(qubit))\n message[q].append(cirq.measure(qubit))\n simulator = cirq.Simulator()\n measured_bit = simulator.run(message[q])\n measurements.append((measured_bit.data.iat[0,0])) \n return measurements\n\ndef remove_garbage(a_bases, b_bases, bits, messageLen):\n good_bits = []\n for q in range(messageLen):\n if a_bases[q] == b_bases[q]:\n # If both used the same basis, add\n # this to the list of 'good' bits\n good_bits.append(bits[q])\n return good_bits\n\ndef sample_bits(bits, selection):\n sample = []\n for i in selection:\n # use np.mod to make sure the\n # bit we sample is always in \n # the list range\n i = np.mod(i, len(bits))\n # pop(i) removes the element of the\n # list at index 'i'\n sample.append(bits.pop(i))\n return sample"
] | [
[
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
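The QKD row above implements BB84 with Cirq circuits; the purely classical post-processing (basis sifting, as in `remove_garbage`) can be illustrated without any quantum simulation. The sketch below is illustrative only, uses random placeholder bits, and does not touch the Cirq part of the protocol.

```python
# Classical sifting step of BB84 (cf. remove_garbage in the code above), toy data only.
import numpy as np
from numpy.random import randint

np.random.seed(0)
n = 8
alice_bits  = randint(2, size=n)
alice_bases = randint(2, size=n)
bob_bases   = randint(2, size=n)

# Keep only the positions where Alice's and Bob's bases agree.
sifted_key = [int(alice_bits[i]) for i in range(n) if alice_bases[i] == bob_bases[i]]
print(alice_bases, bob_bases, sifted_key)
```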
YMandCL/Hands-On-Deep-Learning-for-Games | [
"0225661409c3bf59ae6b7996c254bb485ebd10cb",
"0225661409c3bf59ae6b7996c254bb485ebd10cb"
] | [
"Chapter03/Chapter_3/musegen/musegen.py",
"Chapter05/Chapter_5/Chapter_5_5.py"
] | [
"# Currently this script is configured to use the note-generator model.\n\nfrom config import sequence_length, output_dir, note_generator_dir\nfrom helper import loadChorales, loadModelAndWeights, createPitchSpecificVocabularies, createDurationVocabularySpecific\nfrom music21 import note, instrument, stream, duration\nimport numpy as np\nimport os\n\n# disable GPU processing\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n# ----------------------------------------------\n\nfrom keras.utils import to_categorical\n\n# select the epoch to use when loading the weights of the model generator\ngenerator_epoch = 43\n\n# how many notes to generate ('end' marks are created along the way and the result is split into pieces)\nnumber_of_notes = 200\n\n# load chorales to create the vocabularies\nprint('loading chorales...')\nnotes = loadChorales()\n\n# create the vocabulary\nnote_vocab, note_names_vocab, note_vocab_categorical = createPitchSpecificVocabularies([x[0] for (x, _) in notes])\nduration_vocab = createDurationVocabularySpecific([d for (_, d) in notes])\nduration_vocab_categorical = to_categorical(range(len(duration_vocab)))\n\nnote_to_int = dict((note, number) for number, note in enumerate(note_vocab))\nint_to_note = dict((number, note) for number, note in enumerate(note_vocab))\n\nduration_to_int = dict((dur, number) for number, dur in enumerate(duration_vocab))\n\nduration_dim = duration_vocab.shape[0]\npitch_dim = np.array(note_vocab).shape[0]\n\nprint('loading networks...')\ndir_path = os.path.dirname(os.path.realpath(__file__))\ngenerator = loadModelAndWeights(os.path.join(dir_path, note_generator_dir, 'model.json'),\n os.path.join(dir_path, note_generator_dir, 'weights-{:02d}.hdf5'.format(generator_epoch)))\n\n# make a melody!!!\npitch_input = np.eye(pitch_dim)[np.random.choice(pitch_dim, size=sequence_length)]\nduration_input = np.eye(duration_dim)[np.random.choice(duration_dim, size=sequence_length)]\n\nprint('generating output...')\n\n# generate notes\ngenerator_output = []\n\nfor _ in range(number_of_notes):\n # reshape inputs\n pi = np.reshape(pitch_input, (1, sequence_length, pitch_dim))\n di = np.reshape(duration_input, (1, sequence_length, duration_dim))\n\n # make prediction\n pitch_pred, dur_pred = generator.predict({'pitches_input': pi, 'durations_input': di}, verbose=0)\n\n generator_output.append((pitch_pred, dur_pred))\n\n pitch_input = np.vstack([pitch_input, pitch_pred])\n pitch_input = pitch_input[1:len(pitch_input)]\n\n duration_input = np.vstack([duration_input, dur_pred])\n duration_input = duration_input[1:len(duration_input)]\n\n\noutput_notes = [(int_to_note[np.argmax(n)], duration_vocab[np.argmax(d)]) for (n, d) in generator_output]\noutput_notes = np.array(output_notes)\noutput_notes = np.reshape(output_notes, (-1, 2))\n\n# output_notes contains: pitch values in midi format (integers), 'rest' marks, 'end' marks\n\n# split the generated notes into pieces based on 'end' marks\nindices = []\nfor (ind, (n, _)) in enumerate(output_notes):\n if n == 'end':\n indices.append(ind)\nindices = np.insert(np.reshape(indices, (-1)), 0, 0)\n \npieces = [output_notes]\nif len(indices) > 1:\n pieces = ([ output_notes[(indices[j] + 1):indices[j + 1] ] for j in range(len(indices) - 1)])\n\nprint('writing output to disk...')\n\nos.makedirs(os.path.join(dir_path, output_dir, 'note-generator'), exist_ok=True)\n\n# output pieces to midi files\nfor index, notes in enumerate(pieces):\n midi_notes = []\n offset = 0\n for n, d in notes:\n 
# since a duration of 0 is included in the vocabulary (for the 'end' marks), the network may generate a 0 duration for other notes\n # naively correct and report this erroneous behaviour\n if abs(float(d)) < 0.001:\n print('found zero duration')\n d = '1.0'\n if n == 'rest':\n new_note = note.Rest()\n new_note.duration = duration.Duration(float(d))\n new_note.offset = offset\n new_note.storedInstrument = instrument.Piano()\n midi_notes.append(new_note)\n else:\n new_note = note.Note(int(n))\n new_note.duration = duration.Duration(float(d))\n new_note.offset = offset\n new_note.storedInstrument = instrument.Piano()\n midi_notes.append(new_note)\n offset += float(d)\n \n midi_stream = stream.Stream(midi_notes)\n midi_stream.write('midi', fp=os.path.join(dir_path, output_dir, 'note-generator', 'sample-{}.mid'.format(index)))",
"# -*- coding: utf-8 -*-\n# source from https://github.com/keon/deep-q-learning/blob/master/dqn.py\nimport random\nimport gym\nimport numpy as np\nfrom collections import deque\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\nEPISODES = 1000\n\nclass DQNAgent:\n def __init__(self, state_size, action_size):\n self.state_size = state_size\n self.action_size = action_size\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95 # discount rate\n self.epsilon = 1.0 # exploration rate\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.995\n self.learning_rate = 0.001\n self.model = self._build_model()\n\n def _build_model(self):\n # Neural Net for Deep-Q learning Model\n model = Sequential()\n model.add(Dense(24, input_dim=self.state_size, activation='relu'))\n model.add(Dense(24, activation='relu'))\n model.add(Dense(self.action_size, activation='linear'))\n model.compile(loss='mse',\n optimizer=Adam(lr=self.learning_rate))\n return model\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def act(self, state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_size)\n act_values = self.model.predict(state)\n return np.argmax(act_values[0]) # returns action\n\n def replay(self, batch_size):\n minibatch = random.sample(self.memory, batch_size)\n for state, action, reward, next_state, done in minibatch:\n target = reward\n if not done:\n target = (reward + self.gamma *\n np.amax(self.model.predict(next_state)[0]))\n target_f = self.model.predict(state)\n target_f[0][action] = target\n self.model.fit(state, target_f, epochs=1, verbose=0)\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n def load(self, name):\n self.model.load_weights(name)\n\n def save(self, name):\n self.model.save_weights(name)\n\n\nif __name__ == \"__main__\":\n env = gym.make('MountainCar-v0')\n state_size = env.observation_space.shape[0]\n action_size = env.action_space.n\n agent = DQNAgent(state_size, action_size)\n # agent.load(\"./save/cartpole-dqn.h5\")\n done = False\n batch_size = 32\n\n for e in range(EPISODES):\n state = env.reset()\n state = np.reshape(state, [1, state_size]) \n for time in range(500):\n # env.render()\n action = agent.act(state)\n env.render()\n next_state, reward, done, _ = env.step(action)\n reward = reward if not done else -10\n next_state = np.reshape(next_state, [1, state_size])\n agent.remember(state, action, reward, next_state, done)\n state = next_state\n if done:\n print(\"episode: {}/{}, score: {}, e: {:.2}\"\n .format(e, EPISODES, time, agent.epsilon))\n break\n if len(agent.memory) > batch_size:\n agent.replay(batch_size)\n # if e % 10 == 0:\n # agent.save(\"./save/cartpole-dqn.h5\")"
] | [
[
"numpy.random.choice",
"numpy.reshape",
"numpy.eye",
"numpy.argmax",
"numpy.array",
"numpy.vstack"
],
[
"numpy.reshape",
"numpy.argmax",
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
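The second file in the row above is the standard Keras DQN example; the core of `replay()` is the Bellman target `r + gamma * max_a' Q(s', a')`. The snippet below is a minimal illustration of that one line with a hypothetical Q-value vector standing in for `self.model.predict(next_state)[0]`; it is not a replacement for the agent code.

```python
# Bellman target as computed inside DQNAgent.replay() above (hypothetical Q-values).
import numpy as np

gamma = 0.95
reward, done = -1.0, False
q_next = np.array([0.2, 0.7, 0.1])          # stand-in for model.predict(next_state)[0]

target = reward if done else reward + gamma * np.amax(q_next)
print(target)                               # -1 + 0.95 * 0.7 = -0.335
```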
HassanDayoub/tfx | [
"dc9221abbb8dad991d1ae22fb91876da1290efae"
] | [
"tfx/orchestration/kubeflow/executor_wrappers.py"
] | [
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Wrappers for TFX executors running as part of a Kubeflow pipeline.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport argparse\nimport json\nimport os\nimport re\nfrom future import utils\nimport six\nimport tensorflow as tf\nfrom typing import Any, Dict, List, Text\n\nfrom tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import\n\nfrom tfx import version\nfrom tfx.components.base import base_executor\nfrom tfx.utils import import_utils\nfrom tfx.utils import types\n\n\ndef parse_tfx_type(json_str: Text):\n \"\"\"Parses a list of artifacts and their types from json.\"\"\"\n json_artifact_list = json.loads(json_str)\n\n tfx_types = []\n for json_artifact in json_artifact_list:\n tfx_type = types.TfxArtifact.parse_from_json_dict(json_artifact)\n tfx_types.append(tfx_type)\n\n return tfx_types\n\n\ndef to_snake_case(name: Text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\nclass KubeflowExecutorWrapper(utils.with_metaclass(abc.ABCMeta), object):\n \"\"\"Abstract base class for all Kubeflow Pipelines-based TFX components.\"\"\"\n\n def __init__(\n self,\n executor_class_path: Text,\n name: Text,\n input_dict: Dict[Text, List[types.TfxArtifact]],\n outputs: Text,\n exec_properties: Dict[Text, Any],\n ):\n self._input_dict = input_dict\n self._output_dict = types.parse_tfx_type_dict(outputs)\n self._component_name = to_snake_case(name)\n self._exec_properties = exec_properties\n self._output_dir = self._exec_properties['output_dir']\n self._workflow_id = os.environ['WORKFLOW_ID']\n\n raw_args = self._exec_properties.get('beam_pipeline_args', [])\n\n # Beam expects str types for it's pipeline args. 
Ensure unicode type is\n # converted to str if required.\n beam_pipeline_args = []\n for arg in raw_args:\n # In order to support both Py2 and Py3: Py3 doesn't have `unicode` type.\n if six.PY2 and isinstance(arg, unicode):\n arg = arg.encode('ascii', 'ignore')\n\n beam_pipeline_args.append(arg)\n\n # TODO(zhitaoli): Revisit usage of setup_file here.\n module_dir = os.path.dirname(os.path.dirname(version.__file__))\n setup_file = os.path.join(module_dir, 'setup.py')\n tf.logging.info('Using setup_file \\'%s\\' to capture TFX dependencies',\n setup_file)\n beam_pipeline_args.append('--setup_file={}'.format(setup_file))\n\n executor_cls = import_utils.import_class_by_path(executor_class_path)\n # TODO(swoonna): Switch to execution_id when available\n unique_id = '{}_{}'.format(self._component_name, self._workflow_id)\n # TODO(swoonna): Add tmp_dir to additional_pipeline_args\n executor_context = base_executor.BaseExecutor.Context(\n beam_pipeline_args=beam_pipeline_args,\n tmp_dir=os.path.join(self._output_dir, '.temp', ''),\n unique_id=unique_id)\n self._executor = executor_cls(executor_context)\n\n def _set_outputs(self):\n tf.logging.info('Using workflow id {}'.format(self._workflow_id))\n\n max_input_span = 0\n for input_list in self._input_dict.values():\n for single_input in input_list:\n max_input_span = max(max_input_span, single_input.span)\n for output_name, output_artifact_list in self._output_dict.items():\n for output_artifact in output_artifact_list:\n output_artifact.uri = os.path.join(self._output_dir,\n self._component_name, output_name,\n self._workflow_id,\n output_artifact.split, '')\n output_artifact.span = max_input_span\n\n def run(self, output_basedir: Text = '/'):\n \"\"\"Runs the wrapped Executor, and writes metadata of output artifacts.\n\n Args:\n output_basedir: Base directory to which output artifacts metadata\n is written. Useful for unit tests.\n \"\"\"\n self._executor.Do(self._input_dict, self._output_dict,\n self._exec_properties)\n\n output_dir = os.path.join(output_basedir, 'output/ml_metadata')\n tf.gfile.MakeDirs(output_dir)\n for output_name, output_artifact_list in self._output_dict.items():\n filename = os.path.join(output_dir, output_name)\n with file_io.FileIO(filename, 'w') as f:\n output_list = [x.json_dict() for x in output_artifact_list]\n f.write(json.dumps(output_list))\n\n\n# TODO(b/132197968): Get rid of all the individual wrapper classes below and\n# combine them into a single generic one that constructs the input dict from\n# the individual named arguments instead. 
In the future, the generic wrapper\n# can call into TFX drivers to handle component-specific logic as well.\nclass CsvExampleGenWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for CSVExampleGen component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(CsvExampleGenWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='CSVExampleGen',\n input_dict={\n 'input-base': parse_tfx_type(args.input_base),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_input_artifact_span()\n self._set_outputs()\n\n def _set_input_artifact_span(self):\n for input_artifact in self._input_dict['input-base']:\n matched = re.match(r'span_([0-9]+)', input_artifact.uri)\n span = matched.group(1) if matched else 1\n input_artifact.span = span\n\n\nclass BigQueryExampleGenWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for BigQueryExampleGen component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(BigQueryExampleGenWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='BigQueryExampleGen',\n input_dict={},\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass StatisticsGenWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for StatisticsGen component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(StatisticsGenWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='StatisticsGen',\n input_dict={\n 'input_data': parse_tfx_type(args.input_data),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass SchemaGenWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for SchemaGen component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(SchemaGenWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='SchemaGen',\n input_dict={\n 'stats': parse_tfx_type(args.stats),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass ExampleValidatorWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for ExampleValidator component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(ExampleValidatorWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='ExampleValidator',\n input_dict={\n 'stats': parse_tfx_type(args.stats),\n 'schema': parse_tfx_type(args.schema),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass TransformWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for Transform component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(TransformWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='Transform',\n input_dict={\n 'input_data': parse_tfx_type(args.input_data),\n 'schema': parse_tfx_type(args.schema),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass TrainerWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for Trainer component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(TrainerWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='Trainer',\n input_dict={\n 'transformed_examples': parse_tfx_type(args.transformed_examples),\n 'transform_output': parse_tfx_type(args.transform_output),\n 'schema': parse_tfx_type(args.schema),\n },\n outputs=args.outputs,\n 
exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n # TODO(ajaygopinathan): Implement warm starting.\n self._exec_properties['warm_starting'] = False\n self._exec_properties['warm_start_from'] = None\n\n\nclass EvaluatorWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for Evaluator component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(EvaluatorWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='Evaluator',\n input_dict={\n 'examples': parse_tfx_type(args.examples),\n 'model_exports': parse_tfx_type(args.model_exports),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass ModelValidatorWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for ModelValidator component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(ModelValidatorWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='ModelValidator',\n input_dict={\n 'examples': parse_tfx_type(args.examples),\n 'model': parse_tfx_type(args.model),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n # TODO(ajaygopinathan): Implement latest blessed model determination.\n self._exec_properties['latest_blessed_model'] = None\n self._exec_properties['latest_blessed_model_id'] = None\n\n\nclass PusherWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for Pusher component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(PusherWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='Pusher',\n input_dict={\n 'model_export': parse_tfx_type(args.model_export),\n 'model_blessing': parse_tfx_type(args.model_blessing),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n # TODO(ajaygopinathan): Implement latest pushed model\n self._exec_properties['latest_pushed_model'] = None\n"
] | [
[
"tensorflow.python.lib.io.file_io.FileIO",
"tensorflow.gfile.MakeDirs",
"tensorflow.logging.info"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
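The tfx wrapper row above defines a small `to_snake_case` regex helper whose two-pass substitution is not obvious at a glance. The following standalone check simply copies that helper out of the row and runs it on two component names; the example inputs are chosen for illustration.

```python
# Standalone check of the to_snake_case helper from the tfx wrapper code above.
import re

def to_snake_case(name):
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

print(to_snake_case('CsvExampleGen'))        # csv_example_gen
print(to_snake_case('BigQueryExampleGen'))   # big_query_example_gen
```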
maptube/UMaaS | [
"0758d8352213f332546d728f3eb02411c16c97ac"
] | [
"benchmark/benchmark_SingleOrigin.py"
] | [
"import timeit\nimport os.path\nimport numpy as np\nfrom math import exp, fabs\nfrom sys import float_info\n\nfrom globals import *\nfrom utils import loadMatrix, resizeMatrix\n\nfrom models.SingleOrigin import SingleOrigin\n\n\"\"\"\nBenchmarks for the Single Origin Constrained model (models/SingleOrigin.py)\nAll code here is lifted from the original model code and changed to be\nself-contained (no setup) so that timings of various optimisations are easy.\nCode here is designed to be a test of timings, NOT necessarily a test of\nreturn values, although real data has been used wherever possible i.e. instead\nof an NxN matrix containing random values, I try to load in a real matrix\ninstead.\n\"\"\"\n\n#modelRunsDir = '../model-runs'\n#TObsFilename = 'TObs.bin' #1 mode\n#CijRoadMinFilename = 'Cij_road_min.bin'\n\n#load and init\nTij=loadMatrix(os.path.join(modelRunsDir,TObs31Filename))\ncij=loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))\n#end load and init\n\n###############################################################################\n\n\"\"\"\ncalculateCBar_slow\nMean trips calculation, straight conversion from original C# code, no python optimisation\n@returns float\n\"\"\"\ndef benchmark_calculateCBar_slow():\n #main code\n (M, N) = np.shape(Tij)\n CNumerator = 0.0\n CDenominator = 0.0\n for i in range(0,N):\n for j in range(0,N):\n CNumerator += Tij[i, j] * cij[i, j]\n CDenominator += Tij[i, j]\n CBar = CNumerator / CDenominator\n print(\"CBar=\",CBar)\n\n return CBar\n\n###############################################################################\n\n\"\"\"\ncalculateCBar_fast\nMean trips calculation, python optimised version of \"_slow\"\n@returns float (NOTE: the return value MUST be identical to the _slow version, to prove they're functionally identical)\n\"\"\"\ndef benchmark_calculateCBar_fast():\n #load and init\n Tij=loadMatrix(os.path.join(modelRunsDir,TObs31Filename))\n cij=loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))\n #end load and init\n\n #main code\n CNumerator2 = np.sum(Tij*cij)\n CDenominator2 = np.sum(Tij)\n CBar2=CNumerator2/CDenominator2\n print(\"CBar2=\",CBar2)\n\n return CBar2\n\n###############################################################################\n\n\"\"\"\nThis is a benchmark of the simple Python code for SingleOrigin using different matrix sizes.\nIt is a test for how long a single execution of the main loop takes. Timings are printed\nto the console based on 1000 runs of the model code i.e. 
the timing you see in seconds\nmust be divided by 1000.\nNOTE: this could take a VERY long time to run if you pass in a high number for Nfinish \n\"\"\"\ndef benchmarkSingleOriginMatrixSizes(Nstart,Nfinish,Nstep):\n print(\"benchmark_SingleDest running matrix Nstart=\",Nstart,\" Nfinish=\",Nfinish, \" Nstep=\",Nstep)\n\n #load testing matrices\n TObs1 = loadMatrix(os.path.join(modelRunsDir,TObs31Filename))\n Cij1 = loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))\n\n for N in range(Nstart,Nfinish,Nstep):\n #print(\"TPred runModel N=\",N)\n #set up the model\n testModel = SingleOrigin()\n (TPred, secs)=testModel.benchmarkRun(1000,resizeMatrix(TObs1,N),resizeMatrix(Cij1,N),1.0)\n #NOTE: timing printed to console based on 1000 iterations of the main loop in the above code\n #Should not contain any setup timings - only the actual algorithm run time.\n print(N,\",1000,\",secs) #all console logging from here - makes it nice and easy to import into excel\n\n###############################################################################\n\n"
] | [
[
"numpy.shape",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
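The benchmark row above contrasts a double-loop mean-trip-cost computation (`benchmark_calculateCBar_slow`) with its vectorized form `np.sum(Tij*cij)/np.sum(Tij)`. As a quick illustrative check (on a small random matrix rather than the real TObs/Cij data, which is not available here), the two agree to floating-point precision:

```python
# Check that the vectorized CBar matches the double loop from the benchmark code above.
import numpy as np

rng = np.random.default_rng(1)
Tij = rng.random((4, 4))
cij = rng.random((4, 4))

num = den = 0.0
for i in range(4):
    for j in range(4):
        num += Tij[i, j] * cij[i, j]
        den += Tij[i, j]
cbar_slow = num / den

cbar_fast = np.sum(Tij * cij) / np.sum(Tij)
assert abs(cbar_slow - cbar_fast) < 1e-12
print(cbar_slow, cbar_fast)
```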
joe-jordan/picosdk-python-wrappers | [
"76f393b500200de168b4f2b74b74aad74d89fd92"
] | [
"ps3000aExamples/ps3000aBlockMSOExample.py"
] | [
"#\n# Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms.\n#\n# PS3000A BLOCK MODE MSO EXAMPLE\n# This example opens a 3000a driver device, sets up one digital port and a trigger to collect a block of data.\n# This data is then split into the indivual digital channels and plotted as the binary value against time in ns.\n\nimport ctypes\nfrom picosdk.ps3000a import ps3000a as ps\nfrom picosdk.functions import splitMSODataPort0, assert_pico_ok\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom array import *\n\n# Gives the device a handle\nstatus = {}\nchandle = ctypes.c_int16()\n\n# Opens the device/s\nstatus[\"openunit\"] = ps.ps3000aOpenUnit(ctypes.byref(chandle), None)\n\ntry:\n assert_pico_ok(status[\"openunit\"])\nexcept:\n # powerstate becomes the status number of openunit\n powerstate = status[\"openunit\"]\n\n # If powerstate is the same as 282 then it will run this if statement\n if powerstate == 282:\n # Changes the power input to \"PICO_POWER_SUPPLY_NOT_CONNECTED\"\n status[\"ChangePowerSource\"] = ps.ps3000aChangePowerSource(chandle, 282)\n # If the powerstate is the same as 286 then it will run this if statement\n elif powerstate == 286:\n # Changes the power input to \"PICO_USB3_0_DEVICE_NON_USB3_0_PORT\"\n status[\"ChangePowerSource\"] = ps.ps3000aChangePowerSource(chandle, 286)\n else:\n raise\n\n assert_pico_ok(status[\"ChangePowerSource\"])\n\n# set up digital port\n# handle = chandle\n# PS3000a_DIGITAL_PORT = 0x80\n# Enable = 1\n# logicLevel = 10000\nstatus[\"SetDigitalPort\"] = ps.ps3000aSetDigitalPort( chandle, 0x80, 1, 10000)\nassert_pico_ok(status[\"SetDigitalPort\"])\n\n# Setting the number of sample to be collected\npreTriggerSamples = 400\npostTriggerSamples = 400\nmaxsamples = preTriggerSamples + postTriggerSamples\n\n# Gets timebase innfomation\n# Handle = chandle\n# Timebase = 2 = timebase\n# Nosample = maxsamples\n# TimeIntervalNanoseconds = ctypes.byref(timeIntervalns)\n# MaxSamples = ctypes.byref(returnedMaxSamples)\n# Segement index = 0\ntimebase = 8\ntimeIntervalns = ctypes.c_float()\nreturnedMaxSamples = ctypes.c_int16()\nstatus[\"GetTimebase\"] = ps.ps3000aGetTimebase2(chandle, timebase, maxsamples, ctypes.byref(timeIntervalns), 1, ctypes.byref(returnedMaxSamples), 0)\nassert_pico_ok(status[\"GetTimebase\"])\n\n# Creates a overlow location for data\noverflow = ctypes.c_int16()\n# Creates converted types maxsamples\ncmaxSamples = ctypes.c_int32(maxsamples)\n\n# Create buffers ready for assigning pointers for data collection\nbufferAMax = (ctypes.c_int16 * maxsamples)()\nbufferAMin = (ctypes.c_int16 * maxsamples)()\n\n# Setting the data buffer location for data collection from PS3000A_DIGITAL_PORT0\n# Handle = Chandle\n# source = PS3000A_DIGITAL_PORT0 = 0x80\n# Buffer max = ctypes.byref(bufferAMax)\n# Buffer min = ctypes.byref(bufferAMin)\n# Buffer length = maxsamples\n# Segment index = 0\n# Ratio mode = ps3000A_Ratio_Mode_None = 0\nstatus[\"SetDataBuffers\"] = ps.ps3000aSetDataBuffers(chandle, 0x80, ctypes.byref(bufferAMax), ctypes.byref(bufferAMin), maxsamples, 0, 0)\nassert_pico_ok(status[\"SetDataBuffers\"])\n\n# Starts the block capture\n# Handle = chandle\n# Number of prTriggerSamples\n# Number of postTriggerSamples\n# Timebase = 2 = 4ns (see Programmer's guide for more information on timebases)\n# time indisposed ms = None (This is not needed within the example)\n# Segment index = 0\n# LpRead = None\n# pParameter = None\nstatus[\"runblock\"] = ps.ps3000aRunBlock(chandle, preTriggerSamples, postTriggerSamples, 
timebase, 1, None, 0, None, None)\nassert_pico_ok(status[\"runblock\"])\n\n# Creates a overlow location for data\noverflow = (ctypes.c_int16 * 10)()\n# Creates converted types maxsamples\ncmaxSamples = ctypes.c_int32(maxsamples)\n\n# Checks data collection to finish the capture\nready = ctypes.c_int16(0)\ncheck = ctypes.c_int16(0)\nwhile ready.value == check.value:\n status[\"isReady\"] = ps.ps3000aIsReady(chandle, ctypes.byref(ready))\n\n# Handle = chandle\n# start index = 0\n# noOfSamples = ctypes.byref(cmaxSamples)\n# DownSampleRatio = 0\n# DownSampleRatioMode = 0\n# SegmentIndex = 0\n# Overflow = ctypes.byref(overflow)\n\nstatus[\"GetValues\"] = ps.ps3000aGetValues(chandle, 0, ctypes.byref(cmaxSamples), 0, 0, 0, ctypes.byref(overflow))\nassert_pico_ok(status[\"GetValues\"])\n\nbufferAMaxBinaryD0, bufferAMaxBinaryD1, bufferAMaxBinaryD2, bufferAMaxBinaryD3, bufferAMaxBinaryD4, bufferAMaxBinaryD5, bufferAMaxBinaryD6, bufferAMaxBinaryD7 = splitMSODataPort0(cmaxSamples, bufferAMax)\n\n# Creates the time data\ntime = np.linspace(0, (cmaxSamples.value) * timeIntervalns.value, cmaxSamples.value)\n\n# Plots the data from digital channel onto a graph\nplt.plot(time, bufferAMaxBinaryD0[:])\nplt.plot(time, bufferAMaxBinaryD1[:])\nplt.plot(time, bufferAMaxBinaryD2[:])\nplt.plot(time, bufferAMaxBinaryD3[:])\nplt.plot(time, bufferAMaxBinaryD4[:])\nplt.plot(time, bufferAMaxBinaryD5[:])\nplt.plot(time, bufferAMaxBinaryD6[:])\nplt.plot(time, bufferAMaxBinaryD7[:])\nplt.xlabel('Time (ns)')\nplt.ylabel('Binary')\nplt.show()\n\n\n# Stops the scope\n# Handle = chandle\nstatus[\"stop\"] = ps.ps3000aStop(chandle)\nassert_pico_ok(status[\"stop\"])\n\n# Closes the unit\n# Handle = chandle\nstatus[\"stop\"] = ps.ps3000aCloseUnit(chandle)\nassert_pico_ok(status[\"stop\"])\n\n# Displays the staus returns\nprint(status)"
] | [
[
"numpy.linspace",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
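The ps3000a MSO record above ends by building a time axis with numpy.linspace and plotting the eight decoded digital channels with matplotlib (the only APIs listed for this row). Below is a minimal, hardware-free sketch of just that plotting pattern; the synthetic 0/1 channel data, the 4 ns sample interval, and the per-channel offset are illustrative assumptions, not part of the picosdk API.

```python
import numpy as np
import matplotlib.pyplot as plt

# Illustrative assumptions: 800 samples at a 4 ns interval, 8 synthetic
# digital channels (random 0/1 values) standing in for the decoded MSO port.
n_samples = 800
interval_ns = 4.0
rng = np.random.default_rng(0)
channels = [rng.integers(0, 2, n_samples) for _ in range(8)]

# Time axis in nanoseconds, mirroring the linspace call in the record above.
t = np.linspace(0, n_samples * interval_ns, n_samples)

for i, ch in enumerate(channels):
    # Offset each trace by 2 units so the 8 binary signals do not overlap.
    plt.plot(t, ch + 2 * i, label=f'D{i}')

plt.xlabel('Time (ns)')
plt.ylabel('Binary (offset per channel)')
plt.legend(loc='upper right', ncol=4)
plt.show()
```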
Kshitiz-Bansal/wavetorch | [
"927ad02dc9db83f72b8df1d91418a6681e60fd56",
"927ad02dc9db83f72b8df1d91418a6681e60fd56"
] | [
"wavetorch/io.py",
"study/utils/plot_mem.py"
] | [
"import copy\nimport os\n\nimport torch\n\nfrom . import geom\nfrom .cell import WaveCell\nfrom .probe import WaveIntensityProbe\nfrom .rnn import WaveRNN\nfrom .source import WaveSource\nfrom .utils import set_dtype\n\ndef save_model(model,\n\t\t\t name,\n\t\t\t savedir='./study/',\n\t\t\t history=None,\n\t\t\t history_geom_state=None,\n\t\t\t cfg=None,\n\t\t\t verbose=True):\n\t\"\"\"Save the model state and history to a file\n\t\"\"\"\n\tstr_filename = name + '.pt'\n\tif not os.path.exists(savedir):\n\t\tos.makedirs(savedir)\n\tstr_savepath = savedir + str_filename\n\n\tif history_geom_state is None:\n\t\thistory_geom_state = [model.cell.geom.state_reconstruction_args()]\n\n\tdata = {'model_geom_class_str': model.cell.geom.__class__.__name__,\n\t\t\t# Class name so we know which constructor to call in load()\n\t\t\t'model_state': model.state_dict(),\n\t\t\t# For now just store model state without history (only geom is likely to change)\n\t\t\t'history': history,\n\t\t\t'history_geom_state': history_geom_state, # Full history of the geometry state,\n\t\t\t'cfg': cfg}\n\n\tif verbose:\n\t\tprint(\"Saving model to %s\" % str_savepath)\n\ttorch.save(data, str_savepath)\n\n\ndef new_geometry(class_str, state):\n\tWaveGeometryClass = getattr(geom, class_str)\n\tgeom_state = copy.deepcopy(state)\n\treturn WaveGeometryClass(**geom_state)\n\n\ndef load_model(str_filename, which_iteration=-1):\n\t\"\"\"Load a previously saved model and its history from a file\n\t\"\"\"\n\n\tprint(\"Loading model from %s\" % str_filename)\n\n\tdata = torch.load(str_filename)\n\n\t# Set the type for floats from the save\n\tset_dtype(data['cfg']['dtype'])\n\n\t# Reconstruct Geometry\n\tnew_geom = new_geometry(data['model_geom_class_str'], data['history_geom_state'][which_iteration])\n\n\t# Get model state to recreate probes and sources\n\tmodel_state = copy.deepcopy(data['model_state'])\n\n\t# Parse out the probe and source coords\n\tpx = [model_state[k].item() for k in model_state if 'probes' in k and 'x' in k]\n\tpy = [model_state[k].item() for k in model_state if 'probes' in k and 'y' in k]\n\tsx = [model_state[k].item() for k in model_state if 'sources' in k and 'x' in k]\n\tsy = [model_state[k].item() for k in model_state if 'sources' in k and 'y' in k]\n\n\t# Manually add the probes and sources\n\tnew_probes = []\n\tfor (x, y) in zip(px, py):\n\t\tnew_probes.append(WaveIntensityProbe(x, y))\n\t\t# TODO(ian): here we should actually try to infer the type of probe (e.g. intensity or not)\n\n\tnew_sources = []\n\tfor (x, y) in zip(sx, sy):\n\t\tnew_sources.append(WaveSource(x, y))\n\n\tnew_cell = WaveCell(model_state['cell.dt'].item(), new_geom)\n\tnew_model = WaveRNN(new_cell, new_sources, new_probes)\n\t# Put into eval mode (doesn't really matter for us but whatever)\n\tnew_model.eval()\n\n\treturn new_model, data['history'], data['history_geom_state'], data['cfg']\n",
"\"\"\" Helper script for plotting memory usage from memory profiler\n\nInstall memory_profiler:\n\tconda install memory_profiler\n\nProfile the code:\n\tmprof run study/vowel_train.py study/example.yml\n\nThis will generate a mprofile dat file which you can then plot with this script\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n\nparser = argparse.ArgumentParser() \nparser.add_argument('files', nargs='+')\n\nargs = parser.parse_args()\n\nfig, ax = plt.subplots(1,1, constrained_layout=True, figsize=(4,3))\n\nfor file in args.files:\n\tdata = np.loadtxt(file, usecols=(1,2), skiprows=1, delimiter=' ')\n\tmem = data[:,0]\n\tt = data[:,1]\n\tt = t-t.min()\n\tax.plot(t, mem/1e3)\n\nax.set_xlabel('Time (sec)')\nax.set_ylabel('Memory (GB)')\nax.grid()\nplt.show()\n"
] | [
[
"torch.load",
"torch.save"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
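The wavetorch/io.py record above persists a dict of metadata plus a state_dict with torch.save and later rebuilds the model from it with torch.load. The sketch below shows only that round-trip pattern, using a stand-in nn.Linear module and hypothetical key names rather than the actual WaveRNN/geometry classes from the record.

```python
import torch
import torch.nn as nn

# Illustrative stand-in model; the real record stores a WaveRNN plus geometry
# history, but the save/load pattern is the same dict-of-state round trip.
model = nn.Linear(4, 2)

data = {
    'model_class_str': model.__class__.__name__,  # class name used to pick a constructor later
    'model_state': model.state_dict(),            # parameters only, not the module object
    'cfg': {'dtype': 'float32'},                  # arbitrary config payload
}
torch.save(data, 'checkpoint.pt')

# Reload and rebuild: construct a fresh module, then restore its parameters.
restored = torch.load('checkpoint.pt')
new_model = nn.Linear(4, 2)
new_model.load_state_dict(restored['model_state'])
new_model.eval()
```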
philtrade/gQuant | [
"08b2a82a257c234b92f097b925f25cab16fd0926"
] | [
"tests/unit/test_indicator_node.py"
] | [
"'''\nTechnical Indicator Node Unit Tests\n\nTo run unittests:\n\n# Using standard library unittest\n\npython -m unittest -v\npython -m unittest tests/unit/test_indicator_node.py -v\n\nor\n\npython -m unittest discover <test_directory>\npython -m unittest discover -s <directory> -p 'test_*.py'\n\n# Using pytest\n# \"conda install pytest\" or \"pip install pytest\"\npytest -v tests\npytest -v tests/unit/test_indicator_node.py\n\n'''\nimport warnings\nimport unittest\nimport cudf\nimport gquant.cuindicator as gi\nfrom gquant.plugin_nodes.transform.indicatorNode import IndicatorNode\nfrom gquant.dataframe_flow.task import Task\nfrom .utils import make_orderer\nimport numpy as np\nimport copy\n\nordered, compare = make_orderer()\nunittest.defaultTestLoader.sortTestMethodsUsing = compare\n\n\nclass TestIndicatorNode(unittest.TestCase):\n\n def setUp(self):\n warnings.simplefilter('ignore', category=ImportWarning)\n warnings.simplefilter('ignore', category=DeprecationWarning)\n # ignore importlib warnings.\n size = 200\n half = size // 2\n self.size = size\n self.half = half\n np.random.seed(10)\n random_array = np.random.rand(size)\n open_array = np.random.rand(size)\n close_array = np.random.rand(size)\n high_array = np.random.rand(size)\n low_array = np.random.rand(size)\n volume_array = np.random.rand(size)\n indicator = np.zeros(size, dtype=np.int32)\n indicator[0] = 1\n indicator[half] = 1\n df = cudf.DataFrame()\n df['in'] = random_array\n df['open'] = open_array\n df['close'] = close_array\n df['high'] = high_array\n df['low'] = low_array\n df['volume'] = volume_array\n df['indicator'] = indicator\n self._cudf_data = df\n self.conf = {\n \"indicators\": [\n {\"function\": \"port_chaikin_oscillator\",\n \"columns\": [\"high\", \"low\", \"close\", \"volume\"],\n \"args\": [10, 20]},\n {\"function\": \"port_bollinger_bands\",\n \"columns\": [\"close\"],\n \"args\": [10],\n \"outputs\": [\"b1\", \"b2\"]}\n ],\n \"remove_na\": True\n }\n\n def tearDown(self):\n pass\n\n @ordered\n def test_colums(self):\n '''Test node columns requirments'''\n node_obj = {\"id\": \"abc\",\n \"type\": \"IndicatorNode\",\n \"conf\": self.conf,\n \"inputs\": []}\n task = Task(node_obj)\n inN = IndicatorNode(task)\n\n col = \"indicator\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n col = \"high\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n col = \"low\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n col = \"close\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n col = \"volume\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n\n col = \"CH_OS_10_20\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.addition, msg)\n col = \"BO_BA_b1_10\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.addition, msg)\n col = \"BO_BA_b2_10\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.addition, msg)\n\n @ordered\n def test_drop(self):\n '''Test node columns drop'''\n node_obj = {\"id\": \"abc\",\n \"type\": \"IndicatorNode\",\n \"conf\": self.conf,\n \"inputs\": []}\n task = Task(node_obj)\n inN = IndicatorNode(task)\n o = inN.process([self._cudf_data])\n msg = \"bad error: df len %d is not right\" % (len(o))\n self.assertTrue(len(o) == 162, msg)\n\n newConf = copy.deepcopy(self.conf)\n newConf['remove_na'] = False\n node_obj = {\"id\": 
\"abc\",\n \"type\": \"IndicatorNode\",\n \"conf\": newConf,\n \"inputs\": []}\n task = Task(node_obj)\n inN = IndicatorNode(task)\n o = inN.process([self._cudf_data])\n msg = \"bad error: df len %d is not right\" % (len(o))\n self.assertTrue(len(o) == 200, msg)\n\n @ordered\n def test_signal(self):\n '''Test signal computation'''\n\n newConf = copy.deepcopy(self.conf)\n newConf['remove_na'] = False\n node_obj = {\"id\": \"abc\",\n \"type\": \"IndicatorNode\",\n \"conf\": newConf,\n \"inputs\": []}\n task = Task(node_obj)\n inN = IndicatorNode(task)\n o = inN.process([self._cudf_data])\n # check chaikin oscillator computation\n r_cudf = gi.chaikin_oscillator(self._cudf_data[:self.half]['high'],\n self._cudf_data[:self.half]['low'],\n self._cudf_data[:self.half]['close'],\n self._cudf_data[:self.half]['volume'],\n 10, 20)\n computed = o[:self.half]['CH_OS_10_20'].to_array('pandas')\n ref = r_cudf.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n r_cudf = gi.chaikin_oscillator(self._cudf_data[self.half:]['high'],\n self._cudf_data[self.half:]['low'],\n self._cudf_data[self.half:]['close'],\n self._cudf_data[self.half:]['volume'],\n 10, 20)\n computed = o[self.half:]['CH_OS_10_20'].to_array('pandas')\n ref = r_cudf.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n # check bollinger bands computation\n r_cudf = gi.bollinger_bands(self._cudf_data[:self.half]['close'], 10)\n computed = o[:self.half][\"BO_BA_b1_10\"].to_array('pandas')\n ref = r_cudf.b1.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n computed = o[:self.half][\"BO_BA_b2_10\"].to_array('pandas')\n ref = r_cudf.b2.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n r_cudf = gi.bollinger_bands(self._cudf_data[self.half:]['close'], 10)\n computed = o[self.half:][\"BO_BA_b1_10\"].to_array('pandas')\n ref = r_cudf.b1.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n computed = o[self.half:][\"BO_BA_b2_10\"].to_array('pandas')\n ref = r_cudf.b2.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.random.seed",
"numpy.isnan",
"numpy.random.rand",
"numpy.zeros",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
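The gQuant unit test above repeatedly compares indicator outputs by masking out NaN warm-up values with numpy.isnan and then checking the maximum absolute error with numpy.isclose. The snippet below isolates that comparison idiom on synthetic data; the array size and NaN positions are illustrative assumptions.

```python
import numpy as np

np.random.seed(10)

# Two arrays that agree except where both carry leading NaNs, as rolling-window
# indicators do during their warm-up period.
reference = np.random.rand(20)
computed = reference.copy()
computed[:5] = np.nan
reference[:5] = np.nan

# Drop NaNs from both sides before comparing, as the unit test above does.
err = np.abs(computed[~np.isnan(computed)] - reference[~np.isnan(reference)]).max()
assert np.isclose(err, 0, atol=1e-6), "bad error %f" % err
```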
avivajpeyi/scipy | [
"dbfe06e6618232b26c241cbe8861e2ea1489b535",
"dbfe06e6618232b26c241cbe8861e2ea1489b535",
"dbfe06e6618232b26c241cbe8861e2ea1489b535",
"dbfe06e6618232b26c241cbe8861e2ea1489b535",
"dbfe06e6618232b26c241cbe8861e2ea1489b535",
"dbfe06e6618232b26c241cbe8861e2ea1489b535",
"dbfe06e6618232b26c241cbe8861e2ea1489b535",
"dbfe06e6618232b26c241cbe8861e2ea1489b535"
] | [
"scipy/fft/tests/test_real_transforms.py",
"tools/refguide_check.py",
"scipy/stats/tests/test_binned_statistic.py",
"scipy/optimize/_linprog.py",
"benchmarks/benchmarks/go_benchmark_functions/go_funcs_univariate.py",
"scipy/optimize/_lsq/dogbox.py",
"scipy/ndimage/_ni_docstrings.py",
"scipy/fft/tests/test_numpy.py"
] | [
"\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal\nimport pytest\n\nfrom scipy.fft import dct, idct, dctn, idctn, dst, idst, dstn, idstn\nimport scipy.fft as fft\nfrom scipy import fftpack\n\n# scipy.fft wraps the fftpack versions but with normalized inverse transforms.\n# So, the forward transforms and definitions are already thoroughly tested in\n# fftpack/test_real_transforms.py\n\n\[email protected](\"forward, backward\", [(dct, idct), (dst, idst)])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"n\", [2, 3, 4, 5, 10, 16])\[email protected](\"axis\", [0, 1])\[email protected](\"norm\", [None, 'ortho'])\ndef test_identity_1d(forward, backward, type, n, axis, norm):\n # Test the identity f^-1(f(x)) == x\n x = np.random.rand(n, n)\n\n y = forward(x, type, axis=axis, norm=norm)\n z = backward(y, type, axis=axis, norm=norm)\n assert_allclose(z, x)\n\n pad = [(0, 0)] * 2\n pad[axis] = (0, 4)\n\n y2 = np.pad(y, pad, mode='edge')\n z2 = backward(y2, type, n, axis, norm)\n assert_allclose(z2, x)\n\n\[email protected](\"forward, backward\", [(dct, idct), (dst, idst)])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"dtype\", [np.float16, np.float32, np.float64,\n np.complex64, np.complex128])\[email protected](\"axis\", [0, 1])\[email protected](\"norm\", [None, 'ortho'])\[email protected](\"overwrite_x\", [True, False])\ndef test_identity_1d_overwrite(forward, backward, type, dtype, axis, norm,\n overwrite_x):\n # Test the identity f^-1(f(x)) == x\n x = np.random.rand(7, 8)\n x_orig = x.copy()\n\n y = forward(x, type, axis=axis, norm=norm, overwrite_x=overwrite_x)\n y_orig = y.copy()\n z = backward(y, type, axis=axis, norm=norm, overwrite_x=overwrite_x)\n if not overwrite_x:\n assert_allclose(z, x, rtol=1e-6, atol=1e-6)\n assert_array_equal(x, x_orig)\n assert_array_equal(y, y_orig)\n else:\n assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)\n\n\[email protected](\"forward, backward\", [(dctn, idctn), (dstn, idstn)])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"shape, axes\",\n [\n ((4, 4), 0),\n ((4, 4), 1),\n ((4, 4), None),\n ((4, 4), (0, 1)),\n ((10, 12), None),\n ((10, 12), (0, 1)),\n ((4, 5, 6), None),\n ((4, 5, 6), 1),\n ((4, 5, 6), (0, 2)),\n ])\[email protected](\"norm\", [None, 'ortho'])\ndef test_identity_nd(forward, backward, type, shape, axes, norm):\n # Test the identity f^-1(f(x)) == x\n\n x = np.random.random(shape)\n\n if axes is not None:\n shape = np.take(shape, axes)\n\n y = forward(x, type, axes=axes, norm=norm)\n z = backward(y, type, axes=axes, norm=norm)\n assert_allclose(z, x)\n\n if axes is None:\n pad = [(0, 4)] * x.ndim\n elif isinstance(axes, int):\n pad = [(0, 0)] * x.ndim\n pad[axes] = (0, 4)\n else:\n pad = [(0, 0)] * x.ndim\n\n for a in axes:\n pad[a] = (0, 4)\n\n y2 = np.pad(y, pad, mode='edge')\n z2 = backward(y2, type, shape, axes, norm)\n assert_allclose(z2, x)\n\n\[email protected](\"forward, backward\", [(dctn, idctn), (dstn, idstn)])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"shape, axes\",\n [\n ((4, 5), 0),\n ((4, 5), 1),\n ((4, 5), None),\n ])\[email protected](\"dtype\", [np.float16, np.float32, np.float64,\n np.complex64, np.complex128])\[email protected](\"norm\", [None, 'ortho'])\[email protected](\"overwrite_x\", [False, True])\ndef test_identity_nd_overwrite(forward, backward, type, shape, axes, dtype,\n norm, overwrite_x):\n # Test the identity f^-1(f(x)) == x\n\n x = np.random.random(shape).astype(dtype)\n x_orig = x.copy()\n\n if axes is not 
None:\n shape = np.take(shape, axes)\n\n y = forward(x, type, axes=axes, norm=norm)\n y_orig = y.copy()\n z = backward(y, type, axes=axes, norm=norm)\n if overwrite_x:\n assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)\n else:\n assert_allclose(z, x, rtol=1e-6, atol=1e-6)\n assert_array_equal(x, x_orig)\n assert_array_equal(y, y_orig)\n\n\[email protected](\"func\", ['dct', 'dst', 'dctn', 'dstn'])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"norm\", [None, 'ortho'])\ndef test_fftpack_equivalience(func, type, norm):\n x = np.random.rand(8, 16)\n fft_res = getattr(fft, func)(x, type, norm=norm)\n fftpack_res = getattr(fftpack, func)(x, type, norm=norm)\n\n assert_allclose(fft_res, fftpack_res)\n",
"#!/usr/bin/env python\n\"\"\"\nrefguide_check.py [OPTIONS] [-- ARGS]\n\nCheck for a Scipy submodule whether the objects in its __all__ dict\ncorrespond to the objects included in the reference guide.\n\nExample of usage::\n\n $ python refguide_check.py optimize\n\nNote that this is a helper script to be able to check if things are missing;\nthe output of this script does need to be checked manually. In some cases\nobjects are left out of the refguide for a good reason (it's an alias of\nanother function, or deprecated, or ...)\n\nAnother use of this helper script is to check validity of code samples\nin docstrings. This is different from doctesting [we do not aim to have\nscipy docstrings doctestable!], this is just to make sure that code in\ndocstrings is valid python::\n\n $ python refguide_check.py --doctests optimize\n\n\"\"\"\nimport copy\nimport doctest\nimport glob\nimport inspect\nimport io\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport warnings\nfrom argparse import ArgumentParser\nfrom contextlib import contextmanager, redirect_stderr\nfrom doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL\n\nimport docutils.core\nimport numpy as np\nimport sphinx\nfrom docutils.parsers.rst import directives\nfrom pkg_resources import parse_version\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))\nfrom numpydoc.docscrape_sphinx import get_doc_object\n\nif parse_version(sphinx.__version__) >= parse_version('1.5'):\n # Enable specific Sphinx directives\n from sphinx.directives import SeeAlso, Only\n directives.register_directive('seealso', SeeAlso)\n directives.register_directive('only', Only)\nelse:\n # Remove sphinx directives that don't run without Sphinx environment.\n # Sphinx < 1.5 installs all directives on import...\n directives._directives.pop('versionadded', None)\n directives._directives.pop('versionchanged', None)\n directives._directives.pop('moduleauthor', None)\n directives._directives.pop('sectionauthor', None)\n directives._directives.pop('codeauthor', None)\n directives._directives.pop('toctree', None)\n\n\nBASE_MODULE = \"scipy\"\n\nPUBLIC_SUBMODULES = [\n 'cluster',\n 'cluster.hierarchy',\n 'cluster.vq',\n 'constants',\n 'fft',\n 'fftpack',\n 'fftpack.convolve',\n 'integrate',\n 'interpolate',\n 'io',\n 'io.arff',\n 'io.wavfile',\n 'linalg',\n 'linalg.blas',\n 'linalg.lapack',\n 'linalg.interpolative',\n 'misc',\n 'ndimage',\n 'odr',\n 'optimize',\n 'signal',\n 'signal.windows',\n 'sparse',\n 'sparse.csgraph',\n 'sparse.linalg',\n 'spatial',\n 'spatial.distance',\n 'spatial.transform',\n 'special',\n 'stats',\n 'stats.mstats',\n 'stats.contingency',\n]\n\n# Docs for these modules are included in the parent module\nOTHER_MODULE_DOCS = {\n 'fftpack.convolve': 'fftpack',\n 'io.wavfile': 'io',\n 'io.arff': 'io',\n}\n\n# these names are known to fail doctesting and we like to keep it that way\n# e.g. 
sometimes pseudocode is acceptable etc\nDOCTEST_SKIPLIST = set([\n 'scipy.stats.kstwobign', # inaccurate cdf or ppf\n 'scipy.stats.levy_stable',\n 'scipy.special.sinc', # comes from numpy\n 'scipy.misc.who', # comes from numpy\n 'scipy.optimize.show_options',\n 'io.rst', # XXX: need to figure out how to deal w/ mat files\n])\n\n# these names are not required to be present in ALL despite being in\n# autosummary:: listing\nREFGUIDE_ALL_SKIPLIST = [\n r'scipy\\.sparse\\.csgraph',\n r'scipy\\.sparse\\.linalg',\n r'scipy\\.spatial\\.distance',\n r'scipy\\.linalg\\.blas\\.[sdczi].*',\n r'scipy\\.linalg\\.lapack\\.[sdczi].*',\n]\n\n# these names are not required to be in an autosummary:: listing\n# despite being in ALL\nREFGUIDE_AUTOSUMMARY_SKIPLIST = [\n r'scipy\\.special\\..*_roots', # old aliases for scipy.special.*_roots\n r'scipy\\.special\\.jn', # alias for jv\n r'scipy\\.ndimage\\.sum', # alias for sum_labels\n r'scipy\\.linalg\\.solve_lyapunov', # deprecated name\n r'scipy\\.stats\\.contingency\\.chi2_contingency',\n r'scipy\\.stats\\.contingency\\.expected_freq',\n r'scipy\\.stats\\.contingency\\.margins',\n r'scipy\\.stats\\.reciprocal',\n]\n# deprecated windows in scipy.signal namespace\nfor name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',\n 'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',\n 'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',\n 'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):\n REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\\.signal\\.' + name)\n\nHAVE_MATPLOTLIB = False\n\n\ndef short_path(path, cwd=None):\n \"\"\"\n Return relative or absolute path name, whichever is shortest.\n \"\"\"\n if not isinstance(path, str):\n return path\n if cwd is None:\n cwd = os.getcwd()\n abspath = os.path.abspath(path)\n relpath = os.path.relpath(path, cwd)\n if len(abspath) <= len(relpath):\n return abspath\n return relpath\n\n\ndef find_names(module, names_dict):\n # Refguide entries:\n #\n # - 3 spaces followed by function name, and maybe some spaces, some\n # dashes, and an explanation; only function names listed in\n # refguide are formatted like this (mostly, there may be some false\n # positives)\n #\n # - special directives, such as data and function\n #\n # - (scipy.constants only): quoted list\n #\n patterns = [\n r\"^\\s\\s\\s([a-z_0-9A-Z]+)(\\s+-+.*)?$\",\n r\"^\\.\\. (?:data|function)::\\s*([a-z_0-9A-Z]+)\\s*$\"\n ]\n\n if module.__name__ == 'scipy.constants':\n patterns += [\"^``([a-z_0-9A-Z]+)``\"]\n\n patterns = [re.compile(pattern) for pattern in patterns]\n module_name = module.__name__\n\n for line in module.__doc__.splitlines():\n res = re.search(r\"^\\s*\\.\\. 
(?:currentmodule|module):: ([a-z0-9A-Z_.]+)\\s*$\", line)\n if res:\n module_name = res.group(1)\n continue\n\n for pattern in patterns:\n res = re.match(pattern, line)\n if res is not None:\n name = res.group(1)\n entry = '.'.join([module_name, name])\n names_dict.setdefault(module_name, set()).add(name)\n break\n\n\ndef get_all_dict(module):\n \"\"\"Return a copy of the __all__ dict with irrelevant items removed.\"\"\"\n if hasattr(module, \"__all__\"):\n all_dict = copy.deepcopy(module.__all__)\n else:\n all_dict = copy.deepcopy(dir(module))\n all_dict = [name for name in all_dict\n if not name.startswith(\"_\")]\n for name in ['absolute_import', 'division', 'print_function']:\n try:\n all_dict.remove(name)\n except ValueError:\n pass\n\n # Modules are almost always private; real submodules need a separate\n # run of refguide_check.\n all_dict = [name for name in all_dict\n if not inspect.ismodule(getattr(module, name, None))]\n\n deprecated = []\n not_deprecated = []\n for name in all_dict:\n f = getattr(module, name, None)\n if callable(f) and is_deprecated(f):\n deprecated.append(name)\n else:\n not_deprecated.append(name)\n\n others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))\n\n return not_deprecated, deprecated, others\n\n\ndef compare(all_dict, others, names, module_name):\n \"\"\"Return sets of objects only in __all__, refguide, or completely missing.\"\"\"\n only_all = set()\n for name in all_dict:\n if name not in names:\n for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:\n if re.match(pat, module_name + '.' + name):\n break\n else:\n only_all.add(name)\n\n only_ref = set()\n missing = set()\n for name in names:\n if name not in all_dict:\n for pat in REFGUIDE_ALL_SKIPLIST:\n if re.match(pat, module_name + '.' 
+ name):\n if name not in others:\n missing.add(name)\n break\n else:\n only_ref.add(name)\n\n return only_all, only_ref, missing\n\ndef is_deprecated(f):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"error\")\n try:\n f(**{\"not a kwarg\":None})\n except DeprecationWarning:\n return True\n except Exception:\n pass\n return False\n\ndef check_items(all_dict, names, deprecated, others, module_name, dots=True):\n num_all = len(all_dict)\n num_ref = len(names)\n\n output = \"\"\n\n output += \"Non-deprecated objects in __all__: %i\\n\" % num_all\n output += \"Objects in refguide: %i\\n\\n\" % num_ref\n\n only_all, only_ref, missing = compare(all_dict, others, names, module_name)\n dep_in_ref = set(only_ref).intersection(deprecated)\n only_ref = set(only_ref).difference(deprecated)\n\n if len(dep_in_ref) > 0:\n output += \"Deprecated objects in refguide::\\n\\n\"\n for name in sorted(deprecated):\n output += \" \" + name + \"\\n\"\n\n if len(only_all) == len(only_ref) == len(missing) == 0:\n if dots:\n output_dot('.')\n return [(None, True, output)]\n else:\n if len(only_all) > 0:\n output += \"ERROR: objects in %s.__all__ but not in refguide::\\n\\n\" % module_name\n for name in sorted(only_all):\n output += \" \" + name + \"\\n\"\n\n output += \"\\nThis issue can be fixed by adding these objects to\\n\"\n output += \"the function listing in __init__.py for this module\\n\"\n\n if len(only_ref) > 0:\n output += \"ERROR: objects in refguide but not in %s.__all__::\\n\\n\" % module_name\n for name in sorted(only_ref):\n output += \" \" + name + \"\\n\"\n\n output += \"\\nThis issue should likely be fixed by removing these objects\\n\"\n output += \"from the function listing in __init__.py for this module\\n\"\n output += \"or adding them to __all__.\\n\"\n\n if len(missing) > 0:\n output += \"ERROR: missing objects::\\n\\n\"\n for name in sorted(missing):\n output += \" \" + name + \"\\n\"\n\n if dots:\n output_dot('F')\n return [(None, False, output)]\n\n\ndef validate_rst_syntax(text, name, dots=True):\n if text is None:\n if dots:\n output_dot('E')\n return False, \"ERROR: %s: no documentation\" % (name,)\n\n ok_unknown_items = set([\n 'mod', 'currentmodule', 'autosummary', 'data',\n 'obj', 'versionadded', 'versionchanged', 'module', 'class', 'meth',\n 'ref', 'func', 'toctree', 'moduleauthor',\n 'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'\n ])\n\n # Run through docutils\n error_stream = io.StringIO()\n\n def resolve(name, is_label=False):\n return (\"http://foo\", name)\n\n token = '<RST-VALIDATE-SYNTAX-CHECK>'\n\n docutils.core.publish_doctree(\n text, token,\n settings_overrides = dict(halt_level=5,\n traceback=True,\n default_reference_context='title-reference',\n default_role='emphasis',\n link_base='',\n resolve_name=resolve,\n stylesheet_path='',\n raw_enabled=0,\n file_insertion_enabled=0,\n warning_stream=error_stream))\n\n # Print errors, disregarding unimportant ones\n error_msg = error_stream.getvalue()\n errors = error_msg.split(token)\n success = True\n output = \"\"\n\n for error in errors:\n lines = error.splitlines()\n if not lines:\n continue\n\n m = re.match(r'.*Unknown (?:interpreted text role|directive type) \"(.*)\".*$', lines[0])\n if m:\n if m.group(1) in ok_unknown_items:\n continue\n\n m = re.match(r'.*Error in \"math\" directive:.*unknown option: \"label\"', \" \".join(lines), re.S)\n if m:\n continue\n\n output += name + lines[0] + \"::\\n \" + \"\\n \".join(lines[1:]).rstrip() + \"\\n\"\n success = False\n\n if 
not success:\n output += \" \" + \"-\"*72 + \"\\n\"\n for lineno, line in enumerate(text.splitlines()):\n output += \" %-4d %s\\n\" % (lineno+1, line)\n output += \" \" + \"-\"*72 + \"\\n\\n\"\n\n if dots:\n output_dot('.' if success else 'F')\n return success, output\n\n\ndef output_dot(msg='.', stream=sys.stderr):\n stream.write(msg)\n stream.flush()\n\n\ndef check_rest(module, names, dots=True):\n \"\"\"\n Check reStructuredText formatting of docstrings\n\n Returns: [(name, success_flag, output), ...]\n \"\"\"\n\n try:\n skip_types = (dict, str, unicode, float, int)\n except NameError:\n # python 3\n skip_types = (dict, str, float, int)\n\n results = []\n\n if module.__name__[6:] not in OTHER_MODULE_DOCS:\n results += [(module.__name__,) +\n validate_rst_syntax(inspect.getdoc(module),\n module.__name__, dots=dots)]\n\n for name in names:\n full_name = module.__name__ + '.' + name\n obj = getattr(module, name, None)\n\n if obj is None:\n results.append((full_name, False, \"%s has no docstring\" % (full_name,)))\n continue\n elif isinstance(obj, skip_types):\n continue\n\n if inspect.ismodule(obj):\n text = inspect.getdoc(obj)\n else:\n try:\n text = str(get_doc_object(obj))\n except Exception:\n import traceback\n results.append((full_name, False,\n \"Error in docstring format!\\n\" +\n traceback.format_exc()))\n continue\n\n m = re.search(\"([\\x00-\\x09\\x0b-\\x1f])\", text)\n if m:\n msg = (\"Docstring contains a non-printable character %r! \"\n \"Maybe forgot r\\\"\\\"\\\"?\" % (m.group(1),))\n results.append((full_name, False, msg))\n continue\n\n try:\n src_file = short_path(inspect.getsourcefile(obj))\n except TypeError:\n src_file = None\n\n if src_file:\n file_full_name = src_file + ':' + full_name\n else:\n file_full_name = full_name\n\n results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))\n\n return results\n\n\n### Doctest helpers ####\n\n# the namespace to run examples in\nDEFAULT_NAMESPACE = {'np': np}\n\n# the namespace to do checks in\nCHECK_NAMESPACE = {\n 'np': np,\n 'assert_allclose': np.testing.assert_allclose,\n 'assert_equal': np.testing.assert_equal,\n # recognize numpy repr's\n 'array': np.array,\n 'matrix': np.matrix,\n 'int64': np.int64,\n 'uint64': np.uint64,\n 'int8': np.int8,\n 'int32': np.int32,\n 'float32': np.float32,\n 'float64': np.float64,\n 'dtype': np.dtype,\n 'nan': np.nan,\n 'NaN': np.nan,\n 'inf': np.inf,\n 'Inf': np.inf,}\n\n\nclass DTRunner(doctest.DocTestRunner):\n DIVIDER = \"\\n\"\n\n def __init__(self, item_name, checker=None, verbose=None, optionflags=0):\n self._item_name = item_name\n doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,\n optionflags=optionflags)\n\n def _report_item_name(self, out, new_line=False):\n if self._item_name is not None:\n if new_line:\n out(\"\\n\")\n self._item_name = None\n\n def report_start(self, out, test, example):\n self._checker._source = example.source\n return doctest.DocTestRunner.report_start(self, out, test, example)\n\n def report_success(self, out, test, example, got):\n if self._verbose:\n self._report_item_name(out, new_line=True)\n return doctest.DocTestRunner.report_success(self, out, test, example, got)\n\n def report_unexpected_exception(self, out, test, example, exc_info):\n self._report_item_name(out)\n return doctest.DocTestRunner.report_unexpected_exception(\n self, out, test, example, exc_info)\n\n def report_failure(self, out, test, example, got):\n self._report_item_name(out)\n return doctest.DocTestRunner.report_failure(self, out, 
test,\n example, got)\n\nclass Checker(doctest.OutputChecker):\n obj_pattern = re.compile(r'at 0x[0-9a-fA-F]+>')\n vanilla = doctest.OutputChecker()\n rndm_markers = {'# random', '# Random', '#random', '#Random', \"# may vary\"}\n stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',\n 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',\n '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',\n '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',\n '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}\n\n def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):\n self.parse_namedtuples = parse_namedtuples\n self.atol, self.rtol = atol, rtol\n if ns is None:\n self.ns = dict(CHECK_NAMESPACE)\n else:\n self.ns = ns\n\n def check_output(self, want, got, optionflags):\n # cut it short if they are equal\n if want == got:\n return True\n\n # skip stopwords in source\n if any(word in self._source for word in self.stopwords):\n return True\n\n # skip random stuff\n if any(word in want for word in self.rndm_markers):\n return True\n\n # skip function/object addresses\n if self.obj_pattern.search(got):\n return True\n\n # ignore comments (e.g. signal.freqresp)\n if want.lstrip().startswith(\"#\"):\n return True\n\n # try the standard doctest\n try:\n if self.vanilla.check_output(want, got, optionflags):\n return True\n except Exception:\n pass\n\n # OK then, convert strings to objects\n try:\n a_want = eval(want, dict(self.ns))\n a_got = eval(got, dict(self.ns))\n except Exception:\n # Maybe we're printing a numpy array? This produces invalid python\n # code: `print(np.arange(3))` produces \"[0 1 2]\" w/o commas between\n # values. So, reinsert commas and retry.\n # TODO: handle (1) abberivation (`print(np.arange(10000))`), and\n # (2) n-dim arrays with n > 1\n s_want = want.strip()\n s_got = got.strip()\n cond = (s_want.startswith(\"[\") and s_want.endswith(\"]\") and\n s_got.startswith(\"[\") and s_got.endswith(\"]\"))\n if cond:\n s_want = \", \".join(s_want[1:-1].split())\n s_got = \", \".join(s_got[1:-1].split())\n return self.check_output(s_want, s_got, optionflags)\n\n if not self.parse_namedtuples:\n return False\n # suppose that \"want\" is a tuple, and \"got\" is smth like\n # MoodResult(statistic=10, pvalue=0.1).\n # Then convert the latter to the tuple (10, 0.1),\n # and then compare the tuples.\n try:\n num = len(a_want)\n regex = (r'[\\w\\d_]+\\(' +\n ', '.join([r'[\\w\\d_]+=(.+)']*num) +\n r'\\)')\n grp = re.findall(regex, got.replace('\\n', ' '))\n if len(grp) > 1: # no more than one for now\n return False\n # fold it back to a tuple\n got_again = '(' + ', '.join(grp[0]) + ')'\n return self.check_output(want, got_again, optionflags)\n except Exception:\n return False\n\n # ... 
and defer to numpy\n try:\n return self._do_check(a_want, a_got)\n except Exception:\n # heterog tuple, eg (1, np.array([1., 2.]))\n try:\n return all(self._do_check(w, g) for w, g in zip(a_want, a_got))\n except (TypeError, ValueError):\n return False\n\n def _do_check(self, want, got):\n # This should be done exactly as written to correctly handle all of\n # numpy-comparable objects, strings, and heterogeneous tuples\n try:\n if want == got:\n return True\n except Exception:\n pass\n return np.allclose(want, got, atol=self.atol, rtol=self.rtol)\n\n\ndef _run_doctests(tests, full_name, verbose, doctest_warnings):\n \"\"\"Run modified doctests for the set of `tests`.\n\n Returns: list of [(success_flag, output), ...]\n \"\"\"\n flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL\n runner = DTRunner(full_name, checker=Checker(), optionflags=flags,\n verbose=verbose)\n\n output = io.StringIO(newline='')\n success = True\n # Redirect stderr to the stdout or output\n tmp_stderr = sys.stdout if doctest_warnings else output\n\n @contextmanager\n def temp_cwd():\n cwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n try:\n os.chdir(tmpdir)\n yield tmpdir\n finally:\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n\n # Run tests, trying to restore global state afterward\n cwd = os.getcwd()\n with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \\\n redirect_stderr(tmp_stderr):\n # try to ensure random seed is NOT reproducible\n np.random.seed(None)\n\n for t in tests:\n t.filename = short_path(t.filename, cwd)\n fails, successes = runner.run(t, out=output.write)\n if fails > 0:\n success = False\n\n output.seek(0)\n return success, output.read()\n\n\ndef check_doctests(module, verbose, ns=None,\n dots=True, doctest_warnings=False):\n \"\"\"Check code in docstrings of the module's public symbols.\n\n Returns: list of [(item_name, success_flag, output), ...]\n \"\"\"\n if ns is None:\n ns = dict(DEFAULT_NAMESPACE)\n\n # Loop over non-deprecated items\n results = []\n\n for name in get_all_dict(module)[0]:\n full_name = module.__name__ + '.' + name\n\n if full_name in DOCTEST_SKIPLIST:\n continue\n\n try:\n obj = getattr(module, name)\n except AttributeError:\n import traceback\n results.append((full_name, False,\n \"Missing item!\\n\" +\n traceback.format_exc()))\n continue\n\n finder = doctest.DocTestFinder()\n try:\n tests = finder.find(obj, name, globs=dict(ns))\n except Exception:\n import traceback\n results.append((full_name, False,\n \"Failed to get doctests!\\n\" +\n traceback.format_exc()))\n continue\n\n success, output = _run_doctests(tests, full_name, verbose,\n doctest_warnings)\n\n if dots:\n output_dot('.' if success else 'F')\n\n results.append((full_name, success, output))\n\n if HAVE_MATPLOTLIB:\n import matplotlib.pyplot as plt\n plt.close('all')\n\n return results\n\n\ndef check_doctests_testfile(fname, verbose, ns=None,\n dots=True, doctest_warnings=False):\n \"\"\"Check code in a text file.\n\n Mimic `check_doctests` above, differing mostly in test discovery.\n (which is borrowed from stdlib's doctest.testfile here,\n https://github.com/python-git/python/blob/master/Lib/doctest.py)\n\n Returns: list of [(item_name, success_flag, output), ...]\n\n Notes\n -----\n\n refguide can be signalled to skip testing code by adding\n ``#doctest: +SKIP`` to the end of the line. If the output varies or is\n random, add ``# may vary`` or ``# random`` to the comment. for example\n\n >>> plt.plot(...) 
# doctest: +SKIP\n >>> random.randint(0,10)\n 5 # random\n\n We also try to weed out pseudocode:\n * We maintain a list of exceptions which signal pseudocode,\n * We split the text file into \"blocks\" of code separated by empty lines\n and/or intervening text.\n * If a block contains a marker, the whole block is then assumed to be\n pseudocode. It is then not being doctested.\n\n The rationale is that typically, the text looks like this:\n\n blah\n <BLANKLINE>\n >>> from numpy import some_module # pseudocode!\n >>> func = some_module.some_function\n >>> func(42) # still pseudocode\n 146\n <BLANKLINE>\n blah\n <BLANKLINE>\n >>> 2 + 3 # real code, doctest it\n 5\n\n \"\"\"\n results = []\n\n if ns is None:\n ns = dict(DEFAULT_NAMESPACE)\n\n _, short_name = os.path.split(fname)\n if short_name in DOCTEST_SKIPLIST:\n return results\n\n full_name = fname\n with open(fname, encoding='utf-8') as f:\n text = f.read()\n\n PSEUDOCODE = set(['some_function', 'some_module', 'import example',\n 'ctypes.CDLL', # likely need compiling, skip it\n 'integrate.nquad(func,' # ctypes integrate tutotial\n ])\n\n # split the text into \"blocks\" and try to detect and omit pseudocode blocks.\n parser = doctest.DocTestParser()\n good_parts = []\n for part in text.split('\\n\\n'):\n tests = parser.get_doctest(part, ns, fname, fname, 0)\n if any(word in ex.source for word in PSEUDOCODE\n for ex in tests.examples):\n # omit it\n pass\n else:\n # `part` looks like a good code, let's doctest it\n good_parts += [part]\n\n # Reassemble the good bits and doctest them:\n good_text = '\\n\\n'.join(good_parts)\n tests = parser.get_doctest(good_text, ns, fname, fname, 0)\n success, output = _run_doctests([tests], full_name, verbose,\n doctest_warnings)\n\n if dots:\n output_dot('.' if success else 'F')\n\n results.append((full_name, success, output))\n\n if HAVE_MATPLOTLIB:\n import matplotlib.pyplot as plt\n plt.close('all')\n\n return results\n\n\ndef init_matplotlib():\n global HAVE_MATPLOTLIB\n\n try:\n import matplotlib\n matplotlib.use('Agg')\n HAVE_MATPLOTLIB = True\n except ImportError:\n HAVE_MATPLOTLIB = False\n\n\ndef main(argv):\n parser = ArgumentParser(usage=__doc__.lstrip())\n parser.add_argument(\"module_names\", metavar=\"SUBMODULES\", default=[],\n nargs='*', help=\"Submodules to check (default: all public)\")\n parser.add_argument(\"--doctests\", action=\"store_true\", help=\"Run also doctests\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0)\n parser.add_argument(\"--doctest-warnings\", action=\"store_true\",\n help=\"Enforce warning checking for doctests\")\n parser.add_argument(\"--skip-tutorial\", action=\"store_true\",\n help=\"Skip running doctests in the tutorial.\")\n args = parser.parse_args(argv)\n\n modules = []\n names_dict = {}\n\n if args.module_names:\n args.skip_tutorial = True\n else:\n args.module_names = list(PUBLIC_SUBMODULES)\n\n os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'\n\n module_names = list(args.module_names)\n for name in list(module_names):\n if name in OTHER_MODULE_DOCS:\n name = OTHER_MODULE_DOCS[name]\n if name not in module_names:\n module_names.append(name)\n\n for submodule_name in module_names:\n module_name = BASE_MODULE + '.' 
+ submodule_name\n __import__(module_name)\n module = sys.modules[module_name]\n\n if submodule_name not in OTHER_MODULE_DOCS:\n find_names(module, names_dict)\n\n if submodule_name in args.module_names:\n modules.append(module)\n\n dots = True\n success = True\n results = []\n\n print(\"Running checks for %d modules:\" % (len(modules),))\n\n if args.doctests or not args.skip_tutorial:\n init_matplotlib()\n\n for module in modules:\n if dots:\n if module is not modules[0]:\n sys.stderr.write(' ')\n sys.stderr.write(module.__name__ + ' ')\n sys.stderr.flush()\n\n all_dict, deprecated, others = get_all_dict(module)\n names = names_dict.get(module.__name__, set())\n\n mod_results = []\n mod_results += check_items(all_dict, names, deprecated, others, module.__name__)\n mod_results += check_rest(module, set(names).difference(deprecated),\n dots=dots)\n if args.doctests:\n mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,\n doctest_warnings=args.doctest_warnings)\n\n for v in mod_results:\n assert isinstance(v, tuple), v\n\n results.append((module, mod_results))\n\n if dots:\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n if not args.skip_tutorial:\n base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')\n tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')\n print('\\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))\n for filename in sorted(glob.glob(tut_path)):\n if dots:\n sys.stderr.write('\\n')\n sys.stderr.write(os.path.split(filename)[1] + ' ')\n sys.stderr.flush()\n\n tut_results = check_doctests_testfile(filename, (args.verbose >= 2),\n dots=dots, doctest_warnings=args.doctest_warnings)\n\n def scratch():\n pass # stub out a \"module\", see below\n scratch.__name__ = filename\n results.append((scratch, tut_results))\n\n if dots:\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n # Report results\n all_success = True\n\n for module, mod_results in results:\n success = all(x[1] for x in mod_results)\n all_success = all_success and success\n\n if success and args.verbose == 0:\n continue\n\n print(\"\")\n print(\"=\" * len(module.__name__))\n print(module.__name__)\n print(\"=\" * len(module.__name__))\n print(\"\")\n\n for name, success, output in mod_results:\n if name is None:\n if not success or args.verbose >= 1:\n print(output.strip())\n print(\"\")\n elif not success or (args.verbose >= 2 and output.strip()):\n print(name)\n print(\"-\"*len(name))\n print(\"\")\n print(output.strip())\n print(\"\")\n\n if all_success:\n print(\"\\nOK: refguide and doctests checks passed!\")\n sys.exit(0)\n else:\n print(\"\\nERROR: refguide or doctests have errors\")\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main(argv=sys.argv[1:])\n",
"import numpy as np\nfrom numpy.testing import assert_allclose\nfrom pytest import raises as assert_raises\nfrom scipy.stats import (binned_statistic, binned_statistic_2d,\n binned_statistic_dd)\nfrom scipy._lib._util import check_random_state\n\nfrom .common_tests import check_named_results\n\n\nclass TestBinnedStatistic(object):\n\n @classmethod\n def setup_class(cls):\n rng = check_random_state(9865)\n cls.x = rng.uniform(size=100)\n cls.y = rng.uniform(size=100)\n cls.v = rng.uniform(size=100)\n cls.X = rng.uniform(size=(100, 3))\n cls.w = rng.uniform(size=100)\n cls.u = rng.uniform(size=100) + 1e6\n\n def test_1d_count(self):\n x = self.x\n v = self.v\n\n count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)\n count2, edges2 = np.histogram(x, bins=10)\n\n assert_allclose(count1, count2)\n assert_allclose(edges1, edges2)\n\n def test_gh5927(self):\n # smoke test for gh5927 - binned_statistic was using `is` for string\n # comparison\n x = self.x\n v = self.v\n statistics = [u'mean', u'median', u'count', u'sum']\n for statistic in statistics:\n binned_statistic(x, v, statistic, bins=10)\n\n def test_big_number_std(self):\n # tests for numerical stability of std calculation\n # see issue gh-10126 for more\n x = self.x\n u = self.u\n stat1, edges1, bc = binned_statistic(x, u, 'std', bins=10)\n stat2, edges2, bc = binned_statistic(x, u, np.std, bins=10)\n\n assert_allclose(stat1, stat2)\n\n def test_non_finite_inputs_and_int_bins(self):\n # if either `values` or `sample` contain np.inf or np.nan throw\n # see issue gh-9010 for more\n x = self.x\n u = self.u\n orig = u[0]\n u[0] = np.inf\n assert_raises(ValueError, binned_statistic, u, x, 'std', bins=10)\n # need to test for non-python specific ints, e.g. np.int8, np.int64\n assert_raises(ValueError, binned_statistic, u, x, 'std',\n bins=np.int64(10))\n u[0] = np.nan\n assert_raises(ValueError, binned_statistic, u, x, 'count', bins=10)\n # replace original value, u belongs the class\n u[0] = orig\n\n def test_1d_result_attributes(self):\n x = self.x\n v = self.v\n\n res = binned_statistic(x, v, 'count', bins=10)\n attributes = ('statistic', 'bin_edges', 'binnumber')\n check_named_results(res, attributes)\n\n def test_1d_sum(self):\n x = self.x\n v = self.v\n\n sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)\n sum2, edges2 = np.histogram(x, bins=10, weights=v)\n\n assert_allclose(sum1, sum2)\n assert_allclose(edges1, edges2)\n\n def test_1d_mean(self):\n x = self.x\n v = self.v\n\n stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)\n stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)\n\n assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_1d_std(self):\n x = self.x\n v = self.v\n\n stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)\n stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)\n\n assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_1d_min(self):\n x = self.x\n v = self.v\n\n stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10)\n stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10)\n\n assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_1d_max(self):\n x = self.x\n v = self.v\n\n stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10)\n stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10)\n\n assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_1d_median(self):\n x = self.x\n v = self.v\n\n stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)\n stat2, 
edges2, bc = binned_statistic(x, v, np.median, bins=10)\n\n assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_1d_bincode(self):\n x = self.x[:20]\n v = self.v[:20]\n\n count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)\n bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,\n 1, 2, 1])\n\n bcount = [(bc == i).sum() for i in np.unique(bc)]\n\n assert_allclose(bc, bc2)\n assert_allclose(bcount, count1)\n\n def test_1d_range_keyword(self):\n # Regression test for gh-3063, range can be (min, max) or [(min, max)]\n np.random.seed(9865)\n x = np.arange(30)\n data = np.random.random(30)\n\n mean, bins, _ = binned_statistic(x[:15], data[:15])\n mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])\n mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))\n\n assert_allclose(mean, mean_range)\n assert_allclose(bins, bins_range)\n assert_allclose(mean, mean_range2)\n assert_allclose(bins, bins_range2)\n\n def test_1d_multi_values(self):\n x = self.x\n v = self.v\n w = self.w\n\n stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10)\n stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', bins=10)\n stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10)\n\n assert_allclose(stat2[0], stat1v)\n assert_allclose(stat2[1], stat1w)\n assert_allclose(edges1v, edges2)\n assert_allclose(bc1v, bc2)\n\n def test_2d_count(self):\n x = self.x\n y = self.y\n v = self.v\n\n count1, binx1, biny1, bc = binned_statistic_2d(\n x, y, v, 'count', bins=5)\n count2, binx2, biny2 = np.histogram2d(x, y, bins=5)\n\n assert_allclose(count1, count2)\n assert_allclose(binx1, binx2)\n assert_allclose(biny1, biny2)\n\n def test_2d_result_attributes(self):\n x = self.x\n y = self.y\n v = self.v\n\n res = binned_statistic_2d(x, y, v, 'count', bins=5)\n attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')\n check_named_results(res, attributes)\n\n def test_2d_sum(self):\n x = self.x\n y = self.y\n v = self.v\n\n sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)\n sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)\n\n assert_allclose(sum1, sum2)\n assert_allclose(binx1, binx2)\n assert_allclose(biny1, biny2)\n\n def test_2d_mean(self):\n x = self.x\n y = self.y\n v = self.v\n\n stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)\n stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)\n\n assert_allclose(stat1, stat2)\n assert_allclose(binx1, binx2)\n assert_allclose(biny1, biny2)\n\n def test_2d_mean_unicode(self):\n x = self.x\n y = self.y\n v = self.v\n stat1, binx1, biny1, bc = binned_statistic_2d(\n x, y, v, 'mean', bins=5)\n stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)\n assert_allclose(stat1, stat2)\n assert_allclose(binx1, binx2)\n assert_allclose(biny1, biny2)\n\n def test_2d_std(self):\n x = self.x\n y = self.y\n v = self.v\n\n stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)\n stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)\n\n assert_allclose(stat1, stat2)\n assert_allclose(binx1, binx2)\n assert_allclose(biny1, biny2)\n\n def test_2d_min(self):\n x = self.x\n y = self.y\n v = self.v\n\n stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5)\n stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5)\n\n assert_allclose(stat1, stat2)\n assert_allclose(binx1, binx2)\n assert_allclose(biny1, biny2)\n\n def test_2d_max(self):\n x = self.x\n y = self.y\n v = 
self.v\n\n stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5)\n stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5)\n\n assert_allclose(stat1, stat2)\n assert_allclose(binx1, binx2)\n assert_allclose(biny1, biny2)\n\n def test_2d_median(self):\n x = self.x\n y = self.y\n v = self.v\n\n stat1, binx1, biny1, bc = binned_statistic_2d(\n x, y, v, 'median', bins=5)\n stat2, binx2, biny2, bc = binned_statistic_2d(\n x, y, v, np.median, bins=5)\n\n assert_allclose(stat1, stat2)\n assert_allclose(binx1, binx2)\n assert_allclose(biny1, biny2)\n\n def test_2d_bincode(self):\n x = self.x[:20]\n y = self.y[:20]\n v = self.v[:20]\n\n count1, binx1, biny1, bc = binned_statistic_2d(\n x, y, v, 'count', bins=3)\n bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,\n 6, 11, 16, 6, 6, 11, 8])\n\n bcount = [(bc == i).sum() for i in np.unique(bc)]\n\n assert_allclose(bc, bc2)\n count1adj = count1[count1.nonzero()]\n assert_allclose(bcount, count1adj)\n\n def test_2d_multi_values(self):\n x = self.x\n y = self.y\n v = self.v\n w = self.w\n\n stat1v, binx1v, biny1v, bc1v = binned_statistic_2d(\n x, y, v, 'mean', bins=8)\n stat1w, binx1w, biny1w, bc1w = binned_statistic_2d(\n x, y, w, 'mean', bins=8)\n stat2, binx2, biny2, bc2 = binned_statistic_2d(\n x, y, [v, w], 'mean', bins=8)\n\n assert_allclose(stat2[0], stat1v)\n assert_allclose(stat2[1], stat1w)\n assert_allclose(binx1v, binx2)\n assert_allclose(biny1w, biny2)\n assert_allclose(bc1v, bc2)\n\n def test_2d_binnumbers_unraveled(self):\n x = self.x\n y = self.y\n v = self.v\n\n stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20)\n stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10)\n\n stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d(\n x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True)\n\n bcx3 = np.searchsorted(edgesx, x, side='right')\n bcy3 = np.searchsorted(edgesy, y, side='right')\n\n # `numpy.searchsorted` is non-inclusive on right-edge, compensate\n bcx3[x == x.max()] -= 1\n bcy3[y == y.max()] -= 1\n\n assert_allclose(bcx, bc2[0])\n assert_allclose(bcy, bc2[1])\n assert_allclose(bcx3, bc2[0])\n assert_allclose(bcy3, bc2[1])\n\n def test_dd_count(self):\n X = self.X\n v = self.v\n\n count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)\n count2, edges2 = np.histogramdd(X, bins=3)\n\n assert_allclose(count1, count2)\n assert_allclose(edges1, edges2)\n\n def test_dd_result_attributes(self):\n X = self.X\n v = self.v\n\n res = binned_statistic_dd(X, v, 'count', bins=3)\n attributes = ('statistic', 'bin_edges', 'binnumber')\n check_named_results(res, attributes)\n\n def test_dd_sum(self):\n X = self.X\n v = self.v\n\n sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)\n sum2, edges2 = np.histogramdd(X, bins=3, weights=v)\n\n assert_allclose(sum1, sum2)\n assert_allclose(edges1, edges2)\n\n def test_dd_mean(self):\n X = self.X\n v = self.v\n\n stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)\n stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)\n\n assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_dd_std(self):\n X = self.X\n v = self.v\n\n stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)\n stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)\n\n assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_dd_min(self):\n X = self.X\n v = self.v\n\n stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3)\n stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3)\n\n 
assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_dd_max(self):\n X = self.X\n v = self.v\n\n stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3)\n stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3)\n\n assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_dd_median(self):\n X = self.X\n v = self.v\n\n stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)\n stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)\n\n assert_allclose(stat1, stat2)\n assert_allclose(edges1, edges2)\n\n def test_dd_bincode(self):\n X = self.X[:20]\n v = self.v[:20]\n\n count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)\n bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,\n 32, 36, 91, 43, 87, 81, 81])\n\n bcount = [(bc == i).sum() for i in np.unique(bc)]\n\n assert_allclose(bc, bc2)\n count1adj = count1[count1.nonzero()]\n assert_allclose(bcount, count1adj)\n\n def test_dd_multi_values(self):\n X = self.X\n v = self.v\n w = self.w\n\n stat1v, edges1v, bc1v = binned_statistic_dd(X, v, np.std, bins=8)\n stat1w, edges1w, bc1w = binned_statistic_dd(X, w, np.std, bins=8)\n stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], np.std, bins=8)\n\n assert_allclose(stat2[0], stat1v)\n assert_allclose(stat2[1], stat1w)\n assert_allclose(edges1v, edges2)\n assert_allclose(edges1w, edges2)\n assert_allclose(bc1v, bc2)\n\n def test_dd_binnumbers_unraveled(self):\n X = self.X\n v = self.v\n\n stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15)\n stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20)\n stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10)\n\n stat2, edges2, bc2 = binned_statistic_dd(\n X, v, 'mean', bins=(15, 20, 10), expand_binnumbers=True)\n\n assert_allclose(bcx, bc2[0])\n assert_allclose(bcy, bc2[1])\n assert_allclose(bcz, bc2[2])\n\n def test_dd_binned_statistic_result(self):\n # NOTE: tests the reuse of bin_edges from previous call\n x = np.random.random((10000, 3))\n v = np.random.random((10000))\n bins = np.linspace(0, 1, 10)\n bins = (bins, bins, bins)\n\n result = binned_statistic_dd(x, v, 'mean', bins=bins)\n stat = result.statistic\n\n result = binned_statistic_dd(x, v, 'mean',\n binned_statistic_result=result)\n stat2 = result.statistic\n\n assert_allclose(stat, stat2)\n\n def test_dd_zero_dedges(self):\n x = np.random.random((10000, 3))\n v = np.random.random((10000))\n bins = np.linspace(0, 1, 10)\n bins = np.append(bins, 1)\n bins = (bins, bins, bins)\n with assert_raises(ValueError, match='difference is numerically 0'):\n binned_statistic_dd(x, v, 'mean', bins=bins)\n",
"\"\"\"\nA top-level linear programming interface. Currently this interface solves\nlinear programming problems via the Simplex and Interior-Point methods.\n\n.. versionadded:: 0.15.0\n\nFunctions\n---------\n.. autosummary::\n :toctree: generated/\n\n linprog\n linprog_verbose_callback\n linprog_terse_callback\n\n\"\"\"\n\nimport numpy as np\n\nfrom .optimize import OptimizeResult, OptimizeWarning\nfrom warnings import warn\nfrom ._linprog_ip import _linprog_ip\nfrom ._linprog_simplex import _linprog_simplex\nfrom ._linprog_rs import _linprog_rs\nfrom ._linprog_util import (\n _parse_linprog, _presolve, _get_Abc, _postprocess, _LPProblem, _autoscale)\nfrom copy import deepcopy\n\n__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback']\n\n__docformat__ = \"restructuredtext en\"\n\n\ndef linprog_verbose_callback(res):\n \"\"\"\n A sample callback function demonstrating the linprog callback interface.\n This callback produces detailed output to sys.stdout before each iteration\n and after the final iteration of the simplex algorithm.\n\n Parameters\n ----------\n res : A `scipy.optimize.OptimizeResult` consisting of the following fields:\n\n x : 1-D array\n The independent variable vector which optimizes the linear\n programming problem.\n fun : float\n Value of the objective function.\n success : bool\n True if the algorithm succeeded in finding an optimal solution.\n slack : 1-D array\n The values of the slack variables. Each slack variable corresponds\n to an inequality constraint. If the slack is zero, then the\n corresponding constraint is active.\n con : 1-D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``\n phase : int\n The phase of the optimization being executed. In phase 1 a basic\n feasible solution is sought and the T has an additional row\n representing an alternate objective function.\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n nit : int\n The number of iterations performed.\n message : str\n A string descriptor of the exit status of the optimization.\n \"\"\"\n x = res['x']\n fun = res['fun']\n phase = res['phase']\n status = res['status']\n nit = res['nit']\n message = res['message']\n complete = res['complete']\n\n saved_printoptions = np.get_printoptions()\n np.set_printoptions(linewidth=500,\n formatter={'float': lambda x: \"{0: 12.4f}\".format(x)})\n if status:\n print('--------- Simplex Early Exit -------\\n'.format(nit))\n print('The simplex method exited early with status {0:d}'.format(status))\n print(message)\n elif complete:\n print('--------- Simplex Complete --------\\n')\n print('Iterations required: {}'.format(nit))\n else:\n print('--------- Iteration {0:d} ---------\\n'.format(nit))\n\n if nit > 0:\n if phase == 1:\n print('Current Pseudo-Objective Value:')\n else:\n print('Current Objective Value:')\n print('f = ', fun)\n print()\n print('Current Solution Vector:')\n print('x = ', x)\n print()\n\n np.set_printoptions(**saved_printoptions)\n\n\ndef linprog_terse_callback(res):\n \"\"\"\n A sample callback function demonstrating the linprog callback interface.\n This callback produces brief output to sys.stdout before each iteration\n and after the final iteration of the simplex algorithm.\n\n Parameters\n ----------\n res : A `scipy.optimize.OptimizeResult` 
consisting of the following fields:\n\n x : 1-D array\n The independent variable vector which optimizes the linear\n programming problem.\n fun : float\n Value of the objective function.\n success : bool\n True if the algorithm succeeded in finding an optimal solution.\n slack : 1-D array\n The values of the slack variables. Each slack variable corresponds\n to an inequality constraint. If the slack is zero, then the\n corresponding constraint is active.\n con : 1-D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``.\n phase : int\n The phase of the optimization being executed. In phase 1 a basic\n feasible solution is sought and the T has an additional row\n representing an alternate objective function.\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n nit : int\n The number of iterations performed.\n message : str\n A string descriptor of the exit status of the optimization.\n \"\"\"\n nit = res['nit']\n x = res['x']\n\n if nit == 0:\n print(\"Iter: X:\")\n print(\"{0: <5d} \".format(nit), end=\"\")\n print(x)\n\n\ndef linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,\n bounds=None, method='interior-point', callback=None,\n options=None, x0=None):\n r\"\"\"\n Linear programming: minimize a linear objective function subject to linear\n equality and inequality constraints.\n\n Linear programming solves problems of the following form:\n\n .. math::\n\n \\min_x \\ & c^T x \\\\\n \\mbox{such that} \\ & A_{ub} x \\leq b_{ub},\\\\\n & A_{eq} x = b_{eq},\\\\\n & l \\leq x \\leq u ,\n\n where :math:`x` is a vector of decision variables; :math:`c`,\n :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and\n :math:`A_{ub}` and :math:`A_{eq}` are matrices.\n\n Informally, that's:\n\n minimize::\n\n c @ x\n\n such that::\n\n A_ub @ x <= b_ub\n A_eq @ x == b_eq\n lb <= x <= ub\n\n Note that by default ``lb = 0`` and ``ub = None`` unless specified with\n ``bounds``.\n\n Parameters\n ----------\n c : 1-D array\n The coefficients of the linear objective function to be minimized.\n A_ub : 2-D array, optional\n The inequality constraint matrix. Each row of ``A_ub`` specifies the\n coefficients of a linear inequality constraint on ``x``.\n b_ub : 1-D array, optional\n The inequality constraint vector. Each element represents an\n upper bound on the corresponding value of ``A_ub @ x``.\n A_eq : 2-D array, optional\n The equality constraint matrix. Each row of ``A_eq`` specifies the\n coefficients of a linear equality constraint on ``x``.\n b_eq : 1-D array, optional\n The equality constraint vector. Each element of ``A_eq @ x`` must equal\n the corresponding element of ``b_eq``.\n bounds : sequence, optional\n A sequence of ``(min, max)`` pairs for each element in ``x``, defining\n the minimum and maximum values of that decision variable. Use ``None`` to\n indicate that there is no bound. 
By default, bounds are ``(0, None)``\n (all decision variables are non-negative).\n If a single tuple ``(min, max)`` is provided, then ``min`` and\n ``max`` will serve as bounds for all decision variables.\n method : {'interior-point', 'revised simplex', 'simplex'}, optional\n The algorithm used to solve the standard form problem.\n :ref:`'interior-point' <optimize.linprog-interior-point>` (default),\n :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and\n :ref:`'simplex' <optimize.linprog-simplex>` (legacy)\n are supported.\n callback : callable, optional\n If a callback function is provided, it will be called at least once per\n iteration of the algorithm. The callback function must accept a single\n `scipy.optimize.OptimizeResult` consisting of the following fields:\n\n x : 1-D array\n The current solution vector.\n fun : float\n The current value of the objective function ``c @ x``.\n success : bool\n ``True`` when the algorithm has completed successfully.\n slack : 1-D array\n The (nominally positive) values of the slack,\n ``b_ub - A_ub @ x``.\n con : 1-D array\n The (nominally zero) residuals of the equality constraints,\n ``b_eq - A_eq @ x``.\n phase : int\n The phase of the algorithm being executed.\n status : int\n An integer representing the status of the algorithm.\n\n ``0`` : Optimization proceeding nominally.\n\n ``1`` : Iteration limit reached.\n\n ``2`` : Problem appears to be infeasible.\n\n ``3`` : Problem appears to be unbounded.\n\n ``4`` : Numerical difficulties encountered.\n\n nit : int\n The current iteration number.\n message : str\n A string descriptor of the algorithm status.\n\n options : dict, optional\n A dictionary of solver options. All methods accept the following\n options:\n\n maxiter : int\n Maximum number of iterations to perform.\n Default: see method-specific documentation.\n disp : bool\n Set to ``True`` to print convergence messages.\n Default: ``False``.\n autoscale : bool\n Set to ``True`` to automatically perform equilibration.\n Consider using this option if the numerical values in the\n constraints are separated by several orders of magnitude.\n Default: ``False``.\n presolve : bool\n Set to ``False`` to disable automatic presolve.\n Default: ``True``.\n rr : bool\n Set to ``False`` to disable automatic redundancy removal.\n Default: ``True``.\n\n For method-specific options, see\n :func:`show_options('linprog') <show_options>`.\n\n x0 : 1-D array, optional\n Guess values of the decision variables, which will be refined by\n the optimization algorithm. 
This argument is currently used only by the\n 'revised simplex' method, and can only be used if `x0` represents a\n basic feasible solution.\n\n\n Returns\n -------\n res : OptimizeResult\n A :class:`scipy.optimize.OptimizeResult` consisting of the fields:\n\n x : 1-D array\n The values of the decision variables that minimizes the\n objective function while satisfying the constraints.\n fun : float\n The optimal value of the objective function ``c @ x``.\n slack : 1-D array\n The (nominally positive) values of the slack variables,\n ``b_ub - A_ub @ x``.\n con : 1-D array\n The (nominally zero) residuals of the equality constraints,\n ``b_eq - A_eq @ x``.\n success : bool\n ``True`` when the algorithm succeeds in finding an optimal\n solution.\n status : int\n An integer representing the exit status of the algorithm.\n\n ``0`` : Optimization terminated successfully.\n\n ``1`` : Iteration limit reached.\n\n ``2`` : Problem appears to be infeasible.\n\n ``3`` : Problem appears to be unbounded.\n\n ``4`` : Numerical difficulties encountered.\n\n nit : int\n The total number of iterations performed in all phases.\n message : str\n A string descriptor of the exit status of the algorithm.\n\n See Also\n --------\n show_options : Additional options accepted by the solvers.\n\n Notes\n -----\n This section describes the available solvers that can be selected by the\n 'method' parameter.\n\n :ref:`'interior-point' <optimize.linprog-interior-point>` is the default\n as it is typically the fastest and most robust method.\n :ref:`'revised simplex' <optimize.linprog-revised_simplex>` is more\n accurate for the problems it solves.\n :ref:`'simplex' <optimize.linprog-simplex>` is the legacy method and is\n included for backwards compatibility and educational purposes.\n\n Method *interior-point* uses the primal-dual path following algorithm\n as outlined in [4]_. This algorithm supports sparse constraint matrices and\n is typically faster than the simplex methods, especially for large, sparse\n problems. Note, however, that the solution returned may be slightly less\n accurate than those of the simplex methods and will not, in general,\n correspond with a vertex of the polytope defined by the constraints.\n\n .. versionadded:: 1.0.0\n\n Method *revised simplex* uses the revised simplex method as described in\n [9]_, except that a factorization [11]_ of the basis matrix, rather than\n its inverse, is efficiently maintained and used to solve the linear systems\n at each iteration of the algorithm.\n\n .. versionadded:: 1.3.0\n\n Method *simplex* uses a traditional, full-tableau implementation of\n Dantzig's simplex algorithm [1]_, [2]_ (*not* the\n Nelder-Mead simplex). This algorithm is included for backwards\n compatibility and educational purposes.\n\n .. versionadded:: 0.15.0\n\n Before applying any method, a presolve procedure based on [8]_ attempts\n to identify trivial infeasibilities, trivial unboundedness, and potential\n problem simplifications. Specifically, it checks for:\n\n - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;\n - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained\n variables;\n - column singletons in ``A_eq``, representing fixed variables; and\n - column singletons in ``A_ub``, representing simple bounds.\n\n If presolve reveals that the problem is unbounded (e.g. 
an unconstrained\n and unbounded variable has negative cost) or infeasible (e.g., a row of\n zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver\n terminates with the appropriate status code. Note that presolve terminates\n as soon as any sign of unboundedness is detected; consequently, a problem\n may be reported as unbounded when in reality the problem is infeasible\n (but infeasibility has not been detected yet). Therefore, if it is\n important to know whether the problem is actually infeasible, solve the\n problem again with option ``presolve=False``.\n\n If neither infeasibility nor unboundedness are detected in a single pass\n of the presolve, bounds are tightened where possible and fixed\n variables are removed from the problem. Then, linearly dependent rows\n of the ``A_eq`` matrix are removed, (unless they represent an\n infeasibility) to avoid numerical difficulties in the primary solve\n routine. Note that rows that are nearly linearly dependent (within a\n prescribed tolerance) may also be removed, which can change the optimal\n solution in rare cases. If this is a concern, eliminate redundancy from\n your problem formulation and run with option ``rr=False`` or\n ``presolve=False``.\n\n Several potential improvements can be made here: additional presolve\n checks outlined in [8]_ should be implemented, the presolve routine should\n be run multiple times (until no further simplifications can be made), and\n more of the efficiency improvements from [5]_ should be implemented in the\n redundancy removal routines.\n\n After presolve, the problem is transformed to standard form by converting\n the (tightened) simple bounds to upper bound constraints, introducing\n non-negative slack variables for inequality constraints, and expressing\n unbounded variables as the difference between two non-negative variables.\n Optionally, the problem is automatically scaled via equilibration [12]_.\n The selected algorithm solves the standard form problem, and a\n postprocessing routine converts the result to a solution to the original\n problem.\n\n References\n ----------\n .. [1] Dantzig, George B., Linear programming and extensions. Rand\n Corporation Research Study Princeton Univ. Press, Princeton, NJ,\n 1963\n .. [2] Hillier, S.H. and Lieberman, G.J. (1995), \"Introduction to\n Mathematical Programming\", McGraw-Hill, Chapter 4.\n .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.\n Mathematics of Operations Research (2), 1977: pp. 103-107.\n .. [4] Andersen, Erling D., and Knud D. Andersen. \"The MOSEK interior point\n optimizer for linear programming: an implementation of the\n homogeneous algorithm.\" High performance optimization. Springer US,\n 2000. 197-232.\n .. [5] Andersen, Erling D. \"Finding all linearly dependent rows in\n large-scale linear programming.\" Optimization Methods and Software\n 6.3 (1995): 219-227.\n .. [6] Freund, Robert M. \"Primal-Dual Interior-Point Methods for Linear\n Programming based on Newton's Method.\" Unpublished Course Notes,\n March 2004. Available 2/25/2017 at\n https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf\n .. [7] Fourer, Robert. \"Solving Linear Programs by Interior-Point Methods.\"\n Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at\n http://www.4er.org/CourseNotes/Book%20B/B-III.pdf\n .. [8] Andersen, Erling D., and Knud D. Andersen. 
\"Presolving in linear\n programming.\" Mathematical Programming 71.2 (1995): 221-245.\n .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. \"Introduction to linear\n programming.\" Athena Scientific 1 (1997): 997.\n .. [10] Andersen, Erling D., et al. Implementation of interior point\n methods for large scale linear programming. HEC/Universite de\n Geneve, 1996.\n .. [11] Bartels, Richard H. \"A stabilization of the simplex method.\"\n Journal in Numerische Mathematik 16.5 (1971): 414-434.\n .. [12] Tomlin, J. A. \"On scaling linear programming problems.\"\n Mathematical Programming Study 4 (1975): 146-166.\n\n Examples\n --------\n Consider the following problem:\n\n .. math::\n\n \\min_{x_0, x_1} \\ -x_0 + 4x_1 & \\\\\n \\mbox{such that} \\ -3x_0 + x_1 & \\leq 6,\\\\\n -x_0 - 2x_1 & \\geq -4,\\\\\n x_1 & \\geq -3.\n\n The problem is not presented in the form accepted by `linprog`. This is\n easily remedied by converting the \"greater than\" inequality\n constraint to a \"less than\" inequality constraint by\n multiplying both sides by a factor of :math:`-1`. Note also that the last\n constraint is really the simple bound :math:`-3 \\leq x_1 \\leq \\infty`.\n Finally, since there are no bounds on :math:`x_0`, we must explicitly\n specify the bounds :math:`-\\infty \\leq x_0 \\leq \\infty`, as the\n default is for variables to be non-negative. After collecting coeffecients\n into arrays and tuples, the input for this problem is:\n\n >>> c = [-1, 4]\n >>> A = [[-3, 1], [1, 2]]\n >>> b = [6, 4]\n >>> x0_bounds = (None, None)\n >>> x1_bounds = (-3, None)\n >>> from scipy.optimize import linprog\n >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])\n\n Note that the default method for `linprog` is 'interior-point', which is\n approximate by nature.\n\n >>> print(res)\n con: array([], dtype=float64)\n fun: -21.99999984082494 # may vary\n message: 'Optimization terminated successfully.'\n nit: 6 # may vary\n slack: array([3.89999997e+01, 8.46872439e-08] # may vary\n status: 0\n success: True\n x: array([ 9.99999989, -2.99999999]) # may vary\n\n If you need greater accuracy, try 'revised simplex'.\n\n >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds], method='revised simplex')\n >>> print(res)\n con: array([], dtype=float64)\n fun: -22.0 # may vary\n message: 'Optimization terminated successfully.'\n nit: 1 # may vary\n slack: array([39., 0.]) # may vary\n status: 0\n success: True\n x: array([10., -3.]) # may vary\n\n \"\"\"\n meth = method.lower()\n\n if x0 is not None and meth != \"revised simplex\":\n warning_message = \"x0 is used only when method is 'revised simplex'. 
\"\n warn(warning_message, OptimizeWarning)\n\n lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0)\n lp, solver_options = _parse_linprog(lp, options)\n tol = solver_options.get('tol', 1e-9)\n\n iteration = 0\n complete = False # will become True if solved in presolve\n undo = []\n\n # Keep the original arrays to calculate slack/residuals for original\n # problem.\n lp_o = deepcopy(lp)\n\n # Solve trivial problem, eliminate variables, tighten bounds, etc.\n c0 = 0 # we might get a constant term in the objective\n if solver_options.pop('presolve', True):\n rr = solver_options.pop('rr', True)\n (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr, tol)\n\n C, b_scale = 1, 1 # for trivial unscaling if autoscale is not used\n postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)\n\n if not complete:\n A, b, c, c0, x0 = _get_Abc(lp, c0, undo)\n if solver_options.pop('autoscale', False):\n A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)\n postsolve_args = postsolve_args[:-2] + (C, b_scale)\n\n if meth == 'simplex':\n x, status, message, iteration = _linprog_simplex(\n c, c0=c0, A=A, b=b, callback=callback,\n postsolve_args=postsolve_args, **solver_options)\n elif meth == 'interior-point':\n x, status, message, iteration = _linprog_ip(\n c, c0=c0, A=A, b=b, callback=callback,\n postsolve_args=postsolve_args, **solver_options)\n elif meth == 'revised simplex':\n x, status, message, iteration = _linprog_rs(\n c, c0=c0, A=A, b=b, x0=x0, callback=callback,\n postsolve_args=postsolve_args, **solver_options)\n else:\n raise ValueError('Unknown solver %s' % method)\n\n # Eliminate artificial variables, re-introduce presolved variables, etc.\n # need modified bounds here to translate variables appropriately\n disp = solver_options.get('disp', False)\n\n x, fun, slack, con, status, message = _postprocess(x, postsolve_args,\n complete, status,\n message, tol,\n iteration, disp)\n\n sol = {\n 'x': x,\n 'fun': fun,\n 'slack': slack,\n 'con': con,\n 'status': status,\n 'message': message,\n 'nit': iteration,\n 'success': status == 0}\n\n return OptimizeResult(sol)\n",
"# -*- coding: utf-8 -*-\nfrom numpy import cos, exp, log, pi, sin, sqrt\n\ntry:\n from scipy.misc import factorial\nexcept ImportError:\n pass\nfrom .go_benchmark import Benchmark\n\n#-----------------------------------------------------------------------\n# UNIVARIATE SINGLE-OBJECTIVE PROBLEMS\n#-----------------------------------------------------------------------\nclass Problem02(Benchmark):\n\n \"\"\"\n Univariate Problem02 objective function.\n\n This class defines the Univariate Problem02 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem02}}(x) = \\\\sin(x) + \\\\sin \\\\left(\\\\frac{10}{3}x \\\\right)\n\n Bound constraints: :math:`x \\\\in [2.7, 7.5]`\n\n .. figure:: figures/Problem02.png\n :alt: Univariate Problem02 function\n :align: center\n\n **Univariate Problem02 function**\n\n *Global optimum*: :math:`f(x)=-1.899599` for :math:`x = 5.145735`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(2.7, 7.5)]\n\n self.global_optimum = 5.145735\n self.fglob = -1.899599\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return sin(x) + sin(10.0 / 3.0 * x)\n\n\nclass Problem03(Benchmark):\n\n \"\"\"\n Univariate Problem03 objective function.\n\n This class defines the Univariate Problem03 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem03}}(x) = - \\\\sum_{k=1}^6 k \\\\sin[(k+1)x+k]\n\n Bound constraints: :math:`x \\\\in [-10, 10]`\n\n .. figure:: figures/Problem03.png\n :alt: Univariate Problem03 function\n :align: center\n\n **Univariate Problem03 function**\n\n *Global optimum*: :math:`f(x)=-12.03124` for :math:`x = -6.7745761`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(-10, 10)]\n\n self.global_optimum = -6.7745761\n self.fglob = -12.03124\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n y = 0.0\n for k in range(1, 6):\n y += k * sin((k + 1) * x + k)\n\n return -y\n\n\nclass Problem04(Benchmark):\n\n \"\"\"\n Univariate Problem04 objective function.\n\n This class defines the Univariate Problem04 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem04}}(x) = - \\\\left(16x^2 - 24x + 5 \\\\right) e^{-x}\n\n Bound constraints: :math:`x \\\\in [1.9, 3.9]`\n\n .. figure:: figures/Problem04.png\n :alt: Univariate Problem04 function\n :align: center\n\n **Univariate Problem04 function**\n\n *Global optimum*: :math:`f(x)=-3.85045` for :math:`x = 2.868034`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(1.9, 3.9)]\n\n self.global_optimum = 2.868034\n self.fglob = -3.85045\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return -(16 * x ** 2 - 24 * x + 5) * exp(-x)\n\n\nclass Problem05(Benchmark):\n\n \"\"\"\n Univariate Problem05 objective function.\n\n This class defines the Univariate Problem05 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem05}}(x) = - \\\\left(1.4 - 3x \\\\right) \\\\sin(18x)\n\n Bound constraints: :math:`x \\\\in [0, 1.2]`\n\n .. 
figure:: figures/Problem05.png\n :alt: Univariate Problem05 function\n :align: center\n\n **Univariate Problem05 function**\n\n *Global optimum*: :math:`f(x)=-1.48907` for :math:`x = 0.96609`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(0.0, 1.2)]\n\n self.global_optimum = 0.96609\n self.fglob = -1.48907\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return -(1.4 - 3 * x) * sin(18.0 * x)\n\n\nclass Problem06(Benchmark):\n\n \"\"\"\n Univariate Problem06 objective function.\n\n This class defines the Univariate Problem06 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem06}}(x) = - \\\\left[x + \\\\sin(x) \\\\right] e^{-x^2}\n\n Bound constraints: :math:`x \\\\in [-10, 10]`\n\n .. figure:: figures/Problem06.png\n :alt: Univariate Problem06 function\n :align: center\n\n **Univariate Problem06 function**\n\n *Global optimum*: :math:`f(x)=-0.824239` for :math:`x = 0.67956`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(-10.0, 10.0)]\n\n self.global_optimum = 0.67956\n self.fglob = -0.824239\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return -(x + sin(x)) * exp(-x ** 2.0)\n\n\nclass Problem07(Benchmark):\n\n \"\"\"\n Univariate Problem07 objective function.\n\n This class defines the Univariate Problem07 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem07}}(x) = \\\\sin(x) + \\\\sin \\\\left(\\\\frac{10}{3}x \\\\right) + \\\\log(x) - 0.84x + 3\n\n Bound constraints: :math:`x \\\\in [2.7, 7.5]`\n\n .. figure:: figures/Problem07.png\n :alt: Univariate Problem07 function\n :align: center\n\n **Univariate Problem07 function**\n\n *Global optimum*: :math:`f(x)=-1.6013` for :math:`x = 5.19978`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(2.7, 7.5)]\n\n self.global_optimum = 5.19978\n self.fglob = -1.6013\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return sin(x) + sin(10.0 / 3.0 * x) + log(x) - 0.84 * x + 3\n\n\nclass Problem08(Benchmark):\n\n \"\"\"\n Univariate Problem08 objective function.\n\n This class defines the Univariate Problem08 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem08}}(x) = - \\\\sum_{k=1}^6 k \\\\cos[(k+1)x+k]\n\n Bound constraints: :math:`x \\\\in [-10, 10]`\n\n .. figure:: figures/Problem08.png\n :alt: Univariate Problem08 function\n :align: center\n\n **Univariate Problem08 function**\n\n *Global optimum*: :math:`f(x)=-14.508` for :math:`x = -7.083506`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(-10, 10)]\n\n self.global_optimum = -7.083506\n self.fglob = -14.508\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n\n y = 0.0\n for k in range(1, 6):\n y += k * cos((k + 1) * x + k)\n\n return -y\n\n\nclass Problem09(Benchmark):\n\n \"\"\"\n Univariate Problem09 objective function.\n\n This class defines the Univariate Problem09 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem09}}(x) = \\\\sin(x) + \\\\sin \\\\left(\\\\frac{2}{3} x \\\\right)\n\n Bound constraints: :math:`x \\\\in [3.1, 20.4]`\n\n .. 
figure:: figures/Problem09.png\n :alt: Univariate Problem09 function\n :align: center\n\n **Univariate Problem09 function**\n\n *Global optimum*: :math:`f(x)=-1.90596` for :math:`x = 17.039`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(3.1, 20.4)]\n\n self.global_optimum = 17.039\n self.fglob = -1.90596\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return sin(x) + sin(2.0 / 3.0 * x)\n\n\nclass Problem10(Benchmark):\n\n \"\"\"\n Univariate Problem10 objective function.\n\n This class defines the Univariate Problem10 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem10}}(x) = -x\\\\sin(x)\n\n Bound constraints: :math:`x \\\\in [0, 10]`\n\n .. figure:: figures/Problem10.png\n :alt: Univariate Problem10 function\n :align: center\n\n **Univariate Problem10 function**\n\n *Global optimum*: :math:`f(x)=-7.916727` for :math:`x = 7.9787`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(0, 10)]\n\n self.global_optimum = 7.9787\n self.fglob = -7.916727\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return -x * sin(x)\n\n\nclass Problem11(Benchmark):\n\n \"\"\"\n Univariate Problem11 objective function.\n\n This class defines the Univariate Problem11 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem11}}(x) = 2\\\\cos(x) + \\\\cos(2x)\n\n Bound constraints: :math:`x \\\\in [-\\\\pi/2, 2\\\\pi]`\n\n .. figure:: figures/Problem11.png\n :alt: Univariate Problem11 function\n :align: center\n\n **Univariate Problem11 function**\n\n *Global optimum*: :math:`f(x)=-1.5` for :math:`x = 2.09439`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(-pi / 2, 2 * pi)]\n\n self.global_optimum = 2.09439\n self.fglob = -1.5\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return 2 * cos(x) + cos(2 * x)\n\n\nclass Problem12(Benchmark):\n\n \"\"\"\n Univariate Problem12 objective function.\n\n This class defines the Univariate Problem12 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem12}}(x) = \\\\sin^3(x) + \\\\cos^3(x)\n\n Bound constraints: :math:`x \\\\in [0, 2\\\\pi]`\n\n .. figure:: figures/Problem12.png\n :alt: Univariate Problem12 function\n :align: center\n\n **Univariate Problem12 function**\n\n *Global optimum*: :math:`f(x)=-1` for :math:`x = \\\\pi`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(0, 2 * pi)]\n\n self.global_optimum = pi\n self.fglob = -1\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return (sin(x)) ** 3.0 + (cos(x)) ** 3.0\n\n\nclass Problem13(Benchmark):\n\n \"\"\"\n Univariate Problem13 objective function.\n\n This class defines the Univariate Problem13 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem13}}(x) = -x^{2/3} - (1 - x^2)^{1/3}\n\n Bound constraints: :math:`x \\\\in [0.001, 0.99]`\n\n .. 
figure:: figures/Problem13.png\n :alt: Univariate Problem13 function\n :align: center\n\n **Univariate Problem13 function**\n\n *Global optimum*: :math:`f(x)=-1.5874` for :math:`x = 1/\\\\sqrt(2)`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(0.001, 0.99)]\n\n self.global_optimum = 1.0 / sqrt(2)\n self.fglob = -1.5874\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return -x ** (2.0 / 3.0) - (1.0 - x ** 2) ** (1.0 / 3.0)\n\n\nclass Problem14(Benchmark):\n\n \"\"\"\n Univariate Problem14 objective function.\n\n This class defines the Univariate Problem14 global optimization problem. This\n is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem14}}(x) = -e^{-x} \\\\sin(2\\\\pi x)\n\n Bound constraints: :math:`x \\\\in [0, 4]`\n\n .. figure:: figures/Problem14.png\n :alt: Univariate Problem14 function\n :align: center\n\n **Univariate Problem14 function**\n\n *Global optimum*: :math:`f(x)=-0.788685` for :math:`x = 0.224885`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(0.0, 4.0)]\n\n self.global_optimum = 0.224885\n self.fglob = -0.788685\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return -exp(-x) * sin(2.0 * pi * x)\n\n\nclass Problem15(Benchmark):\n\n \"\"\"\n Univariate Problem15 objective function.\n\n This class defines the Univariate Problem15 global optimization problem.\n This is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem15}}(x) = \\\\frac{x^{2} - 5 x + 6}{x^{2} + 1}\n\n Bound constraints: :math:`x \\\\in [-5, 5]`\n\n .. figure:: figures/Problem15.png\n :alt: Univariate Problem15 function\n :align: center\n\n **Univariate Problem15 function**\n\n *Global optimum*: :math:`f(x)=-0.03553` for :math:`x = 2.41422`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(-5.0, 5.0)]\n\n self.global_optimum = 2.41422\n self.fglob = -0.03553\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return -(-x ** 2.0 + 5 * x - 6) / (x ** 2 + 1)\n\n\nclass Problem18(Benchmark):\n\n \"\"\"\n Univariate Problem18 objective function.\n\n This class defines the Univariate Problem18 global optimization problem.\n This is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem18}}(x) = \\\\begin{cases}(x-2)^2 & \\\\textrm{if} \\\\hspace{5pt} x \\\\leq 3 \\\\\\\\\n 2\\\\log(x-2)+1&\\\\textrm{otherwise}\\\\end{cases}\n\n Bound constraints: :math:`x \\\\in [0, 6]`\n\n .. figure:: figures/Problem18.png\n :alt: Univariate Problem18 function\n :align: center\n\n **Univariate Problem18 function**\n\n *Global optimum*: :math:`f(x)=0` for :math:`x = 2`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(0.0, 6.0)]\n\n self.global_optimum = 2\n self.fglob = 0\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n\n if x <= 3:\n return (x - 2.0) ** 2.0\n\n return 2 * log(x - 2.0) + 1\n\n\nclass Problem20(Benchmark):\n\n \"\"\"\n Univariate Problem20 objective function.\n\n This class defines the Univariate Problem20 global optimization problem.\n This is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem20}}(x) = -[x-\\\\sin(x)]e^{-x^2}\n\n Bound constraints: :math:`x \\\\in [-10, 10]`\n\n .. 
figure:: figures/Problem20.png\n :alt: Univariate Problem20 function\n :align: center\n\n **Univariate Problem20 function**\n\n *Global optimum*: :math:`f(x)=-0.0634905` for :math:`x = 1.195137`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(-10, 10)]\n\n self.global_optimum = 1.195137\n self.fglob = -0.0634905\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return -(x - sin(x)) * exp(-x ** 2.0)\n\n\nclass Problem21(Benchmark):\n\n \"\"\"\n Univariate Problem21 objective function.\n\n This class defines the Univariate Problem21 global optimization problem.\n This is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem21}}(x) = x \\\\sin(x) + x \\\\cos(2x)\n\n Bound constraints: :math:`x \\\\in [0, 10]`\n\n .. figure:: figures/Problem21.png\n :alt: Univariate Problem21 function\n :align: center\n\n **Univariate Problem21 function**\n\n *Global optimum*: :math:`f(x)=-9.50835` for :math:`x = 4.79507`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(0, 10)]\n\n self.global_optimum = 4.79507\n self.fglob = -9.50835\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return x * sin(x) + x * cos(2.0 * x)\n\n\nclass Problem22(Benchmark):\n\n \"\"\"\n Univariate Problem22 objective function.\n\n This class defines the Univariate Problem22 global optimization problem.\n This is a multimodal minimization problem defined as follows:\n\n .. math::\n\n f_{\\\\text{Problem22}}(x) = e^{-3x} - \\\\sin^3(x)\n\n Bound constraints: :math:`x \\\\in [0, 20]`\n\n .. figure:: figures/Problem22.png\n :alt: Univariate Problem22 function\n :align: center\n\n **Univariate Problem22 function**\n\n *Global optimum*: :math:`f(x)=e^{-27\\\\pi/2} - 1` for :math:`x = 9\\\\pi/2`\n\n \"\"\"\n\n def __init__(self, dimensions=1):\n Benchmark.__init__(self, dimensions)\n\n self._bounds = [(0, 20)]\n\n self.global_optimum = 9.0 * pi / 2.0\n self.fglob = exp(-27.0 * pi / 2.0) - 1.0\n\n def fun(self, x, *args):\n self.nfev += 1\n\n x = x[0]\n return exp(-3.0 * x) - (sin(x)) ** 3.0\n",
"\"\"\"\nDogleg algorithm with rectangular trust regions for least-squares minimization.\n\nThe description of the algorithm can be found in [Voglis]_. The algorithm does\ntrust-region iterations, but the shape of trust regions is rectangular as\nopposed to conventional elliptical. The intersection of a trust region and\nan initial feasible region is again some rectangle. Thus, on each iteration a\nbound-constrained quadratic optimization problem is solved.\n\nA quadratic problem is solved by well-known dogleg approach, where the\nfunction is minimized along piecewise-linear \"dogleg\" path [NumOpt]_,\nChapter 4. If Jacobian is not rank-deficient then the function is decreasing\nalong this path, and optimization amounts to simply following along this\npath as long as a point stays within the bounds. A constrained Cauchy step\n(along the anti-gradient) is considered for safety in rank deficient cases,\nin this situations the convergence might be slow.\n\nIf during iterations some variable hit the initial bound and the component\nof anti-gradient points outside the feasible region, then a next dogleg step\nwon't make any progress. At this state such variables satisfy first-order\noptimality conditions and they are excluded before computing a next dogleg\nstep.\n\nGauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense\nJacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for\ndense and sparse matrices, or Jacobian being LinearOperator). The second\noption allows to solve very large problems (up to couple of millions of\nresiduals on a regular PC), provided the Jacobian matrix is sufficiently\nsparse. But note that dogbox is not very good for solving problems with\nlarge number of constraints, because of variables exclusion-inclusion on each\niteration (a required number of function evaluations might be high or accuracy\nof a solution will be poor), thus its large-scale usage is probably limited\nto unconstrained problems.\n\nReferences\n----------\n.. [Voglis] C. Voglis and I. E. Lagaris, \"A Rectangular Trust Region Dogleg\n Approach for Unconstrained and Bound Constrained Nonlinear\n Optimization\", WSEAS International Conference on Applied\n Mathematics, Corfu, Greece, 2004.\n.. [NumOpt] J. Nocedal and S. J. 
Wright, \"Numerical optimization, 2nd edition\".\n\"\"\"\nimport numpy as np\nfrom numpy.linalg import lstsq, norm\n\nfrom scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr\nfrom scipy.optimize import OptimizeResult\n\nfrom .common import (\n step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic,\n build_quadratic_1d, minimize_quadratic_1d, compute_grad,\n compute_jac_scale, check_termination, scale_for_robust_loss_function,\n print_header_nonlinear, print_iteration_nonlinear)\n\n\ndef lsmr_operator(Jop, d, active_set):\n \"\"\"Compute LinearOperator to use in LSMR by dogbox algorithm.\n\n `active_set` mask is used to excluded active variables from computations\n of matrix-vector products.\n \"\"\"\n m, n = Jop.shape\n\n def matvec(x):\n x_free = x.ravel().copy()\n x_free[active_set] = 0\n return Jop.matvec(x * d)\n\n def rmatvec(x):\n r = d * Jop.rmatvec(x)\n r[active_set] = 0\n return r\n\n return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float)\n\n\ndef find_intersection(x, tr_bounds, lb, ub):\n \"\"\"Find intersection of trust-region bounds and initial bounds.\n\n Returns\n -------\n lb_total, ub_total : ndarray with shape of x\n Lower and upper bounds of the intersection region.\n orig_l, orig_u : ndarray of bool with shape of x\n True means that an original bound is taken as a corresponding bound\n in the intersection region.\n tr_l, tr_u : ndarray of bool with shape of x\n True means that a trust-region bound is taken as a corresponding bound\n in the intersection region.\n \"\"\"\n lb_centered = lb - x\n ub_centered = ub - x\n\n lb_total = np.maximum(lb_centered, -tr_bounds)\n ub_total = np.minimum(ub_centered, tr_bounds)\n\n orig_l = np.equal(lb_total, lb_centered)\n orig_u = np.equal(ub_total, ub_centered)\n\n tr_l = np.equal(lb_total, -tr_bounds)\n tr_u = np.equal(ub_total, tr_bounds)\n\n return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u\n\n\ndef dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub):\n \"\"\"Find dogleg step in a rectangular region.\n\n Returns\n -------\n step : ndarray, shape (n,)\n Computed dogleg step.\n bound_hits : ndarray of int, shape (n,)\n Each component shows whether a corresponding variable hits the\n initial bound after the step is taken:\n * 0 - a variable doesn't hit the bound.\n * -1 - lower bound is hit.\n * 1 - upper bound is hit.\n tr_hit : bool\n Whether the step hit the boundary of the trust-region.\n \"\"\"\n lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(\n x, tr_bounds, lb, ub\n )\n bound_hits = np.zeros_like(x, dtype=int)\n\n if in_bounds(newton_step, lb_total, ub_total):\n return newton_step, bound_hits, False\n\n to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total)\n\n # The classical dogleg algorithm would check if Cauchy step fits into\n # the bounds, and just return it constrained version if not. But in a\n # rectangular trust region it makes sense to try to improve constrained\n # Cauchy step too. 
Thus, we don't distinguish these two cases.\n\n cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g\n\n step_diff = newton_step - cauchy_step\n step_size, hits = step_size_to_bound(cauchy_step, step_diff,\n lb_total, ub_total)\n bound_hits[(hits < 0) & orig_l] = -1\n bound_hits[(hits > 0) & orig_u] = 1\n tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u)\n\n return cauchy_step + step_size * step_diff, bound_hits, tr_hit\n\n\ndef dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale,\n loss_function, tr_solver, tr_options, verbose):\n f = f0\n f_true = f.copy()\n nfev = 1\n\n J = J0\n njev = 1\n\n if loss_function is not None:\n rho = loss_function(f)\n cost = 0.5 * np.sum(rho[0])\n J, f = scale_for_robust_loss_function(J, f, rho)\n else:\n cost = 0.5 * np.dot(f, f)\n\n g = compute_grad(J, f)\n\n jac_scale = isinstance(x_scale, str) and x_scale == 'jac'\n if jac_scale:\n scale, scale_inv = compute_jac_scale(J)\n else:\n scale, scale_inv = x_scale, 1 / x_scale\n\n Delta = norm(x0 * scale_inv, ord=np.inf)\n if Delta == 0:\n Delta = 1.0\n\n on_bound = np.zeros_like(x0, dtype=int)\n on_bound[np.equal(x0, lb)] = -1\n on_bound[np.equal(x0, ub)] = 1\n\n x = x0\n step = np.empty_like(x0)\n\n if max_nfev is None:\n max_nfev = x0.size * 100\n\n termination_status = None\n iteration = 0\n step_norm = None\n actual_reduction = None\n\n if verbose == 2:\n print_header_nonlinear()\n\n while True:\n active_set = on_bound * g < 0\n free_set = ~active_set\n\n g_free = g[free_set]\n g_full = g.copy()\n g[active_set] = 0\n\n g_norm = norm(g, ord=np.inf)\n if g_norm < gtol:\n termination_status = 1\n\n if verbose == 2:\n print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,\n step_norm, g_norm)\n\n if termination_status is not None or nfev == max_nfev:\n break\n\n x_free = x[free_set]\n lb_free = lb[free_set]\n ub_free = ub[free_set]\n scale_free = scale[free_set]\n\n # Compute (Gauss-)Newton and build quadratic model for Cauchy step.\n if tr_solver == 'exact':\n J_free = J[:, free_set]\n newton_step = lstsq(J_free, -f, rcond=-1)[0]\n\n # Coefficients for the quadratic model along the anti-gradient.\n a, b = build_quadratic_1d(J_free, g_free, -g_free)\n elif tr_solver == 'lsmr':\n Jop = aslinearoperator(J)\n\n # We compute lsmr step in scaled variables and then\n # transform back to normal variables, if lsmr would give exact lsq\n # solution, this would be equivalent to not doing any\n # transformations, but from experience it's better this way.\n\n # We pass active_set to make computations as if we selected\n # the free subset of J columns, but without actually doing any\n # slicing, which is expensive for sparse matrices and impossible\n # for LinearOperator.\n\n lsmr_op = lsmr_operator(Jop, scale, active_set)\n newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]\n newton_step *= scale_free\n\n # Components of g for active variables were zeroed, so this call\n # is correct and equivalent to using J_free and g_free.\n a, b = build_quadratic_1d(Jop, g, -g)\n\n actual_reduction = -1.0\n while actual_reduction <= 0 and nfev < max_nfev:\n tr_bounds = Delta * scale_free\n\n step_free, on_bound_free, tr_hit = dogleg_step(\n x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)\n\n step.fill(0.0)\n step[free_set] = step_free\n\n if tr_solver == 'exact':\n predicted_reduction = -evaluate_quadratic(J_free, g_free,\n step_free)\n elif tr_solver == 'lsmr':\n predicted_reduction = -evaluate_quadratic(Jop, g, step)\n\n x_new = x + step\n f_new = fun(x_new)\n 
nfev += 1\n\n step_h_norm = norm(step * scale_inv, ord=np.inf)\n\n if not np.all(np.isfinite(f_new)):\n Delta = 0.25 * step_h_norm\n continue\n\n # Usual trust-region step quality estimation.\n if loss_function is not None:\n cost_new = loss_function(f_new, cost_only=True)\n else:\n cost_new = 0.5 * np.dot(f_new, f_new)\n actual_reduction = cost - cost_new\n\n Delta, ratio = update_tr_radius(\n Delta, actual_reduction, predicted_reduction,\n step_h_norm, tr_hit\n )\n\n step_norm = norm(step)\n termination_status = check_termination(\n actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol)\n\n if termination_status is not None:\n break\n\n if actual_reduction > 0:\n on_bound[free_set] = on_bound_free\n\n x = x_new\n # Set variables exactly at the boundary.\n mask = on_bound == -1\n x[mask] = lb[mask]\n mask = on_bound == 1\n x[mask] = ub[mask]\n\n f = f_new\n f_true = f.copy()\n\n cost = cost_new\n\n J = jac(x, f)\n njev += 1\n\n if loss_function is not None:\n rho = loss_function(f)\n J, f = scale_for_robust_loss_function(J, f, rho)\n\n g = compute_grad(J, f)\n\n if jac_scale:\n scale, scale_inv = compute_jac_scale(J, scale_inv)\n else:\n step_norm = 0\n actual_reduction = 0\n\n iteration += 1\n\n if termination_status is None:\n termination_status = 0\n\n return OptimizeResult(\n x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm,\n active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)\n",
"\"\"\"Docstring components common to several ndimage functions.\"\"\"\nfrom scipy._lib import doccer\n\n__all__ = ['docfiller']\n\n\n_input_doc = (\n\"\"\"input : array_like\n The input array.\"\"\")\n_axis_doc = (\n\"\"\"axis : int, optional\n The axis of `input` along which to calculate. Default is -1.\"\"\")\n_output_doc = (\n\"\"\"output : array or dtype, optional\n The array in which to place the output, or the dtype of the\n returned array. By default an array of the same dtype as input\n will be created.\"\"\")\n_size_foot_doc = (\n\"\"\"size : scalar or tuple, optional\n See footprint, below. Ignored if footprint is given.\nfootprint : array, optional\n Either `size` or `footprint` must be defined. `size` gives\n the shape that is taken from the input array, at every element\n position, to define the input to the filter function.\n `footprint` is a boolean array that specifies (implicitly) a\n shape, but also which of the elements within this shape will get\n passed to the filter function. Thus ``size=(n,m)`` is equivalent\n to ``footprint=np.ones((n,m))``. We adjust `size` to the number\n of dimensions of the input array, so that, if the input array is\n shape (10,10,10), and `size` is 2, then the actual size used is\n (2,2,2). When `footprint` is given, `size` is ignored.\"\"\")\n_mode_reflect_doc = (\n\"\"\"mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional\n The `mode` parameter determines how the input array is extended\n beyond its boundaries. Default is 'reflect'. Behavior for each valid\n value is as follows:\n\n 'reflect' (`d c b a | a b c d | d c b a`)\n The input is extended by reflecting about the edge of the last\n pixel.\n\n 'constant' (`k k k k | a b c d | k k k k`)\n The input is extended by filling all values beyond the edge with\n the same constant value, defined by the `cval` parameter.\n\n 'nearest' (`a a a a | a b c d | d d d d`)\n The input is extended by replicating the last pixel.\n\n 'mirror' (`d c b | a b c d | c b a`)\n The input is extended by reflecting about the center of the last\n pixel.\n\n 'wrap' (`a b c d | a b c d | a b c d`)\n The input is extended by wrapping around to the opposite edge.\"\"\")\n_mode_constant_doc = (\n _mode_reflect_doc.replace(\"Default is 'reflect'\", \"Default is 'constant'\"))\n_mode_mirror_doc = (\n _mode_reflect_doc.replace(\"Default is 'reflect'\", \"Default is 'mirror'\"))\nassert _mode_reflect_doc != _mode_constant_doc, 'Default not replaced'\n\n_mode_multiple_doc = (\n\"\"\"mode : str or sequence, optional\n The `mode` parameter determines how the input array is extended\n when the filter overlaps a border. By passing a sequence of modes\n with length equal to the number of dimensions of the input array,\n different modes can be specified along each axis. Default value is\n 'reflect'. 
The valid values and their behavior is as follows:\n\n 'reflect' (`d c b a | a b c d | d c b a`)\n The input is extended by reflecting about the edge of the last\n pixel.\n\n 'constant' (`k k k k | a b c d | k k k k`)\n The input is extended by filling all values beyond the edge with\n the same constant value, defined by the `cval` parameter.\n\n 'nearest' (`a a a a | a b c d | d d d d`)\n The input is extended by replicating the last pixel.\n\n 'mirror' (`d c b | a b c d | c b a`)\n The input is extended by reflecting about the center of the last\n pixel.\n\n 'wrap' (`a b c d | a b c d | a b c d`)\n The input is extended by wrapping around to the opposite edge.\"\"\")\n_cval_doc = (\n\"\"\"cval : scalar, optional\n Value to fill past edges of input if `mode` is 'constant'. Default\n is 0.0.\"\"\")\n_origin_doc = (\n\"\"\"origin : int, optional\n Controls the placement of the filter on the input array's pixels.\n A value of 0 (the default) centers the filter over the pixel, with\n positive values shifting the filter to the left, and negative ones\n to the right.\"\"\")\n_origin_multiple_doc = (\n\"\"\"origin : int or sequence, optional\n Controls the placement of the filter on the input array's pixels.\n A value of 0 (the default) centers the filter over the pixel, with\n positive values shifting the filter to the left, and negative ones\n to the right. By passing a sequence of origins with length equal to\n the number of dimensions of the input array, different shifts can\n be specified along each axis.\"\"\")\n_extra_arguments_doc = (\n\"\"\"extra_arguments : sequence, optional\n Sequence of extra positional arguments to pass to passed function.\"\"\")\n_extra_keywords_doc = (\n\"\"\"extra_keywords : dict, optional\n dict of extra keyword arguments to pass to passed function.\"\"\")\n_prefilter_doc = (\n\"\"\"prefilter : bool, optional\n Determines if the input array is prefiltered with `spline_filter`\n before interpolation. The default is True, which will create a\n temporary `float64` array of filtered values if `order > 1`. If\n setting this to False, the output will be slightly blurred if\n `order > 1`, unless the input is prefiltered, i.e. it is the result\n of calling `spline_filter` on the original input.\"\"\")\n\ndocdict = {\n 'input': _input_doc,\n 'axis': _axis_doc,\n 'output': _output_doc,\n 'size_foot': _size_foot_doc,\n 'mode_constant': _mode_constant_doc,\n 'mode_mirror': _mode_mirror_doc,\n 'mode_reflect': _mode_reflect_doc,\n 'mode_multiple': _mode_multiple_doc,\n 'cval': _cval_doc,\n 'origin': _origin_doc,\n 'origin_multiple': _origin_multiple_doc,\n 'extra_arguments': _extra_arguments_doc,\n 'extra_keywords': _extra_keywords_doc,\n 'prefilter': _prefilter_doc\n }\n\ndocfiller = doccer.filldoc(docdict)\n",
"import queue\nimport threading\nimport multiprocessing\nimport numpy as np\nimport pytest\nfrom numpy.random import random\nfrom numpy.testing import (\n assert_array_almost_equal, assert_array_equal, assert_allclose\n )\nfrom pytest import raises as assert_raises\nimport scipy.fft as fft\n\ndef fft1(x):\n L = len(x)\n phase = -2j*np.pi*(np.arange(L)/float(L))\n phase = np.arange(L).reshape(-1, 1) * phase\n return np.sum(x*np.exp(phase), axis=1)\n\n\nclass TestFFTShift(object):\n\n def test_fft_n(self):\n assert_raises(ValueError, fft.fft, [1, 2, 3], 0)\n\n\nclass TestFFT1D(object):\n\n def test_identity(self):\n maxlen = 512\n x = random(maxlen) + 1j*random(maxlen)\n xr = random(maxlen)\n for i in range(1,maxlen):\n assert_array_almost_equal(fft.ifft(fft.fft(x[0:i])), x[0:i],\n decimal=12)\n assert_array_almost_equal(fft.irfft(fft.rfft(xr[0:i]),i),\n xr[0:i], decimal=12)\n\n def test_fft(self):\n x = random(30) + 1j*random(30)\n assert_array_almost_equal(fft1(x), fft.fft(x))\n assert_array_almost_equal(fft1(x) / np.sqrt(30),\n fft.fft(x, norm=\"ortho\"))\n\n def test_ifft(self):\n x = random(30) + 1j*random(30)\n assert_array_almost_equal(x, fft.ifft(fft.fft(x)))\n assert_array_almost_equal(\n x, fft.ifft(fft.fft(x, norm=\"ortho\"), norm=\"ortho\"))\n\n def test_fft2(self):\n x = random((30, 20)) + 1j*random((30, 20))\n assert_array_almost_equal(fft.fft(fft.fft(x, axis=1), axis=0),\n fft.fft2(x))\n assert_array_almost_equal(fft.fft2(x) / np.sqrt(30 * 20),\n fft.fft2(x, norm=\"ortho\"))\n\n def test_ifft2(self):\n x = random((30, 20)) + 1j*random((30, 20))\n assert_array_almost_equal(fft.ifft(fft.ifft(x, axis=1), axis=0),\n fft.ifft2(x))\n assert_array_almost_equal(fft.ifft2(x) * np.sqrt(30 * 20),\n fft.ifft2(x, norm=\"ortho\"))\n\n def test_fftn(self):\n x = random((30, 20, 10)) + 1j*random((30, 20, 10))\n assert_array_almost_equal(\n fft.fft(fft.fft(fft.fft(x, axis=2), axis=1), axis=0),\n fft.fftn(x))\n assert_array_almost_equal(fft.fftn(x) / np.sqrt(30 * 20 * 10),\n fft.fftn(x, norm=\"ortho\"))\n\n def test_ifftn(self):\n x = random((30, 20, 10)) + 1j*random((30, 20, 10))\n assert_array_almost_equal(\n fft.ifft(fft.ifft(fft.ifft(x, axis=2), axis=1), axis=0),\n fft.ifftn(x))\n assert_array_almost_equal(fft.ifftn(x) * np.sqrt(30 * 20 * 10),\n fft.ifftn(x, norm=\"ortho\"))\n\n def test_rfft(self):\n x = random(30)\n for n in [x.size, 2*x.size]:\n for norm in [None, 'ortho']:\n assert_array_almost_equal(\n fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],\n fft.rfft(x, n=n, norm=norm))\n assert_array_almost_equal(fft.rfft(x, n=n) / np.sqrt(n),\n fft.rfft(x, n=n, norm=\"ortho\"))\n\n def test_irfft(self):\n x = random(30)\n assert_array_almost_equal(x, fft.irfft(fft.rfft(x)))\n assert_array_almost_equal(\n x, fft.irfft(fft.rfft(x, norm=\"ortho\"), norm=\"ortho\"))\n\n def test_rfft2(self):\n x = random((30, 20))\n assert_array_almost_equal(fft.fft2(x)[:, :11], fft.rfft2(x))\n assert_array_almost_equal(fft.rfft2(x) / np.sqrt(30 * 20),\n fft.rfft2(x, norm=\"ortho\"))\n\n def test_irfft2(self):\n x = random((30, 20))\n assert_array_almost_equal(x, fft.irfft2(fft.rfft2(x)))\n assert_array_almost_equal(\n x, fft.irfft2(fft.rfft2(x, norm=\"ortho\"), norm=\"ortho\"))\n\n def test_rfftn(self):\n x = random((30, 20, 10))\n assert_array_almost_equal(fft.fftn(x)[:, :, :6], fft.rfftn(x))\n assert_array_almost_equal(fft.rfftn(x) / np.sqrt(30 * 20 * 10),\n fft.rfftn(x, norm=\"ortho\"))\n\n def test_irfftn(self):\n x = random((30, 20, 10))\n assert_array_almost_equal(x, fft.irfftn(fft.rfftn(x)))\n 
assert_array_almost_equal(\n            x, fft.irfftn(fft.rfftn(x, norm=\"ortho\"), norm=\"ortho\"))\n\n    def test_hfft(self):\n        x = random(14) + 1j*random(14)\n        x_herm = np.concatenate((random(1), x, random(1)))\n        x = np.concatenate((x_herm, x[::-1].conj()))\n        assert_array_almost_equal(fft.fft(x), fft.hfft(x_herm))\n        assert_array_almost_equal(fft.hfft(x_herm) / np.sqrt(30),\n                                  fft.hfft(x_herm, norm=\"ortho\"))\n\n    def test_ihfft(self):\n        x = random(14) + 1j*random(14)\n        x_herm = np.concatenate((random(1), x, random(1)))\n        x = np.concatenate((x_herm, x[::-1].conj()))\n        assert_array_almost_equal(x_herm, fft.ihfft(fft.hfft(x_herm)))\n        assert_array_almost_equal(\n            x_herm, fft.ihfft(fft.hfft(x_herm, norm=\"ortho\"),\n                              norm=\"ortho\"))\n\n    def test_hfft2(self):\n        x = random((30, 20))\n        assert_array_almost_equal(x, fft.hfft2(fft.ihfft2(x)))\n        assert_array_almost_equal(\n            x, fft.hfft2(fft.ihfft2(x, norm=\"ortho\"), norm=\"ortho\"))\n\n    def test_ihfft2(self):\n        x = random((30, 20))\n        assert_array_almost_equal(fft.ifft2(x)[:, :11], fft.ihfft2(x))\n        assert_array_almost_equal(fft.ihfft2(x) * np.sqrt(30 * 20),\n                                  fft.ihfft2(x, norm=\"ortho\"))\n\n    def test_hfftn(self):\n        x = random((30, 20, 10))\n        assert_array_almost_equal(x, fft.hfftn(fft.ihfftn(x)))\n        assert_array_almost_equal(\n            x, fft.hfftn(fft.ihfftn(x, norm=\"ortho\"), norm=\"ortho\"))\n\n    def test_ihfftn(self):\n        x = random((30, 20, 10))\n        assert_array_almost_equal(fft.ifftn(x)[:, :, :6], fft.ihfftn(x))\n        assert_array_almost_equal(fft.ihfftn(x) * np.sqrt(30 * 20 * 10),\n                                  fft.ihfftn(x, norm=\"ortho\"))\n\n    @pytest.mark.parametrize(\"op\", [fft.fftn, fft.ifftn,\n                                    fft.rfftn, fft.irfftn,\n                                    fft.hfftn, fft.ihfftn])\n    def test_axes(self, op):\n        x = random((30, 20, 10))\n        axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]\n        for a in axes:\n            op_tr = op(np.transpose(x, a))\n            tr_op = np.transpose(op(x, axes=a), a)\n            assert_array_almost_equal(op_tr, tr_op)\n\n    @pytest.mark.parametrize(\"op\", [fft.fft2, fft.ifft2,\n                                    fft.rfft2, fft.irfft2,\n                                    fft.hfft2, fft.ihfft2,\n                                    fft.fftn, fft.ifftn,\n                                    fft.rfftn, fft.irfftn,\n                                    fft.hfftn, fft.ihfftn])\n    def test_axes_subset_with_shape(self, op):\n        x = random((16, 8, 4))\n        axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)]\n        for a in axes:\n            # different shape on the first two axes\n            shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax]\n                           for ax in range(x.ndim)])\n            # transform only the first two axes\n            op_tr = op(np.transpose(x, a), s=shape[:2], axes=(0, 1))\n            tr_op = np.transpose(op(x, s=shape[:2], axes=a[:2]), a)\n            assert_array_almost_equal(op_tr, tr_op)\n\n    def test_all_1d_norm_preserving(self):\n        # verify that round-trip transforms are norm-preserving\n        x = random(30)\n        x_norm = np.linalg.norm(x)\n        n = x.size * 2\n        func_pairs = [(fft.fft, fft.ifft),\n                      (fft.rfft, fft.irfft),\n                      # hfft: order so the first function takes x.size samples\n                      #       (necessary for comparison to x_norm above)\n                      (fft.ihfft, fft.hfft),\n                      ]\n        for forw, back in func_pairs:\n            for n in [x.size, 2*x.size]:\n                for norm in [None, 'ortho']:\n                    tmp = forw(x, n=n, norm=norm)\n                    tmp = back(tmp, n=n, norm=norm)\n                    assert_array_almost_equal(x_norm,\n                                              np.linalg.norm(tmp))\n\n    @pytest.mark.parametrize(\"dtype\", [np.half, np.single, np.double,\n                                       np.longdouble])\n    def test_dtypes(self, dtype):\n        # make sure that all input precisions are accepted\n        x = random(30).astype(dtype)\n        assert_array_almost_equal(fft.ifft(fft.fft(x)), x)\n        assert_array_almost_equal(fft.irfft(fft.rfft(x)), x)\n        assert_array_almost_equal(fft.hfft(fft.ihfft(x), len(x)), x)\n\n\n
@pytest.mark.parametrize(\n    \"dtype\",\n    [np.float32, np.float64, np.longfloat,\n     np.complex64, np.complex128, np.longcomplex])\n@pytest.mark.parametrize(\"order\", [\"F\", 'non-contiguous'])\n@pytest.mark.parametrize(\n    \"fft\",\n    [fft.fft, fft.fft2, fft.fftn,\n     fft.ifft, fft.ifft2, fft.ifftn])\ndef test_fft_with_order(dtype, order, fft):\n    # Check that FFT/IFFT produces identical results for C, Fortran and\n    # non contiguous arrays\n    rng = np.random.RandomState(42)\n    X = rng.rand(8, 7, 13).astype(dtype, copy=False)\n    if order == 'F':\n        Y = np.asfortranarray(X)\n    else:\n        # Make a non contiguous array\n        Y = X[::-1]\n        X = np.ascontiguousarray(X[::-1])\n\n    if fft.__name__.endswith('fft'):\n        for axis in range(3):\n            X_res = fft(X, axis=axis)\n            Y_res = fft(Y, axis=axis)\n            assert_array_almost_equal(X_res, Y_res)\n    elif fft.__name__.endswith(('fft2', 'fftn')):\n        axes = [(0, 1), (1, 2), (0, 2)]\n        if fft.__name__.endswith('fftn'):\n            axes.extend([(0,), (1,), (2,), None])\n        for ax in axes:\n            X_res = fft(X, axes=ax)\n            Y_res = fft(Y, axes=ax)\n            assert_array_almost_equal(X_res, Y_res)\n    else:\n        raise ValueError\n\n\nclass TestFFTThreadSafe(object):\n    threads = 16\n    input_shape = (800, 200)\n\n    def _test_mtsame(self, func, *args):\n        def worker(args, q):\n            q.put(func(*args))\n\n        q = queue.Queue()\n        expected = func(*args)\n\n        # Spin off a bunch of threads to call the same function simultaneously\n        t = [threading.Thread(target=worker, args=(args, q))\n             for i in range(self.threads)]\n        [x.start() for x in t]\n\n        [x.join() for x in t]\n        # Make sure all threads returned the correct value\n        for i in range(self.threads):\n            assert_array_equal(q.get(timeout=5), expected,\n                'Function returned wrong value in multithreaded context')\n\n    def test_fft(self):\n        a = np.ones(self.input_shape, dtype=np.complex128)\n        self._test_mtsame(fft.fft, a)\n\n    def test_ifft(self):\n        a = np.full(self.input_shape, 1+0j)\n        self._test_mtsame(fft.ifft, a)\n\n    def test_rfft(self):\n        a = np.ones(self.input_shape)\n        self._test_mtsame(fft.rfft, a)\n\n    def test_irfft(self):\n        a = np.full(self.input_shape, 1+0j)\n        self._test_mtsame(fft.irfft, a)\n\n    def test_hfft(self):\n        a = np.ones(self.input_shape, np.complex64)\n        self._test_mtsame(fft.hfft, a)\n\n    def test_ihfft(self):\n        a = np.ones(self.input_shape)\n        self._test_mtsame(fft.ihfft, a)\n\n\n@pytest.mark.parametrize(\"func\", [fft.fft, fft.ifft, fft.rfft, fft.irfft])\ndef test_multiprocess(func):\n    # Test that fft still works after fork (gh-10422)\n\n    with multiprocessing.Pool(2) as p:\n        res = p.map(func, [np.ones(100) for _ in range(4)])\n\n    expect = func(np.ones(100))\n    for x in res:\n        assert_allclose(x, expect)\n\n\nclass TestIRFFTN(object):\n\n    def test_not_last_axis_success(self):\n        ar, ai = np.random.random((2, 16, 8, 32))\n        a = ar + 1j*ai\n\n        axes = (-2,)\n\n        # Should not raise error\n        fft.irfftn(a, axes=axes)\n"
] | [
[
"numpy.random.random",
"numpy.pad",
"numpy.take",
"numpy.testing.assert_array_equal",
"numpy.random.rand",
"numpy.testing.assert_allclose"
],
[
"numpy.printoptions",
"numpy.allclose",
"numpy.random.seed",
"matplotlib.use",
"matplotlib.pyplot.close",
"numpy.errstate"
],
[
"numpy.random.random",
"numpy.random.seed",
"numpy.linspace",
"scipy.stats.binned_statistic_2d",
"numpy.arange",
"numpy.unique",
"numpy.histogramdd",
"scipy.stats.binned_statistic_dd",
"scipy._lib._util.check_random_state",
"numpy.append",
"numpy.int64",
"numpy.searchsorted",
"numpy.testing.assert_allclose",
"scipy.stats.binned_statistic",
"numpy.array",
"numpy.histogram",
"numpy.histogram2d"
],
[
"numpy.set_printoptions",
"numpy.get_printoptions"
],
[
"numpy.log",
"numpy.sqrt",
"numpy.cos",
"numpy.sin",
"numpy.exp"
],
[
"numpy.dot",
"scipy.optimize.OptimizeResult",
"numpy.minimum",
"numpy.maximum",
"numpy.isfinite",
"numpy.empty_like",
"numpy.linalg.norm",
"numpy.linalg.lstsq",
"scipy.sparse.linalg.aslinearoperator",
"numpy.zeros_like",
"scipy.sparse.linalg.LinearOperator",
"numpy.equal",
"numpy.any",
"scipy.sparse.linalg.lsmr",
"numpy.sum"
],
[
"scipy._lib.doccer.filldoc"
],
[
"numpy.sqrt",
"scipy.fft.rfftn",
"scipy.fft.ihfft",
"numpy.exp",
"scipy.fft.ifft",
"scipy.fft.fft2",
"numpy.arange",
"numpy.full",
"scipy.fft.fftn",
"scipy.fft.ifft2",
"numpy.testing.assert_array_almost_equal",
"scipy.fft.ihfft2",
"scipy.fft.ihfftn",
"scipy.fft",
"numpy.ascontiguousarray",
"numpy.asfortranarray",
"scipy.fft.irfftn",
"numpy.testing.assert_allclose",
"scipy.fft.rfft2",
"scipy.fft.ifftn",
"numpy.transpose",
"numpy.random.RandomState",
"numpy.random.random",
"scipy.fft.hfft",
"scipy.fft.rfft",
"numpy.linalg.norm",
"numpy.ones",
"scipy.fft.__name__.endswith",
"scipy.fft.fft"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.7",
"1.8"
],
"tensorflow": []
}
] |
willshiao/brgan | [
"99d1627176a59811bf9032ef1f99d6e7261095fb",
"99d1627176a59811bf9032ef1f99d6e7261095fb"
] | [
"src/dsloader/kronecker.py",
"src/ggan/svd.py"
] | [
"import networkx as nx\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom dsloader.util import kron_graph, random_binary, make_fractional\n\n\nclass KroneckerDataset (Dataset):\n\n def __init__(self, kron_iter=4, seed_size=4, fixed_seed=None, num_graphs=1, perms_per_graph=256, progress_bar=False):\n self.kron_iter = kron_iter\n self.seed_size = seed_size\n\n\n self.num_nodes = seed_size ** (kron_iter + 1)\n self.seeds = []\n self.matrices = []\n\n num_iter = range(num_graphs)\n if progress_bar:\n from tqdm import tqdm\n num_iter = tqdm(num_iter)\n\n for i in num_iter:\n seed = random_binary(seed_size, use_sparsity=False)\n self.seeds.append(seed)\n if fixed_seed is not None:\n k_g = kron_graph(fixed_seed, n=kron_iter).astype(np.float)\n else:\n k_g = kron_graph(seed, n=kron_iter).astype(np.float)\n for j in range(perms_per_graph):\n self.matrices.append(make_fractional(k_g, inplace=False))\n \n \n def __len__(self):\n return len(self.matrices)\n\n def __getitem__(self, idx):\n return torch.tensor(self.matrices[idx])\n",
"import torch\nimport torch.nn as nn\n\nclass ModdedSharedSvdGenerator(nn.Module):\n def __init__(self, latent_dim=100, layer_size=128, num_nodes=500, rank=30, extra_dim=False):\n super(ModdedSharedSvdGenerator, self).__init__()\n self.num_nodes = num_nodes\n self.rank = rank\n self.latent_dim = latent_dim\n self.extra_dim = extra_dim\n self.output_factors = False\n\n shared_layers = [\n nn.Linear(latent_dim, layer_size),\n nn.Linear(layer_size, layer_size * 2),\n nn.BatchNorm1d(layer_size * 2),\n nn.ReLU(inplace=True),\n # New block\n nn.Linear(layer_size * 2, layer_size * 4),\n nn.BatchNorm1d(layer_size * 4),\n ]\n\n mat_output_layers = [\n [\n nn.Linear(layer_size * 4, num_nodes * rank)\n ] for _ in range(2)\n ]\n sigma_output_layers = [\n nn.Linear(layer_size * 4, rank)\n ]\n\n self.shared = nn.Sequential(*shared_layers)\n self.output1 = nn.Sequential(*mat_output_layers[0])\n self.output2 = nn.Sequential(*mat_output_layers[1])\n self.output_sigma = nn.Sequential(*sigma_output_layers)\n\n def set_factor_output(self, new_val):\n self.output_factors = new_val\n return True\n\n def sample_latent(self, num_samples):\n return torch.randn((num_samples, self.latent_dim))\n\n def forward(self, noise):\n batch_sz = noise.shape[0]\n S = self.shared(noise)\n U = self.output1(S).view(batch_sz, self.num_nodes, self.rank)\n Vt = self.output2(S).view(batch_sz, self.rank, self.num_nodes)\n sig = self.output_sigma(S).view(batch_sz, self.rank)\n sig_diag = torch.diag_embed(sig)\n U_scaled = torch.bmm(U, sig_diag)\n res = torch.bmm(U_scaled, Vt)\n\n if self.extra_dim:\n out = res.view(batch_sz, 1, self.num_nodes, self.num_nodes)\n elif not self.output_factors:\n out = res.view(batch_sz, self.num_nodes, self.num_nodes)\n\n if self.output_factors:\n return (out, (U, Vt))\n else:\n return out\n\n def sample_latent(self, num_samples):\n return torch.randn((num_samples, self.latent_dim))\n"
] | [
[
"torch.tensor"
],
[
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.randn",
"torch.diag_embed",
"torch.nn.Linear",
"torch.bmm",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
koriavinash1/pgm | [
"89e11b61f7141a75d8991ff4ea229ef66d7a4a0c",
"89e11b61f7141a75d8991ff4ea229ef66d7a4a0c"
] | [
"examples/assignment3/MH.py",
"pgm/inference/check.py"
] | [
"import sys\nimport numpy as np\nsys.path.append('../..')\nfrom pgm.inference.MetropolisHastings import MH\nfrom matplotlib import pyplot as plt\n\ndef Gamma(theta, k = 1):\n def G(k):\n if k <= 0: return 1\n elif k == 0.5: return np.pi **0.5\n return k*G(k-1)\n def distribution(x):\n x = np.abs(x)\n return (x**(k-1))*np.exp(-x/theta)/((theta**k) * G(k)) \n return distribution\n\n\ndef proposalDistribution(sigma=0.1):\n \"\"\"\n Describes example proposal distribution\n considers gaussion distribution with fixed sigma\n as the mean keeps changing it's made an inner function argument\n \"\"\"\n def QDistribution(param = 0):\n return lambda x: (1/(((2*np.pi)**0.5) * sigma))*np.exp(-((x-param)**2)/ (sigma**2))\n\n return QDistribution, lambda x: np.random.normal(x, sigma)\n\n\n# ==========================================\nfunction = Gamma(theta=5.5, k=1)\nsigma = [0.1, 1.0, 2.0]\nburnin = [2, 5, 10, 100, 200]\n\n\"\"\"\nfor sig in sigma:\n for _burnin in burnin: \n proposalDist, proposalSamp = proposalDistribution(sig)\n\n mh = MH(function, _burnin, proposalDist, proposalSamp)\n nMontecarlo = 1000\n\n for _ in range(nMontecarlo):\n next(mh.sampler())\n\n sampledvalues = np.array(mh.x_seq)\n print(\"sig, burin, mean, bacc, cacc: \", sig, _burnin, np.mean(sampledvalues), np.mean(mh.burninAcc), np.mean(mh.collectionAcc))\n\n\n\"\"\"\nx = np.linspace(-20, 20, 500)\nfx = function(x)\n\nproposalDist, proposalSamp = proposalDistribution(sigma = 2.0)\nmh = MH(function, 100, proposalDist, proposalSamp)\nfor _ in range(1000):\n next(mh.sampler())\n\nsampledvalues = np.array(mh.x_seq)\nplt.plot(x, fx, 'b--', linewidth=2.0)\n\nhist = np.histogram(sampledvalues, bins=50)\nx = hist[1][1:]\nhist = hist[0]\nprint(hist.shape, x.shape)\nhist = hist*np.max(fx)/np.max(hist)\nplt.bar(x, hist, color = 'g', width=1.8, alpha=0.7)\n# plt.hist(sampledvalues, 50, density=True, stacked=True, facecolor='g', alpha=0.7, linewidth=0)\nplt.legend(['target pdf', 'sampled histogram'])\nplt.show()\n\nplt.plot(sampledvalues, linewidth=2.0)\nplt.ylim(-20.0, 20.0)\nplt.show()\n\n\n",
"\n \nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import gamma\nimport matplotlib.pyplot as plt\nimport cv2\n\nclass Loopy():\n\n\tdef __init__(self, grid, iterations):\n\t\tself.grid = grid\n\t\tself.size = self.grid.shape[0]\n\t\tself.iterations = iterations\n\t\tself.compatibility_inter = np.array([[1.5, 0.5], [0.5, 1.5]])\n\t\tself.compatibility_outer = np.array([[1.9, 0.1], [0.1, 1.9]])\n\n\tdef get_neighbours(self, idx):\n\n\t\tneighbours = []\n\t\tif idx+self.size < self.size**2:\n\t\t\tneighbours.append(idx+self.size)\n\t\tif idx-self.size > 0:\n\t\t\tneighbours.append(idx-self.size)\n\t\ttry:\n\t\t\tif np.unravel_index(idx-1, (self.size,self.size))[0] == np.unravel_index(idx, (self.size,self.size))[0]:\n\t\t\t\tneighbours.append(idx-1)\n\t\texcept:\n\t\t\tpass\n\t\ttry:\n\t\t\tif np.unravel_index(idx+1, (self.size,self.size))[0] == np.unravel_index(idx, (self.size,self.size))[0]:\n\t\t\t\tneighbours.append(idx+1)\n\t\texcept:\n\t\t\tpass\n\n\t\treturn(neighbours)\n\n\tdef message_dict(self, iterations):\n\n\t\tfactor_messages = np.ones((self.size**2, self.size**2, 2))\n\t\tclique_messages = np.ones((self.size**2, self.size**2, 2))\n\t\tbeta = np.ones((self.size, self.size, 2))\n\n\t\tfor i in range(iterations):\n\t\t\tfor j in range(self.size**2):\n\t\t\t\tneighbours = self.get_neighbours(j)\n\t\t\t\tfor n in neighbours:\n\t\t\t\t\tfactor_messages[j,n,:] = self.compatibility_outer[:, self.grid[np.unravel_index(j, (self.size, self.size))]]\n\t\t\t\t\tadj_neighbours = np.setdiff1d(neighbours,n)\n\t\t\t\t\tfor adj in adj_neighbours:\n\t\t\t\t\t\tfactor_messages[j,n,:] *= clique_messages[adj,j,:]\n\t\t\t\t\tclique_messages[n,j,:] *= self.compatibility_inter.dot(factor_messages[n,j,:])\n\t\t\tfactor_norm = np.sum(factor_messages, axis=2)\n\t\t\tfactor_messages[:,:,0] /= factor_norm\n\t\t\tfactor_messages[:,:,1] /= factor_norm\n\t\t\tclique_norm = np.sum(clique_messages, axis=2)\n\t\t\tclique_messages[:,:,0] /= clique_norm\n\t\t\tclique_messages[:,:,1] /= clique_norm\n\n\t\tfor j in range(self.size**2):\n\t\t\tneighbours = self.get_neighbours(j)\n\t\t\tfor n in neighbours:\n\t\t\t\tbeta[np.unravel_index(j, (self.size, self.size))] *= factor_messages[j,n]\n\n\t\tplt.imshow(np.argmax(beta, axis = 2))\n\t\tplt.show()\n\n\t# def message_dict(self, iterations):\n\n\t# \tfactor_messages = np.ones((self.size**2, self.size**2, 2))\n\t# \tclique_messages = np.ones((self.size**2, self.size**2, 2))\n\t# \tbeta = np.ones((self.size, self.size, 2))\n\n\t# \tfor i in range(iterations):\n\t# \t\tfor j in range(self.size**2):\n\t# \t\t\tneighbours = self.get_neighbours(j)\n\t# \t\t\tfor n in neighbours:\n\t# \t\t\t\tfactor_messages[j,n,:] = self.compatibility_outer[:, self.grid[np.unravel_index(j, (self.size, self.size))]]\n\t# \t\t\t\tadj_neighbours = np.setdiff1d(neighbours,n)\n\t# \t\t\t\tfor adj in adj_neighbours:\n\t# \t\t\t\t\tfactor_messages[j,n,:] *= clique_messages[adj,j,:]\n\t# \t\t\t\tclique_messages[n,j,:] *= self.compatibility_inter.dot(factor_messages[n,j,:])\n\t# \t\tfactor_norm = np.sum(factor_messages, axis=2)\n\t# \t\tfactor_messages[:,:,0] /= factor_norm\n\t# \t\tfactor_messages[:,:,1] /= factor_norm\n\t# \t\tclique_norm = np.sum(clique_messages, axis=2)\n\t# \t\tclique_messages[:,:,0] /= clique_norm\n\t# \t\tclique_messages[:,:,1] /= clique_norm\n\t# \t\t# print(factor_messages, clique_messages)\n\n\t# \tfor j in range(self.size**2):\n\t# \t\tneighbours = self.get_neighbours(j)\n\t# \t\tfor n in neighbours:\n\t# \t\t\tbeta[np.unravel_index(j, (self.size, self.size))] 
*= factor_messages[j,n]\n\t# \t\t# print(self.compatibility_outer[:, self.grid[np.unravel_index(j, (self.size, self.size))]])\n\t# \t\t# beta[np.unravel_index(j, (self.size, self.size))] *= self.compatibility_outer[:, self.grid[np.unravel_index(j, (self.size, self.size))]]\n\t# \tplt.imshow(np.argmax(beta, axis = 2))\n\t# \tplt.show()\n\n\nif __name__ == '__main__':\n\n\tsize = 60\n\tflip_prob = 0.2\n\tgrid = np.zeros((size, size), dtype='int64')\n\tfor j in range(size**2):\n\t\tidx = np.unravel_index(j, (size, size))\n\t\tif ((idx[0]-50)**2+(idx[1]-50)**2)**0.5 <= 25:\n\t\t\tgrid[idx] = 1\n\t\tthresh = np.random.random_sample()\n\t\tif thresh < flip_prob:\n\t\t\tgrid[idx] = 1-grid[idx]\n\n\tplt.imshow(grid)\n\tplt.show()\n\tL = Loopy(grid, 10)\n\tL.message_dict(10)"
] | [
[
"matplotlib.pyplot.legend",
"numpy.abs",
"numpy.linspace",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.random.normal",
"matplotlib.pyplot.bar",
"numpy.exp",
"numpy.array",
"numpy.histogram",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.imshow",
"numpy.unravel_index",
"numpy.random.random_sample",
"numpy.setdiff1d",
"numpy.ones",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Freakawho/sagemaker-tensorflow-training-toolkit-master | [
"f37c7d85600beb5461788db8c471b66c25beff8f"
] | [
"src/sagemaker_tensorflow_container/training.py"
] | [
"# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License'). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the 'license' file accompanying this file. This file is\n# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom __future__ import absolute_import\n\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport subprocess\nimport time\n\nfrom sagemaker_training import entry_point, environment, mapping, runner\nimport tensorflow as tf\n\nfrom sagemaker_tensorflow_container import s3_utils\n\nlogger = logging.getLogger(__name__)\n\nSAGEMAKER_PARAMETER_SERVER_ENABLED = \"sagemaker_parameter_server_enabled\"\nMODEL_DIR = \"/opt/ml/model\"\n\n\ndef _is_host_master(hosts, current_host):\n return current_host == hosts[0]\n\n\ndef _build_tf_config(hosts, current_host, ps_task=False):\n \"\"\"Builds a dictionary containing cluster information based on number of hosts and number of\n parameter servers.\n\n Args:\n hosts (list[str]): List of host names in the cluster\n current_host (str): Current host name\n ps_task (bool): Set to True if this config is built for a parameter server process\n (default: False)\n\n Returns:\n dict[str: dict]: A dictionary describing the cluster setup for distributed training.\n For more information regarding TF_CONFIG:\n https://cloud.google.com/ml-engine/docs/tensorflow/distributed-training-details\n \"\"\"\n # Assign the first host as the master. Rest of the hosts if any will be worker hosts.\n # The first ps_num hosts will also have a parameter task assign to them.\n masters = hosts[:1]\n workers = hosts[1:]\n ps = hosts if len(hosts) > 1 else None\n\n def host_addresses(hosts, port=2222):\n return [\"{}:{}\".format(host, port) for host in hosts]\n\n tf_config = {\"cluster\": {\"master\": host_addresses(masters)}, \"environment\": \"cloud\"}\n\n if ps:\n tf_config[\"cluster\"][\"ps\"] = host_addresses(ps, port=\"2223\")\n\n if workers:\n tf_config[\"cluster\"][\"worker\"] = host_addresses(workers)\n\n if ps_task:\n if ps is None:\n raise ValueError(\n \"Cannot have a ps task if there are no parameter servers in the cluster\"\n )\n task_type = \"ps\"\n task_index = ps.index(current_host)\n elif _is_host_master(hosts, current_host):\n task_type = \"master\"\n task_index = 0\n else:\n task_type = \"worker\"\n task_index = workers.index(current_host)\n\n tf_config[\"task\"] = {\"index\": task_index, \"type\": task_type}\n return tf_config\n\n\ndef _run_ps(env, cluster):\n logger.info(\"Running distributed training job with parameter servers\")\n\n cluster_spec = tf.train.ClusterSpec(cluster)\n task_index = env.hosts.index(env.current_host)\n # Force parameter server to run on cpu. 
Running multiple TensorFlow processes on the same\n # GPU is not safe:\n # https://stackoverflow.com/questions/46145100/is-it-unsafe-to-run-multiple-tensorflow-processes-on-the-same-gpu\n no_gpu_config = tf.ConfigProto(device_count={\"GPU\": 0})\n\n server = tf.train.Server(\n cluster_spec, job_name=\"ps\", task_index=task_index, config=no_gpu_config\n )\n\n multiprocessing.Process(target=lambda: server.join()).start()\n\n\ndef _run_worker(env, cmd_args, tf_config):\n env_vars = env.to_env_vars()\n env_vars[\"TF_CONFIG\"] = json.dumps(tf_config)\n\n entry_point.run(\n uri=env.module_dir,\n user_entry_point=env.user_entry_point,\n args=cmd_args,\n env_vars=env_vars,\n capture_error=True,\n )\n\n\ndef _wait_until_master_is_down(master):\n while True:\n try:\n subprocess.check_call(\n [\"curl\", \"{}:2222\".format(master)], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n logger.info(\"master {} is still up, waiting for it to exit\".format(master))\n time.sleep(10)\n except subprocess.CalledProcessError:\n logger.info(\"master {} is down, stopping parameter server\".format(master))\n return\n\n\ndef train(env, cmd_args):\n \"\"\"Get training job environment from env and run the training job.\n\n Args:\n env (sagemaker_training.env.TrainingEnv): Instance of TrainingEnv class\n \"\"\"\n parameter_server_enabled = env.additional_framework_parameters.get(\n SAGEMAKER_PARAMETER_SERVER_ENABLED, False\n )\n if len(env.hosts) > 1 and parameter_server_enabled:\n\n tf_config = _build_tf_config(hosts=env.hosts, current_host=env.current_host)\n\n logger.info(\"Running distributed training job with parameter servers\")\n logger.info(\"Launching parameter server process\")\n _run_ps(env, tf_config[\"cluster\"])\n logger.info(\"Launching worker process\")\n _run_worker(env, cmd_args, tf_config)\n\n if not _is_host_master(env.hosts, env.current_host):\n _wait_until_master_is_down(env.hosts[0])\n\n else:\n\n mpi_enabled = env.additional_framework_parameters.get(\"sagemaker_mpi_enabled\")\n\n if mpi_enabled:\n runner_type = runner.MPIRunnerType\n else:\n runner_type = runner.ProcessRunnerType\n\n entry_point.run(\n uri=env.module_dir,\n user_entry_point=env.user_entry_point,\n args=cmd_args,\n env_vars=env.to_env_vars(),\n capture_error=True,\n runner_type=runner_type,\n )\n\n\ndef _log_model_missing_warning(model_dir):\n pb_file_exists = False\n file_exists = False\n for dirpath, dirnames, filenames in os.walk(model_dir):\n if filenames:\n file_exists = True\n for f in filenames:\n if \"saved_model.pb\" in f or \"saved_model.pbtxt\" in f:\n pb_file_exists = True\n path, direct_parent_dir = os.path.split(dirpath)\n if not str.isdigit(direct_parent_dir):\n logger.warn(\n \"Your model will NOT be servable with SageMaker TensorFlow Serving containers. \"\n 'The SavedModel bundle is under directory \"{}\", not a numeric name.'.format(\n direct_parent_dir\n )\n )\n\n if not file_exists:\n logger.warn(\n \"No model artifact is saved under path {}.\"\n \" Your training job will not save any model files to S3.\\n\"\n \"For details of how to construct your training script see:\\n\"\n \"https://sagemaker.readthedocs.io/en/stable/using_tf.html#adapting-your-local-tensorflow-script\".format(\n model_dir\n )\n )\n elif not pb_file_exists:\n logger.warn(\n \"Your model will NOT be servable with SageMaker TensorFlow Serving container. 
\"\n \"The model artifact was not saved in the TensorFlow SavedModel directory structure:\\n\"\n \"https://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory\"\n )\n\n\ndef _model_dir_with_training_job(model_dir, job_name):\n if model_dir and model_dir.startswith(\"/opt/ml\"):\n return model_dir\n else:\n return \"{}/{}/model\".format(model_dir, job_name)\n\n\ndef main():\n \"\"\"Training entry point\n \"\"\"\n hyperparameters = environment.read_hyperparameters()\n env = environment.Environment(hyperparameters=hyperparameters)\n\n user_hyperparameters = env.hyperparameters\n\n # If the training job is part of the multiple training jobs for tuning, we need to append the training job name to\n # model_dir in case they read from/write to the same object\n if \"_tuning_objective_metric\" in hyperparameters:\n model_dir = _model_dir_with_training_job(hyperparameters.get(\"model_dir\"), env.job_name)\n logger.info(\"Appending the training job name to model_dir: {}\".format(model_dir))\n user_hyperparameters[\"model_dir\"] = model_dir\n\n s3_utils.configure(user_hyperparameters.get(\"model_dir\"), os.environ.get(\"SAGEMAKER_REGION\"))\n train(env, mapping.to_cmd_args(user_hyperparameters))\n _log_model_missing_warning(MODEL_DIR)\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.train.Server",
"tensorflow.train.ClusterSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
TanayGahlot/mne-python | [
"857aa97c201451b82931c5eba50642975afc423d",
"857aa97c201451b82931c5eba50642975afc423d",
"857aa97c201451b82931c5eba50642975afc423d",
"857aa97c201451b82931c5eba50642975afc423d"
] | [
"examples/decoding/plot_decoding_csp_eeg.py",
"examples/stats/plot_cluster_stats_time_frequency_repeated_measures_anova.py",
"examples/export/plot_epochs_to_nitime.py",
"mne/channels/interpolation.py"
] | [
"\"\"\"\n===========================================================================\nMotor imagery decoding from EEG data using the Common Spatial Pattern (CSP)\n===========================================================================\n\nDecoding of motor imagery applied to EEG data decomposed using CSP.\nHere the classifier is applied to features extracted on CSP filtered signals.\n\nSee http://en.wikipedia.org/wiki/Common_spatial_pattern and [1]\n\nThe EEGBCI dataset is documented in [2]\nThe data set is available at PhysioNet [3]\n\n[1] Zoltan J. Koles. The quantitative extraction and topographic mapping\n of the abnormal components in the clinical EEG. Electroencephalography\n and Clinical Neurophysiology, 79(6):440--447, December 1991.\n\n[2] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,\n Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface\n (BCI) System. IEEE TBME 51(6):1034-1043\n\n[3] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,\n Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,\n PhysioToolkit, and PhysioNet: Components of a New Research Resource for\n Complex Physiologic Signals. Circulation 101(23):e215-e220\n\"\"\"\n# Authors: Martin Billinger <[email protected]>\n#\n# License: BSD (3-clause)\n\nprint(__doc__)\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mne import Epochs, pick_types\nfrom mne.io import concatenate_raws\nfrom mne.io.edf import read_raw_edf\nfrom mne.datasets import eegbci\nfrom mne.event import find_events\nfrom mne.decoding import CSP\nfrom mne.layouts import read_layout\n\n###############################################################################\n## Set parameters and read data\n\n# avoid classification of evoked responses by using epochs that start 1s after\n# cue onset.\ntmin, tmax = -1., 4.\nevent_id = dict(hands=2, feet=3)\nsubject = 1\nruns = [6, 10, 14] # motor imagery: hands vs feet\n\nraw_fnames = eegbci.load_data(subject, runs)\nraw_files = [read_raw_edf(f, tal_channel=-1, preload=True) for f in raw_fnames]\nraw = concatenate_raws(raw_files)\n\n# strip channel names\nraw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]\n\n# Apply band-pass filter\nraw.filter(7., 30., method='iir')\n\nevents = find_events(raw, shortest_event=0, stim_channel='STI 014')\n\npicks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,\n exclude='bads')\n\n# Read epochs (train will be done only between 1 and 2s)\n# Testing will be done with a running classifier\nepochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,\n baseline=None, preload=True, add_eeg_ref=False)\nepochs_train = epochs.crop(tmin=1., tmax=2., copy=True)\nlabels = epochs.events[:, -1] - 2\n\n###############################################################################\n# Classification with linear discrimant analysis\n\nfrom sklearn.lda import LDA\nfrom sklearn.cross_validation import ShuffleSplit\n\n# Assemble a classifier\nsvc = LDA()\ncsp = CSP(n_components=4, reg=None, log=True)\n\n# Define a monte-carlo cross-validation generator (reduce variance):\ncv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)\nscores = []\nepochs_data = epochs.get_data()\nepochs_data_train = epochs_train.get_data()\n\n# Use scikit-learn Pipeline with cross_val_score function\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.cross_validation import cross_val_score\nclf = Pipeline([('CSP', csp), ('SVC', svc)])\nscores = cross_val_score(clf, epochs_data_train, 
labels, cv=cv, n_jobs=1)\n\n# Printing the results\nclass_balance = np.mean(labels == labels[0])\nclass_balance = max(class_balance, 1. - class_balance)\nprint(\"Classification accuracy: %f / Chance level: %f\" % (np.mean(scores),\n class_balance))\n\n# plot CSP patterns estimated on full data for visualization\ncsp.fit_transform(epochs_data, labels)\n\nevoked = epochs.average()\nevoked.data = csp.patterns_.T\nevoked.times = np.arange(evoked.data.shape[0])\n\nlayout = read_layout('EEG1005')\nevoked.plot_topomap(times=[0, 1, 2, 61, 62, 63], ch_type='eeg', layout=layout,\n scale_time=1, time_format='%i', scale=1,\n unit='Patterns (AU)', size=1.5)\n\n###############################################################################\n# Look at performance over time\n\nsfreq = raw.info['sfreq']\nw_length = int(sfreq * 0.5) # running classifier: window length\nw_step = int(sfreq * 0.1) # running classifier: window step size\nw_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)\n\nscores_windows = []\n\nfor train_idx, test_idx in cv:\n y_train, y_test = labels[train_idx], labels[test_idx]\n\n X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)\n X_test = csp.transform(epochs_data_train[test_idx])\n\n # fit classifier\n svc.fit(X_train, y_train)\n\n # running classifier: test classifier on sliding window\n score_this_window = []\n for n in w_start:\n X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])\n score_this_window.append(svc.score(X_test, y_test))\n scores_windows.append(score_this_window)\n\n# Plot scores over time\nw_times = (w_start + w_length / 2.) / sfreq + epochs.tmin\n\nplt.figure()\nplt.plot(w_times, np.mean(scores_windows, 0), label='Score')\nplt.axvline(0, linestyle='--', color='k', label='Onset')\nplt.axhline(0.5, linestyle='-', color='k', label='Chance')\nplt.xlabel('time (s)')\nplt.ylabel('classification accuracy')\nplt.title('Classification score over time')\nplt.legend(loc='lower right')\nplt.show()\n",
"\"\"\"\n====================================================================\nMass-univariate twoway repeated measures ANOVA on single trial power\n====================================================================\n\nThis script shows how to conduct a mass-univariate repeated measures\nANOVA. As the model to be fitted assumes two fully crossed factors,\nwe will study the interplay between perceptual modality\n(auditory VS visual) and the location of stimulus presentation\n(left VS right). Here we use single trials as replications\n(subjects) while iterating over time slices plus frequency bands\nfor to fit our mass-univariate model. For the sake of simplicity we\nwill confine this analysis to one single channel of which we know\nthat it exposes a strong induced response. We will then visualize\neach effect by creating a corresponding mass-univariate effect\nimage. We conclude with accounting for multiple comparisons by\nperforming a permutation clustering test using the ANOVA as\nclustering function. The results final will be compared to\nmultiple comparisons using False Discovery Rate correction.\n\"\"\"\n# Authors: Denis Engemann <[email protected]>\n# Eric Larson <[email protected]>\n# Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nprint(__doc__)\n\nimport numpy as np\n\nimport mne\nfrom mne import io\nfrom mne.time_frequency import single_trial_power\nfrom mne.stats import f_threshold_twoway_rm, f_twoway_rm, fdr_correction\nfrom mne.datasets import sample\n\n###############################################################################\n# Set parameters\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'\nevent_id = 1\ntmin = -0.2\ntmax = 0.5\n\n# Setup for reading the raw data\nraw = io.Raw(raw_fname)\nevents = mne.read_events(event_fname)\n\ninclude = []\nraw.info['bads'] += ['MEG 2443'] # bads\n\n# picks MEG gradiometers\npicks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,\n stim=False, include=include, exclude='bads')\n\nch_name = raw.info['ch_names'][picks[0]]\n\n# Load conditions\nreject = dict(grad=4000e-13, eog=150e-6)\nevent_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax,\n picks=picks, baseline=(None, 0),\n reject=reject)\n\n# make sure all conditions have the same counts, as the ANOVA expects a\n# fully balanced data matrix and does not forgive imbalances that generously\n# (risk of type-I error)\nepochs.equalize_event_counts(event_id, copy=False)\n# Time vector\ntimes = 1e3 * epochs.times # change unit to ms\n\n# Factor to downs-sample the temporal dimension of the PSD computed by\n# single_trial_power.\ndecim = 2\nfrequencies = np.arange(7, 30, 3) # define frequencies of interest\nsfreq = raw.info['sfreq'] # sampling in Hz\nn_cycles = frequencies / frequencies[0]\nbaseline_mask = times[::decim] < 0\n\n# now create TFR representations for all conditions\nepochs_power = []\nfor condition in [epochs[k].get_data()[:, 97:98, :] for k in event_id]:\n this_power = single_trial_power(condition, sfreq=sfreq, frequencies=frequencies,\n n_cycles=n_cycles, use_fft=False,\n decim=decim)\n this_power = this_power[:, 0, :, :] # we only have one channel.\n # Compute ratio with baseline power (be sure to correct time vector with\n # decimation factor)\n epochs_baseline = np.mean(this_power[:, :, baseline_mask], axis=2)\n this_power /= epochs_baseline[..., np.newaxis]\n 
epochs_power.append(this_power)\n\n###############################################################################\n# Setup repeated measures ANOVA\n\nn_conditions = len(epochs.event_id)\nn_replications = epochs.events.shape[0] / n_conditions\n# we will tell the ANOVA how to interpret the data matrix in terms of\n# factors. This done via the factor levels argument which is a list\n# of the number factor levels for each factor.\nfactor_levels = [2, 2] # number of levels in each factor\neffects = 'A*B' # this is the default signature for computing all effects\n# Other possible options are 'A' or 'B' for the corresponding main effects\n# or 'A:B' for the interaction effect only (this notation is borrowed from the\n# R formula language)\nn_frequencies = len(frequencies)\nn_times = len(times[::decim])\n\n# Now we'll assemble the data matrix and swap axes so the trial replications\n# are the first dimension and the conditions are the second dimension\ndata = np.swapaxes(np.asarray(epochs_power), 1, 0)\n# reshape last two dimensions in one mass-univariate observation-vector\ndata = data.reshape(n_replications, n_conditions, n_frequencies * n_times)\n\n# so we have replications * conditions * observations:\nprint(data.shape)\n\n# while the iteration scheme used above for assembling the data matrix\n# makes sure the first two dimensions are organized as expected (with A =\n# modality and B = location):\n#\n# A1B1 A1B2 A2B1 B2B2\n# trial 1 1.34 2.53 0.97 1.74\n# trial ... .... .... .... ....\n# trial 56 2.45 7.90 3.09 4.76\n#\n# Now we're ready to run our repeated measures ANOVA.\n\nfvals, pvals = f_twoway_rm(data, factor_levels, effects=effects)\n\neffect_labels = ['modality', 'location', 'modality by location']\nimport matplotlib.pyplot as plt\n\n# let's visualize our effects by computing f-images\nfor effect, sig, effect_label in zip(fvals, pvals, effect_labels):\n plt.figure()\n # show naive F-values in gray\n plt.imshow(effect.reshape(8, 211), cmap=plt.cm.gray, extent=[times[0],\n times[-1], frequencies[0], frequencies[-1]], aspect='auto',\n origin='lower')\n # create mask for significant Time-frequency locations\n effect = np.ma.masked_array(effect, [sig > .05])\n plt.imshow(effect.reshape(8, 211), cmap='RdBu_r', extent=[times[0],\n times[-1], frequencies[0], frequencies[-1]], aspect='auto',\n origin='lower')\n plt.colorbar()\n plt.xlabel('time (ms)')\n plt.ylabel('Frequency (Hz)')\n plt.title(r\"Time-locked response for '%s' (%s)\" % (effect_label, ch_name))\n plt.show()\n\n# Note. As we treat trials as subjects, the test only accounts for\n# time locked responses despite the 'induced' approach.\n# For analysis for induced power at the group level averaged TRFs\n# are required.\n\n\n###############################################################################\n# Account for multiple comparisons using FDR versus permutation clustering test\n\n# First we need to slightly modify the ANOVA function to be suitable for\n# the clustering procedure. 
Also want to set some defaults.\n# Let's first override effects to confine the analysis to the interaction\neffects = 'A:B'\n\n\n# A stat_fun must deal with a variable number of input arguments.\ndef stat_fun(*args):\n # Inside the clustering function each condition will be passed as\n # flattened array, necessitated by the clustering procedure.\n # The ANOVA however expects an input array of dimensions:\n # subjects X conditions X observations (optional).\n # The following expression catches the list input and swaps the first and\n # the second dimension and finally calls the ANOVA function.\n return f_twoway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,\n effects=effects, return_pvals=False)[0]\n # The ANOVA returns a tuple f-values and p-values, we will pick the former.\n\n\npthresh = 0.00001 # set threshold rather high to save some time\nf_thresh = f_threshold_twoway_rm(n_replications, factor_levels, effects,\n pthresh)\ntail = 1 # f-test, so tail > 0\nn_permutations = 256 # Save some time (the test won't be too sensitive ...)\nT_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(\n epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,\n n_permutations=n_permutations, buffer_size=None)\n\n# Create new stats image with only significant clusters\ngood_clusers = np.where(cluster_p_values < .05)[0]\nT_obs_plot = np.ma.masked_array(T_obs,\n np.invert(clusters[np.squeeze(good_clusers)]))\n\nplt.figure()\nfor f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, 'RdBu_r']):\n plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],\n frequencies[0], frequencies[-1]], aspect='auto',\n origin='lower')\nplt.xlabel('time (ms)')\nplt.ylabel('Frequency (Hz)')\nplt.title('Time-locked response for \\'modality by location\\' (%s)\\n'\n ' cluster-level corrected (p <= 0.05)' % ch_name)\nplt.show()\n\n# now using FDR\nmask, _ = fdr_correction(pvals[2])\nT_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask))\n\nplt.figure()\nfor f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, 'RdBu_r']):\n plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],\n frequencies[0], frequencies[-1]], aspect='auto',\n origin='lower')\n\nplt.xlabel('time (ms)')\nplt.ylabel('Frequency (Hz)')\nplt.title('Time-locked response for \\'modality by location\\' (%s)\\n'\n ' FDR corrected (p <= 0.05)' % ch_name)\nplt.show()\n\n# Both, cluster level and FDR correction help getting rid of\n# putatively spots we saw in the naive f-images.\n",
"\"\"\"\n=======================\nExport epochs to NiTime\n=======================\n\nThis script shows how to export Epochs to the NiTime library\nfor further signal processing and data analysis.\n\n\"\"\"\n\n# Author: Denis Engemann <[email protected]>\n# Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nprint(__doc__)\n\nimport numpy as np\nimport mne\nfrom mne import io\nfrom mne.datasets import sample\ndata_path = sample.data_path()\n\n###############################################################################\n# Set parameters\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nevent_id, tmin, tmax = 1, -0.2, 0.5\n\n# Setup for reading the raw data\nraw = io.Raw(raw_fname)\nevents = mne.read_events(event_fname)\n\n# Set up pick list: EEG + MEG - bad channels (modify to your needs)\nraw.info['bads'] += ['MEG 2443', 'EEG 053']\npicks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,\n exclude='bads')\n\n# Read epochs\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,\n picks=picks, baseline=(None, 0), preload=True,\n reject=dict(grad=4000e-13, eog=150e-6))\n\n# Export to NiTime\nepochs_ts = epochs.to_nitime(picks=np.arange(20), collapse=True)\n\n###############################################################################\n# Now use nitime's OO-interface to compute coherence between sensors\n\nfrom nitime.analysis import MTCoherenceAnalyzer\nfrom nitime.viz import drawmatrix_channels\nimport matplotlib.pyplot as plt\n\n# setup coherency analyzer\nC = MTCoherenceAnalyzer(epochs_ts)\n\n# confine analysis to 10 - 20 Hz\nfreq_idx = np.where((C.frequencies > 10) * (C.frequencies < 30))[0]\n\n# compute average coherence\ncoh = np.mean(C.coherence[:, :, freq_idx], -1) # Averaging on last dimension\ndrawmatrix_channels(coh, epochs.ch_names, color_anchor=0,\n title='MEG gradiometer coherence')\n\nplt.show()\n",
"# Authors: Denis Engemann <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nfrom numpy.polynomial.legendre import legval\nfrom scipy import linalg\n\nfrom .. utils import logger\nfrom .. io.pick import pick_types\nfrom .. surface import _normalize_vectors\nfrom . utils import _fit_sphere\n\n\ndef _calc_g(cosang, stiffness=4, num_lterms=50):\n \"\"\"Calculate spherical spline g function between points on a sphere.\n\n Parameters\n ----------\n cosang : array-like of float, shape(n_channels, n_channels)\n cosine of angles between pairs of points on a spherical surface. This\n is equivalent to the dot product of unit vectors.\n stiffness : float\n stiffness of the spline.\n num_lterms : int\n number of Legendre terms to evaluate.\n\n Returns\n -------\n G : np.ndrarray of float, shape(n_channels, n_channels)\n The G matrix.\n \"\"\"\n factors = [(2 * n + 1) / (n ** stiffness * (n + 1) ** stiffness * 4 * np.pi)\n for n in range(1, num_lterms + 1)]\n return legval(cosang, [0] + factors)\n\n\ndef _calc_h(cosang, stiffness=4, num_lterms=50):\n \"\"\"Calculate spherical spline h function between points on a sphere.\n\n Parameters\n ----------\n cosang : array-like of float, shape(n_channels, n_channels)\n cosine of angles between pairs of points on a spherical surface. This\n is equivalent to the dot product of unit vectors.\n stiffness : float\n stiffness of the spline. Also referred to as `m`.\n num_lterms : int\n number of Legendre terms to evaluate.\n H : np.ndrarray of float, shape(n_channels, n_channels)\n The H matrix.\n \"\"\"\n factors = [(2 * n + 1) /\n (n ** (stiffness - 1) * (n + 1) ** (stiffness - 1) * 4 * np.pi)\n for n in range(1, num_lterms + 1)]\n return legval(cosang, [0] + factors)\n\n\ndef _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5):\n \"\"\"Compute interpolation matrix based on spherical splines\n\n Implementation based on [1]\n\n Parameters\n ----------\n pos_from : np.ndarray of float, shape(n_good_sensors, 3)\n The positions to interpoloate from.\n pos_to : np.ndarray of float, shape(n_bad_sensors, 3)\n The positions to interpoloate.\n alpha : float\n Regularization parameter. Defaults to 1e-5.\n\n Returns\n -------\n interpolation : np.ndarray of float, shape(len(pos_from), len(pos_to))\n The interpolation matrix that maps good signals to the location\n of bad signals.\n\n References\n ----------\n [1] Perrin, F., Pernier, J., Bertrand, O. and Echallier, JF. (1989).\n Spherical splines for scalp potential and current density mapping.\n Electroencephalography Clinical Neurophysiology, Feb; 72(2):184-7.\n \"\"\"\n\n pos_from = pos_from.copy()\n pos_to = pos_to.copy()\n\n # normalize sensor positions to sphere\n _normalize_vectors(pos_from)\n _normalize_vectors(pos_to)\n\n # cosine angles between source positions\n cosang_from = pos_from.dot(pos_from.T)\n cosang_to_from = pos_to.dot(pos_from.T)\n G_from = _calc_g(cosang_from)\n G_to_from, H_to_from = (f(cosang_to_from) for f in (_calc_g, _calc_h))\n\n if alpha is not None:\n G_from.flat[::len(G_from) + 1] += alpha\n\n C_inv = linalg.pinv(G_from)\n interpolation = G_to_from.dot(C_inv)\n return interpolation\n\n\ndef _interpolate_bads_eeg(inst):\n \"\"\"Interpolate bad channels\n\n Operates in place.\n\n Parameters\n ----------\n inst : mne.io.Raw, mne.Epochs or mne.Evoked\n The data to interpolate. 
Must be preloaded.\n \"\"\"\n from mne.io.base import _BaseRaw\n from mne.epochs import _BaseEpochs\n from mne.evoked import Evoked\n\n if 'eeg' not in inst:\n raise ValueError('This interpolation function requires EEG channels.')\n if len(inst.info['bads']) == 0:\n raise ValueError('No bad channels to interpolate.')\n if getattr(inst, 'preload', None) is False:\n raise ValueError('Data must be preloaded.')\n\n bads_idx = np.zeros(len(inst.ch_names), dtype=np.bool)\n goods_idx = np.zeros(len(inst.ch_names), dtype=np.bool)\n\n picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])\n bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks]\n goods_idx[picks] = True\n goods_idx[bads_idx] = False\n\n if len(bads_idx) != len(inst.info['bads']):\n logger.warning('Channel interpolation is currently only implemented '\n 'for EEG. The MEG channels marked as bad will remain '\n 'untouched.')\n\n pos = inst.get_channel_positions(picks)\n\n # Make sure only EEG are used\n bads_idx_pos = bads_idx[picks]\n goods_idx_pos = goods_idx[picks]\n\n pos_good = pos[goods_idx_pos]\n pos_bad = pos[bads_idx_pos]\n\n # test spherical fit\n radius, center = _fit_sphere(pos_good)\n distance = np.sqrt(np.sum((pos_good - center) ** 2, 1))\n distance = np.mean(distance / radius)\n if np.abs(1. - distance) > 0.1:\n logger.warning('Your spherical fit is poor, interpolation results are '\n 'likely to be inaccurate.')\n\n logger.info('Computing interpolation matrix from {0} sensor '\n 'positions'.format(len(pos_good)))\n\n interpolation = _make_interpolation_matrix(pos_good, pos_bad)\n\n logger.info('Interpolating {0} sensors'.format(len(pos_bad)))\n if getattr(inst, 'preload', None) is False:\n raise ValueError('Data must be preloaded')\n\n if isinstance(inst, _BaseRaw):\n inst._data[bads_idx] = interpolation.dot(inst._data[goods_idx])\n elif isinstance(inst, _BaseEpochs):\n tmp = np.dot(interpolation[:, np.newaxis, :],\n inst._data[:, goods_idx, :])\n if np.sum(bads_idx) == 1:\n tmp = tmp[0]\n else:\n tmp = tmp[:, 0, ...]\n inst._data[:, bads_idx, :] = np.transpose(tmp, (1, 0, 2))\n elif isinstance(inst, Evoked):\n inst.data[bads_idx] = interpolation.dot(inst.data[goods_idx])\n else:\n raise ValueError('Inputs of type {0} are not supported'\n .format(type(inst)))\n return inst\n"
] | [
[
"sklearn.cross_validation.cross_val_score",
"matplotlib.pyplot.legend",
"sklearn.lda.LDA",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.title",
"numpy.arange",
"sklearn.pipeline.Pipeline",
"matplotlib.pyplot.ylabel",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"numpy.swapaxes",
"matplotlib.pyplot.title",
"numpy.invert",
"numpy.asarray",
"numpy.arange",
"matplotlib.pyplot.figure",
"numpy.squeeze",
"matplotlib.pyplot.colorbar",
"numpy.mean",
"numpy.ma.masked_array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.ylabel"
],
[
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.mean",
"numpy.where"
],
[
"numpy.dot",
"scipy.linalg.pinv",
"numpy.abs",
"numpy.polynomial.legendre.legval",
"numpy.mean",
"numpy.transpose",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
kafkasl/contextualLSTM | [
"a4421d592c3960c79842b0f23de162e61fcab3dd"
] | [
"src/lstm/lstm_wp.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Example / benchmark for building a PTB LSTM model.\n\nTrains the model described in:\n(Zaremba, et. al.) Recurrent Neural Network Regularization\nhttp://arxiv.org/abs/1409.2329\n\nThere are 3 supported model configurations:\n===========================================\n| config | epochs | train | valid | test\n===========================================\n| small | 13 | 37.99 | 121.39 | 115.91\n| medium | 39 | 48.45 | 86.16 | 82.07\n| large | 55 | 37.87 | 82.62 | 78.29\nThe exact results may vary depending on the random initialization.\n\nThe hyperparameters used in the model:\n- init_scale - the initial scale of the weights\n- learning_rate - the initial value of the learning rate\n- max_grad_norm - the maximum permissible norm of the gradient\n- num_layers - the number of LSTM layers\n- num_steps - the number of unrolled steps of LSTM\n- hidden_size - the number of LSTM units\n- max_epoch - the number of epochs trained with the initial learning rate\n- max_max_epoch - the total number of epochs for training\n- keep_prob - the probability of keeping weights in the dropout layer\n- lr_decay - the decay of the learning rate for each epoch after \"max_epoch\"\n- batch_size - the batch size\n\nThe data required for this example is in the data/ dir of the\nPTB dataset from Tomas Mikolov's webpage:\n\n$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz\n$ tar xvf simple-examples.tgz\n\nTo run:\n\n$ python ptb_word_lm.py --data_path=simple-examples/data/\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nsys.path.insert(0, \"../src/\")\n\nimport inspect\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport reader_wp as reader\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string(\n \"model\", \"small\",\n \"A type of model. Possible options are: small, medium, large.\")\n\nflags.DEFINE_string(\n \"tasks\", \"all\",\n \"Tasks to be performed. Possible options are: all, train, test, valid\")\n\nflags.DEFINE_string(\n \"word_to_id_path\", \"../models/eos/word2id_1000.pklz\",\n \"A type of model. 
Possible options are: small, medium, large.\")\n\nflags.DEFINE_string(\"data_path\", None,\n \"Where the training/test data is stored.\")\nflags.DEFINE_string(\"save_path\", None,\n \"Model output directory.\")\nflags.DEFINE_bool(\"use_fp16\", False,\n \"Train using 16-bit floats instead of 32bit floats\")\n\nFLAGS = flags.FLAGS\n\n\ndef data_type():\n return tf.float16 if FLAGS.use_fp16 else tf.float32\n\n\ndef get_vocab_size():\n word_to_id = VectorManager.read_vector(FLAGS.word_to_id_path)\n size = len(word_to_id)\n print(\"Vocabulary size: %s\" % size)\n return size\n\nclass WPInput(object):\n \"\"\"The input data.\"\"\"\n\n def __init__(self, config, data, name=None):\n self.batch_size = batch_size = config.batch_size\n self.num_steps = num_steps = config.num_steps\n self.epoch_size = ((len(data) // batch_size) - 1) // num_steps\n self.input_data, self.targets = reader.wiki_producer(\n data, batch_size, num_steps, name=name)\n\n\nclass WPModel(object):\n \"\"\"Word Prediction model.\"\"\"\n\n def __init__(self, is_training, config, input_):\n self._input = input_\n\n batch_size = input_.batch_size\n num_steps = input_.num_steps\n size = config.hidden_size\n vocab_size = config.vocab_size\n\n # Slightly better results can be obtained with forget gate biases\n # initialized to 1 but the hyperparameters of the model would need to be\n # different than reported in the paper.\n def lstm_cell():\n # With the latest TensorFlow source code (as of Mar 27, 2017),\n # the BasicLSTMCell will need a reuse parameter which is unfortunately not\n # defined in TensorFlow 1.0. To maintain backwards compatibility, we add\n # an argument check here:\n # if 'reuse' in inspect.getargspec(\n # tf.contrib.rnn.BasicLSTMCell.__init__).args:\n # return tf.contrib.rnn.BasicLSTMCell(\n # size, forget_bias=0.0, state_is_tuple=True,\n # reuse=tf.get_variable_scope().reuse)\n # else:\n return tf.contrib.rnn.BasicLSTMCell(\n size, forget_bias=0.0, state_is_tuple=True)\n\n attn_cell = lstm_cell\n if is_training and config.keep_prob < 1:\n def attn_cell():\n return tf.contrib.rnn.DropoutWrapper(\n lstm_cell(), output_keep_prob=config.keep_prob)\n\n cell = tf.contrib.rnn.MultiRNNCell(\n [attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)\n\n # data_type() returns float32 or float16\n self._initial_state = cell.zero_state(batch_size, data_type())\n\n with tf.device(\"/cpu:0\"):\n # TODO: replace TF input with my embeddings\n # TODO: implement PTB reader or something similar\n embedding = tf.get_variable(\n \"embedding\", [vocab_size, size], dtype=data_type())\n inputs = tf.nn.embedding_lookup(embedding, input_.input_data)\n\n\n if is_training and config.keep_prob < 1:\n # Dropout allows to use the net for train and testing\n # See: https://stackoverflow.com/questions/34597316/why-input-is-scaled-in-tf-nn-dropout-in-tensorflow\n # and: http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n\n # Simplified version of models/tutorials/rnn/rnn.py's rnn().\n # This builds an unrolled LSTM for tutorial purposes only.\n # In general, use the rnn() or state_saving_rnn() from rnn.py.\n #\n # The alternative version of the code below is:\n #\n inputs = tf.unstack(inputs, num=num_steps, axis=1)\n outputs, state = tf.contrib.rnn.static_rnn(\n cell, inputs, initial_state=self._initial_state)\n # TODO: passing the sequence_length argument will enable to input variable-length tensors\n\n # outputs = []\n # state = self._initial_state\n # with 
tf.variable_scope(\"RNN\"):\n # for time_step in range(num_steps):\n # if time_step > 0:\n # tf.get_variable_scope().reuse_variables()\n # (cell_output, state) = cell(inputs[:, time_step, :], state) # Call (inputs, state)\n # outputs.append(cell_output)\n\n # TODO: check why outputs are stacked and resized\n output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])\n softmax_w = tf.get_variable(\n \"softmax_w\", [size, vocab_size], dtype=data_type())\n softmax_b = tf.get_variable(\"softmax_b\", [vocab_size], dtype=data_type())\n logits = tf.matmul(output, softmax_w) + softmax_b\n loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n [logits],\n [tf.reshape(input_.targets, [-1])],\n [tf.ones([batch_size * num_steps], dtype=data_type())])\n self._cost = cost = tf.reduce_sum(loss) / batch_size\n self._final_state = state\n\n if not is_training:\n return\n\n self._lr = tf.Variable(0.0, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),\n config.max_grad_norm)\n optimizer = tf.train.GradientDescentOptimizer(self._lr)\n self._train_op = optimizer.apply_gradients(\n zip(grads, tvars),\n global_step=tf.contrib.framework.get_or_create_global_step())\n\n self._new_lr = tf.placeholder(\n tf.float32, shape=[], name=\"new_learning_rate\")\n self._lr_update = tf.assign(self._lr, self._new_lr)\n\n def assign_lr(self, session, lr_value):\n session.run(self._lr_update, feed_dict={self._new_lr: lr_value})\n\n @property\n def input(self):\n return self._input\n\n @property\n def initial_state(self):\n return self._initial_state\n\n @property\n def cost(self):\n return self._cost\n\n @property\n def final_state(self):\n return self._final_state\n\n @property\n def lr(self):\n return self._lr\n\n @property\n def train_op(self):\n return self._train_op\n\n\nclass SmallConfig(object):\n \"\"\"Small config.\"\"\"\n init_scale = 0.1\n learning_rate = 1.0\n max_grad_norm = 5\n num_layers = 2\n num_steps = 20\n hidden_size = 200\n max_epoch = 4\n max_max_epoch = 13\n keep_prob = 1.0\n lr_decay = 0.5\n batch_size = 20\n vocab_size = 27942\n\n\nclass MediumConfig(object):\n \"\"\"Medium config.\"\"\"\n init_scale = 0.05\n learning_rate = 1.0\n max_grad_norm = 5\n num_layers = 2\n num_steps = 35\n hidden_size = 650\n max_epoch = 6\n max_max_epoch = 39\n keep_prob = 0.5\n lr_decay = 0.8\n batch_size = 20\n vocab_size = 10000\n\n\nclass LargeConfig(object):\n \"\"\"Large config.\"\"\"\n init_scale = 0.04\n learning_rate = 1.0\n max_grad_norm = 10\n num_layers = 2\n num_steps = 35\n hidden_size = 1024\n max_epoch = 14\n max_max_epoch = 55\n keep_prob = 0.35\n lr_decay = 1 / 1.15\n batch_size = 20\n vocab_size = 10000\n\n\nclass TestConfig(object):\n \"\"\"Tiny config, for testing.\"\"\"\n init_scale = 0.1\n learning_rate = 1.0\n max_grad_norm = 1\n num_layers = 1\n num_steps = 2\n hidden_size = 2\n max_epoch = 1\n max_max_epoch = 1\n keep_prob = 1.0\n lr_decay = 0.5\n batch_size = 20\n vocab_size = 10000\n\n\ndef run_epoch(session, model, eval_op=None, verbose=False):\n \"\"\"Runs the model on the given data.\"\"\"\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = session.run(model.initial_state)\n\n fetches = {\n \"cost\": model.cost,\n \"final_state\": model.final_state,\n }\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n\n for step in range(model.input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = 
session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n iters += model.input.num_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),\n iters * model.input.batch_size / (time.time() - start_time)))\n\n return np.exp(costs / iters)\n\n\ndef get_config():\n if FLAGS.model == \"small\":\n return SmallConfig()\n elif FLAGS.model == \"medium\":\n return MediumConfig()\n elif FLAGS.model == \"large\":\n return LargeConfig()\n elif FLAGS.model == \"test\":\n return TestConfig()\n else:\n raise ValueError(\"Invalid model: %s\", FLAGS.model)\n\n\ndef main(_):\n if not FLAGS.data_path:\n raise ValueError(\"Must set --data_path to wiki data directory\")\n\n raw_data = reader.wiki_raw_data(FLAGS.data_path, FLAGS.word_to_id_path)\n train_data, valid_data, test_data = raw_data\n\n #vocab_size = get_vocab_size()\n vocab_size = 126930\n\n config = get_config()\n config.vocab_size = vocab_size\n\n eval_config = get_config()\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n eval_config.vocab_size = vocab_size\n\n with tf.Graph().as_default():\n # Args: [minval, maxval]\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n\n with tf.name_scope(\"Train\"):\n train_input = WPInput(config=config, data=train_data, name=\"TrainInput\")\n with tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n m = WPModel(is_training=True, config=config, input_=train_input)\n tf.summary.scalar(\"Training Loss\", m.cost)\n tf.summary.scalar(\"Learning Rate\", m.lr)\n\n with tf.name_scope(\"Valid\"):\n valid_input = WPInput(config=config, data=valid_data, name=\"ValidInput\")\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mvalid = WPModel(is_training=False, config=config, input_=valid_input)\n tf.summary.scalar(\"Validation Loss\", mvalid.cost)\n\n with tf.name_scope(\"Test\"):\n test_input = WPInput(config=eval_config, data=test_data, name=\"TestInput\")\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mtest = WPModel(is_training=False, config=eval_config,\n input_=test_input)\n\n sv = tf.train.Supervisor(logdir=FLAGS.save_path)\n with sv.managed_session() as session:\n for i in range(config.max_max_epoch):\n lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)\n m.assign_lr(session, config.learning_rate * lr_decay)\n\n print(\"Epoch: %d Learning rate: %.3f\" % (i + 1, session.run(m.lr)))\n train_perplexity = run_epoch(session, m, eval_op=m.train_op,\n verbose=True)\n print(\"Epoch: %d Train Perplexity: %.3f\" % (i + 1, train_perplexity))\n valid_perplexity = run_epoch(session, mvalid)\n print(\"Epoch: %d Valid Perplexity: %.3f\" % (i + 1, valid_perplexity))\n\n test_perplexity = run_epoch(session, mtest)\n print(\"Test Perplexity: %.3f\" % test_perplexity)\n\n if FLAGS.save_path:\n print(\"Saving model to %s.\" % FLAGS.save_path)\n sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] | [
[
"tensorflow.device",
"tensorflow.stack",
"tensorflow.reduce_sum",
"numpy.exp",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.Variable",
"tensorflow.random_uniform_initializer",
"tensorflow.gradients",
"tensorflow.name_scope",
"tensorflow.trainable_variables",
"tensorflow.nn.dropout",
"tensorflow.app.run",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.placeholder",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.contrib.rnn.static_rnn",
"tensorflow.nn.embedding_lookup",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.assign",
"tensorflow.reshape",
"tensorflow.train.Supervisor",
"tensorflow.variable_scope",
"tensorflow.contrib.framework.get_or_create_global_step"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
fazildgr8/virtual_pen_MNIST | [
"69055980ee0f0005766e62e3a1ca4e2a0259157c"
] | [
"pensetup.py"
] | [
"import cv2\r\nimport numpy as np\r\nimport time\r\n\r\n\r\n# A required callback method that goes into the trackbar function.\r\ndef nothing(x):\r\n pass\r\n\r\n\r\n# Initializing the webcam feed.\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3, 1280)\r\ncap.set(4, 720)\r\n\r\n# Create a window named trackbars.\r\ncv2.namedWindow(\"Trackbars\")\r\n\r\n# Now create 6 trackbars that will control the lower and upper range of\r\n# H,S and V channels. The Arguments are like this: Name of trackbar,\r\n# window name, range,callback function. For Hue the range is 0-179 and\r\n# for S,V its 0-255.\r\ncv2.createTrackbar(\"L - H\", \"Trackbars\", 0, 179, nothing)\r\ncv2.createTrackbar(\"L - S\", \"Trackbars\", 0, 255, nothing)\r\ncv2.createTrackbar(\"L - V\", \"Trackbars\", 0, 255, nothing)\r\ncv2.createTrackbar(\"U - H\", \"Trackbars\", 179, 179, nothing)\r\ncv2.createTrackbar(\"U - S\", \"Trackbars\", 255, 255, nothing)\r\ncv2.createTrackbar(\"U - V\", \"Trackbars\", 255, 255, nothing)\r\n\r\nwhile True:\r\n\r\n # Start reading the webcam feed frame by frame.\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n # Flip the frame horizontally (Not required)\r\n frame = cv2.flip(frame, 1)\r\n\r\n # Convert the BGR image to HSV image.\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n # Get the new values of the trackbar in real time as the user changes\r\n # them\r\n l_h = cv2.getTrackbarPos(\"L - H\", \"Trackbars\")\r\n l_s = cv2.getTrackbarPos(\"L - S\", \"Trackbars\")\r\n l_v = cv2.getTrackbarPos(\"L - V\", \"Trackbars\")\r\n u_h = cv2.getTrackbarPos(\"U - H\", \"Trackbars\")\r\n u_s = cv2.getTrackbarPos(\"U - S\", \"Trackbars\")\r\n u_v = cv2.getTrackbarPos(\"U - V\", \"Trackbars\")\r\n\r\n # Set the lower and upper HSV range according to the value selected\r\n # by the trackbar\r\n lower_range = np.array([l_h, l_s, l_v])\r\n upper_range = np.array([u_h, u_s, u_v])\r\n\r\n # Filter the image and get the binary mask, where white represents\r\n # your target color\r\n mask = cv2.inRange(hsv, lower_range, upper_range)\r\n\r\n # You can also visualize the real part of the target color (Optional)\r\n res = cv2.bitwise_and(frame, frame, mask=mask)\r\n\r\n # Converting the binary mask to 3 channel image, this is just so\r\n # we can stack it with the others\r\n mask_3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)\r\n\r\n # stack the mask, orginal frame and the filtered result\r\n stacked = np.hstack((mask_3, frame, res))\r\n\r\n # Show this stacked frame at 40% of the size.\r\n cv2.imshow('Trackbars', cv2.resize(stacked, None, fx=0.4, fy=0.4))\r\n\r\n # If the user presses ESC then exit the program\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n\r\n # If the user presses `s` then print this array.\r\n if key == ord('s'):\r\n\r\n thearray = [[l_h, l_s, l_v], [u_h, u_s, u_v]]\r\n print(thearray)\r\n\r\n # Also save this array as penval.npy\r\n np.save('penval', thearray)\r\n break\r\n\r\n# Release the camera & destroy the windows.\r\ncap.release()\r\ncv2.destroyAllWindows()"
] | [
[
"numpy.hstack",
"numpy.array",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gitter-badger/agent | [
"3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11",
"3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11"
] | [
"neodroidagent/utilities/exploration/sampling/random_process/ornstein_uhlenbeck.py",
"neodroidagent/utilities/signal/advantage_estimation.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom .annealed_guassian import AnnealedGaussianProcess\n\n__author__ = \"Christian Heider Nielsen\"\n\n# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab\nimport numpy\n\n__all__ = [\"OrnsteinUhlenbeckProcess\"]\n\n\nclass OrnsteinUhlenbeckProcess(AnnealedGaussianProcess):\n def __init__(\n self,\n *,\n theta: float = 0.15,\n mean: float = 0.0,\n sigma: float = 1.0,\n dt: float = 1e-2,\n x_0=None,\n sigma_min: float = None,\n n_steps_annealing: int = 1000,\n **kwargs\n ):\n super().__init__(\n mean=mean,\n sigma=sigma,\n sigma_min=sigma_min,\n n_steps_annealing=n_steps_annealing,\n **kwargs\n )\n self.theta = theta\n self.mean = mean\n self.dt = dt\n self.x_0 = x_0\n self.reset()\n\n def sample(self, size):\n x = (\n self.x_prev\n + self.theta * (self.mean - self.x_prev) * self.dt\n + self.current_sigma * numpy.sqrt(self.dt) * numpy.random.normal(size=size)\n )\n self.x_prev = x\n self.n_steps += 1\n return x\n\n def reset(self):\n super().reset()\n self.x_prev = self.x_0 if self.x_0 is not None else numpy.zeros_like(self.x_0)\n\n\nif __name__ == \"__main__\":\n\n random_process = OrnsteinUhlenbeckProcess(theta=0.5)\n\n for i in range(1000):\n print(random_process.sample((2, 1)))\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom typing import Union\n\nimport numpy\n\nfrom draugr.torch_utilities import to_tensor\n\n__author__ = \"Christian Heider Nielsen\"\n\nimport torch\n\n__all__ = [\"torch_advantage_estimate\", \"torch_compute_gae\"]\n\n\ndef torch_advantage_estimate(\n signal,\n non_terminal,\n value_estimate,\n *,\n discount_factor: float = 0.95,\n tau: float = 0.97,\n device: Union[str, torch.device] = \"cpu\",\n normalise: bool = True,\n divide_by_zero_safety: float = 1e-10,\n):\n \"\"\"\nComputes advantages and discounted returns.\nIf the advantage is positive for an action, then it yielded a more positive signal than expected. And thus\nexpectations can be adjust to make actions more likely.\n\n:param discount_factor:\n:type discount_factor:\n:param tau:\n:type tau:\n:return:\n:rtype:\n@param device:\n@param tau:\n@param discount_factor:\n@param value_estimate:\n@param non_terminal:\n@param signal:\n@param divide_by_zero_safety:\n@param normalise:\n\"\"\"\n horizon_length, num_workers, *_ = signal.size()\n\n advantages_out = torch.zeros_like(signal, device=device)\n adv = torch.zeros(num_workers, device=device)\n\n for t in reversed(range(horizon_length - 1)):\n delta = (\n signal[t]\n + value_estimate[t + 1] * discount_factor * non_terminal[t]\n - value_estimate[t]\n )\n adv = adv * discount_factor * tau * non_terminal[t] + delta\n\n advantages_out[t] = adv\n\n if normalise:\n advantages_out = (advantages_out - advantages_out.mean()) / (\n advantages_out.std() + divide_by_zero_safety\n )\n\n return advantages_out\n\n\ndef torch_compute_gae(\n signal,\n non_terminal,\n values,\n *,\n discount_factor=0.95,\n gae_lambda=0.95,\n device: Union[str, torch.device] = \"cpu\",\n normalise_adv=True,\n) -> torch.tensor:\n \"\"\"\n\nComputes discounted return and advantage\n\n@param signal:\n@param non_terminal:\n@param values:\n@param discount_factor:\n@param gae_lambda:\n@param device:\n@param normalise:\n@return:\n\"\"\"\n len_signal = len(signal)\n assert len_signal == len(non_terminal) == len(values) - 1, (\n f\"{signal.shape}, {non_terminal.shape}, \" f\"{values.shape}\"\n )\n\n ret = []\n gae = 0\n for step_i in reversed(range(len_signal)):\n delta = (\n signal[step_i]\n + discount_factor * values[step_i + 1] * non_terminal[step_i]\n - values[step_i]\n )\n gae = delta + discount_factor * gae_lambda * non_terminal[step_i] * gae\n ret.insert(0, gae + values[step_i])\n\n ret = to_tensor(ret, device=device)\n advantage = ret - values[:-1]\n\n if normalise_adv:\n advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-6)\n\n return ret, advantage\n\n\nif __name__ == \"__main__\":\n\n def s():\n\n numpy.random.seed(23)\n size = (10, 3, 1)\n a_size = (size[0] + 1, *size[1:])\n signal = numpy.zeros(size)\n non_terminal = numpy.ones(size)\n value_estimate = numpy.random.random(a_size)\n non_terminal[3, 0] = 0\n non_terminal[8, 1] = 0\n signal[-5:, :] = -1\n\n signals = to_tensor(signal, device=\"cpu\")\n non_terminals = to_tensor(non_terminal, device=\"cpu\")\n value_estimates = to_tensor(value_estimate, device=\"cpu\")\n\n r, a = torch_compute_gae(signals, non_terminals, value_estimates)\n print(r, a)\n print(size, r.shape, a.shape)\n\n s()\n"
] | [
[
"numpy.random.normal",
"numpy.zeros_like",
"numpy.sqrt"
],
[
"numpy.random.random",
"numpy.random.seed",
"torch.zeros",
"torch.zeros_like",
"numpy.ones",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
diegulio/Breed_Recognition-to-Buscomiperro | [
"040ee45b9b5c355c3ec2c7413cd89a623024ad4e"
] | [
"label_traincatset.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"label_TrainCatSet.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1vDyBZ7Ql-8qQ3l7EWJB9TfnwGy66qGGn\n\"\"\"\n\nimport pandas as pd\nimport os\nimport numpy as np\n\n# Enlisto los nombres de las imagenes\nimagenes = os.listdir('/content/drive/MyDrive/Colab Data/Proyecto buscomiperro/gatos')\nimagenes[:5]\n\ndef extract_ext(id): # Para que el resultado sea como el de razas le quito la extensión\n return os.path.splitext(id)[0]\n\nlabels = list(map(extract_ext, imagenes))\n\ndf = pd.DataFrame()\ndf['id'] = labels\ndf['breed'] = 'gato'\ndf.to_csv('cat_labels.csv')\n\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lebrice/RoBO | [
"0cb58a1622d3a540f7714b239f0cedf048b6fd9f",
"0cb58a1622d3a540f7714b239f0cedf048b6fd9f",
"0cb58a1622d3a540f7714b239f0cedf048b6fd9f",
"0cb58a1622d3a540f7714b239f0cedf048b6fd9f",
"0cb58a1622d3a540f7714b239f0cedf048b6fd9f"
] | [
"robo/fmin/bayesian_optimization.py",
"robo/maximizers/differential_evolution.py",
"robo/fmin/entropy_search.py",
"robo/maximizers/direct.py",
"experiments/bayesopt/run_fabolas_surrogate.py"
] | [
"import logging\nimport george\nimport numpy as np\nimport inspect\n\nfrom pybnn import BaseModel\nfrom pybnn.dngo import DNGO\n\nfrom robo.priors.default_priors import DefaultPrior\nfrom robo.models.base_model import BaseModel as BaseModel_\nfrom robo.models.wrapper_bohamiann import WrapperBohamiann\nfrom robo.models.gaussian_process import GaussianProcess\nfrom robo.models.gaussian_process_mcmc import GaussianProcessMCMC\nfrom robo.models.random_forest import RandomForest\nfrom robo.maximizers.base_maximizer import BaseMaximizer\nfrom robo.maximizers.scipy_optimizer import SciPyOptimizer\nfrom robo.maximizers.random_sampling import RandomSampling\nfrom robo.maximizers.differential_evolution import DifferentialEvolution\nfrom robo.solver.bayesian_optimization import BayesianOptimization\nfrom robo.acquisition_functions.base_acquisition import BaseAcquisitionFunction\nfrom robo.acquisition_functions.ei import EI\nfrom robo.acquisition_functions.pi import PI\nfrom robo.acquisition_functions.log_ei import LogEI\nfrom robo.acquisition_functions.lcb import LCB\nfrom robo.acquisition_functions.marginalization import MarginalizationGPMCMC\nfrom robo.initial_design import init_latin_hypercube_sampling\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef bayesian_optimization(objective_function, lower, upper, num_iterations=30, X_init=None, Y_init=None,\n maximizer=\"random\", acquisition_func=\"log_ei\", model_type=\"gp_mcmc\",\n n_init=3, rng=None, output_path=None):\n \"\"\"\n General interface for Bayesian optimization for global black box\n optimization problems.\n\n Parameters\n ----------\n objective_function: function\n The objective function that is minimized. This function gets a numpy\n array (D,) as input and returns the function value (scalar)\n lower: np.ndarray (D,)\n The lower bound of the search space\n upper: np.ndarray (D,)\n The upper bound of the search space\n num_iterations: int\n The number of iterations (initial design + BO)\n X_init: np.ndarray(N,D)\n Initial points to warmstart BO\n Y_init: np.ndarray(N,1)\n Function values of the already initial points\n maximizer: {\"random\", \"scipy\", \"differential_evolution\"}\n The optimizer for the acquisition function.\n acquisition_func: {\"ei\", \"log_ei\", \"lcb\", \"pi\"}\n The acquisition function\n model_type: {\"gp\", \"gp_mcmc\", \"rf\", \"bohamiann\", \"dngo\"}\n The model for the objective function.\n n_init: int\n Number of points for the initial design. 
Make sure that it\n is <= num_iterations.\n output_path: string\n Specifies the path where the intermediate output after each iteration will be saved.\n If None no output will be saved to disk.\n rng: numpy.random.RandomState\n Random number generator\n\n Returns\n -------\n dict with all results\n \"\"\"\n assert upper.shape[0] == lower.shape[0], \"Dimension miss match\"\n assert np.all(lower < upper), \"Lower bound >= upper bound\"\n assert n_init <= num_iterations, \"Number of initial design point has to be <= than the number of iterations\"\n\n if rng is None:\n rng = np.random.RandomState(np.random.randint(0, 10000))\n\n cov_amp = 2\n n_dims = lower.shape[0]\n\n initial_ls = np.ones([n_dims])\n exp_kernel = george.kernels.Matern52Kernel(initial_ls,\n ndim=n_dims)\n kernel = cov_amp * exp_kernel\n\n prior = DefaultPrior(len(kernel) + 1)\n\n n_hypers = 3 * len(kernel)\n if n_hypers % 2 == 1:\n n_hypers += 1\n\n if model_type == \"gp\":\n model = GaussianProcess(kernel, prior=prior, rng=rng,\n normalize_output=False, normalize_input=True,\n lower=lower, upper=upper)\n elif model_type == \"gp_mcmc\":\n model = GaussianProcessMCMC(kernel, prior=prior,\n n_hypers=n_hypers,\n chain_length=200,\n burnin_steps=100,\n normalize_input=True,\n normalize_output=False,\n rng=rng, lower=lower, upper=upper)\n\n elif model_type == \"rf\":\n model = RandomForest(rng=rng)\n\n elif model_type == \"bohamiann\":\n model = WrapperBohamiann()\n\n elif model_type == \"dngo\":\n model = DNGO()\n\n elif isinstance(model_type, (BaseModel, BaseModel_)):\n model = model_type\n\n elif callable(model_type):\n model = model_type()\n\n else:\n raise ValueError(\"'{}' is not a valid model\".format(model_type))\n\n if acquisition_func == \"ei\":\n a = EI(model)\n elif acquisition_func == \"log_ei\":\n a = LogEI(model)\n elif acquisition_func == \"pi\":\n a = PI(model)\n elif acquisition_func == \"lcb\":\n a = LCB(model)\n elif isinstance(acquisition_func, BaseAcquisitionFunction):\n a = acquisition_func\n elif callable(acquisition_func):\n a = acquisition_func(model)\n else:\n raise ValueError(\"'{}' is not a valid acquisition function\"\n .format(acquisition_func))\n\n if model_type == \"gp_mcmc\":\n acquisition_func = MarginalizationGPMCMC(a)\n else:\n acquisition_func = a\n\n if maximizer == \"random\":\n max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)\n elif maximizer == \"scipy\":\n max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)\n elif maximizer == \"differential_evolution\":\n max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)\n elif isinstance(maximizer, BaseMaximizer):\n max_func = maximizer\n elif callable(maximizer):\n max_func = maximizer(acquisition_func, lower, upper, rng=rng)\n else:\n raise ValueError(\"'{}' is not a valid function to maximize the \"\n \"acquisition function\".format(maximizer))\n\n bo = BayesianOptimization(objective_function, lower, upper,\n acquisition_func, model, max_func,\n initial_points=n_init, rng=rng,\n initial_design=init_latin_hypercube_sampling,\n output_path=output_path)\n\n x_best, f_min = bo.run(num_iterations, X=X_init, y=Y_init)\n\n results = dict()\n results[\"x_opt\"] = x_best\n results[\"f_opt\"] = f_min\n results[\"incumbents\"] = [inc for inc in bo.incumbents]\n results[\"incumbent_values\"] = [val for val in bo.incumbents_values]\n results[\"runtime\"] = bo.runtime\n results[\"overhead\"] = bo.time_overhead\n results[\"X\"] = [x.tolist() for x in bo.X]\n results[\"y\"] = [y for y in bo.y]\n return 
results\n",
"import sys\nimport numpy as np\nimport scipy as sp\n\nfrom robo.maximizers.base_maximizer import BaseMaximizer\n\n\nclass DifferentialEvolution(BaseMaximizer):\n\n def __init__(self, objective_function, lower, upper, n_iters=20, rng=None):\n \"\"\"\n\n Parameters\n ----------\n objective_function: acquisition function\n The acquisition function which will be maximized\n lower: np.ndarray (D)\n Lower bounds of the input space\n upper: np.ndarray (D)\n Upper bounds of the input space\n n_iters: int\n Number of iterations\n \"\"\"\n self.n_iters = n_iters\n super(DifferentialEvolution, self).__init__(objective_function, lower, upper, rng)\n\n def _acquisition_fkt_wrapper(self, acq_f):\n def _l(x):\n a = -acq_f(np.array([np.clip(x, self.lower, self.upper)]))\n if np.any(np.isinf(a)):\n return sys.float_info.max\n return a\n\n return _l\n\n def maximize(self):\n \"\"\"\n Maximizes the given acquisition function.\n\n Returns\n -------\n np.ndarray(N,D)\n Point with highest acquisition value.\n \"\"\"\n\n bounds = list(zip(self.lower, self.upper))\n\n res = sp.optimize.differential_evolution(self._acquisition_fkt_wrapper(self.objective_func),\n bounds, maxiter=self.n_iters)\n\n return np.clip(res[\"x\"], self.lower, self.upper)\n",
"import logging\nimport george\nimport numpy as np\n\nfrom robo.priors.default_priors import DefaultPrior\nfrom robo.models.gaussian_process import GaussianProcess\nfrom robo.models.gaussian_process_mcmc import GaussianProcessMCMC\nfrom robo.maximizers.random_sampling import RandomSampling\nfrom robo.maximizers.scipy_optimizer import SciPyOptimizer\nfrom robo.maximizers.differential_evolution import DifferentialEvolution\nfrom robo.solver.bayesian_optimization import BayesianOptimization\nfrom robo.acquisition_functions.information_gain import InformationGain\nfrom robo.acquisition_functions.ei import EI\nfrom robo.acquisition_functions.marginalization import MarginalizationGPMCMC\nfrom robo.initial_design import init_latin_hypercube_sampling\n\nlogger = logging.getLogger(__name__)\n\n\ndef entropy_search(objective_function, lower, upper, num_iterations=30,\n maximizer=\"random\", model=\"gp_mcmc\", X_init=None, Y_init=None,\n n_init=3, output_path=None, rng=None):\n \"\"\"\n Entropy search for global black box optimization problems. This is a reimplemenation of the entropy search\n algorithm by Henning and Schuler[1].\n\n [1] Entropy search for information-efficient global optimization.\n P. Hennig and C. Schuler.\n JMLR, (1), 2012.\n\n Parameters\n ----------\n objective_function: function\n The objective function that is minimized. This function gets a numpy array (D,) as input and returns\n the function value (scalar)\n lower: np.ndarray (D,)\n The lower bound of the search space\n upper: np.ndarray (D,)\n The upper bound of the search space\n num_iterations: int\n The number of iterations (initial design + BO)\n maximizer: {\"random\", \"scipy\", \"differential_evolution\"}\n Defines how the acquisition function is maximized.\n model: {\"gp\", \"gp_mcmc\"}\n The model for the objective function.\n X_init: np.ndarray(N,D)\n Initial points to warmstart BO\n Y_init: np.ndarray(N,1)\n Function values of the already initial points\n n_init: int\n Number of points for the initial design. 
Make sure that it is <= num_iterations.\n output_path: string\n Specifies the path where the intermediate output after each iteration will be saved.\n If None no output will be saved to disk.\n rng: numpy.random.RandomState\n Random number generator\n\n Returns\n -------\n dict with all results\n \"\"\"\n assert upper.shape[0] == lower.shape[0], \"Dimension miss match\"\n assert np.all(lower < upper), \"Lower bound >= upper bound\"\n assert n_init <= num_iterations, \"Number of initial design point has to be <= than the number of iterations\"\n\n if rng is None:\n rng = np.random.RandomState(np.random.randint(0, 10000))\n\n cov_amp = 2\n n_dims = lower.shape[0]\n\n initial_ls = np.ones([n_dims])\n exp_kernel = george.kernels.Matern52Kernel(initial_ls,\n ndim=n_dims)\n kernel = cov_amp * exp_kernel\n\n prior = DefaultPrior(len(kernel) + 1)\n\n n_hypers = 3 * len(kernel)\n if n_hypers % 2 == 1:\n n_hypers += 1\n\n if model == \"gp\":\n gp = GaussianProcess(kernel, prior=prior, rng=rng,\n normalize_output=False, normalize_input=True,\n lower=lower, upper=upper)\n elif model == \"gp_mcmc\":\n gp = GaussianProcessMCMC(kernel, prior=prior,\n n_hypers=n_hypers,\n chain_length=200,\n burnin_steps=100,\n normalize_input=True,\n normalize_output=False,\n rng=rng, lower=lower, upper=upper)\n else:\n print(\"ERROR: %s is not a valid model!\" % model)\n return\n\n a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI)\n\n if model == \"gp\":\n acquisition_func = a\n elif model == \"gp_mcmc\":\n acquisition_func = MarginalizationGPMCMC(a)\n\n if maximizer == \"random\":\n max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)\n elif maximizer == \"scipy\":\n max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)\n elif maximizer == \"differential_evolution\":\n max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)\n else:\n print(\"ERROR: %s is not a valid function to maximize the acquisition function!\" % maximizer)\n return\n\n bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func,\n initial_design=init_latin_hypercube_sampling,\n initial_points=n_init, rng=rng, output_path=output_path)\n\n x_best, f_min = bo.run(num_iterations, X=X_init, y=Y_init)\n\n results = dict()\n results[\"x_opt\"] = x_best\n results[\"f_opt\"] = f_min\n results[\"incumbents\"] = [inc for inc in bo.incumbents]\n results[\"incumbent_values\"] = [val for val in bo.incumbents_values]\n results[\"runtime\"] = bo.runtime\n results[\"overhead\"] = bo.time_overhead\n results[\"X\"] = [x.tolist() for x in bo.X]\n results[\"y\"] = [y for y in bo.y]\n return results\n",
"import os\nimport sys\n\ntry:\n import DIRECT\nexcept ImportError:\n raise ImportError(\"\"\"\n In order to use this module, DIRECT need to be installed. Try running\n pip install direct\n \"\"\")\n\nimport numpy as np\n\nfrom robo.maximizers.base_maximizer import BaseMaximizer\n\n\nclass Direct(BaseMaximizer):\n\n def __init__(self, objective_function, lower, upper,\n n_func_evals=400, n_iters=200, verbose=True):\n \"\"\"\n Interface for the DIRECT algorithm by D. R. Jones, C. D. Perttunen\n and B. E. Stuckmann\n\n Parameters\n ----------\n objective_function: acquisition function\n The acquisition function which will be maximized\n lower: np.ndarray (D)\n Lower bounds of the input space\n upper: np.ndarray (D)\n Upper bounds of the input space\n n_func_evals: int\n The maximum number of function evaluations\n n_iters: int\n The maximum number of iterations\n verbose: bool\n Suppress Direct's output.\n \"\"\"\n self.n_func_evals = n_func_evals\n self.n_iters = n_iters\n self.verbose = verbose\n\n super(Direct, self).__init__(objective_function, lower, upper)\n\n def _direct_acquisition_fkt_wrapper(self, acq_f):\n def _l(x, user_data):\n return -acq_f(np.array([x])), 0\n\n return _l\n\n def maximize(self):\n \"\"\"\n Maximizes the given acquisition function.\n\n Returns\n -------\n np.ndarray(N,D)\n Point with highest acquisition value.\n\n \"\"\"\n if self.verbose:\n x, _, _ = DIRECT.solve(self._direct_acquisition_fkt_wrapper(self.objective_func),\n l=[self.lower],\n u=[self.upper],\n maxT=self.n_iters,\n maxf=self.n_func_evals)\n else:\n fileno = sys.stdout.fileno()\n with os.fdopen(os.dup(fileno), 'wb') as stdout:\n with os.fdopen(os.open(os.devnull, os.O_WRONLY), 'wb') as devnull:\n sys.stdout.flush();\n os.dup2(devnull.fileno(), fileno) # redirect\n x, _, _ = DIRECT.solve(self._direct_acquisition_fkt_wrapper(self.objective_func),\n l=[self.lower],\n u=[self.upper],\n maxT=self.n_iters,\n maxf=self.n_func_evals)\n sys.stdout.flush();\n os.dup2(stdout.fileno(), fileno) # restore\n return x\n",
"import os\nimport sys\nimport json\nimport logging\nimport numpy as np\n\nlogging.basicConfig(level=logging.INFO)\n\nfrom robo.fmin import fabolas\n\nfrom hpolib.benchmarks.ml.surrogate_svm import SurrogateSVM\nfrom hpolib.benchmarks.ml.surrogate_cnn import SurrogateCNN\nfrom hpolib.benchmarks.ml.surrogate_fcnet import SurrogateFCNet\n\n\nrun_id = int(sys.argv[1])\nbenchmark = sys.argv[2]\n\nif benchmark == \"svm_mnist\":\n f = SurrogateSVM(path=\"/ihome/kleinaa/devel/git/HPOlib/surrogates/\")\nelif benchmark == \"cnn_cifar10\":\n f = SurrogateCNN(path=\"/ihome/kleinaa/devel/git/HPOlib/surrogates/\")\nelif benchmark == \"fcnet_mnist\":\n f = SurrogateFCNet(path=\"/ihome/kleinaa/devel/git/HPOlib/surrogates/\")\n\noutput_path = \"./experiments/RoBO/surrogates\"\n\nrng = np.random.RandomState(run_id)\n\nnum_iterations = 100\ns_max = 50000\ns_min = 100\nsubsets = [128] * 8\nsubsets.extend([64] * 4)\nsubsets.extend([32] * 2)\nsubsets.extend([4] * 1)\n\n\ndef objective(x, s):\n dataset_fraction = s / s_max\n\n res = f.objective_function(x, dataset_fraction=dataset_fraction)\n return res[\"function_value\"], res[\"cost\"]\n\ninfo = f.get_meta_information()\nbounds = np.array(info['bounds'])\nlower = bounds[:, 0]\nupper = bounds[:, 1]\nresults = fabolas(objective_function=objective, lower=lower, upper=upper,\n s_min=s_min, s_max=s_max, n_init=len(subsets), num_iterations=num_iterations,\n n_hypers=30, subsets=subsets, rng=rng)\n\nresults[\"run_id\"] = run_id\nresults['X'] = results['X'].tolist()\nresults['y'] = results['y'].tolist()\nresults['c'] = results['c'].tolist()\n\ntest_error = []\ncum_cost = 0\n\nfor i, inc in enumerate(results[\"incumbents\"]):\n y = f.objective_function_test(np.array(inc))[\"function_value\"]\n test_error.append(y)\n\n # Compute the time it would have taken to evaluate this configuration\n c = results[\"c\"][i]\n cum_cost += c\n\n # Estimate the runtime as the optimization overhead + estimated cost\n results[\"runtime\"][i] += cum_cost\n results[\"test_error\"] = test_error\n\nresults[\"method\"] = \"fabolas\"\nresults[\"benchmark\"] = benchmark\nresults[\"run_id\"] = run_id\n\np = os.path.join(output_path, benchmark, \"fabolas\")\nos.makedirs(p, exist_ok=True)\n\nfh = open(os.path.join(p, '%s_run_%d.json' % (benchmark, run_id)), 'w')\njson.dump(results, fh)\nfh.close()\n"
] | [
[
"numpy.all",
"numpy.random.randint",
"numpy.ones"
],
[
"numpy.isinf",
"numpy.clip"
],
[
"numpy.all",
"numpy.random.randint",
"numpy.ones"
],
[
"numpy.array"
],
[
"numpy.array",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andersonmanhaes/ml_mestrado | [
"d737d80e07d9392895e4455e49a33b8700080cf1",
"d737d80e07d9392895e4455e49a33b8700080cf1",
"d737d80e07d9392895e4455e49a33b8700080cf1"
] | [
"T1/code/visualizar_reta.py",
"T1/code/plot_ex1data2.py",
"T2/custo_reglin_regularizada.py"
] | [
"import os\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\ndef plot(filepath, theta):\n path = os.getcwd() + filepath\n dataset = pd.read_csv(path, header=None)\n X = dataset.iloc[:, 0:-1].values\n y = dataset.iloc[:, -1:].values\n\n t = np.arange(0, 25, 1)\n plt.scatter(X, y, color='red', marker='x', label='Training Data')\n plt.plot(t, theta[0] + (theta[1]*t), color='blue', label='Linear Regression')\n plt.axis([4, 25, -5, 25])\n plt.title('Populacao da cidade x Lucro da filial')\n plt.xlabel('Populacao da cidade (10k)')\n plt.ylabel('Lucro (10k)')\n plt.legend()\n plt.show()\n\n filename = 'target/plot1.2.png'\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n\n plt.savefig(filename)\n",
"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef importarDados(filepath):\n path = os.getcwd() + filepath \n data = pd.read_csv(path, header=None)\n\n X = data.iloc[:, 0:-1].values\n y = data.iloc[:, -1:].values\n\n return X, y\n",
"import numpy as np\n\ndef custo_reglin_regularizada(theta, X, y, _lambda):\n # Quantidade de exemplos\n m = len(X)\n theta = np.matrix(theta)\n\n # não considera theta0 para o cálculo\n theta_j = theta[:,1:]\n regularizacao = (_lambda /(2 * m)) * np.sum(theta_j.dot(theta_j.T)) \n\n erro = X.dot(theta.T) - y\n\n # Computa a função de custo J\n J = (np.sum(np.power(erro, 2)))/ (2 * m) \n \n return J + regularizacao"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"pandas.read_csv"
],
[
"numpy.matrix",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eyov7/CV_LTH_Pre-training-LLNL | [
"bb18ba2093328aeb4e5ab3929f2749264ef3c981",
"bb18ba2093328aeb4e5ab3929f2749264ef3c981"
] | [
"main_imp_visda.py",
"SimCLR/optimizer/lars.py"
] | [
"import argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\nimport copy \n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nfrom pruning_utils import *\nfrom visda2017 import VisDA17\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch Visda Training')\n################################ required settings ################################\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('--epochs', default=20, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('-b', '--batch-size', default=128, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.001, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\n\nparser.add_argument('--prune_type', default=None, type=str, help='prune type [lt, pt_trans]')\nparser.add_argument('--pre_weight', default=None, type=str)\nparser.add_argument('--dataset', default='visda17', type=str)\nparser.add_argument('--save_dir', default='results/', type=str)\nparser.add_argument('--percent', default=0.2, type=float, help='pruning rate for each iteration')\nparser.add_argument('--states', default=19, type=int, help='number of iterative pruning states')\nparser.add_argument('--start_state', default=0, type=int, help='number of iterative pruning states')\n\n################################ other settings ################################\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=5e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=50, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. 
')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\n\n\nbest_acc1 = 0\nbest_epoch = 0\n\ndef main():\n args = parser.parse_args()\n\n os.makedirs(args.save_dir, exist_ok=True)\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n main_worker(args.gpu, args)\n\n\ndef main_worker(gpu, args):\n global best_acc1, best_epoch\n args.gpu = gpu\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n # create model\n print(\"=> using model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=False)\n if_pruned = False\n\n assert args.dataset == 'visda17'\n\n ch = model.fc.in_features\n model.fc = nn.Linear(ch,12)\n\n if args.prune_type=='lt':\n print('using Lottery Tickets setting ')\n initalization = copy.deepcopy(model.state_dict())\n torch.save({'state_dict': initalization}, os.path.join(args.save_dir, 'random_init.pt'))\n\n elif args.prune_type=='pt_trans':\n print('using Pretrain Tickets setting')\n ticket_init_weight = torch.load(args.pre_weight)\n if 'state_dict' in ticket_init_weight.keys():\n ticket_init_weight = ticket_init_weight['state_dict']\n\n all_keys = list(ticket_init_weight.keys())\n for key in all_keys:\n if 'fc.' in key:\n del ticket_init_weight[key] \n\n print('layer number', len(ticket_init_weight.keys()))\n for key in ticket_init_weight.keys():\n assert key in model.state_dict().keys()\n model.load_state_dict(ticket_init_weight, strict=False)\n initalization = copy.deepcopy(model.state_dict())\n\n else:\n raise ValueError(\"Unknown Pruning Type\")\n\n print('Mode: Dataparallel')\n model = torch.nn.DataParallel(model).cuda()\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n args.start_state = checkpoint['state']\n best_acc1 = checkpoint['best_acc1']\n if_pruned = checkpoint['if_pruned']\n initalization = checkpoint['init_weight']\n\n if if_pruned:\n prune_model_custom(model.module, checkpoint['mask'], False)\n\n model.module.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n cudnn.benchmark = True\n\n # Data loading code\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_trans = transforms.Compose([\n transforms.RandomResizedCrop(size=224, scale=(0.75, 1.33)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n\n val_trans = transforms.Compose([\n 
transforms.Resize((224, 224)),\n transforms.ToTensor(),\n normalize,\n ])\n\n train_dataset = VisDA17(txt_file=os.path.join(args.data, \"train/image_list.txt\"), \n root_dir=os.path.join(args.data, \"train\"), transform=train_trans)\n val_dataset = VisDA17(txt_file=os.path.join(args.data, \"validation/image_list.txt\"), \n root_dir=os.path.join(args.data, \"validation\"), transform=val_trans)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion, args)\n return\n\n for prun_iter in range(args.start_state, args.states):\n\n check_sparsity(model.module, False)\n for epoch in range(args.start_epoch, args.epochs):\n\n print(optimizer.state_dict()['param_groups'][0]['lr'])\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args)\n\n # evaluate on validation set\n acc1 = validate(val_loader, model, criterion, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if is_best:\n best_epoch = epoch+1\n\n if if_pruned:\n mask_dict = extract_mask(model.state_dict())\n else:\n mask_dict = None\n\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state': prun_iter,\n 'arch': args.arch,\n 'state_dict': model.module.state_dict(),\n 'mask': mask_dict,\n 'best_acc1': best_acc1,\n 'optimizer' : optimizer.state_dict(),\n 'if_pruned': if_pruned,\n 'init_weight':initalization\n }, is_best, checkpoint=args.save_dir, best_name=str(prun_iter)+'model_best.pth.tar')\n\n check_sparsity(model.module, False)\n print('**best TA = ', best_acc1, 'best epoch = ', best_epoch)\n\n # start pruning \n print('start pruning model')\n pruning_model(model.module, args.percent, False)\n if_pruned = True\n \n current_mask = extract_mask(model.state_dict())\n remove_prune(model.module, False)\n\n model.module.load_state_dict(initalization)\n best_acc1 = 0 \n best_epoch = 0\n prune_model_custom(model.module, current_mask, False)\n validate(val_loader, model, criterion, args)\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n wp_steps = len(train_loader)\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n adjust_learning_rate(optimizer, epoch, args, i+1, steps_for_one_epoch=wp_steps)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient 
and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\ndef save_checkpoint(state, is_best, checkpoint, filename='checkpoint.pth.tar', best_name='model_best.pth.tar'):\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, best_name))\n\ndef adjust_learning_rate(optimizer, epoch, args, iterations, steps_for_one_epoch):\n\n max_lr = args.lr\n\n if epoch < 10:\n lr = max_lr\n else:\n lr = max_lr*0.1\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\nif __name__ == '__main__':\n main()",
"\"\"\" Layer-wise adaptive rate scaling for SGD in PyTorch! \"\"\"\nimport torch\nfrom torch.optim.optimizer import Optimizer, required\n\n\nclass LARS(Optimizer):\n r\"\"\"Implements layer-wise adaptive rate scaling for SGD.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): base learning rate (\\gamma_0)\n momentum (float, optional): momentum factor (default: 0) (\"m\")\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n (\"\\beta\")\n eta (float, optional): LARS coefficient\n max_epoch: maximum training epoch to determine polynomial LR decay.\n\n Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg.\n Large Batch Training of Convolutional Networks:\n https://arxiv.org/abs/1708.03888\n\n Example:\n >>> optimizer = LARS(model.parameters(), lr=0.1, eta=1e-3)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n \"\"\"\n def __init__(self, params, lr=required, momentum=.9,\n weight_decay=.0005, eta=0.001):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\"\n .format(weight_decay))\n if eta < 0.0:\n raise ValueError(\"Invalid LARS coefficient value: {}\".format(eta))\n\n self.epoch = 0\n defaults = dict(lr=lr, momentum=momentum,\n weight_decay=weight_decay,\n eta=eta)\n super(LARS, self).__init__(params, defaults)\n\n def step(self, epoch=None, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n epoch: current epoch to calculate polynomial LR decay schedule.\n if None, uses self.epoch and increments it.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n if epoch is None:\n epoch = self.epoch\n self.epoch += 1\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n eta = group['eta']\n lr = group['lr']\n # max_epoch = group['max_epoch']\n\n for p in group['params']:\n if p.grad is None:\n continue\n\n param_state = self.state[p]\n d_p = p.grad.data\n\n weight_norm = torch.norm(p.data)\n grad_norm = torch.norm(d_p)\n\n # Global LR computed on polynomial decay schedule\n # decay = (1 - float(epoch) / max_epoch) ** 2\n global_lr = lr\n\n # Compute local learning rate for this layer\n local_lr = eta * weight_norm / \\\n (grad_norm + weight_decay * weight_norm)\n\n # if len(local_lr[(weight_norm < 1e-15) | (grad_norm < 1e-15)]) > 0:\n # print(\"len zeros is {}\".format(len(local_lr[(weight_norm < 1e-15) | (grad_norm < 1e-15)])))\n local_lr[(weight_norm < 1e-15) | (grad_norm < 1e-15)] = 1.0\n\n # Update the momentum term\n actual_lr = local_lr * global_lr\n\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = \\\n torch.zeros_like(p.data)\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(d_p + weight_decay * p.data, alpha=actual_lr)\n p.data.add_(-buf)\n\n return loss\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.DataParallel",
"torch.save"
],
[
"torch.norm",
"torch.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lim-Guowei/RUL | [
"e23e97a373df73abc2fde14ce070dcb5230a79c2"
] | [
"eda.py"
] | [
"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dataloader import dataloader\nimport seaborn as sns\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\npd.set_option('display.float_format', '{:.6f}'.format)\n\ndef countNullPercent(dataframe):\n \"\"\" Print percentage of null values for each column in dataframe sorted in descending order\n \"\"\"\n nullCollect = {}\n for column in dataframe:\n rowCount = len(dataframe[column])\n nullCount = dataframe[column].isnull().sum()\n percentNull = round((nullCount/rowCount)*100, 2)\n nullCollect.update({column: percentNull})\n\n for key, value in sorted(nullCollect.items(), key=lambda item: item[1], reverse=True): # Sort dictionary based on value in descending order\n print(\"{}: {}\".format(key, value))\n return \n\ndef countUniqueVal(dataframe, column):\n \"\"\" Print unique values for each columns\n \"\"\"\n for count, name in enumerate(column):\n print(\"#{} - {}\".format(count, name))\n print(dataframe[name].value_counts())\n print(\"\\n\")\n return\n\ndef plot_by_unit(dataframe, unit):\n \"\"\" Generate visualization for each fleet unit\n Unit number can be obtained by inspecting \"unit\" column in dataframe\n Generate plot for each variable (x-axis) vs rul (y-axis)\n \"\"\"\n df_unit = dataframe[dataframe[\"unit\"] == unit]\n print(df_unit)\n\n ### Correlation plot\n plt.subplots(figsize=(20,15))\n color = plt.get_cmap('inferno') # default color\n color.set_bad('lightblue')\n corr_plot = sns.heatmap(data=df_unit.corr(), annot=False, cmap=color)\n plt.title(\"Correlation matrix for unit {}\".format(unit), fontdict={'fontsize': 16})\n plt.savefig(\"corr_plot_unit_{}.png\".format(unit))\n return\n\ndef rank_feature_importance(dataframe):\n feat_labels = dataframe.columns.values\n\n Y = dataframe[\"RUL\"]\n X = dataframe.drop([\"RUL\"], axis=1)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=42, shuffle=True, test_size=0.2)\n \n # Create a random forest classifier\n clf = RandomForestClassifier(n_estimators=100, random_state=0, n_jobs=-1)\n\n # Train the classifier\n clf.fit(X_train, Y_train)\n\n # Plot random forest feature importance\n importances = clf.feature_importances_\n indices = np.argsort(importances)\n\n plt.title('Feature Importances', fontdict={'fontsize': 16})\n plt.barh(range(len(indices)), importances[indices], color='b', align='center')\n plt.yticks(range(len(indices)), [feat_labels[i] for i in indices])\n plt.xlabel('Relative Importance')\n plt.savefig(\"feature_importance.png\")\n return\n\ndef add_lag_features(dataframe):\n\n dataframe[\"RUL_lag1\"] = dataframe[\"RUL\"].shift(1)\n dataframe[\"RUL_lag3\"] = dataframe[\"RUL\"].shift(3)\n dataframe[\"RUL_lag5\"] = dataframe[\"RUL\"].shift(5)\n dataframe = dataframe.iloc[5::] # Discard NaN rows\n \n fig = dataframe.plot(y=[\"RUL\", \"RUL_lag1\", \"RUL_lag1\", \"RUL_lag3\", \"RUL_lag5\"], \n kind=\"line\", \n title=\"Lag on RUL variable\", \n xlabel=\"index\", \n use_index=True,\n linewidth=1.0,\n alpha=0.7,\n xlim=(0, dataframe.index.max()),\n figsize=(20, 15)\n ).get_figure()\n \n fig.savefig(\"lag_on_RUL.png\")\n return\n\ndef eda(filename):\n df_dev, df_test = dataloader(filename)\n column_name = df_dev.columns.tolist()\n\n ### Check for null or zeroes\n countNullPercent(df_dev) # No null values in dataframe\n countNullPercent(df_test) # No null values in dataframe\n df_dev.describe().to_csv(\"df_dev_description.csv\")\n 
df_test.describe().to_csv(\"df_test_description.csv\")\n\n # Remove columns containing all zeroes\n # Remove \"cycle\" as \"RUL\" is sufficient as target variable\n df_dev = df_dev.drop(columns=[\"fan_eff_mod\", \"fan_flow_mod\", \"LPC_eff_mod\", \"LPC_flow_mod\", \"HPC_eff_mod\", \"HPC_flow_mod\", \"HPT_flow_mod\", \"LPT_eff_mod\", \"LPT_flow_mod\", \"cycle\"])\n df_test = df_test.drop(columns=[\"fan_eff_mod\", \"fan_flow_mod\", \"LPC_eff_mod\", \"LPC_flow_mod\", \"HPC_eff_mod\", \"HPC_flow_mod\", \"HPT_flow_mod\", \"LPT_eff_mod\", \"LPT_flow_mod\", \"cycle\"])\n\n ### Identify categorical features as \"unit\", \"Fc\", \"hs\"\n countUniqueVal(df_dev, [\"unit\", \"Fc\", \"hs\"])\n\n ### Generate correlation matrix plot for each unit in fleet \n plot_by_unit(df_dev, 1.0)\n plot_by_unit(df_dev, 2.0)\n plot_by_unit(df_dev, 3.0)\n plot_by_unit(df_dev, 4.0)\n plot_by_unit(df_dev, 5.0)\n plot_by_unit(df_dev, 6.0)\n\n # Rank feature importance using random forest classifier\n rank_feature_importance(df_dev)\n\n add_lag_features(df_dev)\n\n return\n \nif __name__ == \"__main__\":\n eda(\"N-CMAPSS_DS01-005.h5\")"
] | [
[
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.title",
"matplotlib.pyplot.get_cmap",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"numpy.argsort",
"pandas.set_option"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fredyshox/AppVideoFramer | [
"0e43f2828d2e3737451a0cf1ec81e6840796ac30"
] | [
"Tools/fastlane-templates.py"
] | [
"#!/usr/bin/env python3\n# \n# Retrieve templates from fastlane/frameit\n#\n\nimport sys\nimport os \nfrom os import path\nfrom shutil import copyfile\nfrom tempfile import gettempdir\nimport re\nimport json\nimport cv2\nimport numpy as np\nfrom common import sanitize_color, sanitize_device_name, sanitize_device_key, apply_default_color\n\n# URL to frameit-frames repository\nFRAMEIT_URL = \"https://github.com/fastlane/frameit-frames/archive/gh-pages.zip\"\n\ndef main():\n if len(sys.argv) < 3:\n print(f\"Usage: {sys.argv[0]} resource_dir contents_file\")\n exit(1)\n\n resource_dir = sys.argv[1]\n contents_path = sys.argv[2]\n zip_path = path.join(resource_dir, \"gh-pages.zip\")\n repo_dir = path.join(resource_dir, \"frameit-frames-gh-pages\")\n\n print(\"Downloading frameit frames...\")\n status_code = os.system(f\"wget -q --show-progress -O \\\"{zip_path}\\\" \\\"{FRAMEIT_URL}\\\" && unzip -d \\\"{resource_dir}\\\" \\\"{zip_path}\\\"\")\n print(f\"Status code: {status_code}\")\n\n # path to latest frames\n frameit_dir = path.join(repo_dir, \"latest\")\n with open(contents_path, \"r\") as cf:\n contents = json.load(cf)\n\n for frame_path in os.listdir(frameit_dir):\n frame_path = path.join(frameit_dir, frame_path)\n filename = path.basename(frame_path)\n if not path.isfile(frame_path) or not filename_valid(filename):\n continue\n \n device_name = sanitize_device_name(filename)\n device_key = sanitize_device_key(device_name)\n device_color = sanitize_color(filename)\n print(f\"Found template: {frame_path}\")\n print(f\"Template {device_name} - {device_color}\")\n \n image = cv2.imread(frame_path, cv2.IMREAD_UNCHANGED) # read preserving alpha\n frame_height, frame_width = image.shape[:2]\n ox, oy, width, height = measure_screen_bounds(image)\n print(f\"==> +{ox}+{oy}, {width}x{height}\")\n\n if device_key in contents:\n device_info = contents[device_key]\n else:\n device_info = { \n \"images\": {},\n \"left\": ox,\n \"top\": oy,\n \"right\": ox + width,\n \"bottom\": oy + height,\n \"res_height\": frame_height,\n \"res_width\": frame_width\n }\n device_info[\"images\"][device_color] = filename\n \n contents[device_key] = device_info\n copyfile(frame_path, path.join(resource_dir, filename))\n\n # default colors - first model color which is available in DEFAULT_COLOR array\n for key in contents.keys():\n apply_default_color(contents, key)\n\n with open(contents_path, \"w\") as cf:\n json.dump(contents, cf, sort_keys=True, indent=4)\n\n print(\"Cleaning up...\")\n os.system(f\"rm {zip_path} && rm -r {repo_dir}\")\n\ndef measure_screen_bounds(image):\n alpha = image[:, :, 3]\n alpha = cv2.threshold(alpha, 252, 255, cv2.THRESH_BINARY_INV)[1] # 99% threshold\n # connected component analysis\n n, labels, stats, centroids = cv2.connectedComponentsWithStats(alpha, connectivity=8)\n # compare centroids to image center\n img_center = np.array([alpha.shape[0] // 2, alpha.shape[1] // 2])\n # component which contains image center should be screen\n screen_label = labels[img_center[0], img_center[1]]\n x, y, width, height = stats[screen_label][:4]\n return int(x), int(y), int(width), int(height)\n\ndef filename_valid(filename):\n pattern = \"^Apple iP.*\\.png$\"\n return re.search(pattern, filename) is not None\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
data-weirdo/studio | [
"48852c4f097f773ce3d408b59f79fda2e2d60470"
] | [
"function/python/brightics/function/transform/sql/functions.py"
] | [
"\"\"\"\n Copyright 2019 Samsung SDS\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \n http://www.apache.org/licenses/LICENSE-2.0\n \n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nimport dateutil.parser\nimport numpy as np\nfrom .serializer import _serialize\nfrom .serializer import _deserialize\nimport re\n\n\"\"\" \nconstants \n\"\"\"\n\n\ndef e():\n return np.math.e\n\n\ndef pi():\n return np.math.pi\n\n\"\"\"\nlambda functions\n\"\"\"\nlog = lambda _: np.math.log(_) if _ is not None else np.math.nan # ?\nln = lambda _: np.math.log(_)\nlog10 = lambda _: np.math.log10(_)\nlog2 = lambda _: np.math.log2(_)\nexp = lambda _: np.math.exp(_)\nexp2 = lambda _: np.math.pow(2, _)\nsqrt = lambda _: np.math.sqrt(_)\nceil = lambda _: np.math.ceil(_)\nfloor = lambda _: np.math.floor(_)\nsign = lambda _: int(np.sign(_))\n \nfactorial = lambda _: np.math.factorial(_)\n\npow = lambda a, b: np.math.pow(a, b)\n\nljust = lambda item, length, lpad_str: str(item).ljust(length, lpad_str) # ?\nrjust = lambda item, length, rpad_str: str(item).rjust(length, rpad_str) # ?\n\nis_null = lambda _: 1 if _ is None else 0\n\n\"\"\"\nregular expression related functions\n\"\"\"\n\nregexp = lambda exp, str_: False if re.search(exp, str_) is None else True\nregexp_replace = lambda initial_str, pattern, replacement: re.sub(pattern, replacement, initial_str)\n\n\ndef regexp_extract(subject, pattern, *index): # todo index??\n\n def _is_empty(tup):\n return not tup\n \n if _is_empty(index):\n return re.search(pattern, subject).group(1)\n else:\n return re.search(pattern, subject).group(index[0])\n \n\"\"\"\ndatetime related functions\n\"\"\"\n# todo weekofmonth, datediff, timediff\n\n\ndef datediff(end_isotime, start_isotime):\n end_datetime = dateutil.parser.parse(end_isotime)\n start_datetime = dateutil.parser.parse(start_isotime)\n diff_datetime = end_datetime - start_datetime\n return diff_datetime.days\n\n\ndef strftime_a(isotime): # ?\n return dateutil.parser.parse(isotime).strftime('%a')\n\n\ndef strftime_aa(isotime): # ?\n return dateutil.parser.parse(isotime).strftime('%A')\n\n\ndef strftime_aak(isotime): # ?\n w_dict = {'Monday':'월요일',\n 'Tuesday':'화요일',\n 'Wednesday':'수요일',\n 'Thursday':'목요일',\n 'Friday':'금요일',\n 'Saturday':'토요일',\n 'Sunday':'일요일',\n }\n return w_dict[dateutil.parser.parse(isotime).strftime('%A')]\n\n\ndef strftime_ak(isotime): # ?\n w_dict = {'Monday':'월',\n 'Tuesday':'화',\n 'Wednesday':'수',\n 'Thursday':'목',\n 'Friday':'금',\n 'Saturday':'토',\n 'Sunday':'일',\n }\n return w_dict[dateutil.parser.parse(isotime).strftime('%A')]\n\n\"\"\" \narray related functions \n\"\"\"\n\n\ndef array(*args):\n return _serialize(np.array(list(args)))\n\n\ndef get_array_element(serialized_list, index):\n return _deserialize(serialized_list)[index]\n\n\ndef concat_ws(sep, serialized_list):\n arr = _deserialize(serialized_list)\n return sep.join([str(item) for item in arr])\n\n\ndef split(str_, *sep):\n nargs = len(sep)\n if nargs == 0:\n return _serialize(str_.split())\n else: # todo elif nargs == 1:\n return _serialize(str_.split(sep[0]))\n\n \ndef size(serialized_list):\n arr = 
_deserialize(serialized_list)\n return len(arr)\n"
] | [
[
"numpy.math.exp",
"numpy.math.ceil",
"numpy.math.sqrt",
"numpy.math.factorial",
"numpy.sign",
"numpy.math.log10",
"numpy.math.pow",
"numpy.math.floor",
"numpy.math.log2",
"numpy.math.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sander102907/autoencoder_program_synthesis | [
"752954f9ef268908553189a1c3323bad15b39f04"
] | [
"autoencoder_program_synthesis/model_utils/modules.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass AddGate(nn.Module):\n \"\"\"\n Add gate similar to LSTM add gate: :math: `y = σ(W_mul * inp + b_mul) * tanh(W_add * inp + b_add)`\n\n Outputs information that can be added to some state\n where the network learns: if and how much of the input should be added\n \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n\n self.W_mul = nn.Linear(dim, dim, bias=True)\n self.W_add = nn.Linear(dim, dim, bias=True)\n\n self.sigmoid = nn.Sigmoid()\n\n\n def forward(self, inp):\n out_mul = self.sigmoid(self.W_mul(inp))\n out_add = torch.tanh(self.W_add(inp))\n\n return out_mul * out_add\n\n\nclass PredictiveHidden(nn.Module):\n \"\"\"\n Computes a combined predictive hidden state from two hidden states: :math:`y = tanh(W1 * x1 + W2 * x2)`\n \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n\n # Learnable parameter weights1 -> for calculating: W1 * inp1\n self.W1 = nn.Linear(dim, dim, bias=True)\n\n # Learnable parameter weights2 -> for calculating: W2 * inp2\n self.W2 = nn.Linear(dim, dim, bias=True)\n\n\n def forward(self, inp1, inp2):\n # predictive hidden state: tanh(W1 * inp1 + W2 * inp2)\n h_pred = torch.tanh(self.W1(inp1) + self.W2(inp2))\n\n return h_pred\n\n\nclass TreeTopologyPred(nn.Module):\n \"\"\"\n Computes logits for depth, width and res predictions with linear transformations: dim -> 1\n \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n\n # For topology prediction, we predict whether there are children\n self.depth_pred = nn.Linear(dim, 1)\n\n # For topology prediction, we predict whether there are successor siblings\n self.width_pred = nn.Linear(dim, 1)\n\n # For predicting whether a token is a reserved keyword of c++ or not\n self.res_pred = nn.Linear(dim, 1)\n\n def forward(self, inp):\n depth_pred = self.depth_pred(inp)\n width_pred = self.width_pred(inp)\n res_pred = self.res_pred(inp)\n\n return depth_pred, width_pred, res_pred\n\n\nclass LstmAttention(nn.Module):\n \"\"\"\n ATTENTION-BASED LSTM FOR PSYCHOLOGICAL STRESS DETECTION FROM SPOKEN\n LANGUAGE USING DISTANT SUPERVISION\n\n https://arxiv.org/abs/1805.12307\n \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n\n self.attention_weights = nn.Linear(dim, dim)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, inp):\n u = torch.tanh(self.attention_weights(inp))\n\n a = self.softmax(u)\n\n v = torch.sum(a * inp, dim=-1)\n\n return u * inp\n\n\nclass MultiLayerLSTMCell(nn.Module):\n \"\"\"\n A long short-term memory (LSTM) cell with support for multiple layers.\n\n input_size: The number of expected features in the input\n hidden_size: The number of features in the hidden state\n num_layers: Number of recurrent layers.\n E.g., setting num_layers=2 would mean stacking two LSTM cells together\n to form a stacked LSTM cell, with the second LSTM cell taking in outputs of\n the first LSTM cell and computing the final results. 
Default: 1\n \"\"\"\n\n def __init__(self, input_size, hidden_size, num_layers = 1, recurrent_dropout=0):\n super().__init__()\n\n self.num_layers = num_layers\n self.rnns = nn.ModuleList([])\n self.dropout = nn.Dropout(recurrent_dropout)\n\n # Initialize RNNs with num layers\n for i in range(num_layers):\n if i == 0:\n self.rnns.append(nn.LSTMCell(input_size, hidden_size))\n else:\n self.rnns.append(nn.LSTMCell(hidden_size, hidden_size))\n\n\n def forward(self, input, hidden_states):\n new_hidden_states = []\n\n for i in range(self.num_layers):\n if i == 0:\n h, c = self.rnns[i](input, hidden_states[i])\n else:\n h, c = self.rnns[i](h, hidden_states[i])\n\n # apply recurrent dropout on the outputs of each LSTM cell hidden except the last layer\n if i < self.num_layers - 1:\n h = self.dropout(h)\n\n\n new_hidden_states.append((h, c))\n\n return new_hidden_states\n\n\n\nclass Highway(nn.Module):\n \"\"\"\n Code from:\n https://github.com/kefirski/pytorch_RVAE/blob/19103d1298d7d77423c6e7d76dcc190400d7256e/selfModules/highway.py#L5\n\n Highway networks use learned gating mechanisms to regulate information flow, inspired by Long Short-Term Memory (LSTM) recurrent neural networks.\n The gating mechanisms allow neural networks to have paths for information to follow across different layers (\"information highways\")\n\n http://papers.nips.cc/paper/5850-training-very-deep-networks \n \"\"\"\n \n def __init__(self, size, num_layers, f):\n\n super(Highway, self).__init__()\n\n self.num_layers = num_layers\n\n self.nonlinear = [nn.Linear(size, size) for _ in range(num_layers)]\n for i, module in enumerate(self.nonlinear):\n self._add_to_parameters(module.parameters(), 'nonlinear_module_{}'.format(i))\n\n self.linear = [nn.Linear(size, size) for _ in range(num_layers)]\n for i, module in enumerate(self.linear):\n self._add_to_parameters(module.parameters(), 'linear_module_{}'.format(i))\n\n self.gate = [nn.Linear(size, size) for _ in range(num_layers)]\n for i, module in enumerate(self.gate):\n self._add_to_parameters(module.parameters(), 'gate_module_{}'.format(i))\n\n self.f = f\n\n def forward(self, x):\n \"\"\"\n :param x: tensor with shape of [batch_size, size]\n :return: tensor with shape of [batch_size, size]\n applies σ(x) ⨀ (f(G(x))) + (1 - σ(x)) ⨀ (Q(x)) transformation | G and Q is affine transformation,\n f is non-linear transformation, σ(x) is affine transformation with sigmoid non-linearition\n and ⨀ is element-wise multiplication\n \"\"\"\n\n for layer in range(self.num_layers):\n gate = F.sigmoid(self.gate[layer](x))\n\n nonlinear = self.f(self.nonlinear[layer](x))\n linear = self.linear[layer](x)\n\n x = gate * nonlinear + (1 - gate) * linear\n\n return x\n\n def _add_to_parameters(self, parameters, name):\n for i, parameter in enumerate(parameters):\n self.register_parameter(name='{}-{}'.format(name, i), param=parameter)\n\n \n\n \n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.sum",
"torch.nn.Sigmoid",
"torch.nn.LSTMCell",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yyHaker/EKMRC-is-your-need | [
"483e2d9d822907ef36a39333933fd939dac1cea0"
] | [
"EKMRC/src/test_gnn.py"
] | [
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : test_gnn.py\n@Author : yyhaker \n@Contact : [email protected]\n@Time : 2020/04/22 15:19:24\n'''\n\n# here put the import lib\nimport torch\nfrom torch_geometric.data import Data\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv\n\nedge_index = torch.tensor([[0, 2],\n [2, 0],\n [3, 2],\n [2, 3]], dtype=torch.long)\nx = torch.tensor([[-1], [0], [1]], dtype=torch.float)\n\n\ndata = Data(x=x, edge_index=edge_index.t().contiguous())\ndevice = torch.device('cuda')\ndata = data.to(device)\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = GCNConv(1, 16)\n self.conv2 = GCNConv(16, 2)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n\n return F.log_softmax(x, dim=1)\n\n\nmodel = Net().to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n\nmodel.train()\nfor epoch in range(200):\n # optimizer.zero_grad()\n out = model(data)"
] | [
[
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout",
"torch.tensor",
"torch.nn.functional.relu",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
toptaldev92/tensorflow | [
"1fd1f65d1b0896149e44a1f105267c27994010d9",
"f49aca4532c155597c669cf2189f211cafbebf96",
"f49aca4532c155597c669cf2189f211cafbebf96",
"f49aca4532c155597c669cf2189f211cafbebf96",
"1fd1f65d1b0896149e44a1f105267c27994010d9"
] | [
"tensorflow/examples/learn/text_classification_cnn.py",
"tensorflow/python/tools/freeze_graph_test.py",
"tensorflow/contrib/layers/python/layers/layers_test.py",
"tensorflow/python/training/queue_runner.py",
"tensorflow/python/framework/graph_util_test.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example of Estimator for CNN-based text classification with DBpedia data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport numpy as np\nimport pandas\nfrom sklearn import metrics\nimport tensorflow as tf\n\nfrom tensorflow.contrib import learn\n\nFLAGS = None\n\nMAX_DOCUMENT_LENGTH = 100\nEMBEDDING_SIZE = 20\nN_FILTERS = 10\nWINDOW_SIZE = 20\nFILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]\nFILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]\nPOOLING_WINDOW = 4\nPOOLING_STRIDE = 2\nn_words = 0\n\n\ndef cnn_model(x, y):\n \"\"\"2 layer Convolutional network to predict from sequence of words\n to a class.\"\"\"\n # Convert indexes of words into embeddings.\n # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then\n # maps word indexes of the sequence into [batch_size, sequence_length,\n # EMBEDDING_SIZE].\n y = tf.one_hot(y, 15, 1, 0)\n word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,\n embedding_size=EMBEDDING_SIZE, name='words')\n word_vectors = tf.expand_dims(word_vectors, 3)\n with tf.variable_scope('CNN_Layer1'):\n # Apply Convolution filtering on input sequence.\n conv1 = tf.contrib.layers.convolution2d(word_vectors, N_FILTERS,\n FILTER_SHAPE1, padding='VALID')\n # Add a RELU for non linearity.\n conv1 = tf.nn.relu(conv1)\n # Max pooling across output of Convolution+Relu.\n pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],\n strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')\n # Transpose matrix so that n_filters from convolution becomes width.\n pool1 = tf.transpose(pool1, [0, 1, 3, 2])\n with tf.variable_scope('CNN_Layer2'):\n # Second level of convolution filtering.\n conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,\n FILTER_SHAPE2, padding='VALID')\n # Max across each filter to get useful features for classification.\n pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])\n\n # Apply regular WX + B and classification.\n prediction, loss = learn.models.logistic_regression(pool2, y)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss, tf.contrib.framework.get_global_step(),\n optimizer='Adam', learning_rate=0.01)\n\n return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op\n\n\ndef main(unused_argv):\n global n_words\n # Prepare training and testing data\n dbpedia = learn.datasets.load_dataset(\n 'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)\n x_train = pandas.DataFrame(dbpedia.train.data)[1]\n y_train = pandas.Series(dbpedia.train.target)\n x_test = pandas.DataFrame(dbpedia.test.data)[1]\n y_test = pandas.Series(dbpedia.test.target)\n\n # Process vocabulary\n vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)\n x_train = np.array(list(vocab_processor.fit_transform(x_train)))\n x_test = np.array(list(vocab_processor.transform(x_test)))\n 
n_words = len(vocab_processor.vocabulary_)\n print('Total words: %d' % n_words)\n\n # Build model\n classifier = learn.Estimator(model_fn=cnn_model)\n\n # Train and predict\n classifier.fit(x_train, y_train, steps=100)\n y_predicted = [\n p['class'] for p in classifier.predict(x_test, as_iterable=True)]\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy: {0:f}'.format(score))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--test_with_fake_data',\n default=False,\n help='Test the example code with fake data.',\n action='store_true'\n )\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests the graph freezing tool.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.tools import freeze_graph\n\n\nclass FreezeGraphTest(test_util.TensorFlowTestCase):\n\n def _testFreezeGraph(self, saver_write_version):\n\n checkpoint_prefix = os.path.join(self.get_temp_dir(), \"saved_checkpoint\")\n checkpoint_state_name = \"checkpoint_state\"\n input_graph_name = \"input_graph.pb\"\n output_graph_name = \"output_graph.pb\"\n\n # We'll create an input graph that has a single variable containing 1.0,\n # and that then multiplies it by 2.\n with tf.Graph().as_default():\n variable_node = tf.Variable(1.0, name=\"variable_node\")\n output_node = tf.mul(variable_node, 2.0, name=\"output_node\")\n sess = tf.Session()\n init = tf.initialize_all_variables()\n sess.run(init)\n output = sess.run(output_node)\n self.assertNear(2.0, output, 0.00001)\n saver = tf.train.Saver(write_version=saver_write_version)\n checkpoint_path = saver.save(sess, checkpoint_prefix, global_step=0,\n latest_filename=checkpoint_state_name)\n tf.train.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)\n\n # We save out the graph to disk, and then call the const conversion\n # routine.\n input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)\n input_saver_def_path = \"\"\n input_binary = False\n output_node_names = \"output_node\"\n restore_op_name = \"save/restore_all\"\n filename_tensor_name = \"save/Const:0\"\n output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)\n clear_devices = False\n\n freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,\n input_binary, checkpoint_path,\n output_node_names, restore_op_name,\n filename_tensor_name, output_graph_path,\n clear_devices, \"\")\n\n # Now we make sure the variable is now a constant, and that the graph still\n # produces the expected result.\n with tf.Graph().as_default():\n output_graph_def = tf.GraphDef()\n with open(output_graph_path, \"rb\") as f:\n output_graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(output_graph_def, name=\"\")\n\n self.assertEqual(4, len(output_graph_def.node))\n for node in output_graph_def.node:\n self.assertNotEqual(\"Variable\", node.op)\n\n with tf.Session() as sess:\n output_node = sess.graph.get_tensor_by_name(\"output_node:0\")\n output = sess.run(output_node)\n self.assertNear(2.0, output, 0.00001)\n\n def testFreezeGraphV1(self):\n self._testFreezeGraph(tf.train.SaverDef.V1)\n\n def testFreezeGraphV2(self):\n self._testFreezeGraph(tf.train.SaverDef.V2)\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.contrib.layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport numpy as np\nimport tensorflow as tf\n\n# TODO(sguada) Expose tf.with_dependencies\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.contrib.layers.python.layers import layers as _layers\nfrom tensorflow.python.ops import state_ops\n\n\nclass AvgPool2DTest(tf.test.TestCase):\n\n def testInvalidDataFormat(self):\n height, width = 3, 6\n images = np.random.uniform(size=(5, height, width, 3))\n with self.assertRaisesRegexp(\n ValueError, 'data_format has to be either NCHW or NHWC.'):\n tf.contrib.layers.avg_pool2d(images, [3, 3], data_format='CHWN')\n\n def testCreateAvgPool(self):\n height, width = 3, 6\n images = np.random.uniform(size=(5, height, width, 3))\n output = tf.contrib.layers.avg_pool2d(images, [3, 3])\n self.assertEqual(output.op.name, 'AvgPool2D/AvgPool')\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])\n\n def testCreateAvgPoolNCHW(self):\n height, width = 3, 6\n images = np.random.uniform(size=(5, 2, height, width))\n output = tf.contrib.layers.avg_pool2d(images, [3, 3], data_format='NCHW')\n self.assertEquals(output.op.name, 'AvgPool2D/AvgPool')\n self.assertListEqual(output.get_shape().as_list(), [5, 2, 1, 2])\n\n def testCollectOutputs(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.avg_pool2d(images, [3, 3],\n outputs_collections='outputs')\n output_collected = tf.get_collection('outputs')[0]\n self.assertEqual(output_collected.alias, 'AvgPool2D')\n self.assertEqual(output_collected, output)\n\n def testCreateSquareAvgPool(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.avg_pool2d(images, 3)\n self.assertEqual(output.op.name, 'AvgPool2D/AvgPool')\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])\n\n def testCreateAvgPoolWithScope(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.avg_pool2d(images, [3, 3], scope='pool1')\n self.assertEqual(output.op.name, 'pool1/AvgPool')\n\n def testCreateAvgPoolWithSamePadding(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.avg_pool2d(images, [3, 3], padding='SAME')\n self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3])\n\n def testCreateAvgPoolWithSamePaddingNCHW(self):\n height, width = 3, 6\n images = tf.random_uniform((5, 3, height, width), seed=1)\n output = tf.contrib.layers.avg_pool2d(images, [3, 3], padding='SAME',\n data_format='NCHW')\n self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3])\n\n def 
testCreateAvgPoolStrideWithSamePadding(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.avg_pool2d(images, [3, 3], stride=1,\n padding='SAME')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])\n\n def testGlobalAvgPool(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.avg_pool2d(images, images.get_shape()[1:3],\n stride=1)\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])\n\n\nclass PoolTest(tf.test.TestCase):\n\n def testCreatePool(self):\n height, width = 3, 3\n images = np.random.uniform(size=(5, height, width, 3))\n output = tf.contrib.layers.pool(images, [3, 3], pooling_type='AVG')\n self.assertEqual(output.op.name, 'avg_pool')\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])\n\n def testCreatePoolNCHW(self):\n height, width = 3, 3\n images = np.random.uniform(size=(5, 3, height, width))\n output = tf.contrib.layers.pool(\n images, [3, 3], pooling_type='AVG', data_format='NCHW')\n self.assertEqual(output.op.name, 'avg_pool')\n self.assertListEqual(output.get_shape().as_list(), [5, 3, 1, 1])\n\n def testCollectOutputs(self):\n height, width = 3, 3\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.pool(images, [3, 3],\n pooling_type='AVG',\n outputs_collections='outputs')\n output_collected = tf.get_collection('outputs')[0]\n self.assertEqual(output_collected.alias, 'avg_pool')\n self.assertEqual(output_collected, output)\n\n def testCreateSquareAvgPool(self):\n height, width = 3, 3\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.pool(images, 3, pooling_type='AVG')\n self.assertEqual(output.op.name, 'avg_pool')\n self.assertEqual(output.get_shape().as_list(), [5, 1, 1, 3])\n\n def testCreateMaxPoolWithScope(self):\n height, width = 3, 3\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.pool(\n images, [3, 3], pooling_type='MAX', scope='pool1')\n self.assertEqual(output.op.name, 'pool1')\n\n def testCreateMaxPoolWithSamePadding(self):\n height, width = 3, 3\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.pool(\n images, [3, 3], pooling_type='MAX', padding='SAME')\n self.assertEqual(output.get_shape().as_list(), [5, 3, 3, 3])\n\n def testCreateAvgPoolStrideWithSamePadding(self):\n height, width = 3, 3\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.pool(\n images, [3, 3], stride=1, padding='SAME', pooling_type='AVG')\n self.assertEqual(output.get_shape().as_list(), [5, height, width, 3])\n\n def testGlobalAvgPool(self):\n height, width = 3, 3\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.pool(\n images, images.get_shape()[1:3], stride=1, pooling_type='AVG')\n self.assertEqual(output.get_shape().as_list(), [5, 1, 1, 3])\n\n def testAvgPoolWithStride(self):\n height, width = 5, 8\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.pool(\n images, [2, 3], stride=[1, 2], pooling_type='AVG')\n self.assertEqual(output.get_shape().as_list(), [5, 4, 3, 3])\n\n def testAvgPoolWithDilation(self):\n height, width = 5, 8\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.pool(\n images, [2, 3], dilation_rate=[1, 2], pooling_type='AVG')\n self.assertEqual(output.get_shape().as_list(), [5, 4, 
4, 3])\n\n def testAvgPoolWithDilationNCHW(self):\n height, width = 5, 8\n images = tf.random_uniform((5, 3, height, width), seed=1)\n output = tf.contrib.layers.pool(\n images, [2, 3], dilation_rate=[1, 2], pooling_type='AVG', data_format='NCHW')\n self.assertEqual(output.get_shape().as_list(), [5, 3, 4, 4])\n\n\nclass BiasAddTest(tf.test.TestCase):\n\n def testCreate(self):\n height, width = 3, 3\n with self.test_session():\n images = np.random.uniform(size=(5, height, width, 3))\n output = tf.contrib.layers.bias_add(images)\n self.assertEqual(output.op.name, 'BiasAdd/BiasAdd')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])\n\n def testCreateWithActivation(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.bias_add(images, activation_fn=tf.nn.relu)\n self.assertEqual(output.op.name, 'BiasAdd/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])\n\n def testCreateDimensions(self):\n dims = (2, 3, 4)\n shape = [5, 2, 3, 4]\n with self.test_session():\n for d in dims:\n input_shape = shape[:d]\n inputs = tf.random_uniform(input_shape, seed=1)\n output = tf.contrib.layers.bias_add(inputs)\n self.assertListEqual(output.get_shape().as_list(), input_shape)\n biases = tf.contrib.framework.get_variables_by_name('biases')[-1]\n self.assertListEqual(biases.get_shape().as_list(), [input_shape[-1]])\n\n\nclass ConvolutionTest(tf.test.TestCase):\n\n def testInvalidDataFormat(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n with self.assertRaisesRegexp(\n ValueError, 'data_format'):\n tf.contrib.layers.convolution2d(images, 32, 3, data_format='CHWN')\n\n def testCreateConv(self):\n height, width = 7, 9\n with self.test_session():\n images = np.random.uniform(size=(5, height, width, 4))\n output = tf.contrib.layers.convolution2d(images, 32, [3, 3])\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])\n weights = tf.contrib.framework.get_variables_by_name('weights')[0]\n self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])\n biases = tf.contrib.framework.get_variables_by_name('biases')[0]\n self.assertListEqual(biases.get_shape().as_list(), [32])\n\n def testCreateConvNCHW(self):\n height, width = 7, 9\n with self.test_session():\n images = np.random.uniform(size=(5, 4, height, width))\n output = tf.contrib.layers.convolution2d(\n images, 32, [3, 3], data_format='NCHW')\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width])\n weights = tf.contrib.framework.get_variables_by_name('weights')[0]\n self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])\n biases = tf.contrib.framework.get_variables_by_name('biases')[0]\n self.assertListEqual(biases.get_shape().as_list(), [32])\n\n def testCreateSquareConv(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.convolution2d(images, 32, 3)\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])\n\n def testCreateConvWithTensorShape(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.convolution2d(images, 32,\n 
images.get_shape()[1:3])\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])\n\n def testCreateFullyConv(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 32), seed=1)\n output = tf.contrib.layers.convolution2d(images, 64,\n images.get_shape()[1:3],\n padding='VALID')\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])\n biases = tf.contrib.framework.get_variables_by_name('biases')[0]\n self.assertListEqual(biases.get_shape().as_list(), [64])\n\n def testCreateVerticalConv(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 4), seed=1)\n output = tf.contrib.layers.convolution2d(images, 32, [3, 1])\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(),\n [5, height, width, 32])\n weights = tf.contrib.framework.get_variables_by_name('weights')[0]\n self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32])\n biases = tf.contrib.framework.get_variables_by_name('biases')[0]\n self.assertListEqual(biases.get_shape().as_list(), [32])\n\n def testCreateHorizontalConv(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 4), seed=1)\n output = tf.contrib.layers.convolution2d(images, 32, [1, 3])\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(),\n [5, height, width, 32])\n weights = tf.contrib.framework.get_variables_by_name('weights')[0]\n self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32])\n\n def testCreateConvWithStride(self):\n height, width = 6, 8\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.convolution2d(images, 32, [3, 3], stride=2)\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(),\n [5, height/2, width/2, 32])\n\n def testCreateConvCreatesWeightsAndBiasesVars(self):\n height, width = 7, 9\n images = tf.random_uniform((5, height, width, 3), seed=1)\n with self.test_session():\n self.assertFalse(tf.contrib.framework.get_variables('conv1/weights'))\n self.assertFalse(tf.contrib.framework.get_variables('conv1/biases'))\n tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1')\n self.assertTrue(tf.contrib.framework.get_variables('conv1/weights'))\n self.assertTrue(tf.contrib.framework.get_variables('conv1/biases'))\n\n def testCreateConvWithScope(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.convolution2d(images, 32, [3, 3],\n scope='conv1')\n self.assertEqual(output.op.name, 'conv1/Relu')\n\n def testCreateConvWithCollection(self):\n height, width = 7, 9\n images = tf.random_uniform((5, height, width, 3), seed=1)\n with tf.name_scope('fe'):\n conv = tf.contrib.layers.convolution2d(images, 32, [3, 3],\n outputs_collections='outputs',\n scope='Conv')\n output_collected = tf.get_collection('outputs')[0]\n self.assertEqual(output_collected.alias, 'fe/Conv')\n self.assertEqual(output_collected, conv)\n\n def testCreateConvWithoutActivation(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.convolution2d(images, 32, [3, 3],\n activation_fn=None)\n 
self.assertEqual(output.op.name, 'Conv/BiasAdd')\n\n def testCreateConvValid(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.convolution2d(images, 32, [3, 3],\n padding='VALID')\n self.assertListEqual(output.get_shape().as_list(), [5, 5, 7, 32])\n\n def testCreateConvWithWD(self):\n height, width = 7, 9\n weight_decay = 0.01\n with self.test_session() as sess:\n images = tf.random_uniform((5, height, width, 3), seed=1)\n regularizer = tf.contrib.layers.l2_regularizer(weight_decay)\n tf.contrib.layers.convolution2d(images, 32, [3, 3],\n weights_regularizer=regularizer)\n l2_loss = tf.nn.l2_loss(\n tf.contrib.framework.get_variables_by_name('weights')[0])\n wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]\n self.assertEqual(wd.op.name,\n 'Conv/weights/Regularizer/l2_regularizer')\n sess.run(tf.initialize_all_variables())\n self.assertAlmostEqual(sess.run(wd), weight_decay * l2_loss.eval())\n\n def testCreateConvNoRegularizers(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n tf.contrib.layers.convolution2d(images, 32, [3, 3])\n self.assertEqual(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])\n\n def testReuseVars(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1')\n self.assertEqual(len(tf.contrib.framework.get_variables()), 2)\n tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1',\n reuse=True)\n self.assertEqual(len(tf.contrib.framework.get_variables()), 2)\n\n def testNonReuseVars(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n tf.contrib.layers.convolution2d(images, 32, [3, 3])\n self.assertEqual(len(tf.contrib.framework.get_variables()), 2)\n tf.contrib.layers.convolution2d(images, 32, [3, 3])\n self.assertEqual(len(tf.contrib.framework.get_variables()), 4)\n\n def testReuseConvWithWD(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n weight_decay = tf.contrib.layers.l2_regularizer(0.01)\n with tf.contrib.framework.arg_scope(\n [tf.contrib.layers.convolution2d],\n weights_regularizer=weight_decay):\n tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1')\n self.assertEqual(len(tf.contrib.framework.get_variables()), 2)\n self.assertEqual(\n len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)\n tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='conv1',\n reuse=True)\n self.assertEqual(len(tf.contrib.framework.get_variables()), 2)\n self.assertEqual(\n len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)\n\n def testConvWithBatchNorm(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 32), seed=1)\n with tf.contrib.framework.arg_scope(\n [tf.contrib.layers.convolution2d],\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params={'decay': 0.9}):\n net = tf.contrib.layers.convolution2d(images, 32, [3, 3])\n net = tf.contrib.layers.convolution2d(net, 32, [3, 3])\n self.assertEqual(len(tf.contrib.framework.get_variables()), 8)\n self.assertEqual(\n len(tf.contrib.framework.get_variables('Conv/BatchNorm')), 3)\n self.assertEqual(\n len(tf.contrib.framework.get_variables('Conv_1/BatchNorm')), 3)\n\n def 
testReuseConvWithBatchNorm(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 32), seed=1)\n with tf.contrib.framework.arg_scope(\n [tf.contrib.layers.convolution2d],\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params={'decay': 0.9}):\n net = tf.contrib.layers.convolution2d(images, 32, [3, 3], scope='Conv')\n net = tf.contrib.layers.convolution2d(net, 32, [3, 3], scope='Conv',\n reuse=True)\n self.assertEqual(len(tf.contrib.framework.get_variables()), 4)\n self.assertEqual(\n len(tf.contrib.framework.get_variables('Conv/BatchNorm')), 3)\n self.assertEqual(\n len(tf.contrib.framework.get_variables('Conv_1/BatchNorm')), 0)\n\n def testCreateConvCreatesWeightsAndBiasesVarsWithRateTwo(self):\n height, width = 7, 9\n images = tf.random_uniform((5, height, width, 3), seed=1)\n with self.test_session():\n self.assertFalse(tf.contrib.framework.get_variables('conv1/weights'))\n self.assertFalse(tf.contrib.framework.get_variables('conv1/biases'))\n tf.contrib.layers.convolution2d(images, 32, [3, 3], rate=2, scope='conv1')\n self.assertTrue(tf.contrib.framework.get_variables('conv1/weights'))\n self.assertTrue(tf.contrib.framework.get_variables('conv1/biases'))\n\n def testOutputSizeWithRateTwoSamePadding(self):\n num_filters = 32\n input_size = [5, 10, 12, 3]\n expected_size = [5, 10, 12, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.convolution2d(images, num_filters,\n [3, 3], rate=2, padding='SAME')\n self.assertListEqual(list(output.get_shape().as_list()), expected_size)\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithRateTwoValidPadding(self):\n num_filters = 32\n input_size = [5, 10, 12, 3]\n expected_size = [5, 6, 8, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],\n rate=2, padding='VALID')\n self.assertListEqual(list(output.get_shape().as_list()), expected_size)\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithRateTwoThreeValidPadding(self):\n num_filters = 32\n input_size = [5, 10, 12, 3]\n expected_size = [5, 6, 6, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],\n rate=[2, 3], padding='VALID')\n self.assertListEqual(list(output.get_shape().as_list()), expected_size)\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEquals(output.op.name, 'Conv/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testDynamicOutputSizeWithRateOneValidPadding(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [None, None, None, num_filters]\n expected_size_dynamic = [5, 7, 9, num_filters]\n\n with self.test_session():\n images = tf.placeholder(np.float32, [None, None, None, input_size[3]])\n output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],\n rate=1, padding='VALID')\n tf.initialize_all_variables().run()\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(), expected_size)\n eval_output = output.eval({images: np.zeros(input_size, 
np.float32)})\n self.assertListEqual(list(eval_output.shape), expected_size_dynamic)\n\n def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):\n if tf.test.is_gpu_available():\n num_filters = 32\n input_size = [5, 3, 9, 11]\n expected_size = [None, num_filters, None, None]\n expected_size_dynamic = [5, num_filters, 7, 9]\n\n with self.test_session(use_gpu=True):\n images = tf.placeholder(np.float32, [None, input_size[1], None, None])\n output = tf.contrib.layers.convolution2d(\n images,\n num_filters, [3, 3],\n rate=1,\n padding='VALID',\n data_format='NCHW')\n tf.initialize_all_variables().run()\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(), expected_size)\n eval_output = output.eval({images: np.zeros(input_size, np.float32)})\n self.assertListEqual(list(eval_output.shape), expected_size_dynamic)\n\n def testDynamicOutputSizeWithRateTwoValidPadding(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [None, None, None, num_filters]\n expected_size_dynamic = [5, 5, 7, num_filters]\n\n with self.test_session():\n images = tf.placeholder(np.float32, [None, None, None, input_size[3]])\n output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],\n rate=2, padding='VALID')\n tf.initialize_all_variables().run()\n self.assertEqual(output.op.name, 'Conv/Relu')\n self.assertListEqual(output.get_shape().as_list(), expected_size)\n eval_output = output.eval({images: np.zeros(input_size, np.float32)})\n self.assertListEqual(list(eval_output.shape), expected_size_dynamic)\n\n def testWithScope(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [5, 5, 7, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],\n rate=2, padding='VALID',\n scope='conv7')\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'conv7/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testWithScopeWithoutActivation(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [5, 5, 7, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.convolution2d(images, num_filters, [3, 3],\n rate=2, padding='VALID',\n activation_fn=None, scope='conv7')\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'conv7/BiasAdd')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n\nclass Convolution2dTransposeTests(tf.test.TestCase):\n\n def testInvalidDataFormat(self):\n height, width = 7, 9\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n with self.assertRaisesRegexp(\n ValueError, 'data_format has to be either NCHW or NHWC.'):\n tf.contrib.layers.convolution2d_transpose(\n images, 32, 3, data_format='CHWN')\n\n\n def testOutputSizeWithStrideOneSamePaddingNCHW(self):\n # `NCHW` data fomat is only supported for `GPU` device.\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 32\n input_size = [5, 3, 10, 12]\n expected_size = [5, num_filters, 10, 12]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=1,\n padding='SAME', data_format='NCHW')\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n\n sess.run(tf.initialize_all_variables())\n 
self.assertListEqual(list(output.eval().shape), expected_size)\n\n\n def testOutputSizeWithStrideOneValidPaddingNCHW(self):\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 32\n input_size = [5, 3, 10, 12]\n expected_size = [5, num_filters, 12, 14]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=1,\n padding='VALID', data_format='NCHW')\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n\n sess.run(tf.initialize_all_variables())\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithStrideTwoValidPaddingNCHW(self):\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 32\n input_size = [5, 3, 9, 11]\n expected_size = [5, num_filters, 19, 23]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=[2, 2],\n padding='VALID', data_format='NCHW')\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.get_shape().as_list()), expected_size)\n\n sess.run(tf.initialize_all_variables())\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWith1x1StrideTwoSamePaddingNCHW(self):\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 1\n input_size = [1, 1, 1, 1]\n expected_size = [1, num_filters, 2, 2]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 2], stride=[2, 2],\n padding='SAME', data_format='NCHW')\n self.assertListEqual(list(output.get_shape().as_list()), expected_size)\n\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWith1x1StrideTwoValidPaddingNCHW(self):\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 1\n input_size = [1, 1, 1, 1]\n expected_size = [1, num_filters, 2, 2]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 2], stride=[2, 2],\n padding='VALID', data_format='NCHW')\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWith2x2StrideTwoSamePaddingNCHW(self):\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 1\n input_size = [1, 1, 2, 2]\n expected_size = [1, num_filters, 4, 4]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 2], stride=[2, 2],\n padding='SAME', data_format='NCHW')\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWith2x2StrideTwoValidPaddingNCHW(self):\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 1\n input_size = [1, 1, 2, 2]\n expected_size = [1, num_filters, 4, 4]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 2], stride=[2, 2],\n padding='VALID', data_format='NCHW')\n 
sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithStride2x1NCHW(self):\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 1\n input_size = [1, 1, 3, 2]\n expected_size = [1, num_filters, 6, 5]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 4], stride=[2, 1],\n padding='VALID', data_format='NCHW')\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithStride2x4NCHW(self):\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 1\n input_size = [1, 1, 3, 2]\n expected_size = [1, num_filters, 6, 8]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 4], stride=[2, 4],\n padding='VALID', data_format='NCHW')\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithStride2x5NCHW(self):\n if tf.test.is_gpu_available():\n with self.test_session(use_gpu=True) as sess:\n num_filters = 1\n input_size = [1, 1, 3, 2]\n expected_size = [1, num_filters, 6, 10]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 4], stride=[2, 5],\n padding='VALID', data_format='NCHW')\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n\n def testOutputSizeWithStrideOneSamePadding(self):\n num_filters = 32\n input_size = [5, 10, 12, 3]\n expected_size = [5, 10, 12, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=1, padding='SAME')\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithStrideOneValidPadding(self):\n num_filters = 32\n input_size = [5, 10, 12, 3]\n expected_size = [5, 12, 14, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=1, padding='VALID')\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithStrideTwoValidPadding(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [5, 19, 23, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=[2, 2], padding='VALID')\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.get_shape().as_list()), expected_size)\n\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWith1x1StrideTwoSamePadding(self):\n num_filters = 1\n input_size = 
[1, 1, 1, 1]\n expected_size = [1, 2, 2, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 2], stride=[2, 2], padding='SAME')\n self.assertListEqual(list(output.get_shape().as_list()), expected_size)\n\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWith1x1StrideTwoValidPadding(self):\n num_filters = 1\n input_size = [1, 1, 1, 1]\n expected_size = [1, 2, 2, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 2], stride=[2, 2], padding='VALID')\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWith2x2StrideTwoSamePadding(self):\n num_filters = 1\n input_size = [1, 2, 2, 1]\n expected_size = [1, 4, 4, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 2], stride=[2, 2], padding='SAME')\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWith2x2StrideTwoValidPadding(self):\n num_filters = 1\n input_size = [1, 2, 2, 1]\n expected_size = [1, 4, 4, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 2], stride=[2, 2], padding='VALID')\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithStride2x1(self):\n num_filters = 1\n input_size = [1, 3, 2, 1]\n expected_size = [1, 6, 5, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 4], stride=[2, 1], padding='VALID')\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithStride2x4(self):\n num_filters = 1\n input_size = [1, 3, 2, 1]\n expected_size = [1, 6, 8, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 4], stride=[2, 4], padding='VALID')\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testOutputSizeWithStride2x5(self):\n num_filters = 1\n input_size = [1, 3, 2, 1]\n expected_size = [1, 6, 10, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [2, 4], stride=[2, 5], padding='VALID')\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def 
testOutputSizeRandomSizesAndStridesValidPadding(self):\n np.random.seed(0)\n max_image_size = 10\n\n for _ in range(10):\n num_filters = 1\n input_size = [1, np.random.randint(1, max_image_size),\n np.random.randint(1, max_image_size), 1]\n filter_size = [np.random.randint(1, input_size[1] + 1),\n np.random.randint(1, input_size[2] + 1)]\n stride = [np.random.randint(1, 3), np.random.randint(1, 3)]\n\n tf.reset_default_graph()\n graph = tf.Graph()\n with graph.as_default():\n images = tf.random_uniform(input_size, seed=1)\n transpose = tf.contrib.layers.conv2d_transpose(\n images, num_filters, filter_size, stride=stride, padding='VALID')\n conv = tf.contrib.layers.conv2d(\n transpose, num_filters, filter_size, stride=stride, padding='VALID')\n\n with self.test_session(graph=graph) as sess:\n sess.run(tf.initialize_all_variables())\n self.assertListEqual(list(conv.eval().shape), input_size)\n\n def testDynamicOutputSizeWithStrideTwoValidPadding(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [None, None, None, num_filters]\n expected_size_dynamic = [5, 19, 23, num_filters]\n\n images = tf.placeholder(np.float32, [None, None, None, input_size[3]])\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=[2, 2], padding='VALID')\n self.assertListEqual(output.get_shape().as_list(), expected_size)\n\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n eval_output = output.eval({images: np.zeros(input_size, np.float32)})\n self.assertListEqual(list(eval_output.shape), expected_size_dynamic)\n\n def testDynamicOutputSizeWithStrideTwoSamePadding(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [None, None, None, num_filters]\n expected_size_dynamic = [5, 18, 22, num_filters]\n\n with self.test_session():\n images = tf.placeholder(np.float32, [None, None, None, input_size[3]])\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=[2, 2], padding='SAME')\n tf.initialize_all_variables().run()\n self.assertEqual(output.op.name, 'Conv2d_transpose/Relu')\n self.assertListEqual(output.get_shape().as_list(), expected_size)\n eval_output = output.eval({images: np.zeros(input_size, np.float32)})\n self.assertListEqual(list(eval_output.shape), expected_size_dynamic)\n\n def testWithScope(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [5, 19, 23, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=2, padding='VALID', scope='conv7')\n self.assertEqual(output.op.name, 'conv7/Relu')\n\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testWithScopeWithoutActivation(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [5, 19, 23, num_filters]\n\n images = tf.random_uniform(input_size, seed=1)\n output = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=2, padding='VALID',\n activation_fn=None, scope='conv7')\n self.assertEqual(output.op.name, 'conv7/BiasAdd')\n\n with self.test_session() as sess:\n sess.run(tf.initialize_all_variables())\n self.assertListEqual(list(output.eval().shape), expected_size)\n\n def testDeconvWithoutBiasesProducesConv2dTranspose(self):\n num_filters = 32\n input_size = [5, 9, 11, 3]\n expected_size = [5, 19, 23, num_filters]\n stride 
= 2\n padding = 'VALID'\n\n with self.test_session() as sess:\n images = tf.random_uniform(input_size, seed=1)\n output_deconv = tf.contrib.layers.conv2d_transpose(\n images, num_filters, [3, 3], stride=stride, padding=padding,\n activation_fn=None, scope='conv7')\n\n weights = tf.contrib.framework.get_variables_by_name('conv7/weights')[0]\n output_conv2d_transpose = tf.nn.conv2d_transpose(\n images,\n weights,\n expected_size,\n [1, stride, stride, 1],\n padding=padding)\n\n sess.run(tf.initialize_all_variables())\n\n output_deconv, output_conv2d_transpose = sess.run(\n [output_deconv, output_conv2d_transpose])\n\n self.assertTrue(np.isclose(output_deconv,\n output_conv2d_transpose, 1e-5, 1e-5).all())\n\n\nclass ConvolutionInPlaneTest(tf.test.TestCase):\n\n def testHorzConvWithBlankImage(self):\n image = tf.ones((1, 10, 10, 1))\n horz_gradients = tf.contrib.layers.conv2d_in_plane(\n image,\n weights_initializer=tf.constant_initializer([1, -1]),\n kernel_size=[1, 2],\n padding='VALID',\n activation_fn=None)\n init_op = tf.initialize_all_variables()\n\n with self.test_session() as sess:\n sess.run(init_op)\n result = sess.run(horz_gradients)\n expected = np.zeros((1, 10, 9, 1))\n\n self.assertAllEqual(result, expected)\n\n def testHorzConvWithBlankImageAndPlaceholder(self):\n image = tf.placeholder(tf.float32, shape=(None, None, None, 1))\n horz_gradients = tf.contrib.layers.conv2d_in_plane(\n image,\n weights_initializer=tf.constant_initializer([1, -1]),\n kernel_size=[1, 2],\n padding='VALID',\n activation_fn=None)\n init_op = tf.initialize_all_variables()\n\n with self.test_session() as sess:\n sess.run(init_op)\n result = sess.run(horz_gradients,\n feed_dict={image: np.ones((1, 10, 10, 1))})\n expected = np.zeros((1, 10, 9, 1))\n\n self.assertAllEqual(result, expected)\n\n def testHorzConvWithRandomImageMultiBatch(self):\n np.random.seed(1)\n image = np.random.rand(5, 10, 10, 1)\n expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]\n\n tf_image = tf.constant(image, dtype=tf.float32)\n horz_gradients = tf.contrib.layers.conv2d_in_plane(\n tf_image,\n weights_initializer=tf.constant_initializer([1, -1]),\n kernel_size=[1, 2],\n padding='VALID',\n activation_fn=None)\n init_op = tf.initialize_all_variables()\n\n with self.test_session() as sess:\n sess.run(init_op)\n result = sess.run(horz_gradients)\n\n self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)\n\n def testHorzConvWithRandomImageMultiBatchMultiChannel(self):\n np.random.seed(1)\n image = np.random.rand(5, 10, 10, 7)\n expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]\n\n tf_image = tf.constant(image, dtype=tf.float32)\n horz_gradients = tf.contrib.layers.conv2d_in_plane(\n tf_image,\n weights_initializer=tf.constant_initializer([1, -1]),\n kernel_size=[1, 2],\n padding='VALID',\n activation_fn=None)\n init_op = tf.initialize_all_variables()\n\n with self.test_session() as sess:\n sess.run(init_op)\n result = sess.run(horz_gradients)\n\n self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)\n\n def testHorzConvWithVaryingImage(self):\n image = np.asmatrix(('1.0 2.0 3.0;'\n '1.1 2.0 4.0;'\n '-4.3 0.0 8.9'))\n\n expected = np.asmatrix(('-1.0 -1.0;'\n '-0.9 -2.0;'\n '-4.3 -8.9'))\n expected = np.reshape(np.asarray(expected), (1, 3, 2, 1))\n\n tf_image = tf.constant(image, shape=(1, 3, 3, 1), dtype=tf.float32)\n horz_gradients = tf.contrib.layers.conv2d_in_plane(\n tf_image,\n weights_initializer=tf.constant_initializer([1, -1]),\n kernel_size=[1, 2],\n padding='VALID',\n activation_fn=None)\n init_op = 
tf.initialize_all_variables()\n\n with self.test_session() as sess:\n sess.run(init_op)\n result = sess.run(horz_gradients)\n\n self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)\n\n def testVertConvWithBlankImage(self):\n image = tf.ones((1, 10, 10, 1))\n vert_gradients = tf.contrib.layers.conv2d_in_plane(\n image,\n weights_initializer=tf.constant_initializer([1, -1]),\n kernel_size=[2, 1],\n padding='VALID',\n activation_fn=None)\n init_op = tf.initialize_all_variables()\n\n with self.test_session() as sess:\n sess.run(init_op)\n result = sess.run(vert_gradients)\n expected = np.zeros((1, 9, 10, 1))\n\n self.assertAllEqual(result, expected)\n\n def testVertConvWithVaryingImage(self):\n image = np.asmatrix(('1.0 2.0 3.0;'\n '1.1 2.0 4.0;'\n '-4.3 0.0 8.9'))\n\n expected = np.asmatrix(('-0.1 0.0 -1.0;'\n ' 5.4 2.0 -4.9'))\n expected = np.reshape(np.asarray(expected), (1, 2, 3, 1))\n\n tf_image = tf.constant(image, shape=(1, 3, 3, 1), dtype=tf.float32)\n vert_gradients = tf.contrib.layers.conv2d_in_plane(\n tf_image,\n weights_initializer=tf.constant_initializer([1, -1]),\n kernel_size=[2, 1],\n padding='VALID',\n activation_fn=None)\n init_op = tf.initialize_all_variables()\n\n with self.test_session() as sess:\n sess.run(init_op)\n result = sess.run(vert_gradients)\n\n self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)\n\n\nclass DropoutTest(tf.test.TestCase):\n\n def testCreateDropout(self):\n height, width = 3, 3\n with self.test_session():\n images = np.random.uniform(size=(5, height, width, 3))\n output = tf.contrib.layers.dropout(images)\n self.assertEqual(output.op.name, 'Dropout/dropout/mul')\n output.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(images).get_shape())\n\n def testCreateDropoutWithConstantTrue(self):\n height, width = 3, 3\n with self.test_session():\n is_training = tf.constant(True)\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.dropout(images, is_training=is_training)\n self.assertEqual(output.op.name, 'Dropout/dropout/mul')\n output.get_shape().assert_is_compatible_with(images.get_shape())\n\n def testCreateDropoutWithConstantFalse(self):\n height, width = 3, 3\n with self.test_session():\n is_training = tf.constant(False)\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.dropout(images, is_training=is_training)\n self.assertEqual(output.op.name, 'Dropout/Identity')\n output.get_shape().assert_is_compatible_with(images.get_shape())\n\n def testCreateDropoutWithPlaceholder(self):\n height, width = 3, 3\n with self.test_session():\n is_training = tf.placeholder(dtype=tf.bool, shape=[])\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.dropout(images, is_training=is_training)\n self.assertEqual(output.op.name, 'Dropout/cond/Merge')\n output.get_shape().assert_is_compatible_with(images.get_shape())\n\n def testCollectOutputs(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.dropout(images, outputs_collections='outputs')\n c_output = tf.get_collection('outputs')[0]\n self.assertEqual(c_output.alias, 'Dropout')\n self.assertEqual(c_output, output)\n\n def testDropout(self):\n height, width = 10, 10\n with self.test_session() as sess:\n images = tf.random_uniform((5, height, width, 3), seed=1, name='images')\n num_elem_initial = tf.reduce_mean(tf.to_float(images > 0))\n output = tf.contrib.layers.dropout(images)\n num_elem 
= tf.reduce_mean(tf.to_float(output > 0))\n sess.run(tf.initialize_all_variables())\n num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])\n self.assertLess(num_elem, num_elem_initial/2 + 0.1)\n self.assertGreater(num_elem, num_elem_initial/2 - 0.1)\n\n def testCreateDropoutNoTraining(self):\n height, width = 3, 3\n with self.test_session() as sess:\n images = tf.random_uniform((5, height, width, 3), seed=1, name='images')\n num_elem_initial = tf.reduce_mean(tf.to_float(images > 0))\n output = tf.contrib.layers.dropout(images, is_training=False)\n num_elem = tf.reduce_mean(tf.to_float(output > 0))\n sess.run(tf.initialize_all_variables())\n num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])\n self.assertEqual(num_elem, num_elem_initial)\n outputs, inputs = sess.run([output, images])\n self.assertAllClose(outputs, inputs)\n\n def testCreateFCFollowByDropout(self):\n height, width = 3, 3\n with self.test_session() as sess:\n images = tf.random_uniform((5, height, width, 3), seed=1, name='images')\n output = tf.contrib.layers.fully_connected(images, 50)\n num_elem_initial = tf.reduce_mean(tf.to_float(output > 0))\n output = tf.contrib.layers.dropout(output)\n num_elem = tf.reduce_mean(tf.to_float(output > 0))\n sess.run(tf.initialize_all_variables())\n num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])\n self.assertLess(num_elem, num_elem_initial/2 + 0.1)\n self.assertGreater(num_elem, num_elem_initial/2 - 0.1)\n\n def testCreateFCWithDropout(self):\n height, width = 3, 3\n with self.test_session() as sess:\n images = tf.random_uniform((5, height, width, 3), seed=1, name='images')\n output = tf.contrib.layers.fully_connected(\n images, 50, normalizer_fn=tf.contrib.layers.dropout)\n num_elem = tf.reduce_mean(tf.to_float(output > 0))\n sess.run(tf.initialize_all_variables())\n num_elem = sess.run(num_elem)\n self.assertLess(num_elem, 0.5)\n self.assertGreater(num_elem, 0.1)\n\n\nclass FlattenTest(tf.test.TestCase):\n\n def testInvalidRank(self):\n with tf.Graph().as_default() as g, self.test_session(g):\n inputs = tf.placeholder(dtype=tf.float32)\n inputs.set_shape(tf.TensorShape((5,)))\n with self.assertRaisesRegexp(\n ValueError, 'must have a least 2 dimensions'):\n tf.contrib.layers.flatten(inputs)\n\n def testUnknownLastDim(self):\n with tf.Graph().as_default() as g, self.test_session(g):\n inputs = tf.placeholder(dtype=tf.float32)\n inputs.set_shape(tf.TensorShape((5, None)))\n with self.assertRaisesRegexp(ValueError, '2nd dimension must be defined'):\n tf.contrib.layers.flatten(inputs)\n\n def testCollectOutputs(self):\n height, width = 3, 3\n with self.test_session():\n images = np.random.uniform(size=(5, height, width, 3))\n output = tf.contrib.layers.flatten(images, outputs_collections='outputs')\n c_output = tf.get_collection('outputs')[0]\n self.assertEqual(c_output.alias, 'Flatten')\n self.assertEqual(c_output, output)\n\n def testFlatten4D(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1, name='images')\n output = tf.contrib.layers.flatten(images)\n self.assertEqual(output.get_shape().num_elements(),\n images.get_shape().num_elements())\n self.assertEqual(output.get_shape()[0], images.get_shape()[0])\n\n def testFlatten3D(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width), seed=1, name='images')\n output = tf.contrib.layers.flatten(images)\n self.assertEqual(output.get_shape().num_elements(),\n 
images.get_shape().num_elements())\n self.assertEqual(output.get_shape()[0], images.get_shape()[0])\n\n def testFlattenBatchSize(self):\n height, width = 3, 3\n with self.test_session() as sess:\n images = tf.random_uniform((5, height, width, 3), seed=1, name='images')\n inputs = tf.placeholder(tf.int32, (None, height, width, 3))\n output = tf.contrib.layers.flatten(inputs)\n self.assertEqual(output.get_shape().as_list(),\n [None, height * width * 3])\n output = sess.run(output, {inputs: images.eval()})\n self.assertEqual(output.size,\n images.get_shape().num_elements())\n self.assertEqual(output.shape[0], images.get_shape()[0])\n\n\ndef _sparsify(array, threshold=0.5):\n array[array < threshold] = 0\n non_zero = np.where(array)\n indices = np.vstack(non_zero).T\n values = array[non_zero]\n shape = array.shape\n return indices, values, shape\n\n\nclass PartialFlattenTest(tf.test.TestCase):\n\n def testDensePartialFlatten(self):\n \"\"\"Test `_inner_flatten` on `Tensor`s.\"\"\"\n shape = [2, 3, 4, 5, 6]\n np.random.seed(5446)\n inputs = np.random.randint(0, 100, size=shape)\n\n for new_rank in [1, 2, 3, 4, 5]:\n expected_new_shape = (shape[:new_rank - 1] +\n [np.prod(shape[new_rank - 1:])])\n expected_flattened = np.reshape(inputs, expected_new_shape)\n\n flattened_t = _layers._inner_flatten(inputs, new_rank)\n static_shape = flattened_t.get_shape().as_list()\n self.assertEqual(static_shape, expected_new_shape)\n with self.test_session() as sess:\n flattened = sess.run(flattened_t)\n np.testing.assert_array_equal(expected_flattened, flattened)\n\n def testSparsePartialFlatten(self):\n \"\"\"Test `_inner_flatten` on `SparseTensor`s.\"\"\"\n shape = [4, 3, 11, 6, 1, 3]\n np.random.seed(10301)\n random_ = np.random.rand(*shape)\n indices, values, _ = _sparsify(random_)\n\n for new_rank in [1, 2, 3, 4, 5]:\n expected_shape = (shape[:new_rank - 1] + [np.prod(shape[new_rank - 1:])])\n reshaped_random_ = np.reshape(random_, expected_shape)\n expected_indices, expected_values, _ = _sparsify(reshaped_random_)\n\n inputs_t = tf.SparseTensor(indices, values, shape)\n\n flattened_t = _layers._inner_flatten(inputs_t, new_rank)\n\n with self.test_session() as sess:\n flattened = sess.run(flattened_t)\n\n np.testing.assert_array_equal(expected_indices, flattened.indices)\n np.testing.assert_array_equal(expected_values, flattened.values)\n np.testing.assert_array_equal(expected_shape, flattened.shape)\n\n def testIncompleteShape(self):\n \"\"\"Test `_inner_flatten` shape inference for incomplete shapes.\"\"\"\n shape = [2, None, 4, None, 5, 6]\n inputs = tf.placeholder(tf.int32)\n inputs.set_shape(shape)\n\n flattened1 = _layers._inner_flatten(inputs, 1)\n self.assertEqual([None], flattened1.get_shape().as_list())\n\n flattened2 = _layers._inner_flatten(inputs, 2)\n self.assertEqual([2, None], flattened2.get_shape().as_list())\n\n flattened3 = _layers._inner_flatten(inputs, 3)\n self.assertEqual([2, None, None], flattened3.get_shape().as_list())\n\n flattened4 = _layers._inner_flatten(inputs, 4)\n self.assertEqual([2, None, 4, None], flattened4.get_shape().as_list())\n\n flattened5 = _layers._inner_flatten(inputs, 5)\n self.assertEqual([2, None, 4, None, 30], flattened5.get_shape().as_list())\n\n\nclass FCTest(tf.test.TestCase):\n\n def testCreateFC(self):\n height, width = 3, 3\n for layer_fn in (tf.contrib.layers.fully_connected, tf.contrib.layers.relu):\n with tf.Graph().as_default() as g, self.test_session(g):\n inputs = np.random.uniform(size=(5, height * width * 3))\n output = layer_fn(inputs, 32)\n 
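# Both the fully_connected and relu aliases should build a Relu-activated layer under the default 'fully_connected' scope, with weights of shape [height * width * 3, 32] and one bias per output unit, as the assertions below verify.\n        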
self.assertEqual(output.op.name, 'fully_connected/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, 32])\n weights = tf.contrib.framework.get_variables_by_name('weights')[0]\n self.assertListEqual(weights.get_shape().as_list(), [3 * 3 * 3, 32])\n biases = tf.contrib.framework.get_variables_by_name('biases')[0]\n self.assertListEqual(biases.get_shape().as_list(), [32])\n\n def testCreateFCWithScope(self):\n height, width = 3, 3\n with self.test_session():\n inputs = tf.random_uniform((5, height * width * 3), seed=1)\n output = tf.contrib.layers.fully_connected(inputs, 32, scope='fc1')\n self.assertEqual(output.op.name, 'fc1/Relu')\n\n def testCreateFCWithCollection(self):\n height, width = 3, 3\n inputs = tf.random_uniform((5, height * width * 3), seed=1)\n with tf.name_scope('fe'):\n fc = tf.contrib.layers.fully_connected(inputs, 7,\n outputs_collections='outputs',\n scope='fc')\n output_collected = tf.get_collection('outputs')[0]\n self.assertEqual(output_collected.alias, 'fe/fc')\n self.assertEqual(output_collected, fc)\n\n def testCreateFcCreatesWeightsAndBiasesVars(self):\n height, width = 3, 3\n inputs = tf.random_uniform((5, height * width * 3), seed=1)\n with self.test_session():\n self.assertFalse(tf.contrib.framework.get_variables('fc1/weights'))\n self.assertFalse(tf.contrib.framework.get_variables('fc1/biases'))\n tf.contrib.layers.fully_connected(inputs, 32, scope='fc1')\n self.assertTrue(tf.contrib.framework.get_variables('fc1/weights'))\n self.assertTrue(tf.contrib.framework.get_variables('fc1/biases'))\n\n def testReuseVars(self):\n height, width = 3, 3\n inputs = tf.random_uniform((5, height * width * 3), seed=1)\n with self.test_session():\n tf.contrib.layers.fully_connected(inputs, 32, scope='fc1')\n self.assertEqual(len(tf.contrib.framework.get_variables('fc1')), 2)\n tf.contrib.layers.fully_connected(inputs, 32, scope='fc1', reuse=True)\n self.assertEqual(len(tf.contrib.framework.get_variables('fc1')), 2)\n\n def testNonReuseVars(self):\n height, width = 3, 3\n inputs = tf.random_uniform((5, height * width * 3), seed=1)\n with self.test_session():\n tf.contrib.layers.fully_connected(inputs, 32)\n self.assertEqual(\n len(tf.contrib.framework.get_variables('fully_connected')), 2)\n tf.contrib.layers.fully_connected(inputs, 32)\n self.assertEqual(\n len(tf.contrib.framework.get_variables('fully_connected')), 4)\n\n def testCreateFCWithoutActivation(self):\n height, width = 3, 3\n with self.test_session():\n inputs = tf.random_uniform((5, height * width * 3), seed=1)\n output = tf.contrib.layers.fully_connected(inputs, 32, activation_fn=None)\n self.assertEqual(output.op.name, 'fully_connected/BiasAdd')\n\n def testCreateFCWithWD(self):\n height, width = 3, 3\n with self.test_session() as sess:\n inputs = tf.random_uniform((5, height * width * 3), seed=1)\n weight_decay = tf.contrib.layers.l2_regularizer(0.01)\n tf.contrib.layers.fully_connected(inputs, 32,\n weights_regularizer=weight_decay)\n wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]\n self.assertEqual(wd.op.name,\n 'fully_connected/weights/Regularizer/l2_regularizer')\n sess.run(tf.initialize_all_variables())\n self.assertLess(sess.run(wd), 0.4)\n\n def testCreateNoRegularizers(self):\n height, width = 3, 3\n with self.test_session():\n inputs = tf.random_uniform((5, height * width * 3), seed=1)\n tf.contrib.layers.fully_connected(inputs, 32)\n self.assertEqual(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])\n\n def testReuseFCWithWD(self):\n height, width = 3, 3\n with 
self.test_session():\n inputs = tf.random_uniform((5, height * width * 3), seed=1)\n weight_decay = tf.contrib.layers.l2_regularizer(0.01)\n tf.contrib.layers.fully_connected(inputs, 32,\n weights_regularizer=weight_decay,\n scope='FC')\n self.assertEqual(len(tf.contrib.framework.get_variables()), 2)\n self.assertEqual(\n len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)\n tf.contrib.layers.fully_connected(inputs, 32,\n weights_regularizer=weight_decay,\n scope='FC',\n reuse=True)\n self.assertEqual(len(tf.contrib.framework.get_variables()), 2)\n self.assertEqual(\n len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)\n\n def testFCWithBatchNorm(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height * width * 3), seed=1)\n with tf.contrib.framework.arg_scope(\n [tf.contrib.layers.fully_connected],\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params={'decay': 0.9}):\n net = tf.contrib.layers.fully_connected(images, 27)\n net = tf.contrib.layers.fully_connected(net, 27)\n self.assertEqual(len(tf.contrib.framework.get_variables()), 8)\n self.assertEqual(len(tf.contrib.framework.get_variables(\n 'fully_connected/BatchNorm')), 3)\n self.assertEqual(len(tf.contrib.framework.get_variables(\n 'fully_connected_1/BatchNorm')), 3)\n\n def testReuseFCWithBatchNorm(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height * width * 3), seed=1)\n with tf.contrib.framework.arg_scope(\n [tf.contrib.layers.fully_connected],\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params={'decay': 0.9}):\n net = tf.contrib.layers.fully_connected(images, 27, scope='fc1')\n net = tf.contrib.layers.fully_connected(net, 27, scope='fc1',\n reuse=True)\n self.assertEqual(len(tf.contrib.framework.get_variables()), 4)\n self.assertEqual(\n len(tf.contrib.framework.get_variables('fc1/BatchNorm')), 3)\n\n\nclass BatchNormTest(tf.test.TestCase):\n\n def _addBesselsCorrection(self, sample_size, expected_var):\n correction_factor = sample_size / (sample_size - 1)\n expected_var *= correction_factor\n return expected_var, correction_factor\n\n def testUnknownShape(self):\n with tf.Graph().as_default() as g, self.test_session(g):\n inputs = tf.placeholder(dtype=tf.float32)\n with self.assertRaisesRegexp(ValueError, 'undefined rank'):\n tf.contrib.layers.batch_norm(inputs)\n\n def testInvalidDataFormat(self):\n with tf.Graph().as_default() as g, self.test_session(g):\n inputs = tf.placeholder(dtype=tf.float32)\n with self.assertRaisesRegexp(\n ValueError, 'data_format has to be either NCHW or NHWC.'):\n tf.contrib.layers.batch_norm(inputs, data_format='CHWN')\n\n def testUnknownLastDim(self):\n with tf.Graph().as_default() as g, self.test_session(g):\n inputs = tf.placeholder(dtype=tf.float32)\n inputs.set_shape(tf.TensorShape((5, 3, 3, None)))\n with self.assertRaisesRegexp(ValueError, 'undefined last dimension'):\n tf.contrib.layers.batch_norm(inputs)\n\n def testWeightedMomentsFused(self):\n with tf.Graph().as_default() as g, self.test_session(g):\n inputs = tf.placeholder(dtype=tf.float32, shape=(5, 3, 3, 7))\n batch_weights = tf.placeholder(dtype=tf.float32)\n with self.assertRaisesRegexp(ValueError,\n 'Weighted mean and variance'):\n tf.contrib.layers.batch_norm(\n inputs, batch_weights=batch_weights, fused=True)\n\n def _testCreateOp(self, fused):\n height, width = 3, 3\n with self.test_session():\n images = np.random.uniform(size=(5, height, width, 3)).astype('f')\n output = 
tf.contrib.layers.batch_norm(images, fused=fused)\n expected_name = ('BatchNorm/FusedBatchNorm' if fused else\n 'BatchNorm/batchnorm')\n self.assertTrue(output.op.name.startswith(expected_name))\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])\n\n def testCreateOpDefault(self):\n self._testCreateOp(False)\n\n def testCreateOpFused(self):\n self._testCreateOp(True)\n\n def testCreateVariables(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n tf.contrib.layers.batch_norm(images, scale=True)\n beta = tf.contrib.framework.get_variables_by_name('beta')[0]\n gamma = tf.contrib.framework.get_variables_by_name('gamma')[0]\n self.assertEqual(beta.op.name, 'BatchNorm/beta')\n self.assertEqual(gamma.op.name, 'BatchNorm/gamma')\n moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables_by_name(\n 'moving_variance')[0]\n self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')\n self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')\n\n def testMovingAverageVariables(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n tf.contrib.layers.batch_norm(images, scale=True)\n moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables_by_name(\n 'moving_variance')[0]\n self.assertEqual(moving_mean.op.name, 'BatchNorm/moving_mean')\n self.assertEqual(moving_variance.op.name, 'BatchNorm/moving_variance')\n\n def testUpdatesCollection(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n tf.contrib.layers.batch_norm(images, updates_collections='my_update_ops')\n update_layers = tf.get_collection('my_update_ops')\n update_moving_mean = update_layers[0]\n update_moving_variance = update_layers[1]\n self.assertEqual(update_moving_mean.op.name,\n 'BatchNorm/AssignMovingAvg')\n self.assertEqual(update_moving_variance.op.name,\n 'BatchNorm/AssignMovingAvg_1')\n\n def testReuseVariables(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n tf.contrib.layers.batch_norm(images, scale=True, scope='bn')\n tf.contrib.layers.batch_norm(images, scale=True, scope='bn', reuse=True)\n beta = tf.contrib.framework.get_variables_by_name('beta')\n gamma = tf.contrib.framework.get_variables_by_name('gamma')\n self.assertEqual(len(beta), 1)\n self.assertEqual(len(gamma), 1)\n moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')\n moving_variance = tf.contrib.framework.get_variables_by_name(\n 'moving_variance')\n moving_vars = moving_mean + moving_variance\n self.assertEqual(len(moving_vars), 2)\n\n def testReuseUpdateOps(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n with tf.contrib.framework.arg_scope([tf.contrib.layers.batch_norm],\n updates_collections='update_ops'):\n tf.contrib.layers.batch_norm(images, scope='bn')\n self.assertEqual(len(tf.get_collection('update_ops')), 2)\n tf.contrib.layers.batch_norm(images, scope='bn', reuse=True)\n self.assertEqual(len(tf.get_collection('update_ops')), 4)\n\n def testCreateMovingVars(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n _ = tf.contrib.layers.batch_norm(images)\n 
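# A single batch_norm call should create exactly one moving_mean and one moving_variance variable under the default 'BatchNorm' scope.\n      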
moving_mean = tf.contrib.framework.get_variables('BatchNorm/moving_mean')\n self.assertEqual(len(moving_mean), 1)\n self.assertEqual(moving_mean[0].op.name, 'BatchNorm/moving_mean')\n moving_variance = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_variance')\n self.assertEqual(len(moving_variance), 1)\n self.assertEqual(moving_variance[0].op.name, 'BatchNorm/moving_variance')\n\n def _testNoneUpdatesCollections(self, fused, data_format='NHWC'):\n height, width = 2, 2\n batch_size = 10\n channels = 3\n np.random.seed(1)\n use_gpu = fused\n with self.test_session(use_gpu=use_gpu) as sess:\n if data_format == 'NHWC':\n image_shape = (batch_size, height, width, channels)\n axis = (0, 1, 2)\n else:\n image_shape = (batch_size, channels, height, width)\n axis = (0, 2, 3)\n image_values = np.random.rand(*image_shape)\n expected_mean = np.mean(image_values, axis=axis)\n expected_var = np.var(image_values, axis=axis)\n if fused:\n # Add Bessel's correction\n expected_var, _ = self._addBesselsCorrection(\n batch_size * height * width, expected_var)\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n output = tf.contrib.layers.batch_norm(\n images,\n decay=0.1,\n updates_collections=None,\n fused=fused,\n data_format=data_format)\n # updates_ops are not added to UPDATE_OPS collection.\n self.assertEqual(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n moving_mean = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_variance')[0]\n mean, variance = sess.run([moving_mean, moving_variance])\n # After initialization moving_mean == 0 and moving_variance == 1.\n self.assertAllClose(mean, [0] * channels)\n self.assertAllClose(variance, [1] * channels)\n for _ in range(10):\n sess.run([output])\n mean = moving_mean.eval()\n variance = moving_variance.eval()\n # After 10 updates with decay 0.1 moving_mean == expected_mean and\n # moving_variance == expected_var.\n self.assertAllClose(mean, expected_mean)\n self.assertAllClose(variance, expected_var)\n\n def testNoneUpdatesCollectionsDefault(self):\n self._testNoneUpdatesCollections(False)\n\n def testNoneUpdatesCollectionsFusedNCHW(self):\n if tf.test.is_gpu_available():\n self._testNoneUpdatesCollections(True, data_format='NCHW')\n\n def testNoneUpdatesCollectionsFusedNHWC(self):\n self._testNoneUpdatesCollections(True, data_format='NHWC')\n\n def _testDelayedUpdateMovingVars(self, fused, data_format='NHWC'):\n height, width = 2, 2\n batch_size = 10\n channels = 3\n np.random.seed(1)\n use_gpu = fused\n with self.test_session(use_gpu=use_gpu) as sess:\n if data_format == 'NHWC':\n image_shape = (batch_size, height, width, channels)\n axis = (0, 1, 2)\n else:\n image_shape = (batch_size, channels, height, width)\n axis = (0, 2, 3)\n image_values = np.random.rand(*image_shape)\n expected_mean = np.mean(image_values, axis=axis)\n expected_var = np.var(image_values, axis=axis)\n if fused:\n # Add Bessel's correction\n expected_var, correction_factor = self._addBesselsCorrection(\n batch_size * height * width, expected_var)\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n output = tf.contrib.layers.batch_norm(\n images, decay=0.1, fused=fused, data_format=data_format)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n # updates_ops are added to UPDATE_OPS collection.\n self.assertEqual(len(update_ops), 2)\n with 
tf.control_dependencies(update_ops):\n barrier = tf.no_op(name='barrier')\n output = control_flow_ops.with_dependencies([barrier], output)\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n moving_mean = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_variance')[0]\n mean, variance = sess.run([moving_mean, moving_variance])\n # After initialization moving_mean == 0 and moving_variance == 1.\n self.assertAllClose(mean, [0] * channels)\n self.assertAllClose(variance, [1] * channels)\n for _ in range(10):\n sess.run([output])\n mean = moving_mean.eval()\n variance = moving_variance.eval()\n # After 10 updates with decay 0.1 moving_mean == expected_mean and\n # moving_variance == expected_var.\n self.assertAllClose(mean, expected_mean)\n if fused:\n # Add Bessel's correction\n moving_variance_corrected = moving_variance / correction_factor\n correct_moving_variance = state_ops.assign(moving_variance,\n moving_variance_corrected)\n sess.run(correct_moving_variance)\n self.assertAllClose(variance, expected_var)\n\n def testDelayedUpdateMovingVarsDefault(self):\n self._testDelayedUpdateMovingVars(False)\n\n def testDelayedUpdateMovingVarsFusedNCHW(self):\n if tf.test.is_gpu_available():\n self._testDelayedUpdateMovingVars(True, data_format='NCHW')\n\n def testDelayedUpdateMovingVarsFusedNHWC(self):\n self._testDelayedUpdateMovingVars(True, data_format='NHWC')\n\n def testDelayedUpdateMovingVars(self):\n self._testDelayedUpdateMovingVars(False)\n\n def testEvalMovingVars(self):\n height, width = 3, 3\n with self.test_session() as sess:\n image_shape = (10, height, width, 3)\n image_values = np.random.rand(*image_shape)\n expected_mean = np.mean(image_values, axis=(0, 1, 2))\n expected_var = np.var(image_values, axis=(0, 1, 2))\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n output = tf.contrib.layers.batch_norm(images,\n decay=0.1,\n is_training=False)\n self.assertEqual(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n moving_mean = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_variance')[0]\n mean, variance = sess.run([moving_mean, moving_variance])\n # After initialization moving_mean == 0 and moving_variance == 1.\n self.assertAllClose(mean, [0] * 3)\n self.assertAllClose(variance, [1] * 3)\n # Simulate assigment from saver restore.\n init_assigns = [tf.assign(moving_mean, expected_mean),\n tf.assign(moving_variance, expected_var)]\n sess.run(init_assigns)\n for _ in range(10):\n sess.run([output], {images: np.random.rand(*image_shape)})\n mean = moving_mean.eval()\n variance = moving_variance.eval()\n # Although we feed different images, the moving_mean and moving_variance\n # shouldn't change.\n self.assertAllClose(mean, expected_mean)\n self.assertAllClose(variance, expected_var)\n\n def testEvalMovingVarsWithPartitioner(self):\n # This test makes sure that the moving-mean and moving-variance logic works\n # when `batch_norm` is called within a variable-scope that has a variable\n # partitioner.\n partitioner = tf.fixed_size_partitioner(2, axis=0)\n with tf.variable_scope(tf.get_variable_scope(), partitioner=partitioner):\n self.testEvalMovingVars()\n\n def _testReuseVars(self, fused):\n height, width = 3, 3\n batch_size = 10\n channels = 3\n with self.test_session() as sess:\n 
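# Build a training graph and an eval graph that reuse the same 'BN' variables; for the fused path the expected variance is first adjusted by Bessel's correction before comparison.\n      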
image_shape = (batch_size, height, width, channels)\n image_values = np.random.rand(*image_shape)\n expected_mean = np.mean(image_values, axis=(0, 1, 2))\n expected_var = np.var(image_values, axis=(0, 1, 2))\n if fused:\n # Add Bessel's correction\n expected_var, correction_factor = self._addBesselsCorrection(\n batch_size * height * width, expected_var)\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n output_train = tf.contrib.layers.batch_norm(\n images, decay=0.1, is_training=True, scope='BN', fused=fused)\n output_eval = tf.contrib.layers.batch_norm(\n images,\n decay=0.1,\n is_training=False,\n scope='BN',\n reuse=True,\n fused=fused)\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n moving_mean = tf.contrib.framework.get_variables(\n 'BN/moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables(\n 'BN/moving_variance')[0]\n mean, variance = sess.run([moving_mean, moving_variance])\n # After initialization moving_mean == 0 and moving_variance == 1.\n self.assertAllClose(mean, [0] * channels)\n self.assertAllClose(variance, [1] * channels)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n barrier = tf.no_op(name='barrier')\n train_op = control_flow_ops.with_dependencies([barrier], output_train)\n # Before updates the outputs are different for train and eval.\n self.assertFalse(np.allclose(sess.run([output_train]),\n sess.run([output_eval])))\n for _ in range(10):\n sess.run([train_op])\n mean = moving_mean.eval()\n variance = moving_variance.eval()\n # After 10 updates with decay 0.1 moving_mean == expected_mean and\n # moving_variance == expected_var.\n self.assertAllClose(mean, expected_mean)\n if fused:\n # Add Bessel's correction\n moving_variance_corrected = moving_variance / correction_factor\n correct_moving_variance = state_ops.assign(moving_variance,\n moving_variance_corrected)\n sess.run(correct_moving_variance)\n self.assertAllClose(variance, expected_var)\n # After convergence output_train and output_eval should be the same.\n self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))\n\n def testReuseVarsDefault(self):\n self._testReuseVars(False)\n\n def testReuseVarsFused(self):\n self._testReuseVars(True)\n\n def _testIsTrainingVariable(self, fused, data_format='NHWC'):\n height, width = 2, 2\n batch_size = 10\n channels = 3\n np.random.seed(1)\n use_gpu = fused\n np.random.seed(1)\n with self.test_session(use_gpu=use_gpu) as sess:\n if data_format == 'NHWC':\n image_shape = (batch_size, height, width, channels)\n axis = (0, 1, 2)\n else:\n image_shape = (batch_size, channels, height, width)\n axis = (0, 2, 3)\n image_values = np.random.rand(*image_shape)\n expected_mean = np.mean(image_values, axis=axis)\n expected_var = np.var(image_values, axis=axis)\n if fused:\n # Add Bessel's correction\n expected_var, correction_factor = self._addBesselsCorrection(\n batch_size * height * width, expected_var)\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n is_training = tf.Variable(True)\n output = tf.contrib.layers.batch_norm(\n images,\n decay=0.1,\n is_training=is_training,\n fused=fused,\n data_format=data_format)\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n moving_mean = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_variance')[0]\n mean, variance = sess.run([moving_mean, moving_variance])\n # 
After initialization moving_mean == 0 and moving_variance == 1.\n self.assertAllClose(mean, [0] * channels)\n self.assertAllClose(variance, [1] * channels)\n # Before updates the outputs are different depending of is_training.\n output_true = sess.run([output], {is_training: True})\n output_false = sess.run([output], {is_training: False})\n self.assertFalse(np.allclose(output_true, output_false))\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n barrier = tf.no_op(name='barrier')\n train_op = control_flow_ops.with_dependencies([barrier], output)\n for _ in range(10):\n sess.run([train_op])\n mean = moving_mean.eval()\n variance = moving_variance.eval()\n # After 10 updates with decay 0.1 moving_mean == expected_mean and\n # moving_variance == expected_var.\n self.assertAllClose(mean, expected_mean)\n self.assertAllClose(variance, expected_var)\n # After updates to convergence the outputs don't depend on is_training.\n output_true = sess.run([output], {is_training: True})\n if fused:\n # Add Bessel's correction\n moving_variance_corrected = moving_variance / correction_factor\n correct_moving_variance = state_ops.assign(moving_variance,\n moving_variance_corrected)\n sess.run(correct_moving_variance)\n output_false = sess.run([output], {is_training: False})\n self.assertAllClose(output_true, output_false)\n\n def testIsTrainingVariableDefault(self):\n self._testIsTrainingVariable(False)\n\n def testIsTrainingVariableFusedNCHW(self):\n if tf.test.is_gpu_available():\n self._testIsTrainingVariable(True, data_format='NCHW')\n\n def testIsTrainingVariableFusedNHWC(self):\n self._testIsTrainingVariable(True, data_format='NHWC')\n\n def testNoUpdatesWhenIsTrainingFalse(self):\n height, width = 3, 3\n with self.test_session() as sess:\n image_shape = (10, height, width, 3)\n image_values = np.random.rand(*image_shape)\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n output = tf.contrib.layers.batch_norm(images,\n decay=0.1,\n is_training=False)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n # updates_ops are not added to UPDATE_OPS collection.\n self.assertEqual(len(update_ops), 0)\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n moving_mean = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_variance')[0]\n mean, variance = sess.run([moving_mean, moving_variance])\n # After initialization moving_mean == 0 and moving_variance == 1.\n self.assertAllClose(mean, [0] * 3)\n self.assertAllClose(variance, [1] * 3)\n # When is_training is False batch_norm doesn't update moving_vars.\n for _ in range(10):\n sess.run([output])\n self.assertAllClose(moving_mean.eval(), [0] * 3)\n self.assertAllClose(moving_variance.eval(), [1] * 3)\n\n def testNoneUpdatesCollectionNoTraining(self):\n height, width = 3, 3\n with self.test_session() as sess:\n image_shape = (10, height, width, 3)\n image_values = np.random.rand(*image_shape)\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n output = tf.contrib.layers.batch_norm(images,\n decay=0.1,\n updates_collections=None,\n is_training=False)\n # updates_ops are not added to UPDATE_OPS collection.\n self.assertEqual(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n moving_mean = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_mean')[0]\n moving_variance = 
tf.contrib.framework.get_variables(\n 'BatchNorm/moving_variance')[0]\n mean, variance = sess.run([moving_mean, moving_variance])\n # After initialization moving_mean == 0 and moving_variance == 1.\n self.assertAllClose(mean, [0] * 3)\n self.assertAllClose(variance, [1] * 3)\n # When is_training is False batch_norm doesn't update moving_vars.\n for _ in range(10):\n sess.run([output])\n self.assertAllClose(moving_mean.eval(), [0] * 3)\n self.assertAllClose(moving_variance.eval(), [1] * 3)\n\n def _testNoneUpdatesCollectionIsTrainingVariable(self,\n fused,\n data_format='NHWC'):\n height, width = 2, 2\n batch_size = 10\n channels = 3\n np.random.seed(1)\n use_gpu = fused\n with self.test_session(use_gpu=use_gpu) as sess:\n if data_format == 'NHWC':\n image_shape = (batch_size, height, width, channels)\n axis = (0, 1, 2)\n else:\n image_shape = (batch_size, channels, height, width)\n axis = (0, 2, 3)\n image_values = np.random.rand(*image_shape)\n expected_mean = np.mean(image_values, axis=axis)\n expected_var = np.var(image_values, axis=axis)\n if fused:\n # Add Bessel's correction\n expected_var, correction_factor = self._addBesselsCorrection(\n batch_size * height * width, expected_var)\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n is_training = tf.Variable(True)\n output = tf.contrib.layers.batch_norm(\n images,\n decay=0.1,\n updates_collections=None,\n is_training=is_training,\n fused=fused,\n data_format=data_format)\n # updates_ops are not added to UPDATE_OPS collection.\n self.assertEqual(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n moving_mean = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_variance')[0]\n mean, variance = sess.run([moving_mean, moving_variance])\n # After initialization moving_mean == 0 and moving_variance == 1.\n self.assertAllClose(mean, [0] * channels)\n self.assertAllClose(variance, [1] * channels)\n # When is_training is False batch_norm doesn't update moving_vars.\n for _ in range(10):\n sess.run([output], {is_training: False})\n self.assertAllClose(moving_mean.eval(), [0] * channels)\n self.assertAllClose(moving_variance.eval(), [1] * channels)\n # Before updates the outputs are different depending of is_training.\n output_true = sess.run([output], {is_training: True})\n output_false = sess.run([output], {is_training: False})\n self.assertFalse(np.allclose(output_true, output_false))\n # When is_training is True update moving_vars.\n for _ in range(10):\n sess.run([output], {is_training: True})\n # After 10 updates with decay 0.1 moving_mean == expected_mean and\n # moving_variance == expected_var.\n self.assertAllClose(moving_mean.eval(), expected_mean)\n self.assertAllClose(moving_variance.eval(), expected_var)\n # After updates to convergence the outputs don't depend on is_training.\n output_true = sess.run([output], {is_training: True})\n if fused:\n # Add Bessel's correction\n moving_variance_corrected = moving_variance / correction_factor\n correct_moving_variance = state_ops.assign(moving_variance,\n moving_variance_corrected)\n sess.run(correct_moving_variance)\n output_false = sess.run([output], {is_training: False})\n self.assertTrue(np.allclose(output_true, output_false))\n\n def testNoneUpdatesCollectionIsTrainingVariableDefault(self):\n self._testNoneUpdatesCollectionIsTrainingVariable(False)\n\n def 
testNoneUpdatesCollectionIsTrainingVariableFusedNCHW(self):\n if tf.test.is_gpu_available():\n self._testNoneUpdatesCollectionIsTrainingVariable(\n True, data_format='NCHW')\n\n def testNoneUpdatesCollectionIsTrainingVariableFusedNHWC(self):\n self._testNoneUpdatesCollectionIsTrainingVariable(True, data_format='NHWC')\n\n def _testTrainMovingVars(self, fused, data_format='NHWC'):\n # Test that the gradients are stable while the moving_mean is updated.\n # Since the moving_mean is used as shift to compute the tf.momments, the\n # gradients could diverge, this test checks that gradients remains stable\n # while the moving_mean is updated.\n height, width = 7, 7\n batch_size = 10\n channels = 32\n np.random.seed(1)\n use_gpu = fused\n with self.test_session(use_gpu=use_gpu) as sess:\n if data_format == 'NHWC':\n image_shape = (batch_size, height, width, channels)\n axis = (0, 1, 2)\n else:\n image_shape = (batch_size, channels, height, width)\n axis = (0, 2, 3)\n image_values = np.random.rand(*image_shape) + 2\n expected_mean = np.mean(image_values, axis=axis)\n expected_var = np.var(image_values, axis=axis)\n if fused:\n # Add Bessel's correction\n expected_var, _ = self._addBesselsCorrection(\n batch_size * height * width, expected_var)\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n output = tf.contrib.layers.batch_norm(\n images,\n decay=0.2,\n updates_collections=None,\n is_training=True,\n fused=fused,\n data_format=data_format)\n self.assertEqual(tf.get_collection(tf.GraphKeys.UPDATE_OPS), [])\n\n objective = tf.reduce_sum(output)\n\n [images_gradients] = tf.gradients(objective, images)\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n moving_mean = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_mean')[0]\n moving_variance = tf.contrib.framework.get_variables(\n 'BatchNorm/moving_variance')[0]\n mean, variance = sess.run([moving_mean, moving_variance])\n # After initialization moving_mean == 0 and moving_variance == 1.\n self.assertAllClose(mean, [0] * channels)\n self.assertAllClose(variance, [1] * channels)\n\n # Initial input gradients.\n images_gradients_value = sess.run(images_gradients)\n for _ in range(10):\n np_output, new_images_gradients = sess.run([output, images_gradients])\n # The outputs should be close to 0.0 mean and 1.0 variance\n self.assertAllClose(\n np.mean(\n np_output, axis=axis), [0] * channels, rtol=0.1, atol=0.1)\n self.assertAllClose(\n np.var(np_output, axis=axis), [1] * channels, rtol=0.1, atol=0.1)\n # The gradients should change slowly while updating moving_mean.\n max_diff = np.max(np.abs(images_gradients_value - new_images_gradients))\n self.assertGreaterEqual(max_diff, 0.0)\n self.assertLess(max_diff, 5e-5)\n self.assertAllClose(moving_mean.eval(), expected_mean)\n self.assertAllClose(moving_variance.eval(), expected_var)\n\n def testTrainMovingVarsDefault(self):\n self._testTrainMovingVars(False)\n\n def testTrainMovingVarsFusedNCHW(self):\n if tf.test.is_gpu_available():\n self._testTrainMovingVars(True, data_format='NCHW')\n\n def testTrainMovingVarsFusedNHWC(self):\n self._testTrainMovingVars(True, data_format='NHWC')\n\n def testCustomInitializer(self):\n height, width = 3, 3\n channels = 3\n with self.test_session() as sess:\n images = (np.ones((5, height, width, channels)) * 9.0).astype('f')\n beta = tf.constant_initializer((np.ones(channels) * 5.0).astype('f'))\n gamma = tf.constant_initializer((np.ones(channels) * 2.0).astype('f'))\n mean = tf.constant_initializer((np.ones(channels) * 
5.0).astype('f'))\n variance = tf.constant_initializer((np.ones(channels) * 4.0).astype('f'))\n output = tf.contrib.layers.batch_norm(images,\n is_training=False,\n scale=True,\n epsilon=0.0,\n param_initializers={\n 'beta': beta,\n 'gamma': gamma,\n 'moving_mean': mean,\n 'moving_variance': variance,\n })\n sess.run(tf.initialize_all_variables())\n outs = sess.run(output)\n self.assertAllClose(outs, images)\n\n\nclass LayerNormTest(tf.test.TestCase):\n\n def testUnknownShape(self):\n with tf.Graph().as_default() as g, self.test_session(g):\n inputs = tf.placeholder(dtype=tf.float32)\n with self.assertRaisesRegexp(ValueError, 'undefined rank'):\n tf.contrib.layers.layer_norm(inputs)\n\n def testUnknownLastDim(self):\n with tf.Graph().as_default() as g, self.test_session(g):\n inputs = tf.placeholder(dtype=tf.float32)\n inputs.set_shape(tf.TensorShape((5, 3, 3, None)))\n with self.assertRaisesRegexp(ValueError, 'undefined last dimension'):\n tf.contrib.layers.layer_norm(inputs)\n\n def testCreateOp(self):\n height, width = 3, 3\n with self.test_session():\n images = np.random.uniform(size=(5, height, width, 3))\n output = tf.contrib.layers.layer_norm(images)\n self.assertTrue(output.op.name.startswith('LayerNorm/batchnorm'))\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])\n\n def testCreateVariables(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n tf.contrib.layers.layer_norm(images)\n beta = tf.contrib.framework.get_variables_by_name('beta')[0]\n gamma = tf.contrib.framework.get_variables_by_name('gamma')[0]\n self.assertEqual(beta.op.name, 'LayerNorm/beta')\n self.assertEqual(gamma.op.name, 'LayerNorm/gamma')\n\n def testReuseVariables(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n tf.contrib.layers.layer_norm(images, scope='ln')\n tf.contrib.layers.layer_norm(images, scope='ln', reuse=True)\n beta = tf.contrib.framework.get_variables_by_name('beta')\n gamma = tf.contrib.framework.get_variables_by_name('gamma')\n self.assertEqual(len(beta), 1)\n self.assertEqual(len(gamma), 1)\n\n def testReuseVars(self):\n height, width = 3, 3\n with self.test_session() as sess:\n image_shape = (10, height, width, 3)\n image_values = np.random.rand(*image_shape)\n images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)\n output_train = tf.contrib.layers.layer_norm(images, scope='LN')\n output_eval = tf.contrib.layers.layer_norm(images,\n scope='LN',\n reuse=True)\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n # output_train and output_eval should be the same.\n self.assertAllClose(sess.run([output_train]), sess.run([output_eval]))\n\n def doOutputTest(self, input_shape):\n with self.test_session() as sess:\n input_values = np.random.rand(*input_shape)\n inputs = tf.constant(input_values, shape=input_shape, dtype=tf.float32)\n output_op = tf.contrib.layers.layer_norm(inputs, scope='LN')\n # Initialize all variables\n sess.run(tf.initialize_all_variables())\n # The mean and variance of the output should be close to 0 and 1\n # respectively.\n moments_axis = tuple([i for i in range(1, len(input_shape))])\n outputs = sess.run(output_op)\n expected_mean = np.zeros(input_shape[0])\n expected_var = np.ones(input_shape[0])\n mean = np.mean(outputs, axis=moments_axis)\n var = np.var(outputs, axis=moments_axis)\n tol = 1e-5\n self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)\n 
self.assertAllClose(var, expected_var, rtol=tol, atol=tol)\n\n def testOutput2DInput(self):\n self.doOutputTest((10, 300))\n\n def testOutput4DInput(self):\n self.doOutputTest((100, 10, 10, 3))\n\nclass MaxPool2DTest(tf.test.TestCase):\n\n def testInvalidDataFormat(self):\n height, width = 3, 6\n images = np.random.uniform(size=(5, height, width, 3))\n with self.assertRaisesRegexp(\n ValueError, 'data_format has to be either NCHW or NHWC.'):\n tf.contrib.layers.max_pool2d(images, [3, 3], data_format='CHWN')\n\n def testCreateMaxPool(self):\n height, width = 3, 6\n images = np.random.uniform(size=(5, height, width, 3)).astype(np.float32)\n output = tf.contrib.layers.max_pool2d(images, [3, 3])\n self.assertEqual(output.op.name, 'MaxPool2D/MaxPool')\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])\n\n def testCreateMaxPoolNCHW(self):\n height, width = 3, 6\n images = np.random.uniform(size=(5, 3, height, width)).astype(np.float32)\n output = tf.contrib.layers.max_pool2d(images, [3, 3], data_format='NCHW')\n self.assertEquals(output.op.name, 'MaxPool2D/MaxPool')\n self.assertListEqual(output.get_shape().as_list(), [5, 3, 1, 2])\n\n def testCollectOutputs(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.max_pool2d(images, [3, 3],\n outputs_collections='outputs')\n output_collected = tf.get_collection('outputs')[0]\n self.assertEqual(output_collected.alias, 'MaxPool2D')\n self.assertEqual(output_collected, output)\n\n def testCreateSquareMaxPool(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.max_pool2d(images, 3)\n self.assertEqual(output.op.name, 'MaxPool2D/MaxPool')\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 2, 3])\n\n def testCreateMaxPoolWithScope(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.max_pool2d(images, [3, 3], scope='pool1')\n self.assertEqual(output.op.name, 'pool1/MaxPool')\n\n def testCreateMaxPoolWithSamePadding(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.max_pool2d(images, [3, 3], padding='SAME')\n self.assertListEqual(output.get_shape().as_list(), [5, 2, 3, 3])\n\n def testCreateMaxPoolWithSamePaddingNCHW(self):\n height, width = 3, 6\n images = tf.random_uniform((5, 3, height, width), seed=1)\n output = tf.contrib.layers.max_pool2d(images, [3, 3], padding='SAME',\n data_format='NCHW')\n self.assertListEqual(output.get_shape().as_list(), [5, 3, 2, 3])\n\n def testCreateMaxPoolStrideWithSamePadding(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.max_pool2d(images, [3, 3], stride=1,\n padding='SAME')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])\n\n def testGlobalMaxPool(self):\n height, width = 3, 6\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.max_pool2d(images, images.get_shape()[1:3],\n stride=1)\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])\n\n\nclass OneHotEncodingTest(tf.test.TestCase):\n\n def testOneHotEncodingCreate(self):\n with self.test_session():\n labels = np.array([0, 1, 2])\n output = tf.contrib.layers.one_hot_encoding(labels, num_classes=3)\n self.assertEqual(output.op.name, 'OneHotEncoding/one_hot')\n self.assertListEqual(output.get_shape().as_list(), [3, 3])\n\n def 
testCollectOutputs(self):\n with self.test_session():\n labels = tf.constant([0, 1, 2])\n output = tf.contrib.layers.one_hot_encoding(labels, num_classes=3,\n outputs_collections='outputs')\n c_output = tf.get_collection('outputs')[0]\n self.assertEqual(c_output.alias, 'OneHotEncoding')\n self.assertEqual(c_output, output)\n\n def testOneHotEncoding(self):\n with self.test_session():\n labels = tf.constant([0, 1, 2])\n one_hot_labels = tf.constant([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\n output = tf.contrib.layers.one_hot_encoding(labels, num_classes=3)\n self.assertAllClose(output.eval(), one_hot_labels.eval())\n\n def testOneHotEncodingInt32(self):\n with self.test_session():\n labels = tf.constant([0, 1, 2], dtype=tf.int32)\n one_hot_labels = tf.constant([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\n output = tf.contrib.layers.one_hot_encoding(labels, num_classes=3)\n self.assertAllClose(output.eval(), one_hot_labels.eval())\n\n\nclass RepeatTests(tf.test.TestCase):\n\n def testRepeat(self):\n height, width = 3, 3\n with self.test_session():\n images = np.random.uniform(size=(5, height, width, 3))\n output = tf.contrib.layers.repeat(images, 3,\n tf.contrib.layers.conv2d, 32, [3, 3])\n self.assertEqual(output.op.name, 'Repeat/convolution_3/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])\n\n def testRepeatWithScope(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1, name='images')\n output = tf.contrib.layers.repeat(images, 3,\n tf.contrib.layers.conv2d, 32, [3, 3],\n scope='conv1')\n self.assertEqual(output.op.name, 'conv1/conv1_3/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 32])\n\n\nclass SeparableConv2dTest(tf.test.TestCase):\n\n def testCreateConvInt32(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform(\n (5, height, width, 3), seed=1, dtype=tf.int32, maxval=12345)\n with self.assertRaisesRegexp(TypeError, 'non-floating point type'):\n tf.contrib.layers.separable_conv2d(images, 32, [3, 3], 2)\n\n def testCreateConvFloat32(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform(\n (5, height, width, 3), seed=1, dtype=tf.float32)\n output = tf.contrib.layers.separable_conv2d(images, 32, [3, 3], 2)\n self.assertEqual(output.op.name, 'SeparableConv2d/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])\n\n def testCreateConvFloat64(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform(\n (5, height, width, 3), seed=1, dtype=tf.float64)\n output = tf.contrib.layers.separable_conv2d(images, 32, [3, 3], 2)\n self.assertEqual(output.op.name, 'SeparableConv2d/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])\n\n def testCreateDepthwiseConv(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.separable_conv2d(images, None, [3, 3], 2)\n self.assertEqual(output.op.name, 'SeparableConv2d/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, height, width, 6])\n\n def testCreateConvCreatesWeightsAndBiasesVars(self):\n height, width = 3, 3\n images = tf.random_uniform((5, height, width, 3), seed=1)\n with self.test_session():\n self.assertFalse(\n tf.contrib.framework.get_variables('conv1/depthwise_weights'))\n self.assertFalse(\n tf.contrib.framework.get_variables('conv1/pointwise_weights'))\n 
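# No bias variable should exist either until the separable_conv2d layer below is actually constructed.\n      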
self.assertFalse(tf.contrib.framework.get_variables('conv1/biases'))\n tf.contrib.layers.separable_conv2d(images, 32, [3, 3], 4, scope='conv1')\n self.assertTrue(\n tf.contrib.framework.get_variables('conv1/depthwise_weights'))\n self.assertTrue(\n tf.contrib.framework.get_variables('conv1/pointwise_weights'))\n self.assertTrue(tf.contrib.framework.get_variables('conv1/biases'))\n\n def testCreateDepthwiseConvCreatesWeightsAndBiasesVars(self):\n height, width = 3, 3\n images = tf.random_uniform((5, height, width, 3), seed=1)\n with self.test_session():\n self.assertFalse(\n tf.contrib.framework.get_variables('conv1/depthwise_weights'))\n self.assertFalse(\n tf.contrib.framework.get_variables('conv1/pointwise_weights'))\n self.assertFalse(tf.contrib.framework.get_variables('conv1/biases'))\n tf.contrib.layers.separable_conv2d(images, None, [3, 3], 4, scope='conv1')\n self.assertTrue(\n tf.contrib.framework.get_variables('conv1/depthwise_weights'))\n self.assertFalse(\n tf.contrib.framework.get_variables('conv1/pointwise_weights'))\n self.assertTrue(tf.contrib.framework.get_variables('conv1/biases'))\n\n def testCreateConvWithScope(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.separable_conv2d(\n images, 32, [3, 3], 6, scope='conv1')\n self.assertEqual(output.op.name, 'conv1/Relu')\n\n def testCreateConvWithoutActivation(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.separable_conv2d(\n images, 32, [3, 3], 8, activation_fn=None)\n self.assertEqual(output.op.name, 'SeparableConv2d/BiasAdd')\n\n def testCreateConvValid(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.separable_conv2d(\n images, 32, [3, 3], 2, padding='VALID')\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])\n\n def testCreateDepthwiseConvValid(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n output = tf.contrib.layers.separable_conv2d(\n images, None, [3, 3], 2, padding='VALID')\n self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 6])\n\n def testCreateConvWithWeightDecay(self):\n tf.set_random_seed(0)\n height, width = 3, 3\n with self.test_session() as sess:\n images = tf.random_uniform((5, height, width, 3), seed=1)\n regularizer = tf.contrib.layers.l2_regularizer(0.01)\n tf.contrib.layers.separable_conv2d(\n images, 32, [3, 3], 2, weights_regularizer=regularizer)\n self.assertEqual(\n len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 2)\n weight_decay = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]\n self.assertEqual(\n weight_decay.op.name,\n 'SeparableConv2d/depthwise_weights/Regularizer/l2_regularizer')\n sess.run(tf.initialize_all_variables())\n self.assertLessEqual(sess.run(weight_decay), 0.05)\n weight_decay = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[1]\n self.assertEqual(\n weight_decay.op.name,\n 'SeparableConv2d/pointwise_weights/Regularizer/l2_regularizer')\n self.assertLessEqual(sess.run(weight_decay), 0.05)\n\n def testReuseConvWithWeightDecay(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1)\n regularizer = tf.contrib.layers.l2_regularizer(0.01)\n tf.contrib.layers.separable_conv2d(\n images, 32, [3, 3], 2,\n 
weights_regularizer=regularizer,\n scope='conv1')\n self.assertEqual(\n len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 2)\n tf.contrib.layers.separable_conv2d(\n images, 32, [3, 3], 2,\n weights_regularizer=regularizer,\n scope='conv1', reuse=True)\n self.assertEqual(\n len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 2)\n\n def testConvWithBatchNorm(self):\n height, width = 3, 3\n batch_norm_collection = 'moving_vars'\n normalizer_params = {\n 'variables_collections': {\n 'beta': [batch_norm_collection],\n 'gamma': [batch_norm_collection],\n 'moving_mean': [batch_norm_collection],\n 'moving_variance': [batch_norm_collection],\n }\n }\n images = tf.random_uniform((5, height, width, 3), seed=1)\n net = tf.contrib.layers.separable_conv2d(\n images, 8, [3, 3], 2,\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n scope='conv1')\n net = tf.contrib.layers.separable_conv2d(\n net, 32, [3, 3], 2,\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n scope='conv2')\n self.assertEqual(len(tf.get_collection(batch_norm_collection)), 6)\n self.assertEqual(\n len(tf.contrib.framework.get_variables('conv1/BatchNorm')), 3)\n self.assertEqual(\n len(tf.contrib.framework.get_variables('conv2/BatchNorm')), 3)\n\n def testConvWithInputsViaPlaceHolder(self):\n height, width = 3, 3\n images_placeholder = tf.placeholder(tf.float32, shape=(None, None, None, 3))\n net = tf.contrib.layers.separable_conv2d(\n images_placeholder, 8, [3, 3], 2,\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params={},\n scope='conv1')\n init_op = tf.initialize_all_variables()\n with self.test_session() as sess:\n images = np.random.rand(5, height, width, 3)\n sess.run(init_op)\n sess.run(net, feed_dict={images_placeholder: images})\n\n\nclass SoftmaxTests(tf.test.TestCase):\n\n def setUp(self):\n self.low = 1 / (1 + math.e)\n self.high = math.e / (1 + math.e)\n\n def testSoftmax2D(self):\n logits = tf.constant([[0.0, 1], [1, 1], [1, 0]])\n prediction = tf.contrib.layers.softmax(logits)\n exp_prediction = np.array([[self.low, self.high],\n [0.5, 0.5],\n [self.high, self.low]])\n\n with self.test_session() as sess:\n prediction = sess.run(prediction)\n self.assertAllClose(exp_prediction, prediction)\n\n def testSoftmax3D(self):\n logits = np.ones((2, 3, 2))\n logits[0, 0, 0] = 0\n logits[1, 1, 1] = 0\n logits = tf.constant(logits)\n exp_prediction = 0.5 * np.ones((2, 3, 2))\n exp_prediction[0, 0, 0] = self.low\n exp_prediction[0, 0, 1] = self.high\n exp_prediction[1, 1, 0] = self.high\n exp_prediction[1, 1, 1] = self.low\n\n prediction = tf.contrib.layers.softmax(logits)\n with self.test_session() as sess:\n prediction = sess.run(prediction)\n self.assertAllClose(exp_prediction, prediction)\n\n def testSoftmax3DUnknownSize(self):\n logits = np.ones((2, 3, 2))\n logits[0, 0, 0] = 0\n logits[1, 1, 1] = 0\n logit_placeholder = tf.placeholder(tf.float32, shape=(None, None, 2))\n feed_dict = {logit_placeholder: logits}\n exp_prediction = 0.5 * np.ones((2, 3, 2))\n exp_prediction[0, 0, 0] = self.low\n exp_prediction[0, 0, 1] = self.high\n exp_prediction[1, 1, 0] = self.high\n exp_prediction[1, 1, 1] = self.low\n\n prediction = tf.contrib.layers.softmax(logit_placeholder)\n with self.test_session() as sess:\n prediction = sess.run(prediction, feed_dict=feed_dict)\n self.assertAllClose(exp_prediction, prediction)\n\n def testSoftmaxUndefinedNthDimension(self):\n logits = tf.placeholder(tf.float32)\n with self.assertRaises(ValueError):\n 
tf.contrib.layers.softmax(logits)\n\n\nclass StackTests(tf.test.TestCase):\n\n def testStackFullyConnected(self):\n height, width = 3, 3\n with self.test_session():\n images = np.random.uniform(size=(5, height * width * 3))\n output = tf.contrib.layers.stack(images,\n tf.contrib.layers.fully_connected,\n [10, 20, 30])\n self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, 30])\n\n def testStackRelu(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height * width * 3), seed=1, name='images')\n output = tf.contrib.layers.stack(images,\n tf.contrib.layers.relu,\n [10, 20, 30])\n self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, 30])\n\n def testStackConvolution2d(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1, name='images')\n output = tf.contrib.layers.stack(images,\n tf.contrib.layers.convolution2d,\n [10, 20, 30],\n kernel_size=[3, 3],\n padding='SAME')\n self.assertEqual(output.op.name, 'Stack/convolution_3/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])\n\n def testStackWithScope(self):\n height, width = 3, 3\n with self.test_session():\n images = tf.random_uniform((5, height, width, 3), seed=1, name='images')\n output = tf.contrib.layers.stack(images,\n tf.contrib.layers.convolution2d,\n [10, 20, 30],\n kernel_size=[3, 3],\n padding='SAME',\n scope='conv1')\n self.assertEqual(output.op.name, 'conv1/conv1_3/Relu')\n self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 30])\n\n\nclass UnitNormTests(tf.test.TestCase):\n\n def testUnitNormWithRandomMatrix(self):\n height, width = 2, 3\n\n for dim in range(3):\n tf.set_random_seed(0)\n image = tf.random_uniform((height, width, 3))\n output = tf.contrib.layers.unit_norm(image, dim=dim, epsilon=1e-6)\n norms = tf.sqrt(tf.reduce_sum(tf.square(output), reduction_indices=dim))\n\n shape = [height, width, 3]\n del shape[dim]\n expected = np.ones(shape)\n\n with self.test_session():\n actual = norms.eval()\n self.assertAllClose(expected, actual, 1e-4, 1e-4)\n\n def testDimEqualToRankRaisesError(self):\n height, width = 2, 3\n\n tf.set_random_seed(0)\n image = tf.random_uniform((height, width, 3))\n\n with self.assertRaises(ValueError):\n tf.contrib.layers.unit_norm(image, dim=3, epsilon=1e-6)\n\n def testUnknownRankRaisesError(self):\n image = tf.placeholder(tf.float32)\n with self.assertRaises(ValueError):\n tf.contrib.layers.unit_norm(image, dim=2)\n\n def testKnownRankUnknownDimsSucceeds(self):\n height, width = 2, 3\n\n for dim in range(3):\n placeholder_value = np.ones((height, width, 3))\n shape = [height, width, 3]\n del shape[dim]\n expected = np.ones(shape)\n\n image = tf.placeholder(tf.float32, (None, None, 3))\n output = tf.contrib.layers.unit_norm(image, dim=dim, epsilon=1e-6)\n norms = tf.sqrt(tf.reduce_sum(tf.square(output), reduction_indices=dim))\n\n with self.test_session():\n actual = norms.eval({image: placeholder_value})\n self.assertAllClose(expected, actual, 1e-4, 1e-4)\n\n\n# TODO(b/28426988): Add separate tests for non-legacy versions.\nclass LegacyFullyConnectedTest(tf.test.TestCase):\n\n def setUp(self):\n tf.test.TestCase.setUp(self)\n tf.set_random_seed(1234)\n self.input = tf.constant([[1., 2., 3.], [-4., 15., -6.]])\n self.input_3_dim_arr = [[[1., 1.1, 1.2],\n [2., 2.1, 2.2],\n [3., 3.1, 3.2],\n [4., 4.1, 4.2]],\n [[5., 5.1, 5.2],\n [6., 
6.1, 6.2],\n [7., 7.1, 7.2],\n [8., 8.1, 8.2]]]\n self.input_3_dim = tf.constant(self.input_3_dim_arr)\n\n assert not tf.get_collection(tf.GraphKeys.SUMMARIES)\n\n def _fully_connected_basic_use(self, x, num_output_units, expected_shape):\n output = tf.contrib.layers.legacy_fully_connected(x,\n num_output_units,\n activation_fn=tf.nn.relu)\n\n with tf.Session() as sess:\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess.run(output)\n\n tf.initialize_all_variables().run()\n out_value, shape_value = sess.run([output, tf.shape(output)])\n\n self.assertAllClose(shape_value, expected_shape)\n self.assertEqual(output.get_shape().as_list(), expected_shape)\n self.assertTrue(np.all(out_value >= 0),\n 'Relu should have all values >= 0.')\n\n self.assertEqual(2,\n len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))\n self.assertEqual(0,\n len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))\n\n def test_fully_connected_basic_use(self):\n self._fully_connected_basic_use(self.input, 8, [2, 8])\n\n def test_fully_connected_basic_use_multi_dim(self):\n for last_dim in [1, 3]:\n self.setUp()\n self._fully_connected_basic_use(\n self.input_3_dim, last_dim, [2, 4, last_dim])\n\n def test_relu_layer_basic_use(self):\n output = tf.contrib.layers.legacy_relu(self.input, 8)\n\n with tf.Session() as sess:\n with self.assertRaises(tf.errors.FailedPreconditionError):\n sess.run(output)\n\n tf.initialize_all_variables().run()\n out_value = sess.run(output)\n\n self.assertEqual(output.get_shape().as_list(), [2, 8])\n self.assertTrue(np.all(out_value >= 0),\n 'Relu should have all values >= 0.')\n\n self.assertEqual(2,\n len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))\n self.assertEqual(0,\n len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))\n\n def test_variable_reuse_with_scope(self):\n with tf.variable_scope('test') as vs:\n output1 = tf.contrib.layers.legacy_relu(self.input, 8)\n output2 = tf.contrib.layers.legacy_relu(self.input, 8)\n\n with tf.variable_scope(vs, reuse=True):\n output3 = tf.contrib.layers.legacy_relu(self.input, 8)\n\n with tf.Session() as sess:\n tf.initialize_all_variables().run()\n out_value1, out_value2, out_value3 = sess.run([output1, output2, output3])\n\n self.assertFalse(np.allclose(out_value1, out_value2))\n self.assertAllClose(out_value1, out_value3)\n\n def test_variable_reuse_with_template(self):\n tmpl1 = tf.make_template('test',\n tf.contrib.layers.legacy_fully_connected,\n num_output_units=8)\n output1 = tmpl1(self.input)\n output2 = tmpl1(self.input)\n\n with tf.Session() as sess:\n tf.initialize_all_variables().run()\n out_value1, out_value2 = sess.run([output1, output2])\n self.assertAllClose(out_value1, out_value2)\n\n def _custom_initializers(self, x, num_output_units, expected_outputs):\n output = tf.contrib.layers.legacy_relu(\n x,\n num_output_units,\n weight_init=tf.constant_initializer(2.0),\n bias_init=tf.constant_initializer(1.0))\n\n with tf.Session() as sess:\n tf.initialize_all_variables().run()\n out_value = sess.run(output)\n\n self.assertAllClose(np.array(expected_outputs), out_value)\n\n def test_custom_initializers(self):\n self._custom_initializers(\n self.input, 2, [[13.0, 13.0], [11.0, 11.0]])\n\n def test_custom_initializers_multi_dim(self):\n self._custom_initializers(self.input_3_dim,\n 2,\n [[[7.6, 7.6],\n [13.6, 13.6],\n [19.6, 19.6],\n [25.6, 25.6]],\n [[31.6, 31.6],\n [37.6, 37.6],\n [43.6, 43.6],\n [49.6, 49.6]]])\n\n def test_custom_collections(self):\n tf.contrib.layers.legacy_relu(self.input,\n 
2,\n weight_collections=['unbiased'],\n bias_collections=['biased'],\n output_collections=['output'])\n\n self.assertEqual(1, len(tf.get_collection('unbiased')))\n self.assertEqual(1, len(tf.get_collection('biased')))\n self.assertEqual(1, len(tf.get_collection('output')))\n self.assertEqual(2, len(tf.get_collection(tf.GraphKeys.VARIABLES)))\n\n def test_all_custom_collections(self):\n tf.contrib.layers.legacy_relu(self.input,\n 2,\n weight_collections=['unbiased', 'all'],\n bias_collections=['biased', 'all'])\n\n self.assertEqual(1, len(tf.get_collection('unbiased')))\n self.assertEqual(1, len(tf.get_collection('biased')))\n self.assertEqual(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES),\n tf.get_collection('all'))\n\n def test_no_bias(self):\n tf.contrib.layers.legacy_relu(self.input, 2, bias_init=None)\n self.assertEqual(1, len(tf.get_collection(tf.GraphKeys.VARIABLES)))\n\n def test_no_activation(self):\n y = tf.contrib.layers.legacy_fully_connected(self.input, 2)\n self.assertEqual(2, len(tf.get_collection(tf.GraphKeys.VARIABLES)))\n self.assertEqual('BiasAdd', y.op.type)\n\n def test_no_activation_no_bias(self):\n y = tf.contrib.layers.legacy_fully_connected(self.input, 2, bias_init=None)\n self.assertEqual(1, len(tf.get_collection(tf.GraphKeys.VARIABLES)))\n self.assertEqual('MatMul', y.op.type)\n\n def test_regularizer(self):\n cnt = [0]\n tensor = tf.constant(5.0)\n def test_fn(_):\n cnt[0] += 1\n return tensor\n\n tf.contrib.layers.legacy_fully_connected(self.input,\n 2,\n weight_regularizer=test_fn)\n\n self.assertEqual([tensor],\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n self.assertEqual(1, cnt[0])\n\n def test_regularizer_with_multiple_variables(self):\n cnt = [0]\n tensor = tf.constant(5.0)\n def test_fn(_):\n cnt[0] += 1\n return tensor\n\n tf.contrib.layers.legacy_fully_connected(self.input,\n 2,\n weight_regularizer=test_fn)\n tf.contrib.layers.legacy_fully_connected(self.input,\n 2,\n weight_regularizer=test_fn)\n\n self.assertEqual([tensor, tensor],\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n self.assertEqual(2, cnt[0])\n\n def test_regularizer_with_variable_reuse(self):\n cnt = [0]\n tensor = tf.constant(5.0)\n def test_fn(_):\n cnt[0] += 1\n return tensor\n\n with tf.variable_scope('test') as vs:\n tf.contrib.layers.legacy_fully_connected(self.input,\n 2,\n weight_regularizer=test_fn)\n\n with tf.variable_scope(vs, reuse=True):\n tf.contrib.layers.legacy_fully_connected(self.input,\n 2,\n weight_regularizer=test_fn)\n\n self.assertEqual([tensor],\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n self.assertEqual(1, cnt[0])\n\n def test_empty_x_results_in_empty_output(self):\n # Empty x is common if someone masks their input with tf.boolean_mask in\n # order to drop missing entries, and in a particular batch all entries are\n # missing.\n with self.test_session():\n x = np.array([]).reshape(0, 3)\n self.assertEqual(0, tf.size(x).eval())\n y = tf.contrib.layers.legacy_fully_connected(x,\n 2,\n activation_fn=tf.nn.softmax)\n tf.initialize_all_variables().run()\n expected_y = np.array([]).reshape(0, 2)\n np.testing.assert_array_equal(expected_y, y.eval())\n\n def test_shapes_variable_first_dim(self):\n # first dimension is not known statically.\n x = tf.placeholder(tf.float32, shape=[None, 4, 3])\n y = tf.contrib.layers.legacy_fully_connected(x, 1)\n # in the output we still only know the 2nd and 3rd dimensions statically.\n self.assertEqual(y.get_shape().as_list(), [None, 4, 1])\n with self.test_session() as sess:\n 
tf.initialize_all_variables().run()\n # we can feed in input with first dimension 2\n shape_value = sess.run(tf.shape(y), feed_dict={x: self.input_3_dim_arr})\n self.assertAllClose(shape_value, [2, 4, 1])\n # we can feed in input with first dimension 1\n shape_value = sess.run(tf.shape(y),\n feed_dict={x: [self.input_3_dim_arr[0]]})\n self.assertAllClose(shape_value, [1, 4, 1])\n # we cannot feed in input with inconsistent dimensions\n with self.assertRaises(ValueError):\n sess.run(tf.shape(y), feed_dict={x: [[[]]]})\n\n def _unknown_dim_invalid_input(self, last_dim):\n x = tf.placeholder(tf.float32, shape=[3, last_dim])\n tf.contrib.layers.legacy_fully_connected(x, 2, activation_fn=None)\n\n def test_known_dim_valid_input(self):\n self._unknown_dim_invalid_input(last_dim=3)\n\n def test_unknown_dim_invalid_input(self):\n with self.assertRaisesRegexp(\n ValueError, 'last dimension of x must be known but is None'):\n self._unknown_dim_invalid_input(last_dim=None)\n\n def test_1d_invalid_input(self):\n with self.test_session():\n with self.assertRaisesRegexp(ValueError,\n 'rank of x must be at least 2 not: 1'):\n x = tf.constant([[]], shape=[0])\n tf.contrib.layers.legacy_fully_connected(x,\n 2,\n activation_fn=tf.nn.softmax)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Create threads to run multiple enqueue ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\n\nfrom tensorflow.core.protobuf import queue_runner_pb2\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import tf_logging as logging\n\n\nclass QueueRunner(object):\n \"\"\"Holds a list of enqueue operations for a queue, each to be run in a thread.\n\n Queues are a convenient TensorFlow mechanism to compute tensors\n asynchronously using multiple threads. For example in the canonical 'Input\n Reader' setup one set of threads generates filenames in a queue; a second set\n of threads read records from the files, processes them, and enqueues tensors\n on a second queue; a third set of threads dequeues these input records to\n construct batches and runs them through training operations.\n\n There are several delicate issues when running multiple threads that way:\n closing the queues in sequence as the input is exhausted, correctly catching\n and reporting exceptions, etc.\n\n The `QueueRunner`, combined with the `Coordinator`, helps handle these issues.\n \"\"\"\n\n def __init__(self, queue=None, enqueue_ops=None, close_op=None,\n cancel_op=None, queue_closed_exception_types=None,\n queue_runner_def=None, import_scope=None):\n \"\"\"Create a QueueRunner.\n\n On construction the `QueueRunner` adds an op to close the queue. That op\n will be run if the enqueue ops raise exceptions.\n\n When you later call the `create_threads()` method, the `QueueRunner` will\n create one thread for each op in `enqueue_ops`. Each thread will run its\n enqueue op in parallel with the other threads. The enqueue ops do not have\n to all be the same op, but it is expected that they all enqueue tensors in\n `queue`.\n\n Args:\n queue: A `Queue`.\n enqueue_ops: List of enqueue ops to run in threads later.\n close_op: Op to close the queue. Pending enqueue ops are preserved.\n cancel_op: Op to close the queue and cancel pending enqueue ops.\n queue_closed_exception_types: Optional tuple of Exception types that\n indicate that the queue has been closed when raised during an enqueue\n operation. Defaults to `(tf.errors.OutOfRangeError,)`. Another common\n case includes `(tf.errors.OutOfRangeError, tf.errors.CancelledError)`,\n when some of the enqueue ops may dequeue from other Queues.\n queue_runner_def: Optional `QueueRunnerDef` protocol buffer. If specified,\n recreates the QueueRunner from its contents. `queue_runner_def` and the\n other arguments are mutually exclusive.\n import_scope: Optional `string`. Name scope to add. 
Only used when\n initializing from protocol buffer.\n\n Raises:\n ValueError: If both `queue_runner_def` and `queue` are both specified.\n ValueError: If `queue` or `enqueue_ops` are not provided when not\n restoring from `queue_runner_def`.\n \"\"\"\n if queue_runner_def:\n if queue or enqueue_ops:\n raise ValueError(\"queue_runner_def and queue are mutually exclusive.\")\n self._init_from_proto(queue_runner_def,\n import_scope=import_scope)\n else:\n self._init_from_args(\n queue=queue, enqueue_ops=enqueue_ops,\n close_op=close_op, cancel_op=cancel_op,\n queue_closed_exception_types=queue_closed_exception_types)\n # Protect the count of runs to wait for.\n self._lock = threading.Lock()\n self._runs = 0\n # List of exceptions raised by the running threads.\n self._exceptions_raised = []\n\n def _init_from_args(self, queue=None, enqueue_ops=None, close_op=None,\n cancel_op=None, queue_closed_exception_types=None):\n \"\"\"Create a QueueRunner from arguments.\n\n Args:\n queue: A `Queue`.\n enqueue_ops: List of enqueue ops to run in threads later.\n close_op: Op to close the queue. Pending enqueue ops are preserved.\n cancel_op: Op to close the queue and cancel pending enqueue ops.\n queue_closed_exception_types: Tuple of exception types, which indicate\n the queue has been safely closed.\n\n Raises:\n ValueError: If `queue` or `enqueue_ops` are not provided when not\n restoring from `queue_runner_def`.\n TypeError: If `queue_closed_exception_types` is provided, but is not\n a non-empty tuple of error types (subclasses of `tf.errors.OpError`).\n \"\"\"\n if not queue or not enqueue_ops:\n raise ValueError(\"Must provide queue and enqueue_ops.\")\n self._queue = queue\n self._enqueue_ops = enqueue_ops\n self._close_op = close_op\n self._cancel_op = cancel_op\n if queue_closed_exception_types is not None:\n if (not isinstance(queue_closed_exception_types, tuple)\n or not queue_closed_exception_types\n or not all(issubclass(t, errors.OpError)\n for t in queue_closed_exception_types)):\n raise TypeError(\n \"queue_closed_exception_types, when provided, \"\n \"must be a non-empty list of tf.error types, but saw: %s\"\n % queue_closed_exception_types)\n self._queue_closed_exception_types = queue_closed_exception_types\n # Close when no more will be produced, but pending enqueues should be\n # preserved.\n if self._close_op is None:\n self._close_op = self._queue.close()\n # Close and cancel pending enqueues since there was an error and we want\n # to unblock everything so we can cleanly exit.\n if self._cancel_op is None:\n self._cancel_op = self._queue.close(cancel_pending_enqueues=True)\n if not self._queue_closed_exception_types:\n self._queue_closed_exception_types = (errors.OutOfRangeError,)\n else:\n self._queue_closed_exception_types = tuple(\n self._queue_closed_exception_types)\n\n def _init_from_proto(self, queue_runner_def, import_scope=None):\n \"\"\"Create a QueueRunner from `QueueRunnerDef`.\n\n Args:\n queue_runner_def: Optional `QueueRunnerDef` protocol buffer.\n import_scope: Optional `string`. 
Name scope to add.\n \"\"\"\n assert isinstance(queue_runner_def, queue_runner_pb2.QueueRunnerDef)\n g = ops.get_default_graph()\n self._queue = g.as_graph_element(\n ops.prepend_name_scope(queue_runner_def.queue_name, import_scope))\n self._enqueue_ops = [g.as_graph_element(\n ops.prepend_name_scope(op, import_scope))\n for op in queue_runner_def.enqueue_op_name]\n self._close_op = g.as_graph_element(ops.prepend_name_scope(\n queue_runner_def.close_op_name, import_scope))\n self._cancel_op = g.as_graph_element(ops.prepend_name_scope(\n queue_runner_def.cancel_op_name, import_scope))\n self._queue_closed_exception_types = tuple(\n errors.exception_type_from_error_code(code)\n for code in queue_runner_def.queue_closed_exception_types)\n # Legacy support for old QueueRunnerDefs created before this field\n # was added.\n if not self._queue_closed_exception_types:\n self._queue_closed_exception_types = (errors.OutOfRangeError,)\n\n @property\n def queue(self):\n return self._queue\n\n @property\n def enqueue_ops(self):\n return self._enqueue_ops\n\n @property\n def close_op(self):\n return self._close_op\n\n @property\n def cancel_op(self):\n return self._cancel_op\n\n @property\n def queue_closed_exception_types(self):\n return self._queue_closed_exception_types\n\n @property\n def exceptions_raised(self):\n \"\"\"Exceptions raised but not handled by the `QueueRunner` threads.\n\n Exceptions raised in queue runner threads are handled in one of two ways\n depending on whether or not a `Coordinator` was passed to\n `create_threads()`:\n\n * With a `Coordinator`, exceptions are reported to the coordinator and\n forgotten by the `QueueRunner`.\n * Without a `Coordinator`, exceptions are captured by the `QueueRunner` and\n made available in this `exceptions_raised` property.\n\n Returns:\n A list of Python `Exception` objects. The list is empty if no exception\n was captured. 
(No exceptions are captured when using a Coordinator.)\n \"\"\"\n return self._exceptions_raised\n\n @property\n def name(self):\n \"\"\"The string name of the underlying Queue.\"\"\"\n return self._queue.name\n\n # pylint: disable=broad-except\n def _run(self, sess, enqueue_op, coord=None):\n \"\"\"Execute the enqueue op in a loop, close the queue in case of error.\n\n Args:\n sess: A Session.\n enqueue_op: The Operation to run.\n coord: Optional Coordinator object for reporting errors and checking\n for stop conditions.\n \"\"\"\n if coord:\n coord.register_thread(threading.current_thread())\n decremented = False\n try:\n while True:\n if coord and coord.should_stop():\n break\n try:\n sess.run(enqueue_op)\n except self._queue_closed_exception_types: # pylint: disable=catching-non-exception\n # This exception indicates that a queue was closed.\n with self._lock:\n self._runs -= 1\n decremented = True\n if self._runs == 0:\n try:\n sess.run(self._close_op)\n except Exception as e:\n # Intentionally ignore errors from close_op.\n logging.vlog(1, \"Ignored exception: %s\", str(e))\n return\n except Exception as e:\n # This catches all other exceptions.\n if coord:\n coord.request_stop(e)\n else:\n logging.error(\"Exception in QueueRunner: %s\", str(e))\n with self._lock:\n self._exceptions_raised.append(e)\n raise\n finally:\n # Make sure we account for all terminations: normal or errors.\n if not decremented:\n with self._lock:\n self._runs -= 1\n\n def _close_on_stop(self, sess, cancel_op, coord):\n \"\"\"Close the queue when the Coordinator requests stop.\n\n Args:\n sess: A Session.\n cancel_op: The Operation to run.\n coord: Coordinator.\n \"\"\"\n coord.register_thread(threading.current_thread())\n coord.wait_for_stop()\n try:\n sess.run(cancel_op)\n except Exception as e:\n # Intentionally ignore errors from cancel_op.\n logging.vlog(1, \"Ignored exception: %s\", str(e))\n # pylint: enable=broad-except\n\n def create_threads(self, sess, coord=None, daemon=False, start=False):\n \"\"\"Create threads to run the enqueue ops.\n\n This method requires a session in which the graph was launched. It creates\n a list of threads, optionally starting them. There is one thread for each\n op passed in `enqueue_ops`.\n\n The `coord` argument is an optional coordinator, that the threads will use\n to terminate together and report exceptions. If a coordinator is given,\n this method starts an additional thread to close the queue when the\n coordinator requests a stop.\n\n This method may be called again as long as all threads from a previous call\n have stopped.\n\n Args:\n sess: A `Session`.\n coord: Optional `Coordinator` object for reporting errors and checking\n stop conditions.\n daemon: Boolean. If `True` make the threads daemon threads.\n start: Boolean. If `True` starts the threads. 
If `False` the\n caller must call the `start()` method of the returned threads.\n\n Returns:\n A list of threads.\n\n Raises:\n RuntimeError: If threads from a previous call to `create_threads()` are\n still running.\n \"\"\"\n with self._lock:\n if self._runs > 0:\n # Already started: no new threads to return.\n return []\n self._runs = len(self._enqueue_ops)\n self._exceptions_raised = []\n\n ret_threads = [threading.Thread(target=self._run, args=(sess, op, coord))\n for op in self._enqueue_ops]\n if coord:\n ret_threads.append(threading.Thread(target=self._close_on_stop,\n args=(sess, self._cancel_op, coord)))\n for t in ret_threads:\n if daemon:\n t.daemon = True\n if start:\n t.start()\n return ret_threads\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts this `QueueRunner` to a `QueueRunnerDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `QueueRunnerDef` protocol buffer, or `None` if the `Variable` is not in\n the specified name scope.\n \"\"\"\n if (export_scope is None or\n self.queue.name.startswith(export_scope)):\n queue_runner_def = queue_runner_pb2.QueueRunnerDef()\n queue_runner_def.queue_name = ops.strip_name_scope(\n self.queue.name, export_scope)\n for enqueue_op in self.enqueue_ops:\n queue_runner_def.enqueue_op_name.append(\n ops.strip_name_scope(enqueue_op.name, export_scope))\n queue_runner_def.close_op_name = ops.strip_name_scope(\n self.close_op.name, export_scope)\n queue_runner_def.cancel_op_name = ops.strip_name_scope(\n self.cancel_op.name, export_scope)\n queue_runner_def.queue_closed_exception_types.extend([\n errors.error_code_from_exception_type(cls)\n for cls in self._queue_closed_exception_types])\n return queue_runner_def\n else:\n return None\n\n @staticmethod\n def from_proto(queue_runner_def, import_scope=None):\n \"\"\"Returns a `QueueRunner` object created from `queue_runner_def`.\"\"\"\n return QueueRunner(queue_runner_def=queue_runner_def,\n import_scope=import_scope)\n\n\ndef add_queue_runner(qr, collection=ops.GraphKeys.QUEUE_RUNNERS):\n \"\"\"Adds a `QueueRunner` to a collection in the graph.\n\n When building a complex model that uses many queues it is often difficult to\n gather all the queue runners that need to be run. This convenience function\n allows you to add a queue runner to a well known collection in the graph.\n\n The companion method `start_queue_runners()` can be used to start threads for\n all the collected queue runners.\n\n Args:\n qr: A `QueueRunner`.\n collection: A `GraphKey` specifying the graph collection to add\n the queue runner to. Defaults to `GraphKeys.QUEUE_RUNNERS`.\n \"\"\"\n ops.add_to_collection(collection, qr)\n\n\ndef start_queue_runners(sess=None, coord=None, daemon=True, start=True,\n collection=ops.GraphKeys.QUEUE_RUNNERS):\n \"\"\"Starts all queue runners collected in the graph.\n\n This is a companion method to `add_queue_runner()`. It just starts\n threads for all queue runners collected in the graph. It returns\n the list of all threads.\n\n Args:\n sess: `Session` used to run the queue ops. Defaults to the\n default session.\n coord: Optional `Coordinator` for coordinating the started threads.\n daemon: Whether the threads should be marked as `daemons`, meaning\n they don't block program exit.\n start: Set to `False` to only create the threads, not start them.\n collection: A `GraphKey` specifying the graph collection to\n get the queue runners from. 
Defaults to `GraphKeys.QUEUE_RUNNERS`.\n\n Returns:\n A list of threads.\n \"\"\"\n if sess is None:\n sess = ops.get_default_session()\n if not sess:\n raise ValueError(\"Cannot start queue runners: No default session is \"\n \"registered. Use `with sess.as_default()` or pass an \"\n \"explicit session to tf.start_queue_runners(sess=sess)\")\n with sess.graph.as_default():\n threads = []\n for qr in ops.get_collection(collection):\n threads.extend(qr.create_threads(sess, coord=coord, daemon=daemon,\n start=start))\n return threads\n\n\nops.register_proto_function(ops.GraphKeys.QUEUE_RUNNERS,\n proto_type=queue_runner_pb2.QueueRunnerDef,\n to_proto=QueueRunner.to_proto,\n from_proto=QueueRunner.from_proto)\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for tensorflow.python.client.graph_util.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import gen_state_ops\nfrom tensorflow.python.ops import math_ops # pylint: disable=unused-import\n\n\n# Utility device function to use for testing\ndef test_device_func_pin_variable_to_cpu(op):\n if op.device:\n return op.device\n return \"/cpu:0\" if op.node_def.op == \"Variable\" else op.device\n\n\nclass DeviceFunctionsTest(tf.test.TestCase):\n\n def testTwoDeviceFunctions(self):\n with ops.Graph().as_default() as g:\n var_0 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32, \n name=\"var_0\", container=\"\", shared_name=\"\")\n with g.device(test_device_func_pin_variable_to_cpu):\n var_1 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32, \n name=\"var_1\", container=\"\", shared_name=\"\")\n var_2 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32, \n name=\"var_2\", container=\"\", shared_name=\"\")\n var_3 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32, \n name=\"var_3\", container=\"\", shared_name=\"\")\n with g.device(test_device_func_pin_variable_to_cpu):\n var_4 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32, \n name=\"var_4\", container=\"\", shared_name=\"\")\n with g.device(\"/device:GPU:0\"):\n var_5 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32, \n name=\"var_5\", container=\"\", shared_name=\"\")\n var_6 = gen_state_ops._variable(shape=[1], dtype=dtypes.float32, \n name=\"var_6\", container=\"\", shared_name=\"\")\n\n self.assertDeviceEqual(var_0.device, None)\n self.assertDeviceEqual(var_1.device, \"/device:CPU:0\")\n self.assertDeviceEqual(var_2.device, None)\n self.assertDeviceEqual(var_3.device, None)\n self.assertDeviceEqual(var_4.device, \"/device:CPU:0\")\n self.assertDeviceEqual(var_5.device, \"/device:GPU:0\")\n self.assertDeviceEqual(var_6.device, \"/device:CPU:0\")\n\n def testNestedDeviceFunctions(self):\n with tf.Graph().as_default():\n var_0 = tf.Variable(0)\n with tf.device(test_device_func_pin_variable_to_cpu):\n var_1 = tf.Variable(1)\n with tf.device(lambda op: \"/gpu:0\"):\n var_2 = tf.Variable(2)\n with tf.device(\"/gpu:0\"): # Implicit merging device function.\n var_3 = tf.Variable(3)\n\n self.assertDeviceEqual(var_0.device, None)\n self.assertDeviceEqual(var_1.device, \"/device:CPU:0\")\n self.assertDeviceEqual(var_2.device, \"/device:GPU:0\")\n self.assertDeviceEqual(var_3.device, \"/device:GPU:0\")\n\n def 
testExplicitDevice(self):\n with ops.Graph().as_default() as g:\n const_0 = constant_op.constant(5.0)\n with g.device(\"/device:GPU:0\"):\n const_1 = constant_op.constant(5.0)\n with g.device(\"/device:GPU:1\"):\n const_2 = constant_op.constant(5.0)\n with g.device(\"/device:CPU:0\"):\n const_3 = constant_op.constant(5.0)\n with g.device(\"/device:CPU:1\"):\n const_4 = constant_op.constant(5.0)\n with g.device(\"/job:ps\"):\n const_5 = constant_op.constant(5.0)\n\n self.assertDeviceEqual(const_0.device, None)\n self.assertDeviceEqual(const_1.device, \"/device:GPU:0\")\n self.assertDeviceEqual(const_2.device, \"/device:GPU:1\")\n self.assertDeviceEqual(const_3.device, \"/device:CPU:0\")\n self.assertDeviceEqual(const_4.device, \"/device:CPU:1\")\n self.assertDeviceEqual(const_5.device, \"/job:ps\")\n\n def testDefaultDevice(self):\n with ops.Graph().as_default() as g, g.device(\n test_device_func_pin_variable_to_cpu):\n with g.device(\"/job:ps\"):\n const_0 = constant_op.constant(5.0)\n with g.device(\"/device:GPU:0\"):\n const_1 = constant_op.constant(5.0)\n with g.device(\"/device:GPU:1\"):\n const_2 = constant_op.constant(5.0)\n with g.device(\"/device:CPU:0\"):\n const_3 = constant_op.constant(5.0)\n with g.device(\"/device:CPU:1\"):\n const_4 = constant_op.constant(5.0)\n with g.device(\"/replica:0\"):\n const_5 = constant_op.constant(5.0)\n\n self.assertDeviceEqual(const_0.device, \"/job:ps\")\n self.assertDeviceEqual(const_1.device, \"/device:GPU:0\")\n self.assertDeviceEqual(const_2.device, \"/device:GPU:1\")\n self.assertDeviceEqual(const_3.device, \"/device:CPU:0\")\n self.assertDeviceEqual(const_4.device, \"/device:CPU:1\")\n self.assertDeviceEqual(const_5.device, \"/replica:0\")\n\n def testExtractSubGraph(self):\n graph_def = tf.GraphDef()\n n1 = graph_def.node.add()\n n1.name = \"n1\"\n n1.input.extend([\"n5\"])\n n2 = graph_def.node.add()\n n2.name = \"n2\"\n # Take the first output of the n1 node as the input.\n n2.input.extend([\"n1:0\"])\n n3 = graph_def.node.add()\n n3.name = \"n3\"\n # Add a control input (which isn't really needed by the kernel, but\n # rather to enforce execution order between nodes).\n n3.input.extend([\"^n2\"])\n n4 = graph_def.node.add()\n n4.name = \"n4\"\n\n # It is fine to have a loops in the graph as well.\n n5 = graph_def.node.add()\n n5.name = \"n5\"\n n5.input.extend([\"n1\"])\n\n sub_graph = graph_util.extract_sub_graph(graph_def, [\"n3\"])\n self.assertEqual(\"n1\", sub_graph.node[0].name)\n self.assertEqual(\"n2\", sub_graph.node[1].name)\n self.assertEqual(\"n3\", sub_graph.node[2].name)\n self.assertEqual(\"n5\", sub_graph.node[3].name)\n\n def testConvertVariablesToConsts(self):\n with tf.Graph().as_default():\n variable_node = tf.Variable(1.0, name=\"variable_node\")\n _ = tf.Variable(1.0, name=\"unused_variable_node\")\n output_node = tf.mul(variable_node, 2.0, name=\"output_node\")\n with tf.Session() as sess:\n init = tf.initialize_variables([variable_node])\n sess.run(init)\n output = sess.run(output_node)\n self.assertNear(2.0, output, 0.00001)\n variable_graph_def = sess.graph.as_graph_def()\n # First get the constant_graph_def when variable_names_whitelist is set,\n # note that if variable_names_whitelist is not set an error will be\n # thrown because unused_variable_node is not initialized.\n constant_graph_def = graph_util.convert_variables_to_constants(\n sess, variable_graph_def, [\"output_node\"],\n variable_names_whitelist=set([\"variable_node\"]))\n\n # Then initialize the unused variable, and get another\n # 
constant_graph_def when variable_names_whitelist is not set.\n sess.run(tf.initialize_all_variables())\n constant_graph_def_without_variable_whitelist = (\n graph_util.convert_variables_to_constants(\n sess, variable_graph_def, [\"output_node\"]))\n\n # The unused variable should be cleared so the two graphs should be\n # equivalent.\n self.assertEqual(str(constant_graph_def),\n str(constant_graph_def_without_variable_whitelist))\n\n # Now we make sure the variable is now a constant, and that the graph still\n # produces the expected result.\n with tf.Graph().as_default():\n _ = tf.import_graph_def(constant_graph_def, name=\"\")\n self.assertEqual(4, len(constant_graph_def.node))\n for node in constant_graph_def.node:\n self.assertNotEqual(\"Variable\", node.op)\n with tf.Session() as sess:\n output_node = sess.graph.get_tensor_by_name(\"output_node:0\")\n output = sess.run(output_node)\n self.assertNear(2.0, output, 0.00001)\n\n def create_node_def(self, op, name, inputs):\n new_node = tf.NodeDef()\n new_node.op = op\n new_node.name = name\n for input_name in inputs:\n new_node.input.extend([input_name])\n return new_node\n\n def create_constant_node_def(self, name, value, dtype, shape=None):\n node = self.create_node_def(\"Const\", name, [])\n self.set_attr_dtype(node, \"dtype\", dtype)\n self.set_attr_tensor(node, \"value\", value, dtype, shape)\n return node\n\n def set_attr_dtype(self, node, key, value):\n node.attr[key].CopyFrom(tf.AttrValue(type=value.as_datatype_enum))\n\n def set_attr_tensor(self, node, key, value, dtype, shape=None):\n node.attr[key].CopyFrom(tf.AttrValue(\n tensor=tensor_util.make_tensor_proto(value,\n dtype=dtype,\n shape=shape)))\n\n def testRemoveTrainingNodes(self):\n a_constant_name = \"a_constant\"\n b_constant_name = \"b_constant\"\n a_check_name = \"a_check\"\n b_check_name = \"b_check\"\n a_identity_name = \"a_identity\"\n b_identity_name = \"b_identity\"\n add_name = \"add\"\n graph_def = tf.GraphDef()\n a_constant = self.create_constant_node_def(a_constant_name,\n value=1,\n dtype=tf.float32,\n shape=[])\n graph_def.node.extend([a_constant])\n a_check_node = self.create_node_def(\"CheckNumerics\", a_check_name,\n [a_constant_name])\n graph_def.node.extend([a_check_node])\n a_identity_node = self.create_node_def(\"Identity\", a_identity_name,\n [a_constant_name,\n \"^\" + a_check_name])\n graph_def.node.extend([a_identity_node])\n b_constant = self.create_constant_node_def(b_constant_name,\n value=1,\n dtype=tf.float32,\n shape=[])\n graph_def.node.extend([b_constant])\n b_check_node = self.create_node_def(\"CheckNumerics\", b_check_name,\n [b_constant_name])\n graph_def.node.extend([b_check_node])\n b_identity_node = self.create_node_def(\"Identity\", b_identity_name,\n [b_constant_name,\n \"^\" + b_check_name])\n graph_def.node.extend([b_identity_node])\n add_node = self.create_node_def(\"Add\", add_name,\n [a_identity_name,\n b_identity_name])\n self.set_attr_dtype(add_node, \"T\", tf.float32)\n graph_def.node.extend([add_node])\n\n expected_output = tf.GraphDef()\n a_constant = self.create_constant_node_def(a_constant_name,\n value=1,\n dtype=tf.float32,\n shape=[])\n expected_output.node.extend([a_constant])\n b_constant = self.create_constant_node_def(b_constant_name,\n value=1,\n dtype=tf.float32,\n shape=[])\n expected_output.node.extend([b_constant])\n add_node = self.create_node_def(\"Add\", add_name,\n [a_constant_name,\n b_constant_name])\n self.set_attr_dtype(add_node, \"T\", tf.float32)\n expected_output.node.extend([add_node])\n\n output 
= graph_util.remove_training_nodes(graph_def)\n self.assertProtoEquals(expected_output, output)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"pandas.Series",
"tensorflow.nn.max_pool",
"pandas.DataFrame",
"tensorflow.contrib.layers.convolution2d",
"tensorflow.contrib.learn.models.logistic_regression",
"tensorflow.contrib.learn.ops.categorical_variable",
"tensorflow.argmax",
"tensorflow.app.run",
"tensorflow.one_hot",
"tensorflow.contrib.learn.datasets.load_dataset",
"tensorflow.nn.relu",
"tensorflow.reduce_max",
"tensorflow.transpose",
"tensorflow.contrib.framework.get_global_step",
"tensorflow.expand_dims",
"tensorflow.contrib.learn.Estimator",
"tensorflow.variable_scope",
"tensorflow.contrib.learn.preprocessing.VocabularyProcessor",
"sklearn.metrics.accuracy_score"
],
[
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.Variable",
"tensorflow.python.tools.freeze_graph.freeze_graph",
"tensorflow.test.main",
"tensorflow.mul",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.GraphDef"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"numpy.all",
"tensorflow.contrib.framework.get_variables_by_name",
"tensorflow.contrib.layers.flatten",
"tensorflow.make_template",
"numpy.where",
"tensorflow.contrib.layers.convolution2d",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.square",
"numpy.zeros",
"tensorflow.TensorShape",
"tensorflow.placeholder",
"numpy.asmatrix",
"tensorflow.contrib.layers.batch_norm",
"numpy.array",
"tensorflow.fixed_size_partitioner",
"tensorflow.contrib.framework.arg_scope",
"tensorflow.ones",
"tensorflow.contrib.layers.softmax",
"tensorflow.SparseTensor",
"numpy.testing.assert_array_equal",
"numpy.random.uniform",
"tensorflow.random_uniform",
"numpy.vstack",
"numpy.asarray",
"numpy.var",
"tensorflow.contrib.layers.conv2d_transpose",
"tensorflow.Graph",
"numpy.allclose",
"numpy.reshape",
"tensorflow.initialize_all_variables",
"tensorflow.test.TestCase.setUp",
"tensorflow.contrib.layers.avg_pool2d",
"numpy.random.rand",
"tensorflow.contrib.layers.repeat",
"tensorflow.no_op",
"tensorflow.set_random_seed",
"tensorflow.contrib.layers.bias_add",
"tensorflow.contrib.layers.legacy_relu",
"tensorflow.contrib.layers.pool",
"numpy.ones",
"tensorflow.contrib.layers.stack",
"tensorflow.contrib.layers.legacy_fully_connected",
"tensorflow.get_variable_scope",
"tensorflow.contrib.layers.one_hot_encoding",
"tensorflow.nn.conv2d_transpose",
"numpy.mean",
"tensorflow.python.ops.state_ops.assign",
"numpy.random.randint",
"tensorflow.Variable",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.contrib.layers.convolution2d_transpose",
"tensorflow.test.main",
"tensorflow.contrib.layers.separable_conv2d",
"tensorflow.contrib.layers.python.layers.layers._inner_flatten",
"tensorflow.shape",
"tensorflow.size",
"tensorflow.constant",
"tensorflow.contrib.framework.get_variables",
"tensorflow.assign",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.constant_initializer",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.variable_scope",
"tensorflow.get_collection",
"tensorflow.gradients",
"tensorflow.name_scope",
"tensorflow.to_float",
"numpy.isclose",
"tensorflow.contrib.layers.dropout",
"tensorflow.contrib.layers.conv2d",
"numpy.abs",
"numpy.random.seed",
"tensorflow.contrib.layers.max_pool2d",
"tensorflow.contrib.layers.layer_norm",
"numpy.prod",
"tensorflow.contrib.layers.unit_norm",
"tensorflow.test.is_gpu_available"
],
[
"tensorflow.python.framework.ops.register_proto_function",
"tensorflow.core.protobuf.queue_runner_pb2.QueueRunnerDef",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.framework.errors.error_code_from_exception_type",
"tensorflow.python.framework.ops.prepend_name_scope",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.framework.errors.exception_type_from_error_code",
"tensorflow.python.framework.ops.strip_name_scope"
],
[
"tensorflow.python.framework.graph_util.remove_training_nodes",
"tensorflow.device",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.Variable",
"tensorflow.test.main",
"tensorflow.initialize_all_variables",
"tensorflow.Session",
"tensorflow.AttrValue",
"tensorflow.python.ops.gen_state_ops._variable",
"tensorflow.python.framework.graph_util.extract_sub_graph",
"tensorflow.initialize_variables",
"tensorflow.python.framework.tensor_util.make_tensor_proto",
"tensorflow.python.framework.ops.Graph",
"tensorflow.mul",
"tensorflow.python.framework.graph_util.convert_variables_to_constants",
"tensorflow.GraphDef",
"tensorflow.NodeDef",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
filipesouzacit/RL-with-MCTS | [
"cca1a8a79e5973a30b423c45a090e2473975c189"
] | [
"trainer.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thus Jan 07 15:54:13 2021\n@author: Filipe Souza\n\nBased on Josh Varty (https://github.com/JoshVarty/AlphaZeroSimple)\n\"\"\"\nimport numpy as np\nfrom random import shuffle\nimport keras\n\nfrom gym_go import gogame\nfrom monte_carlo_tree_search import MCTS\n\nclass Trainer:\n\n def __init__(self, game, model, args):\n self.game = game\n self.model = model\n self.args = args\n self.mcts = MCTS(self.game, self.model, self.args)\n\n def exceute_episode(self):\n\n train_examples = []\n current_player = 1\n state = gogame.init_state(self.args['boardSize'])\n\n while True:\n #print(\"while True\")\n canonical_board = gogame.canonical_form(state)\n\n self.mcts = MCTS(self.game, self.model, self.args)\n root = self.mcts.run(self.model, canonical_board, to_play=1)\n\n action_probs = [0 for _ in range((self.args['boardSize']* self.args['boardSize'])+1)]\n for k, v in root.children.items():\n action_probs[k] = v.visit_count\n\n action_probs = action_probs / np.sum(action_probs)\n train_examples.append((canonical_board, current_player, action_probs))\n\n action = root.select_action(temperature=1)\n state = gogame.next_state(state, action, canonical=False)\n current_player = - current_player\n reward = gogame.winning(state)*current_player if gogame.game_ended(state) else None \n\n if reward is not None:\n ret = []\n for hist_state, hist_current_player, hist_action_probs in train_examples:\n # [Board, currentPlayer, actionProbabilities, Reward]\n tfBoard = np.array([hist_state[0],hist_state[1],hist_state[3]]).transpose().tolist()\n #ret.append(np.array([tfBoard,tfBoard, hist_action_probs, reward * ((-1) ** (hist_current_player != current_player))]))\n ret.append((tfBoard,hist_action_probs, reward * ((-1) ** (hist_current_player != current_player))))\n return ret\n\n def learn(self):\n for i in range(1, self.args['numIters'] + 1):\n\n print(\"numIters: {}/{}\".format(i, self.args['numIters']))\n\n train_examples = []\n\n for eps in range(self.args['numEps']):\n print(\"numEps: {}/{}\".format(eps, self.args['numEps']))\n iteration_train_examples = self.exceute_episode()\n train_examples.extend(iteration_train_examples)\n\n shuffle(train_examples)\n self.train(train_examples)\n\n def train(self, trainD):\n \n # Define the checkpoint\n checkpoint = keras.callbacks.ModelCheckpoint(self.args['checkpointPath'], monitor=\"val_loss\",\n mode=\"min\", save_best_only=True, verbose=0)\n\n # train the network\n print(\"Training network...\")\n \n x = [i[0] for i in trainD]\n x = np.array(x)\n \n y1 = [i[1] for i in trainD]\n y2 = [i[2] for i in trainD]\n y1 = np.array(y1)\n y2 = np.array(y2)\n \n history = self.model.model.fit(x,y={\"action_output\": y1, \"Value_output\": y2}, \n validation_split=0.2,\n batch_size=self.args['batchSize'], epochs=self.args['epochs'], \n verbose=1, callbacks=[checkpoint])\n \n # print accurary of the best epoch\n self.model.model.load_weights(self.args['checkpointPath'])\n \n"
] | [
[
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lemon-Nation/PyLMDI | [
"54d15ec44b84bd84b960003b1fd6690057240565"
] | [
"Numercial_Examples/Examples_China.py"
] | [
"\n\n\n\n# =============================================================================\n# Step1: Input\n# =============================================================================\nimport numpy as np\nfrom PyLMDI import PyLMDI\n\nif __name__=='__main__':\n \n #--- Step1: Input\n Ct = 794.6119504871361 # Carbon emission from China's commercial buildings in 2018\n C0 = 761.984276581356 # Carbon emission from China's commercial buildings in 2017\n \n Pt = 1395.38 # Population size in 2018\n P0 = 1390.08 # in 2017 \n gt = 64.52073987 \n g0 = 59.04367375\n st = 0.521570193\n s0 = 0.51892765\n it = 0.002743568\n i0 = 0.002876626\n et = 3.053397862\n e0 = 3.004500526\n kt = 2.02\n k0 = 2.07\n \n \n Ct,C0 = [Ct],[C0]\n \n Xt = np.array([Pt,gt,st,it,et,kt]).reshape([-1,1])\n X0 = np.array([P0,g0,s0,i0,e0,k0]).reshape([-1,1])\n \n #--- Step2-4: LMDI decomposition analysis\n \n LMDI = PyLMDI(Ct,C0,Xt,X0)\n ans = LMDI.Add()\n \n \n # --- Step 5: Output\n \n print(\"The change of carbon emission of China's commercial buildings from 2017 to 2018 is: \",ans[0])\n \n print(\"The various driving forces contribute as follows:\")\n \n print(\"P: \",ans[1])\n print(\"g: \",ans[2])\n print(\"s: \",ans[3])\n print(\"i: \",ans[4])\n print(\"e: \",ans[5])\n print(\"K: \",ans[6])"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HuangHaoyu1997/NRI | [
"e0cd1ef5e168db19cd904eabfd369a65238b5d07"
] | [
"utils.py"
] | [
"import numpy as np\nimport torch\nfrom torch.utils.data.dataset import TensorDataset\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\ndef my_softmax(input, axis=1):\n trans_input = input.transpose(axis, 0).contiguous()\n soft_max_1d = F.softmax(trans_input)\n return soft_max_1d.transpose(axis, 0)\n\n\ndef binary_concrete(logits, tau=1, hard=False, eps=1e-10):\n y_soft = binary_concrete_sample(logits, tau=tau, eps=eps)\n if hard:\n y_hard = (y_soft > 0.5).float()\n y = Variable(y_hard.data - y_soft.data) + y_soft\n else:\n y = y_soft\n return y\n\n\ndef binary_concrete_sample(logits, tau=1, eps=1e-10):\n logistic_noise = sample_logistic(logits.size(), eps=eps)\n if logits.is_cuda:\n logistic_noise = logistic_noise.cuda()\n y = logits + Variable(logistic_noise)\n return F.sigmoid(y / tau)\n\n\ndef sample_logistic(shape, eps=1e-10):\n uniform = torch.rand(shape).float()\n return torch.log(uniform + eps) - torch.log(1 - uniform + eps)\n\n\ndef sample_gumbel(shape, eps=1e-10):\n \"\"\"\n NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3\n\n Sample from Gumbel(0, 1)\n\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,\n (MIT license)\n \"\"\"\n U = torch.rand(shape).float()\n return - torch.log(eps - torch.log(U + eps))\n\n\ndef gumbel_softmax_sample(logits, tau=1, eps=1e-10):\n \"\"\"\n NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3\n\n Draw a sample from the Gumbel-Softmax distribution\n\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb\n (MIT license)\n \"\"\"\n gumbel_noise = sample_gumbel(logits.size(), eps=eps)\n if logits.is_cuda:\n gumbel_noise = gumbel_noise.cuda()\n y = logits + Variable(gumbel_noise)\n return my_softmax(y / tau, axis=-1)\n\n\ndef gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):\n \"\"\"\n NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3\n\n Sample from the Gumbel-Softmax distribution and optionally discretize.\n Args:\n logits: [batch_size, n_class] unnormalized log-probs\n tau: non-negative scalar temperature\n hard: if True, take argmax, but differentiate w.r.t. 
soft sample y\n Returns:\n [batch_size, n_class] sample from the Gumbel-Softmax distribution.\n If hard=True, then the returned sample will be one-hot, otherwise it will\n be a probability distribution that sums to 1 across classes\n\n Constraints:\n - this implementation only works on batch_size x num_features tensor for now\n\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,\n (MIT license)\n \"\"\"\n y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)\n if hard:\n shape = logits.size()\n _, k = y_soft.data.max(-1)\n # this bit is based on\n # https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5\n y_hard = torch.zeros(*shape)\n if y_soft.is_cuda:\n y_hard = y_hard.cuda()\n y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)\n # this cool bit of code achieves two things:\n # - makes the output value exactly one-hot (since we add then\n # subtract y_soft value)\n # - makes the gradient equal to y_soft gradient (since we strip\n # all other gradients)\n y = Variable(y_hard - y_soft.data) + y_soft\n else:\n y = y_soft\n return y\n\n\ndef binary_accuracy(output, labels):\n preds = output > 0.5\n correct = preds.type_as(labels).eq(labels).double()\n correct = correct.sum()\n return correct / len(labels)\n\n\ndef load_data(batch_size=1, suffix=''):\n loc_train = np.load('data/loc_train' + suffix + '.npy')\n vel_train = np.load('data/vel_train' + suffix + '.npy')\n edges_train = np.load('data/edges_train' + suffix + '.npy')\n\n loc_valid = np.load('data/loc_valid' + suffix + '.npy')\n vel_valid = np.load('data/vel_valid' + suffix + '.npy')\n edges_valid = np.load('data/edges_valid' + suffix + '.npy')\n\n loc_test = np.load('data/loc_test' + suffix + '.npy')\n vel_test = np.load('data/vel_test' + suffix + '.npy')\n edges_test = np.load('data/edges_test' + suffix + '.npy')\n\n # [num_samples, num_timesteps, num_dims, num_atoms]\n num_atoms = loc_train.shape[3] # 质点的数量\n\n loc_max = loc_train.max()\n loc_min = loc_train.min()\n vel_max = vel_train.max()\n vel_min = vel_train.min()\n\n # Normalize to [-1, 1]\n loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1\n vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1\n\n loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1\n vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1\n\n loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1\n vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1\n\n # Reshape to: [num_sims, num_atoms, num_timesteps, num_dims], e.g. 
[50000, 5, 49, 2]\n loc_train = np.transpose(loc_train, [0, 3, 1, 2])\n vel_train = np.transpose(vel_train, [0, 3, 1, 2])\n feat_train = np.concatenate([loc_train, vel_train], axis=3) # [50000, 5, 49, 4]\n edges_train = np.reshape(edges_train, [-1, num_atoms ** 2]) # [50000, 25]\n edges_train = np.array((edges_train + 1) / 2, dtype=np.int64) # float -> long \n\n loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])\n vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])\n feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)\n edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])\n edges_valid = np.array((edges_valid + 1) / 2, dtype=np.int64)\n\n loc_test = np.transpose(loc_test, [0, 3, 1, 2])\n vel_test = np.transpose(vel_test, [0, 3, 1, 2])\n feat_test = np.concatenate([loc_test, vel_test], axis=3)\n edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])\n edges_test = np.array((edges_test + 1) / 2, dtype=np.int64)\n\n feat_train = torch.FloatTensor(feat_train) # feature就是location和velocity向量concat\n edges_train = torch.LongTensor(edges_train)\n feat_valid = torch.FloatTensor(feat_valid)\n edges_valid = torch.LongTensor(edges_valid)\n feat_test = torch.FloatTensor(feat_test)\n edges_test = torch.LongTensor(edges_test)\n\n # Exclude self edges\n off_diag_idx = np.ravel_multi_index(\n np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)), # 对角线0元素,其余为1,np.where输出非零元素坐标\n [num_atoms, num_atoms]) # 把对角线元素的index去掉,返回剩下的index\n edges_train = edges_train[:, off_diag_idx] # 将edge邻接矩阵中所有的对角线元素都去掉\n edges_valid = edges_valid[:, off_diag_idx]\n edges_test = edges_test[:, off_diag_idx]\n\n train_data = TensorDataset(feat_train, edges_train)\n valid_data = TensorDataset(feat_valid, edges_valid)\n test_data = TensorDataset(feat_test, edges_test)\n\n train_data_loader = DataLoader(train_data, batch_size=batch_size)\n valid_data_loader = DataLoader(valid_data, batch_size=batch_size)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min\n\n\ndef load_kuramoto_data(batch_size=1, suffix=''):\n feat_train = np.load('data/feat_train' + suffix + '.npy')\n edges_train = np.load('data/edges_train' + suffix + '.npy')\n feat_valid = np.load('data/feat_valid' + suffix + '.npy')\n edges_valid = np.load('data/edges_valid' + suffix + '.npy')\n feat_test = np.load('data/feat_test' + suffix + '.npy')\n edges_test = np.load('data/edges_test' + suffix + '.npy')\n\n # [num_sims, num_atoms, num_timesteps, num_dims]\n num_atoms = feat_train.shape[1]\n\n # Normalize each feature dim. 
individually\n feat_max = feat_train.max(0).max(0).max(0)\n feat_min = feat_train.min(0).min(0).min(0)\n\n feat_max = np.expand_dims(np.expand_dims(np.expand_dims(feat_max, 0), 0), 0)\n feat_min = np.expand_dims(np.expand_dims(np.expand_dims(feat_min, 0), 0), 0)\n\n # Normalize to [-1, 1]\n feat_train = (feat_train - feat_min) * 2 / (feat_max - feat_min) - 1\n feat_valid = (feat_valid - feat_min) * 2 / (feat_max - feat_min) - 1\n feat_test = (feat_test - feat_min) * 2 / (feat_max - feat_min) - 1\n\n # Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]\n edges_train = np.reshape(edges_train, [-1, num_atoms ** 2])\n edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])\n edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])\n\n feat_train = torch.FloatTensor(feat_train)\n edges_train = torch.LongTensor(edges_train)\n feat_valid = torch.FloatTensor(feat_valid)\n edges_valid = torch.LongTensor(edges_valid)\n feat_test = torch.FloatTensor(feat_test)\n edges_test = torch.LongTensor(edges_test)\n\n # Exclude self edges\n off_diag_idx = np.ravel_multi_index(\n np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),\n [num_atoms, num_atoms])\n edges_train = edges_train[:, off_diag_idx]\n edges_valid = edges_valid[:, off_diag_idx]\n edges_test = edges_test[:, off_diag_idx]\n\n train_data = TensorDataset(feat_train, edges_train)\n valid_data = TensorDataset(feat_valid, edges_valid)\n test_data = TensorDataset(feat_test, edges_test)\n\n train_data_loader = DataLoader(train_data, batch_size=batch_size)\n valid_data_loader = DataLoader(valid_data, batch_size=batch_size)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n return train_data_loader, valid_data_loader, test_data_loader\n\n\ndef load_kuramoto_data_old(batch_size=1, suffix=''):\n feat_train = np.load('data/old_kuramoto/feat_train' + suffix + '.npy')\n edges_train = np.load('data/old_kuramoto/edges_train' + suffix + '.npy')\n feat_valid = np.load('data/old_kuramoto/feat_valid' + suffix + '.npy')\n edges_valid = np.load('data/old_kuramoto/edges_valid' + suffix + '.npy')\n feat_test = np.load('data/old_kuramoto/feat_test' + suffix + '.npy')\n edges_test = np.load('data/old_kuramoto/edges_test' + suffix + '.npy')\n\n # [num_sims, num_atoms, num_timesteps, num_dims]\n num_atoms = feat_train.shape[1]\n\n # Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]\n edges_train = np.reshape(edges_train, [-1, num_atoms ** 2])\n edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])\n edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])\n\n feat_train = torch.FloatTensor(feat_train)\n edges_train = torch.LongTensor(edges_train)\n feat_valid = torch.FloatTensor(feat_valid)\n edges_valid = torch.LongTensor(edges_valid)\n feat_test = torch.FloatTensor(feat_test)\n edges_test = torch.LongTensor(edges_test)\n\n # Exclude self edges\n off_diag_idx = np.ravel_multi_index(\n np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),\n [num_atoms, num_atoms])\n edges_train = edges_train[:, off_diag_idx]\n edges_valid = edges_valid[:, off_diag_idx]\n edges_test = edges_test[:, off_diag_idx]\n\n train_data = TensorDataset(feat_train, edges_train)\n valid_data = TensorDataset(feat_valid, edges_valid)\n test_data = TensorDataset(feat_test, edges_test)\n\n train_data_loader = DataLoader(train_data, batch_size=batch_size)\n valid_data_loader = DataLoader(valid_data, batch_size=batch_size)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n return train_data_loader, valid_data_loader, 
test_data_loader\n\n\ndef load_motion_data(batch_size=1, suffix=''):\n feat_train = np.load('data/motion_train' + suffix + '.npy')\n feat_valid = np.load('data/motion_valid' + suffix + '.npy')\n feat_test = np.load('data/motion_test' + suffix + '.npy')\n adj = np.load('data/motion_adj' + suffix + '.npy')\n\n # NOTE: Already normalized\n\n # [num_samples, num_nodes, num_timesteps, num_dims]\n num_nodes = feat_train.shape[1]\n\n edges_train = np.repeat(np.expand_dims(adj.flatten(), 0),\n feat_train.shape[0], axis=0)\n edges_valid = np.repeat(np.expand_dims(adj.flatten(), 0),\n feat_valid.shape[0], axis=0)\n edges_test = np.repeat(np.expand_dims(adj.flatten(), 0),\n feat_test.shape[0], axis=0)\n\n feat_train = torch.FloatTensor(feat_train)\n edges_train = torch.LongTensor(np.array(edges_train, dtype=np.int64))\n feat_valid = torch.FloatTensor(feat_valid)\n edges_valid = torch.LongTensor(np.array(edges_valid, dtype=np.int64))\n feat_test = torch.FloatTensor(feat_test)\n edges_test = torch.LongTensor(np.array(edges_test, dtype=np.int64))\n\n # Exclude self edges\n off_diag_idx = np.ravel_multi_index(\n np.where(np.ones((num_nodes, num_nodes)) - np.eye(num_nodes)),\n [num_nodes, num_nodes])\n edges_train = edges_train[:, off_diag_idx]\n edges_valid = edges_valid[:, off_diag_idx]\n edges_test = edges_test[:, off_diag_idx]\n\n train_data = TensorDataset(feat_train, edges_train)\n valid_data = TensorDataset(feat_valid, edges_valid)\n test_data = TensorDataset(feat_test, edges_test)\n\n train_data_loader = DataLoader(train_data, batch_size=batch_size)\n valid_data_loader = DataLoader(valid_data, batch_size=batch_size)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n return train_data_loader, valid_data_loader, test_data_loader\n\n\ndef to_2d_idx(idx, num_cols):\n idx = np.array(idx, dtype=np.int64)\n y_idx = np.array(np.floor(idx / float(num_cols)), dtype=np.int64)\n x_idx = idx % num_cols\n return x_idx, y_idx\n\n\ndef encode_onehot(labels):\n classes = set(labels) # {0, 1, 2, 3, 4}\n classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)} \n # {0: array([1., 0., 0., 0., 0.]), 1: array([0., 1., 0., 0., 0.]), 2: array([0., 0., 1., 0., 0.]), 3: array([0., 0., 0., 1., 0.]), 4: array([0., 0., 0., 0., 1.])}\n # print('class:',classes_dict)\n labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)\n return labels_onehot\n\n\ndef get_triu_indices(num_nodes):\n \"\"\"Linear triu (upper triangular) indices.\"\"\"\n ones = torch.ones(num_nodes, num_nodes)\n eye = torch.eye(num_nodes, num_nodes)\n triu_indices = (ones.triu() - eye).nonzero().t()\n triu_indices = triu_indices[0] * num_nodes + triu_indices[1]\n return triu_indices\n\n\ndef get_tril_indices(num_nodes):\n \"\"\"Linear tril (lower triangular) indices.\"\"\"\n ones = torch.ones(num_nodes, num_nodes)\n eye = torch.eye(num_nodes, num_nodes)\n tril_indices = (ones.tril() - eye).nonzero().t()\n tril_indices = tril_indices[0] * num_nodes + tril_indices[1]\n return tril_indices\n\n\ndef get_offdiag_indices(num_nodes):\n \"\"\"Linear off-diagonal indices.\"\"\"\n ones = torch.ones(num_nodes, num_nodes)\n eye = torch.eye(num_nodes, num_nodes)\n offdiag_indices = (ones - eye).nonzero().t()\n offdiag_indices = offdiag_indices[0] * num_nodes + offdiag_indices[1]\n return offdiag_indices\n\n\ndef get_triu_offdiag_indices(num_nodes):\n \"\"\"Linear triu (upper) indices w.r.t. 
vector of off-diagonal elements.\"\"\"\n triu_idx = torch.zeros(num_nodes * num_nodes)\n triu_idx[get_triu_indices(num_nodes)] = 1.\n triu_idx = triu_idx[get_offdiag_indices(num_nodes)]\n return triu_idx.nonzero()\n\n\ndef get_tril_offdiag_indices(num_nodes):\n \"\"\"Linear tril (lower) indices w.r.t. vector of off-diagonal elements.\"\"\"\n tril_idx = torch.zeros(num_nodes * num_nodes)\n tril_idx[get_tril_indices(num_nodes)] = 1.\n tril_idx = tril_idx[get_offdiag_indices(num_nodes)]\n return tril_idx.nonzero()\n\n\ndef get_minimum_distance(data):\n data = data[:, :, :, :2].transpose(1, 2)\n data_norm = (data ** 2).sum(-1, keepdim=True)\n dist = data_norm + \\\n data_norm.transpose(2, 3) - \\\n 2 * torch.matmul(data, data.transpose(2, 3))\n min_dist, _ = dist.min(1)\n return min_dist.view(min_dist.size(0), -1)\n\n\ndef get_buckets(dist, num_buckets):\n dist = dist.cpu().data.numpy()\n\n min_dist = np.min(dist)\n max_dist = np.max(dist)\n bucket_size = (max_dist - min_dist) / num_buckets\n thresholds = bucket_size * np.arange(num_buckets)\n\n bucket_idx = []\n for i in range(num_buckets):\n if i < num_buckets - 1:\n idx = np.where(np.all(np.vstack((dist > thresholds[i],\n dist <= thresholds[i + 1])), 0))[0]\n else:\n idx = np.where(dist > thresholds[i])[0]\n bucket_idx.append(idx)\n\n return bucket_idx, thresholds\n\n\ndef get_correct_per_bucket(bucket_idx, pred, target):\n pred = pred.cpu().numpy()[:, 0]\n target = target.cpu().data.numpy()\n\n correct_per_bucket = []\n for i in range(len(bucket_idx)):\n preds_bucket = pred[bucket_idx[i]]\n target_bucket = target[bucket_idx[i]]\n correct_bucket = np.sum(preds_bucket == target_bucket)\n correct_per_bucket.append(correct_bucket)\n\n return correct_per_bucket\n\n\ndef get_correct_per_bucket_(bucket_idx, pred, target):\n pred = pred.cpu().numpy()\n target = target.cpu().data.numpy()\n\n correct_per_bucket = []\n for i in range(len(bucket_idx)):\n preds_bucket = pred[bucket_idx[i]]\n target_bucket = target[bucket_idx[i]]\n correct_bucket = np.sum(preds_bucket == target_bucket)\n correct_per_bucket.append(correct_bucket)\n\n return correct_per_bucket\n\n\ndef kl_categorical(preds, log_prior, num_atoms, eps=1e-16):\n kl_div = preds * (torch.log(preds + eps) - log_prior)\n return kl_div.sum() / (num_atoms * preds.size(0))\n\n\ndef kl_categorical_uniform(preds, num_atoms, num_edge_types, add_const=False,\n eps=1e-16):\n kl_div = preds * torch.log(preds + eps)\n if add_const:\n const = np.log(num_edge_types)\n kl_div += const\n return kl_div.sum() / (num_atoms * preds.size(0))\n\n\ndef nll_gaussian(preds, target, variance, add_const=False):\n neg_log_p = ((preds - target) ** 2 / (2 * variance))\n if add_const:\n const = 0.5 * np.log(2 * np.pi * variance)\n neg_log_p += const\n return neg_log_p.sum() / (target.size(0) * target.size(1))\n\n\ndef edge_accuracy(preds, target):\n _, preds = preds.max(-1)\n correct = preds.float().data.eq(\n target.float().data.view_as(preds)).cpu().sum()\n return np.float(correct) / (target.size(0) * target.size(1))\n\nif __name__==\"__main__\":\n triu_indices = get_triu_offdiag_indices(5)\n print(triu_indices)\n"
] | [
[
"torch.nn.functional.softmax",
"numpy.expand_dims",
"torch.zeros",
"numpy.vstack",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"numpy.max",
"torch.FloatTensor",
"torch.utils.data.dataset.TensorDataset",
"numpy.where",
"torch.autograd.Variable",
"torch.ones",
"numpy.reshape",
"numpy.arange",
"numpy.eye",
"torch.eye",
"torch.nn.functional.sigmoid",
"torch.rand",
"numpy.load",
"torch.LongTensor",
"numpy.log",
"numpy.min",
"torch.log",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.ones",
"numpy.float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
craymichael/tensorflow | [
"b5de565c9c57fa7ca02d42bcfe6f470ecf117ba5"
] | [
"tensorflow/python/compat/compat.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\n\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# This value changes every day with an automatic CL. It can be modified in code\n# via `forward_compatibility_horizon()` or with the environment variable\n# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 6, 17)\n_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = \"TF_FORWARD_COMPATIBILITY_DELTA_DAYS\"\n_FORWARD_COMPATIBILITY_DATE_NUMBER = None\n\n\ndef _date_to_date_number(year, month, day):\n return (year << 9) | (month << 5) | day\n\n\ndef _update_forward_compatibility_date_number(date_to_override=None):\n \"\"\"Update the base date to compare in forward_compatible function.\"\"\"\n\n global _FORWARD_COMPATIBILITY_DATE_NUMBER\n\n if date_to_override:\n date = date_to_override\n else:\n date = _FORWARD_COMPATIBILITY_HORIZON\n delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)\n if delta_days:\n date += datetime.timedelta(days=int(delta_days))\n\n if date < _FORWARD_COMPATIBILITY_HORIZON:\n logging.warning(\"Trying to set the forward compatibility date to the past\"\n \" date %s. This will be ignored by TensorFlow.\" % (date))\n return\n _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(\n date.year, date.month, date.day)\n\n\n_update_forward_compatibility_date_number()\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. 
The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibility, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(\n year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Yields:\n Nothing.\n \"\"\"\n try:\n _update_forward_compatibility_date_number(datetime.date(year, month, day))\n yield\n finally:\n _update_forward_compatibility_date_number()\n"
] | [
[
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.util.tf_export.tf_export"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
}
] |
atranitell/TensorGate | [
"855ae0c69a706c179c26ba4a75a8067a514285fe",
"855ae0c69a706c179c26ba4a75a8067a514285fe"
] | [
"utils/device.py",
"samples/kinface/kinface_1E1G1D.py"
] | [
"# Copyright 2017 The KaiJIN Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"performance tools\"\"\"\n\nfrom tensorflow.python.client import device_lib\n\n\ndef showing_avaliable_device():\n \"\"\"Showing the available device.\"\"\"\n for x in device_lib.list_local_devices():\n print(x)\n",
"# Copyright 2017 The KaiJIN Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"FOR KINFACE\"\"\"\n\nimport tensorflow as tf\nfrom gate import context\nfrom gate.data.data_factory import load_data\nfrom gate.solver import updater\nfrom gate.utils import variable\nfrom samples.kinface import kinbase\n\n\nclass KINFACE_1E1G1D(kinbase.KINBASE):\n\n def __init__(self, config):\n kinbase.KINBASE.__init__(self, config)\n\n def _net(self, c1_real, p2_real):\n c1_mu, c1_sigma, feat_c1 = self._encoder(c1_real)\n p2_mu, p2_sigma, feat_p2 = self._encoder(p2_real, True)\n return feat_c1, feat_p2\n\n @context.graph_phase_wrapper()\n def train(self):\n # load data\n data, info, path = load_data(self.config)\n c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)\n label, cond = tf.unstack(info, axis=1)\n\n # load net\n c1_mu, c1_sigma, feat_c1 = self._encoder(c1_real)\n p2_mu, p2_sigma, feat_p2 = self._encoder(p2_real, True)\n\n # children to parent\n c1_z = c1_mu + c1_sigma * tf.random_normal(tf.shape(c1_mu))\n c1_z = self._generator(c1_z, cond)\n c1_fake = tf.clip_by_value(c1_z, 1e-8, 1 - 1e-8)\n\n # parent to children\n p2_z = p2_mu + p2_sigma * tf.random_normal(tf.shape(p2_mu))\n p2_z = self._generator(p2_z, cond, True)\n p2_fake = tf.clip_by_value(p2_z, 1e-8, 1 - 1e-8)\n\n # discriminator\n D_c1_fake = self._discriminator(c1_fake, cond)\n D_p1_real = self._discriminator(p1_real, cond, reuse=True)\n D_p2_fake = self._discriminator(p2_fake, cond, reuse=True)\n D_c2_real = self._discriminator(c2_real, cond, reuse=True)\n\n # loss for genertor\n E1_loss = self._loss_vae(p1_real, c1_fake, c1_mu, c1_sigma)\n E2_loss = self._loss_vae(c2_real, p2_fake, p2_mu, p2_sigma)\n E_loss = E1_loss + E2_loss\n\n # loss for discriminator\n D1_loss, G1_loss = self._loss_gan(D_c1_fake, D_p1_real)\n D2_loss, G2_loss = self._loss_gan(D_p2_fake, D_c2_real)\n D_loss = D1_loss + D2_loss\n G_loss = G1_loss + G2_loss\n\n loss = E_loss + D_loss + G_loss\n\n # update gradients\n global_step = tf.train.create_global_step()\n\n var_e = variable.select_vars('encoder')\n var_g = variable.select_vars('generator')\n var_d = variable.select_vars('discriminator')\n\n op1 = updater.default(self.config, loss, global_step, var_e, 0)\n op2 = updater.default(self.config, loss, None, var_g, 1)\n op3 = updater.default(self.config, loss, None, var_d, 0)\n train_op = tf.group(op1, op2, op3)\n\n # add hooks\n self.add_hook(self.snapshot.init())\n self.add_hook(self.summary.init())\n self.add_hook(context.Running_Hook(\n config=self.config.log,\n step=global_step,\n keys=['E', 'D', 'G'],\n values=[E_loss, D_loss, G_loss],\n func_test=self.test,\n func_val=None))\n\n saver = tf.train.Saver(var_list=variable.all())\n with context.DefaultSession(self.hooks) as sess:\n self.snapshot.restore(sess, saver)\n while not sess.should_stop():\n sess.run(train_op)"
] | [
[
"tensorflow.python.client.device_lib.list_local_devices"
],
[
"tensorflow.clip_by_value",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.train.create_global_step",
"tensorflow.group"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
aburousan/manim | [
"c11b649e9aed34976844e6a131fb12e2a30c7bc8",
"c11b649e9aed34976844e6a131fb12e2a30c7bc8"
] | [
"manim/mobject/opengl_geometry.py",
"manim/animation/transform_matching_parts.py"
] | [
"import numpy as np\n\nfrom .. import logger\nfrom ..constants import *\nfrom ..mobject.mobject import Mobject\nfrom ..mobject.types.opengl_vectorized_mobject import (\n OpenGLDashedVMobject,\n OpenGLVGroup,\n OpenGLVMobject,\n)\nfrom ..utils.color import *\nfrom ..utils.deprecation import deprecated_params\nfrom ..utils.iterables import adjacent_n_tuples, adjacent_pairs\nfrom ..utils.simple_functions import clip, fdiv\nfrom ..utils.space_ops import (\n angle_between_vectors,\n angle_of_vector,\n compass_directions,\n find_intersection,\n normalize,\n rotate_vector,\n rotation_matrix_transpose,\n)\n\nDEFAULT_DOT_RADIUS = 0.08\nDEFAULT_SMALL_DOT_RADIUS = 0.04\nDEFAULT_DASH_LENGTH = 0.05\nDEFAULT_ARROW_TIP_LENGTH = 0.35\nDEFAULT_ARROW_TIP_WIDTH = 0.35\n\n\nclass OpenGLTipableVMobject(OpenGLVMobject):\n \"\"\"\n Meant for shared functionality between Arc and Line.\n Functionality can be classified broadly into these groups:\n\n * Adding, Creating, Modifying tips\n - add_tip calls create_tip, before pushing the new tip\n into the TipableVMobject's list of submobjects\n - stylistic and positional configuration\n\n * Checking for tips\n - Boolean checks for whether the TipableVMobject has a tip\n and a starting tip\n\n * Getters\n - Straightforward accessors, returning information pertaining\n to the TipableVMobject instance's tip(s), its length etc\n \"\"\"\n\n # Adding, Creating, Modifying tips\n\n def __init__(\n self,\n tip_length=DEFAULT_ARROW_TIP_LENGTH,\n normal_vector=OUT,\n tip_config={},\n **kwargs\n ):\n self.tip_length = tip_length\n self.normal_vector = normal_vector\n self.tip_config = tip_config\n OpenGLVMobject.__init__(self, **kwargs)\n\n def add_tip(self, at_start=False, **kwargs):\n \"\"\"\n Adds a tip to the TipableVMobject instance, recognising\n that the endpoints might need to be switched if it's\n a 'starting tip' or not.\n \"\"\"\n tip = self.create_tip(at_start, **kwargs)\n self.reset_endpoints_based_on_tip(tip, at_start)\n self.asign_tip_attr(tip, at_start)\n self.add(tip)\n return self\n\n def create_tip(self, at_start=False, **kwargs):\n \"\"\"\n Stylises the tip, positions it spacially, and returns\n the newly instantiated tip to the caller.\n \"\"\"\n tip = self.get_unpositioned_tip(**kwargs)\n self.position_tip(tip, at_start)\n return tip\n\n def get_unpositioned_tip(self, **kwargs):\n \"\"\"\n Returns a tip that has been stylistically configured,\n but has not yet been given a position in space.\n \"\"\"\n config = {}\n config.update(self.tip_config)\n config.update(kwargs)\n return OpenGLArrowTip(**config)\n\n def position_tip(self, tip, at_start=False):\n # Last two control points, defining both\n # the end, and the tangency direction\n if at_start:\n anchor = self.get_start()\n handle = self.get_first_handle()\n else:\n handle = self.get_last_handle()\n anchor = self.get_end()\n tip.rotate(angle_of_vector(handle - anchor) - PI - tip.get_angle())\n tip.shift(anchor - tip.get_tip_point())\n return tip\n\n def reset_endpoints_based_on_tip(self, tip, at_start):\n if self.get_length() == 0:\n # Zero length, put_start_and_end_on wouldn't\n # work\n return self\n\n if at_start:\n start = tip.get_base()\n end = self.get_end()\n else:\n start = self.get_start()\n end = tip.get_base()\n self.put_start_and_end_on(start, end)\n return self\n\n def asign_tip_attr(self, tip, at_start):\n if at_start:\n self.start_tip = tip\n else:\n self.tip = tip\n return self\n\n # Checking for tips\n def has_tip(self):\n return hasattr(self, \"tip\") and self.tip in self\n\n def 
has_start_tip(self):\n return hasattr(self, \"start_tip\") and self.start_tip in self\n\n # Getters\n def pop_tips(self):\n start, end = self.get_start_and_end()\n result = OpenGLVGroup()\n if self.has_tip():\n result.add(self.tip)\n self.remove(self.tip)\n if self.has_start_tip():\n result.add(self.start_tip)\n self.remove(self.start_tip)\n self.put_start_and_end_on(start, end)\n return result\n\n def get_tips(self):\n \"\"\"\n Returns a VGroup (collection of VMobjects) containing\n the TipableVMObject instance's tips.\n \"\"\"\n result = OpenGLVGroup()\n if hasattr(self, \"tip\"):\n result.add(self.tip)\n if hasattr(self, \"start_tip\"):\n result.add(self.start_tip)\n return result\n\n def get_tip(self):\n \"\"\"Returns the TipableVMobject instance's (first) tip,\n otherwise throws an exception.\"\"\"\n tips = self.get_tips()\n if len(tips) == 0:\n raise Exception(\"tip not found\")\n else:\n return tips[0]\n\n def get_default_tip_length(self):\n return self.tip_length\n\n def get_first_handle(self):\n return self.points[1]\n\n def get_last_handle(self):\n return self.points[-2]\n\n def get_end(self):\n if self.has_tip():\n return self.tip.get_start()\n else:\n return OpenGLVMobject.get_end(self)\n\n def get_start(self):\n if self.has_start_tip():\n return self.start_tip.get_start()\n else:\n return OpenGLVMobject.get_start(self)\n\n def get_length(self):\n start, end = self.get_start_and_end()\n return np.linalg.norm(start - end)\n\n\nclass OpenGLArc(OpenGLTipableVMobject):\n def __init__(\n self,\n start_angle=0,\n angle=TAU / 4,\n radius=1.0,\n n_components=8,\n arc_center=ORIGIN,\n **kwargs\n ):\n self.start_angle = start_angle\n self.angle = angle\n self.radius = radius\n self.n_components = n_components\n self.arc_center = arc_center\n super().__init__(self, **kwargs)\n self.orientation = -1\n\n def init_points(self):\n self.set_points(\n OpenGLArc.create_quadratic_bezier_points(\n angle=self.angle,\n start_angle=self.start_angle,\n n_components=self.n_components,\n )\n )\n # To maintain proper orientation for fill shaders.\n self.scale(self.radius, about_point=ORIGIN)\n self.shift(self.arc_center)\n\n @staticmethod\n def create_quadratic_bezier_points(angle, start_angle=0, n_components=8):\n samples = np.array(\n [\n [np.cos(a), np.sin(a), 0]\n for a in np.linspace(\n start_angle,\n start_angle + angle,\n 2 * n_components + 1,\n )\n ]\n )\n theta = angle / n_components\n samples[1::2] /= np.cos(theta / 2)\n\n points = np.zeros((3 * n_components, 3))\n points[0::3] = samples[0:-1:2]\n points[1::3] = samples[1::2]\n points[2::3] = samples[2::2]\n return points\n\n def get_arc_center(self):\n \"\"\"\n Looks at the normals to the first two\n anchors, and finds their intersection points\n \"\"\"\n # First two anchors and handles\n a1, h, a2 = self.points[:3]\n # Tangent vectors\n t1 = h - a1\n t2 = h - a2\n # Normals\n n1 = rotate_vector(t1, TAU / 4)\n n2 = rotate_vector(t2, TAU / 4)\n return find_intersection(a1, n1, a2, n2)\n\n def get_start_angle(self):\n angle = angle_of_vector(self.get_start() - self.get_arc_center())\n return angle % TAU\n\n def get_stop_angle(self):\n angle = angle_of_vector(self.get_end() - self.get_arc_center())\n return angle % TAU\n\n def move_arc_center_to(self, point):\n self.shift(point - self.get_arc_center())\n return self\n\n\nclass OpenGLArcBetweenPoints(OpenGLArc):\n def __init__(self, start, end, angle=TAU / 4, **kwargs):\n super().__init__(angle=angle, **kwargs)\n if angle == 0:\n self.set_points_as_corners([LEFT, RIGHT])\n 
self.put_start_and_end_on(start, end)\n\n\nclass OpenGLCurvedArrow(OpenGLArcBetweenPoints):\n def __init__(self, start_point, end_point, **kwargs):\n OpenGLArcBetweenPoints.__init__(self, start_point, end_point, **kwargs)\n self.add_tip()\n\n\nclass OpenGLCurvedDoubleArrow(OpenGLCurvedArrow):\n def __init__(self, start_point, end_point, **kwargs):\n OpenGLCurvedArrow.__init__(self, start_point, end_point, **kwargs)\n self.add_tip(at_start=True)\n\n\nclass OpenGLCircle(OpenGLArc):\n def __init__(self, color=RED, **kwargs):\n OpenGLArc.__init__(self, 0, TAU, color=color, **kwargs)\n\n def surround(self, mobject, dim_to_match=0, stretch=False, buff=MED_SMALL_BUFF):\n # Ignores dim_to_match and stretch; result will always be a circle\n # TODO: Perhaps create an ellipse class to handle singele-dimension stretching\n\n self.replace(mobject, dim_to_match, stretch)\n self.stretch((self.get_width() + 2 * buff) / self.get_width(), 0)\n self.stretch((self.get_height() + 2 * buff) / self.get_height(), 1)\n\n def point_at_angle(self, angle):\n start_angle = self.get_start_angle()\n return self.point_from_proportion((angle - start_angle) / TAU)\n\n\nclass OpenGLDot(OpenGLCircle):\n def __init__(\n self,\n point=ORIGIN,\n radius=DEFAULT_DOT_RADIUS,\n stroke_width=0,\n fill_opacity=1.0,\n color=WHITE,\n **kwargs\n ):\n super().__init__(\n arc_center=point,\n radius=radius,\n stroke_width=stroke_width,\n fill_opacity=fill_opacity,\n color=color,\n **kwargs\n )\n\n\nclass OpenGLEllipse(OpenGLCircle):\n def __init__(self, width=2, height=1, **kwargs):\n super().__init__(**kwargs)\n self.set_width(width, stretch=True)\n self.set_height(height, stretch=True)\n\n\nclass OpenGLAnnularSector(OpenGLArc):\n def __init__(\n self,\n inner_radius=1,\n outer_radius=2,\n angle=TAU / 4,\n start_angle=0,\n fill_opacity=1,\n stroke_width=0,\n color=WHITE,\n **kwargs\n ):\n self.inner_radius = inner_radius\n self.outer_radius = outer_radius\n OpenGLArc.__init__(\n self,\n start_angle=start_angle,\n angle=angle,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n color=color,\n **kwargs\n )\n\n def init_points(self):\n inner_arc, outer_arc = (\n OpenGLArc(\n start_angle=self.start_angle,\n angle=self.angle,\n radius=radius,\n arc_center=self.arc_center,\n )\n for radius in (self.inner_radius, self.outer_radius)\n )\n outer_arc.reverse_points()\n self.append_points(inner_arc.points)\n self.add_line_to(outer_arc.points[0])\n self.append_points(outer_arc.points)\n self.add_line_to(inner_arc.points[0])\n\n\nclass OpenGLSector(OpenGLAnnularSector):\n def __init__(self, outer_radius=1, inner_radius=0, **kwargs):\n OpenGLAnnularSector.__init__(\n self, inner_radius=inner_radius, outer_radius=outer_radius, **kwargs\n )\n\n\nclass OpenGLAnnulus(OpenGLCircle):\n def __init__(\n self,\n inner_radius=1,\n outer_radius=2,\n fill_opacity=1,\n stroke_width=0,\n color=WHITE,\n mark_paths_closed=False,\n **kwargs\n ):\n self.mark_paths_closed = mark_paths_closed # is this even used?\n self.inner_radius = inner_radius\n self.outer_radius = outer_radius\n OpenGLCircle.__init__(\n self,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n color=color,\n **kwargs\n )\n\n def init_points(self):\n self.radius = self.outer_radius\n outer_circle = OpenGLCircle(radius=self.outer_radius)\n inner_circle = OpenGLCircle(radius=self.inner_radius)\n inner_circle.reverse_points()\n self.append_points(outer_circle.points)\n self.append_points(inner_circle.points)\n self.shift(self.arc_center)\n\n\nclass OpenGLLine(OpenGLTipableVMobject):\n def 
__init__(self, start=LEFT, end=RIGHT, buff=0, path_arc=0, **kwargs):\n self.dim = 3\n self.buff = buff\n self.path_arc = path_arc\n self.set_start_and_end_attrs(start, end)\n super().__init__(**kwargs)\n\n def init_points(self):\n self.set_points_by_ends(self.start, self.end, self.buff, self.path_arc)\n\n def set_points_by_ends(self, start, end, buff=0, path_arc=0):\n if path_arc:\n self.set_points(OpenGLArc.create_quadratic_bezier_points(path_arc))\n self.put_start_and_end_on(start, end)\n else:\n self.set_points_as_corners([start, end])\n self.account_for_buff(self.buff)\n\n def set_path_arc(self, new_value):\n self.path_arc = new_value\n self.init_points()\n\n def account_for_buff(self, buff):\n if buff == 0:\n return\n #\n if self.path_arc == 0:\n length = self.get_length()\n else:\n length = self.get_arc_length()\n #\n if length < 2 * buff:\n return\n buff_prop = buff / length\n self.pointwise_become_partial(self, buff_prop, 1 - buff_prop)\n return self\n\n def set_start_and_end_attrs(self, start, end):\n # If either start or end are Mobjects, this\n # gives their centers\n rough_start = self.pointify(start)\n rough_end = self.pointify(end)\n vect = normalize(rough_end - rough_start)\n # Now that we know the direction between them,\n # we can find the appropriate boundary point from\n # start and end, if they're mobjects\n self.start = self.pointify(start, vect) + self.buff * vect\n self.end = self.pointify(end, -vect) - self.buff * vect\n\n def pointify(self, mob_or_point, direction=None):\n \"\"\"\n Take an argument passed into Line (or subclass) and turn\n it into a 3d point.\n \"\"\"\n if isinstance(mob_or_point, Mobject):\n mob = mob_or_point\n if direction is None:\n return mob.get_center()\n else:\n return mob.get_continuous_bounding_box_point(direction)\n else:\n point = mob_or_point\n result = np.zeros(self.dim)\n result[: len(point)] = point\n return result\n\n def put_start_and_end_on(self, start, end):\n curr_start, curr_end = self.get_start_and_end()\n if (curr_start == curr_end).all():\n self.set_points_by_ends(start, end, self.path_arc)\n return super().put_start_and_end_on(start, end)\n\n def get_vector(self):\n return self.get_end() - self.get_start()\n\n def get_unit_vector(self):\n return normalize(self.get_vector())\n\n def get_angle(self):\n return angle_of_vector(self.get_vector())\n\n def get_projection(self, point):\n \"\"\"\n Return projection of a point onto the line\n \"\"\"\n unit_vect = self.get_unit_vector()\n start = self.get_start()\n return start + np.dot(point - start, unit_vect) * unit_vect\n\n def get_slope(self):\n return np.tan(self.get_angle())\n\n def set_angle(self, angle, about_point=None):\n if about_point is None:\n about_point = self.get_start()\n self.rotate(\n angle - self.get_angle(),\n about_point=about_point,\n )\n return self\n\n def set_length(self, length):\n self.scale(length / self.get_length())\n\n\nclass OpenGLDashedLine(OpenGLLine):\n @deprecated_params(\n params=\"positive_space_ratio dash_spacing\",\n since=\"v0.9.0\",\n message=\"Use dashed_ratio instead of positive_space_ratio.\",\n )\n def __init__(\n self, *args, dash_length=DEFAULT_DASH_LENGTH, dashed_ratio=0.5, **kwargs\n ):\n # Simplify with removal of deprecation warning\n self.dash_spacing = kwargs.pop(\"dash_spacing\", None) # Unused param\n self.dashed_ratio = kwargs.pop(\"positive_space_ratio\", None) or dashed_ratio\n self.dash_length = dash_length\n super().__init__(*args, **kwargs)\n dashed_ratio = self.dashed_ratio\n num_dashes = 
self.calculate_num_dashes(dashed_ratio)\n dashes = OpenGLDashedVMobject(\n self, num_dashes=num_dashes, dashed_ratio=dashed_ratio\n )\n self.clear_points()\n self.add(*dashes)\n\n def calculate_num_dashes(self, dashed_ratio):\n return max(\n 2, int(np.ceil((self.get_length() / self.dash_length) * dashed_ratio))\n )\n\n def get_start(self):\n if len(self.submobjects) > 0:\n return self.submobjects[0].get_start()\n else:\n return OpenGLLine.get_start(self)\n\n def get_end(self):\n if len(self.submobjects) > 0:\n return self.submobjects[-1].get_end()\n else:\n return OpenGLLine.get_end(self)\n\n def get_first_handle(self):\n return self.submobjects[0].points[1]\n\n def get_last_handle(self):\n return self.submobjects[-1].points[-2]\n\n\nclass OpenGLTangentLine(OpenGLLine):\n def __init__(self, vmob, alpha, length=1, d_alpha=1e-6, **kwargs):\n self.length = length\n self.d_alpha = d_alpha\n da = self.d_alpha\n a1 = clip(alpha - da, 0, 1)\n a2 = clip(alpha + da, 0, 1)\n super().__init__(vmob.pfp(a1), vmob.pfp(a2), **kwargs)\n self.scale(self.length / self.get_length())\n\n\nclass OpenGLElbow(OpenGLVMobject):\n def __init__(self, width=0.2, angle=0, **kwargs):\n self.angle = angle\n super().__init__(self, **kwargs)\n self.set_points_as_corners([UP, UP + RIGHT, RIGHT])\n self.set_width(width, about_point=ORIGIN)\n self.rotate(self.angle, about_point=ORIGIN)\n\n\nclass OpenGLArrow(OpenGLLine):\n def __init__(\n self,\n start=LEFT,\n end=RIGHT,\n path_arc=0,\n fill_color=GREY_A,\n fill_opacity=1,\n stroke_width=0,\n buff=MED_SMALL_BUFF,\n thickness=0.05,\n tip_width_ratio=5,\n tip_angle=PI / 3,\n max_tip_length_to_length_ratio=0.5,\n max_width_to_length_ratio=0.1,\n **kwargs\n ):\n self.thickness = thickness\n self.tip_width_ratio = tip_width_ratio\n self.tip_angle = tip_angle\n self.max_tip_length_to_length_ratio = max_tip_length_to_length_ratio\n self.max_width_to_length_ratio = max_width_to_length_ratio\n super().__init__(\n start=start,\n end=end,\n buff=buff,\n path_arc=path_arc,\n fill_color=fill_color,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n **kwargs\n )\n\n def set_points_by_ends(self, start, end, buff=0, path_arc=0):\n # Find the right tip length and thickness\n vect = end - start\n length = max(np.linalg.norm(vect), 1e-8)\n thickness = self.thickness\n w_ratio = fdiv(self.max_width_to_length_ratio, fdiv(thickness, length))\n if w_ratio < 1:\n thickness *= w_ratio\n\n tip_width = self.tip_width_ratio * thickness\n tip_length = tip_width / (2 * np.tan(self.tip_angle / 2))\n t_ratio = fdiv(self.max_tip_length_to_length_ratio, fdiv(tip_length, length))\n if t_ratio < 1:\n tip_length *= t_ratio\n tip_width *= t_ratio\n\n # Find points for the stem\n if path_arc == 0:\n points1 = (length - tip_length) * np.array([RIGHT, 0.5 * RIGHT, ORIGIN])\n points1 += thickness * UP / 2\n points2 = points1[::-1] + thickness * DOWN\n else:\n # Solve for radius so that the tip-to-tail length matches |end - start|\n a = 2 * (1 - np.cos(path_arc))\n b = -2 * tip_length * np.sin(path_arc)\n c = tip_length ** 2 - length ** 2\n R = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)\n\n # Find arc points\n points1 = OpenGLArc.create_quadratic_bezier_points(path_arc)\n points2 = np.array(points1[::-1])\n points1 *= R + thickness / 2\n points2 *= R - thickness / 2\n if path_arc < 0:\n tip_length *= -1\n rot_T = rotation_matrix_transpose(PI / 2 - path_arc, OUT)\n for points in points1, points2:\n points[:] = np.dot(points, rot_T)\n points += R * DOWN\n\n self.set_points(points1)\n # Tip\n 
self.add_line_to(tip_width * UP / 2)\n self.add_line_to(tip_length * LEFT)\n self.tip_index = len(self.points) - 1\n self.add_line_to(tip_width * DOWN / 2)\n self.add_line_to(points2[0])\n # Close it out\n self.append_points(points2)\n self.add_line_to(points1[0])\n\n if length > 0:\n # Final correction\n super().scale(length / self.get_length())\n\n self.rotate(angle_of_vector(vect) - self.get_angle())\n self.rotate(\n PI / 2 - np.arccos(normalize(vect)[2]),\n axis=rotate_vector(self.get_unit_vector(), -PI / 2),\n )\n self.shift(start - self.get_start())\n self.refresh_triangulation()\n\n def reset_points_around_ends(self):\n self.set_points_by_ends(\n self.get_start(), self.get_end(), path_arc=self.path_arc\n )\n return self\n\n def get_start(self):\n nppc = self.n_points_per_curve\n points = self.points\n return (points[0] + points[-nppc]) / 2\n\n def get_end(self):\n return self.points[self.tip_index]\n\n def put_start_and_end_on(self, start, end):\n self.set_points_by_ends(start, end, buff=0, path_arc=self.path_arc)\n return self\n\n def scale(self, *args, **kwargs):\n super().scale(*args, **kwargs)\n self.reset_points_around_ends()\n return self\n\n def set_thickness(self, thickness):\n self.thickness = thickness\n self.reset_points_around_ends()\n return self\n\n def set_path_arc(self, path_arc):\n self.path_arc = path_arc\n self.reset_points_around_ends()\n return self\n\n\nclass OpenGLVector(OpenGLArrow):\n def __init__(self, direction=RIGHT, buff=0, **kwargs):\n self.buff = buff\n if len(direction) == 2:\n direction = np.hstack([direction, 0])\n super().__init__(ORIGIN, direction, buff=buff, **kwargs)\n\n\nclass OpenGLDoubleArrow(OpenGLArrow):\n def __init__(self, *args, **kwargs):\n OpenGLArrow.__init__(self, *args, **kwargs)\n self.add_tip(at_start=True)\n\n\nclass OpenGLCubicBezier(OpenGLVMobject):\n def __init__(self, a0, h0, h1, a1, **kwargs):\n OpenGLVMobject.__init__(self, **kwargs)\n self.add_cubic_bezier_curve(a0, h0, h1, a1)\n\n\nclass OpenGLPolygon(OpenGLVMobject):\n def __init__(self, *vertices, **kwargs):\n self.vertices = vertices\n super().__init__(**kwargs)\n\n def init_points(self):\n verts = self.vertices\n self.set_points_as_corners([*verts, verts[0]])\n\n def get_vertices(self):\n return self.get_start_anchors()\n\n def round_corners(self, radius=0.5):\n vertices = self.get_vertices()\n arcs = []\n for v1, v2, v3 in adjacent_n_tuples(vertices, 3):\n vect1 = v2 - v1\n vect2 = v3 - v2\n unit_vect1 = normalize(vect1)\n unit_vect2 = normalize(vect2)\n angle = angle_between_vectors(vect1, vect2)\n # Negative radius gives concave curves\n angle *= np.sign(radius)\n # Distance between vertex and start of the arc\n cut_off_length = radius * np.tan(angle / 2)\n # Determines counterclockwise vs. 
clockwise\n sign = np.sign(np.cross(vect1, vect2)[2])\n arc = OpenGLArcBetweenPoints(\n v2 - unit_vect1 * cut_off_length,\n v2 + unit_vect2 * cut_off_length,\n angle=sign * angle,\n n_components=2,\n )\n arcs.append(arc)\n\n self.clear_points()\n # To ensure that we loop through starting with last\n arcs = [arcs[-1], *arcs[:-1]]\n for arc1, arc2 in adjacent_pairs(arcs):\n self.append_points(arc1.points)\n line = OpenGLLine(arc1.get_end(), arc2.get_start())\n # Make sure anchors are evenly distributed\n len_ratio = line.get_length() / arc1.get_arc_length()\n line.insert_n_curves(int(arc1.get_num_curves() * len_ratio))\n self.append_points(line.points)\n return self\n\n\nclass OpenGLRegularPolygon(OpenGLPolygon):\n def __init__(self, n=6, start_angle=None, **kwargs):\n self.start_angle = start_angle\n if self.start_angle is None:\n if n % 2 == 0:\n self.start_angle = 0\n else:\n self.start_angle = 90 * DEGREES\n start_vect = rotate_vector(RIGHT, self.start_angle)\n vertices = compass_directions(n, start_vect)\n super().__init__(*vertices, **kwargs)\n\n\nclass OpenGLTriangle(OpenGLRegularPolygon):\n def __init__(self, **kwargs):\n super().__init__(n=3, **kwargs)\n\n\nclass OpenGLArrowTip(OpenGLTriangle):\n def __init__(\n self,\n fill_opacity=1,\n fill_color=WHITE,\n stroke_width=0,\n width=DEFAULT_ARROW_TIP_WIDTH,\n length=DEFAULT_ARROW_TIP_LENGTH,\n angle=0,\n **kwargs\n ):\n OpenGLTriangle.__init__(\n self,\n start_angle=0,\n fill_opacity=fill_opacity,\n fill_color=fill_color,\n stroke_width=stroke_width,\n **kwargs\n )\n self.set_width(width, stretch=True)\n self.set_height(length, stretch=True)\n\n def get_base(self):\n return self.point_from_proportion(0.5)\n\n def get_tip_point(self):\n return self.points[0]\n\n def get_vector(self):\n return self.get_tip_point() - self.get_base()\n\n def get_angle(self):\n return angle_of_vector(self.get_vector())\n\n def get_length(self):\n return np.linalg.norm(self.get_vector())\n\n\nclass OpenGLRectangle(OpenGLPolygon):\n def __init__(self, color=WHITE, width=4.0, height=2.0, **kwargs):\n OpenGLPolygon.__init__(self, UR, UL, DL, DR, color=color, **kwargs)\n\n self.set_width(width, stretch=True)\n self.set_height(height, stretch=True)\n\n\nclass OpenGLSquare(OpenGLRectangle):\n def __init__(self, side_length=2.0, **kwargs):\n self.side_length = side_length\n\n super().__init__(height=side_length, width=side_length, **kwargs)\n\n\nclass OpenGLRoundedRectangle(OpenGLRectangle):\n def __init__(self, corner_radius=0.5, **kwargs):\n self.corner_radius = corner_radius\n OpenGLRectangle.__init__(self, **kwargs)\n self.round_corners(self.corner_radius)\n",
"\"\"\"Animations that try to transform Mobjects while keeping track of identical parts.\"\"\"\n\n__all__ = [\"TransformMatchingShapes\", \"TransformMatchingTex\"]\n\nfrom typing import TYPE_CHECKING, List, Optional\n\nimport numpy as np\n\nfrom .._config import config\nfrom ..mobject.mobject import Group, Mobject\nfrom ..mobject.opengl_mobject import OpenGLGroup, OpenGLMobject\nfrom ..mobject.types.opengl_vectorized_mobject import OpenGLVGroup, OpenGLVMobject\nfrom ..mobject.types.vectorized_mobject import VGroup, VMobject\nfrom .composition import AnimationGroup\nfrom .fading import FadeIn, FadeOut\nfrom .transform import FadeTransformPieces, Transform\n\nif TYPE_CHECKING:\n from ..scene.scene import Scene\n\n\nclass TransformMatchingAbstractBase(AnimationGroup):\n \"\"\"Abstract base class for transformations that keep track of matching parts.\n\n Subclasses have to implement the two static methods\n :meth:`~.TransformMatchingAbstractBase.get_mobject_parts` and\n :meth:`~.TransformMatchingAbstractBase.get_mobject_key`.\n\n Basically, this transformation first maps all submobjects returned\n by the ``get_mobject_parts`` method to certain keys by applying the\n ``get_mobject_key`` method. Then, submobjects with matching keys\n are transformed into each other.\n\n Parameters\n ----------\n mobject\n The starting :class:`~.Mobject`.\n target_mobject\n The target :class:`~.Mobject`.\n transform_mismatches\n Controls whether submobjects without a matching key are transformed\n into each other by using :class:`~.Transform`. Default: ``False``.\n fade_transform_mismatches\n Controls whether submobjects without a matching key are transformed\n into each other by using :class:`~.FadeTransform`. Default: ``False``.\n key_map\n Optional. A dictionary mapping keys belonging to some of the starting mobject's\n submobjects (i.e., the return values of the ``get_mobject_key`` method)\n to some keys belonging to the target mobject's submobjects that should\n be transformed although the keys don't match.\n kwargs\n All further keyword arguments are passed to the submobject transformations.\n\n\n Note\n ----\n If neither ``transform_mismatches`` nor ``fade_transform_mismatches``\n are set to ``True``, submobjects without matching keys in the starting\n mobject are faded out in the direction of the unmatched submobjects in\n the target mobject, and unmatched submobjects in the target mobject\n are faded in from the direction of the unmatched submobjects in the\n start mobject.\n\n \"\"\"\n\n def __init__(\n self,\n mobject: \"Mobject\",\n target_mobject: \"Mobject\",\n transform_mismatches: bool = False,\n fade_transform_mismatches: bool = False,\n key_map: Optional[dict] = None,\n **kwargs\n ):\n assert type(mobject) is type(target_mobject)\n\n if isinstance(mobject, OpenGLVMobject):\n group_type = OpenGLVGroup\n elif isinstance(mobject, OpenGLMobject):\n group_type = OpenGLGroup\n elif isinstance(mobject, VMobject):\n group_type = VGroup\n else:\n group_type = Group\n\n source_map = self.get_shape_map(mobject)\n target_map = self.get_shape_map(target_mobject)\n\n if key_map is None:\n key_map = {}\n\n # Create two mobjects whose submobjects all match each other\n # according to whatever keys are used for source_map and\n # target_map\n transform_source = group_type()\n transform_target = group_type()\n kwargs[\"final_alpha_value\"] = 0\n for key in set(source_map).intersection(target_map):\n transform_source.add(source_map[key])\n transform_target.add(target_map[key])\n anims = 
[Transform(transform_source, transform_target, **kwargs)]\n # User can manually specify when one part should transform\n # into another despite not matching by using key_map\n key_mapped_source = group_type()\n key_mapped_target = group_type()\n for key1, key2 in key_map.items():\n if key1 in source_map and key2 in target_map:\n key_mapped_source.add(source_map[key1])\n key_mapped_target.add(target_map[key2])\n source_map.pop(key1, None)\n target_map.pop(key2, None)\n if len(key_mapped_source) > 0:\n anims.append(\n FadeTransformPieces(key_mapped_source, key_mapped_target, **kwargs)\n )\n\n fade_source = group_type()\n fade_target = group_type()\n for key in set(source_map).difference(target_map):\n fade_source.add(source_map[key])\n for key in set(target_map).difference(source_map):\n fade_target.add(target_map[key])\n\n if transform_mismatches:\n if \"replace_mobject_with_target_in_scene\" not in kwargs:\n kwargs[\"replace_mobject_with_target_in_scene\"] = True\n anims.append(Transform(fade_source, fade_target, **kwargs))\n elif fade_transform_mismatches:\n anims.append(FadeTransformPieces(fade_source, fade_target, **kwargs))\n else:\n anims.append(FadeOut(fade_source, target_position=fade_target, **kwargs))\n anims.append(\n FadeIn(fade_target.copy(), target_position=fade_target, **kwargs)\n )\n\n super().__init__(*anims)\n\n self.to_remove = mobject\n self.to_add = target_mobject\n\n def get_shape_map(self, mobject: \"Mobject\") -> dict:\n shape_map = {}\n for sm in self.get_mobject_parts(mobject):\n key = self.get_mobject_key(sm)\n if key not in shape_map:\n if config[\"renderer\"] == \"opengl\":\n shape_map[key] = OpenGLVGroup()\n else:\n shape_map[key] = VGroup()\n shape_map[key].add(sm)\n return shape_map\n\n def clean_up_from_scene(self, scene: \"Scene\") -> None:\n for anim in self.animations:\n anim.interpolate(0)\n scene.remove(self.mobject)\n scene.remove(self.to_remove)\n scene.add(self.to_add)\n\n @staticmethod\n def get_mobject_parts(mobject: \"Mobject\"):\n raise NotImplementedError(\"To be implemented in subclass.\")\n\n @staticmethod\n def get_mobject_key(mobject: \"Mobject\"):\n raise NotImplementedError(\"To be implemented in subclass.\")\n\n\nclass TransformMatchingShapes(TransformMatchingAbstractBase):\n \"\"\"An animation trying to transform groups by matching the shape\n of their submobjects.\n\n Two submobjects match if the hash of their point coordinates after\n normalization (i.e., after translation to the origin, fixing the submobject\n height at 1 unit, and rounding the coordinates to three decimal places)\n matches.\n\n See also\n --------\n :class:`~.TransformMatchingAbstractBase`\n\n Examples\n --------\n\n .. 
manim:: Anagram\n\n class Anagram(Scene):\n def construct(self):\n src = Text(\"the morse code\")\n tar = Text(\"here come dots\")\n self.play(Write(src))\n self.wait(0.5)\n self.play(TransformMatchingShapes(src, tar, path_arc=PI/2))\n self.wait(0.5)\n\n \"\"\"\n\n def __init__(\n self,\n mobject: \"Mobject\",\n target_mobject: \"Mobject\",\n transform_mismatches: bool = False,\n fade_transform_mismatches: bool = False,\n key_map: Optional[dict] = None,\n **kwargs\n ):\n super().__init__(\n mobject,\n target_mobject,\n transform_mismatches=transform_mismatches,\n fade_transform_mismatches=fade_transform_mismatches,\n key_map=key_map,\n **kwargs\n )\n\n @staticmethod\n def get_mobject_parts(mobject: \"Mobject\") -> List[\"Mobject\"]:\n return mobject.family_members_with_points()\n\n @staticmethod\n def get_mobject_key(mobject: \"Mobject\") -> int:\n mobject.save_state()\n mobject.center()\n mobject.set_height(1)\n result = hash(np.round(mobject.points, 3).tobytes())\n mobject.restore()\n return result\n\n\nclass TransformMatchingTex(TransformMatchingAbstractBase):\n \"\"\"A transformation trying to transform rendered LaTeX strings.\n\n Two submobjects match if their ``tex_string`` matches.\n\n See also\n --------\n :class:`~.TransformMatchingAbstractBase`\n\n Examples\n --------\n\n .. manim:: MatchingEquationParts\n\n class MatchingEquationParts(Scene):\n def construct(self):\n eq1 = MathTex(\"{{a^2}} + {{b^2}} = {{c^2}}\")\n eq2 = MathTex(\"{{a^2}} = {{c^2}} - {{b^2}}\")\n self.add(eq1)\n self.wait(0.5)\n self.play(TransformMatchingTex(eq1, eq2))\n self.wait(0.5)\n\n \"\"\"\n\n def __init__(\n self,\n mobject: \"Mobject\",\n target_mobject: \"Mobject\",\n transform_mismatches: bool = False,\n fade_transform_mismatches: bool = False,\n key_map: Optional[dict] = None,\n **kwargs\n ):\n assert hasattr(mobject, \"tex_string\")\n assert hasattr(target_mobject, \"tex_string\")\n super().__init__(\n mobject,\n target_mobject,\n transform_mismatches=transform_mismatches,\n fade_transform_mismatches=fade_transform_mismatches,\n key_map=key_map,\n **kwargs\n )\n\n @staticmethod\n def get_mobject_parts(mobject: \"Mobject\") -> List[\"Mobject\"]:\n return mobject.submobjects\n\n @staticmethod\n def get_mobject_key(mobject: \"Mobject\") -> str:\n return mobject.tex_string\n"
] | [
[
"numpy.hstack",
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.sign",
"numpy.tan",
"numpy.cross",
"numpy.array",
"numpy.zeros"
],
[
"numpy.round"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ddarmon/transCSSR | [
"8ed057eee70d2d50d14bc719c7850ac46a00e4d4"
] | [
"demo-and-experimental-scripts/demo_predict_presynch_eT.py"
] | [
"import numpy\nimport scipy.stats\nimport itertools\nimport copy\nimport string\nimport os\n\nfrom collections import Counter, defaultdict\nfrom filter_data_methods import *\nfrom igraph import *\n\nfrom transCSSR import *\n\ndata_prefix = ''\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#\n# The various test transducers. Xt is the input\n# and Yt is the output.\n#\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n# Xt_name = 'coinflip'\n# Yt_name = 'coinflip-excite_w_refrac'\n\nXt_name = 'barnettX'\nYt_name = 'barnettY'\n\n# Xt_name = ''\n# Yt_name = 'even'\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#\n# Load in the data for each process.\n#\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nstringY = open('data/{}{}.dat'.format(data_prefix, Yt_name)).readline().strip()\n\nif Xt_name == '':\n\tstringX = '0'*len(stringY)\nelse:\n\tstringX = open('data/{}{}.dat'.format(data_prefix, Xt_name)).readline().strip()\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#\n# Set the parameters and associated quantities:\n# \taxs, ays -- the input / output alphabets\n# \talpha -- the significance level associated with\n# \t CSSR's hypothesis tests.\n# \tL -- The maximum history length to look\n# back when inferring predictive\n# distributions.\n#\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nif Xt_name == '':\n\taxs = ['0']\n\tays = ['0', '1']\nelse:\n\taxs = ['0', '1']\n\tays = ['0', '1']\n\ne_symbols = list(itertools.product(axs, ays)) # All of the possible pairs of emission\n # symbols for (x, y)\n\nalpha = 0.001\n\nverbose = False\n\n# L is the maximum amount we want to ever look back.\n\nL_max = 3\n\nTx = len(stringX); Ty = len(stringY)\n\nassert Tx == Ty, 'The two time series must have the same length.'\n\nT = Tx\n\nword_lookup_marg, word_lookup_fut = estimate_predictive_distributions(stringX, stringY, L_max)\n\nepsilon, invepsilon, morph_by_state = run_transCSSR(word_lookup_marg, word_lookup_fut, L_max, axs, ays, e_symbols, Xt_name, Yt_name, alpha = alpha)\n\nind_go_to = 20\n\npossible_states_from_predict_presynch_eT = numpy.zeros((ind_go_to-1, len(invepsilon)), dtype = numpy.int32)\n\nfor cur_ind in range(1, ind_go_to):\n\tcurX = stringX[:cur_ind]\n\tcurY = stringY[:cur_ind-1]\n\n\tpreds, possible_states = predict_presynch_eT(curX, curY, machine_fname = 'transCSSR_results/+{}.dot'.format(Xt_name), transducer_fname = 'transCSSR_results/{}+{}.dot'.format(Xt_name, Yt_name), axs = axs, ays = ays, inf_alg = 'transCSSR')\n\n\tpossible_states_from_predict_presynch_eT[cur_ind - 1] = possible_states\n\n\tprint((cur_ind, curX, curY + '*', preds.tolist(), possible_states))\n\nprint('')\n\npreds_all, possible_states_all = filter_and_pred_probs(stringX, stringY, machine_fname = 'transCSSR_results/+{}.dot'.format(Xt_name), transducer_fname = 'transCSSR_results/{}+{}.dot'.format(Xt_name, Yt_name), axs = axs, ays = ays, inf_alg = 'transCSSR')\n\nfor cur_ind in range(1, ind_go_to):\n\tcurX = stringX[:cur_ind]\n\tcurY = stringY[:cur_ind-1]\n\n\tprint((cur_ind, curX, curY + '*', preds_all[cur_ind-1, :].tolist(), possible_states_all[cur_ind-1, :].tolist()))\n\nfiltered_states, filtered_probs, stringY_pred = filter_and_predict(stringX, stringY, epsilon, invepsilon, morph_by_state, axs, ays, e_symbols, L_max, memoryless = False)\n\nprint_go_to = 40\n\nprint((\"\\n\\nFirst {} predictions.\".format(print_go_to)))\nfor ind in range(print_go_to):\n\tprint((filtered_probs[ind], preds_all[ind, 1]))\n\nprint((\"\\n\\nLast {} predictions.\".format(print_go_to)))\nfor ind in 
range(preds_all.shape[0] - print_go_to, preds_all.shape[0]):\n\tprint((filtered_probs[ind], preds_all[ind, 1]))\n\nimport matplotlib.pyplot as plt\n\nplt.figure()\nplt.plot(filtered_probs[:, 1], label = 'Using filter_and_predict')\nplt.plot(preds_all[:, 1], label = 'Using filter_and_pred_probs')\nplt.xlim([0, 1000])\nplt.legend()\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ds-praveenkumar/m5-accuracy-prediction | [
"20255adc95c3e0fe6c6acec9fd16ac88c6e95908"
] | [
"src/models/build_model.py"
] | [
"# github link: https://github.com/ds-praveenkumar/kaggle\n# Author: ds-praveenkumar\n# file: forcasting/build_model.py/\n# Created by ds-praveenkumar at 13-06-2020 02 09\n# feature:\n\nimport os\nimport psutil\nfrom fbprophet import Prophet\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom src.utility.timeit import timeit\n\nROOT_DIR = Path(__file__).parent.parent.parent\nprint('ROOT_DIR:', ROOT_DIR)\n\n@timeit\ndef us_public_holidays():\n ny = pd.DataFrame({'holiday': \"New Year's Day\", 'ds': pd.to_datetime(['2016-01-01', '2017-01-01'])})\n mlk = pd.DataFrame(\n {'holiday': 'Birthday of Martin Luther King, Jr.', 'ds': pd.to_datetime(['2016-01-18', '2017-01-16'])})\n wash = pd.DataFrame({'holiday': \"Washington's Birthday\", 'ds': pd.to_datetime(['2016-02-15', '2017-02-20'])})\n mem = pd.DataFrame({'holiday': 'Memorial Day', 'ds': pd.to_datetime(['2016-05-30', '2017-05-29'])})\n ind = pd.DataFrame(\n {'holiday': 'Independence Day', 'ds': pd.to_datetime(['2015-07-04', '2016-07-04', '2017-07-04'])})\n lab = pd.DataFrame({'holiday': 'Labor Day', 'ds': pd.to_datetime(['2015-09-07', '2016-09-05', '2017-09-04'])})\n col = pd.DataFrame({'holiday': 'Columbus Day', 'ds': pd.to_datetime(['2015-10-12', '2016-10-10', '2017-10-09'])})\n vet = pd.DataFrame({'holiday': \"Veteran's Day\", 'ds': pd.to_datetime(['2015-11-11', '2016-11-11', '2017-11-11'])})\n thanks = pd.DataFrame({'holiday': 'Thanksgiving Day', 'ds': pd.to_datetime(['2015-11-26', '2016-11-24'])})\n christ = pd.DataFrame({'holiday': 'Christmas', 'ds': pd.to_datetime(['2015-12-25', '2016-12-25'])})\n inaug = pd.DataFrame({'holiday': 'Inauguration Day', 'ds': pd.to_datetime(['2017-01-20'])})\n us_public_holidays = pd.concat([ny, mlk, wash, mem, ind, lab, col, vet, thanks, christ, inaug])\n return us_public_holidays\n\n\ndef is_nfl_season(ds):\n date = pd.to_datetime(ds)\n return (date.month > 8 or date.month < 2)\n\n\ndef nfl_sunday(ds):\n date = pd.to_datetime(ds)\n if date.weekday() == 6 and (date.month > 8 or date.month < 2):\n return 1\n else:\n return 0\n\n@timeit\ndef build_model():\n df = pd.read_csv('H:\\\\forcasting\\\\data\\\\training\\\\10655.csv')\n df['y'] = np.log1p(df.y.astype(float) + 1)\n print(df)\n model = Prophet(\n interval_width=0.95,\n changepoint_prior_scale=0.15,\n daily_seasonality=True,\n holidays=us_public_holidays(),\n\n yearly_seasonality=True,\n weekly_seasonality=True,\n seasonality_mode='multiplicative'\n )\n model.add_seasonality(\n name='weekly', period=7, fourier_order=3, prior_scale=0.1)\n\n\n df['nfl_sunday'] = df['ds'].apply(nfl_sunday)\n\n print(df)\n model.add_regressor('nfl_sunday')\n model.add_country_holidays(country_name='US')\n #save model\n filename = 'prophet_1.0.pkl'\n root = os.path.join(ROOT_DIR,'models')\n print(ROOT_DIR)\n path = os.path.join(root,filename)\n\n # with open(path, \"wb\") as f:\n # pickle.dump(model, f)\n print(f\"model saved at: {path}\")\n\n model.fit(df)\n future = model.make_future_dataframe(periods=28)\n future['nfl_sunday'] = future['ds'].apply(nfl_sunday)\n forecast = model.predict(future)\n print(forecast[-28:])\n\n\n\nif __name__ == '__main__':\n process = psutil.Process(os.getpid())\n build_model()\n print('Memory Usage(MB):',process.memory_info()[0] / float(2 ** 20))"
] | [
[
"pandas.concat",
"pandas.to_datetime",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Spiilgriim/nnexpy | [
"f8e419598ef94bebb532eb32ccaeeb48a3edfb5e"
] | [
"nnexpy/network_generator.py"
] | [
"class NetworkGenerator(object):\n def build_model(self, *args, **kwargs):\n import tensorflow as tf\n depth = kwargs.get('depth', 1)\n input_shape = kwargs.get('input_shape', (2,))\n width = kwargs.get('width', 8)\n activation = kwargs.get('activation', 'relu')\n\n model = tf.keras.Sequential()\n\n model.add(tf.keras.layers.Dense(8, input_dim=input_shape[0], activation=activation,\n kernel_initializer='he_uniform'))\n for _ in range(depth):\n model.add(tf.keras.layers.Dense(8, activation=activation))\n model.add(tf.keras.layers.Dense(1, activation='sigmoid'))\n return model\n\n def train_and_save(self, *args, **kwargs):\n import tensorflow as tf\n tf.compat.v1.disable_eager_execution()\n\n model = kwargs.get('model', None)\n epoch_number = kwargs.get('epoch_number', 100)\n data = kwargs.get('data', None)\n label = kwargs.get('label', None)\n save_path = kwargs.get('save_path', './model.h5')\n callbacks = kwargs.get('callbacks', None)\n batch_size = kwargs.get('batch_size', 10)\n loss = kwargs.get('loss', 'sparse_categorical_crossentropy')\n\n model.summary()\n model.compile(optimizer=\"adam\",\n loss=loss, metrics=['accuracy'])\n model.fit(data, label, validation_split=0.2, batch_size=batch_size,\n epochs=epoch_number, shuffle=True, verbose=2, callbacks=callbacks)\n model.save(save_path)\n import gc\n del model\n gc.collect()\n tf.keras.backend.clear_session()\n tf.compat.v1.reset_default_graph()\n\n def full_net_combined(self, depth, input_shape, mypath, epoch_number, data, label):\n import tensorflow as tf\n tf.compat.v1.disable_eager_execution()\n model = self.build_model(\n depth=depth, input_shape=input_shape, width=8, activation='relu')\n csv = tf.keras.callbacks.CSVLogger(\n mypath + str(depth) + 'layer.csv', separator=',', append=False)\n self.train_and_save(model=model, epoch_number=epoch_number, data=data, label=label, save_path=mypath +\n str(depth) + 'layer.h5', batch_size=64, loss=\"binary_crossentropy\", callbacks=[csv])\n import gc\n del model\n gc.collect()\n tf.keras.backend.clear_session()\n tf.compat.v1.reset_default_graph()\n"
] | [
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Sequential",
"tensorflow.keras.backend.clear_session",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.reset_default_graph"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MartinCooke/jocular | [
"635816d4ef6aa6ea75187137e25386dad2d551e9",
"635816d4ef6aa6ea75187137e25386dad2d551e9"
] | [
"jocular/stretch.py",
"jocular/calcs.py"
] | [
"''' Various stretch functions. Easy to add more. Room for refinement,\n methinks.\n'''\n\nimport numpy as np\n\ndef stretch(x, method='linear', param=None, NR=0, background=None):\n\n # if no noise reduction just use stretch alone\n if (NR <= 0) or (background is None):\n return stretch_main(x, method=method, param=param)\n\n else:\n # get stretched data and lightly suppress low end\n y = stretch_main(x, method=method, param=param)\n hyper_param = 1 - .1 * (NR / 100)\n return y * stretch_main(x, method='hyper', param=hyper_param)\n\ndef stretch_main(x, method='linear', param=None):\n\n if method == 'linear':\n return x\n\n if method == 'hyper':\n d = .02\n c = d * (1 + d - param)\n return (1 + c) * (x / (x + c))\n\n if method == 'log':\n c = param * 200\n return np.log(c*x + 1) / np.log(c + 1)\n\n if method == 'asinh':\n # c = param * 250\n c = param * 2000\n return np.arcsinh(c*x) / np.arcsinh(c + .0000001)\n\n if method == 'gamma':\n # with noise reduction, linear from x=0-a, with slope s\n y = x.copy()\n # g = .5 - .5 * param\n # g = .75 - .75 * param\n g = 1 - param\n a0 = .01\n s = g / (a0 * (g - 1) + a0 ** (1 - g))\n d = (1 / (a0 ** g * (g - 1) + 1)) - 1\n y[x < a0] = x[x < a0] * s\n y[x >= a0] = (1 + d) * (x[x >= a0] ** g) - d\n return y\n \n else:\n return x\n\n",
"''' Various astro calcs mainly based on Meuss. \n'''\n\nimport numpy as np\nimport math\nimport time\nfrom datetime import datetime\n\ndef julian_date(when):\n\t# from Meuss p 61; 'when' is a datetime object\n\n\ty = when.year\n\tm = when.month\n\td = when.day + when.hour/24 + when.minute/(24*60) + when.second/(24*3600)\n\n\tif m < 3:\n\t\ty -= 1\n\t\tm += 12\n\n\ta = int(y / 100)\n\n\tif y >= 1582 and m >= 10:\n\t\t# Gregorian\n\t\ta = int(y/100)\n\t\tb = 2 - a + int(a / 4)\n\telse: \n\t\t# Julian\n\t\tb = 0\n\n\tjd = int(365.25 * (y + 4716)) + int(30.6001 * (m + 1)) + d + b - 1524.5\n\treturn jd\n\n\ndef to_range(x, d):\n\t# reduce x to range 0-d by adding or subtracting multiples of d\n\tif x < 0:\n\t\treturn x - int((x / d) - 1) * d\n\telse:\n\t\treturn x - int((x / d)) * d\n\ndef local_sidereal_time(when, longitude):\n\t# direct method of Meuss p87\n\n\t# when must be in UT\n\tjd = julian_date(when)\n\tt = (jd - 2451545.0) / 36525.0\n\tmst = 280.46061837 + 360.98564736629 * (jd - 2451545.0) + .000387933 * t**2 - t**3 / 38710000\n\n\t# convert to 0-360\n\tmst = to_range(mst, 360)\n\n\t# convert from Greenwich to local\n\tlst = mst + longitude\n\n\treturn lst\n\ndef sun_altitude(when, latitude, longitude):\n\t# Meuss p163+\n\n\tjd = julian_date(when)\n\trads = math.pi / 180.\n\n\n\tt = (jd - 2451545.0) / 36525.0\n\tL0 = 280.46646 + 36000.76983 * t + 0.0003032 * t * t\n\tL0 = to_range(L0, 360)\n\tM = 357.52911 + 35999.05029 * t - 0.0001537 * t * t\n\t#e = 0.016708634 - 0.000042037 * t - 0.0000001267 * t * t\n\tC = (1.914602 - 0.004817 * t - 0.000014 * t * t) * np.sin(M * rads) + \\\n\t\t(0.019993 - 0.000101 * t) * np.sin(2 * M * rads) + \\\n\t\t0.000289 * np.sin(3 * M * rads)\n\tlong_sun = L0 + C\n\t#v = M + C\n\t# R = (1.000001018 * (1 - e * e)) / (1 + e * np.cos(v * rads))\n\tsigma = 125.04 - 1934.136 * t\n\tlam = long_sun - 0.00569 - 0.00478 * np.sin(sigma * rads)\n\tep = 23 + (26/60) + (21.448/3600) - (46.815*t + 0.00059 * t**2 - 0.001813*t**3) / 3600\n\tep_corr = ep + 0.00256 * np.cos(sigma * rads)\n\tra = np.arctan2(np.cos(ep_corr * rads) * np.sin(lam * rads), np.cos(lam * rads)) / rads\n\tra = to_range(ra, 360)\n\tdec = np.arcsin(np.sin(ep_corr * rads) * np.sin(lam * rads)) / rads\n\n\t# now convert to locale\n\n\tts = time.time()\n\tutc_offset = (datetime.fromtimestamp(ts) - datetime.utcfromtimestamp(ts)).total_seconds() / 3600.0\n\tlst = local_sidereal_time(when, longitude)\n\tlat = latitude * rads\n\tH = (-utc_offset*15 + lst - ra) * rads\n\talt = np.arcsin(np.sin(lat) * np.sin(dec * rads) + np.cos(lat) * np.cos(dec * rads) * np.cos(H)) / rads\n\n\treturn alt\n\n"
] | [
[
"numpy.log",
"numpy.arcsinh"
],
[
"numpy.cos",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
juvilius/mathchem-package | [
"ca56cb03e6ccdb47b3dfc382ca36b0a00d3e28b9"
] | [
"mathchem/mathchem.py"
] | [
"import numpy as np\n\n\nclass Mol():\n r\"\"\"\n Molecule.\n \"\"\"\n __g6_string = ''\n # Adjacency matrix\n __A = []\n # Incidence matrix\n __B = []\n # Laplacian matrix\n __L = []\n # Normalized laplacian matrix\n __NL = []\n # Signless laplacian matrix\n __Q = []\n # Distance matrix\n __D = []\n # Resistance Distance matrix\n __RD = []\n\n __Order = 0\n __Edges = []\n\n __Sage_graph = None\n __NX_graph = None\n\n __Degrees = []\n\n __Spectrum = []\n __Laplacian_spectrum = []\n __Distance_spectrum = []\n __Norm_laplacian_spectrum = []\n __Signless_laplacian_spectrum = []\n __RD_spectrum = []\n\n __Is_connected = None\n # Switch it to False when we know that the graph is connected. Useful for big calculations\n __Check_connectedness = True\n\n def _reset_(self):\n \"\"\" Reset all attributes \"\"\"\n self.__g6_string = ''\n # Adjacency matrix\n self.__A = []\n # Incidence matrix\n self.__B = []\n # Laplacian matrix\n self.__L = []\n # Normalized laplacian matrix\n self.__NL = []\n # Signless laplacian matrix\n self.__Q = []\n # Distance matrix\n self.__D = []\n # Resistance Distance matrix\n self.__RD = []\n\n self.__Order = 0\n self.__Edges = []\n\n self.__Sage_graph = None\n self.__NX_graph = None\n\n self.__Degrees = []\n\n self.__Spectrum = []\n self.__Laplacian_spectrum = []\n self.__Distance_spectrum = []\n self.__Norm_laplacian_spectrum = []\n self.__Signless_laplacian_spectrum = []\n self.__RD_spectrum = []\n\n self.__Is_connected = None\n\n # allow to set structure from somewhere\n # used in utilites\n\n def _set_A(self, A):\n self.__A = A\n\n def _set_Edges(self, edges):\n self.__Edges = edges\n\n def _set_Order(self, order):\n self.__Order = order\n\n # native method to initialize Mol class is to provide g6 string\n def __init__(self, string=None, check_connectedness=True):\n \"\"\" Molecular graph class \"\"\"\n self.__Check_connectedness = check_connectedness\n if string != None:\n if string[0] == '>':\n if string.startswith('>>graph6<<'):\n string = string[10:]\n elif string.startswith('>>sparse6<<'):\n string = string[11:]\n\n if string[0] == ':':\n self.read_s6(string)\n else:\n self.read_g6(string)\n\n def __repr__(self):\n if self.__A != None:\n return 'Molecular graph on ' + str(\n self.__Order) + ' vertices and ' + str(self.size()) + ' edges'\n return 'Empty Molecular graph'\n\n def __len__(self):\n if self.__A != None: return len(self.__A)\n else: return 0\n\n def set_check_connectedness(self, c):\n \"\"\" Switch on/off of checking connectedness for the graph. 
Might be useful in batch calculations to economy time.\n args: c (True/False)\n \"\"\"\n self.check_connectedness = c\n\n def g6_string(self):\n \"\"\" Return a graph6 string representation of the graph\n \n Alias: graph6_string \"\"\"\n return self.__g6_string\n\n # alias like in Sage:\n graph6_string = g6_string\n\n def order(self):\n \"\"\" Return number of vertices \"\"\"\n return self.__Order\n\n # alias for order\n n = order\n\n def edges(self):\n \"\"\" Return list of edges \"\"\"\n return self.__Edges\n\n def size(self):\n \"\"\" Return number of edges\"\"\"\n return len(self.__Edges)\n\n # alias for size\n m = size\n\n def vertices(self):\n \"\"\" Return list of vertices \"\"\"\n return range(self.__Order)\n\n def sage_graph(self):\n \"\"\" Return Sage Graph object \"\"\"\n if self.__Sage_graph is None: self._init_sage_graph_()\n return self.__Sage_graph\n\n def NX_graph(self):\n \"\"\" Return NetworkX graph object \"\"\"\n if self.__NX_graph is None:\n import networkx as nx\n self.__NX_graph = nx.Graph(self.__Edges)\n return self.__NX_graph\n\n nx_graph = NX_graph\n\n def _init_sage_graph_(self):\n \"\"\" Initialize SAGE graph from Adjacency matrix\"\"\"\n from sage.graphs.graph import Graph\n self.__Sage_graph = Graph(self.__Edges)\n\n def read_g6(self, s):\n \"\"\" Initialize graph from graph6 string \"\"\"\n\n def graph_bit(pos, off):\n return ((ord(s[off + 1 + pos / 6]) - 63) & (2**(5 - pos % 6))) != 0\n\n if s.startswith('>>graph6<<'):\n s = s[10:]\n # reset all the attributes before changing the structure\n self._reset_()\n\n n = ord(s[0]) - 63\n off = 0\n if n == 63:\n if ord(s[1]) - 63 != 63:\n n = ((ord(s[1]) - 63) << 12) + (\n (ord(s[2]) - 63) << 6) + ord(s[3]) - 63\n\n off = 3\n else:\n n = ((ord(s[2]) - 63) << 30) + ((ord(s[3]) - 63) << 24) + (\n (ord(s[4]) - 63) << 18) + ((ord(s[5]) - 63) << 12) + (\n (ord(s[6]) - 63) << 6) + ord(s[7]) - 63\n\n off = 7\n\n self.__Order = n\n\n self.__A = [[0 for col in range(n)] for row in range(n)]\n\n i = 0\n j = 1\n\n self.__Edges = []\n for x in range(n * (n - 1) / 2):\n if graph_bit(x, off):\n self.__A[i][j] = 1\n self.__A[j][i] = 1\n self.__Edges.append((i, j))\n if j - i == 1:\n i = 0\n j += 1\n else:\n i += 1\n\n self.__g6_string = s\n\n read_graph6 = read_g6\n\n def read_s6(self, s):\n \"\"\" Initialize graph from sparse6 string \"\"\"\n\n def graph_bit(pos, off):\n return ((ord(s[off + 1 + pos / 6]) - 63) & (2**(5 - pos % 6))) != 0\n\n if s.startswith('>>sparse6<<'):\n s = s[11:]\n if not s[0] == ':':\n print('This is not a sparse6 format!')\n return False\n\n # reset all the attributes before changing the structure\n self._reset_()\n\n s = s[1:]\n n = ord(s[0]) - 63\n off = 0\n if n == 63:\n if ord(s[1]) - 63 != 63:\n n = ((ord(s[1]) - 63) << 12) + (\n (ord(s[2]) - 63) << 6) + ord(s[3]) - 63\n\n off = 3\n else:\n n = ((ord(s[2]) - 63) << 30) + ((ord(s[3]) - 63) << 24) + (\n (ord(s[4]) - 63) << 18) + ((ord(s[5]) - 63) << 12) + (\n (ord(s[6]) - 63) << 6) + ord(s[7]) - 63\n\n off = 7\n\n self.__Order = n\n\n k = 1\n while 1 << k < n:\n k += 1\n\n data = s[off + 1:]\n\n #print n,k\n #print data\n\n def parseData():\n \"\"\"Return stream of pairs b[i], x[i] for sparse6 format.\"\"\"\n chunks = iter(data)\n d = None # partial data word\n dLen = 0 # how many unparsed bits are left in d\n\n while 1:\n if dLen < 1:\n d = ord(next(chunks)) - 63\n dLen = 6\n dLen -= 1\n b = (d >> dLen) & 1 # grab top remaining bit\n\n x = d & ((1 << dLen) - 1) # partially built up value of x\n xLen = dLen # how many bits included so far in x\n while 
xLen < k: # now grab full chunks until we have enough\n d = ord(next(chunks)) - 63\n dLen = 6\n x = (x << 6) + d\n xLen += 6\n x = (x >> (xLen - k)) # shift back the extra bits\n dLen = xLen - k\n yield b, x\n\n self.__A = [[0 for col in range(n)] for row in range(n)]\n\n self.__Edges = []\n\n v = 0\n\n for b, x in parseData():\n if b: v += 1\n if x >= n:\n break # padding with ones can cause overlarge number here\n elif x > v:\n v = x\n else:\n self.__A[x][v] = 1\n self.__A[v][x] = 1\n self.__Edges.append((x, v))\n\n self.__g6_string = ''\n\n read_sparse6 = read_s6\n\n def read_matrix(self, matrix):\n \"\"\"Initialize graph from adjacency matrix including numpy.matrix\"\"\"\n if type(matrix) == np.matrix:\n matrix = matrix.astype(int).tolist()\n self._reset_()\n self.__Order = len(matrix)\n self.__A = matrix\n\n for i in range(self.__Order):\n for j in range(i):\n if matrix[i][j] == 1:\n self.__Edges.append((i, j))\n\n def read_edgelist(self, edges):\n \"\"\"Initialize graph from list of edges.\n Example:\n m = mathchem.Mol()\n m.read_edgelist( [(4,3),(3,1),(1,4))] )\"\"\"\n # first relabel nodes\n nodes = []\n for e in edges:\n if not e[0] in nodes: nodes.append(e[0])\n if not e[1] in nodes: nodes.append(e[1])\n self._reset_()\n self.__Order = len(nodes)\n d = dict(zip(nodes, range(len(nodes))))\n self.__Edges = [(d[e[0]], d[e[1]]) for e in edges]\n\n self.__A = [[0 for col in range(self.__Order)]\n for row in range(self.__Order)]\n for i, j in self.__Edges:\n self.__A[i][j] = 1\n self.__A[j][i] = 1\n\n def write_dot_file(self, filename):\n\n f_out = open(filename, 'w')\n f_out.writelines('graph Mol {\\n')\n for (i, j) in self.edges():\n f_out.writelines(' ' + str(i) + ' -- ' + str(j) + ';\\n')\n f_out.writelines('}')\n f_out.close()\n\n #\n #\n # matrices\n #\n #\n\n def adjacency_matrix(self):\n \"\"\" Return Adjacency matrix\n \n Alias : A\n \"\"\"\n return self.__A\n\n A = adjacency_matrix\n\n def incidence_matrix(self):\n \"\"\" Return Incidence matrix \n \n Alias: B\n \"\"\"\n if self.__B == []:\n\n def func(u, v):\n col = [0] * self.__Order\n col[u] = 1\n col[v] = 1\n return col\n\n # apply func to each edge\n b = map(lambda e: func(e), self.edges())\n # transpose the result\n self.__B = map(list, zip(*b))\n return self.__B\n\n B = incidence_matrix\n\n def laplacian_matrix(self):\n \"\"\" Return Laplacian matrix\n \n L = D-A\n where D - matrix whose diagonal elements are the degrees of the corresponding vertices\n A - adjacency matrix\n \n Alias : L\n \"\"\"\n if self.__L == []:\n self.__L = np.diag(self.degrees()) - np.matrix(self.__A)\n return self.__L\n\n L = laplacian_matrix\n\n def signless_laplacian_matrix(self):\n \"\"\" Return Signless Laplacian matrix\n \n Q = D+A\n Alias : Q\n \"\"\"\n if self.__Q == []:\n\n self.__Q = np.diag(self.degrees()) + np.matrix(self.__A)\n return self.__Q\n\n Q = signless_laplacian_matrix\n\n def normalized_laplacian_matrix(self):\n \"\"\" Return Normalized Laplacian matrix\n \n NL = deg^(-1/2) * L * deg(1/2)\n Alias : NL\n \"\"\"\n ## TODO: check if we have zeros in degrees()\n if self.__NL == []:\n d1 = np.diag(np.power(self.degrees(), -.5))\n d2 = np.diag(np.power(self.degrees(), .5))\n self.__NL = d1 * self.laplacian_matrix() * d2\n return self.__NL\n\n NL = normalized_laplacian_matrix\n\n def distance_matrix(self):\n \"\"\" Return Distance matrix\n \n Alias : D\n \"\"\"\n if self.__Order == 0: return []\n\n if self.__D == []:\n # use here float only for using np.inf - infinity\n A = np.matrix(self.__A, dtype=float)\n n, m = A.shape\n I = 
np.identity(n)\n A[A == 0] = np.inf # set zero entries to inf\n A[I == 1] = 0 # except diagonal which should be zero\n for i in range(n):\n r = A[i, :]\n A = np.minimum(A, r + r.T)\n self.__D = np.matrix(A, dtype=int)\n\n return self.__D\n\n D = distance_matrix\n\n def reciprocal_distance_matrix(self):\n \"\"\" Return Reciprocal Distance matrix \"\"\"\n\n rd = np.matrix(self.distance_matrix(), dtype=float)\n # probably there exists more python way to apply a function to each element of matrix\n for i in range(self.__Order):\n for j in range(self.__Order):\n if not rd[i, j] == 0: rd[i, j] = 1 / rd[i, j]\n\n return rd\n\n def resistance_distance_matrix(self):\n \"\"\" Return Resistance Distance matrix \"\"\"\n\n if not self.is_connected() or self.__Order == 0:\n return False\n\n if self.__RD == []:\n #from numpy import linalg as la\n n = self.__Order\n s = n * self.laplacian_matrix() + 1\n sn = n * np.linalg.inv(s)\n RD = np.ndarray((n, n))\n for i in range(n):\n for j in range(n):\n RD[i, j] = np.float64(\n np.longdouble(sn[i, i]) + np.longdouble(sn[j, j]) -\n 2 * np.longdouble(sn[i, j]))\n self.__RD = RD\n\n return self.__RD\n\n def seidel_matrix(self):\n \"\"\" Return Seidel matrix \n S = J - I - 2A\n\n Alias: S\n \"\"\"\n n = self.__Order\n return np.ones((n, n)) - np.identity(n) - 2 * np.matrix(self.__A)\n\n S = seidel_matrix\n\n #\n #\n # Graph invariants\n #\n #\n\n def diameter(self):\n \"\"\" Return diameter of the graph\n \n Diameter is the maximum value of distance matrix\n \"\"\"\n if self.__Order == 0: return 0\n return self.distance_matrix().max()\n\n def degrees(self):\n \"\"\" Return degree of the vertex\n \n Alias : deg\n \"\"\"\n if self.__Degrees == []:\n self.__Degrees = map(lambda r: sum(r), self.__A)\n ## calcuate degrees for all vertices\n return self.__Degrees\n\n deg = degrees\n\n def eccentricity(self):\n \"\"\" Eccentricity of the graph for all its vertices\"\"\"\n if self.__Order == 0: return None\n\n return self.distance_matrix().max(axis=0).tolist()[0]\n\n def distances_from_vertex(self, v):\n \"\"\" Return list of all distances from a given vertex to all others\"\"\"\n # used to test graph where it is connected or not\n seen = {}\n level = 0\n nextlevel = [v]\n while nextlevel:\n thislevel = nextlevel\n nextlevel = []\n for v in thislevel:\n if v not in seen:\n seen[v] = level\n nb = [\n i\n for (i, j) in zip(range(len(self.__A[v])), self.__A[v])\n if j != 0\n ]\n nextlevel.extend(nb)\n #if (cutoff is not None and cutoff <= level): break\n level = level + 1\n return seen\n\n def is_connected(self):\n \"\"\" Return True/False depends on the graph is connected or not \"\"\"\n if self.__Order == 0: return False\n\n if not self.__Check_connectedness: return True\n\n if self.__Is_connected is None:\n # we take vertex 0 and check whether we can reach all other vertices\n self.__Is_connected = len(\n self.distances_from_vertex(0)) == self.order()\n return self.__Is_connected\n\n #\n #\n # Graph spectra\n #\n #\n\n def spectrum(self, matrix=\"adjacency\"):\n r\"\"\" Spectrum of the graph\n \n args:\n matrix (str or matrix)\n 'adjacency' or 'A' : default\n 'laplacian' or 'L'\n 'distance' or 'D'\n 'signless_laplacian' or 'Q'\n 'normalized_laplacian' or 'NL'\n 'resistance_distance' or 'RD'\n 'reciprocal_distance'\n\n arbitrary matrix\n \n \"\"\"\n\n from numpy import linalg as la\n\n if type(matrix) is str:\n\n if self.__Order == 0: return []\n\n if matrix == \"adjacency\" or matrix == \"A\":\n if self.__Spectrum == []:\n s = la.eigvalsh(self.__A).tolist()\n 
s.sort(reverse=True)\n self.__Spectrum = s\n return self.__Spectrum\n\n elif matrix == \"laplacian\" or matrix == \"L\":\n if self.__Laplacian_spectrum == []:\n s = la.eigvalsh(self.laplacian_matrix()).tolist()\n s.sort(reverse=True)\n self.__Laplacian_spectrum = map(\n lambda x: x if x > 0 else 0, s)\n return self.__Laplacian_spectrum\n\n elif matrix == \"distance\" or matrix == \"D\":\n if self.__Distance_spectrum == []:\n s = la.eigvalsh(self.distance_matrix()).tolist()\n s.sort(reverse=True)\n self.__Distance_spectrum = s\n return self.__Distance_spectrum\n\n elif matrix == \"signless_laplacian\" or matrix == \"Q\":\n if self.__Signless_laplacian_spectrum == []:\n ## TODO: check if we have zeros in degrees()\n s = la.eigvalsh(self.signless_laplacian_matrix()).tolist()\n s.sort(reverse=True)\n self.__Signless_laplacian_spectrum = map(\n lambda x: x if x > 0 else 0, s)\n return self.__Signless_laplacian_spectrum\n\n elif matrix == \"normalized_laplacian\" or matrix == \"NL\":\n if self.__Norm_laplacian_spectrum == []:\n ## TODO: check if we have zeros in degrees()\n s = la.eigvalsh(\n self.normalized_laplacian_matrix()).tolist()\n s.sort(reverse=True)\n self.__Norm_laplacian_spectrum = s\n return self.__Norm_laplacian_spectrum\n\n elif matrix == \"resistance_distance\" or matrix == \"RD\":\n if self.__RD_spectrum == []:\n s = la.eigvalsh(self.resistance_distance_matrix()).tolist()\n s.sort(reverse=True)\n self.__RD_spectrum = s\n return self.__RD_spectrum\n # NO CACHE\n elif matrix == \"reciprocal_distance\":\n s = la.eigvalsh(self.reciprocal_distance_matrix()).tolist()\n s.sort(reverse=True)\n return s\n else:\n return False\n\n # if the parameter is an arbitrary matrix\n # DEPRECATED:\n # use mathchem.spectrum(matrix) for arbitrary matrices\n #\n else:\n s = la.eigvalsh(matrix).tolist()\n s.sort(reverse=True)\n return s\n\n # for arbitrary matrices use:\n # mathchem.spectral_moment(matrix)\n def spectral_moment(self, k, matrix=\"adjacency\"):\n \"\"\" Return k-th spectral moment\n \n parameters: matrix - see spectrum help\n \"\"\"\n return np.sum(np.power(self.spectrum(matrix), k))\n\n # for arbitrary matrices use:\n # mathchem.spectral_radius(matrix)\n def spectral_radius(self, matrix=\"adjacency\"):\n s = self.spectrum(matrix)\n return max(abs(s[0]), abs(s[len(s) - 1]))\n\n # for arbitrary matrices use:\n # mathchem.energy(matrix)\n def energy(self, matrix=\"adjacency\"):\n \"\"\" Return energy of the graph \n \n parameters: matrix - see spectrum help\n \"\"\"\n if self.__Order == 0: return False\n s = self.spectrum(matrix)\n a = np.sum(s, dtype=np.longdouble) / len(s)\n return np.float64(\n np.sum(map(lambda x: abs(x - a), s), dtype=np.longdouble))\n\n def incidence_energy(self):\n \"\"\" Return incidence energy (IE)\n \n Incidence energy is the sum of singular values of incidence matrix\n \"\"\"\n if self.__Order == 0: return False\n from numpy.linalg import svd\n return np.float64(\n np.sum(svd(self.incidence_matrix(), compute_uv=False),\n dtype=np.longdouble))\n\n #\n #\n # Chemical indices\n #\n #\n\n def zagreb_m1_index(self):\n \"\"\" Zagreb M1 Index \"\"\"\n return sum(map(lambda d: d**2, self.degrees()))\n\n def zagreb_m2_index(self):\n \"\"\" Zagreb M2 Index \n \n The molecular graph must contain at least one edge, otherwise the function Return False\n Zagreb M2 Index is a special case of Connectivity Index with power = 1\"\"\"\n return sum(\n map(lambda e1, e2: self.degrees()[e1] * self.degrees()[e2],\n self.edges()))\n\n def zagreb_m1_coindex(self):\n \"\"\" Zagreb M1 
Coindex \"\"\"\n return 2 * self.size() * (self.__Order - 1) - self.zagreb_m1_index()\n\n def zagreb_m2_coindex(self):\n \"\"\" Zagreb M2 Coindex \"\"\"\n return 2 * (self.size()**\n 2) - self.zagreb_m2_index() - self.zagreb_m1_index() * .5\n\n def connectivity_index(self, power):\n \"\"\" Connectivity index (R)\"\"\"\n E = self.edges() # E - all edges\n if len(E) == 0: return 0\n return np.float64(\n np.sum(map(\n lambda e1, e2:\n (self.degrees()[e1] * self.degrees()[e2])**power, E),\n dtype=np.longdouble))\n\n def augmented_zagreb_index(self):\n \"\"\" Augmented Zagreb Index\"\"\"\n E = self.edges() # E - all edges\n d = self.degrees()\n if len(E) < 2: return 0\n return np.float64(\n np.sum(map(\n lambda e1, e2: (np.longdouble(d[e1] * d[e2]) /\n (d[e1] + d[e2] - 2))**3, E),\n dtype=np.longdouble))\n\n def sum_connectivity_index(self):\n \"\"\" Sum-Connectivity index\"\"\"\n E = self.edges() # E - all edges\n if len(E) == 0: return 0\n return np.float64(\n np.sum(map(\n lambda e1, e2:\n (self.degrees()[e1] + self.degrees()[e2])**(-0.5), E),\n dtype=np.longdouble))\n\n def geometric_arithmetic_index(self):\n \"\"\" Geometric-Arithmetic index\"\"\"\n E = self.edges() # E - all edges\n if len(E) == 0: return 0\n return np.float64(\n np.sum(map(\n lambda e1, e2: 2.0 * np.sqrt(self.degrees()[e1] * self.degrees(\n )[e2]) / (self.degrees()[e1] + self.degrees()[e2]), E),\n dtype=np.longdouble))\n\n def eccentric_connectivity_index(self):\n \"\"\" Eccentric Connectivity Index \n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n return sum(map(lambda a, b: a * b, self.degrees(),\n self.eccentricity()))\n\n def randic_index(self):\n \"\"\" Randic Index \n \n The molecular graph must contain at least one edge, otherwise the function Return False\n Randic Index is a special case of Connectivity Index with power = -1/2\"\"\"\n return self.connectivity_index(-0.5)\n\n def atom_bond_connectivity_index(self):\n \"\"\" Atom-Bond Connectivity Index (ABC) \"\"\"\n s = np.longdouble(0) # summator\n for u, v in self.edges():\n d1 = np.float64(self.degrees()[u])\n d2 = np.float64(self.degrees()[v])\n s += np.longdouble(((d1 + d2 - 2) / (d1 * d2))**.5)\n return np.float64(s)\n\n def estrada_index(self, matrix=\"adjacency\"):\n \"\"\" Estrada Index (EE) \n \n args:\n matrix -- see spectrum for help, default value is 'adjacency'\n \n There is an alias 'distance_estrada_index' for distance matrix\n \"\"\"\n return np.float64(\n np.sum(map(lambda x: np.exp(x.real), self.spectrum(matrix)),\n dtype=np.longdouble))\n\n def distance_estrada_index(self):\n \"\"\" Distance Estrada Index (DEE) \n \n Special case of Estrada index with distance matrix\n \"\"\"\n return self.estrada_index('distance')\n\n def degree_distance(self):\n \"\"\" Degree Distance (DD)\n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n dd = np.matrix(self.degrees()) * self.distance_matrix().sum(axis=1)\n return dd[0, 0]\n\n def reverse_degree_distance(self):\n \"\"\" Reverse Distance Degree (rDD)\n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n return 2 * (self.order() - 1) * len(\n self.edges()) * self.diameter() - self.degree_distance()\n\n def molecular_topological_index(self):\n \"\"\" (Schultz) Molecular Topological Index (MTI)\n \n The molecuar graph must be connected, otherwise the function Return 
False\"\"\"\n if not self.is_connected():\n return False\n # (A+D)*d\n\n A = np.matrix(self.__A)\n d = np.matrix(self.degrees())\n return np.float64(\n ((A + self.distance_matrix()) * d.T).sum(dtype=np.longdouble))\n\n def eccentric_distance_sum(self):\n \"\"\" Distance Sum\n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n return (self.eccentricity() * self.distance_matrix().sum(axis=1))[0, 0]\n\n # strange - it is slow ((\n def balaban_j_index(self):\n \"\"\" Balaban J index \n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n ds = self.distance_matrix().sum(axis=1)\n m = len(self.edges())\n k = (m / (m - self.__Order + 2.0))\n return np.float64(\n k *\n np.sum(map(lambda u, v: 1 / np.sqrt(\n (ds[u][0, 0] * ds[v][0, 0])), self.edges()),\n dtype=np.longdouble))\n\n def sum_balaban_index(self):\n \"\"\" Sum Balaban index \n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n ds = self.distance_matrix().sum(axis=1)\n m = len(self.edges())\n k = (m / (m - self.__Order + 2.0))\n return np.float64(\n k *\n np.sum(map(lambda u, v: 1 / np.sqrt(\n (ds[u][0, 0] + ds[v][0, 0])), self.edges()),\n dtype=np.longdouble))\n\n def kirchhoff_index(self):\n \"\"\" Kirchhoff Index (Kf)\n \n Kf = 1/2 * sum_i sum_j RD[i,j]\n Based on resistance distance matrix RD\n \n Alias: resistance\n \n The molecuar graph must be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n return np.float64(\n self.resistance_distance_matrix().sum(dtype=np.longdouble) / 2)\n\n resistance = kirchhoff_index\n\n def wiener_index(self):\n \"\"\" Wiener Index (W)\n \n W = 1/2 * sum_i sum_j D[i,j]\n where D is distance matrix\n The molecuar graph must be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n return self.distance_matrix().sum(dtype=np.float64) / 2\n\n def terminal_wiener_index(self):\n \"\"\" Calculate Terminal Wiener Index (TW)\n \n TW = Sum of all distances between pendent vertices (with degree = 1)\n \"\"\"\n if not self.is_connected(): return False\n s = 0\n for u in range(self.order()):\n if self.degrees()[u] != 1: continue\n for v in range(u + 1, self.order()):\n if self.degrees()[v] == 1:\n s = s + self.distance_matrix()[u, v]\n return s\n\n def reverse_wiener_index(self):\n \"\"\" Reverse Wiener Index (RW)\n \n RW = 1/2 * sum_i!=j ( d - D[i,j] )\n where D is distance matrix and d is diameter\n \n The molecuar graph must be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n # here we use formula: RW = 1/2 * n * (n-1) * d - W\n return self.diameter() * (\n self.__Order * (self.__Order - 1)) / 2 - self.wiener_index()\n\n def hyper_wiener_index(self):\n \"\"\" Hyper-Wiener Index (WW)\n \n WW = 1/2 * ( sum_ij d(i,j)^2 + sum_i_j d(i,j) )\n where D is distance matrix\n\n The molecuar graph must be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n return (\n np.power(self.distance_matrix(), 2).sum() +\n self.distance_matrix().sum()) / 4 # since we have symmetric matrix\n\n def harary_index(self):\n \"\"\" Harary Index (H)\n \n H = 1/2 sum_i sum_j Rd[i,j]\n where Rd is reciprocal distance matrix \n Rd[i,j] = 1 / D[i,j] for D[i,j] != 0\n Rd[i,j] = 0 otherwise\n\n The molecuar graph must 
be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n return np.float64(\n self.reciprocal_distance_matrix().sum(dtype=np.longdouble)) / 2\n\n def LEL(self):\n \"\"\" Return Laplacian-like energy (LEL) \"\"\"\n return np.float64(\n np.sum(map(lambda x: np.sqrt(x), self.spectrum('laplacian')),\n dtype=np.longdouble))\n\n def multiplicative_sum_zagreb_index(self):\n \"\"\" Log( Multiplicative Sum Zagreb index )\"\"\"\n d = self.degrees()\n return np.float64(\n np.sum(map(lambda u, v: np.log(np.float64(d[u] + d[v])),\n self.edges()),\n dtype=np.longdouble))\n\n def multiplicative_p2_zagreb_index(self):\n \"\"\"Calculates Log( Multiplicative P2 Zagreb index )\"\"\"\n d = self.degrees()\n return np.float64(\n np.sum(map(lambda u, v: np.log(np.float64(d[u] * d[v])),\n self.edges()),\n dtype=np.longdouble))\n\n def multiplicative_p1_zagreb_index(self):\n \"\"\"Calculates Log( Multiplicative P1 Zagreb index )\"\"\"\n d = self.degrees()\n return np.float64(\n np.sum(map(lambda v: np.log(np.float64(d[v]**2)), self.vertices()),\n dtype=np.longdouble))\n\n def szeged_index(self):\n \"\"\"Calculates Szeged index\"\"\"\n if not self.is_connected():\n return False\n s = 0\n D = self.distance_matrix()\n for u, v in self.edges():\n diff = D[u, :] - D[v, :]\n s += (diff > 0).sum() * (diff < 0).sum()\n return float(s)\n\n def revised_szeged_index(self):\n \"\"\"Calculates Revised Szeged index\"\"\"\n if not self.is_connected():\n return False\n s = 0.0\n D = self.distance_matrix()\n for u, v in self.edges():\n diff = D[u, :] - D[v, :]\n o = (diff == 0).sum()\n s += ((diff > 0).sum() + .5 * o) * ((diff < 0).sum() + .5 * o)\n return s\n\n def homo_lumo_index(self):\n \"\"\"Calculates HOMO-LUMO index\"\"\"\n if not self.is_connected():\n return False\n n = self.order()\n if n % 2 == 0:\n h = int(n / 2 -\n 1) # because array indices start from 0 instead of 1\n l = int(h + 1)\n return max([abs(self.spectrum()[h]), abs(self.spectrum()[l])])\n # else:\n h = int((n - 1) / 2)\n return abs(self.spectrum()[h])\n\n HL_index = homo_lumo_index\n\n # Adriatic indices\n\n # DEPRECATED\n # use mathchem.all_adriatic()\n\n def all_adriatic(self):\n \"\"\" Generate all possible parameters sets for adriatic indices\"\"\"\n r = []\n for p in [0, 1]:\n for i in [1, 2, 3]:\n for j in range(1, 9):\n if i == 3:\n for a in [0.5, 2]:\n r.append((p, i, j, a))\n elif i == 2 and j in range(1, 6):\n for a in [-1, -0.5, 0.5, 1, 2]:\n r.append((p, i, j, a))\n elif i == 2 or i == 1:\n for a in [0.5, 1, 2]:\n r.append((p, i, j, a))\n return r\n\n def adriatic_name(self, p, i, j, a):\n \"\"\" Return the name for given parameters of Adriatic indices\"\"\"\n #(j)\n name1 = {1:'Randic type ',\\\n 2:'sum ',\\\n 3:'inverse sum ', \\\n 4:'misbalance ', \\\n 5:'inverse misbalance ', \\\n 6:'min-max ', \\\n 7:'max-min ', \\\n 8:'symmetric division '}\n # (i,a)\n name2 = {(1, 0.5):'lor',\\\n (1,1):'lo', \\\n (1,2):'los', \\\n (2,-1):'in', \\\n (2, -0.5):'ir', \\\n (2, 0.5):'ro', \\\n (2,1):'', \\\n (2,2):'s', \\\n (3, 0.5):'ha', \\\n (3,2):'two'}\n #(p)\n name3 = {0: 'deg', 1: 'di'}\n\n return (name1[j] + name2[(i, a)] + name3[p])\n\n def _adriatic_entry_(self, du, dv, i, j, a):\n \"\"\" Return an individual edge contribution for Adriatic indices and matrices\"\"\"\n # phi(x,a)\n phi = {\n 1: lambda x, a: np.log(x)**a,\n 2: lambda x, a: x**a,\n 3: lambda x, a: a**x\n }\n # gamma (x,y)\n gamma = {\\\n 1: lambda x,y: x*y,\\\n 2: lambda x,y: x+y,\\\n 3: lambda x,y: 0 if x+y==0 else 1.0/(x+y),\\\n 4: 
lambda x,y: abs(x-y),\\\n 5: lambda x,y: 0 if x==y else 1.0/abs(x-y),\\\n 6: lambda x,y: 0 if max(x,y)==0 else min(x,y)/max(x,y),\\\n 7: lambda x,y: 0 if min(x,y)==0 else max(x,y)/min(x,y),\\\n 8: lambda x,y: 0 if x==0 or y==0 else x/y+y/x}\n\n return gamma[j](phi[i](du, a), phi[i](dv, a))\n\n def adriatic_matrix(self, p, i, j, a):\n \"\"\" Return the Adriatic matrix with given parameters\"\"\"\n\n if p == 0: d = self.degrees()\n else: d = self.distance_matrix().sum(axis=0).tolist()[0]\n\n AM = [[0] * self.order() for k in range(self.order())]\n\n for u, v in self.edges():\n AM[u][v] = AM[v][u] = self._adriatic_entry_(\n np.float64(d[u]), np.float64(d[v]), i, j, a)\n\n return AM\n\n def adriatic_index(self, p, i, j, a):\n \"\"\" Return the Adriatic index with given parameters\"\"\"\n\n if p == 0: d = self.degrees()\n else: d = self.distance_matrix().sum(axis=0).tolist()[0]\n\n func = lambda u, v: self._adriatic_entry_(np.float64(d[u]),\n np.float64(d[v]), i, j, a)\n return np.float64(np.sum(map(func, self.edges()), dtype=np.longdouble))\n\n # Adriatic indices by names\n\n def randic_type_lordeg_index(self):\n \"\"\" Adriatic index: Randic type lordeg index\"\"\"\n return self.adriatic_index(0, 1, 1, 0.5)\n\n def randic_type_lodeg_index(self):\n \"\"\" Adriatic index: Randic type lodeg index\"\"\"\n return self.adriatic_index(0, 1, 1, 1)\n\n def randic_type_losdeg_index(self):\n \"\"\" Adriatic index: Randic type losdeg index\"\"\"\n return self.adriatic_index(0, 1, 1, 2)\n\n def sum_lordeg_index(self):\n \"\"\" Adriatic index: sum lordeg index\"\"\"\n return self.adriatic_index(0, 1, 2, 0.5)\n\n def sum_lodeg_index(self):\n \"\"\" Adriatic index: sum lodeg index\"\"\"\n return self.adriatic_index(0, 1, 2, 1)\n\n def sum_losdeg_index(self):\n \"\"\" Adriatic index: sum losdeg index\"\"\"\n return self.adriatic_index(0, 1, 2, 2)\n\n def inverse_sum_lordeg_index(self):\n \"\"\" Adriatic index: inverse sum lordeg index\"\"\"\n return self.adriatic_index(0, 1, 3, 0.5)\n\n def inverse_sum_lodeg_index(self):\n \"\"\" Adriatic index: inverse sum lodeg index\"\"\"\n return self.adriatic_index(0, 1, 3, 1)\n\n def inverse_sum_losdeg_index(self):\n \"\"\" Adriatic index: inverse sum losdeg index\"\"\"\n return self.adriatic_index(0, 1, 3, 2)\n\n def misbalance_lordeg_index(self):\n \"\"\" Adriatic index: misbalance lordeg index\"\"\"\n return self.adriatic_index(0, 1, 4, 0.5)\n\n def misbalance_lodeg_index(self):\n \"\"\" Adriatic index: misbalance lodeg index\"\"\"\n return self.adriatic_index(0, 1, 4, 1)\n\n def misbalance_losdeg_index(self):\n \"\"\" Adriatic index: misbalance losdeg index\"\"\"\n return self.adriatic_index(0, 1, 4, 2)\n\n def inverse_misbalance_lordeg_index(self):\n \"\"\" Adriatic index: inverse misbalance lordeg index\"\"\"\n return self.adriatic_index(0, 1, 5, 0.5)\n\n def inverse_misbalance_lodeg_index(self):\n \"\"\" Adriatic index: inverse misbalance lodeg index\"\"\"\n return self.adriatic_index(0, 1, 5, 1)\n\n def inverse_misbalance_losdeg_index(self):\n \"\"\" Adriatic index: inverse misbalance losdeg index\"\"\"\n return self.adriatic_index(0, 1, 5, 2)\n\n def min_max_lordeg_index(self):\n \"\"\" Adriatic index: min-max lordeg index\"\"\"\n return self.adriatic_index(0, 1, 6, 0.5)\n\n def min_max_lodeg_index(self):\n \"\"\" Adriatic index: min-max lodeg index\"\"\"\n return self.adriatic_index(0, 1, 6, 1)\n\n def min_max_losdeg_index(self):\n \"\"\" Adriatic index: min-max losdeg index\"\"\"\n return self.adriatic_index(0, 1, 6, 2)\n\n def max_min_lordeg_index(self):\n 
\"\"\" Adriatic index: max-min lordeg index\"\"\"\n return self.adriatic_index(0, 1, 7, 0.5)\n\n def max_min_lodeg_index(self):\n \"\"\" Adriatic index: max-min lodeg index\"\"\"\n return self.adriatic_index(0, 1, 7, 1)\n\n def max_min_losdeg_index(self):\n \"\"\" Adriatic index: max-min losdeg index\"\"\"\n return self.adriatic_index(0, 1, 7, 2)\n\n def symmetric_division_lordeg_index(self):\n \"\"\" Adriatic index: symmetric division lordeg index\"\"\"\n return self.adriatic_index(0, 1, 8, 0.5)\n\n def symmetric_division_lodeg_index(self):\n \"\"\" Adriatic index: symmetric division lodeg index\"\"\"\n return self.adriatic_index(0, 1, 8, 1)\n\n def symmetric_division_losdeg_index(self):\n \"\"\" Adriatic index: symmetric division losdeg index\"\"\"\n return self.adriatic_index(0, 1, 8, 2)\n\n def randic_type_indeg_index(self):\n \"\"\" Adriatic index: Randic type indeg index\"\"\"\n return self.adriatic_index(0, 2, 1, -1)\n\n def randic_type_irdeg_index(self):\n \"\"\" Adriatic index: Randic type irdeg index\"\"\"\n return self.adriatic_index(0, 2, 1, -0.5)\n\n def randic_type_rodeg_index(self):\n \"\"\" Adriatic index: Randic type rodeg index\"\"\"\n return self.adriatic_index(0, 2, 1, 0.5)\n\n def randic_type_deg_index(self):\n \"\"\" Adriatic index: Randic type deg index\"\"\"\n return self.adriatic_index(0, 2, 1, 1)\n\n def randic_type_sdeg_index(self):\n \"\"\" Adriatic index: Randic type sdeg index\"\"\"\n return self.adriatic_index(0, 2, 1, 2)\n\n def sum_indeg_index(self):\n \"\"\" Adriatic index: sum indeg index\"\"\"\n return self.adriatic_index(0, 2, 2, -1)\n\n def sum_irdeg_index(self):\n \"\"\" Adriatic index: sum irdeg index\"\"\"\n return self.adriatic_index(0, 2, 2, -0.5)\n\n def sum_rodeg_index(self):\n \"\"\" Adriatic index: sum rodeg index\"\"\"\n return self.adriatic_index(0, 2, 2, 0.5)\n\n def sum_deg_index(self):\n \"\"\" Adriatic index: sum deg index\"\"\"\n return self.adriatic_index(0, 2, 2, 1)\n\n def sum_sdeg_index(self):\n \"\"\" Adriatic index: sum sdeg index\"\"\"\n return self.adriatic_index(0, 2, 2, 2)\n\n def inverse_sum_indeg_index(self):\n \"\"\" Adriatic index: inverse sum indeg index\"\"\"\n return self.adriatic_index(0, 2, 3, -1)\n\n def inverse_sum_irdeg_index(self):\n \"\"\" Adriatic index: inverse sum irdeg index\"\"\"\n return self.adriatic_index(0, 2, 3, -0.5)\n\n def inverse_sum_rodeg_index(self):\n \"\"\" Adriatic index: inverse sum rodeg index\"\"\"\n return self.adriatic_index(0, 2, 3, 0.5)\n\n def inverse_sum_deg_index(self):\n \"\"\" Adriatic index: inverse sum deg index\"\"\"\n return self.adriatic_index(0, 2, 3, 1)\n\n def inverse_sum_sdeg_index(self):\n \"\"\" Adriatic index: inverse sum sdeg index\"\"\"\n return self.adriatic_index(0, 2, 3, 2)\n\n def misbalance_indeg_index(self):\n \"\"\" Adriatic index: misbalance indeg index\"\"\"\n return self.adriatic_index(0, 2, 4, -1)\n\n def misbalance_irdeg_index(self):\n \"\"\" Adriatic index: misbalance irdeg index\"\"\"\n return self.adriatic_index(0, 2, 4, -0.5)\n\n def misbalance_rodeg_index(self):\n \"\"\" Adriatic index: misbalance rodeg index\"\"\"\n return self.adriatic_index(0, 2, 4, 0.5)\n\n def misbalance_deg_index(self):\n \"\"\" Adriatic index: misbalance deg index\"\"\"\n return self.adriatic_index(0, 2, 4, 1)\n\n def misbalance_sdeg_index(self):\n \"\"\" Adriatic index: misbalance sdeg index\"\"\"\n return self.adriatic_index(0, 2, 4, 2)\n\n def inverse_misbalance_indeg_index(self):\n \"\"\" Adriatic index: inverse misbalance indeg index\"\"\"\n return self.adriatic_index(0, 2, 
5, -1)\n\n def inverse_misbalance_irdeg_index(self):\n \"\"\" Adriatic index: inverse misbalance irdeg index\"\"\"\n return self.adriatic_index(0, 2, 5, -0.5)\n\n def inverse_misbalance_rodeg_index(self):\n \"\"\" Adriatic index: inverse misbalance rodeg index\"\"\"\n return self.adriatic_index(0, 2, 5, 0.5)\n\n def inverse_misbalance_deg_index(self):\n \"\"\" Adriatic index: inverse misbalance deg index\"\"\"\n return self.adriatic_index(0, 2, 5, 1)\n\n def inverse_misbalance_sdeg_index(self):\n \"\"\" Adriatic index: inverse misbalance sdeg index\"\"\"\n return self.adriatic_index(0, 2, 5, 2)\n\n def min_max_rodeg_index(self):\n \"\"\" Adriatic index: min-max rodeg index\"\"\"\n return self.adriatic_index(0, 2, 6, 0.5)\n\n def min_max_deg_index(self):\n \"\"\" Adriatic index: min-max deg index\"\"\"\n return self.adriatic_index(0, 2, 6, 1)\n\n def min_max_sdeg_index(self):\n \"\"\" Adriatic index: min-max sdeg index\"\"\"\n return self.adriatic_index(0, 2, 6, 2)\n\n def max_min_rodeg_index(self):\n \"\"\" Adriatic index: max-min rodeg index\"\"\"\n return self.adriatic_index(0, 2, 7, 0.5)\n\n def max_min_deg_index(self):\n \"\"\" Adriatic index: max-min deg index\"\"\"\n return self.adriatic_index(0, 2, 7, 1)\n\n def max_min_sdeg_index(self):\n \"\"\" Adriatic index: max-min sdeg index\"\"\"\n return self.adriatic_index(0, 2, 7, 2)\n\n def symmetric_division_rodeg_index(self):\n \"\"\" Adriatic index: symmetric division rodeg index\"\"\"\n return self.adriatic_index(0, 2, 8, 0.5)\n\n def symmetric_division_deg_index(self):\n \"\"\" Adriatic index: symmetric division deg index\"\"\"\n return self.adriatic_index(0, 2, 8, 1)\n\n def symmetric_division_sdeg_index(self):\n \"\"\" Adriatic index: symmetric division sdeg index\"\"\"\n return self.adriatic_index(0, 2, 8, 2)\n\n def randic_type_hadeg_index(self):\n \"\"\" Adriatic index: Randic type hadeg index\"\"\"\n return self.adriatic_index(0, 3, 1, 0.5)\n\n def randic_type_twodeg_index(self):\n \"\"\" Adriatic index: Randic type twodeg index\"\"\"\n return self.adriatic_index(0, 3, 1, 2)\n\n def sum_hadeg_index(self):\n \"\"\" Adriatic index: sum hadeg index\"\"\"\n return self.adriatic_index(0, 3, 2, 0.5)\n\n def sum_twodeg_index(self):\n \"\"\" Adriatic index: sum twodeg index\"\"\"\n return self.adriatic_index(0, 3, 2, 2)\n\n def inverse_sum_hadeg_index(self):\n \"\"\" Adriatic index: inverse sum hadeg index\"\"\"\n return self.adriatic_index(0, 3, 3, 0.5)\n\n def inverse_sum_twodeg_index(self):\n \"\"\" Adriatic index: inverse sum twodeg index\"\"\"\n return self.adriatic_index(0, 3, 3, 2)\n\n def misbalance_hadeg_index(self):\n \"\"\" Adriatic index: misbalance hadeg index\"\"\"\n return self.adriatic_index(0, 3, 4, 0.5)\n\n def misbalance_twodeg_index(self):\n \"\"\" Adriatic index: misbalance twodeg index\"\"\"\n return self.adriatic_index(0, 3, 4, 2)\n\n def inverse_misbalance_hadeg_index(self):\n \"\"\" Adriatic index: inverse misbalance hadeg index\"\"\"\n return self.adriatic_index(0, 3, 5, 0.5)\n\n def inverse_misbalance_twodeg_index(self):\n \"\"\" Adriatic index: inverse misbalance twodeg index\"\"\"\n return self.adriatic_index(0, 3, 5, 2)\n\n def min_max_hadeg_index(self):\n \"\"\" Adriatic index: min-max hadeg index\"\"\"\n return self.adriatic_index(0, 3, 6, 0.5)\n\n def min_max_twodeg_index(self):\n \"\"\" Adriatic index: min-max twodeg index\"\"\"\n return self.adriatic_index(0, 3, 6, 2)\n\n def max_min_hadeg_index(self):\n \"\"\" Adriatic index: max-min hadeg index\"\"\"\n return self.adriatic_index(0, 3, 7, 0.5)\n\n 
def max_min_twodeg_index(self):\n \"\"\" Adriatic index: max-min twodeg index\"\"\"\n return self.adriatic_index(0, 3, 7, 2)\n\n def symmetric_division_hadeg_index(self):\n \"\"\" Adriatic index: symmetric division hadeg index\"\"\"\n return self.adriatic_index(0, 3, 8, 0.5)\n\n def symmetric_division_twodeg_index(self):\n \"\"\" Adriatic index: symmetric division twodeg index\"\"\"\n return self.adriatic_index(0, 3, 8, 2)\n\n def randic_type_lordi_index(self):\n \"\"\" Adriatic index: Randic type lordi index\"\"\"\n return self.adriatic_index(1, 1, 1, 0.5)\n\n def randic_type_lodi_index(self):\n \"\"\" Adriatic index: Randic type lodi index\"\"\"\n return self.adriatic_index(1, 1, 1, 1)\n\n def randic_type_losdi_index(self):\n \"\"\" Adriatic index: Randic type losdi index\"\"\"\n return self.adriatic_index(1, 1, 1, 2)\n\n def sum_lordi_index(self):\n \"\"\" Adriatic index: sum lordi index\"\"\"\n return self.adriatic_index(1, 1, 2, 0.5)\n\n def sum_lodi_index(self):\n \"\"\" Adriatic index: sum lodi index\"\"\"\n return self.adriatic_index(1, 1, 2, 1)\n\n def sum_losdi_index(self):\n \"\"\" Adriatic index: sum losdi index\"\"\"\n return self.adriatic_index(1, 1, 2, 2)\n\n def inverse_sum_lordi_index(self):\n \"\"\" Adriatic index: inverse sum lordi index\"\"\"\n return self.adriatic_index(1, 1, 3, 0.5)\n\n def inverse_sum_lodi_index(self):\n \"\"\" Adriatic index: inverse sum lodi index\"\"\"\n return self.adriatic_index(1, 1, 3, 1)\n\n def inverse_sum_losdi_index(self):\n \"\"\" Adriatic index: inverse sum losdi index\"\"\"\n return self.adriatic_index(1, 1, 3, 2)\n\n def misbalance_lordi_index(self):\n \"\"\" Adriatic index: misbalance lordi index\"\"\"\n return self.adriatic_index(1, 1, 4, 0.5)\n\n def misbalance_lodi_index(self):\n \"\"\" Adriatic index: misbalance lodi index\"\"\"\n return self.adriatic_index(1, 1, 4, 1)\n\n def misbalance_losdi_index(self):\n \"\"\" Adriatic index: misbalance losdi index\"\"\"\n return self.adriatic_index(1, 1, 4, 2)\n\n def inverse_misbalance_lordi_index(self):\n \"\"\" Adriatic index: inverse misbalance lordi index\"\"\"\n return self.adriatic_index(1, 1, 5, 0.5)\n\n def inverse_misbalance_lodi_index(self):\n \"\"\" Adriatic index: inverse misbalance lodi index\"\"\"\n return self.adriatic_index(1, 1, 5, 1)\n\n def inverse_misbalance_losdi_index(self):\n \"\"\" Adriatic index: inverse misbalance losdi index\"\"\"\n return self.adriatic_index(1, 1, 5, 2)\n\n def min_max_lordi_index(self):\n \"\"\" Adriatic index: min-max lordi index\"\"\"\n return self.adriatic_index(1, 1, 6, 0.5)\n\n def min_max_lodi_index(self):\n \"\"\" Adriatic index: min-max lodi index\"\"\"\n return self.adriatic_index(1, 1, 6, 1)\n\n def min_max_losdi_index(self):\n \"\"\" Adriatic index: min-max losdi index\"\"\"\n return self.adriatic_index(1, 1, 6, 2)\n\n def max_min_lordi_index(self):\n \"\"\" Adriatic index: max-min lordi index\"\"\"\n return self.adriatic_index(1, 1, 7, 0.5)\n\n def max_min_lodi_index(self):\n \"\"\" Adriatic index: max-min lodi index\"\"\"\n return self.adriatic_index(1, 1, 7, 1)\n\n def max_min_losdi_index(self):\n \"\"\" Adriatic index: max-min losdi index\"\"\"\n return self.adriatic_index(1, 1, 7, 2)\n\n def symmetric_division_lordi_index(self):\n \"\"\" Adriatic index: symmetric division lordi index\"\"\"\n return self.adriatic_index(1, 1, 8, 0.5)\n\n def symmetric_division_lodi_index(self):\n \"\"\" Adriatic index: symmetric division lodi index\"\"\"\n return self.adriatic_index(1, 1, 8, 1)\n\n def symmetric_division_losdi_index(self):\n \"\"\" 
Adriatic index: symmetric division losdi index\"\"\"\n return self.adriatic_index(1, 1, 8, 2)\n\n def randic_type_indi_index(self):\n \"\"\" Adriatic index: Randic type indi index\"\"\"\n return self.adriatic_index(1, 2, 1, -1)\n\n def randic_type_irdi_index(self):\n \"\"\" Adriatic index: Randic type irdi index\"\"\"\n return self.adriatic_index(1, 2, 1, -0.5)\n\n def randic_type_rodi_index(self):\n \"\"\" Adriatic index: Randic type rodi index\"\"\"\n return self.adriatic_index(1, 2, 1, 0.5)\n\n def randic_type_di_index(self):\n \"\"\" Adriatic index: Randic type di index\"\"\"\n return self.adriatic_index(1, 2, 1, 1)\n\n def randic_type_sdi_index(self):\n \"\"\" Adriatic index: Randic type sdi index\"\"\"\n return self.adriatic_index(1, 2, 1, 2)\n\n def sum_indi_index(self):\n \"\"\" Adriatic index: sum indi index\"\"\"\n return self.adriatic_index(1, 2, 2, -1)\n\n def sum_irdi_index(self):\n \"\"\" Adriatic index: sum irdi index\"\"\"\n return self.adriatic_index(1, 2, 2, -0.5)\n\n def sum_rodi_index(self):\n \"\"\" Adriatic index: sum rodi index\"\"\"\n return self.adriatic_index(1, 2, 2, 0.5)\n\n def sum_di_index(self):\n \"\"\" Adriatic index: sum di index\"\"\"\n return self.adriatic_index(1, 2, 2, 1)\n\n def sum_sdi_index(self):\n \"\"\" Adriatic index: sum sdi index\"\"\"\n return self.adriatic_index(1, 2, 2, 2)\n\n def inverse_sum_indi_index(self):\n \"\"\" Adriatic index: inverse sum indi index\"\"\"\n return self.adriatic_index(1, 2, 3, -1)\n\n def inverse_sum_irdi_index(self):\n \"\"\" Adriatic index: inverse sum irdi index\"\"\"\n return self.adriatic_index(1, 2, 3, -0.5)\n\n def inverse_sum_rodi_index(self):\n \"\"\" Adriatic index: inverse sum rodi index\"\"\"\n return self.adriatic_index(1, 2, 3, 0.5)\n\n def inverse_sum_di_index(self):\n \"\"\" Adriatic index: inverse sum di index\"\"\"\n return self.adriatic_index(1, 2, 3, 1)\n\n def inverse_sum_sdi_index(self):\n \"\"\" Adriatic index: inverse sum sdi index\"\"\"\n return self.adriatic_index(1, 2, 3, 2)\n\n def misbalance_indi_index(self):\n \"\"\" Adriatic index: misbalance indi index\"\"\"\n return self.adriatic_index(1, 2, 4, -1)\n\n def misbalance_irdi_index(self):\n \"\"\" Adriatic index: misbalance irdi index\"\"\"\n return self.adriatic_index(1, 2, 4, -0.5)\n\n def misbalance_rodi_index(self):\n \"\"\" Adriatic index: misbalance rodi index\"\"\"\n return self.adriatic_index(1, 2, 4, 0.5)\n\n def misbalance_di_index(self):\n \"\"\" Adriatic index: misbalance di index\"\"\"\n return self.adriatic_index(1, 2, 4, 1)\n\n def misbalance_sdi_index(self):\n \"\"\" Adriatic index: misbalance sdi index\"\"\"\n return self.adriatic_index(1, 2, 4, 2)\n\n def inverse_misbalance_indi_index(self):\n \"\"\" Adriatic index: inverse misbalance indi index\"\"\"\n return self.adriatic_index(1, 2, 5, -1)\n\n def inverse_misbalance_irdi_index(self):\n \"\"\" Adriatic index: inverse misbalance irdi index\"\"\"\n return self.adriatic_index(1, 2, 5, -0.5)\n\n def inverse_misbalance_rodi_index(self):\n \"\"\" Adriatic index: inverse misbalance rodi index\"\"\"\n return self.adriatic_index(1, 2, 5, 0.5)\n\n def inverse_misbalance_di_index(self):\n \"\"\" Adriatic index: inverse misbalance di index\"\"\"\n return self.adriatic_index(1, 2, 5, 1)\n\n def inverse_misbalance_sdi_index(self):\n \"\"\" Adriatic index: inverse misbalance sdi index\"\"\"\n return self.adriatic_index(1, 2, 5, 2)\n\n def min_max_rodi_index(self):\n \"\"\" Adriatic index: min-max rodi index\"\"\"\n return self.adriatic_index(1, 2, 6, 0.5)\n\n def 
min_max_di_index(self):\n \"\"\" Adriatic index: min-max di index\"\"\"\n return self.adriatic_index(1, 2, 6, 1)\n\n def min_max_sdi_index(self):\n \"\"\" Adriatic index: min-max sdi index\"\"\"\n return self.adriatic_index(1, 2, 6, 2)\n\n def max_min_rodi_index(self):\n \"\"\" Adriatic index: max-min rodi index\"\"\"\n return self.adriatic_index(1, 2, 7, 0.5)\n\n def max_min_di_index(self):\n \"\"\" Adriatic index: max-min di index\"\"\"\n return self.adriatic_index(1, 2, 7, 1)\n\n def max_min_sdi_index(self):\n \"\"\" Adriatic index: max-min sdi index\"\"\"\n return self.adriatic_index(1, 2, 7, 2)\n\n def symmetric_division_rodi_index(self):\n \"\"\" Adriatic index: symmetric division rodi index\"\"\"\n return self.adriatic_index(1, 2, 8, 0.5)\n\n def symmetric_division_di_index(self):\n \"\"\" Adriatic index: symmetric division di index\"\"\"\n return self.adriatic_index(1, 2, 8, 1)\n\n def symmetric_division_sdi_index(self):\n \"\"\" Adriatic index: symmetric division sdi index\"\"\"\n return self.adriatic_index(1, 2, 8, 2)\n\n def randic_type_hadi_index(self):\n \"\"\" Adriatic index: Randic type hadi index\"\"\"\n return self.adriatic_index(1, 3, 1, 0.5)\n\n def randic_type_twodi_index(self):\n \"\"\" Adriatic index: Randic type twodi index\"\"\"\n return self.adriatic_index(1, 3, 1, 2)\n\n def sum_hadi_index(self):\n \"\"\" Adriatic index: sum hadi index\"\"\"\n return self.adriatic_index(1, 3, 2, 0.5)\n\n def sum_twodi_index(self):\n \"\"\" Adriatic index: sum twodi index\"\"\"\n return self.adriatic_index(1, 3, 2, 2)\n\n def inverse_sum_hadi_index(self):\n \"\"\" Adriatic index: inverse sum hadi index\"\"\"\n return self.adriatic_index(1, 3, 3, 0.5)\n\n def inverse_sum_twodi_index(self):\n \"\"\" Adriatic index: inverse sum twodi index\"\"\"\n return self.adriatic_index(1, 3, 3, 2)\n\n def misbalance_hadi_index(self):\n \"\"\" Adriatic index: misbalance hadi index\"\"\"\n return self.adriatic_index(1, 3, 4, 0.5)\n\n def misbalance_twodi_index(self):\n \"\"\" Adriatic index: misbalance twodi index\"\"\"\n return self.adriatic_index(1, 3, 4, 2)\n\n def inverse_misbalance_hadi_index(self):\n \"\"\" Adriatic index: inverse misbalance hadi index\"\"\"\n return self.adriatic_index(1, 3, 5, 0.5)\n\n def inverse_misbalance_twodi_index(self):\n \"\"\" Adriatic index: inverse misbalance twodi index\"\"\"\n return self.adriatic_index(1, 3, 5, 2)\n\n def min_max_hadi_index(self):\n \"\"\" Adriatic index: min-max hadi index\"\"\"\n return self.adriatic_index(1, 3, 6, 0.5)\n\n def min_max_twodi_index(self):\n \"\"\" Adriatic index: min-max twodi index\"\"\"\n return self.adriatic_index(1, 3, 6, 2)\n\n def max_min_hadi_index(self):\n \"\"\" Adriatic index: max-min hadi index\"\"\"\n return self.adriatic_index(1, 3, 7, 0.5)\n\n def max_min_twodi_index(self):\n \"\"\" Adriatic index: max-min twodi index\"\"\"\n return self.adriatic_index(1, 3, 7, 2)\n\n def symmetric_division_hadi_index(self):\n \"\"\" Adriatic index: symmetric division hadi index\"\"\"\n return self.adriatic_index(1, 3, 8, 0.5)\n\n def symmetric_division_twodi_index(self):\n \"\"\" Adriatic index: symmetric division twodi index\"\"\"\n return self.adriatic_index(1, 3, 8, 2)\n"
] | [
[
"numpy.matrix",
"numpy.log",
"numpy.minimum",
"numpy.sqrt",
"numpy.linalg.inv",
"numpy.ndarray",
"numpy.ones",
"numpy.longdouble",
"numpy.identity",
"numpy.float64",
"numpy.linalg.eigvalsh",
"numpy.exp",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
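Every method in the entry above delegates to a single parameterized `adriatic_index(p1, p2, p3, p4)` helper that is defined earlier in that file and not reproduced in this excerpt, so the exact meaning of the four arguments is not visible here. As a rough, self-contained sketch of what a discrete (bond-additive) Adriatic index computes — the `phi`/`gamma` choices and the use of networkx below are assumptions made for illustration, not the library's implementation — the value is a sum over edges of a combination of transformed endpoint degrees:

# Hypothetical sketch, not the library's actual adriatic_index: a discrete
# Adriatic index sums gamma(phi(x_u), phi(x_v)) over all edges, where x_v is a
# vertex quantity such as the degree, phi transforms it (e.g. a power), and
# gamma combines the two endpoint values (product, sum, |difference|, ...).
import networkx as nx  # assumed available only to build an example graph

def adriatic_index_sketch(graph, phi, gamma):
    """Sum gamma(phi(deg(u)), phi(deg(v))) over all edges of the graph."""
    degree = dict(graph.degree())
    return sum(gamma(phi(degree[u]), phi(degree[v])) for u, v in graph.edges())

# Example: a Randic-type combination (product) with phi(x) = x ** -0.5,
# evaluated on a small cycle graph where every vertex has degree 2.
value = adriatic_index_sketch(
    nx.cycle_graph(5),
    phi=lambda x: x ** -0.5,
    gamma=lambda a, b: a * b,
)
print(value)  # 5 edges * (2**-0.5 * 2**-0.5) = 2.5

The wrappers in the entry then differ only in which transform, which combination rule, and which exponent they pass to the shared helper.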
ElmerCSC/ElmerIceCourses | [
"6ff1011f3a1311d84699a30da9f8fc56cb984a08"
] | [
"TeteRousse/Step1/Makegeo.py"
] | [
"# -*- coding: utf-8 -*-\n# Create a geo (gmsh input file) file from a contour file\n# the contour file contains the (x,y) coordinates of the ordered\n# points defining the contour of the domain\n#\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Test these options\n # edge size of the elements\nel_size = 18.0 \n\n # Spline or line \nspline = True\n\nContour = np.loadtxt('./../Data/Contour_TR_glacier.dat')\nx = Contour[:,0]\ny = Contour[:,1]\n\nif x[0]==x[-1] and y[0]==y[-1]:\n print('Same first and last points in contour file')\n Npt = len(x)-1\nelse:\n Npt = len(x) \n\n# Open the output file\ngeo = open('teterousse0.geo', 'w')\ngeo.write('// This a a geo file created using the python script Makegeo.py // \\n')\ngeo.write('Mesh.Algorithm=5; \\n')\ngeo.write('// To controle the element size, one can directly modify the lc value in the geo file // \\n')\ngeo.write('lc = {0} ; \\n'.format(el_size))\n\n# write the points coordinates (x,y,0,lc)\nnp=0\nfor j in range(0,Npt):\n np=np+1\n geo.write('Point({0}) = '.format(np)+r'{'+' {0}, {1}, 0.0, lc'.format(x[j],y[j])+r'}'+'; \\n')\n\n# if spline\nif spline: \n geo.write('Spline(1) = {')\n for j in range(0,Npt):\n geo.write('{0},'.format(j+1))\n geo.write('1}; \\n')\n \n geo.write('Line Loop(2) = {1}; \\n')\n geo.write('Plane Surface(3) = {2}; \\n')\n geo.write('Physical Line(4) = {1}; \\n')\n geo.write('Physical Surface(5) = {3}; \\n')\n \n \n# else it is lines, as a spline might not work in all case\nelse:\n nl=0\n for j in range(0,Npt-1):\n nl=nl+1\n geo.write('Line({0}) = '.format(nl)+r'{'+'{0},{1}'.format(j+1,j+2)+r'}'+'; \\n')\n geo.write('Line({0}) = '.format(nl+1)+r'{'+'{0},{1}'.format(j+2,1)+r'}'+'; \\n')\n \n geo.write('Compound Line({0}) = '.format(nl+2)+r'{')\n for j in range(0,Npt-1):\n geo.write('{0}, '.format(j+1))\n geo.write('{0}'.format(j+2)+'}; \\n')\n \n geo.write('Line Loop({0}) = '.format(nl+3)+r'{'+'{0}'.format(nl+2)+r'};'+' \\n')\n geo.write('Plane Surface({0}) = '.format(nl+4)+r'{'+'{0}'.format(nl+3)+r'};'+' \\n')\n geo.write('Physical Line({0}) = '.format(nl+5)+r'{'+'{0}'.format(nl+2)+r'};'+' \\n')\n geo.write('Physical Surface({0}) = '.format(nl+6)+r'{'+'{0}'.format(nl+4)+r'};'+' \\n')\n\ngeo.close()\n"
] | [
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
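The Makegeo.py entry above emits the gmsh .geo file line by line with a manual point counter (reusing the name `np`, which shadows the numpy import) and repeated format strings. Below is a condensed sketch of the same spline branch, under the same contour-file and element-size assumptions, that builds the text in memory and writes it once; it is an illustration of the pattern, not a drop-in replacement for the course script.

# Minimal sketch of the spline branch of Makegeo.py: assemble the gmsh .geo
# text in memory, then write it in one call.  The contour path and element
# size mirror the assumptions of the script above.
import numpy as np

def write_spline_geo(contour_file, geo_file, el_size=18.0):
    xy = np.loadtxt(contour_file)
    # Drop a repeated closing point so the closed spline is not degenerate.
    if np.allclose(xy[0], xy[-1]):
        xy = xy[:-1]
    lines = ['Mesh.Algorithm=5;', 'lc = {0};'.format(el_size)]
    for i, (x, y) in enumerate(xy, start=1):
        lines.append('Point({0}) = {{{1}, {2}, 0.0, lc}};'.format(i, x, y))
    point_ids = ','.join(str(i) for i in range(1, len(xy) + 1))
    lines += [
        'Spline(1) = {{{0},1}};'.format(point_ids),  # close the contour
        'Line Loop(2) = {1};',
        'Plane Surface(3) = {2};',
        'Physical Line(4) = {1};',
        'Physical Surface(5) = {3};',
    ]
    with open(geo_file, 'w') as f:
        f.write('\n'.join(lines) + '\n')

# write_spline_geo('./../Data/Contour_TR_glacier.dat', 'teterousse0.geo')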
qingyuanxingsi/incubator-mxnet | [
"fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf",
"fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf",
"fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf"
] | [
"tests/python/unittest/test_optimizer.py",
"example/gluon/kaggle_k_fold_cross_validation.py",
"example/gluon/super_resolution.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport numpy as np\nimport mxnet as mx\nimport mxnet.lr_scheduler as lr_scheduler\nfrom mxnet import gluon\nimport unittest\nfrom nose.tools import raises\nimport math\nfrom mxnet.test_utils import *\nfrom common import setup_module, with_seed\n\n@with_seed()\ndef test_learning_rate():\n o1 = mx.optimizer.Optimizer(learning_rate=0.01)\n o1.set_learning_rate(0.2)\n assert o1.learning_rate == 0.2\n\n lr_s = lr_scheduler.FactorScheduler(step=1)\n o2 = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)\n assert o2.learning_rate == 0.3\n o2.lr_scheduler.base_lr = 0.4\n assert o2.learning_rate == 0.4\n\n\n@raises(UserWarning)\n@with_seed()\ndef test_learning_rate_expect_user_warning():\n lr_s = lr_scheduler.FactorScheduler(step=1)\n o = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)\n o.set_learning_rate(0.5)\n\n\n@with_seed()\ndef test_lr_wd_mult():\n data = mx.sym.Variable('data')\n bias = mx.sym.Variable('fc1_bias', lr_mult=1.0)\n fc1 = mx.sym.FullyConnected(data=data, bias=bias, name='fc1', num_hidden=10, lr_mult=0)\n fc2 = mx.sym.FullyConnected(data=fc1, name='fc2', num_hidden=10, wd_mult=0.5)\n\n mod = mx.mod.Module(symbol=fc2, label_names=None, context=default_context())\n mod.bind(data_shapes=[('data', (5,10))])\n mod.init_params(initializer=mx.init.Uniform(1.0))\n mod.init_optimizer(optimizer_params={'learning_rate': 1.0})\n args1, _ = mod.get_params()\n args1 = {k: v.asnumpy() for k, v in args1.items()}\n mod.forward(mx.io.DataBatch(data=[mx.random.uniform(low=-1.0, high=1.0, shape=(5,10))], label=None), is_train=True)\n mod.backward(mod.get_outputs())\n mod.update()\n args2, _ = mod.get_params()\n args2 = {k: v.asnumpy() for k, v in args2.items()}\n\n assert mod._optimizer.lr_mult == {'fc1_bias': 1.0, 'fc1_weight': 0.0}\n assert mod._optimizer.wd_mult == {'fc2_bias': 0.5, 'fc2_weight': 0.5, 'fc1_bias': 0.0}\n assert mx.test_utils.almost_equal(args1['fc1_weight'], args2['fc1_weight'], 1e-10)\n assert not mx.test_utils.almost_equal(args1['fc1_bias'], args2['fc1_bias'], 1e-1)\n assert not mx.test_utils.almost_equal(args1['fc2_weight'], args2['fc2_weight'], 1e-1)\n\ndef compare_ndarray_tuple(t1, t2, rtol=None, atol=None):\n if t1 is not None and t2 is not None:\n if isinstance(t1, tuple):\n for s1, s2 in zip(t1, t2):\n compare_ndarray_tuple(s1, s2, rtol, atol)\n else:\n assert_almost_equal(t1.asnumpy(), t2.asnumpy(), rtol=rtol, atol=atol)\n\n\ndef compare_optimizer(opt1, opt2, shape, dtype, w_stype='default', g_stype='default',\n rtol=1e-4, atol=1e-5):\n if w_stype == 'default':\n w2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)\n w1 = w2.copyto(default_context())\n elif w_stype == 'row_sparse' or w_stype == 'csr':\n w2 = rand_ndarray(shape, w_stype, 
density=1, dtype=dtype)\n w1 = w2.copyto(default_context()).tostype('default')\n else:\n raise Exception(\"type not supported yet\")\n if g_stype == 'default':\n g2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)\n g1 = g2.copyto(default_context())\n elif g_stype == 'row_sparse' or g_stype == 'csr':\n g2 = rand_ndarray(shape, g_stype, dtype=dtype)\n g1 = g2.copyto(default_context()).tostype('default')\n else:\n raise Exception(\"type not supported yet\")\n\n state1 = opt1.create_state_multi_precision(0, w1)\n state2 = opt2.create_state_multi_precision(0, w2)\n compare_ndarray_tuple(state1, state2)\n\n opt1.update_multi_precision(0, w1, g1, state1)\n opt2.update_multi_precision(0, w2, g2, state2)\n compare_ndarray_tuple(state1, state2, rtol=rtol, atol=atol)\n assert_almost_equal(w1.asnumpy(), w2.asnumpy(), rtol=rtol, atol=atol)\n\n# SGD\n\nclass PySGD(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of sgd\"\"\"\n def __init__(self, learning_rate=0.01, momentum=0.0, multi_precision=False, **kwargs):\n super(PySGD, self).__init__(learning_rate=learning_rate, **kwargs)\n self.momentum = momentum\n self.multi_precision = multi_precision\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: momentum\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n momentum = None\n weight_master_copy = None\n do_multi_precision = self.multi_precision and weight.dtype == np.float16\n if do_multi_precision:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)\n weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)\n return (momentum, weight_master_copy)\n else:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)\n return momentum\n\n def create_state_multi_precision(self, index, weight):\n return self.create_state(index, weight)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n use_multi_precision = isinstance(state, list) or isinstance(state, tuple)\n\n if not use_multi_precision:\n if self.momentum == 0.0:\n if self.clip_gradient is not None:\n weight[:] = ((1 - lr*wd)*weight -\n lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n else:\n weight[:] = (1 - lr*wd)*weight - lr*self.rescale_grad*grad\n else:\n mom = state\n if self.clip_gradient is not None:\n mom[:] = (self.momentum*mom - lr*wd*weight -\n lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n weight += mom\n else:\n mom[:] = self.momentum*mom - lr*wd*weight - lr*self.rescale_grad*grad\n weight += mom\n else:\n grad32 = array(grad, ctx=grad.context, dtype=np.float32)\n mom = state[0]\n weight32 = state[1]\n if self.momentum == 0.0:\n if self.clip_gradient is not None:\n weight32[:] = ((1 - lr*wd)*weight32 -\n lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n else:\n weight32[:] = (1 - lr*wd)*weight32 - lr*self.rescale_grad*grad32\n else:\n if self.clip_gradient is not None:\n mom[:] = (self.momentum*mom - lr*wd*weight32 -\n lr*mx.nd.clip(grad32*self.rescale_grad, 
-self.clip_gradient, self.clip_gradient))\n weight32 += mom\n else:\n mom[:] = self.momentum*mom - lr*wd*weight32 - lr*self.rescale_grad*grad32\n weight32 += mom\n tmp = weight32.astype(weight.dtype)\n tmp.copyto(weight)\n\n def update_multi_precision(self, index, weight, grad, state):\n self.update(index, weight, grad, state)\n\[email protected](\"Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/9000\")\n@with_seed()\ndef test_sgd():\n opt1 = PySGD\n opt2 = mx.optimizer.SGD\n shape = (3, 4, 5)\n mom_options = [{}, {'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32, np.float64]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n # test operator fallback on cpu\n if (default_context() == mx.cpu()):\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n g_stype='row_sparse')\n if dtype != np.float16:\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape[:2],\n dtype, w_stype='csr', g_stype='csr')\n # test optimizer with a big shape\n big_shape = (54686454, 1)\n kwarg = {'momentum': 0.9, 'wd': 0.05}\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), big_shape, np.float32)\n\nclass PySparseSGD(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of sgd\"\"\"\n def __init__(self, learning_rate=0.01, momentum=0.0, **kwargs):\n super(PySparseSGD, self).__init__(learning_rate=learning_rate, **kwargs)\n self.momentum = momentum\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: momentum\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n if self.momentum == 0.0:\n return None\n else:\n return mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n num_rows = weight.shape[0]\n if self.momentum == 0.0:\n # Update on a per row basis, skip all-zero rows\n for row in range(num_rows):\n grad_row = grad[row].asnumpy()\n all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))\n if all_zeros:\n continue\n if self.clip_gradient is not None:\n weight[row] = ((1 - lr*wd)*weight[row] -\n lr*mx.nd.clip(grad[row]*self.rescale_grad,\n -self.clip_gradient, self.clip_gradient))\n else:\n weight[row] = (1 - lr*wd)*weight[row] - lr*self.rescale_grad*grad[row]\n else:\n mom = state\n for row in range(num_rows):\n grad_row = grad[row].asnumpy()\n all_zeros = mx.test_utils.almost_equal(grad_row, 
np.zeros_like(grad_row))\n if all_zeros:\n continue\n if self.clip_gradient is not None:\n mom[row] = (self.momentum*mom[row] - lr*wd*weight[row] -\n lr*mx.nd.clip(grad[row]*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n weight[row] += mom[row]\n else:\n mom[row] = self.momentum*mom[row] - lr*wd*weight[row] - lr*self.rescale_grad*grad[row]\n weight[row] += mom[row]\n\n@with_seed()\ndef test_sparse_sgd():\n opt1 = PySparseSGD\n opt2 = mx.optimizer.SGD\n shape = (3, 4, 5)\n mom_options = [{}, {'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float32]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n w_stype='row_sparse', g_stype='row_sparse')\n\n\n@with_seed(0)\ndef test_std_sparse_sgd():\n opt1 = PySGD\n opt2 = mx.optimizer.SGD\n shape = (3, 4, 5)\n mom_options = [{'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n for dtype in [np.float32]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n compare_optimizer(opt1(**kwarg), opt2(lazy_update=False, **kwarg), shape, dtype,\n w_stype='row_sparse', g_stype='row_sparse')\n\n\nclass PyNAG(PySGD):\n def __init__(self, **kwargs):\n super(PyNAG, self).__init__(**kwargs)\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: momentum\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n momentum = None\n weight_master_copy = None\n do_multi_precision = self.multi_precision and weight.dtype == np.float16\n if do_multi_precision:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)\n weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)\n return (weight_master_copy, momentum)\n else:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)\n return momentum\n\n def create_state_multi_precision(self, index, weight):\n return self.create_state(index, weight)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n use_multi_precision = isinstance(state, list) or isinstance(state, tuple)\n if not use_multi_precision:\n grad = grad * self.rescale_grad\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n if self.momentum == 
0.0:\n weight[:] += -lr * (grad + wd * weight)\n else:\n mom = state\n mom[:] *= self.momentum\n grad += wd * weight\n mom[:] += grad\n grad[:] += self.momentum * mom\n weight[:] += -lr * grad \n else:\n grad32 = array(grad, ctx=grad.context, dtype=np.float32)\n grad32 = grad32 * self.rescale_grad\n if self.clip_gradient is not None:\n grad32 = mx.nd.clip(grad32, -self.clip_gradient, self.clip_gradient)\n mom = state[1]\n weight32 = state[0]\n if self.momentum == 0.0:\n weight32[:] += -lr * (grad32 + wd * weight32)\n else:\n mom[:] *= self.momentum\n grad32 += wd * weight32\n mom[:] += grad32\n grad32[:] += self.momentum * mom\n weight32[:] += -lr * grad32\n tmp = weight32.astype(weight.dtype)\n tmp.copyto(weight)\n\n@with_seed(0)\ndef test_nag():\n opt1 = PyNAG\n opt2 = mx.optimizer.NAG\n shape = (3, 4, 5)\n mom_options = [{}, {'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32, np.float64]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n\n\n\n# FTML\n\nclass PyFTML(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of FTML\"\"\"\n def __init__(self, beta1=0.6, beta2=0.999, epsilon=1e-8, **kwargs):\n super(PyFTML, self).__init__(**kwargs)\n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n\n def create_state(self, index, weight):\n return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # d_0\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # v_0\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # z_0\n\n def update(self, index, weight, grad, state):\n assert(isinstance(weight, mx.nd. 
NDArray))\n assert(isinstance(grad, mx.nd.NDArray))\n self._update_count(index)\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n t = self._index_update_count[index]\n\n grad = grad * self.rescale_grad + wd * weight\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n # get previous states\n prev_d, prev_v, prev_z = state\n # compute states\n v_t = self.beta2 * prev_v + (1 - self.beta2) * mx.nd.square(grad)\n d_t = (1 - pow(self.beta1, t)) / lr * (mx.nd.sqrt(v_t / (1 - pow(self.beta2, t))) + self.epsilon)\n sigma_t = d_t - self.beta1 * prev_d\n z_t = self.beta1 * prev_z + (1 - self.beta1) * grad - sigma_t * weight\n # update weight\n weight[:] = - z_t / d_t\n # update states\n prev_d[:] = d_t\n prev_v[:] = v_t\n prev_z[:] = z_t\n\n@with_seed(0)\ndef test_ftml():\n opt1 = PyFTML\n opt2 = mx.optimizer.FTML\n shape = (3, 4, 5)\n beta1_options = [{}, {'beta1': 0.5}, {'beta1': 0.7}]\n beta2_options = [{}, {'beta2': 0.8}, {'beta2': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n for dtype in [np.float32]:\n for beta1_option in beta1_options:\n for beta2_option in beta2_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n kwarg = {}\n kwarg.update(beta1_option)\n kwarg.update(beta2_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n\n\n# ADAM\n\nclass PyAdam(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of adam\"\"\"\n def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,\n decay_factor=(1 - 1e-8), sparse_update=False, **kwargs):\n super(PyAdam, self).__init__(learning_rate=learning_rate, **kwargs)\n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n self.decay_factor = decay_factor\n self.sparse_update = sparse_update\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: mean, variance\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # mean\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n self._update_count(index)\n\n t = self._index_update_count[index]\n mean, variance = state\n\n wd = self._get_wd(index)\n num_rows = weight.shape[0]\n coef1 = 1. - self.beta1**t\n coef2 = 1. - self.beta2**t\n lr *= math.sqrt(coef2)/coef1\n for row in range(num_rows):\n # check row slices of all zeros\n all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))\n # skip zeros during sparse update\n if all_zeros and self.sparse_update:\n continue\n grad[row] = grad[row] * self.rescale_grad + wd * weight[row]\n # clip gradients\n if self.clip_gradient is not None:\n mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])\n # update mean\n mean[row] *= self.beta1\n mean[row] += grad[row] * (1. 
- self.beta1)\n # update variance\n variance[row] *= self.beta2\n variance[row] += (1 - self.beta2) * mx.nd.square(grad[row], out=grad[row])\n # update weight\n weight[row] -= lr*mean[row]/(mx.nd.sqrt(variance[row]) + self.epsilon)\n\n\n@with_seed()\ndef test_adam():\n opt1 = PyAdam\n opt2 = mx.optimizer.Adam\n shape = (3, 4, 5)\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32, np.float64]:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n # atol 2e-5 needed to pass with seed 1248389097\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n rtol=1e-4, atol=2e-5)\n # atol 2e-5 needed to pass with seed 781809840\n compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,\n dtype, w_stype='row_sparse', g_stype='row_sparse',\n rtol=1e-4, atol=2e-5)\n compare_optimizer(opt1(**kwarg), opt2(lazy_update=False, **kwarg), shape,\n dtype, w_stype='row_sparse', g_stype='row_sparse',\n rtol=1e-4, atol=2e-5)\n\n# Signum\nclass PySignum(mx.optimizer.Optimizer):\n \"\"\"The python reference of Signum optimizer.\n\n The optimizer updates the weight by:\n\n rescaled_grad = rescale_grad * clip(grad, clip_gradient) + wd * weight\n state = momentum * state + (1-momentum)*rescaled_grad\n weight = (1 - lr * wd_lh) * weight - lr * sign(state)\n\n See the original paper at: https://jeremybernste.in/projects/amazon/signum.pdf\n\n For details of the update algorithm see\n :class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`.\n\n This optimizer accepts the following parameters in addition to those accepted\n by :class:`.Optimizer`.\n\n Parameters\n ----------\n momentum : float, optional\n The momentum value.\n wd_lh : float, optitional\n The amount of decoupled weight decay regularization.\n \"\"\"\n def __init__(self, learning_rate=0.01, momentum=0.9, wd_lh = 0.0, **kwargs):\n super(PySignum, self).__init__(learning_rate = learning_rate, **kwargs)\n self.momentum = momentum\n self.wd_lh = wd_lh\n\n def create_state(self, index, weight):\n momentum = None\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)\n return momentum\n\n def update(self, index, weight, grad, state):\n self._update_count(index)\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n\n if state is not None:\n mom = state\n if self.clip_gradient is not None:\n mom[:] = (self.momentum*mom - (1-self.momentum)*(wd*weight +\n mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient)))\n else:\n mom[:] = self.momentum*mom - (1-self.momentum)*wd*weight - (1-self.momentum)*self.rescale_grad*grad\n weight[:] = (1 - lr*self.wd_lh)*weight + lr*mx.nd.sign(mom)\n else:\n weight[:] = (1 - lr*(wd+self.wd_lh))*weight - lr*mx.nd.sign(grad)\n\n@with_seed(0)\ndef test_signum():\n opt1 = PySignum\n opt2 = mx.optimizer.Signum\n shape = (3, 4, 5)\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, 
{'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n wd_lh_options = [{}, {'wd_lh': 0.015}, {'wd_lh': 0.0}]\n mom_options = [{}, {'momentum': 0.9}]\n lr_options = [{'learning_rate': 0.05},{'learning_rate': 0.01}]\n for dtype in [np.float32, np.float64]:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in wd_lh_options:\n for lr_option in lr_options:\n for mom_option in mom_options:\n kwarg = {}\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n kwarg.update(lr_option)\n kwarg.update(mom_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n\n\n# RMSProp\nclass PyRMSProp(mx.optimizer.Optimizer):\n \"\"\"RMSProp optimizer of Tieleman & Hinton, 2012,\n\n For centered=False, the code follows the version in\n http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by\n Tieleman & Hinton, 2012\n\n For centered=True, the code follows the version in\n http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.\n\n Parameters\n ----------\n learning_rate : float, optional\n Step size.\n Default value is set to 0.001.\n gamma1: float, optional\n decay factor of moving average for gradient, gradient^2.\n Default value is set to 0.9.\n gamma2: float, optional\n \"momentum\" factor.\n Default value if set to 0.9.\n Only used if centered=True\n epsilon : float, optional\n Default value is set to 1e-8.\n centered : boolean, optional\n Use Graves or Tielemans & Hintons version of RMSProp\n wd : float, optional\n L2 regularization coefficient add to all the weights\n rescale_grad : float, optional\n rescaling factor of gradient.\n clip_gradient : float, optional\n clip gradient in range [-clip_gradient, clip_gradient]\n clip_weights : float, optional\n clip weights in range [-clip_weights, clip_weights]\n\n \"\"\"\n def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,\n epsilon=1e-8, centered=False, clip_weights=None, **kwargs):\n super(PyRMSProp, self).__init__(learning_rate=learning_rate, **kwargs)\n self.centered = centered\n self.gamma1 = gamma1\n self.gamma2 = gamma2\n self.epsilon = epsilon\n self.clip_weights = clip_weights\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state.\n\n For centered=False: n\n For centered=True: n, g, delta\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n \"\"\"\n if self.centered:\n return (mx.nd.zeros(weight.shape, weight.context), # n\n mx.nd.zeros(weight.shape, weight.context), # g\n mx.nd.zeros(weight.shape, weight.context)) # delta\n else:\n return (mx.nd.zeros(weight.shape, weight.context), ) # n\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n grad = grad * self.rescale_grad + wd * weight\n\n if not self.centered:\n (n, ) = state\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n\n weight[:] -= lr * grad/(mx.nd.sqrt(n + self.epsilon))\n\n else:\n n, g, delta = state\n if self.clip_gradient is not None:\n grad 
= mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n\n g[:] = (1 - self.gamma1) * grad + self.gamma1 * g\n delta[:] = (self.gamma2) * delta - lr * grad/(mx.nd.sqrt(n - g*g + self.epsilon))\n weight[:] += delta\n\n if self.clip_weights:\n mx.ndarray.clip(weight, -self.clip_weights, self.clip_weights, out=weight)\n\[email protected](\"Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8230\")\n@with_seed(0)\ndef test_rms():\n opt1 = PyRMSProp\n opt2 = mx.optimizer.RMSProp\n shape = (3, 4, 5)\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n cw_options = [{}, {'clip_weights': 0.01}]\n center_options = [{}, {'centered': False}, {'centered': True}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32]:\n for cw_option in cw_options:\n for cg_option in cg_options:\n for center_option in center_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(cw_option)\n kwarg.update(cg_option)\n kwarg.update(center_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n if (default_context() == mx.cpu()):\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, g_stype='row_sparse')\n\nclass PyFtrl(mx.optimizer.Optimizer):\n \"\"\"The Ftrl optimizer.\n\n Referenced from *Ad Click Prediction: a View from the Trenches*, available at\n http://dl.acm.org/citation.cfm?id=2488200.\n\n Parameters\n ----------\n lamda1 : float, optional\n L1 regularization coefficient.\n learning_rate : float, optional\n The initial learning rate.\n beta : float, optional\n Per-coordinate learning rate correlation parameter.\n eta :\n .. 
math::\n \\\\eta_{t,i} = \\\\frac{learningrate}{\\\\beta+\\\\sqrt{\\\\sum_{s=1}^tg_{s,i}^t}}\n \"\"\"\n\n def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, sparse_update=False, **kwargs):\n super(PyFtrl, self).__init__(**kwargs)\n self.lamda1 = lamda1\n self.beta = beta\n self.lr = learning_rate\n self.sparse_update = sparse_update\n\n def create_state(self, index, weight):\n return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # dn\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # n\n\n def update(self, index, weight, grad, state):\n self._update_count(index)\n wd = self._get_wd(index)\n lr = self._get_lr(index)\n num_rows = weight.shape[0]\n\n dn, n = state\n for row in range(num_rows):\n all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))\n if all_zeros and self.sparse_update:\n continue\n grad[row] = grad[row] * self.rescale_grad\n if self.clip_gradient is not None:\n mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])\n\n #update dn, n\n dn[row] += grad[row] - (mx.nd.sqrt(n[row] + grad[row] * grad[row]) - mx.nd.sqrt(n[row])) * weight[row] / lr\n n[row] += grad[row] * grad[row]\n\n # update weight\n weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \\\n ((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1)\n\n@with_seed()\ndef test_ftrl():\n opt1 = PyFtrl\n opt2 = mx.optimizer.Ftrl\n shape = (3, 4, 5)\n kwargs = [{},\n {'clip_gradient': 0.5},\n {'clip_gradient': 0.4, 'rescale_grad': 0.14},\n {'rescale_grad': 0.8},\n {'clip_gradient': 0.5, 'wd': 0.07},\n {'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03},\n {'rescale_grad': 0.8, 'wd': 0.05},\n {'rescale_grad': 0.8, 'wd': 0.05, 'lamda1': 0.01},\n {'clip_gradient': 0.5, 'wd': 0.07, 'lamda1': 1.0}]\n for kwarg in kwargs:\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, np.float32)\n compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,\n np.float32, w_stype='row_sparse', g_stype='row_sparse')\n\n@with_seed(1234)\ndef test_nadam():\n\n def get_net(num_hidden, flatten=True):\n data = mx.symbol.Variable('data')\n fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128, flatten=flatten)\n act1 = mx.symbol.Activation(fc1, name='relu1', act_type=\"relu\")\n fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64, flatten=flatten)\n act2 = mx.symbol.Activation(fc2, name='relu2', act_type=\"relu\")\n fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=num_hidden, flatten=flatten)\n return fc3\n\n N = 20\n data = mx.random.uniform(-1, 1, shape=(N, 10))\n label = mx.random.uniform(-1, 1, shape=(N, 1))\n data_iter = mx.io.NDArrayIter(data, label, batch_size=5, label_name='label', shuffle=True)\n output = get_net(1)\n l = mx.symbol.Variable('label')\n Loss = gluon.loss.L1Loss()\n loss = Loss(output, l)\n loss = mx.sym.make_loss(loss)\n mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))\n mod.fit(data_iter, num_epoch=60, optimizer_params={'learning_rate': 0.0005, 'wd': 0.0005},\n initializer=mx.init.Xavier(magnitude=2), eval_metric=mx.metric.Loss(),\n optimizer='nadam')\n assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.1\n\n# AdaGrad\nclass PyAdaGrad(mx.optimizer.Optimizer):\n \"\"\"The python reference of AdaGrad optimizer.\n\n This class implements the AdaGrad optimizer described in *Adaptive Subgradient\n Methods for Online Learning and Stochastic Optimization*, and available at\n 
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.\n\n Updates are applied by::\n\n rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)\n history = history + square(rescaled_grad)\n w = w - learning_rate * rescaled_grad / sqrt(history + epsilon)\n\n This optimizer accepts the following parameters in addition to those accepted\n by :class:`.Optimizer`.\n\n Parameters\n ----------\n eps: float, optional\n Small value to avoid division by 0.\n\n \"\"\"\n def __init__(self, eps=1e-7, **kwargs):\n super(PyAdaGrad, self).__init__(**kwargs)\n self.float_stable_eps = eps\n\n def create_state(self, index, weight):\n return mx.nd.zeros(weight.shape, weight.context, stype=weight.stype)\n\n def update(self, index, weight, grad, state):\n self._update_count(index)\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n\n history = state\n grad = grad * self.rescale_grad\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n history[:] += mx.nd.square(grad)\n div = grad / mx.nd.sqrt(history + self.float_stable_eps)\n weight[:] += (div + weight * wd) * -lr\n\ndef test_adagrad():\n mx.random.seed(0)\n opt1 = PyAdaGrad\n opt2 = mx.optimizer.AdaGrad\n shape = (3, 4, 5)\n eps_options = [{}, {'eps': 1e-8}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.0}]\n for dtype in [np.float32]:\n for eps_option in eps_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n kwarg = {}\n kwarg.update(eps_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n if wd_option.get('wd', 0.0) == 0.0:\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n w_stype='row_sparse', g_stype='row_sparse')\n\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\n# This example provides an end-to-end pipeline for a common Kaggle competition.\n# The entire pipeline includes common utilities such as k-fold cross validation\n# and data pre-processing.\n#\n# Specifically, the example studies the `House Prices: Advanced Regression\n# Techniques` challenge as a case study.\n#\n# The link to the problem on Kaggle:\n# https://www.kaggle.com/c/house-prices-advanced-regression-techniques\n\nimport numpy as np\nimport pandas as pd\nfrom mxnet import autograd\nfrom mxnet import gluon\nfrom mxnet import ndarray as nd\n\n# After logging in www.kaggle.com, the training and testing data sets can be downloaded at:\n# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv\n# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv\ntrain = pd.read_csv(\"train.csv\")\ntest = pd.read_csv(\"test.csv\")\nall_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],\n test.loc[:, 'MSSubClass':'SaleCondition']))\n\n# Get all the numerical features and apply standardization.\nnumeric_feas = all_X.dtypes[all_X.dtypes != \"object\"].index\nall_X[numeric_feas] = all_X[numeric_feas].apply(lambda x:\n (x - x.mean()) / (x.std()))\n# Convert categorical feature values to numerical (including N/A).\nall_X = pd.get_dummies(all_X, dummy_na=True)\n# Approximate N/A feature value by the mean value of the current feature.\nall_X = all_X.fillna(all_X.mean())\n\nnum_train = train.shape[0]\n\n# Convert data formats to NDArrays to feed into gluon.\nX_train = all_X[:num_train].as_matrix()\nX_test = all_X[num_train:].as_matrix()\ny_train = train.SalePrice.as_matrix()\n\nX_train = nd.array(X_train)\ny_train = nd.array(y_train)\ny_train.reshape((num_train, 1))\n\nX_test = nd.array(X_test)\nsquare_loss = gluon.loss.L2Loss()\n\ndef get_rmse_log(net, X_train, y_train):\n \"\"\"Gets root mse between the logarithms of the prediction and the truth.\"\"\"\n num_train = X_train.shape[0]\n clipped_preds = nd.clip(net(X_train), 1, float('inf'))\n return np.sqrt(2 * nd.sum(square_loss(\n nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)\n\ndef get_net():\n \"\"\"Gets a neural network. 
Better results are obtained with modifications.\"\"\"\n net = gluon.nn.Sequential()\n with net.name_scope():\n net.add(gluon.nn.Dense(50, activation=\"relu\"))\n net.add(gluon.nn.Dense(1))\n net.initialize()\n return net\n\ndef train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,\n weight_decay, batch_size):\n \"\"\"Trains the model.\"\"\"\n dataset_train = gluon.data.ArrayDataset(X_train, y_train)\n data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,\n shuffle=True)\n trainer = gluon.Trainer(net.collect_params(), 'adam',\n {'learning_rate': learning_rate,\n 'wd': weight_decay})\n net.collect_params().initialize(force_reinit=True)\n for epoch in range(epochs):\n for data, label in data_iter_train:\n with autograd.record():\n output = net(data)\n loss = square_loss(output, label)\n loss.backward()\n trainer.step(batch_size)\n avg_loss = get_rmse_log(net, X_train, y_train)\n if epoch > verbose_epoch:\n print(\"Epoch %d, train loss: %f\" % (epoch, avg_loss))\n return avg_loss\n\ndef k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,\n learning_rate, weight_decay, batch_size):\n \"\"\"Conducts k-fold cross validation for the model.\"\"\"\n assert k > 1\n fold_size = X_train.shape[0] // k\n\n train_loss_sum = 0.0\n test_loss_sum = 0.0\n for test_idx in range(k):\n X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *\n fold_size, :]\n y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]\n val_train_defined = False\n for i in range(k):\n if i != test_idx:\n X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]\n y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]\n if not val_train_defined:\n X_val_train = X_cur_fold\n y_val_train = y_cur_fold\n val_train_defined = True\n else:\n X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)\n y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)\n net = get_net()\n train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,\n learning_rate, weight_decay, batch_size)\n train_loss_sum += train_loss\n test_loss = get_rmse_log(net, X_val_test, y_val_test)\n print(\"Test loss: %f\" % test_loss)\n test_loss_sum += test_loss\n return train_loss_sum / k, test_loss_sum / k\n\n# The sets of parameters. Better results are obtained with modifications.\n# These parameters can be fine-tuned with k-fold cross-validation.\nk = 5\nepochs = 100\nverbose_epoch = 95\nlearning_rate = 0.3\nweight_decay = 100\nbatch_size = 100\n\ntrain_loss, test_loss = \\\n k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,\n learning_rate, weight_decay, batch_size)\nprint(\"%d-fold validation: Avg train loss: %f, Avg test loss: %f\" %\n (k, train_loss, test_loss))\n\ndef learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,\n weight_decay, batch_size):\n \"\"\"Trains the model and predicts on the test data set.\"\"\"\n net = get_net()\n _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,\n weight_decay, batch_size)\n preds = net(X_test).asnumpy()\n test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])\n submission = pd.concat([test['Id'], test['SalePrice']], axis=1)\n submission.to_csv('submission.csv', index=False)\n\nlearn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,\n weight_decay, batch_size)\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\nimport argparse, tarfile\nimport math\nimport os\nimport numpy as np\n\nimport mxnet as mx\nimport mxnet.ndarray as F\nfrom mxnet import gluon\nfrom mxnet.gluon import nn\nfrom mxnet import autograd as ag\nfrom mxnet.test_utils import download\nfrom mxnet.image import CenterCropAug, ResizeAug\nfrom mxnet.io import PrefetchingIter\n\nfrom data import ImagePairIter\n\n\n# CLI\nparser = argparse.ArgumentParser(description='Super-resolution using an efficient sub-pixel convolution neural network.')\nparser.add_argument('--upscale_factor', type=int, default=3, help=\"super resolution upscale factor. default is 3.\")\nparser.add_argument('--batch_size', type=int, default=4, help='training batch size, per device. default is 4.')\nparser.add_argument('--test_batch_size', type=int, default=100, help='test batch size')\nparser.add_argument('--epochs', type=int, default=30, help='number of training epochs')\nparser.add_argument('--lr', type=float, default=0.001, help='learning Rate. default is 0.001.')\nparser.add_argument('--use-gpu', action='store_true', help='whether to use GPU.')\nparser.add_argument('--seed', type=int, default=123, help='random seed to use. 
Default=123')\nparser.add_argument('--resolve_img', type=str, help='input image to use')\nopt = parser.parse_args()\n\nprint(opt)\n\nupscale_factor = opt.upscale_factor\nbatch_size, test_batch_size = opt.batch_size, opt.test_batch_size\ncolor_flag = 0\n\n# get data\ndataset_path = \"dataset\"\ndataset_url = \"http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz\"\ndef get_dataset(prefetch=False):\n image_path = os.path.join(dataset_path, \"BSDS300/images\")\n\n if not os.path.exists(image_path):\n os.makedirs(dataset_path)\n file_name = download(dataset_url)\n with tarfile.open(file_name) as tar:\n for item in tar:\n tar.extract(item, dataset_path)\n os.remove(file_name)\n\n crop_size = 256\n crop_size -= crop_size % upscale_factor\n input_crop_size = crop_size // upscale_factor\n\n input_transform = [CenterCropAug((crop_size, crop_size)), ResizeAug(input_crop_size)]\n target_transform = [CenterCropAug((crop_size, crop_size))]\n\n iters = (ImagePairIter(os.path.join(image_path, \"train\"),\n (input_crop_size, input_crop_size),\n (crop_size, crop_size),\n batch_size, color_flag, input_transform, target_transform),\n ImagePairIter(os.path.join(image_path, \"test\"),\n (input_crop_size, input_crop_size),\n (crop_size, crop_size),\n test_batch_size, color_flag,\n input_transform, target_transform))\n\n return [PrefetchingIter(i) for i in iters] if prefetch else iters\n\ntrain_data, val_data = get_dataset()\n\nmx.random.seed(opt.seed)\nctx = [mx.gpu(0)] if opt.use_gpu else [mx.cpu()]\n\n\n# define model\ndef _rearrange(raw, F, upscale_factor):\n # (N, C * r^2, H, W) -> (N, C, r^2, H, W)\n splitted = F.reshape(raw, shape=(0, -4, -1, upscale_factor**2, 0, 0))\n # (N, C, r^2, H, W) -> (N, C, r, r, H, W)\n unflatten = F.reshape(splitted, shape=(0, 0, -4, upscale_factor, upscale_factor, 0, 0))\n # (N, C, r, r, H, W) -> (N, C, H, r, W, r)\n swapped = F.transpose(unflatten, axes=(0, 1, 4, 2, 5, 3))\n # (N, C, H, r, W, r) -> (N, C, H*r, W*r)\n return F.reshape(swapped, shape=(0, 0, -3, -3))\n\n\nclass SuperResolutionNet(gluon.Block):\n def __init__(self, upscale_factor):\n super(SuperResolutionNet, self).__init__()\n with self.name_scope():\n self.conv1 = nn.Conv2D(64, (5, 5), strides=(1, 1), padding=(2, 2))\n self.conv2 = nn.Conv2D(64, (3, 3), strides=(1, 1), padding=(1, 1))\n self.conv3 = nn.Conv2D(32, (3, 3), strides=(1, 1), padding=(1, 1))\n self.conv4 = nn.Conv2D(upscale_factor ** 2, (3, 3), strides=(1, 1), padding=(1, 1))\n self.upscale_factor = upscale_factor\n\n def forward(self, x):\n x = F.Activation(self.conv1(x), act_type='relu')\n x = F.Activation(self.conv2(x), act_type='relu')\n x = F.Activation(self.conv3(x), act_type='relu')\n return _rearrange(self.conv4(x), F, self.upscale_factor)\n\nnet = SuperResolutionNet(upscale_factor)\nmetric = mx.metric.MSE()\n\ndef test(ctx):\n val_data.reset()\n avg_psnr = 0\n batches = 0\n for batch in val_data:\n batches += 1\n data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)\n label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)\n outputs = []\n for x in data:\n outputs.append(net(x))\n metric.update(label, outputs)\n avg_psnr += 10 * math.log10(1/metric.get()[1])\n metric.reset()\n avg_psnr /= batches\n print('validation avg psnr: %f'%avg_psnr)\n\n\ndef train(epoch, ctx):\n if isinstance(ctx, mx.Context):\n ctx = [ctx]\n net.initialize(mx.init.Orthogonal(), ctx=ctx)\n # re-initialize conv4's weight to be Orthogonal\n 
net.conv4.collect_params().initialize(mx.init.Orthogonal(scale=1), force_reinit=True, ctx=ctx)\n trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': opt.lr})\n loss = gluon.loss.L2Loss()\n\n for i in range(epoch):\n train_data.reset()\n for batch in train_data:\n data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)\n label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)\n outputs = []\n with ag.record():\n for x, y in zip(data, label):\n z = net(x)\n L = loss(z, y)\n L.backward()\n outputs.append(z)\n trainer.step(batch.data[0].shape[0])\n metric.update(label, outputs)\n\n name, acc = metric.get()\n metric.reset()\n print('training mse at epoch %d: %s=%f'%(i, name, acc))\n test(ctx)\n\n net.save_params('superres.params')\n\ndef resolve(ctx):\n from PIL import Image\n if isinstance(ctx, list):\n ctx = [ctx[0]]\n net.load_params('superres.params', ctx=ctx)\n img = Image.open(opt.resolve_img).convert('YCbCr')\n y, cb, cr = img.split()\n data = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)\n out_img_y = mx.nd.reshape(net(data), shape=(-3, -2)).asnumpy()\n out_img_y = out_img_y.clip(0, 255)\n out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')\n\n out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)\n out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)\n out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')\n\n out_img.save('resolved.png')\n\nif opt.resolve_img:\n resolve(ctx)\nelse:\n train(opt.epochs, ctx)\n"
] | [
[
"numpy.zeros_like"
],
[
"pandas.concat",
"pandas.read_csv",
"pandas.get_dummies"
],
[
"numpy.uint8"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
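The test_optimizer.py entry above validates each mxnet optimizer by stepping a pure-Python reference and the native implementation from identical weights and gradients, then asserting near-equality (`compare_optimizer`). A stand-alone, numpy-only sketch of that testing pattern is shown here; it reuses the momentum-SGD update rule from `PySGD` and tolerances of the same order, and deliberately omits the multi-precision and sparse paths exercised in the real tests.

# Stand-alone illustration of the compare_optimizer pattern: run two
# implementations of the same update from identical state and require the
# resulting weights to agree within a tolerance.  Plain numpy only.
import numpy as np

def sgd_reference(weight, grad, mom, lr=0.01, wd=0.05, momentum=0.9, rescale=1.0):
    # Reference momentum-SGD step, written term by term as in PySGD.update.
    mom[:] = momentum * mom - lr * wd * weight - lr * rescale * grad
    weight += mom
    return weight

def sgd_vectorised(weight, grad, mom, lr=0.01, wd=0.05, momentum=0.9, rescale=1.0):
    # Algebraically identical step written as a single fused expression.
    mom[:] = momentum * mom - lr * (wd * weight + rescale * grad)
    weight += mom
    return weight

rng = np.random.RandomState(0)
w = rng.uniform(-1, 1, size=(3, 4, 5)).astype(np.float32)
g = rng.uniform(-1, 1, size=w.shape).astype(np.float32)

w1, w2 = w.copy(), w.copy()
m1, m2 = np.zeros_like(w), np.zeros_like(w)
for _ in range(5):
    sgd_reference(w1, g, m1)
    sgd_vectorised(w2, g, m2)
np.testing.assert_allclose(w1, w2, rtol=1e-4, atol=1e-5)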
kavach-feature/Advanced_lane_finding | [
"12e4e330e338734fdb35655c7581b98ba1eb490b"
] | [
"line.py"
] | [
"import numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport pickle\r\n\r\nclass Line():\r\n\tdef __init__(self,n):\r\n\t\tself.n=n\r\n\t\tself.detected =False\r\n\r\n\t\t#Polynomial coefficients of the lines\r\n\r\n\t\tself.A=[]\r\n\t\tself.B=[]\r\n\t\tself.C=[]\r\n\r\n\t\t#Running average of coefficients\r\n\r\n\t\tself.A_avg=0.\r\n\t\tself.B_avg=0.\r\n\t\tself.C_avg=0.\r\n\r\n\tdef obtain_fit(self):\r\n\t\treturn (self.A_avg,self.B_avg,self.C_avg)\t\r\n\r\n\r\n\tdef update_fit(self,fit_coeffs):\r\n\r\n\t\t\"\"\"Obtain the fit coefficients from the latest frame and apply over each of 2nd polynomial coefficients\r\n\t\tfor the purpose of smoothing\r\n\t\t\"\"\"\r\n\r\n\t\tfull_Q= len(self.A) >= self.n\r\n\r\n\r\n\t\t#Append line fit coefficients\r\n\r\n\t\tself.A.append(fit_coeffs[0])\r\n\t\tself.B.append(fit_coeffs[1])\r\n\t\tself.C.append(fit_coeffs[2])\r\n\r\n\t\tif full_Q:\r\n\t\t\t_=self.A.pop(0)\r\n\t\t\t_=self.B.pop(0)\r\n\t\t\t_=self.C.pop(0)\r\n\r\n\r\n\t\t# Compute the average of the polynomial coefficients \r\n\r\n\t\tself.A_avg = np.mean(self.A)\r\n\t\tself.B_avg = np.mean(self.B)\r\n\t\tself.C_avg = np.mean(self.C)\r\n\r\n\r\n\t\treturn (self.A_avg,self.B_avg,self.C_avg)\r\n\r\n"
] | [
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
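The `Line` class in the entry above keeps the last n second-order polynomial fits and exposes their running average for lane smoothing. A short usage sketch follows; the per-frame fit coefficients are invented for illustration, and it assumes line.py from that entry is importable as a module.

# Usage sketch for the Line smoother: push the polynomial fit found in each
# video frame and read back coefficients averaged over the last n frames.
import numpy as np
from line import Line  # assumes line.py from the entry above is on the path

left_line = Line(n=5)
frame_fits = [
    np.array([2.1e-4, -0.31, 380.0]),  # invented fits for three frames
    np.array([2.3e-4, -0.35, 377.0]),
    np.array([1.9e-4, -0.29, 382.0]),
]
for fit in frame_fits:
    smoothed = left_line.update_fit(fit)  # returns (A_avg, B_avg, C_avg)

A_avg, B_avg, C_avg = left_line.obtain_fit()
print(A_avg, B_avg, C_avg)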
vermouth1992/torchlib | [
"63b2bedb40f670b2d9fbfc0daeab4a8d44623095",
"63b2bedb40f670b2d9fbfc0daeab4a8d44623095",
"63b2bedb40f670b2d9fbfc0daeab4a8d44623095",
"63b2bedb40f670b2d9fbfc0daeab4a8d44623095",
"63b2bedb40f670b2d9fbfc0daeab4a8d44623095",
"63b2bedb40f670b2d9fbfc0daeab4a8d44623095"
] | [
"torchlib/deep_rl/algorithm/ppo/utils.py",
"tensorlib/rl/envs/wrappers/atari_wrappers.py",
"torchlib/deep_rl/algorithm/rainbow/memory.py",
"tensorlib/rl/common.py",
"torchlib/dataset/utils.py",
"torchlib/generative_model/made.py"
] | [
"\"\"\"\nCommon utilities to implement policy gradient algorithms\n\"\"\"\n\nfrom collections import namedtuple, deque\n\nimport numpy as np\nfrom scipy import signal\nfrom torchlib.dataset.utils import create_data_loader\nfrom torchlib.deep_rl.utils.replay.replay import ReplayBuffer\nfrom torchlib.deep_rl.utils.replay.sampler import Sampler\nfrom torchlib.utils.math import unnormalize, normalize\n\nTrajectory = namedtuple('Trajectory', ('state', 'action', 'reward_to_go', 'advantage', 'old_log_prob'))\n\n\nclass PPOReplayBuffer(ReplayBuffer):\n def __init__(self, gamma, lam, policy, alpha=0.9):\n \"\"\"\n\n Args:\n gamma: discount factor\n lam: generalized advantage estimation\n policy: PPO policy\n alpha: value moving average ratio\n \"\"\"\n super(PPOReplayBuffer, self).__init__(None, None, None, None, None)\n self.gamma = gamma\n self.lam = lam\n self.alpha = alpha\n self.policy = policy\n\n def _initialize(self):\n self.memory = deque()\n self.running_value_mean = 0.\n self.running_value_std = 0.\n\n def clear(self):\n self._size = 0\n self.memory.clear()\n\n def _finish_trajectory(self, states, actions, rewards, last_value):\n \"\"\"Compute path accessory information including (reward_to_go, old_log_prob, advantage)\n\n Returns:\n\n \"\"\"\n predicted_state_values = self.policy.predict_state_value_batch(states)\n predicted_state_values = unnormalize(predicted_state_values, self.running_value_mean, self.running_value_std)\n\n rewards_last_state = np.append(rewards, last_value)\n predicted_state_values = np.append(predicted_state_values, last_value)\n\n # Used for fit value function\n reward_to_go = discount(rewards_last_state, self.gamma).astype(np.float32)[:-1]\n\n temporal_difference = rewards + predicted_state_values[1:] * self.gamma - predicted_state_values[:-1]\n # calculate reward-to-go\n gae = discount(temporal_difference, self.gamma * self.lam).astype(np.float32)\n\n old_log_prob = self.policy.predict_log_prob_batch(states, actions)\n\n return reward_to_go, gae, old_log_prob\n\n def add_trajectory(self, states, actions, rewards, last_value):\n \"\"\"If last_state is not None, this trajectory is truncated.\n\n Args:\n states: (T, ob_dim)\n actions: (T, ac_dim)\n rewards: (T,)\n last_state: (ob_dim)\n\n Returns:\n\n \"\"\"\n reward_to_go, gae, old_log_prob = self._finish_trajectory(states, actions, rewards, last_value)\n self.memory.append(Trajectory(\n state=states,\n action=actions,\n reward_to_go=reward_to_go,\n advantage=gae,\n old_log_prob=old_log_prob\n ))\n\n self._size += actions.shape[0]\n\n def random_iterator(self, batch_size):\n \"\"\"Create an iterator of all the dataset and update value mean and std\n\n\n Args:\n batch_size:\n\n Returns:\n\n \"\"\"\n states = np.concatenate([trajectory.state for trajectory in self.memory], axis=0)\n actions = np.concatenate([trajectory.action for trajectory in self.memory], axis=0)\n reward_to_go = np.concatenate([trajectory.reward_to_go for trajectory in self.memory], axis=0)\n gaes = np.concatenate([trajectory.advantage for trajectory in self.memory], axis=0)\n old_log_prob = np.concatenate([trajectory.old_log_prob for trajectory in self.memory], axis=0)\n\n value_mean, value_std = np.mean(reward_to_go), np.std(reward_to_go)\n reward_to_go = normalize(reward_to_go, value_mean, value_std)\n\n self.running_value_mean = self.running_value_mean * self.alpha + value_mean * (1 - self.alpha)\n self.running_value_std = self.running_value_std * self.alpha + value_std * (1 - self.alpha)\n\n gaes = normalize(gaes, np.mean(gaes), 
np.std(gaes))\n\n batch_size = min(batch_size, states.shape[0])\n\n data_loader = create_data_loader((states, actions, reward_to_go, gaes, old_log_prob),\n batch_size=batch_size, shuffle=True, drop_last=True)\n\n return data_loader\n\n\nclass PPOSampler(Sampler):\n def __init__(self, min_steps_per_batch, logger=None):\n super(PPOSampler, self).__init__()\n self.min_steps_per_batch = min_steps_per_batch\n self.logger = logger\n\n def sample_trajectories(self, policy=None):\n obs_lst = []\n action_lst = []\n reward_lst = []\n done_lst = []\n\n policy = self.policy if policy is None else policy\n obs = self.env.reset()\n for _ in range(self.min_steps_per_batch // obs.shape[0]):\n action = policy.predict_batch(obs)\n obs_lst.append(obs)\n action_lst.append(action)\n\n obs, rewards, dones, infos = self.env.step(action)\n\n reward_lst.append(rewards)\n done_lst.append(dones)\n\n # compute last state value for the last trajectory in each environment\n last_state_lst = obs\n last_value_lst = self.policy.predict_state_value_batch(last_state_lst)\n last_value_lst = unnormalize(last_value_lst, self.pool.running_value_mean, self.pool.running_value_std)\n\n obs_lst = np.stack(obs_lst, axis=1)\n action_lst = np.stack(action_lst, axis=1)\n reward_lst = np.stack(reward_lst, axis=1)\n done_lst = np.stack(done_lst, axis=1)\n\n # separate trajectories and add to pool\n for i in range(self.env.num_envs):\n done_index = np.where(done_lst[i])[0] + 1\n if done_lst[i][-1] == True:\n done_index = done_index[:-1] # ignore the last one\n last_value = 0.\n else:\n last_value = last_value_lst[i]\n\n sub_obs_lst = np.split(obs_lst[i], done_index)\n sub_action_lst = np.split(action_lst[i], done_index)\n sub_reward_lst = np.split(reward_lst[i], done_index)\n sub_last_value_lst = [0.] * (len(sub_obs_lst) - 1) + [last_value]\n\n for j in range(len(sub_obs_lst)):\n self.pool.add_trajectory(states=sub_obs_lst[j],\n actions=sub_action_lst[j],\n rewards=sub_reward_lst[j],\n last_value=sub_last_value_lst[j])\n if self.logger:\n self.logger.store(EpReward=np.sum(sub_reward_lst[j]) + sub_last_value_lst[j])\n self.logger.store(EpLength=sub_obs_lst[j].shape[0])\n\n\ndef discount(x, gamma):\n return signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]\n",
"\"\"\"\nCode copied from UC Berkeley CS294-112\n\"\"\"\n\nfrom collections import deque\n\nimport cv2\nimport gym\nimport numpy as np\nfrom gym import spaces\n\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env=None, noop_max=30):\n \"\"\"Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n super(NoopResetEnv, self).__init__(env)\n self.noop_max = noop_max\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'\n\n def reset(self):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset()\n noops = np.random.randint(1, self.noop_max + 1)\n for _ in range(noops):\n obs, _, _, _ = self.env.step(0)\n return obs\n\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env=None):\n \"\"\"Take action on reset for environments that are fixed until firing.\"\"\"\n super(FireResetEnv, self).__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def reset(self):\n self.env.reset()\n obs, _, _, _ = self.env.step(1)\n obs, _, _, _ = self.env.step(2)\n return obs\n\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env=None):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. since it helps value estimation.\n \"\"\"\n super(EpisodicLifeEnv, self).__init__(env)\n self.lives = 0\n self.was_real_done = True\n self.was_real_reset = False\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n if lives < self.lives and lives > 0:\n # for Qbert somtimes we stay in lives == 0 condtion for a few frames\n # so its important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\n def reset(self):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset()\n self.was_real_reset = True\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.was_real_reset = False\n self.lives = self.env.unwrapped.ale.lives()\n return obs\n\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env=None, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n super(MaxAndSkipEnv, self).__init__(env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = deque(maxlen=2)\n self._skip = skip\n\n def step(self, action):\n total_reward = 0.0\n done = None\n for _ in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n self._obs_buffer.append(obs)\n total_reward += reward\n if done:\n break\n\n max_frame = np.max(np.stack(self._obs_buffer), axis=0)\n\n return max_frame, total_reward, done, info\n\n def reset(self):\n \"\"\"Clear past frame buffer and init. to first obs. 
from inner env.\"\"\"\n self._obs_buffer.clear()\n obs = self.env.reset()\n self._obs_buffer.append(obs)\n return obs\n\n\ndef _process_frame84(frame):\n img = np.reshape(frame, [210, 160, 3]).astype(np.float32)\n img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114\n resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_LINEAR)\n x_t = resized_screen[18:102, :]\n x_t = np.reshape(x_t, [84, 84, 1])\n return x_t.astype(np.uint8)\n\n\nclass ProcessFrame84(gym.Wrapper):\n def __init__(self, env=None):\n super(ProcessFrame84, self).__init__(env)\n self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1))\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n return _process_frame84(obs), reward, done, info\n\n def reset(self):\n return _process_frame84(self.env.reset())\n\n\nclass ClippedRewardsWrapper(gym.RewardWrapper):\n def reward(self, reward):\n return np.sign(reward)\n\n\nclass StackFrame(gym.Wrapper):\n def __init__(self, env, frame_length=4):\n super(StackFrame, self).__init__(env)\n self.single_observation_space = env.observation_space\n low = np.repeat(self.single_observation_space.low, frame_length, axis=-1)\n high = np.repeat(self.single_observation_space.high, frame_length, axis=-1)\n dtype = self.single_observation_space.dtype\n self.observation_space = spaces.Box(low=low, high=high, shape=None, dtype=dtype)\n self.obs = deque(maxlen=frame_length)\n for _ in range(frame_length):\n self.obs.append(np.zeros(shape=self.single_observation_space.shape))\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.obs.append(obs)\n return np.concatenate(self.obs, axis=-1), reward, done, info\n\n def reset(self):\n obs = self.env.reset()\n self.obs.append(obs)\n return np.concatenate(self.obs, axis=-1)\n\n\ndef wrap_deepmind_ram(env, frame_length=4):\n env = EpisodicLifeEnv(env)\n env = NoopResetEnv(env, noop_max=30)\n env = MaxAndSkipEnv(env, skip=4)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = StackFrame(env, frame_length=frame_length)\n env = ClippedRewardsWrapper(env)\n return env\n\n\ndef wrap_deepmind(env, frame_length=4):\n assert 'NoFrameskip' in env.spec.id\n env = EpisodicLifeEnv(env)\n env = NoopResetEnv(env, noop_max=30)\n env = MaxAndSkipEnv(env, skip=4)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = ProcessFrame84(env)\n env = StackFrame(env, frame_length=frame_length)\n env = ClippedRewardsWrapper(env)\n return env\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nfrom collections import namedtuple\n\nimport numpy as np\nimport torch\n\nTransition = namedtuple('Transition', ('timestep', 'state', 'action', 'reward', 'nonterminal'))\nblank_trans = Transition(0, torch.zeros(84, 84, dtype=torch.uint8), None, 0, False)\n\n\n# Segment tree data structure where parent node values are sum/max of children node values\nclass SegmentTree():\n def __init__(self, size):\n self.index = 0\n self.size = size\n self.full = False # Used to track actual capacity\n self.sum_tree = np.zeros((2 * size - 1,),\n dtype=np.float32) # Initialise fixed size tree with all (priority) zeros\n self.data = np.array([None] * size) # Wrap-around cyclic buffer\n self.max = 1 # Initial max value to return (1 = 1^ω)\n\n # Propagates value up tree given a tree index\n def _propagate(self, index, value):\n parent = (index - 1) // 2\n left, right = 2 * parent + 1, 2 * parent + 2\n self.sum_tree[parent] = self.sum_tree[left] + self.sum_tree[right]\n if parent != 0:\n self._propagate(parent, value)\n\n # Updates value given a tree index\n def update(self, index, value):\n self.sum_tree[index] = value # Set new value\n self._propagate(index, value) # Propagate value\n self.max = max(value, self.max)\n\n def append(self, data, value):\n self.data[self.index] = data # Store data in underlying data structure\n self.update(self.index + self.size - 1, value) # Update tree\n self.index = (self.index + 1) % self.size # Update index\n self.full = self.full or self.index == 0 # Save when capacity reached\n self.max = max(value, self.max)\n\n # Searches for the location of a value in sum tree\n def _retrieve(self, index, value):\n left, right = 2 * index + 1, 2 * index + 2\n if left >= len(self.sum_tree):\n return index\n elif value <= self.sum_tree[left]:\n return self._retrieve(left, value)\n else:\n return self._retrieve(right, value - self.sum_tree[left])\n\n # Searches for a value in sum tree and returns value, data index and tree index\n def find(self, value):\n index = self._retrieve(0, value) # Search for index of item from root\n data_index = index - self.size + 1\n return (self.sum_tree[index], data_index, index) # Return value, data index, tree index\n\n # Returns data given a data index\n def get(self, data_index):\n return self.data[data_index % self.size]\n\n def total(self):\n return self.sum_tree[0]\n\n\nclass ReplayMemory():\n def __init__(self, args, capacity):\n self.device = args.device\n self.capacity = capacity\n self.history = args.history_length\n self.discount = args.discount\n self.n = args.multi_step\n self.priority_weight = args.priority_weight # Initial importance sampling weight β, annealed to 1 over course of training\n self.priority_exponent = args.priority_exponent\n self.t = 0 # Internal episode timestep counter\n self.transitions = SegmentTree(\n capacity) # Store transitions in a wrap-around cyclic buffer within a sum tree for querying priorities\n\n # Adds state and action at time t, reward and terminal at time t + 1\n def append(self, state, action, reward, terminal):\n # Only store last frame and discretise to save memory\n state = state[-1].mul(255).to(dtype=torch.uint8, device=torch.device('cpu'))\n self.transitions.append(Transition(self.t, state, action, reward, not terminal),\n self.transitions.max) # Store new transition with maximum priority\n self.t = 0 if terminal else self.t + 1 # Start new episodes with t = 0\n\n # Returns a transition with blank states where appropriate\n def 
_get_transition(self, idx):\n transition = np.array([None] * (self.history + self.n))\n transition[self.history - 1] = self.transitions.get(idx)\n for t in range(self.history - 2, -1, -1): # e.g. 2 1 0\n if transition[t + 1].timestep == 0:\n transition[t] = blank_trans # If future frame has timestep 0\n else:\n transition[t] = self.transitions.get(idx - self.history + 1 + t)\n for t in range(self.history, self.history + self.n): # e.g. 4 5 6\n if transition[t - 1].nonterminal:\n transition[t] = self.transitions.get(idx - self.history + 1 + t)\n else:\n transition[t] = blank_trans # If prev (next) frame is terminal\n return transition\n\n # Returns a valid sample from a segment\n def _get_sample_from_segment(self, segment, i):\n valid = False\n while not valid:\n sample = np.random.uniform(i * segment,\n (i + 1) * segment) # Uniformly sample an element from within a segment\n prob, idx, tree_idx = self.transitions.find(\n sample) # Retrieve sample from tree with un-normalised probability\n # Resample if transition straddled current index or probablity 0\n if (self.transitions.index - idx) % self.capacity > self.n and (\n idx - self.transitions.index) % self.capacity >= self.history and prob != 0:\n valid = True # Note that conditions are valid but extra conservative around buffer index 0\n\n # Retrieve all required transition data (from t - h to t + n)\n transition = self._get_transition(idx)\n # Create un-discretised state and nth next state\n state = torch.stack([trans.state for trans in transition[:self.history]]).to(device=self.device).to(\n dtype=torch.float32).div_(255)\n next_state = torch.stack([trans.state for trans in transition[self.n:self.n + self.history]]).to(\n device=self.device).to(dtype=torch.float32).div_(255)\n # Discrete action to be used as index\n action = torch.tensor([transition[self.history - 1].action], dtype=torch.int64, device=self.device)\n # Calculate truncated n-step discounted return R^n = Σ_k=0->n-1 (γ^k)R_t+k+1 (note that invalid nth next states have reward 0)\n R = torch.tensor([sum(self.discount ** n * transition[self.history + n - 1].reward for n in range(self.n))],\n dtype=torch.float32, device=self.device)\n # Mask for non-terminal nth next states\n nonterminal = torch.tensor([transition[self.history + self.n - 1].nonterminal], dtype=torch.float32,\n device=self.device)\n\n return prob, idx, tree_idx, state, action, R, next_state, nonterminal\n\n def sample(self, batch_size):\n p_total = self.transitions.total() # Retrieve sum of all priorities (used to create a normalised probability distribution)\n segment = p_total / batch_size # Batch size number of segments, based on sum over all probabilities\n batch = [self._get_sample_from_segment(segment, i) for i in range(batch_size)] # Get batch of valid samples\n probs, idxs, tree_idxs, states, actions, returns, next_states, nonterminals = zip(*batch)\n states, next_states, = torch.stack(states), torch.stack(next_states)\n actions, returns, nonterminals = torch.cat(actions), torch.cat(returns), torch.stack(nonterminals)\n probs = np.array(probs, dtype=np.float32) / p_total # Calculate normalised probabilities\n capacity = self.capacity if self.transitions.full else self.transitions.index\n weights = (capacity * probs) ** -self.priority_weight # Compute importance-sampling weights w\n weights = torch.tensor(weights / weights.max(), dtype=torch.float32,\n device=self.device) # Normalise by max importance-sampling weight from batch\n return tree_idxs, states, actions, returns, next_states, nonterminals, 
weights\n\n def update_priorities(self, idxs, priorities):\n priorities = np.power(priorities, self.priority_exponent)\n [self.transitions.update(idx, priority) for idx, priority in zip(idxs, priorities)]\n\n # Set up internal state for iterator\n def __iter__(self):\n self.current_idx = 0\n return self\n\n # Return valid states for validation\n def __next__(self):\n if self.current_idx == self.capacity:\n raise StopIteration\n # Create stack of states\n state_stack = [None] * self.history\n state_stack[-1] = self.transitions.data[self.current_idx].state\n prev_timestep = self.transitions.data[self.current_idx].timestep\n for t in reversed(range(self.history - 1)):\n if prev_timestep == 0:\n state_stack[t] = blank_trans.state # If future frame has timestep 0\n else:\n state_stack[t] = self.transitions.data[self.current_idx + t - self.history + 1].state\n prev_timestep -= 1\n state = torch.stack(state_stack, 0).to(dtype=torch.float32, device=self.device).div_(\n 255) # Agent will turn into batch\n self.current_idx += 1\n return state\n\n next = __next__ # Alias __next__ for Python 2 compatibility\n",
"from abc import ABC, abstractmethod\n\nimport gym\nimport numpy as np\nfrom gym.spaces import Space\nfrom tensorlib.utils.random import set_global_seeds\n\n\nclass BaseAgent(ABC):\n def predict(self, state):\n return self.predict_batch(np.expand_dims(state, axis=0))[0]\n\n @abstractmethod\n def predict_batch(self, states, deterministic=False):\n \"\"\" Predict a batch of actions given states\n\n Args:\n states: (batch_size, ...)\n deterministic: (bool). Use to distinguish agent at training time and testing time.\n\n Returns:\n\n \"\"\"\n raise NotImplementedError\n\n def reset(self):\n \"\"\"\n This function is used for stateful agent such as recurrent agent.\n \"\"\"\n pass\n\n @property\n def state_dict(self):\n return {}\n\n def load_state_dict(self, states):\n pass\n\n def save_checkpoint(self, checkpoint_path):\n pass\n\n def load_checkpoint(self, checkpoint_path):\n pass\n\n\nclass RandomAgent(BaseAgent):\n def __init__(self, action_space: Space):\n \"\"\"\n\n Args:\n action_space: Must be batch action space\n \"\"\"\n self.action_space = action_space\n\n def predict_batch(self, states):\n return self.action_space.sample()\n\n\ndef test(env: gym.Env, agent: BaseAgent, num_episode=100, frame_history_len=1, render=False, seed=1996):\n set_global_seeds(seed)\n env.seed(seed)\n reward_lst = []\n for i in range(num_episode):\n observation_lst = []\n done = False\n episode_reward = 0\n previous_observation = env.reset()\n agent.reset()\n observation_lst.append(previous_observation)\n for _ in range(frame_history_len - 1):\n if render:\n env.render()\n action = env.action_space.sample()\n previous_observation, reward, done, _ = env.step(action)\n observation_lst.append(previous_observation)\n episode_reward += reward\n while not done:\n if render:\n env.render()\n action = agent.predict(np.concatenate(observation_lst, axis=-1))\n previous_observation, reward, done, _ = env.step(action)\n episode_reward += reward\n observation_lst.pop(0)\n observation_lst.append(previous_observation)\n print('Episode: {}/{}. Reward: {}'.format(i + 1, num_episode, episode_reward))\n reward_lst.append(episode_reward)\n print('Reward range [{}, {}]'.format(np.min(reward_lst), np.max(reward_lst)))\n print('Reward {}±{}'.format(np.mean(reward_lst), np.std(reward_lst)))\n\n env.close()\n",
"import numpy as np\nimport torch\nfrom torch.utils.data import TensorDataset, Dataset\nfrom torchlib.common import enable_cuda\n\n\ndef create_tensor_dataset(data):\n tensor_data = []\n for d in data:\n if isinstance(d, np.ndarray):\n tensor_data.append(torch.from_numpy(d))\n elif isinstance(d, torch.Tensor):\n tensor_data.append(d)\n else:\n raise ValueError('Unknown data type {}'.format(type(d)))\n return TensorDataset(*tensor_data)\n\n\nclass TupleDataset(Dataset):\n \"\"\"\n A tuple of tensordataset. Used for (input), (target)\n \"\"\"\n\n def __init__(self, tuples_tensors):\n \"\"\"\n\n Args:\n source: a tuple of a list of numpy array\n target: a tuple of a list of numpy array\n \"\"\"\n self.tensor_datasets = []\n for data in tuples_tensors:\n assert isinstance(data, tuple), \\\n 'Each element in tuples_tensors must also be a tuple. Got {}'.format(type(data))\n self.tensor_datasets.append(create_tensor_dataset(data))\n\n assert all(len(self.tensor_datasets[0]) == len(dataset) for dataset in self.tensor_datasets)\n\n def __getitem__(self, index):\n return tuple(dataset[index] for dataset in self.tensor_datasets)\n\n def __len__(self):\n return len(self.tensor_datasets[0])\n\n\ndef create_data_loader(data, batch_size=32, shuffle=True, drop_last=False):\n \"\"\" Create a data loader given numpy array x and y\n\n Args:\n data: a tuple (x, y, z, ...) where they have common first shape dim.\n\n Returns: Pytorch data loader\n\n \"\"\"\n if drop_last:\n batch_size = min(batch_size, data[0].shape[0])\n kwargs = {'num_workers': 0, 'pin_memory': True} if enable_cuda else {}\n dataset = create_tensor_dataset(data)\n loader = torch.utils.data.DataLoader(dataset, batch_size, shuffle=shuffle, drop_last=drop_last, **kwargs)\n return loader\n\n\ndef create_tuple_data_loader(tuples_data, batch_size=32, shuffle=True, drop_last=False):\n if drop_last:\n batch_size = min(batch_size, tuples_data[0][0].shape[0])\n kwargs = {'num_workers': 0, 'pin_memory': True} if enable_cuda else {}\n dataset = TupleDataset(tuples_data)\n loader = torch.utils.data.DataLoader(dataset, batch_size, shuffle=shuffle, drop_last=drop_last, **kwargs)\n return loader\n",
"\"\"\"\nThe MADE implementation is copied from https://github.com/karpathy/pytorch-made/blob/master/made.py\n\"\"\"\n\nimport numpy as np\nimport torch.nn as nn\nfrom torchlib.utils.layers import MaskedLinear\n\n\nclass MADE(nn.Module):\n def __init__(self, nin, hidden_sizes, nout, num_masks=1, natural_ordering=False):\n \"\"\"\n nin: integer; number of inputs\n hidden sizes: a list of integers; number of units in hidden layers\n nout: integer; number of outputs, which usually collectively parameterize some kind of 1D distribution\n note: if nout is e.g. 2x larger than nin (perhaps the mean and std), then the first nin\n will be all the means and the second nin will be stds. i.e. output dimensions depend on the\n same input dimensions in \"chunks\" and should be carefully decoded downstream appropriately.\n the output of running the tests for this file makes this a bit more clear with examples.\n num_masks: can be used to train ensemble over orderings/connections\n natural_ordering: force natural ordering of dimensions, don't use random permutations\n \"\"\"\n\n super().__init__()\n self.nin = nin\n self.nout = nout\n self.hidden_sizes = hidden_sizes\n assert self.nout % self.nin == 0, \"nout must be integer multiple of nin\"\n\n # define a simple MLP neural net\n self.net = []\n hs = [nin] + hidden_sizes + [nout]\n for h0, h1 in zip(hs, hs[1:]):\n self.net.extend([\n MaskedLinear(h0, h1),\n nn.ReLU(),\n ])\n self.net.pop() # pop the last ReLU for the output layer\n self.net = nn.Sequential(*self.net)\n\n # seeds for orders/connectivities of the model ensemble\n self.natural_ordering = natural_ordering\n self.num_masks = num_masks\n self.seed = 0 # for cycling through num_masks orderings\n\n self.m = {}\n self.update_masks() # builds the initial self.m connectivity\n # note, we could also precompute the masks and cache them, but this\n # could get memory expensive for large number of masks.\n\n def update_masks(self):\n if self.m and self.num_masks == 1: return # only a single seed, skip for efficiency\n L = len(self.hidden_sizes)\n\n # fetch the next seed and construct a random stream\n rng = np.random.RandomState(self.seed)\n self.seed = (self.seed + 1) % self.num_masks\n\n # sample the order of the inputs and the connectivity of all neurons\n self.m[-1] = np.arange(self.nin) if self.natural_ordering else rng.permutation(self.nin)\n for l in range(L):\n self.m[l] = rng.randint(self.m[l - 1].min(), self.nin - 1, size=self.hidden_sizes[l])\n\n # construct the mask matrices\n masks = [self.m[l - 1][:, None] <= self.m[l][None, :] for l in range(L)]\n masks.append(self.m[L - 1][:, None] < self.m[-1][None, :])\n\n # handle the case where nout = nin * k, for integer k > 1\n if self.nout > self.nin:\n k = int(self.nout / self.nin)\n # replicate the mask across the other outputs\n masks[-1] = np.concatenate([masks[-1]] * k, axis=1)\n\n # set the masks in all MaskedLinear layers\n layers = [l for l in self.net.modules() if isinstance(l, MaskedLinear)]\n for l, m in zip(layers, masks):\n l.set_mask(m)\n\n def forward(self, x):\n return self.net(x)\n"
] | [
[
"numpy.split",
"numpy.stack",
"numpy.concatenate",
"numpy.append",
"numpy.std",
"numpy.mean",
"scipy.signal.lfilter",
"numpy.where",
"numpy.sum"
],
[
"numpy.reshape",
"numpy.stack",
"numpy.concatenate",
"numpy.sign",
"numpy.repeat",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.power",
"torch.zeros",
"torch.cat",
"torch.tensor",
"torch.stack",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"torch.device"
],
[
"numpy.expand_dims",
"numpy.min",
"numpy.concatenate",
"numpy.max",
"numpy.std",
"numpy.mean"
],
[
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.from_numpy"
],
[
"torch.nn.Sequential",
"numpy.arange",
"numpy.concatenate",
"torch.nn.ReLU",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jianoaix/ray | [
"1701b923bc83905f8961c06a6a173e3eba46a936",
"1701b923bc83905f8961c06a6a173e3eba46a936",
"1701b923bc83905f8961c06a6a173e3eba46a936",
"1701b923bc83905f8961c06a6a173e3eba46a936",
"1701b923bc83905f8961c06a6a173e3eba46a936",
"1701b923bc83905f8961c06a6a173e3eba46a936"
] | [
"python/ray/_private/utils.py",
"python/ray/tune/tests/test_integration_wandb.py",
"rllib/examples/custom_metrics_and_callbacks.py",
"rllib/algorithms/crr/crr.py",
"python/ray/tune/utils/trainable.py",
"rllib/tests/test_io.py"
] | [
"import binascii\nimport errno\nimport functools\nimport hashlib\nimport importlib\nimport logging\nimport multiprocessing\nimport os\nimport signal\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\nfrom typing import Optional, Sequence, Tuple, Any, Union, Dict\nimport uuid\nimport grpc\nimport warnings\n\ntry:\n from grpc import aio as aiogrpc\nexcept ImportError:\n from grpc.experimental import aio as aiogrpc\n\nimport inspect\nfrom inspect import signature\nfrom pathlib import Path\nimport numpy as np\n\nimport ray\nfrom ray.core.generated.gcs_pb2 import ErrorTableData\nimport ray.ray_constants as ray_constants\nfrom ray._private.tls_utils import load_certs_from_env\n\n# Import psutil after ray so the packaged version is used.\nimport psutil\n\npwd = None\nif sys.platform != \"win32\":\n import pwd\n\nlogger = logging.getLogger(__name__)\n\n# Linux can bind child processes' lifetimes to that of their parents via prctl.\n# prctl support is detected dynamically once, and assumed thereafter.\nlinux_prctl = None\n\n# Windows can bind processes' lifetimes to that of kernel-level \"job objects\".\n# We keep a global job object to tie its lifetime to that of our own process.\nwin32_job = None\nwin32_AssignProcessToJobObject = None\n\n\ndef get_user_temp_dir():\n if \"RAY_TMPDIR\" in os.environ:\n return os.environ[\"RAY_TMPDIR\"]\n elif sys.platform.startswith(\"linux\") and \"TMPDIR\" in os.environ:\n return os.environ[\"TMPDIR\"]\n elif sys.platform.startswith(\"darwin\") or sys.platform.startswith(\"linux\"):\n # Ideally we wouldn't need this fallback, but keep it for now for\n # for compatibility\n tempdir = os.path.join(os.sep, \"tmp\")\n else:\n tempdir = tempfile.gettempdir()\n return tempdir\n\n\ndef get_ray_temp_dir():\n return os.path.join(get_user_temp_dir(), \"ray\")\n\n\ndef _random_string():\n id_hash = hashlib.shake_128()\n id_hash.update(uuid.uuid4().bytes)\n id_bytes = id_hash.digest(ray_constants.ID_SIZE)\n assert len(id_bytes) == ray_constants.ID_SIZE\n return id_bytes\n\n\ndef format_error_message(exception_message: str, task_exception: bool = False):\n \"\"\"Improve the formatting of an exception thrown by a remote function.\n\n This method takes a traceback from an exception and makes it nicer by\n removing a few uninformative lines and adding some space to indent the\n remaining lines nicely.\n\n Args:\n exception_message: A message generated by traceback.format_exc().\n\n Returns:\n A string of the formatted exception message.\n \"\"\"\n lines = exception_message.split(\"\\n\")\n if task_exception:\n # For errors that occur inside of tasks, remove lines 1 and 2 which are\n # always the same, they just contain information about the worker code.\n lines = lines[0:1] + lines[3:]\n pass\n return \"\\n\".join(lines)\n\n\ndef push_error_to_driver(\n worker, error_type: str, message: str, job_id: Optional[str] = None\n):\n \"\"\"Push an error message to the driver to be printed in the background.\n\n Args:\n worker: The worker to use.\n error_type: The type of the error.\n message: The message that will be printed in the background\n on the driver.\n job_id: The ID of the driver to push the error message to. 
If this\n is None, then the message will be pushed to all drivers.\n \"\"\"\n if job_id is None:\n job_id = ray.JobID.nil()\n assert isinstance(job_id, ray.JobID)\n worker.core_worker.push_error(job_id, error_type, message, time.time())\n\n\ndef construct_error_message(job_id, error_type, message, timestamp):\n \"\"\"Construct an ErrorTableData object.\n\n Args:\n job_id: The ID of the job that the error should go to. If this is\n nil, then the error will go to all drivers.\n error_type: The type of the error.\n message: The error message.\n timestamp: The time of the error.\n\n Returns:\n The ErrorTableData object.\n \"\"\"\n data = ErrorTableData()\n data.job_id = job_id.binary()\n data.type = error_type\n data.error_message = message\n data.timestamp = timestamp\n return data\n\n\ndef publish_error_to_driver(\n error_type: str,\n message: str,\n gcs_publisher,\n job_id=None,\n):\n \"\"\"Push an error message to the driver to be printed in the background.\n\n Normally the push_error_to_driver function should be used. However, in some\n instances, the raylet client is not available, e.g., because the\n error happens in Python before the driver or worker has connected to the\n backend processes.\n\n Args:\n error_type: The type of the error.\n message: The message that will be printed in the background\n on the driver.\n gcs_publisher: The GCS publisher to use.\n job_id: The ID of the driver to push the error message to. If this\n is None, then the message will be pushed to all drivers.\n \"\"\"\n if job_id is None:\n job_id = ray.JobID.nil()\n assert isinstance(job_id, ray.JobID)\n error_data = construct_error_message(job_id, error_type, message, time.time())\n try:\n gcs_publisher.publish_error(job_id.hex().encode(), error_data)\n except Exception:\n logger.exception(f\"Failed to publish error {error_data}\")\n\n\ndef random_string():\n \"\"\"Generate a random string to use as an ID.\n\n Note that users may seed numpy, which could cause this function to generate\n duplicate IDs. Therefore, we need to seed numpy ourselves, but we can't\n interfere with the state of the user's random number generator, so we\n extract the state of the random number generator and reset it after we are\n done.\n\n TODO(rkn): If we want to later guarantee that these are generated in a\n deterministic manner, then we will need to make some changes here.\n\n Returns:\n A random byte string of length ray_constants.ID_SIZE.\n \"\"\"\n # Get the state of the numpy random number generator.\n numpy_state = np.random.get_state()\n # Try to use true randomness.\n np.random.seed(None)\n # Generate the random ID.\n random_id = np.random.bytes(ray_constants.ID_SIZE)\n # Reset the state of the numpy random number generator.\n np.random.set_state(numpy_state)\n return random_id\n\n\ndef decode(byte_str: str, allow_none: bool = False, encode_type: str = \"utf-8\"):\n \"\"\"Make this unicode in Python 3, otherwise leave it as bytes.\n\n Args:\n byte_str: The byte string to decode.\n allow_none: If true, then we will allow byte_str to be None in which\n case we will return an empty string. 
TODO(rkn): Remove this flag.\n This is only here to simplify upgrading to flatbuffers 1.10.0.\n\n Returns:\n A byte string in Python 2 and a unicode string in Python 3.\n \"\"\"\n if byte_str is None and allow_none:\n return \"\"\n\n if not isinstance(byte_str, bytes):\n raise ValueError(f\"The argument {byte_str} must be a bytes object.\")\n if sys.version_info >= (3, 0):\n return byte_str.decode(encode_type)\n else:\n return byte_str\n\n\ndef ensure_str(s, encoding=\"utf-8\", errors=\"strict\"):\n \"\"\"Coerce *s* to `str`.\n\n - `str` -> `str`\n - `bytes` -> decoded to `str`\n \"\"\"\n if isinstance(s, str):\n return s\n else:\n assert isinstance(s, bytes)\n return s.decode(encoding, errors)\n\n\ndef binary_to_object_ref(binary_object_ref):\n return ray.ObjectRef(binary_object_ref)\n\n\ndef binary_to_task_id(binary_task_id):\n return ray.TaskID(binary_task_id)\n\n\ndef binary_to_hex(identifier):\n hex_identifier = binascii.hexlify(identifier)\n if sys.version_info >= (3, 0):\n hex_identifier = hex_identifier.decode()\n return hex_identifier\n\n\ndef hex_to_binary(hex_identifier):\n return binascii.unhexlify(hex_identifier)\n\n\n# TODO(qwang): Remove these hepler functions\n# once we separate `WorkerID` from `UniqueID`.\ndef compute_job_id_from_driver(driver_id):\n assert isinstance(driver_id, ray.WorkerID)\n return ray.JobID(driver_id.binary()[0 : ray.JobID.size()])\n\n\ndef compute_driver_id_from_job(job_id):\n assert isinstance(job_id, ray.JobID)\n rest_length = ray_constants.ID_SIZE - job_id.size()\n driver_id_str = job_id.binary() + (rest_length * b\"\\xff\")\n return ray.WorkerID(driver_id_str)\n\n\ndef get_cuda_visible_devices():\n \"\"\"Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.\n\n Returns:\n devices (List[str]): If CUDA_VISIBLE_DEVICES is set, returns a\n list of strings representing the IDs of the visible GPUs.\n If it is not set or is set to NoDevFiles, returns empty list.\n \"\"\"\n gpu_ids_str = os.environ.get(\"CUDA_VISIBLE_DEVICES\", None)\n\n if gpu_ids_str is None:\n return None\n\n if gpu_ids_str == \"\":\n return []\n\n if gpu_ids_str == \"NoDevFiles\":\n return []\n\n # GPU identifiers are given as strings representing integers or UUIDs.\n return list(gpu_ids_str.split(\",\"))\n\n\nlast_set_gpu_ids = None\n\n\ndef set_cuda_visible_devices(gpu_ids):\n \"\"\"Set the CUDA_VISIBLE_DEVICES environment variable.\n\n Args:\n gpu_ids (List[str]): List of strings representing GPU IDs.\n \"\"\"\n\n if os.environ.get(ray_constants.NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR):\n return\n\n global last_set_gpu_ids\n if last_set_gpu_ids == gpu_ids:\n return # optimization: already set\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join([str(i) for i in gpu_ids])\n last_set_gpu_ids = gpu_ids\n\n\ndef resources_from_ray_options(options_dict: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Determine a task's resource requirements.\n\n Args:\n options_dict: The dictionary that contains resources requirements.\n\n Returns:\n A dictionary of the resource requirements for the task.\n \"\"\"\n resources = (options_dict.get(\"resources\") or {}).copy()\n\n if \"CPU\" in resources or \"GPU\" in resources:\n raise ValueError(\n \"The resources dictionary must not contain the key 'CPU' or 'GPU'\"\n )\n elif \"memory\" in resources or \"object_store_memory\" in resources:\n raise ValueError(\n \"The resources dictionary must not \"\n \"contain the key 'memory' or 'object_store_memory'\"\n )\n\n num_cpus = options_dict.get(\"num_cpus\")\n num_gpus = 
options_dict.get(\"num_gpus\")\n memory = options_dict.get(\"memory\")\n object_store_memory = options_dict.get(\"object_store_memory\")\n accelerator_type = options_dict.get(\"accelerator_type\")\n\n if num_cpus is not None:\n resources[\"CPU\"] = num_cpus\n if num_gpus is not None:\n resources[\"GPU\"] = num_gpus\n if memory is not None:\n resources[\"memory\"] = ray_constants.to_memory_units(memory, round_up=True)\n if object_store_memory is not None:\n resources[\"object_store_memory\"] = ray_constants.to_memory_units(\n object_store_memory, round_up=True\n )\n if accelerator_type is not None:\n resources[\n f\"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}{accelerator_type}\"\n ] = 0.001\n\n return resources\n\n\nclass Unbuffered(object):\n \"\"\"There's no \"built-in\" solution to programatically disabling buffering of\n text files. Ray expects stdout/err to be text files, so creating an\n unbuffered binary file is unacceptable.\n\n See\n https://mail.python.org/pipermail/tutor/2003-November/026645.html.\n https://docs.python.org/3/library/functions.html#open\n\n \"\"\"\n\n def __init__(self, stream):\n self.stream = stream\n\n def write(self, data):\n self.stream.write(data)\n self.stream.flush()\n\n def writelines(self, datas):\n self.stream.writelines(datas)\n self.stream.flush()\n\n def __getattr__(self, attr):\n return getattr(self.stream, attr)\n\n\ndef open_log(path, unbuffered=False, **kwargs):\n \"\"\"\n Opens the log file at `path`, with the provided kwargs being given to\n `open`.\n \"\"\"\n # Disable buffering, see test_advanced_3.py::test_logging_to_driver\n kwargs.setdefault(\"buffering\", 1)\n kwargs.setdefault(\"mode\", \"a\")\n kwargs.setdefault(\"encoding\", \"utf-8\")\n stream = open(path, **kwargs)\n if unbuffered:\n return Unbuffered(stream)\n else:\n return stream\n\n\ndef get_system_memory(\n # For cgroups v1:\n memory_limit_filename=\"/sys/fs/cgroup/memory/memory.limit_in_bytes\",\n # For cgroups v2:\n memory_limit_filename_v2=\"/sys/fs/cgroup/memory.max\",\n):\n \"\"\"Return the total amount of system memory in bytes.\n\n Returns:\n The total amount of system memory in bytes.\n \"\"\"\n # Try to accurately figure out the memory limit if we are in a docker\n # container. Note that this file is not specific to Docker and its value is\n # often much larger than the actual amount of memory.\n docker_limit = None\n if os.path.exists(memory_limit_filename):\n with open(memory_limit_filename, \"r\") as f:\n docker_limit = int(f.read())\n elif os.path.exists(memory_limit_filename_v2):\n with open(memory_limit_filename_v2, \"r\") as f:\n max_file = f.read()\n if max_file.isnumeric():\n docker_limit = int(max_file)\n else:\n # max_file is \"max\", i.e. 
is unset.\n docker_limit = None\n\n # Use psutil if it is available.\n psutil_memory_in_bytes = psutil.virtual_memory().total\n\n if docker_limit is not None:\n # We take the min because the cgroup limit is very large if we aren't\n # in Docker.\n return min(docker_limit, psutil_memory_in_bytes)\n\n return psutil_memory_in_bytes\n\n\ndef _get_docker_cpus(\n cpu_quota_file_name=\"/sys/fs/cgroup/cpu/cpu.cfs_quota_us\",\n cpu_period_file_name=\"/sys/fs/cgroup/cpu/cpu.cfs_period_us\",\n cpuset_file_name=\"/sys/fs/cgroup/cpuset/cpuset.cpus\",\n cpu_max_file_name=\"/sys/fs/cgroup/cpu.max\",\n) -> Optional[float]:\n # TODO (Alex): Don't implement this logic oursleves.\n # Docker has 2 underyling ways of implementing CPU limits:\n # https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler\n # 1. --cpuset-cpus 2. --cpus or --cpu-quota/--cpu-period (--cpu-shares is a\n # soft limit so we don't worry about it). For Ray's purposes, if we use\n # docker, the number of vCPUs on a machine is whichever is set (ties broken\n # by smaller value).\n\n cpu_quota = None\n # See: https://bugs.openjdk.java.net/browse/JDK-8146115\n if os.path.exists(cpu_quota_file_name) and os.path.exists(cpu_period_file_name):\n try:\n with open(cpu_quota_file_name, \"r\") as quota_file, open(\n cpu_period_file_name, \"r\"\n ) as period_file:\n cpu_quota = float(quota_file.read()) / float(period_file.read())\n except Exception:\n logger.exception(\"Unexpected error calculating docker cpu quota.\")\n # Look at cpu.max for cgroups v2\n elif os.path.exists(cpu_max_file_name):\n try:\n max_file = open(cpu_max_file_name).read()\n quota_str, period_str = max_file.split()\n if quota_str.isnumeric() and period_str.isnumeric():\n cpu_quota = float(quota_str) / float(period_str)\n else:\n # quota_str is \"max\" meaning the cpu quota is unset\n cpu_quota = None\n except Exception:\n logger.exception(\"Unexpected error calculating docker cpu quota.\")\n if (cpu_quota is not None) and (cpu_quota < 0):\n cpu_quota = None\n elif cpu_quota == 0:\n # Round up in case the cpu limit is less than 1.\n cpu_quota = 1\n\n cpuset_num = None\n if os.path.exists(cpuset_file_name):\n try:\n with open(cpuset_file_name) as cpuset_file:\n ranges_as_string = cpuset_file.read()\n ranges = ranges_as_string.split(\",\")\n cpu_ids = []\n for num_or_range in ranges:\n if \"-\" in num_or_range:\n start, end = num_or_range.split(\"-\")\n cpu_ids.extend(list(range(int(start), int(end) + 1)))\n else:\n cpu_ids.append(int(num_or_range))\n cpuset_num = len(cpu_ids)\n except Exception:\n logger.exception(\"Unexpected error calculating docker cpuset ids.\")\n # Possible to-do: Parse cgroups v2's cpuset.cpus.effective for the number\n # of accessible CPUs.\n\n if cpu_quota and cpuset_num:\n return min(cpu_quota, cpuset_num)\n return cpu_quota or cpuset_num\n\n\ndef get_num_cpus() -> int:\n cpu_count = multiprocessing.cpu_count()\n if os.environ.get(\"RAY_USE_MULTIPROCESSING_CPU_COUNT\"):\n logger.info(\n \"Detected RAY_USE_MULTIPROCESSING_CPU_COUNT=1: Using \"\n \"multiprocessing.cpu_count() to detect the number of CPUs. \"\n \"This may be inconsistent when used inside docker. 
\"\n \"To correctly detect CPUs, unset the env var: \"\n \"`RAY_USE_MULTIPROCESSING_CPU_COUNT`.\"\n )\n return cpu_count\n try:\n # Not easy to get cpu count in docker, see:\n # https://bugs.python.org/issue36054\n docker_count = _get_docker_cpus()\n if docker_count is not None and docker_count != cpu_count:\n # Don't log this warning if we're on K8s or if the warning is\n # explicitly disabled.\n if (\n \"RAY_DISABLE_DOCKER_CPU_WARNING\" not in os.environ\n and \"KUBERNETES_SERVICE_HOST\" not in os.environ\n ):\n logger.warning(\n \"Detecting docker specified CPUs. In \"\n \"previous versions of Ray, CPU detection in containers \"\n \"was incorrect. Please ensure that Ray has enough CPUs \"\n \"allocated. As a temporary workaround to revert to the \"\n \"prior behavior, set \"\n \"`RAY_USE_MULTIPROCESSING_CPU_COUNT=1` as an env var \"\n \"before starting Ray. Set the env var: \"\n \"`RAY_DISABLE_DOCKER_CPU_WARNING=1` to mute this warning.\"\n )\n # TODO (Alex): We should probably add support for fractional cpus.\n if int(docker_count) != float(docker_count):\n logger.warning(\n f\"Ray currently does not support initializing Ray\"\n f\"with fractional cpus. Your num_cpus will be \"\n f\"truncated from {docker_count} to \"\n f\"{int(docker_count)}.\"\n )\n docker_count = int(docker_count)\n cpu_count = docker_count\n\n except Exception:\n # `nproc` and cgroup are linux-only. If docker only works on linux\n # (will run in a linux VM on other platforms), so this is fine.\n pass\n\n return cpu_count\n\n\ndef get_used_memory():\n \"\"\"Return the currently used system memory in bytes\n\n Returns:\n The total amount of used memory\n \"\"\"\n # Try to accurately figure out the memory usage if we are in a docker\n # container.\n docker_usage = None\n # For cgroups v1:\n memory_usage_filename = \"/sys/fs/cgroup/memory/memory.usage_in_bytes\"\n # For cgroups v2:\n memory_usage_filename_v2 = \"/sys/fs/cgroup/memory.current\"\n if os.path.exists(memory_usage_filename):\n with open(memory_usage_filename, \"r\") as f:\n docker_usage = int(f.read())\n elif os.path.exists(memory_usage_filename_v2):\n with open(memory_usage_filename_v2, \"r\") as f:\n docker_usage = int(f.read())\n\n # Use psutil if it is available.\n psutil_memory_in_bytes = psutil.virtual_memory().used\n\n if docker_usage is not None:\n # We take the min because the cgroup limit is very large if we aren't\n # in Docker.\n return min(docker_usage, psutil_memory_in_bytes)\n\n return psutil_memory_in_bytes\n\n\ndef estimate_available_memory():\n \"\"\"Return the currently available amount of system memory in bytes.\n\n Returns:\n The total amount of available memory in bytes. 
Based on the used\n and total memory.\n\n \"\"\"\n return get_system_memory() - get_used_memory()\n\n\ndef get_shared_memory_bytes():\n \"\"\"Get the size of the shared memory file system.\n\n Returns:\n The size of the shared memory file system in bytes.\n \"\"\"\n # Make sure this is only called on Linux.\n assert sys.platform == \"linux\" or sys.platform == \"linux2\"\n\n shm_fd = os.open(\"/dev/shm\", os.O_RDONLY)\n try:\n shm_fs_stats = os.fstatvfs(shm_fd)\n # The value shm_fs_stats.f_bsize is the block size and the\n # value shm_fs_stats.f_bavail is the number of available\n # blocks.\n shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail\n finally:\n os.close(shm_fd)\n\n return shm_avail\n\n\ndef check_oversized_function(\n pickled: bytes, name: str, obj_type: str, worker: \"ray.Worker\"\n) -> None:\n \"\"\"Send a warning message if the pickled function is too large.\n\n Args:\n pickled: the pickled function.\n name: name of the pickled object.\n obj_type: type of the pickled object, can be 'function',\n 'remote function', or 'actor'.\n worker: the worker used to send warning message. message will be logged\n locally if None.\n \"\"\"\n length = len(pickled)\n if length <= ray_constants.FUNCTION_SIZE_WARN_THRESHOLD:\n return\n elif length < ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD:\n warning_message = (\n \"The {} {} is very large ({} MiB). \"\n \"Check that its definition is not implicitly capturing a large \"\n \"array or other object in scope. Tip: use ray.put() to put large \"\n \"objects in the Ray object store.\"\n ).format(obj_type, name, length // (1024 * 1024))\n if worker:\n push_error_to_driver(\n worker,\n ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,\n \"Warning: \" + warning_message,\n job_id=worker.current_job_id,\n )\n else:\n error = (\n \"The {} {} is too large ({} MiB > FUNCTION_SIZE_ERROR_THRESHOLD={}\"\n \" MiB). Check that its definition is not implicitly capturing a \"\n \"large array or other object in scope. 
Tip: use ray.put() to \"\n \"put large objects in the Ray object store.\"\n ).format(\n obj_type,\n name,\n length // (1024 * 1024),\n ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD // (1024 * 1024),\n )\n raise ValueError(error)\n\n\ndef is_main_thread():\n return threading.current_thread().getName() == \"MainThread\"\n\n\ndef detect_fate_sharing_support_win32():\n global win32_job, win32_AssignProcessToJobObject\n if win32_job is None and sys.platform == \"win32\":\n import ctypes\n\n try:\n from ctypes.wintypes import BOOL, DWORD, HANDLE, LPVOID, LPCWSTR\n\n kernel32 = ctypes.WinDLL(\"kernel32\")\n kernel32.CreateJobObjectW.argtypes = (LPVOID, LPCWSTR)\n kernel32.CreateJobObjectW.restype = HANDLE\n sijo_argtypes = (HANDLE, ctypes.c_int, LPVOID, DWORD)\n kernel32.SetInformationJobObject.argtypes = sijo_argtypes\n kernel32.SetInformationJobObject.restype = BOOL\n kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)\n kernel32.AssignProcessToJobObject.restype = BOOL\n kernel32.IsDebuggerPresent.argtypes = ()\n kernel32.IsDebuggerPresent.restype = BOOL\n except (AttributeError, TypeError, ImportError):\n kernel32 = None\n job = kernel32.CreateJobObjectW(None, None) if kernel32 else None\n job = subprocess.Handle(job) if job else job\n if job:\n from ctypes.wintypes import DWORD, LARGE_INTEGER, ULARGE_INTEGER\n\n class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):\n _fields_ = [\n (\"PerProcessUserTimeLimit\", LARGE_INTEGER),\n (\"PerJobUserTimeLimit\", LARGE_INTEGER),\n (\"LimitFlags\", DWORD),\n (\"MinimumWorkingSetSize\", ctypes.c_size_t),\n (\"MaximumWorkingSetSize\", ctypes.c_size_t),\n (\"ActiveProcessLimit\", DWORD),\n (\"Affinity\", ctypes.c_size_t),\n (\"PriorityClass\", DWORD),\n (\"SchedulingClass\", DWORD),\n ]\n\n class IO_COUNTERS(ctypes.Structure):\n _fields_ = [\n (\"ReadOperationCount\", ULARGE_INTEGER),\n (\"WriteOperationCount\", ULARGE_INTEGER),\n (\"OtherOperationCount\", ULARGE_INTEGER),\n (\"ReadTransferCount\", ULARGE_INTEGER),\n (\"WriteTransferCount\", ULARGE_INTEGER),\n (\"OtherTransferCount\", ULARGE_INTEGER),\n ]\n\n class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):\n _fields_ = [\n (\"BasicLimitInformation\", JOBOBJECT_BASIC_LIMIT_INFORMATION),\n (\"IoInfo\", IO_COUNTERS),\n (\"ProcessMemoryLimit\", ctypes.c_size_t),\n (\"JobMemoryLimit\", ctypes.c_size_t),\n (\"PeakProcessMemoryUsed\", ctypes.c_size_t),\n (\"PeakJobMemoryUsed\", ctypes.c_size_t),\n ]\n\n debug = kernel32.IsDebuggerPresent()\n\n # Defined in <WinNT.h>; also available here:\n # https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-setinformationjobobject\n JobObjectExtendedLimitInformation = 9\n JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800\n JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400\n JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000\n buf = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()\n buf.BasicLimitInformation.LimitFlags = (\n (0 if debug else JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)\n | JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION\n | JOB_OBJECT_LIMIT_BREAKAWAY_OK\n )\n infoclass = JobObjectExtendedLimitInformation\n if not kernel32.SetInformationJobObject(\n job, infoclass, ctypes.byref(buf), ctypes.sizeof(buf)\n ):\n job = None\n win32_AssignProcessToJobObject = (\n kernel32.AssignProcessToJobObject if kernel32 is not None else False\n )\n win32_job = job if job else False\n return bool(win32_job)\n\n\ndef detect_fate_sharing_support_linux():\n global linux_prctl\n if linux_prctl is None and sys.platform.startswith(\"linux\"):\n try:\n from ctypes 
import c_int, c_ulong, CDLL\n\n prctl = CDLL(None).prctl\n prctl.restype = c_int\n prctl.argtypes = [c_int, c_ulong, c_ulong, c_ulong, c_ulong]\n except (AttributeError, TypeError):\n prctl = None\n linux_prctl = prctl if prctl else False\n return bool(linux_prctl)\n\n\ndef detect_fate_sharing_support():\n result = None\n if sys.platform == \"win32\":\n result = detect_fate_sharing_support_win32()\n elif sys.platform.startswith(\"linux\"):\n result = detect_fate_sharing_support_linux()\n return result\n\n\ndef set_kill_on_parent_death_linux():\n \"\"\"Ensures this process dies if its parent dies (fate-sharing).\n\n Linux-only. Must be called in preexec_fn (i.e. by the child).\n \"\"\"\n if detect_fate_sharing_support_linux():\n import signal\n\n PR_SET_PDEATHSIG = 1\n if linux_prctl(PR_SET_PDEATHSIG, signal.SIGKILL, 0, 0, 0) != 0:\n import ctypes\n\n raise OSError(ctypes.get_errno(), \"prctl(PR_SET_PDEATHSIG) failed\")\n else:\n assert False, \"PR_SET_PDEATHSIG used despite being unavailable\"\n\n\ndef set_kill_child_on_death_win32(child_proc):\n \"\"\"Ensures the child process dies if this process dies (fate-sharing).\n\n Windows-only. Must be called by the parent, after spawning the child.\n\n Args:\n child_proc: The subprocess.Popen or subprocess.Handle object.\n \"\"\"\n\n if isinstance(child_proc, subprocess.Popen):\n child_proc = child_proc._handle\n assert isinstance(child_proc, subprocess.Handle)\n\n if detect_fate_sharing_support_win32():\n if not win32_AssignProcessToJobObject(win32_job, int(child_proc)):\n import ctypes\n\n raise OSError(ctypes.get_last_error(), \"AssignProcessToJobObject() failed\")\n else:\n assert False, \"AssignProcessToJobObject used despite being unavailable\"\n\n\ndef set_sigterm_handler(sigterm_handler):\n \"\"\"Registers a handler for SIGTERM in a platform-compatible manner.\"\"\"\n if sys.platform == \"win32\":\n # Note that these signal handlers only work for console applications.\n # TODO(mehrdadn): implement graceful process termination mechanism\n # SIGINT is Ctrl+C, SIGBREAK is Ctrl+Break.\n signal.signal(signal.SIGBREAK, sigterm_handler)\n else:\n signal.signal(signal.SIGTERM, sigterm_handler)\n\n\ndef try_make_directory_shared(directory_path):\n try:\n os.chmod(directory_path, 0o0777)\n except OSError as e:\n # Silently suppress the PermissionError that is thrown by the chmod.\n # This is done because the user attempting to change the permissions\n # on a directory may not own it. The chmod is attempted whether the\n # directory is new or not to avoid race conditions.\n # ray-project/ray/#3591\n if e.errno in [errno.EACCES, errno.EPERM]:\n pass\n else:\n raise\n\n\ndef try_to_create_directory(directory_path):\n \"\"\"Attempt to create a directory that is globally readable/writable.\n\n Args:\n directory_path: The path of the directory to create.\n \"\"\"\n directory_path = os.path.expanduser(directory_path)\n os.makedirs(directory_path, exist_ok=True)\n # Change the log directory permissions so others can use it. This is\n # important when multiple people are using the same machine.\n try_make_directory_shared(directory_path)\n\n\ndef try_to_symlink(symlink_path, target_path):\n \"\"\"Attempt to create a symlink.\n\n If the symlink path exists and isn't a symlink, the symlink will not be\n created. 
If a symlink exists in the path, it will be attempted to be\n removed and replaced.\n\n Args:\n symlink_path: The path at which to create the symlink.\n target_path: The path the symlink should point to.\n \"\"\"\n symlink_path = os.path.expanduser(symlink_path)\n target_path = os.path.expanduser(target_path)\n\n if os.path.exists(symlink_path):\n if os.path.islink(symlink_path):\n # Try to remove existing symlink.\n try:\n os.remove(symlink_path)\n except OSError:\n return\n else:\n # There's an existing non-symlink file, don't overwrite it.\n return\n\n try:\n os.symlink(target_path, symlink_path)\n except OSError:\n return\n\n\ndef get_user():\n if pwd is None:\n return \"\"\n try:\n return pwd.getpwuid(os.getuid()).pw_name\n except Exception:\n return \"\"\n\n\ndef get_function_args(callable):\n all_parameters = frozenset(signature(callable).parameters)\n return list(all_parameters)\n\n\ndef get_conda_bin_executable(executable_name):\n \"\"\"\n Return path to the specified executable, assumed to be discoverable within\n the 'bin' subdirectory of a conda installation. Adapted from\n https://github.com/mlflow/mlflow.\n \"\"\"\n\n # Use CONDA_EXE as per https://github.com/conda/conda/issues/7126\n if \"CONDA_EXE\" in os.environ:\n conda_bin_dir = os.path.dirname(os.environ[\"CONDA_EXE\"])\n return os.path.join(conda_bin_dir, executable_name)\n return executable_name\n\n\ndef get_conda_env_dir(env_name):\n \"\"\"Find and validate the conda directory for a given conda environment.\n\n For example, given the environment name `tf1`, this function checks\n the existence of the corresponding conda directory, e.g.\n `/Users/scaly/anaconda3/envs/tf1`, and returns it.\n \"\"\"\n conda_prefix = os.environ.get(\"CONDA_PREFIX\")\n if conda_prefix is None:\n # The caller is neither in a conda env or in (base) env. This is rare\n # because by default, new terminals start in (base), but we can still\n # support this case.\n conda_exe = os.environ.get(\"CONDA_EXE\")\n if conda_exe is None:\n raise ValueError(\n \"Cannot find environment variables set by conda. \"\n \"Please verify conda is installed.\"\n )\n # Example: CONDA_EXE=$HOME/anaconda3/bin/python\n # Strip out /bin/python by going up two parent directories.\n conda_prefix = str(Path(conda_exe).parent.parent)\n\n # There are two cases:\n # 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and\n # CONDA_PREFIX=$HOME/anaconda3\n # 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and\n # CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name\n if os.environ.get(\"CONDA_DEFAULT_ENV\") == \"base\":\n # Caller's curent environment is (base).\n # Not recommended by conda, but we can still support it.\n if env_name == \"base\":\n # Desired environment is (base), located at e.g. $HOME/anaconda3\n env_dir = conda_prefix\n else:\n # Desired environment is user-created, e.g.\n # $HOME/anaconda3/envs/$env_name\n env_dir = os.path.join(conda_prefix, \"envs\", env_name)\n else:\n # Now `conda_prefix` should be something like\n # $HOME/anaconda3/envs/$current_env_name\n # We want to replace the last component with the desired env name.\n conda_envs_dir = os.path.split(conda_prefix)[0]\n env_dir = os.path.join(conda_envs_dir, env_name)\n if not os.path.isdir(env_dir):\n raise ValueError(\n \"conda env \"\n + env_name\n + \" not found in conda envs directory. 
Run `conda env list` to \"\n + \"verify the name is correct.\"\n )\n return env_dir\n\n\ndef get_call_location(back: int = 1):\n \"\"\"\n Get the location (filename and line number) of a function caller, `back`\n frames up the stack.\n\n Args:\n back: The number of frames to go up the stack, not including this\n function.\n \"\"\"\n stack = inspect.stack()\n try:\n frame = stack[back + 1]\n return f\"{frame.filename}:{frame.lineno}\"\n except IndexError:\n return \"UNKNOWN\"\n\n\n# Used to only print a deprecation warning once for a given function if we\n# don't wish to spam the caller.\n_PRINTED_WARNING = set()\n\n\n# The following is inspired by\n# https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/python/util/deprecation.py#L274-L329\ndef deprecated(\n instructions: Optional[str] = None,\n removal_release: Optional[str] = None,\n removal_date: Optional[str] = None,\n warn_once: bool = True,\n):\n \"\"\"\n Creates a decorator for marking functions as deprecated. The decorator\n will log a deprecation warning on the first (or all, see `warn_once` arg)\n invocations, and will otherwise leave the wrapped function unchanged.\n\n Args:\n instructions: Instructions for the caller to update their code.\n removal_release: The release in which this deprecated function\n will be removed. Only one of removal_release and removal_date\n should be specified. If neither is specfieid, we'll warning that\n the function will be removed \"in a future release\".\n removal_date: The date on which this deprecated function will be\n removed. Only one of removal_release and removal_date should be\n specified. If neither is specfieid, we'll warning that\n the function will be removed \"in a future release\".\n warn_once: If true, the deprecation warning will only be logged\n on the first invocation. Otherwise, the deprecation warning will\n be logged on every invocation. Defaults to True.\n\n Returns:\n A decorator to be used for wrapping deprecated functions.\n \"\"\"\n if removal_release is not None and removal_date is not None:\n raise ValueError(\n \"Only one of removal_release and removal_date should be specified.\"\n )\n\n def deprecated_wrapper(func):\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n global _PRINTED_WARNING\n if func not in _PRINTED_WARNING:\n if warn_once:\n _PRINTED_WARNING.add(func)\n msg = (\n \"From {}: {} (from {}) is deprecated and will \".format(\n get_call_location(), func.__name__, func.__module__\n )\n + \"be removed \"\n + (\n f\"in version {removal_release}.\"\n if removal_release is not None\n else f\"after {removal_date}\"\n if removal_date is not None\n else \"in a future version\"\n )\n + (f\" {instructions}\" if instructions is not None else \"\")\n )\n warnings.warn(msg)\n return func(*args, **kwargs)\n\n return new_func\n\n return deprecated_wrapper\n\n\ndef import_attr(full_path: str):\n \"\"\"Given a full import path to a module attr, return the imported attr.\n\n For example, the following are equivalent:\n MyClass = import_attr(\"module.submodule:MyClass\")\n MyClass = import_attr(\"module.submodule.MyClass\")\n from module.submodule import MyClass\n\n Returns:\n Imported attr\n \"\"\"\n if full_path is None:\n raise TypeError(\"import path cannot be None\")\n\n if \":\" in full_path:\n if full_path.count(\":\") > 1:\n raise ValueError(\n f'Got invalid import path \"{full_path}\". 
An '\n \"import path may have at most one colon.\"\n )\n module_name, attr_name = full_path.split(\":\")\n else:\n last_period_idx = full_path.rfind(\".\")\n module_name = full_path[:last_period_idx]\n attr_name = full_path[last_period_idx + 1 :]\n\n module = importlib.import_module(module_name)\n return getattr(module, attr_name)\n\n\ndef get_wheel_filename(\n sys_platform: str = sys.platform,\n ray_version: str = ray.__version__,\n py_version: str = f\"{sys.version_info.major}{sys.version_info.minor}\",\n) -> str:\n \"\"\"Returns the filename used for the nightly Ray wheel.\n\n Args:\n sys_platform: The platform as returned by sys.platform. Examples:\n \"darwin\", \"linux\", \"win32\"\n ray_version: The Ray version as returned by ray.__version__ or\n `ray --version`. Examples: \"3.0.0.dev0\"\n py_version (str):\n The major and minor Python versions concatenated. Examples: \"36\",\n \"37\", \"38\", \"39\"\n Returns:\n The wheel file name. Examples:\n ray-3.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl\n \"\"\"\n assert py_version in [\"36\", \"37\", \"38\", \"39\"], py_version\n\n os_strings = {\n \"darwin\": \"macosx_10_15_x86_64\"\n if py_version in [\"38\", \"39\"]\n else \"macosx_10_15_intel\",\n \"linux\": \"manylinux2014_x86_64\",\n \"win32\": \"win_amd64\",\n }\n\n assert sys_platform in os_strings, sys_platform\n\n wheel_filename = (\n f\"ray-{ray_version}-cp{py_version}-\"\n f\"cp{py_version}{'m' if py_version in ['36', '37'] else ''}\"\n f\"-{os_strings[sys_platform]}.whl\"\n )\n\n return wheel_filename\n\n\ndef get_master_wheel_url(\n ray_commit: str = ray.__commit__,\n sys_platform: str = sys.platform,\n ray_version: str = ray.__version__,\n py_version: str = f\"{sys.version_info.major}{sys.version_info.minor}\",\n) -> str:\n \"\"\"Return the URL for the wheel from a specific commit.\"\"\"\n filename = get_wheel_filename(\n sys_platform=sys_platform, ray_version=ray_version, py_version=py_version\n )\n return (\n f\"https://s3-us-west-2.amazonaws.com/ray-wheels/master/\"\n f\"{ray_commit}/{filename}\"\n )\n\n\ndef get_release_wheel_url(\n ray_commit: str = ray.__commit__,\n sys_platform: str = sys.platform,\n ray_version: str = ray.__version__,\n py_version: str = f\"{sys.version_info.major}{sys.version_info.minor}\",\n) -> str:\n \"\"\"Return the URL for the wheel for a specific release.\"\"\"\n filename = get_wheel_filename(\n sys_platform=sys_platform, ray_version=ray_version, py_version=py_version\n )\n return (\n f\"https://ray-wheels.s3-us-west-2.amazonaws.com/releases/\"\n f\"{ray_version}/{ray_commit}/{filename}\"\n )\n # e.g. https://ray-wheels.s3-us-west-2.amazonaws.com/releases/1.4.0rc1/e7c7\n # f6371a69eb727fa469e4cd6f4fbefd143b4c/ray-1.4.0rc1-cp36-cp36m-manylinux201\n # 4_x86_64.whl\n\n\ndef validate_namespace(namespace: str):\n if not isinstance(namespace, str):\n raise TypeError(\"namespace must be None or a string.\")\n elif namespace == \"\":\n raise ValueError(\n '\"\" is not a valid namespace. 
' \"Pass None to not specify a namespace.\"\n )\n\n\ndef init_grpc_channel(\n address: str,\n options: Optional[Sequence[Tuple[str, Any]]] = None,\n asynchronous: bool = False,\n):\n grpc_module = aiogrpc if asynchronous else grpc\n if os.environ.get(\"RAY_USE_TLS\", \"0\").lower() in (\"1\", \"true\"):\n server_cert_chain, private_key, ca_cert = load_certs_from_env()\n credentials = grpc.ssl_channel_credentials(\n certificate_chain=server_cert_chain,\n private_key=private_key,\n root_certificates=ca_cert,\n )\n channel = grpc_module.secure_channel(address, credentials, options=options)\n else:\n channel = grpc_module.insecure_channel(address, options=options)\n\n return channel\n\n\ndef check_dashboard_dependencies_installed() -> bool:\n \"\"\"Returns True if Ray Dashboard dependencies are installed.\n\n Checks to see if we should start the dashboard agent or not based on the\n Ray installation version the user has installed (ray vs. ray[default]).\n Unfortunately there doesn't seem to be a cleaner way to detect this other\n than just blindly importing the relevant packages.\n\n \"\"\"\n try:\n import ray.dashboard.optional_deps # noqa: F401\n\n return True\n except ImportError:\n return False\n\n\ndef internal_kv_list_with_retry(gcs_client, prefix, namespace, num_retries=20):\n result = None\n if isinstance(prefix, str):\n prefix = prefix.encode()\n if isinstance(namespace, str):\n namespace = namespace.encode()\n for _ in range(num_retries):\n try:\n result = gcs_client.internal_kv_keys(prefix, namespace)\n except Exception as e:\n if isinstance(e, grpc.RpcError) and e.code() in (\n grpc.StatusCode.UNAVAILABLE,\n grpc.StatusCode.UNKNOWN,\n ):\n logger.warning(\n f\"Unable to connect to GCS at {gcs_client.address}. \"\n \"Check that (1) Ray GCS with matching version started \"\n \"successfully at the specified address, and (2) there is \"\n \"no firewall setting preventing access.\"\n )\n else:\n logger.exception(\"Internal KV List failed\")\n result = None\n\n if result is not None:\n break\n else:\n logger.debug(f\"Fetched {prefix}=None from KV. Retrying.\")\n time.sleep(2)\n if result is None:\n raise RuntimeError(\n f\"Could not list '{prefix}' from GCS. Did GCS start successfully?\"\n )\n return result\n\n\ndef internal_kv_get_with_retry(gcs_client, key, namespace, num_retries=20):\n result = None\n if isinstance(key, str):\n key = key.encode()\n for _ in range(num_retries):\n try:\n result = gcs_client.internal_kv_get(key, namespace)\n except Exception as e:\n if isinstance(e, grpc.RpcError) and e.code() in (\n grpc.StatusCode.UNAVAILABLE,\n grpc.StatusCode.UNKNOWN,\n ):\n logger.warning(\n f\"Unable to connect to GCS at {gcs_client.address}. \"\n \"Check that (1) Ray GCS with matching version started \"\n \"successfully at the specified address, and (2) there is \"\n \"no firewall setting preventing access.\"\n )\n else:\n logger.exception(\"Internal KV Get failed\")\n result = None\n\n if result is not None:\n break\n else:\n logger.debug(f\"Fetched {key}=None from KV. Retrying.\")\n time.sleep(2)\n if not result:\n raise RuntimeError(\n f\"Could not read '{key.decode()}' from GCS. 
Did GCS start successfully?\"\n )\n return result\n\n\ndef internal_kv_put_with_retry(gcs_client, key, value, namespace, num_retries=20):\n if isinstance(key, str):\n key = key.encode()\n if isinstance(value, str):\n value = value.encode()\n if isinstance(namespace, str):\n namespace = namespace.encode()\n error = None\n for _ in range(num_retries):\n try:\n return gcs_client.internal_kv_put(\n key, value, overwrite=True, namespace=namespace\n )\n except grpc.RpcError as e:\n if e.code() in (\n grpc.StatusCode.UNAVAILABLE,\n grpc.StatusCode.UNKNOWN,\n ):\n logger.warning(\n f\"Unable to connect to GCS at {gcs_client.address}. \"\n \"Check that (1) Ray GCS with matching version started \"\n \"successfully at the specified address, and (2) there is \"\n \"no firewall setting preventing access.\"\n )\n else:\n logger.exception(\"Internal KV Put failed\")\n time.sleep(2)\n error = e\n # Reraise the last grpc.RpcError.\n raise error\n\n\ndef compute_version_info():\n \"\"\"Compute the versions of Python, and Ray.\n\n Returns:\n A tuple containing the version information.\n \"\"\"\n ray_version = ray.__version__\n python_version = \".\".join(map(str, sys.version_info[:3]))\n return ray_version, python_version\n\n\ndef get_directory_size_bytes(path: Union[str, Path] = \".\") -> int:\n \"\"\"Get the total size of a directory in bytes, including subdirectories.\"\"\"\n total_size_bytes = 0\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n # skip if it is a symbolic link or a .pyc file\n if not os.path.islink(fp) and not f.endswith(\".pyc\"):\n total_size_bytes += os.path.getsize(fp)\n\n return total_size_bytes\n\n\ndef check_version_info(cluster_metadata):\n \"\"\"Check if the Python and Ray versions stored in GCS matches this process.\n Args:\n cluster_metadata: Ray cluster metadata from GCS.\n\n Raises:\n Exception: An exception is raised if there is a version mismatch.\n \"\"\"\n cluster_version_info = (\n cluster_metadata[\"ray_version\"],\n cluster_metadata[\"python_version\"],\n )\n version_info = compute_version_info()\n if version_info != cluster_version_info:\n node_ip_address = ray._private.services.get_node_ip_address()\n error_message = (\n \"Version mismatch: The cluster was started with:\\n\"\n \" Ray: \" + cluster_version_info[0] + \"\\n\"\n \" Python: \" + cluster_version_info[1] + \"\\n\"\n \"This process on node \" + node_ip_address + \" was started with:\" + \"\\n\"\n \" Ray: \" + version_info[0] + \"\\n\"\n \" Python: \" + version_info[1] + \"\\n\"\n )\n raise RuntimeError(error_message)\n",
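A minimal, standalone sketch of the import-path resolution that the `import_attr` docstring above describes ("module.submodule:MyClass" vs. "module.submodule.MyClass"). The helper name `resolve_attr` is hypothetical and only illustrates the split-on-colon-or-last-dot convention, assuming nothing beyond the standard library.

import importlib

def resolve_attr(full_path: str):
    # Prefer the explicit "module:attr" form; otherwise split on the last dot.
    if ":" in full_path:
        module_name, attr_name = full_path.split(":", 1)
    else:
        module_name, _, attr_name = full_path.rpartition(".")
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)

# Both spellings resolve to the same object:
assert resolve_attr("os.path:join") is resolve_attr("os.path.join")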
"import os\nimport tempfile\nfrom collections import namedtuple\nfrom multiprocessing import Queue\nimport unittest\n\nimport numpy as np\n\nfrom ray.tune import Trainable\nfrom ray.tune.function_runner import wrap_function\nfrom ray.tune.integration.wandb import (\n WandbLoggerCallback,\n _WandbLoggingProcess,\n WANDB_ENV_VAR,\n WandbTrainableMixin,\n wandb_mixin,\n _QueueItem,\n)\nfrom ray.tune.result import TRIAL_INFO\nfrom ray.tune.trial import _TrialInfo\nfrom ray.tune.utils.placement_groups import PlacementGroupFactory\n\n\nclass Trial(\n namedtuple(\n \"MockTrial\",\n [\n \"config\",\n \"trial_id\",\n \"trial_name\",\n \"trainable_name\",\n \"placement_group_factory\",\n \"logdir\",\n ],\n )\n):\n def __hash__(self):\n return hash(self.trial_id)\n\n def __str__(self):\n return self.trial_name\n\n\nclass _MockWandbLoggingProcess(_WandbLoggingProcess):\n def __init__(self, logdir, queue, exclude, to_config, *args, **kwargs):\n super(_MockWandbLoggingProcess, self).__init__(\n logdir, queue, exclude, to_config, *args, **kwargs\n )\n\n self.logs = Queue()\n self.config_updates = Queue()\n\n def run(self):\n while True:\n result_type, result_content = self.queue.get()\n if result_type == _QueueItem.END:\n break\n log, config_update = self._handle_result(result_content)\n self.config_updates.put(config_update)\n self.logs.put(log)\n\n\nclass WandbTestExperimentLogger(WandbLoggerCallback):\n _logger_process_cls = _MockWandbLoggingProcess\n\n @property\n def trial_processes(self):\n return self._trial_processes\n\n\nclass _MockWandbAPI(object):\n def init(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n return self\n\n\nclass _MockWandbTrainableMixin(WandbTrainableMixin):\n _wandb = _MockWandbAPI()\n\n\nclass WandbTestTrainable(_MockWandbTrainableMixin, Trainable):\n pass\n\n\nclass WandbIntegrationTest(unittest.TestCase):\n def setUp(self):\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n def tearDown(self):\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n def testWandbLoggerConfig(self):\n trial_config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n trial_config,\n 0,\n \"trial_0\",\n \"trainable\",\n PlacementGroupFactory([{\"CPU\": 1}]),\n \"/tmp\",\n )\n\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n # No API key\n with self.assertRaises(ValueError):\n logger = WandbTestExperimentLogger(project=\"test_project\")\n logger.setup()\n\n # API Key in config\n logger = WandbTestExperimentLogger(project=\"test_project\", api_key=\"1234\")\n logger.setup()\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"1234\")\n\n del logger\n del os.environ[WANDB_ENV_VAR]\n\n # API Key file\n with tempfile.NamedTemporaryFile(\"wt\") as fp:\n fp.write(\"5678\")\n fp.flush()\n\n logger = WandbTestExperimentLogger(\n project=\"test_project\", api_key_file=fp.name\n )\n logger.setup()\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"5678\")\n\n del logger\n del os.environ[WANDB_ENV_VAR]\n\n # API Key in env\n os.environ[WANDB_ENV_VAR] = \"9012\"\n logger = WandbTestExperimentLogger(project=\"test_project\")\n logger.setup()\n del logger\n\n # From now on, the API key is in the env variable.\n\n logger = WandbTestExperimentLogger(project=\"test_project\")\n logger.log_trial_start(trial)\n\n self.assertEqual(\n logger.trial_processes[trial].kwargs[\"project\"], \"test_project\"\n )\n self.assertEqual(logger.trial_processes[trial].kwargs[\"id\"], trial.trial_id)\n 
self.assertEqual(logger.trial_processes[trial].kwargs[\"name\"], trial.trial_name)\n self.assertEqual(\n logger.trial_processes[trial].kwargs[\"group\"], trial.trainable_name\n )\n self.assertIn(\"config\", logger.trial_processes[trial]._exclude)\n\n del logger\n\n # log config.\n logger = WandbTestExperimentLogger(project=\"test_project\", log_config=True)\n logger.log_trial_start(trial)\n self.assertNotIn(\"config\", logger.trial_processes[trial]._exclude)\n self.assertNotIn(\"metric\", logger.trial_processes[trial]._exclude)\n\n del logger\n\n # Exclude metric.\n logger = WandbTestExperimentLogger(project=\"test_project\", excludes=[\"metric\"])\n logger.log_trial_start(trial)\n self.assertIn(\"config\", logger.trial_processes[trial]._exclude)\n self.assertIn(\"metric\", logger.trial_processes[trial]._exclude)\n\n del logger\n\n def testWandbLoggerReporting(self):\n trial_config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n trial_config,\n 0,\n \"trial_0\",\n \"trainable\",\n PlacementGroupFactory([{\"CPU\": 1}]),\n \"/tmp\",\n )\n\n logger = WandbTestExperimentLogger(\n project=\"test_project\", api_key=\"1234\", excludes=[\"metric2\"]\n )\n logger.on_trial_start(0, [], trial)\n\n r1 = {\n \"metric1\": 0.8,\n \"metric2\": 1.4,\n \"metric3\": np.asarray(32.0),\n \"metric4\": np.float32(32.0),\n \"const\": \"text\",\n \"config\": trial_config,\n }\n\n logger.on_trial_result(0, [], trial, r1)\n\n logged = logger.trial_processes[trial].logs.get(timeout=10)\n self.assertIn(\"metric1\", logged)\n self.assertNotIn(\"metric2\", logged)\n self.assertIn(\"metric3\", logged)\n self.assertIn(\"metric4\", logged)\n self.assertNotIn(\"const\", logged)\n self.assertNotIn(\"config\", logged)\n\n del logger\n\n def testWandbMixinConfig(self):\n config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n config,\n 0,\n \"trial_0\",\n \"trainable\",\n PlacementGroupFactory([{\"CPU\": 1}]),\n \"/tmp\",\n )\n trial_info = _TrialInfo(trial)\n\n config[TRIAL_INFO] = trial_info\n\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n # Needs at least a project\n with self.assertRaises(ValueError):\n trainable = WandbTestTrainable(config)\n\n # No API key\n config[\"wandb\"] = {\"project\": \"test_project\"}\n with self.assertRaises(ValueError):\n trainable = WandbTestTrainable(config)\n\n # API Key in config\n config[\"wandb\"] = {\"project\": \"test_project\", \"api_key\": \"1234\"}\n trainable = WandbTestTrainable(config)\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"1234\")\n\n del os.environ[WANDB_ENV_VAR]\n\n # API Key file\n with tempfile.NamedTemporaryFile(\"wt\") as fp:\n fp.write(\"5678\")\n fp.flush()\n\n config[\"wandb\"] = {\"project\": \"test_project\", \"api_key_file\": fp.name}\n\n trainable = WandbTestTrainable(config)\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"5678\")\n\n del os.environ[WANDB_ENV_VAR]\n\n # API Key in env\n os.environ[WANDB_ENV_VAR] = \"9012\"\n config[\"wandb\"] = {\"project\": \"test_project\"}\n trainable = WandbTestTrainable(config)\n\n # From now on, the API key is in the env variable.\n\n # Default configuration\n config[\"wandb\"] = {\"project\": \"test_project\"}\n config[TRIAL_INFO] = trial_info\n\n trainable = WandbTestTrainable(config)\n self.assertEqual(trainable.wandb.kwargs[\"project\"], \"test_project\")\n self.assertEqual(trainable.wandb.kwargs[\"id\"], trial.trial_id)\n self.assertEqual(trainable.wandb.kwargs[\"name\"], trial.trial_name)\n self.assertEqual(trainable.wandb.kwargs[\"group\"], \"WandbTestTrainable\")\n\n def 
testWandbDecoratorConfig(self):\n config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n config,\n 0,\n \"trial_0\",\n \"trainable\",\n PlacementGroupFactory([{\"CPU\": 1}]),\n \"/tmp\",\n )\n trial_info = _TrialInfo(trial)\n\n @wandb_mixin\n def train_fn(config):\n return 1\n\n train_fn.__mixins__ = (_MockWandbTrainableMixin,)\n\n config[TRIAL_INFO] = trial_info\n\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n # Needs at least a project\n with self.assertRaises(ValueError):\n wrapped = wrap_function(train_fn)(config)\n\n # No API key\n config[\"wandb\"] = {\"project\": \"test_project\"}\n with self.assertRaises(ValueError):\n wrapped = wrap_function(train_fn)(config)\n\n # API Key in config\n config[\"wandb\"] = {\"project\": \"test_project\", \"api_key\": \"1234\"}\n wrapped = wrap_function(train_fn)(config)\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"1234\")\n\n del os.environ[WANDB_ENV_VAR]\n\n # API Key file\n with tempfile.NamedTemporaryFile(\"wt\") as fp:\n fp.write(\"5678\")\n fp.flush()\n\n config[\"wandb\"] = {\"project\": \"test_project\", \"api_key_file\": fp.name}\n\n wrapped = wrap_function(train_fn)(config)\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"5678\")\n\n del os.environ[WANDB_ENV_VAR]\n\n # API Key in env\n os.environ[WANDB_ENV_VAR] = \"9012\"\n config[\"wandb\"] = {\"project\": \"test_project\"}\n wrapped = wrap_function(train_fn)(config)\n\n # From now on, the API key is in the env variable.\n\n # Default configuration\n config[\"wandb\"] = {\"project\": \"test_project\"}\n config[TRIAL_INFO] = trial_info\n\n wrapped = wrap_function(train_fn)(config)\n self.assertEqual(wrapped.wandb.kwargs[\"project\"], \"test_project\")\n self.assertEqual(wrapped.wandb.kwargs[\"id\"], trial.trial_id)\n self.assertEqual(wrapped.wandb.kwargs[\"name\"], trial.trial_name)\n\n def testWandbMixinRLlib(self):\n \"\"\"Test compatibility with RLlib configuration dicts\"\"\"\n # Local import to avoid tune dependency on rllib\n try:\n from ray.rllib.algorithms.ppo import PPO\n except ImportError:\n self.skipTest(\"ray[rllib] not available\")\n return\n\n class WandbPPOTrainer(_MockWandbTrainableMixin, PPO):\n pass\n\n config = {\n \"env\": \"CartPole-v0\",\n \"wandb\": {\n \"project\": \"test_project\",\n \"api_key\": \"1234\",\n },\n }\n\n # Test that trainer object can be initialized\n WandbPPOTrainer(config)\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n\n sys.exit(pytest.main([\"-v\", __file__]))\n",
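The tests above exercise a key-resolution order: an explicit `api_key`, then an `api_key_file`, then a pre-existing `WANDB_API_KEY` environment variable. The sketch below restates that precedence as a standalone helper; `resolve_api_key` is a hypothetical name, not part of the integration.

import os

WANDB_ENV_VAR = "WANDB_API_KEY"

def resolve_api_key(api_key=None, api_key_file=None):
    # Mirrors the precedence the tests check: config key > key file > env var.
    if api_key:
        os.environ[WANDB_ENV_VAR] = api_key
    elif api_key_file:
        with open(api_key_file) as f:
            os.environ[WANDB_ENV_VAR] = f.read().strip()
    elif WANDB_ENV_VAR not in os.environ:
        raise ValueError("No API key configured for wandb")
    return os.environ[WANDB_ENV_VAR]

print(resolve_api_key(api_key="1234"))  # -> "1234", and WANDB_API_KEY is now set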
"\"\"\"Example of using RLlib's debug callbacks.\n\nHere we use callbacks to track the average CartPole pole angle magnitude as a\ncustom metric.\n\"\"\"\n\nfrom typing import Dict, Tuple\nimport argparse\nimport numpy as np\nimport os\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.algorithms.callbacks import DefaultCallbacks\nfrom ray.rllib.env import BaseEnv\nfrom ray.rllib.evaluation import Episode, RolloutWorker\nfrom ray.rllib.policy import Policy\nfrom ray.rllib.policy.sample_batch import SampleBatch\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--framework\",\n choices=[\"tf\", \"tf2\", \"tfe\", \"torch\"],\n default=\"tf\",\n help=\"The DL framework specifier.\",\n)\nparser.add_argument(\"--stop-iters\", type=int, default=2000)\n\n\nclass MyCallbacks(DefaultCallbacks):\n def on_episode_start(\n self,\n *,\n worker: RolloutWorker,\n base_env: BaseEnv,\n policies: Dict[str, Policy],\n episode: Episode,\n env_index: int,\n **kwargs\n ):\n # Make sure this episode has just been started (only initial obs\n # logged so far).\n assert episode.length == 0, (\n \"ERROR: `on_episode_start()` callback should be called right \"\n \"after env reset!\"\n )\n print(\"episode {} (env-idx={}) started.\".format(episode.episode_id, env_index))\n episode.user_data[\"pole_angles\"] = []\n episode.hist_data[\"pole_angles\"] = []\n\n def on_episode_step(\n self,\n *,\n worker: RolloutWorker,\n base_env: BaseEnv,\n policies: Dict[str, Policy],\n episode: Episode,\n env_index: int,\n **kwargs\n ):\n # Make sure this episode is ongoing.\n assert episode.length > 0, (\n \"ERROR: `on_episode_step()` callback should not be called right \"\n \"after env reset!\"\n )\n pole_angle = abs(episode.last_observation_for()[2])\n raw_angle = abs(episode.last_raw_obs_for()[2])\n assert pole_angle == raw_angle\n episode.user_data[\"pole_angles\"].append(pole_angle)\n\n def on_episode_end(\n self,\n *,\n worker: RolloutWorker,\n base_env: BaseEnv,\n policies: Dict[str, Policy],\n episode: Episode,\n env_index: int,\n **kwargs\n ):\n # Check if there are multiple episodes in a batch, i.e.\n # \"batch_mode\": \"truncate_episodes\".\n if worker.policy_config[\"batch_mode\"] == \"truncate_episodes\":\n # Make sure this episode is really done.\n assert episode.batch_builder.policy_collectors[\"default_policy\"].batches[\n -1\n ][\"dones\"][-1], (\n \"ERROR: `on_episode_end()` should only be called \"\n \"after episode is done!\"\n )\n pole_angle = np.mean(episode.user_data[\"pole_angles\"])\n print(\n \"episode {} (env-idx={}) ended with length {} and pole \"\n \"angles {}\".format(\n episode.episode_id, env_index, episode.length, pole_angle\n )\n )\n episode.custom_metrics[\"pole_angle\"] = pole_angle\n episode.hist_data[\"pole_angles\"] = episode.user_data[\"pole_angles\"]\n\n def on_sample_end(self, *, worker: RolloutWorker, samples: SampleBatch, **kwargs):\n print(\"returned sample batch of size {}\".format(samples.count))\n\n def on_train_result(self, *, algorithm, result: dict, **kwargs):\n print(\n \"Algorithm.train() result: {} -> {} episodes\".format(\n algorithm, result[\"episodes_this_iter\"]\n )\n )\n # you can mutate the result dict to add new fields to return\n result[\"callback_ok\"] = True\n\n def on_learn_on_batch(\n self, *, policy: Policy, train_batch: SampleBatch, result: dict, **kwargs\n ) -> None:\n result[\"sum_actions_in_train_batch\"] = np.sum(train_batch[\"actions\"])\n print(\n \"policy.learn_on_batch() result: {} -> sum actions: {}\".format(\n policy, 
result[\"sum_actions_in_train_batch\"]\n )\n )\n\n def on_postprocess_trajectory(\n self,\n *,\n worker: RolloutWorker,\n episode: Episode,\n agent_id: str,\n policy_id: str,\n policies: Dict[str, Policy],\n postprocessed_batch: SampleBatch,\n original_batches: Dict[str, Tuple[Policy, SampleBatch]],\n **kwargs\n ):\n print(\"postprocessed {} steps\".format(postprocessed_batch.count))\n if \"num_batches\" not in episode.custom_metrics:\n episode.custom_metrics[\"num_batches\"] = 0\n episode.custom_metrics[\"num_batches\"] += 1\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n ray.init()\n trials = tune.run(\n \"PG\",\n stop={\n \"training_iteration\": args.stop_iters,\n },\n config={\n \"env\": \"CartPole-v0\",\n \"num_envs_per_worker\": 2,\n \"callbacks\": MyCallbacks,\n \"framework\": args.framework,\n # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.\n \"num_gpus\": int(os.environ.get(\"RLLIB_NUM_GPUS\", \"0\")),\n },\n ).trials\n\n # Verify episode-related custom metrics are there.\n custom_metrics = trials[0].last_result[\"custom_metrics\"]\n print(custom_metrics)\n assert \"pole_angle_mean\" in custom_metrics\n assert \"pole_angle_min\" in custom_metrics\n assert \"pole_angle_max\" in custom_metrics\n assert \"num_batches_mean\" in custom_metrics\n assert \"callback_ok\" in trials[0].last_result\n\n # Verify `on_learn_on_batch` custom metrics are there (per policy).\n if args.framework == \"torch\":\n info_custom_metrics = custom_metrics[\"default_policy\"]\n print(info_custom_metrics)\n assert \"sum_actions_in_train_batch\" in info_custom_metrics\n",
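The assertions at the end of the script check for `pole_angle_mean`, `pole_angle_min` and `pole_angle_max`, i.e. each scalar written to `episode.custom_metrics` gets aggregated across episodes. The sketch below only illustrates that mean/min/max aggregation; it is an assumption about the reporting convention, not RLlib code.

import numpy as np

def summarize_custom_metrics(per_episode_values):
    # per_episode_values: metric name -> one scalar per finished episode.
    summary = {}
    for name, values in per_episode_values.items():
        summary[f"{name}_mean"] = float(np.mean(values))
        summary[f"{name}_min"] = float(np.min(values))
        summary[f"{name}_max"] = float(np.max(values))
    return summary

print(summarize_custom_metrics({"pole_angle": [0.02, 0.05, 0.01]}))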
"import logging\nimport numpy as np\nfrom typing import Type, List, Optional\nimport tree\n\nfrom ray.rllib.algorithms.algorithm import Algorithm, AlgorithmConfig\nfrom ray.rllib.execution.train_ops import (\n multi_gpu_train_one_step,\n train_one_step,\n)\nfrom ray.rllib.offline.shuffled_input import ShuffledInput\nfrom ray.rllib.policy import Policy\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.metrics import (\n LAST_TARGET_UPDATE_TS,\n NUM_TARGET_UPDATES,\n TARGET_NET_UPDATE_TIMER,\n)\nfrom ray.rllib.utils.typing import (\n PartialAlgorithmConfigDict,\n ResultDict,\n AlgorithmConfigDict,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass CRRConfig(AlgorithmConfig):\n def __init__(self, algo_class=None):\n super().__init__(algo_class=algo_class or CRR)\n\n # fmt: off\n # __sphinx_doc_begin__\n # CRR-specific settings.\n self.weight_type = \"bin\"\n self.temperature = 1.0\n self.max_weight = 20.0\n self.advantage_type = \"mean\"\n self.n_action_sample = 4\n self.twin_q = True\n self.target_update_grad_intervals = 100\n # __sphinx_doc_end__\n # fmt: on\n self.replay_buffer_config = {\n \"type\": \"ReplayBuffer\",\n \"capacity\": 50000,\n # How many steps of the model to sample before learning starts.\n \"learning_starts\": 1000,\n \"replay_batch_size\": 32,\n # The number of contiguous environment steps to replay at once. This\n # may be set to greater than 1 to support recurrent models.\n \"replay_sequence_length\": 1,\n }\n self.actor_hiddens = [256, 256]\n self.actor_hidden_activation = \"relu\"\n self.critic_hiddens = [256, 256]\n self.critic_hidden_activation = \"relu\"\n self.critic_lr = 3e-4\n self.actor_lr = 3e-4\n self.tau = 5e-3\n\n # overriding the trainer config default\n self.num_workers = 0 # offline RL does not need rollout workers\n\n def training(\n self,\n *,\n weight_type: Optional[str] = None,\n temperature: Optional[float] = None,\n max_weight: Optional[float] = None,\n advantage_type: Optional[str] = None,\n n_action_sample: Optional[int] = None,\n twin_q: Optional[bool] = None,\n target_update_grad_intervals: Optional[int] = None,\n replay_buffer_config: Optional[dict] = None,\n actor_hiddens: Optional[List[int]] = None,\n actor_hidden_activation: Optional[str] = None,\n critic_hiddens: Optional[List[int]] = None,\n critic_hidden_activation: Optional[str] = None,\n tau: Optional[float] = None,\n **kwargs,\n ) -> \"CRRConfig\":\n\n \"\"\"\n === CRR configs\n\n Args:\n weight_type: weight type to use `bin` | `exp`.\n temperature: the exponent temperature used in exp weight type.\n max_weight: the max weight limit for exp weight type.\n advantage_type: The way we reduce q values to v_t values `max` | `mean`.\n n_action_sample: the number of actions to sample for v_t estimation.\n twin_q: if True, uses pessimistic q estimation.\n target_update_grad_intervals: The frequency at which we update the\n target copy of the model in terms of the number of gradient updates\n applied to the main model.\n replay_buffer_config: The config dictionary for replay buffer.\n actor_hiddens: The number of hidden units in the actor's fc network.\n actor_hidden_activation: The activation used in the actor's fc network.\n critic_hiddens: The number of hidden units in the critic's fc network.\n critic_hidden_activation: The activation used in the critic's fc network.\n tau: Polyak averaging coefficient\n (making it 1 is reduces it to a hard update).\n **kwargs: forward compatibility kwargs\n\n Returns:\n This 
updated CRRConfig object.\n \"\"\"\n super().training(**kwargs)\n\n if weight_type is not None:\n self.weight_type = weight_type\n if temperature is not None:\n self.temperature = temperature\n if max_weight is not None:\n self.max_weight = max_weight\n if advantage_type is not None:\n self.advantage_type = advantage_type\n if n_action_sample is not None:\n self.n_action_sample = n_action_sample\n if twin_q is not None:\n self.twin_q = twin_q\n if target_update_grad_intervals is not None:\n self.target_update_grad_intervals = target_update_grad_intervals\n if replay_buffer_config is not None:\n self.replay_buffer_config = replay_buffer_config\n if actor_hiddens is not None:\n self.actor_hiddens = actor_hiddens\n if actor_hidden_activation is not None:\n self.actor_hidden_activation = actor_hidden_activation\n if critic_hiddens is not None:\n self.critic_hiddens = critic_hiddens\n if critic_hidden_activation is not None:\n self.critic_hidden_activation = critic_hidden_activation\n if tau is not None:\n self.tau = tau\n\n return self\n\n\nNUM_GRADIENT_UPDATES = \"num_grad_updates\"\n\n\nclass CRR(Algorithm):\n\n # TODO: we have a circular dependency for get\n # default config. config -> Trainer -> config\n # defining Config class in the same file for now as a workaround.\n\n def setup(self, config: PartialAlgorithmConfigDict):\n super().setup(config)\n # initial setup for handling the offline data in form of a replay buffer\n # Add the entire dataset to Replay Buffer (global variable)\n reader = self.workers.local_worker().input_reader\n\n # For d4rl, add the D4RLReaders' dataset to the buffer.\n if isinstance(self.config[\"input\"], str) and \"d4rl\" in self.config[\"input\"]:\n dataset = reader.dataset\n self.local_replay_buffer.add(dataset)\n # For a list of files, add each file's entire content to the buffer.\n elif isinstance(reader, ShuffledInput):\n num_batches = 0\n total_timesteps = 0\n for batch in reader.child.read_all_files():\n num_batches += 1\n total_timesteps += len(batch)\n # Add NEXT_OBS if not available. This is slightly hacked\n # as for the very last time step, we will use next-obs=zeros\n # and therefore force-set DONE=True to avoid this missing\n # next-obs to cause learning problems.\n if SampleBatch.NEXT_OBS not in batch:\n obs = batch[SampleBatch.OBS]\n batch[SampleBatch.NEXT_OBS] = np.concatenate(\n [obs[1:], np.zeros_like(obs[0:1])]\n )\n batch[SampleBatch.DONES][-1] = True\n self.local_replay_buffer.add(batch)\n print(\n f\"Loaded {num_batches} batches ({total_timesteps} ts) into the\"\n \" replay buffer, which has capacity \"\n f\"{self.local_replay_buffer.capacity}.\"\n )\n else:\n raise ValueError(\n \"Unknown offline input! config['input'] must either be list of\"\n \" offline files (json) or a D4RL-specific InputReader \"\n \"specifier (e.g. 
'd4rl.hopper-medium-v0').\"\n )\n\n # added a counter key for keeping track of number of gradient updates\n self._counters[NUM_GRADIENT_UPDATES] = 0\n # if I don't set this here to zero I won't see zero in the logs (defaultdict)\n self._counters[NUM_TARGET_UPDATES] = 0\n\n @classmethod\n @override(Algorithm)\n def get_default_config(cls) -> AlgorithmConfigDict:\n return CRRConfig().to_dict()\n\n @override(Algorithm)\n def get_default_policy_class(self, config: AlgorithmConfigDict) -> Type[Policy]:\n if config[\"framework\"] == \"torch\":\n from ray.rllib.algorithms.crr.torch import CRRTorchPolicy\n\n return CRRTorchPolicy\n else:\n raise ValueError(\"Non-torch frameworks are not supported yet!\")\n\n @override(Algorithm)\n def training_step(self) -> ResultDict:\n\n total_transitions = len(self.local_replay_buffer)\n bsize = self.config[\"train_batch_size\"]\n n_batches_per_epoch = total_transitions // bsize\n\n results = []\n for batch_iter in range(n_batches_per_epoch):\n # Sample training batch from replay buffer.\n train_batch = self.local_replay_buffer.sample(bsize)\n\n # Postprocess batch before we learn on it.\n post_fn = self.config.get(\"before_learn_on_batch\") or (lambda b, *a: b)\n train_batch = post_fn(train_batch, self.workers, self.config)\n\n # Learn on training batch.\n # Use simple optimizer (only for multi-agent or tf-eager; all other\n # cases should use the multi-GPU optimizer, even if only using 1 GPU)\n if self.config.get(\"simple_optimizer\", False):\n train_results = train_one_step(self, train_batch)\n else:\n train_results = multi_gpu_train_one_step(self, train_batch)\n\n # update target every few gradient updates\n cur_ts = self._counters[NUM_GRADIENT_UPDATES]\n last_update = self._counters[LAST_TARGET_UPDATE_TS]\n\n if cur_ts - last_update >= self.config[\"target_update_grad_intervals\"]:\n with self._timers[TARGET_NET_UPDATE_TIMER]:\n to_update = self.workers.local_worker().get_policies_to_train()\n self.workers.local_worker().foreach_policy_to_train(\n lambda p, pid: pid in to_update and p.update_target()\n )\n self._counters[NUM_TARGET_UPDATES] += 1\n self._counters[LAST_TARGET_UPDATE_TS] = cur_ts\n\n self._counters[NUM_GRADIENT_UPDATES] += 1\n\n results.append(train_results)\n\n summary = tree.map_structure_with_path(\n lambda path, *v: float(np.mean(v)), *results\n )\n\n return summary\n",
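As a rough illustration of what the `weight_type`, `temperature` and `max_weight` settings control, the sketch below shows the conventional CRR-style advantage weighting: a binary indicator for "bin" and a clipped exponential for "exp". This is a hypothetical standalone helper based on the CRR formulation, not the policy code referenced above.

import numpy as np

def crr_weight(advantages, weight_type="bin", temperature=1.0, max_weight=20.0):
    advantages = np.asarray(advantages, dtype=np.float32)
    if weight_type == "bin":
        # Keep only actions whose advantage estimate is positive.
        return (advantages > 0).astype(np.float32)
    if weight_type == "exp":
        # Exponential weighting, clipped at max_weight to bound the loss.
        return np.minimum(np.exp(advantages / temperature), max_weight)
    raise ValueError(f"Unknown weight_type: {weight_type}")

print(crr_weight([-1.0, 0.5, 5.0], weight_type="exp"))  # approx. [0.37, 1.65, 20.0]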
"import glob\nimport inspect\nimport io\nimport logging\nimport os\nimport pandas as pd\nimport shutil\nfrom typing import Any, Dict, Union, Optional\n\nimport ray\nimport ray.cloudpickle as pickle\nfrom ray.tune.registry import _ParameterRegistry\nfrom ray.tune.utils import detect_checkpoint_function\nfrom ray.util import placement_group\nfrom ray.util.annotations import DeveloperAPI\nfrom six import string_types\n\nlogger = logging.getLogger(__name__)\n\n\n@DeveloperAPI\nclass TrainableUtil:\n @staticmethod\n def process_checkpoint(\n checkpoint: Union[Dict, str], parent_dir: str, trainable_state: Dict\n ) -> str:\n \"\"\"Creates checkpoint file structure and writes metadata\n under `parent_dir`.\n\n The file structure could either look like:\n - checkpoint_00000 (returned path)\n -- .is_checkpoint\n -- .tune_metadata\n -- xxx.pkl (or whatever user specifies in their Trainable)\n Or,\n - checkpoint_00000\n -- .is_checkpoint\n -- checkpoint (returned path)\n -- checkpoint.tune_metadata\n \"\"\"\n saved_as_dict = False\n if isinstance(checkpoint, string_types):\n if not checkpoint.startswith(parent_dir):\n raise ValueError(\n \"The returned checkpoint path must be within the \"\n \"given checkpoint dir {}: {}\".format(parent_dir, checkpoint)\n )\n checkpoint_path = checkpoint\n if os.path.isdir(checkpoint_path):\n # Add trailing slash to prevent tune metadata from\n # being written outside the directory.\n checkpoint_path = os.path.join(checkpoint_path, \"\")\n elif isinstance(checkpoint, dict):\n saved_as_dict = True\n checkpoint_path = os.path.join(parent_dir, \"checkpoint\")\n with open(checkpoint_path, \"wb\") as f:\n pickle.dump(checkpoint, f)\n else:\n raise ValueError(\n \"Returned unexpected type {}. \"\n \"Expected str or dict.\".format(type(checkpoint))\n )\n\n with open(checkpoint_path + \".tune_metadata\", \"wb\") as f:\n trainable_state[\"saved_as_dict\"] = saved_as_dict\n pickle.dump(trainable_state, f)\n return checkpoint_path\n\n @staticmethod\n def load_checkpoint_metadata(checkpoint_path: str) -> Optional[Dict]:\n metadata_path = os.path.join(checkpoint_path, \".tune_metadata\")\n if not os.path.exists(metadata_path):\n checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)\n metadatas = glob.glob(f\"{checkpoint_dir}/**/.tune_metadata\", recursive=True)\n if not metadatas:\n return None\n metadata_path = metadatas[0]\n\n with open(metadata_path, \"rb\") as f:\n return pickle.load(f)\n\n @staticmethod\n def pickle_checkpoint(checkpoint_path: str):\n \"\"\"Pickles checkpoint data.\"\"\"\n checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)\n data = {}\n for basedir, _, file_names in os.walk(checkpoint_dir):\n for file_name in file_names:\n path = os.path.join(basedir, file_name)\n with open(path, \"rb\") as f:\n data[os.path.relpath(path, checkpoint_dir)] = f.read()\n # Use normpath so that a directory path isn't mapped to empty string.\n name = os.path.relpath(os.path.normpath(checkpoint_path), checkpoint_dir)\n name += os.path.sep if os.path.isdir(checkpoint_path) else \"\"\n data_dict = pickle.dumps(\n {\n \"checkpoint_name\": name,\n \"data\": data,\n }\n )\n return data_dict\n\n @staticmethod\n def checkpoint_to_object(checkpoint_path):\n data_dict = TrainableUtil.pickle_checkpoint(checkpoint_path)\n out = io.BytesIO()\n if len(data_dict) > 10e6: # getting pretty large\n logger.info(\"Checkpoint size is {} bytes\".format(len(data_dict)))\n out.write(data_dict)\n return out.getvalue()\n\n @staticmethod\n def 
find_checkpoint_dir(checkpoint_path):\n \"\"\"Returns the directory containing the checkpoint path.\n\n Raises:\n FileNotFoundError if the directory is not found.\n \"\"\"\n if not os.path.exists(checkpoint_path):\n raise FileNotFoundError(\"Path does not exist\", checkpoint_path)\n if os.path.isdir(checkpoint_path):\n checkpoint_dir = checkpoint_path\n else:\n checkpoint_dir = os.path.dirname(checkpoint_path)\n while checkpoint_dir != os.path.dirname(checkpoint_dir):\n if os.path.exists(os.path.join(checkpoint_dir, \".is_checkpoint\")):\n break\n checkpoint_dir = os.path.dirname(checkpoint_dir)\n else:\n raise FileNotFoundError(\n \"Checkpoint directory not found for {}\".format(checkpoint_path)\n )\n return os.path.normpath(checkpoint_dir)\n\n @staticmethod\n def find_rel_checkpoint_dir(logdir, checkpoint_path):\n \"\"\"Returns the (relative) directory name of the checkpoint.\n\n Note, the assumption here is `logdir` should be the prefix of\n `checkpoint_path`.\n For example, returns `checkpoint00000`.\n \"\"\"\n assert checkpoint_path.startswith(\n logdir\n ), \"expecting `logdir` to be a prefix of `checkpoint_path`\"\n rel_path = os.path.relpath(checkpoint_path, logdir)\n tokens = rel_path.split(os.sep)\n return os.path.join(tokens[0])\n\n @staticmethod\n def make_checkpoint_dir(\n checkpoint_dir: str, index: Union[int, str], override=False\n ):\n \"\"\"Creates a checkpoint directory within the provided path.\n\n Args:\n checkpoint_dir: Path to checkpoint directory.\n index: A subdirectory will be created\n at the checkpoint directory named 'checkpoint_{index}'.\n override: Deletes checkpoint_dir before creating\n a new one.\n \"\"\"\n suffix = \"checkpoint\"\n if index is not None:\n suffix += f\"_{index:06d}\" if isinstance(index, int) else f\"_{index}\"\n checkpoint_dir = os.path.join(checkpoint_dir, suffix)\n\n if override and os.path.exists(checkpoint_dir):\n shutil.rmtree(checkpoint_dir)\n os.makedirs(checkpoint_dir, exist_ok=True)\n # Drop marker in directory to identify it as a checkpoint dir.\n open(os.path.join(checkpoint_dir, \".is_checkpoint\"), \"a\").close()\n return checkpoint_dir\n\n @staticmethod\n def create_from_pickle(obj, tmpdir):\n info = pickle.loads(obj)\n data = info[\"data\"]\n checkpoint_path = os.path.join(tmpdir, info[\"checkpoint_name\"])\n\n for relpath_name, file_contents in data.items():\n path = os.path.join(tmpdir, relpath_name)\n\n # This may be a subdirectory, hence not just using tmpdir\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, \"wb\") as f:\n f.write(file_contents)\n return checkpoint_path\n\n @staticmethod\n def get_checkpoints_paths(logdir):\n \"\"\"Finds the checkpoints within a specific folder.\n\n Returns a pandas DataFrame of training iterations and checkpoint\n paths within a specific folder.\n\n Raises:\n FileNotFoundError if the directory is not found.\n \"\"\"\n marker_paths = glob.glob(\n os.path.join(glob.escape(logdir), \"checkpoint_*/.is_checkpoint\")\n )\n iter_chkpt_pairs = []\n for marker_path in marker_paths:\n chkpt_dir = os.path.dirname(marker_path)\n\n # Skip temporary checkpoints\n if os.path.basename(chkpt_dir).startswith(\"checkpoint_tmp\"):\n continue\n\n metadata_file = glob.glob(\n os.path.join(glob.escape(chkpt_dir), \"*.tune_metadata\")\n )\n # glob.glob: filenames starting with a dot are special cases\n # that are not matched by '*' and '?' 
patterns.\n metadata_file += glob.glob(\n os.path.join(glob.escape(chkpt_dir), \".tune_metadata\")\n )\n metadata_file = list(set(metadata_file)) # avoid duplication\n if len(metadata_file) != 1:\n raise ValueError(\n \"{} has zero or more than one tune_metadata.\".format(chkpt_dir)\n )\n\n metadata_file = metadata_file[0]\n\n try:\n with open(metadata_file, \"rb\") as f:\n metadata = pickle.load(f)\n except Exception as e:\n logger.warning(f\"Could not read metadata from checkpoint: {e}\")\n metadata = {}\n\n chkpt_path = metadata_file[: -len(\".tune_metadata\")]\n chkpt_iter = metadata.get(\"iteration\", -1)\n iter_chkpt_pairs.append([chkpt_iter, chkpt_path])\n\n chkpt_df = pd.DataFrame(\n iter_chkpt_pairs, columns=[\"training_iteration\", \"chkpt_path\"]\n )\n return chkpt_df\n\n\n@DeveloperAPI\nclass PlacementGroupUtil:\n @staticmethod\n def get_remote_worker_options(\n num_workers: int,\n num_cpus_per_worker: int,\n num_gpus_per_worker: int,\n num_workers_per_host: Optional[int],\n timeout_s: Optional[int],\n ) -> (Dict[str, Any], placement_group):\n \"\"\"Returns the option for remote workers.\n\n Args:\n num_workers: Number of training workers to include in\n world.\n num_cpus_per_worker: Number of CPU resources to reserve\n per training worker.\n num_gpus_per_worker: Number of GPU resources to reserve\n per training worker.\n num_workers_per_host: Optional[int]: Number of workers to\n colocate per host.\n timeout_s: Seconds before the torch process group\n times out. Useful when machines are unreliable. Defaults\n to 60 seconds. This value is also reused for triggering\n placement timeouts if forcing colocation.\n\n\n Returns:\n type: option that contains CPU/GPU count of\n the remote worker and the placement group information.\n pg: return a reference to the placement group\n \"\"\"\n pg = None\n options = dict(num_cpus=num_cpus_per_worker, num_gpus=num_gpus_per_worker)\n if num_workers_per_host:\n num_hosts = int(num_workers / num_workers_per_host)\n cpus_per_node = num_cpus_per_worker * num_workers_per_host\n gpus_per_node = num_gpus_per_worker * num_workers_per_host\n bundle = {\"CPU\": cpus_per_node, \"GPU\": gpus_per_node}\n\n all_bundles = [bundle] * num_hosts\n pg = placement_group(all_bundles, strategy=\"STRICT_SPREAD\")\n logger.debug(\"Waiting for placement_group to start.\")\n ray.get(pg.ready(), timeout=timeout_s)\n logger.debug(\"Placement_group started.\")\n options[\"placement_group\"] = pg\n\n return options, pg\n\n\ndef with_parameters(trainable, **kwargs):\n \"\"\"Wrapper for trainables to pass arbitrary large data objects.\n\n This wrapper function will store all passed parameters in the Ray\n object store and retrieve them when calling the function. It can thus\n be used to pass arbitrary data, even datasets, to Tune trainables.\n\n This can also be used as an alternative to ``functools.partial`` to pass\n default arguments to trainables.\n\n When used with the function API, the trainable function is called with\n the passed parameters as keyword arguments. When used with the class API,\n the ``Trainable.setup()`` method is called with the respective kwargs.\n\n If the data already exists in the object store (are instances of\n ObjectRef), using ``tune.with_parameters()`` is not necessary. You can\n instead pass the object refs to the training function via the ``config``\n or use Python partials.\n\n Args:\n trainable: Trainable to wrap.\n **kwargs: parameters to store in object store.\n\n Function API example:\n\n .. 
code-block:: python\n\n from ray import tune\n\n def train(config, data=None):\n for sample in data:\n loss = update_model(sample)\n tune.report(loss=loss)\n\n data = HugeDataset(download=True)\n\n tune.run(\n tune.with_parameters(train, data=data),\n # ...\n )\n\n Class API example:\n\n .. code-block:: python\n\n from ray import tune\n\n class MyTrainable(tune.Trainable):\n def setup(self, config, data=None):\n self.data = data\n self.iter = iter(self.data)\n self.next_sample = next(self.iter)\n\n def step(self):\n loss = update_model(self.next_sample)\n try:\n self.next_sample = next(self.iter)\n except StopIteration:\n return {\"loss\": loss, done: True}\n return {\"loss\": loss}\n\n data = HugeDataset(download=True)\n\n tune.run(\n tune.with_parameters(MyTrainable, data=data),\n # ...\n )\n\n \"\"\"\n from ray.tune.trainable import Trainable\n\n if not callable(trainable) or (\n inspect.isclass(trainable) and not issubclass(trainable, Trainable)\n ):\n raise ValueError(\n f\"`tune.with_parameters() only works with function trainables \"\n f\"or classes that inherit from `tune.Trainable()`. Got type: \"\n f\"{type(trainable)}.\"\n )\n\n parameter_registry = _ParameterRegistry()\n ray.worker._post_init_hooks.append(parameter_registry.flush)\n\n # Objects are moved into the object store\n prefix = f\"{str(trainable)}_\"\n for k, v in kwargs.items():\n parameter_registry.put(prefix + k, v)\n\n trainable_name = getattr(trainable, \"__name__\", \"tune_with_parameters\")\n\n if inspect.isclass(trainable):\n # Class trainable\n keys = list(kwargs.keys())\n\n class _Inner(trainable):\n def setup(self, config):\n setup_kwargs = {}\n for k in keys:\n setup_kwargs[k] = parameter_registry.get(prefix + k)\n super(_Inner, self).setup(config, **setup_kwargs)\n\n _Inner.__name__ = trainable_name\n return _Inner\n else:\n # Function trainable\n use_checkpoint = detect_checkpoint_function(trainable, partial=True)\n keys = list(kwargs.keys())\n\n def inner(config, checkpoint_dir=None):\n fn_kwargs = {}\n if use_checkpoint:\n default = checkpoint_dir\n sig = inspect.signature(trainable)\n if \"checkpoint_dir\" in sig.parameters:\n default = sig.parameters[\"checkpoint_dir\"].default or default\n fn_kwargs[\"checkpoint_dir\"] = default\n\n for k in keys:\n fn_kwargs[k] = parameter_registry.get(prefix + k)\n trainable(config, **fn_kwargs)\n\n inner.__name__ = trainable_name\n\n # Use correct function signature if no `checkpoint_dir` parameter\n # is set\n if not use_checkpoint:\n\n def _inner(config):\n inner(config, checkpoint_dir=None)\n\n _inner.__name__ = trainable_name\n\n if hasattr(trainable, \"__mixins__\"):\n _inner.__mixins__ = trainable.__mixins__\n return _inner\n\n if hasattr(trainable, \"__mixins__\"):\n inner.__mixins__ = trainable.__mixins__\n\n return inner\n",
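A standalone sketch of the `.is_checkpoint` marker convention that `make_checkpoint_dir` and `find_checkpoint_dir` above rely on: the marker is dropped into the checkpoint directory, and lookups walk parent directories until they find it. The paths and the helper name `walk_up_to_marker` are hypothetical.

import os
import tempfile

def walk_up_to_marker(path, marker=".is_checkpoint"):
    # Start from the containing directory and walk upwards until the marker is found.
    d = path if os.path.isdir(path) else os.path.dirname(path)
    while d != os.path.dirname(d):
        if os.path.exists(os.path.join(d, marker)):
            return d
        d = os.path.dirname(d)
    raise FileNotFoundError(f"No {marker} found above {path}")

root = tempfile.mkdtemp()
ckpt_dir = os.path.join(root, "checkpoint_000001")
os.makedirs(ckpt_dir)
open(os.path.join(ckpt_dir, ".is_checkpoint"), "a").close()  # marker file
payload = os.path.join(ckpt_dir, "model.pkl")                # user-written checkpoint data
open(payload, "wb").close()
assert walk_up_to_marker(payload) == ckpt_dir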
"import glob\nimport json\nimport numpy as np\nimport os\nimport random\nimport shutil\nimport tempfile\nimport time\nimport unittest\n\nimport ray\nfrom ray.tune.registry import (\n register_env,\n register_input,\n registry_get_input,\n registry_contains_input,\n)\nfrom ray.rllib.algorithms.pg import PG\nfrom ray.rllib.examples.env.multi_agent import MultiAgentCartPole\nfrom ray.rllib.offline import (\n IOContext,\n JsonWriter,\n JsonReader,\n InputReader,\n ShuffledInput,\n DatasetWriter,\n)\nfrom ray.rllib.offline.json_writer import _to_json\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.test_utils import framework_iterator\n\nSAMPLES = SampleBatch(\n {\n \"actions\": np.array([1, 2, 3, 4]),\n \"obs\": np.array([4, 5, 6, 7]),\n \"eps_id\": [1, 1, 2, 3],\n }\n)\n\n\ndef make_sample_batch(i):\n return SampleBatch({\"actions\": np.array([i, i, i]), \"obs\": np.array([i, i, i])})\n\n\nclass AgentIOTest(unittest.TestCase):\n def setUp(self):\n ray.init(num_cpus=1, ignore_reinit_error=True)\n self.test_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_dir)\n ray.shutdown()\n\n def write_outputs(self, output, fw, output_config=None):\n agent = PG(\n env=\"CartPole-v0\",\n config={\n \"output\": output + (fw if output != \"logdir\" else \"\"),\n \"rollout_fragment_length\": 250,\n \"framework\": fw,\n \"output_config\": output_config or {},\n },\n )\n agent.train()\n return agent\n\n def test_agent_output_ok(self):\n for fw in framework_iterator(frameworks=(\"torch\", \"tf\")):\n self.write_outputs(self.test_dir, fw)\n self.assertEqual(len(os.listdir(self.test_dir + fw)), 1)\n reader = JsonReader(self.test_dir + fw + \"/*.json\")\n reader.next()\n\n def test_agent_output_logdir(self):\n \"\"\"Test special value 'logdir' as Agent's output.\"\"\"\n for fw in framework_iterator():\n agent = self.write_outputs(\"logdir\", fw)\n self.assertEqual(len(glob.glob(agent.logdir + \"/output-*.json\")), 1)\n\n def test_agent_output_infos(self):\n \"\"\"Verify that the infos dictionary is written to the output files.\n\n Note, with torch this is always the case.\n \"\"\"\n output_config = {\"store_infos\": True}\n for fw in framework_iterator(frameworks=(\"torch\", \"tf\")):\n self.write_outputs(self.test_dir, fw, output_config=output_config)\n self.assertEqual(len(os.listdir(self.test_dir + fw)), 1)\n reader = JsonReader(self.test_dir + fw + \"/*.json\")\n data = reader.next()\n assert \"infos\" in data\n\n def test_agent_input_dir(self):\n for fw in framework_iterator(frameworks=(\"torch\", \"tf\")):\n self.write_outputs(self.test_dir, fw)\n print(\"WROTE TO: \", self.test_dir)\n agent = PG(\n env=\"CartPole-v0\",\n config={\n \"input\": self.test_dir + fw,\n \"off_policy_estimation_methods\": {},\n \"framework\": fw,\n },\n )\n result = agent.train()\n self.assertEqual(result[\"timesteps_total\"], 250) # read from input\n self.assertTrue(np.isnan(result[\"episode_reward_mean\"]))\n\n def test_split_by_episode(self):\n splits = SAMPLES.split_by_episode()\n self.assertEqual(len(splits), 3)\n self.assertEqual(splits[0].count, 2)\n self.assertEqual(splits[1].count, 1)\n self.assertEqual(splits[2].count, 1)\n\n def test_agent_input_postprocessing_enabled(self):\n for fw in framework_iterator(frameworks=(\"tf\", \"torch\")):\n self.write_outputs(self.test_dir, fw)\n\n # Rewrite the files to drop advantages and value_targets for\n # testing\n for path in glob.glob(self.test_dir + fw + \"/*.json\"):\n out = []\n with open(path) as f:\n for line in 
f.readlines():\n data = json.loads(line)\n # Data won't contain rewards as these are not included\n # in the write_outputs run (not needed in the\n # SampleBatch). Flip out \"rewards\" for \"advantages\"\n # just for testing.\n data[\"rewards\"] = data[\"advantages\"]\n del data[\"advantages\"]\n if \"value_targets\" in data:\n del data[\"value_targets\"]\n out.append(data)\n with open(path, \"w\") as f:\n for data in out:\n f.write(json.dumps(data))\n\n agent = PG(\n env=\"CartPole-v0\",\n config={\n \"input\": self.test_dir + fw,\n \"off_policy_estimation_methods\": {},\n \"postprocess_inputs\": True, # adds back 'advantages'\n \"framework\": fw,\n },\n )\n\n result = agent.train()\n self.assertEqual(result[\"timesteps_total\"], 250) # read from input\n self.assertTrue(np.isnan(result[\"episode_reward_mean\"]))\n\n def test_agent_input_eval_sim(self):\n for fw in framework_iterator():\n self.write_outputs(self.test_dir, fw)\n agent = PG(\n env=\"CartPole-v0\",\n config={\n \"input\": self.test_dir + fw,\n \"off_policy_estimation_methods\": {\n \"simulation\": {\"type\": \"simulation\"}\n },\n \"framework\": fw,\n },\n )\n for _ in range(50):\n result = agent.train()\n if not np.isnan(result[\"episode_reward_mean\"]):\n return # simulation ok\n time.sleep(0.1)\n assert False, \"did not see any simulation results\"\n\n def test_agent_input_list(self):\n for fw in framework_iterator(frameworks=(\"torch\", \"tf\")):\n self.write_outputs(self.test_dir, fw)\n agent = PG(\n env=\"CartPole-v0\",\n config={\n \"input\": glob.glob(self.test_dir + fw + \"/*.json\"),\n \"off_policy_estimation_methods\": {},\n \"rollout_fragment_length\": 99,\n \"framework\": fw,\n },\n )\n result = agent.train()\n self.assertEqual(result[\"timesteps_total\"], 250) # read from input\n self.assertTrue(np.isnan(result[\"episode_reward_mean\"]))\n\n def test_agent_input_dict(self):\n for fw in framework_iterator():\n self.write_outputs(self.test_dir, fw)\n agent = PG(\n env=\"CartPole-v0\",\n config={\n \"input\": {\n self.test_dir + fw: 0.1,\n \"sampler\": 0.9,\n },\n \"train_batch_size\": 2000,\n \"off_policy_estimation_methods\": {},\n \"framework\": fw,\n },\n )\n result = agent.train()\n self.assertTrue(not np.isnan(result[\"episode_reward_mean\"]))\n\n def test_multi_agent(self):\n register_env(\n \"multi_agent_cartpole\", lambda _: MultiAgentCartPole({\"num_agents\": 10})\n )\n\n for fw in framework_iterator():\n pg = PG(\n env=\"multi_agent_cartpole\",\n config={\n \"num_workers\": 0,\n \"output\": self.test_dir,\n \"multiagent\": {\n \"policies\": {\"policy_1\", \"policy_2\"},\n \"policy_mapping_fn\": (\n lambda aid, **kwargs: random.choice(\n [\"policy_1\", \"policy_2\"]\n )\n ),\n },\n \"framework\": fw,\n },\n )\n pg.train()\n self.assertEqual(len(os.listdir(self.test_dir)), 1)\n\n pg.stop()\n pg = PG(\n env=\"multi_agent_cartpole\",\n config={\n \"num_workers\": 0,\n \"input\": self.test_dir,\n \"off_policy_estimation_methods\": {\n \"simulation\": {\"type\": \"simulation\"}\n },\n \"train_batch_size\": 2000,\n \"multiagent\": {\n \"policies\": {\"policy_1\", \"policy_2\"},\n \"policy_mapping_fn\": (\n lambda aid, **kwargs: random.choice(\n [\"policy_1\", \"policy_2\"]\n )\n ),\n },\n \"framework\": fw,\n },\n )\n for _ in range(50):\n result = pg.train()\n if not np.isnan(result[\"episode_reward_mean\"]):\n return # simulation ok\n time.sleep(0.1)\n assert False, \"did not see any simulation results\"\n\n def test_custom_input_procedure(self):\n class CustomJsonReader(JsonReader):\n def __init__(self, 
ioctx: IOContext):\n super().__init__(ioctx.input_config[\"input_files\"], ioctx)\n\n def input_creator(ioctx: IOContext) -> InputReader:\n return ShuffledInput(CustomJsonReader(ioctx))\n\n register_input(\"custom_input\", input_creator)\n test_input_procedure = [\n \"custom_input\",\n input_creator,\n \"ray.rllib.examples.custom_input_api.CustomJsonReader\",\n ]\n for input_procedure in test_input_procedure:\n for fw in framework_iterator(frameworks=(\"torch\", \"tf\")):\n self.write_outputs(self.test_dir, fw)\n agent = PG(\n env=\"CartPole-v0\",\n config={\n \"input\": input_procedure,\n \"input_config\": {\"input_files\": self.test_dir + fw},\n \"off_policy_estimation_methods\": {},\n \"framework\": fw,\n },\n )\n result = agent.train()\n self.assertEqual(result[\"timesteps_total\"], 250)\n self.assertTrue(np.isnan(result[\"episode_reward_mean\"]))\n\n\nclass JsonIOTest(unittest.TestCase):\n def setUp(self):\n ray.init(num_cpus=1, ignore_reinit_error=True)\n self.test_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.test_dir)\n ray.shutdown()\n\n def test_write_dataset(self):\n ioctx = IOContext(\n self.test_dir,\n {\n \"output\": \"dataset\",\n \"output_config\": {\n \"format\": \"json\",\n \"path\": self.test_dir,\n \"max_num_samples_per_file\": 2,\n },\n },\n 0,\n None,\n )\n writer = DatasetWriter(ioctx, compress_columns=[\"obs\"])\n self.assertEqual(len(os.listdir(self.test_dir)), 0)\n writer.write(SAMPLES)\n writer.write(SAMPLES)\n self.assertEqual(len(os.listdir(self.test_dir)), 1)\n\n def test_write_simple(self):\n ioctx = IOContext(self.test_dir, {}, 0, None)\n writer = JsonWriter(\n self.test_dir, ioctx, max_file_size=1000, compress_columns=[\"obs\"]\n )\n self.assertEqual(len(os.listdir(self.test_dir)), 0)\n writer.write(SAMPLES)\n writer.write(SAMPLES)\n self.assertEqual(len(os.listdir(self.test_dir)), 1)\n\n def test_write_file_uri(self):\n ioctx = IOContext(self.test_dir, {}, 0, None)\n writer = JsonWriter(\n \"file://\" + self.test_dir,\n ioctx,\n max_file_size=1000,\n compress_columns=[\"obs\"],\n )\n self.assertEqual(len(os.listdir(self.test_dir)), 0)\n writer.write(SAMPLES)\n writer.write(SAMPLES)\n self.assertEqual(len(os.listdir(self.test_dir)), 1)\n\n def test_write_paginate(self):\n ioctx = IOContext(self.test_dir, {}, 0, None)\n writer = JsonWriter(\n self.test_dir, ioctx, max_file_size=5000, compress_columns=[\"obs\"]\n )\n self.assertEqual(len(os.listdir(self.test_dir)), 0)\n for _ in range(100):\n writer.write(SAMPLES)\n num_files = len(os.listdir(self.test_dir))\n\n # Pagination can't really be predicted:\n # On travis, it seems to create only 2 files, but sometimes also\n # 6, or 7. 
12 or 13 usually on a Mac locally.\n # Reasons: Different compressions, file-size interpretations,\n # json writers?\n assert num_files >= 2, \"Expected >= 2 files, but found {} ({})\".format(\n num_files, os.listdir(self.test_dir)\n )\n\n def test_read_write(self):\n ioctx = IOContext(self.test_dir, {}, 0, None)\n writer = JsonWriter(\n self.test_dir, ioctx, max_file_size=5000, compress_columns=[\"obs\"]\n )\n for i in range(100):\n writer.write(make_sample_batch(i))\n reader = JsonReader(self.test_dir + \"/*.json\")\n seen_a = set()\n seen_o = set()\n for i in range(1000):\n batch = reader.next()\n seen_a.add(batch[\"actions\"][0])\n seen_o.add(batch[\"obs\"][0])\n self.assertGreater(len(seen_a), 90)\n self.assertLess(len(seen_a), 101)\n self.assertGreater(len(seen_o), 90)\n self.assertLess(len(seen_o), 101)\n\n def test_skips_over_empty_lines_and_files(self):\n open(self.test_dir + \"/empty\", \"w\").close()\n with open(self.test_dir + \"/f1\", \"w\") as f:\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(_to_json(make_sample_batch(0), []))\n with open(self.test_dir + \"/f2\", \"w\") as f:\n f.write(_to_json(make_sample_batch(1), []))\n f.write(\"\\n\")\n reader = JsonReader(\n [\n self.test_dir + \"/empty\",\n self.test_dir + \"/f1\",\n \"file://\" + self.test_dir + \"/f2\",\n ]\n )\n seen_a = set()\n for i in range(100):\n batch = reader.next()\n seen_a.add(batch[\"actions\"][0])\n self.assertEqual(len(seen_a), 2)\n\n def test_skips_over_corrupted_lines(self):\n with open(self.test_dir + \"/f1\", \"w\") as f:\n f.write(_to_json(make_sample_batch(0), []))\n f.write(\"\\n\")\n f.write(_to_json(make_sample_batch(1), []))\n f.write(\"\\n\")\n f.write(_to_json(make_sample_batch(2), []))\n f.write(\"\\n\")\n f.write(_to_json(make_sample_batch(3), []))\n f.write(\"\\n\")\n f.write(\"{..corrupted_json_record\")\n reader = JsonReader(\n [\n self.test_dir + \"/f1\",\n ]\n )\n seen_a = set()\n for i in range(10):\n batch = reader.next()\n seen_a.add(batch[\"actions\"][0])\n self.assertEqual(len(seen_a), 4)\n\n def test_abort_on_all_empty_inputs(self):\n open(self.test_dir + \"/empty\", \"w\").close()\n reader = JsonReader(\n [\n self.test_dir + \"/empty\",\n ]\n )\n self.assertRaises(ValueError, lambda: reader.next())\n with open(self.test_dir + \"/empty1\", \"w\") as f:\n for _ in range(100):\n f.write(\"\\n\")\n with open(self.test_dir + \"/empty2\", \"w\") as f:\n for _ in range(100):\n f.write(\"\\n\")\n reader = JsonReader(\n [\n self.test_dir + \"/empty1\",\n self.test_dir + \"/empty2\",\n ]\n )\n self.assertRaises(ValueError, lambda: reader.next())\n\n def test_custom_input_registry(self):\n config = {\"input_config\": {}}\n ioctx = IOContext(self.test_dir, config, 0, None)\n\n class CustomInputReader(InputReader):\n def __init__(self, ioctx: IOContext):\n self.ioctx = ioctx\n\n def next(self):\n return 0\n\n def input_creator(ioctx: IOContext):\n return ShuffledInput(CustomInputReader(ioctx))\n\n register_input(\"custom_input\", input_creator)\n self.assertTrue(registry_contains_input(\"custom_input\"))\n creator = registry_get_input(\"custom_input\")\n self.assertIsNotNone(creator)\n reader = creator(ioctx)\n self.assertIsInstance(reader, ShuffledInput)\n self.assertEqual(reader.next(), 0)\n self.assertEqual(ioctx.log_dir, self.test_dir)\n self.assertEqual(ioctx.config, config)\n self.assertEqual(ioctx.worker_index, 0)\n self.assertIsNone(ioctx.worker)\n self.assertEqual(ioctx.input_config, {})\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n\n 
sys.exit(pytest.main([\"-v\", __file__]))\n"
] | [
[
"numpy.random.bytes",
"numpy.random.get_state",
"numpy.random.set_state",
"numpy.random.seed"
],
[
"numpy.asarray",
"numpy.float32"
],
[
"numpy.mean",
"numpy.sum"
],
[
"numpy.mean",
"numpy.zeros_like"
],
[
"pandas.DataFrame"
],
[
"numpy.isnan",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gitter-badger/mlmodels | [
"f70f1da7434e8855eed50adc67b49cc169f2ea24",
"f70f1da7434e8855eed50adc67b49cc169f2ea24",
"f70f1da7434e8855eed50adc67b49cc169f2ea24",
"f70f1da7434e8855eed50adc67b49cc169f2ea24",
"f70f1da7434e8855eed50adc67b49cc169f2ea24",
"f70f1da7434e8855eed50adc67b49cc169f2ea24",
"f70f1da7434e8855eed50adc67b49cc169f2ea24",
"f70f1da7434e8855eed50adc67b49cc169f2ea24",
"f70f1da7434e8855eed50adc67b49cc169f2ea24"
] | [
"mlmodels/model_tf/misc/tf_nlp/speech-to-text/1.tacotron/train.py",
"mlmodels/model_tf/misc/tfcode2/Attention/3.hierarchical.py",
"mlmodels/model_tf/misc/tf_nlp/neural-machine-translation/4.basic-seq2seq-api-greedy.py",
"mlmodels/model_tf/misc/tfcode2/Seq-to-Seq/estimator/estimator.py",
"mlmodels/model_tf/misc/tfcode/deep-learning/18.lstm-attention-scaleddot.py",
"mlmodels/model_tf/misc/tf_nlp/text-classification/62.residual-network-bahdanau.py",
"mlmodels/model_tf/misc/tf_nlp/text-to-speech/3.seq2seq-luong.py",
"mlmodels/model_tch/vae/network_test.py",
"mlmodels/model_tf/util.py"
] | [
"# coding: utf-8\n\n# In[1]:\n\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nfrom model import Model\nfrom setting import batch_size, get_cached, idx2char, n_mels, reduction_factor, text2idx\n\n# In[2]:\n\n\npaths, lengths, texts = [], [], []\ntext_files = [f for f in os.listdir(\"spectrogram\") if f.endswith(\".npy\")]\nfor fpath in text_files:\n with open(\"../data/\" + fpath.replace(\"npy\", \"txt\")) as fopen:\n text, converted = text2idx(fopen.read())\n texts.append(converted)\n lengths.append(len(text))\n paths.append(fpath.replace(\".npy\", \"\"))\n\n\n# In[3]:\n\n\ndef dynamic_batching(paths):\n spectrograms, max_x = [], 0\n for path in paths:\n spectrograms.append(np.load(\"spectrogram/\" + path + \".npy\"))\n if spectrograms[-1].shape[0] > max_x:\n max_x = spectrograms[-1].shape[0]\n return spectrograms, max_x\n\n\n# In[4]:\n\n\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\nmodel = Model()\nsess.run(tf.global_variables_initializer())\n\n\n# In[5]:\n\n\nfor e in range(30):\n pbar = tqdm(range(0, len(text_files), batch_size), desc=\"minibatch loop\")\n total_cost, total_acc = 0, 0\n for k in pbar:\n index = min(k + batch_size, len(text_files))\n files, max_x = dynamic_batching(paths[k:index])\n max_y = max(lengths[k:index])\n batch_x = np.zeros((len(files), max_x, n_mels * reduction_factor))\n batch_y = np.zeros((len(files), max_y))\n for n in range(len(files)):\n batch_x[n] = np.pad(files[n], ((max_x - files[n].shape[0], 0), (0, 0)), mode=\"constant\")\n batch_y[n] = np.pad(texts[k + n], ((0, max_y - len(texts[k + n]))), mode=\"constant\")\n _, acc, cost = sess.run(\n [model.optimizer, model.accuracy, model.cost],\n feed_dict={model.X: batch_x, model.Y: batch_y, model.Y_seq_len: lengths[k:index]},\n )\n total_cost += cost\n total_acc += acc\n pbar.set_postfix(cost=cost, accuracy=acc)\n total_cost /= len(text_files) / batch_size\n total_acc /= len(text_files) / batch_size\n\n print(\"epoch %d, avg loss %f, avg acc %f\" % (e + 1, total_cost, total_acc))\n\n\nempty_y = np.zeros((1, len(batch_y[0])))\npredicted = \"\".join(\n [\n idx2char[c]\n for c in sess.run(model.preds, feed_dict={model.X: batch_x[:1], model.Y: empty_y})[0]\n if idx2char[c] not in [\"S\", \"E\"]\n ]\n)\nground_truth = \"\".join([idx2char[c] for c in batch_y[0] if idx2char[c] not in [\"S\", \"E\"]])\nprint(\"predicted: %s, ground truth: %s\" % (predicted, ground_truth))\n",
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.cross_validation import train_test_split\n\nfrom utils import *\n\n# In[2]:\n\n\ntrainset = sklearn.datasets.load_files(container_path=\"data\", encoding=\"UTF-8\")\ntrainset.data, trainset.target = separate_dataset(trainset, 1.0)\nprint(trainset.target_names)\nprint(len(trainset.data))\nprint(len(trainset.target))\n\n\n# In[3]:\n\n\nONEHOT = np.zeros((len(trainset.data), len(trainset.target_names)))\nONEHOT[np.arange(len(trainset.data)), trainset.target] = 1.0\ntrain_X, test_X, train_Y, test_Y, train_onehot, test_onehot = train_test_split(\n trainset.data, trainset.target, ONEHOT, test_size=0.2\n)\n\n\n# In[4]:\n\n\nconcat = \" \".join(trainset.data).split()\nvocabulary_size = len(list(set(concat)))\ndata, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)\nprint(\"vocab from size: %d\" % (vocabulary_size))\nprint(\"Most common words\", count[4:10])\nprint(\"Sample data\", data[:10], [rev_dictionary[i] for i in data[:10]])\n\n\n# In[5]:\n\n\nGO = dictionary[\"GO\"]\nPAD = dictionary[\"PAD\"]\nEOS = dictionary[\"EOS\"]\nUNK = dictionary[\"UNK\"]\n\n\n# In[6]:\n\n\nclass Model:\n def __init__(\n self,\n size_layer,\n num_layers,\n embedded_size,\n dict_size,\n dimension_output,\n learning_rate,\n maxlen,\n ):\n def cells(reuse=False):\n return tf.nn.rnn_cell.GRUCell(size_layer, reuse=reuse)\n\n self.X = tf.placeholder(tf.int32, [None, None])\n self.Y = tf.placeholder(tf.float32, [None, dimension_output])\n encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, embedded_size], -1, 1))\n encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)\n rnn_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])\n outputs, _ = tf.nn.dynamic_rnn(rnn_cells, encoder_embedded, dtype=tf.float32)\n\n w_omega = tf.Variable(tf.random_normal([size_layer, maxlen], stddev=0.1))\n b_omega = tf.Variable(tf.random_normal([maxlen], stddev=0.1))\n u_omega = tf.Variable(tf.random_normal([maxlen], stddev=0.1))\n\n with tf.name_scope(\"v\"):\n v = tf.tanh(tf.tensordot(outputs, w_omega, axes=1) + b_omega)\n\n vu = tf.tensordot(v, u_omega, axes=1, name=\"vu\")\n alphas = tf.nn.softmax(vu, name=\"alphas\")\n\n outputs = tf.reduce_sum(outputs * tf.expand_dims(alphas, -1), 1)\n W = tf.get_variable(\n \"w\", shape=(size_layer, dimension_output), initializer=tf.orthogonal_initializer()\n )\n b = tf.get_variable(\"b\", shape=(dimension_output), initializer=tf.zeros_initializer())\n self.logits = tf.matmul(outputs, W) + b\n self.cost = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y)\n )\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n correct_pred = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n# In[7]:\n\n\nsize_layer = 128\nnum_layers = 2\nembedded_size = 128\ndimension_output = len(trainset.target_names)\nlearning_rate = 1e-3\nmaxlen = 50\nbatch_size = 128\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\nmodel = Model(\n size_layer, num_layers, embedded_size, len(dictionary), dimension_output, learning_rate, maxlen\n)\nsess.run(tf.global_variables_initializer())\n\n\n# In[8]:\n\n\nEARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 5, 0, 0, 0\nwhile True:\n lasttime = time.time()\n if CURRENT_CHECKPOINT == EARLY_STOPPING:\n print(\"break 
epoch:%d\\n\" % (EPOCH))\n break\n\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n for i in range(0, (len(train_X) // batch_size) * batch_size, batch_size):\n batch_x = str_idx(train_X[i : i + batch_size], dictionary, maxlen)\n acc, loss, _ = sess.run(\n [model.accuracy, model.cost, model.optimizer],\n feed_dict={model.X: batch_x, model.Y: train_onehot[i : i + batch_size]},\n )\n train_loss += loss\n train_acc += acc\n\n for i in range(0, (len(test_X) // batch_size) * batch_size, batch_size):\n batch_x = str_idx(test_X[i : i + batch_size], dictionary, maxlen)\n acc, loss = sess.run(\n [model.accuracy, model.cost],\n feed_dict={model.X: batch_x, model.Y: test_onehot[i : i + batch_size]},\n )\n test_loss += loss\n test_acc += acc\n\n train_loss /= len(train_X) // batch_size\n train_acc /= len(train_X) // batch_size\n test_loss /= len(test_X) // batch_size\n test_acc /= len(test_X) // batch_size\n\n if test_acc > CURRENT_ACC:\n print(\"epoch: %d, pass acc: %f, current acc: %f\" % (EPOCH, CURRENT_ACC, test_acc))\n CURRENT_ACC = test_acc\n CURRENT_CHECKPOINT = 0\n else:\n CURRENT_CHECKPOINT += 1\n\n print(\"time taken:\", time.time() - lasttime)\n print(\n \"epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n\"\n % (EPOCH, train_loss, train_acc, test_loss, test_acc)\n )\n EPOCH += 1\n\n\n# In[9]:\n\n\nlogits = sess.run(model.logits, feed_dict={model.X: str_idx(test_X, dictionary, maxlen)})\nprint(\n metrics.classification_report(test_Y, np.argmax(logits, 1), target_names=trainset.target_names)\n)\n\n\n# In[ ]:\n",
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport collections\nimport os\nimport re\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.utils import shuffle\n\n# In[2]:\n\n\ndef build_dataset(words, n_words, atleast=1):\n count = [[\"PAD\", 0], [\"GO\", 1], [\"EOS\", 2], [\"UNK\", 3]]\n counter = collections.Counter(words).most_common(n_words)\n counter = [i for i in counter if i[1] >= atleast]\n count.extend(counter)\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n if index == 0:\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reversed_dictionary\n\n\n# In[3]:\n\n\nwith open(\"english-train\", \"r\") as fopen:\n text_from = fopen.read().lower().split(\"\\n\")[:-1]\nwith open(\"vietnam-train\", \"r\") as fopen:\n text_to = fopen.read().lower().split(\"\\n\")[:-1]\nprint(\"len from: %d, len to: %d\" % (len(text_from), len(text_to)))\n\n\n# In[4]:\n\n\nconcat_from = \" \".join(text_from).split()\nvocabulary_size_from = len(list(set(concat_from)))\ndata_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(\n concat_from, vocabulary_size_from\n)\nprint(\"vocab from size: %d\" % (vocabulary_size_from))\nprint(\"Most common words\", count_from[4:10])\nprint(\"Sample data\", data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])\n\n\n# In[5]:\n\n\nconcat_to = \" \".join(text_to).split()\nvocabulary_size_to = len(list(set(concat_to)))\ndata_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)\nprint(\"vocab to size: %d\" % (vocabulary_size_to))\nprint(\"Most common words\", count_to[4:10])\nprint(\"Sample data\", data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])\n\n\n# In[6]:\n\n\nGO = dictionary_from[\"GO\"]\nPAD = dictionary_from[\"PAD\"]\nEOS = dictionary_from[\"EOS\"]\nUNK = dictionary_from[\"UNK\"]\n\n\n# In[7]:\n\n\nfor i in range(len(text_to)):\n text_to[i] += \" EOS\"\n\n\n# In[8]:\n\n\nclass Chatbot:\n def __init__(\n self,\n size_layer,\n num_layers,\n embedded_size,\n from_dict_size,\n to_dict_size,\n learning_rate,\n batch_size,\n ):\n def cells(reuse=False):\n return tf.nn.rnn_cell.BasicRNNCell(size_layer, reuse=reuse)\n\n self.X = tf.placeholder(tf.int32, [None, None])\n self.Y = tf.placeholder(tf.int32, [None, None])\n self.X_seq_len = tf.count_nonzero(self.X, 1, dtype=tf.int32)\n self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype=tf.int32)\n batch_size = tf.shape(self.X)[0]\n\n encoder_embedding = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))\n decoder_embedding = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))\n\n _, encoder_state = tf.nn.dynamic_rnn(\n cell=tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)]),\n inputs=tf.nn.embedding_lookup(encoder_embedding, self.X),\n sequence_length=self.X_seq_len,\n dtype=tf.float32,\n )\n main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])\n decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)\n dense = tf.layers.Dense(to_dict_size)\n decoder_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])\n\n training_helper = tf.contrib.seq2seq.TrainingHelper(\n inputs=tf.nn.embedding_lookup(decoder_embedding, decoder_input),\n sequence_length=self.Y_seq_len,\n time_major=False,\n )\n 
training_decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=decoder_cells,\n helper=training_helper,\n initial_state=encoder_state,\n output_layer=dense,\n )\n training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder=training_decoder,\n impute_finished=True,\n maximum_iterations=tf.reduce_max(self.Y_seq_len),\n )\n self.training_logits = training_decoder_output.rnn_output\n\n predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n embedding=decoder_embedding,\n start_tokens=tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),\n end_token=EOS,\n )\n predicting_decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=decoder_cells,\n helper=predicting_helper,\n initial_state=encoder_state,\n output_layer=dense,\n )\n predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder=predicting_decoder,\n impute_finished=True,\n maximum_iterations=2 * tf.reduce_max(self.X_seq_len),\n )\n self.predicting_ids = predicting_decoder_output.sample_id\n\n masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)\n self.cost = tf.contrib.seq2seq.sequence_loss(\n logits=self.training_logits, targets=self.Y, weights=masks\n )\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n y_t = tf.argmax(self.training_logits, axis=2)\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(y_t, masks)\n mask_label = tf.boolean_mask(self.Y, masks)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n# In[9]:\n\n\nsize_layer = 256\nnum_layers = 2\nembedded_size = 128\nlearning_rate = 0.001\nbatch_size = 16\nepoch = 20\n\n\n# In[10]:\n\n\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\nmodel = Chatbot(\n size_layer,\n num_layers,\n embedded_size,\n len(dictionary_from),\n len(dictionary_to),\n learning_rate,\n batch_size,\n)\nsess.run(tf.global_variables_initializer())\n\n\n# In[11]:\n\n\ndef str_idx(corpus, dic):\n X = []\n for i in corpus:\n ints = []\n for k in i.split():\n ints.append(dic.get(k, UNK))\n X.append(ints)\n return X\n\n\n# In[12]:\n\n\nX = str_idx(text_from, dictionary_from)\nY = str_idx(text_to, dictionary_to)\n\n\n# In[13]:\n\n\ndef pad_sentence_batch(sentence_batch, pad_int):\n padded_seqs = []\n seq_lens = []\n max_sentence_len = max([len(sentence) for sentence in sentence_batch])\n for sentence in sentence_batch:\n padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))\n seq_lens.append(len(sentence))\n return padded_seqs, seq_lens\n\n\n# In[14]:\n\n\nfor i in range(epoch):\n total_loss, total_accuracy = 0, 0\n for k in range(0, len(text_to), batch_size):\n index = min(k + batch_size, len(text_to))\n batch_x, seq_x = pad_sentence_batch(X[k:index], PAD)\n batch_y, seq_y = pad_sentence_batch(Y[k:index], PAD)\n predicted, accuracy, loss, _ = sess.run(\n [model.predicting_ids, model.accuracy, model.cost, model.optimizer],\n feed_dict={model.X: batch_x, model.Y: batch_y},\n )\n total_loss += loss\n total_accuracy += accuracy\n total_loss /= len(text_to) / batch_size\n total_accuracy /= len(text_to) / batch_size\n print(\"epoch: %d, avg loss: %f, avg accuracy: %f\" % (i + 1, total_loss, total_accuracy))\n\n\n# In[15]:\n\n\nfor i in range(len(batch_x)):\n print(\"row %d\" % (i + 1))\n print(\n \"QUESTION:\", \" \".join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0, 1, 2, 3]])\n )\n print(\n \"REAL ANSWER:\",\n \" 
\".join([rev_dictionary_to[n] for n in batch_y[i] if n not in [0, 1, 2, 3]]),\n )\n print(\n \"PREDICTED ANSWER:\",\n \" \".join([rev_dictionary_to[n] for n in predicted[i] if n not in [0, 1, 2, 3]]),\n \"\\n\",\n )\n\n\n# In[ ]:\n",
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport collections\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.layers.core import Dense\n\n# In[2]:\n\n\ndef build_dataset(words, n_words):\n count = [[\"GO\", 0], [\"PAD\", 1], [\"EOS\", 2], [\"UNK\", 3]]\n count.extend(collections.Counter(words).most_common(n_words - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n if index == 0:\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n return data, count, dictionary, reversed_dictionary\n\n\n# In[3]:\n\n\nwith open(\"data/from\", \"r\") as fopen:\n text_from = fopen.read().lower().split(\"\\n\")\nwith open(\"data/to\", \"r\") as fopen:\n text_to = fopen.read().lower().split(\"\\n\")\nprint(\"len from: %d, len to: %d\" % (len(text_from), len(text_to)))\n\n\n# In[4]:\n\n\nconcat_from = \" \".join(text_from).split()\nvocabulary_size_from = len(list(set(concat_from)))\ndata_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(\n concat_from, vocabulary_size_from\n)\nprint(\"vocab from size: %d\" % (vocabulary_size_from))\nprint(\"Most common words\", count_from[3:10])\nprint(\"Sample data\", data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])\n\n\n# In[5]:\n\n\nconcat_to = \" \".join(text_to).split()\nvocabulary_size_to = len(list(set(concat_to)))\ndata_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)\nprint(\"vocab to size: %d\" % (vocabulary_size_to))\nprint(\"Most common words\", count_to[3:10])\nprint(\"Sample data\", data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])\n\n\n# In[6]:\n\n\nGO = dictionary_from[\"GO\"]\nPAD = dictionary_from[\"PAD\"]\nEOS = dictionary_from[\"EOS\"]\nUNK = dictionary_from[\"UNK\"]\n\n\n# In[7]:\n\n\nclass Chatbot:\n def __init__(\n self,\n size_layer,\n num_layers,\n embedded_size,\n batch_size,\n from_dict_size,\n to_dict_size,\n grad_clip=5.0,\n ):\n self.size_layer = size_layer\n self.num_layers = num_layers\n self.embedded_size = embedded_size\n self.grad_clip = grad_clip\n self.from_dict_size = from_dict_size\n self.to_dict_size = to_dict_size\n self.batch_size = batch_size\n self.model = tf.estimator.Estimator(self.model_fn)\n\n def lstm_cell(self, reuse=False):\n return tf.nn.rnn_cell.LSTMCell(self.size_layer, reuse=reuse)\n\n def seq2seq(self, x_dict, reuse):\n x = x_dict[\"x\"]\n x_seq_len = x_dict[\"x_len\"]\n with tf.variable_scope(\"encoder\", reuse=reuse):\n encoder_embedding = (\n tf.get_variable(\"encoder_embedding\")\n if reuse\n else tf.get_variable(\n \"encoder_embedding\",\n [self.from_dict_size, self.embedded_size],\n tf.float32,\n tf.random_uniform_initializer(-1.0, 1.0),\n )\n )\n _, encoder_state = tf.nn.dynamic_rnn(\n cell=tf.nn.rnn_cell.MultiRNNCell(\n [self.lstm_cell() for _ in range(self.num_layers)]\n ),\n inputs=tf.nn.embedding_lookup(encoder_embedding, x),\n sequence_length=x_seq_len,\n dtype=tf.float32,\n )\n encoder_state = tuple(encoder_state[-1] for _ in range(self.num_layers))\n if not reuse:\n y = x_dict[\"y\"]\n y_seq_len = x_dict[\"y_len\"]\n with tf.variable_scope(\"decoder\", reuse=reuse):\n decoder_embedding = tf.get_variable(\n \"decoder_embedding\",\n [self.to_dict_size, self.embedded_size],\n tf.float32,\n tf.random_uniform_initializer(-1.0, 1.0),\n )\n helper = 
tf.contrib.seq2seq.TrainingHelper(\n inputs=tf.nn.embedding_lookup(decoder_embedding, y),\n sequence_length=y_seq_len,\n time_major=False,\n )\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=tf.nn.rnn_cell.MultiRNNCell(\n [self.lstm_cell() for _ in range(self.num_layers)]\n ),\n helper=helper,\n initial_state=encoder_state,\n output_layer=tf.layers.Dense(self.to_dict_size),\n )\n decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder=decoder,\n impute_finished=True,\n maximum_iterations=tf.reduce_max(y_seq_len),\n )\n return decoder_output.rnn_output\n else:\n with tf.variable_scope(\"decoder\", reuse=reuse):\n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n embedding=tf.get_variable(\"decoder_embedding\"),\n start_tokens=tf.tile(tf.constant([GO], dtype=tf.int32), [tf.shape(x)[0]]),\n end_token=EOS,\n )\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=tf.nn.rnn_cell.MultiRNNCell(\n [self.lstm_cell(reuse=True) for _ in range(self.num_layers)]\n ),\n helper=helper,\n initial_state=encoder_state,\n output_layer=tf.layers.Dense(self.to_dict_size, _reuse=reuse),\n )\n decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder=decoder,\n impute_finished=True,\n maximum_iterations=2 * tf.reduce_max(x_seq_len),\n )\n return decoder_output.sample_id\n\n def model_fn(self, features, labels, mode):\n logits = self.seq2seq(features, reuse=False)\n predictions = self.seq2seq(features, reuse=True)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n y_seq_len = features[\"y_len\"]\n masks = tf.sequence_mask(y_seq_len, tf.reduce_max(y_seq_len), dtype=tf.float32)\n loss_op = tf.contrib.seq2seq.sequence_loss(logits=logits, targets=labels, weights=masks)\n params = tf.trainable_variables()\n gradients = tf.gradients(loss_op, params)\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, self.grad_clip)\n train_op = tf.train.AdamOptimizer().apply_gradients(\n zip(clipped_gradients, params), global_step=tf.train.get_global_step()\n )\n acc_op = tf.metrics.accuracy(labels=labels, predictions=predictions)\n estim_specs = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss_op,\n train_op=train_op,\n eval_metric_ops={\"accuracy\": acc_op},\n )\n return estim_specs\n\n\n# In[8]:\n\n\nsize_layer = 256\nnum_layers = 2\nembedded_size = 256\nbatch_size = len(text_from)\nmodel = Chatbot(\n size_layer,\n num_layers,\n embedded_size,\n batch_size,\n vocabulary_size_from + 4,\n vocabulary_size_to + 4,\n)\n\n\n# In[9]:\n\n\ndef str_idx(corpus, dic):\n X = []\n for i in corpus:\n ints = []\n for k in i.split():\n try:\n ints.append(dic[k])\n except Exception as e:\n print(e)\n ints.append(UNK)\n X.append(ints)\n return X\n\n\nX = str_idx(text_from, dictionary_from)\nY = str_idx(text_to, dictionary_to)\n\n\n# In[10]:\n\n\ndef pad_sentence_batch(sentence_batch, pad_int):\n padded_seqs = []\n seq_lens = []\n max_sentence_len = max([len(sentence) for sentence in sentence_batch])\n for sentence in sentence_batch:\n padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))\n seq_lens.append(len(sentence))\n return np.array(padded_seqs).astype(np.int32), np.array(seq_lens).astype(np.int32)\n\n\n# In[13]:\n\n\nbatch_x, seq_x = pad_sentence_batch(X, PAD)\nbatch_y, seq_y = pad_sentence_batch(Y, PAD)\n\n\n# In[14]:\n\n\ninput_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": batch_x, \"x_len\": seq_x, \"y\": batch_y, \"y_len\": seq_y},\n y=batch_y,\n batch_size=batch_size,\n 
num_epochs=100,\n shuffle=True,\n)\nmodel.model.train(input_fn)\n\n\n# In[ ]:\n",
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom datetime import datetime, timedelta\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport tensorflow as tf\nfrom sklearn.preprocessing import MinMaxScaler\n\nsns.set()\n\n\n# In[2]:\n\n\ndf = pd.read_csv(\"../dataset/GOOG-year.csv\")\ndate_ori = pd.to_datetime(df.iloc[:, 0]).tolist()\ndf.head()\n\n\n# In[3]:\n\n\nminmax = MinMaxScaler().fit(df.iloc[:, 1:].astype(\"float32\"))\ndf_log = minmax.transform(df.iloc[:, 1:].astype(\"float32\"))\ndf_log = pd.DataFrame(df_log)\ndf_log.head()\n\n\n# In[4]:\n\n\nnum_layers = 1\nsize_layer = 128\ntimestamp = 5\nepoch = 700\ndropout_rate = 0.9\nfuture_day = 50\n\n\n# In[5]:\n\n\nclass Model:\n def __init__(\n self, learning_rate, num_layers, size, size_layer, output_size, seq_len, forget_bias=0.1\n ):\n def lstm_cell(size_layer):\n return tf.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple=False)\n\n def global_pooling(x, func):\n batch_size = tf.shape(self.X)[0]\n num_units = x.get_shape().as_list()[-1]\n x = func(x, x.get_shape().as_list()[1], 1)\n x = tf.reshape(x, [batch_size, num_units])\n return x\n\n rnn_cells = tf.nn.rnn_cell.MultiRNNCell(\n [lstm_cell(size_layer) for _ in range(num_layers)], state_is_tuple=False\n )\n self.X = tf.placeholder(tf.float32, (None, None, size))\n self.Y = tf.placeholder(tf.float32, (None, output_size))\n drop = tf.contrib.rnn.DropoutWrapper(rnn_cells, output_keep_prob=forget_bias)\n self.hidden_layer = tf.placeholder(tf.float32, (None, num_layers * 2 * size_layer))\n self.outputs, self.last_state = tf.nn.dynamic_rnn(\n drop, self.X, initial_state=self.hidden_layer, dtype=tf.float32, time_major=True\n )\n self.outputs = self.outputs[:, :, 0]\n x = self.X\n masks = tf.sign(self.outputs)\n batch_size = tf.shape(self.X)[0]\n align = tf.matmul(self.X, tf.transpose(self.X, [0, 2, 1]))\n paddings = tf.fill(tf.shape(align), float(\"-inf\"))\n k_masks = tf.tile(tf.expand_dims(masks, 1), [1, seq_len, 1])\n align = tf.where(tf.equal(k_masks, 0), paddings, align)\n align = tf.nn.tanh(align)\n q_masks = tf.to_float(masks)\n q_masks = tf.tile(tf.expand_dims(q_masks, -1), [1, 1, seq_len])\n align *= q_masks\n\n x = tf.matmul(align, x)\n g_max = global_pooling(x, tf.layers.max_pooling1d)\n g_avg = global_pooling(x, tf.layers.average_pooling1d)\n self.outputs = tf.concat([g_max, g_avg], 1)\n self.logits = tf.layers.dense(self.outputs, output_size)\n self.cost = tf.reduce_mean(tf.square(self.Y - self.logits))\n self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)\n\n\n# In[6]:\n\n\ntf.reset_default_graph()\nmodelnn = Model(\n 0.01, num_layers, df_log.shape[1], size_layer, df_log.shape[1], timestamp, dropout_rate\n)\nsess = tf.InteractiveSession()\nsess.run(tf.global_variables_initializer())\n\n\n# In[7]:\n\n\nfor i in range(epoch):\n init_value = np.zeros((timestamp, num_layers * 2 * size_layer))\n total_loss = 0\n for k in range(0, (df_log.shape[0] // timestamp) * timestamp, timestamp):\n batch_x = np.expand_dims(df_log.iloc[k : k + timestamp].values, axis=0)\n batch_y = df_log.iloc[k + 1 : k + timestamp + 1].values\n last_state, _, loss = sess.run(\n [modelnn.last_state, modelnn.optimizer, modelnn.cost],\n feed_dict={modelnn.X: batch_x, modelnn.Y: batch_y, modelnn.hidden_layer: init_value},\n )\n init_value = last_state\n total_loss += loss\n total_loss /= df_log.shape[0] // timestamp\n if (i + 1) % 100 == 0:\n print(\"epoch:\", i + 1, \"avg loss:\", total_loss)\n\n\n# In[8]:\n\n\noutput_predict = 
np.zeros((df_log.shape[0] + future_day, df_log.shape[1]))\noutput_predict[0] = df_log.iloc[0]\nupper_b = (df_log.shape[0] // timestamp) * timestamp\ninit_value = np.zeros((timestamp, num_layers * 2 * size_layer))\n\nfor k in range(0, (df_log.shape[0] // timestamp) * timestamp, timestamp):\n out_logits, last_state = sess.run(\n [modelnn.logits, modelnn.last_state],\n feed_dict={\n modelnn.X: np.expand_dims(df_log.iloc[k : k + timestamp], axis=0),\n modelnn.hidden_layer: init_value,\n },\n )\n output_predict[k + 1 : k + timestamp + 1] = out_logits\n init_value = last_state\n\ndf_log.loc[df_log.shape[0]] = out_logits[-1]\ndate_ori.append(date_ori[-1] + timedelta(days=1))\n\n\n# In[9]:\n\n\nfor i in range(future_day - 1):\n out_logits, last_state = sess.run(\n [modelnn.logits, modelnn.last_state],\n feed_dict={\n modelnn.X: np.expand_dims(df_log.iloc[-timestamp:], axis=0),\n modelnn.hidden_layer: init_value,\n },\n )\n init_value = last_state\n output_predict[df_log.shape[0]] = out_logits[-1]\n df_log.loc[df_log.shape[0]] = out_logits[-1]\n date_ori.append(date_ori[-1] + timedelta(days=1))\n\n\n# In[10]:\n\n\ndf_log = minmax.inverse_transform(df_log.values)\ndate_ori = pd.Series(date_ori).dt.strftime(date_format=\"%Y-%m-%d\").tolist()\n\n\n# In[11]:\n\n\ndef anchor(signal, weight):\n buffer = []\n last = signal[0]\n for i in signal:\n smoothed_val = last * weight + (1 - weight) * i\n buffer.append(smoothed_val)\n last = smoothed_val\n return buffer\n\n\n# In[12]:\n\n\ncurrent_palette = sns.color_palette(\"Paired\", 12)\nfig = plt.figure(figsize=(15, 10))\nax = plt.subplot(111)\nx_range_original = np.arange(df.shape[0])\nx_range_future = np.arange(df_log.shape[0])\nax.plot(x_range_original, df.iloc[:, 1], label=\"true Open\", color=current_palette[0])\nax.plot(x_range_future, anchor(df_log[:, 0], 0.5), label=\"predict Open\", color=current_palette[1])\nax.plot(x_range_original, df.iloc[:, 2], label=\"true High\", color=current_palette[2])\nax.plot(x_range_future, anchor(df_log[:, 1], 0.5), label=\"predict High\", color=current_palette[3])\nax.plot(x_range_original, df.iloc[:, 3], label=\"true Low\", color=current_palette[4])\nax.plot(x_range_future, anchor(df_log[:, 2], 0.5), label=\"predict Low\", color=current_palette[5])\nax.plot(x_range_original, df.iloc[:, 4], label=\"true Close\", color=current_palette[6])\nax.plot(x_range_future, anchor(df_log[:, 3], 0.5), label=\"predict Close\", color=current_palette[7])\nax.plot(x_range_original, df.iloc[:, 5], label=\"true Adj Close\", color=current_palette[8])\nax.plot(\n x_range_future, anchor(df_log[:, 4], 0.5), label=\"predict Adj Close\", color=current_palette[9]\n)\nbox = ax.get_position()\nax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])\nax.legend(loc=\"upper center\", bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=5)\nplt.title(\"overlap stock market\")\nplt.xticks(x_range_future[::30], date_ori[::30])\nplt.show()\n\n\n# In[13]:\n\n\nfig = plt.figure(figsize=(20, 8))\nplt.subplot(1, 2, 1)\nplt.plot(x_range_original, df.iloc[:, 1], label=\"true Open\", color=current_palette[0])\nplt.plot(x_range_original, df.iloc[:, 2], label=\"true High\", color=current_palette[2])\nplt.plot(x_range_original, df.iloc[:, 3], label=\"true Low\", color=current_palette[4])\nplt.plot(x_range_original, df.iloc[:, 4], label=\"true Close\", color=current_palette[6])\nplt.plot(x_range_original, df.iloc[:, 5], label=\"true Adj Close\", color=current_palette[8])\nplt.xticks(x_range_original[::60], df.iloc[:, 
0].tolist()[::60])\nplt.legend()\nplt.title(\"true market\")\nplt.subplot(1, 2, 2)\nplt.plot(x_range_future, anchor(df_log[:, 0], 0.5), label=\"predict Open\", color=current_palette[1])\nplt.plot(x_range_future, anchor(df_log[:, 1], 0.5), label=\"predict High\", color=current_palette[3])\nplt.plot(x_range_future, anchor(df_log[:, 2], 0.5), label=\"predict Low\", color=current_palette[5])\nplt.plot(x_range_future, anchor(df_log[:, 3], 0.5), label=\"predict Close\", color=current_palette[7])\nplt.plot(\n x_range_future, anchor(df_log[:, 4], 0.5), label=\"predict Adj Close\", color=current_palette[9]\n)\nplt.xticks(x_range_future[::60], date_ori[::60])\nplt.legend()\nplt.title(\"predict market\")\nplt.show()\n\n\n# In[14]:\n\n\nfig = plt.figure(figsize=(15, 10))\nax = plt.subplot(111)\nax.plot(x_range_original, df.iloc[:, -1], label=\"true Volume\")\nax.plot(x_range_future, anchor(df_log[:, -1], 0.5), label=\"predict Volume\")\nbox = ax.get_position()\nax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])\nax.legend(loc=\"upper center\", bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True, ncol=5)\nplt.xticks(x_range_future[::30], date_ori[::30])\nplt.title(\"overlap market volume\")\nplt.show()\n\n\n# In[15]:\n\n\nfig = plt.figure(figsize=(20, 8))\nplt.subplot(1, 2, 1)\nplt.plot(x_range_original, df.iloc[:, -1], label=\"true Volume\")\nplt.xticks(x_range_original[::60], df.iloc[:, 0].tolist()[::60])\nplt.legend()\nplt.title(\"true market volume\")\nplt.subplot(1, 2, 2)\nplt.plot(x_range_future, anchor(df_log[:, -1], 0.5), label=\"predict Volume\")\nplt.xticks(x_range_future[::60], date_ori[::60])\nplt.legend()\nplt.title(\"predict market volume\")\nplt.show()\n\n\n# In[ ]:\n",
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport random\nimport time\n\nimport tensorflow as tf\nfrom sklearn.cross_validation import train_test_split\nfrom tqdm import tqdm\n\nfrom utils import *\n\n# In[2]:\n\n\ntrainset = sklearn.datasets.load_files(container_path=\"data\", encoding=\"UTF-8\")\ntrainset.data, trainset.target = separate_dataset(trainset, 1.0)\nprint(trainset.target_names)\nprint(len(trainset.data))\nprint(len(trainset.target))\n\n\n# In[3]:\n\n\nconcat = \" \".join(trainset.data).split()\nvocabulary_size = len(list(set(concat)))\ndata, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)\nprint(\"vocab from size: %d\" % (vocabulary_size))\nprint(\"Most common words\", count[4:10])\nprint(\"Sample data\", data[:10], [rev_dictionary[i] for i in data[:10]])\n\n\n# In[4]:\n\n\nGO = dictionary[\"GO\"]\nPAD = dictionary[\"PAD\"]\nEOS = dictionary[\"EOS\"]\nUNK = dictionary[\"UNK\"]\n\n\n# In[5]:\n\n\nsize_layer = 128\ndimension_output = len(trainset.target_names)\nmaxlen = 50\nbatch_size = 32\n\n\n# In[6]:\n\n\nclass Attention:\n def __init__(self, hidden_size):\n self.hidden_size = hidden_size\n self.dense_layer = tf.layers.Dense(hidden_size)\n self.v = tf.random_normal([hidden_size], mean=0, stddev=1 / np.sqrt(hidden_size))\n\n def score(self, hidden_tensor, encoder_outputs):\n energy = tf.nn.tanh(self.dense_layer(tf.concat([hidden_tensor, encoder_outputs], 2)))\n energy = tf.transpose(energy, [0, 2, 1])\n batch_size = tf.shape(encoder_outputs)[0]\n v = tf.expand_dims(tf.tile(tf.expand_dims(self.v, 0), [batch_size, 1]), 1)\n energy = tf.matmul(v, energy)\n return tf.squeeze(energy, 1)\n\n def __call__(self, hidden, encoder_outputs):\n seq_len = tf.shape(encoder_outputs)[1]\n batch_size = tf.shape(encoder_outputs)[0]\n H = tf.tile(tf.expand_dims(hidden, 1), [1, seq_len, 1])\n attn_energies = self.score(H, encoder_outputs)\n return tf.expand_dims(tf.nn.softmax(attn_energies), 1)\n\n\nclass Model:\n def __init__(\n self,\n dict_size,\n size_layers,\n learning_rate,\n num_classes,\n maxlen,\n num_blocks=3,\n block_size=128,\n ):\n self.X = tf.placeholder(tf.int32, [None, maxlen])\n self.Y = tf.placeholder(tf.int32, [None])\n embeddings = tf.Variable(tf.random_uniform([dict_size, size_layers], -1, 1))\n embedded = tf.nn.embedding_lookup(embeddings, self.X)\n self.attention = Attention(size_layers)\n\n def residual_block(x, size, rate, block):\n with tf.variable_scope(\"block_%d_%d\" % (block, rate), reuse=False):\n attn_weights = self.attention(tf.reduce_sum(x, axis=1), x)\n conv_filter = tf.layers.conv1d(\n attn_weights,\n x.shape[2] // 4,\n kernel_size=size,\n strides=1,\n padding=\"same\",\n dilation_rate=rate,\n activation=tf.nn.tanh,\n )\n conv_gate = tf.layers.conv1d(\n x,\n x.shape[2] // 4,\n kernel_size=size,\n strides=1,\n padding=\"same\",\n dilation_rate=rate,\n activation=tf.nn.sigmoid,\n )\n out = tf.multiply(conv_filter, conv_gate)\n out = tf.layers.conv1d(\n out, block_size, kernel_size=1, strides=1, padding=\"same\", activation=tf.nn.tanh\n )\n return tf.add(x, out), out\n\n forward = tf.layers.conv1d(embedded, block_size, kernel_size=1, strides=1, padding=\"SAME\")\n zeros = tf.zeros_like(forward)\n for i in range(num_blocks):\n for r in [1, 2, 4, 8, 16]:\n forward, s = residual_block(forward, size=7, rate=r, block=i)\n zeros = tf.add(zeros, s)\n self.logits = tf.reduce_sum(\n tf.layers.conv1d(forward, num_classes, kernel_size=1, strides=1, padding=\"SAME\"), 1\n )\n self.cost = tf.reduce_mean(\n 
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y)\n )\n self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)\n correct_pred = tf.equal(tf.argmax(self.logits, 1, output_type=tf.int32), self.Y)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n# In[7]:\n\n\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\nmodel = Model(len(dictionary), size_layer, 1e-3, dimension_output, maxlen)\nsess.run(tf.global_variables_initializer())\n\n\n# In[8]:\n\n\nvectors = str_idx(trainset.data, dictionary, maxlen)\ntrain_X, test_X, train_Y, test_Y = train_test_split(vectors, trainset.target, test_size=0.2)\n\n\n# In[9]:\n\n\nEARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 3, 0, 0, 0\n\nwhile True:\n lasttime = time.time()\n if CURRENT_CHECKPOINT == EARLY_STOPPING:\n print(\"break epoch:%d\\n\" % (EPOCH))\n break\n\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n pbar = tqdm(range(0, len(train_X), batch_size), desc=\"train minibatch loop\")\n for i in pbar:\n batch_x = train_X[i : min(i + batch_size, train_X.shape[0])]\n batch_y = train_Y[i : min(i + batch_size, train_X.shape[0])]\n batch_x_expand = np.expand_dims(batch_x, axis=1)\n acc, cost, _ = sess.run(\n [model.accuracy, model.cost, model.optimizer],\n feed_dict={model.Y: batch_y, model.X: batch_x},\n )\n assert not np.isnan(cost)\n train_loss += cost\n train_acc += acc\n pbar.set_postfix(cost=cost, accuracy=acc)\n\n pbar = tqdm(range(0, len(test_X), batch_size), desc=\"test minibatch loop\")\n for i in pbar:\n batch_x = test_X[i : min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])]\n batch_x_expand = np.expand_dims(batch_x, axis=1)\n acc, cost = sess.run(\n [model.accuracy, model.cost], feed_dict={model.Y: batch_y, model.X: batch_x}\n )\n test_loss += cost\n test_acc += acc\n pbar.set_postfix(cost=cost, accuracy=acc)\n\n train_loss /= len(train_X) / batch_size\n train_acc /= len(train_X) / batch_size\n test_loss /= len(test_X) / batch_size\n test_acc /= len(test_X) / batch_size\n\n if test_acc > CURRENT_ACC:\n print(\"epoch: %d, pass acc: %f, current acc: %f\" % (EPOCH, CURRENT_ACC, test_acc))\n CURRENT_ACC = test_acc\n CURRENT_CHECKPOINT = 0\n else:\n CURRENT_CHECKPOINT += 1\n\n print(\"time taken:\", time.time() - lasttime)\n print(\n \"epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n\"\n % (EPOCH, train_loss, train_acc, test_loss, test_acc)\n )\n EPOCH += 1\n\n\n# In[10]:\n\n\nreal_Y, predict_Y = [], []\n\npbar = tqdm(range(0, len(test_X), batch_size), desc=\"validation minibatch loop\")\nfor i in pbar:\n batch_x = test_X[i : min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])]\n predict_Y += np.argmax(\n sess.run(model.logits, feed_dict={model.X: batch_x, model.Y: batch_y}), 1\n ).tolist()\n real_Y += batch_y\n\n\n# In[11]:\n\n\nprint(metrics.classification_report(real_Y, predict_Y, target_names=trainset.target_names))\n\n\n# In[ ]:\n",
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\n\nimport tensorflow as tf\nfrom scipy.io.wavfile import write\nfrom tqdm import tqdm\n\nfrom utils import *\n\n# In[2]:\n\n\ndef prenet(inputs, num_units=None, is_training=True, scope=\"prenet\"):\n if num_units is None:\n num_units = [embed_size, embed_size // 2]\n with tf.variable_scope(scope):\n outputs = tf.layers.dense(inputs, units=num_units[0], activation=tf.nn.relu, name=\"dense1\")\n outputs = tf.layers.dropout(\n outputs, rate=dropout_rate, training=is_training, name=\"dropout1\"\n )\n outputs = tf.layers.dense(outputs, units=num_units[1], activation=tf.nn.relu, name=\"dense2\")\n outputs = tf.layers.dropout(\n outputs, rate=dropout_rate, training=is_training, name=\"dropout2\"\n )\n return outputs\n\n\ndef highwaynet(inputs, num_units=None, scope=\"highwaynet\"):\n if not num_units:\n num_units = inputs.get_shape()[-1]\n with tf.variable_scope(scope):\n H = tf.layers.dense(inputs, units=num_units, activation=tf.nn.relu, name=\"dense1\")\n T = tf.layers.dense(\n inputs,\n units=num_units,\n activation=tf.nn.sigmoid,\n bias_initializer=tf.constant_initializer(-1.0),\n name=\"dense2\",\n )\n outputs = H * T + inputs * (1.0 - T)\n return outputs\n\n\ndef conv1d_banks(inputs, K=16, is_training=True, scope=\"conv1d_banks\"):\n with tf.variable_scope(scope):\n outputs = tf.layers.conv1d(inputs, embed_size // 2, 1, padding=\"SAME\")\n for k in range(2, K + 1):\n with tf.variable_scope(\"num_{}\".format(k)):\n output = tf.layers.conv1d(inputs, embed_size // 2, k, padding=\"SAME\")\n outputs = tf.concat((outputs, output), -1)\n outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=is_training))\n return outputs\n\n\nclass Model:\n def __init__(self, num_layers, size_layers, learning_rate=1e-3, dropout=1.0):\n self.X = tf.placeholder(tf.int32, (None, None))\n self.training = tf.placeholder(tf.bool, None)\n lookup_table = tf.get_variable(\n \"lookup_table\",\n dtype=tf.float32,\n shape=[len(vocab), size_layers],\n initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),\n )\n lookup_table = tf.concat((tf.zeros(shape=[1, size_layers]), lookup_table[1:, :]), 0)\n forward = tf.nn.embedding_lookup(lookup_table, self.X)\n self.Y = tf.placeholder(tf.float32, (None, None, n_mels * resampled))\n self.decoder_inputs = tf.concat((tf.zeros_like(self.Y[:, :1, :]), self.Y[:, :-1, :]), 1)\n self.decoder_inputs = self.decoder_inputs[:, :, -n_mels:]\n self.Z = tf.placeholder(tf.float32, (None, None, fourier_window_size // 2 + 1))\n\n batch_size = tf.shape(self.X)[0]\n seq_lens = tf.count_nonzero(tf.reduce_sum(self.decoder_inputs, -1), 1, dtype=tf.int32) + 1\n\n def cells(reuse=False):\n return tf.contrib.rnn.DropoutWrapper(\n tf.nn.rnn_cell.LSTMCell(\n size_layers, initializer=tf.orthogonal_initializer(), reuse=reuse\n ),\n state_keep_prob=dropout,\n output_keep_prob=dropout,\n )\n\n def attention(encoder_out, seq_len, reuse=False):\n attention_mechanism = tf.contrib.seq2seq.LuongAttention(\n num_units=size_layers, memory=encoder_out, memory_sequence_length=seq_len\n )\n return tf.contrib.seq2seq.AttentionWrapper(\n cell=tf.nn.rnn_cell.MultiRNNCell([cells(reuse) for _ in range(num_layers)]),\n attention_mechanism=attention_mechanism,\n attention_layer_size=size_layers,\n alignment_history=True,\n )\n\n encoder_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])\n encoder_out, encoder_state = tf.nn.dynamic_rnn(\n cell=encoder_cells, inputs=forward, sequence_length=seq_lens, dtype=tf.float32\n 
)\n\n encoder_state = tuple(encoder_state[-1] for _ in range(num_layers))\n decoder_cell = attention(encoder_out, seq_lens)\n dense_layer = tf.layers.Dense(n_mels * resampled)\n\n training_helper = tf.contrib.seq2seq.TrainingHelper(\n inputs=self.decoder_inputs, sequence_length=seq_lens, time_major=False\n )\n training_decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=decoder_cell,\n helper=training_helper,\n initial_state=decoder_cell.zero_state(batch_size, tf.float32).clone(\n cell_state=encoder_state\n ),\n output_layer=dense_layer,\n )\n training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder=training_decoder,\n impute_finished=True,\n maximum_iterations=tf.reduce_max(seq_lens),\n )\n\n self.Y_hat = training_decoder_output.rnn_output\n out_decoder2 = tf.reshape(self.Y_hat, [tf.shape(self.Y_hat)[0], -1, n_mels])\n dec = conv1d_banks(out_decoder2, K=decoder_num_banks, is_training=self.training)\n dec = tf.layers.max_pooling1d(dec, pool_size=2, strides=1, padding=\"same\")\n dec = tf.layers.conv1d(dec, embed_size // 2, 3, name=\"decoder-conv1-1\", padding=\"SAME\")\n dec = tf.nn.relu(tf.layers.batch_normalization(dec, training=self.training))\n dec = tf.layers.conv1d(dec, embed_size // 2, 3, name=\"decoder-conv1-2\", padding=\"SAME\")\n dec = tf.layers.batch_normalization(dec, training=self.training)\n dec = tf.layers.dense(dec, embed_size // 2)\n for i in range(4):\n dec = highwaynet(\n dec, num_units=embed_size // 2, scope=\"decoder-highwaynet-{}\".format(i)\n )\n with tf.variable_scope(\"decoder-gru\", reuse=False):\n cell = tf.contrib.rnn.GRUCell(embed_size // 2)\n cell_bw = tf.contrib.rnn.GRUCell(embed_size // 2)\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, dec, dtype=tf.float32)\n outputs = tf.concat(outputs, 2)\n self.Z_hat = tf.layers.dense(outputs, 1 + fourier_window_size // 2)\n self.loss1 = tf.reduce_mean(tf.abs(self.Y_hat - self.Y))\n self.loss2 = tf.reduce_mean(tf.abs(self.Z_hat - self.Z))\n self.loss = self.loss1 + self.loss2\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)\n\n\n# In[3]:\n\n\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\n\nsize_layers = 128\nlearning_rate = 1e-3\nnum_layers = 2\n\nmodel = Model(num_layers, size_layers, learning_rate)\nsess.run(tf.global_variables_initializer())\n\n\n# In[4]:\n\n\npaths, lengths, texts, raw_texts = [], [], [], []\ntext_files = [f for f in os.listdir(\"mel\") if f.endswith(\".npy\")]\nfor fpath in text_files:\n with open(\"%s/%s\" % (path, fpath.replace(\"npy\", \"txt\"))) as fopen:\n text = fopen.read()\n paths.append(fpath.replace(\".npy\", \"\"))\n text = text_normalize(text)\n raw_texts.append(text)\n text = text + \"E\"\n texts.append(np.array([char2idx[char] for char in text], np.int32))\n lengths.append(len(text))\n\n\n# In[5]:\n\n\ndef dynamic_batching(paths):\n files, max_y, max_z = [], 0, 0\n for n in range(len(paths)):\n files.append(get_cached(paths[n]))\n if files[-1][0].shape[0] > max_y:\n max_y = files[-1][0].shape[0]\n if files[-1][1].shape[0] > max_z:\n max_z = files[-1][1].shape[0]\n return files, max_y, max_z\n\n\n# In[6]:\n\n\nEPOCH = 30\nfor i in range(EPOCH):\n pbar = tqdm(range(0, len(paths), batch_size), desc=\"minibatch loop\")\n for k in pbar:\n index = min(k + batch_size, len(paths))\n files, max_y, max_z = dynamic_batching(paths[k:index])\n max_x = max(lengths[k:index])\n batch_x = np.zeros((batch_size, max_x))\n batch_y = np.zeros((batch_size, max_y, n_mels * resampled))\n batch_z = np.zeros((batch_size, 
max_z, fourier_window_size // 2 + 1))\n for n in range(len(files)):\n batch_x[n, :] = np.pad(\n texts[k + n], ((0, max_x - texts[k + n].shape[0])), mode=\"constant\"\n )\n batch_y[n, :, :] = np.pad(\n files[n][0], ((0, max_y - files[n][0].shape[0]), (0, 0)), mode=\"constant\"\n )\n batch_z[n, :, :] = np.pad(\n files[n][1], ((0, max_z - files[n][1].shape[0]), (0, 0)), mode=\"constant\"\n )\n _, cost = sess.run(\n [model.optimizer, model.loss],\n feed_dict={model.X: batch_x, model.Y: batch_y, model.Z: batch_z, model.training: True},\n )\n pbar.set_postfix(cost=cost)\n\n\n# In[7]:\n\n\ny_hat = np.zeros((1, 50, n_mels * resampled), np.float32)\nfor j in tqdm(range(50)):\n _y_hat = sess.run(model.Y_hat, {model.X: [texts[0]], model.Y: y_hat})\n y_hat[:, j, :] = _y_hat[:, j, :]\n\n\n# In[8]:\n\n\nmags = sess.run(model.Z_hat, {model.Y_hat: y_hat, model.training: False})\n\n\n# In[9]:\n\n\naudio = spectrogram2wav(mags[0])\n\n\n# In[10]:\n\n\nprint(\"saving: %s\" % (raw_texts[0]))\nwrite(os.path.join(\"test.wav\"), sample_rate, audio)\n\n\n# In[ ]:\n",
"from torchsummary import summary\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom torch.autograd import Variable\n\ndef reparametrize(mu, logvar):\n std = logvar.div(2).exp()\n eps = Variable(std.data.new(std.size()).normal_())\n return mu + std*eps\n\nclass View(nn.Module):\n def __init__(self, size):\n super(View, self).__init__()\n self.size = size\n\n def forward(self, tensor):\n return tensor.view(self.size)\n\n\nclass BetaVAE_new(nn.Module):\n \"\"\"BetaVAE_new's last conv is to 4 [-1, 4, 1, 1].\"\"\"\n\n def __init__(self, z_dim=10, nc=3):\n super(BetaVAE_new, self).__init__()\n self.z_dim = z_dim\n self.nc = nc\n self.encoder = nn.Sequential(\n nn.Conv2d(nc, 32, 4, 2, 1), # B, 32, 32, 32\n nn.ReLU(True),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.ReLU(True),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.ReLU(True),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.ReLU(True),\n nn.Conv2d(64, 4, 4, 1), # B, 4, 1, 1\n nn.ReLU(True),\n View((-1, 4*1*1)), # B, 4\n nn.Linear(4, z_dim*2), # B, z_dim*2\n )\n self.decoder = nn.Sequential(\n nn.Linear(z_dim, 4), # B, 4\n View((-1, 4, 1, 1)), # B, 4, 1, 1\n nn.ReLU(True),\n nn.ConvTranspose2d(4, 64, 4), # B, 64, 4, 4\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32\n nn.ReLU(True),\n nn.ConvTranspose2d(32, nc, 4, 2, 1), # B, nc, 64, 64\n )\n\n self.weight_init()\n\n def weight_init(self):\n for block in self._modules:\n for m in self._modules[block]:\n kaiming_init(m)\n\n def forward(self, x):\n distributions = self._encode(x)\n mu = distributions[:, :self.z_dim]\n logvar = distributions[:, self.z_dim:]\n z = reparametrize(mu, logvar)\n x_recon = self._decode(z)\n\n return x_recon, mu, logvar\n\n def _encode(self, x):\n return self.encoder(x)\n\n def _decode(self, z):\n return self.decoder(z)\n\nclass BetaVAE_H(nn.Module):\n \"\"\"Model proposed in original beta-VAE paper(Higgins et al, ICLR, 2017).\"\"\"\n\n def __init__(self, z_dim=10, nc=3):\n super(BetaVAE_H, self).__init__()\n self.z_dim = z_dim\n self.nc = nc\n self.encoder = nn.Sequential(\n nn.Conv2d(nc, 32, 4, 2, 1), # B, 32, 32, 32\n nn.ReLU(True),\n nn.Conv2d(32, 32, 4, 2, 1), # B, 32, 16, 16\n nn.ReLU(True),\n nn.Conv2d(32, 64, 4, 2, 1), # B, 64, 8, 8\n nn.ReLU(True),\n nn.Conv2d(64, 64, 4, 2, 1), # B, 64, 4, 4\n nn.ReLU(True),\n nn.Conv2d(64, 256, 4, 1), # B, 256, 1, 1\n nn.ReLU(True),\n View((-1, 256*1*1)), # B, 256\n nn.Linear(256, z_dim*2), # B, z_dim*2\n )\n self.decoder = nn.Sequential(\n nn.Linear(z_dim, 256), # B, 256\n View((-1, 256, 1, 1)), # B, 256, 1, 1\n nn.ReLU(True),\n nn.ConvTranspose2d(256, 64, 4), # B, 64, 4, 4\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 64, 4, 2, 1), # B, 64, 8, 8\n nn.ReLU(True),\n nn.ConvTranspose2d(64, 32, 4, 2, 1), # B, 32, 16, 16\n nn.ReLU(True),\n nn.ConvTranspose2d(32, 32, 4, 2, 1), # B, 32, 32, 32\n nn.ReLU(True),\n nn.ConvTranspose2d(32, nc, 4, 2, 1), # B, nc, 64, 64\n )\n\n self.weight_init()\n\n def weight_init(self):\n for block in self._modules:\n for m in self._modules[block]:\n kaiming_init(m)\n\n def forward(self, x):\n distributions = self._encode(x)\n mu = distributions[:, :self.z_dim]\n logvar = distributions[:, self.z_dim:]\n z = reparametrize(mu, logvar)\n x_recon = self._decode(z)\n\n return x_recon, mu, logvar\n\n def _encode(self, x):\n return self.encoder(x)\n\n def _decode(self, z):\n return self.decoder(z)\ndef 
kaiming_init(m):\n if isinstance(m, (nn.Linear, nn.Conv2d)):\n init.kaiming_normal(m.weight)\n if m.bias is not None:\n m.bias.data.fill_(0)\n elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):\n m.weight.data.fill_(1)\n if m.bias is not None:\n m.bias.data.fill_(0)\n\n\ndef normal_init(m, mean, std):\n if isinstance(m, (nn.Linear, nn.Conv2d)):\n m.weight.data.normal_(mean, std)\n if m.bias.data is not None:\n m.bias.data.zero_()\n elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):\n m.weight.data.fill_(1)\n if m.bias.data is not None:\n m.bias.data.zero_()\n\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") \nmodel = BetaVAE_new().to(device)\n\nsummary(model, (3, 64, 64))\n",
"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"DNC util ops and modules.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport os, sys, inspect\n\n\ndef os_module_path():\n current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n parent_dir = os.path.dirname(current_dir)\n # sys.path.insert(0, parent_dir)\n return parent_dir\n\n\ndef os_file_path(data_path):\n from pathlib import Path\n data_path = os.path.join(Path(__file__).parent.parent.absolute(), data_path)\n print(data_path)\n return data_path\n\n\ndef os_package_root_path(filepath, sublevel=0, path_add=\"\"):\n \"\"\"\n :param filepath:\n :param sublevel: level 0 : current path, level 1 : 1 level above\n :param path_add:\n :return:\n \"\"\"\n from pathlib import Path\n path = Path(filepath).parent\n for i in range(1, sublevel + 1):\n path = path.parent\n \n path = os.path.join(path.absolute(), path_add)\n return path\n\n\n# print(\"check\", os_package_root_path(__file__, sublevel=1) )\n\n\ndef batch_invert_permutation(permutations):\n \"\"\"Returns batched `tf.invert_permutation` for every row in `permutations`.\"\"\"\n with tf.name_scope(\"batch_invert_permutation\", values=[permutations]):\n unpacked = tf.unstack(permutations)\n inverses = [tf.invert_permutation(permutation) for permutation in unpacked]\n return tf.stack(inverses)\n\n\ndef batch_gather(values, indices):\n \"\"\"Returns batched `tf.gather` for every row in the input.\"\"\"\n with tf.name_scope(\"batch_gather\", values=[values, indices]):\n unpacked = zip(tf.unstack(values), tf.unstack(indices))\n result = [tf.gather(value, index) for value, index in unpacked]\n return tf.stack(result)\n\n\ndef one_hot(length, index):\n \"\"\"Return an nd array of given `length` filled with 0s and a 1 at `index`.\"\"\"\n result = np.zeros(length)\n result[index] = 1\n return result\n\n\ndef set_root_dir():\n current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n parent_dir = os.path.dirname(current_dir)\n sys.path.insert(0, parent_dir)\n return parent_dir\n\n\n"
] | [
[
"numpy.pad",
"tensorflow.InteractiveSession",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"numpy.load"
],
[
"tensorflow.nn.dynamic_rnn",
"sklearn.cross_validation.train_test_split",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.cast",
"tensorflow.orthogonal_initializer",
"tensorflow.train.AdamOptimizer",
"tensorflow.reset_default_graph",
"numpy.argmax",
"tensorflow.name_scope",
"tensorflow.tensordot",
"tensorflow.argmax",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.matmul",
"tensorflow.InteractiveSession",
"tensorflow.zeros_initializer",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.expand_dims",
"tensorflow.random_uniform",
"tensorflow.random_normal"
],
[
"tensorflow.count_nonzero",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.train.AdamOptimizer",
"tensorflow.strided_slice",
"tensorflow.boolean_mask",
"tensorflow.reset_default_graph",
"tensorflow.argmax",
"tensorflow.contrib.seq2seq.BasicDecoder",
"tensorflow.fill",
"tensorflow.InteractiveSession",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.layers.Dense",
"tensorflow.global_variables_initializer",
"tensorflow.nn.embedding_lookup",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.contrib.seq2seq.sequence_loss",
"tensorflow.random_uniform",
"tensorflow.nn.rnn_cell.BasicRNNCell"
],
[
"tensorflow.get_variable",
"tensorflow.metrics.accuracy",
"tensorflow.train.AdamOptimizer",
"tensorflow.random_uniform_initializer",
"tensorflow.gradients",
"tensorflow.train.get_global_step",
"tensorflow.trainable_variables",
"tensorflow.estimator.Estimator",
"tensorflow.shape",
"tensorflow.layers.Dense",
"numpy.array",
"tensorflow.nn.embedding_lookup",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.contrib.seq2seq.sequence_loss",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.clip_by_global_norm",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.variable_scope",
"tensorflow.estimator.inputs.numpy_input_fn"
],
[
"matplotlib.pyplot.legend",
"tensorflow.nn.dynamic_rnn",
"pandas.to_datetime",
"tensorflow.sign",
"tensorflow.concat",
"numpy.expand_dims",
"pandas.Series",
"tensorflow.equal",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"tensorflow.train.AdamOptimizer",
"sklearn.preprocessing.MinMaxScaler",
"pandas.read_csv",
"numpy.arange",
"tensorflow.layers.dense",
"tensorflow.reset_default_graph",
"matplotlib.pyplot.subplot",
"tensorflow.to_float",
"tensorflow.square",
"numpy.zeros",
"matplotlib.pyplot.figure",
"tensorflow.matmul",
"tensorflow.InteractiveSession",
"matplotlib.pyplot.title",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.nn.tanh",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.show",
"tensorflow.transpose",
"tensorflow.contrib.rnn.DropoutWrapper",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.reshape",
"tensorflow.expand_dims",
"matplotlib.pyplot.xticks"
],
[
"tensorflow.layers.conv1d",
"sklearn.cross_validation.train_test_split",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.train.AdamOptimizer",
"tensorflow.squeeze",
"tensorflow.reset_default_graph",
"tensorflow.add",
"tensorflow.argmax",
"tensorflow.matmul",
"tensorflow.InteractiveSession",
"tensorflow.shape",
"tensorflow.layers.Dense",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.zeros_like",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.multiply",
"tensorflow.expand_dims",
"tensorflow.variable_scope",
"tensorflow.random_uniform"
],
[
"tensorflow.layers.conv1d",
"tensorflow.nn.dynamic_rnn",
"tensorflow.concat",
"tensorflow.contrib.rnn.GRUCell",
"tensorflow.zeros",
"tensorflow.layers.dropout",
"tensorflow.reduce_sum",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.orthogonal_initializer",
"tensorflow.train.AdamOptimizer",
"tensorflow.layers.max_pooling1d",
"tensorflow.layers.batch_normalization",
"tensorflow.contrib.seq2seq.LuongAttention",
"tensorflow.layers.dense",
"tensorflow.truncated_normal_initializer",
"tensorflow.reset_default_graph",
"tensorflow.InteractiveSession",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.layers.Dense",
"tensorflow.global_variables_initializer",
"tensorflow.zeros_like",
"tensorflow.nn.embedding_lookup",
"tensorflow.contrib.seq2seq.TrainingHelper",
"tensorflow.reduce_max",
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"tensorflow.abs"
],
[
"torch.nn.init.kaiming_normal",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.nn.ReLU"
],
[
"tensorflow.invert_permutation",
"tensorflow.unstack",
"tensorflow.stack",
"tensorflow.gather",
"tensorflow.name_scope",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
uhh-lt/semeval2019-hhmm | [
"b746b0fb8ab3b957d399276cb354e950f0ef30ed",
"b746b0fb8ab3b957d399276cb354e950f0ef30ed"
] | [
"utils.py",
"ud2csv.py"
] | [
"import pandas as pd\nfrom pathlib import Path\n\n\ndef df_to_csv(df, path):\n df.to_csv(path, sep='\\t', index=False, encoding='utf-8')\n\n\ndef csv_to_df(path):\n df = pd.read_csv(path, sep='\\t', dtype=str, encoding='utf-8')\n return df\n\n\ndef max_arguments(task):\n fp = open(task, 'r')\n lines_args = fp.readlines()\n maxT = 0\n for line in lines_args:\n tokens = len(line.split(' '))\n if tokens > maxT:\n maxT = tokens\n return maxT - 3 # context_id, verb pos, verb-frame\n\n\ndef max_frameArguments(dataset):\n dir = \"./semeval_data\"\n task21_auto = dir + \"/dev/auto/task-2.1.auto.txt\"\n task21_dev = dir + \"/dev/task-2.1.txt\"\n task21_test =dir+\"/test/task-2.1.txt\"\n\n\n\n if dataset == 'dev':\n task21 = task21_dev\n elif dataset == 'auto':\n task21 = task21_auto\n elif dataset == 'test':\n task21 = task21_test\n\n\n return max_arguments(task21)\n# ------------------------------------------------------------- df input from txt\nimport ud2csv\n\ndir = \"./semeval_data\"\nud_gold = dir+\"/dep-stx/pos-gold-dep-auto.conll.txt\"\n# -----------------------------------\ndef task_to_df(task, dataset):\n\n if Path('./input/train_task{}_{}.csv'.format(task, dataset)).exists():\n return csv_to_df('./input/train_task{}_{}.csv'.format(task, dataset))\n else:\n if task==1:\n return ud2csv.task1_to_df(dir+'/{}/task-1.txt'.format(dataset), ud_gold)\n if task ==22:\n return ud2csv.task22_to_df(dir + '/{}/task-2.2.txt'.format(dataset), ud_gold)\n\n\ndef task1_to_df_gd(dataset):\n if Path('./input/train_task{}_{}.csv'.format(1, dataset)).exists():\n return csv_to_df('./input/gd_task{}_{}.csv'.format(1, dataset))\n\n else:\n return ud2csv.task1_to_df_gd(dir+'/{}/task-1.txt'.format(dataset), ud_gold)\n\n\ndef task22_baselines(dataset, gr='in'):\n\n if Path('./input/all_grammaticalLabels_{}.csv'.format(dataset)).exists():\n df_task22 = csv_to_df('./input/all_grammaticalLabels_{}.csv'.format(dataset))\n else:\n df_task22 = ud2csv.task22_to_df_withFrameArgsDependencies(dir+'/{}/task-2.2.txt'.format(dataset), ud_gold)\n return ud2csv.getGrammaticalBaseline(df_task22, gr)\n",
"# http://universaldependencies.org/format.html\n\nimport pyconll\nimport pandas as pd\nfrom utils import df_to_csv, csv_to_df, max_arguments\n\n\ndef ud_sentence_to_dict(udfile): # just put space between each word token of tree, without any other processing\n\n data = pyconll.load_from_file(udfile)\n context_dict = {}\n\n for sentence in data:\n s_id = sentence.source.splitlines()[0] # sentence id\n _sentence = \"\"\n # print(_id)\n for token in sentence:\n\n wlist = token.conll().split()\n w = wlist[1] # word\n # --------------- Replace \n if w == '-LRB-':\n w = '('\n if w == '-RRB-':\n w = ')'\n # ----------------\n\n if not _sentence:\n _sentence = w\n continue\n\n else:\n _sentence = _sentence + ' ' + w\n\n # print(_sentence)\n context_dict[s_id] = _sentence\n return context_dict\n\n\ndef ud_to_dict(udfile):\n data = pyconll.load_from_file(udfile)\n\n tree_dict = {}\n for sentence in data:\n sent_lines = sentence.source.splitlines()\n s_id = sent_lines[0]\n tree_dict[s_id] = sent_lines[1:]\n\n return tree_dict\n\n\ndef dict_to_df(context_dict):\n # open the txt files\n labels = ['context_id', 'context']\n df = pd.DataFrame(columns=labels)\n context = []\n context_id = []\n for key, value in context_dict.items():\n context_id.append(key)\n context.append(value)\n df['context_id'] = context_id\n df['context'] = context\n return df\n\n\ndef task1_to_df(annotated_file, udfile):\n context_dict = ud_sentence_to_dict(udfile)\n # open the txt files\n fp = open(annotated_file, 'r')\n lines = fp.readlines()\n\n labels = ['context_id', 'word', 'word_index', 'gold_sense_id', 'predict_sense_id', 'context']\n df = pd.DataFrame(columns=labels)\n\n for i in range(len(lines)):\n frame_tokens = lines[i].split()\n sentence = context_dict.get(frame_tokens[0]) # frame_tokens[0] is conext_id\n word, sense = frame_tokens[2].split('.')\n df.loc[i] = [frame_tokens[0], word, frame_tokens[1], sense, '', sentence]\n\n return df\n\n\ndef task22_to_df(task22_file, udfile):\n context_dict = ud_sentence_to_dict(udfile)\n fp = open(task22_file, 'r')\n lines_args = fp.readlines()\n labels = ['context_id', 'word', 'gold_sense_id', 'predict_sense_id', 'context',\n 'verb', 'verb_index', 'role_index']\n\n df = pd.DataFrame(columns=labels)\n\n for i in range(len(lines_args)):\n\n frame_tokens = lines_args[i].split(' ')\n s_id = frame_tokens[0]\n sentence = context_dict[s_id]\n # example--> #20064023 2 begin.na Ford-:-1-:-Agent install-:-3-:-Theme\n verb_index, verb = frame_tokens[1], frame_tokens[2].split('.')[0]\n roles = frame_tokens[3:] # 0 is s_id, 1 is verb_position and 2 is verb.Frame\n s = 0\n for role in roles: # if a line does not have any role, it will be skipped\n\n role_lemma, role_index, gold_label = role.replace('\\n', '').split('-:-')\n row = ['{}_{}'.format(s_id, s), role_lemma, gold_label, '', sentence, verb, verb_index, role_index]\n df.loc[len(df)] = row\n s = s + 1\n return df\n\n\ndef task1_to_df_svo(task1_file, udfile):\n # open the txt files\n tree_dict = ud_to_dict(udfile)\n context_dict = ud_sentence_to_dict(udfile)\n\n fp = open(task1_file, 'r')\n lines = fp.readlines()\n\n labels = ['context_id', 'word', 'word_index', 'gold_sense_id', 'predict_sense_id', 'context',\n 'verb_lemma', 'subject_lemma', 'object_lemma',\n 'verb_index', 'subject_index', 'object_index']\n\n df = pd.DataFrame(columns=labels)\n for i in range(len(lines)):\n\n frame_tokens = lines[i].split(' ')\n s_id = frame_tokens[0]\n sent_lines = tree_dict[s_id]\n\n sentence = context_dict[s_id]\n word_tokens = sentence.split(' ')\n\n 
vb_index = frame_tokens[1].split('_')[0]\n subj_index = 0\n obj_index = 0\n vb = ''\n subj = ''\n obj = ''\n for token in sent_lines:\n nodes = token.split()\n if nodes[0] == vb_index:\n vb = nodes[2]\n\n if (nodes[6] == vb_index): # dependency at verb\n\n # subj\n if (nodes[7] == 'nsubj' or nodes[7] == 'nmod'):\n subj_index = nodes[0]\n subj = nodes[2]\n\n # obj\n if (nodes[7] == 'nsubjpass' or nodes[7] == 'dobj'):\n obj_index = nodes[0]\n obj = nodes[2]\n\n index = [int(vb_index), int(subj_index), int(obj_index)]\n lemmas = [vb, subj, obj]\n\n word, sense = frame_tokens[2].replace('\\n', '').split('.')\n\n df.loc[i] = [s_id, word, frame_tokens[1], sense, '', sentence, lemmas[0], lemmas[1], lemmas[2], index[0],\n index[1], index[2]]\n\n return df\n\n\ndef task1_to_df_gd(task1_file, udfile):\n # open the txt files\n tree_dict = ud_to_dict(udfile)\n context_dict = ud_sentence_to_dict(udfile)\n\n fp = open(task1_file, 'r')\n lines = fp.readlines()\n\n dependent_entries = []\n\n labels = ['context_id', 'word', 'word_index', 'gold_sense_id', 'predict_sense_id', 'context',\n 'verb_lemma', 'subject_lemma', 'object_lemma', 'iobj_lemma', 'csubj_lemma', 'ccomp_lemma',\n 'verb_index', 'subject_index', 'object_index', 'iobj_index', 'csubj_index', 'ccomp_index']\n df = pd.DataFrame(columns=labels)\n for i in range(len(lines)):\n\n frame_tokens = lines[i].split(' ')\n s_id = frame_tokens[0]\n sent_lines = tree_dict[s_id]\n\n sentence = context_dict[s_id]\n word_tokens = sentence.split(' ')\n\n vb_index = frame_tokens[1].split('_')[0]\n # print(vb_index)\n\n subj_index = 0\n obj_index = 0\n iobj_index = 0\n csubj_index = 0\n ccomp_index = 0\n\n vb = ''\n subj = ''\n obj = ''\n iobj = ''\n csubj = ''\n ccomp = ''\n\n for token in sent_lines:\n nodes = token.split()\n if nodes[0] == vb_index:\n vb = nodes[2]\n\n if (nodes[6] == vb_index):\n\n # subj\n if (nodes[7] == 'nsubj' or nodes[7] == 'nmod'):\n subj_index = nodes[0]\n subj = nodes[2]\n\n # obj\n if (nodes[7] == 'nsubjpass' or nodes[7] == 'dobj'):\n obj_index = nodes[0]\n obj = nodes[2]\n\n if nodes[7] == 'iobj':\n iobj_index = nodes[0]\n iobj = nodes[2]\n dependent_entries.append(nodes[7] + '-' + nodes[2])\n\n if nodes[7] == 'csubj':\n csubj_index = nodes[0]\n csubj = nodes[2]\n dependent_entries.append(nodes[7] + '-' + nodes[2])\n\n if nodes[7] == 'ccomp':\n ccomp_index = nodes[0]\n ccomp = nodes[2]\n dependent_entries.append(nodes[7] + '-' + nodes[2])\n\n vb_index = frame_tokens[1] # in test data verb index is multi_integers\n\n index = [vb_index, int(subj_index), int(obj_index), int(iobj_index), int(csubj_index), int(ccomp_index)]\n lemma = [vb, subj, obj, iobj, csubj, ccomp]\n\n word, sense = frame_tokens[2].replace('\\n', '').split('.')\n df.loc[i] = [s_id, word, frame_tokens[1], sense, '', sentence, lemma[0], lemma[1], lemma[2], lemma[3], lemma[4],\n lemma[5], index[0], index[1], index[2], index[3], index[4], index[5]]\n\n return df # , set(dependent_entries)\n\n\ndef task1_to_df_withFrameArgs(task21_file, udfile):\n context_dict = ud_sentence_to_dict(udfile)\n\n fp = open(task21_file, 'r')\n lines_args = fp.readlines()\n\n labels = ['context_id', 'word', 'word_index', 'gold_sense_id', 'predict_sense_id', 'context',\n 'verb_lemma', 'verb_index'\n ]\n\n mr = max_arguments(task21_file)\n for n in range(1, mr + 1):\n labels.append('{}{}_{}'.format('arg', n, 'lemma'))\n labels.append('{}{}_{}'.format('arg', n, 'index'))\n\n print(labels)\n df = pd.DataFrame(columns=labels)\n\n for i in range(len(lines_args)):\n\n frame_tokens = 
lines_args[i].split(' ')\n s_id = frame_tokens[0]\n\n sentence = context_dict[s_id]\n word_tokens = sentence.split(' ')\n\n # example--> Ford-:-1-:-Agent install-:-3-:-Activity\n word, sense = frame_tokens[2].replace('\\n', '').split('.')\n row = [s_id, word, frame_tokens[1], sense, '', sentence, word, frame_tokens[1]]\n\n roles = frame_tokens[3:] # 0 is s_id, 1 is verb_position and 2 is verb.Frame\n\n n = 1\n for role in roles:\n role_lemma, role_index = role.split('-:-')[0:2]\n row.append(role_lemma)\n row.append(role_index)\n\n n = n + 1\n for j in range(n, mr + 1):\n row.append('')\n row.append('0')\n\n df.loc[i] = row\n\n return df\n\n#-------------------------------------------------------------------------\ndef ud_sentences_to_file(udfile, target_file):\n di = ud_sentence_to_dict(udfile)\n df = dict_to_df(di)\n with open(target_file, 'w') as fp:\n for sent in df['context']:\n fp.write(sent + '\\n')\n\n\ndef task1_to_csv(task1_file, udfile, target_file):\n df = task1_to_df(task1_file, udfile)\n df_to_csv(df, target_file)\n return df\n\n\ndef task22_to_csv(task22_file, udfile, target_file):\n df = task22_to_df(task22_file, udfile)\n df_to_csv(df, target_file)\n return df\n\n\ndef task1_to_csv_svo(task1_file, udfile, target_file):\n df = task1_to_df_svo(task1_file, udfile)\n df_to_csv(df, target_file)\n return df\n\n\ndef task1_to_csv_gd(task1_file, udfile, target_file):\n df = task1_to_df_gd(task1_file, udfile)\n df_to_csv(df, target_file)\n return df\n\n\ndef task1_sentences_to_file(task1_file, udfile, target_file):\n df = task1_to_df(task1_file, udfile)\n with open(target_file, 'w') as fp:\n for sent in df['context']:\n fp.write(sent + '\\n')\n\n\n#-------------------------------------------------------------------------\ndef back_to_verb(source, prevsource, sent_lines, verb_index):\n if source == '0': # role is ROOT\n return sent_lines[int(prevsource)].split()[7]\n\n nextsource = sent_lines[int(source) - 1].split()[6]\n\n if nextsource == verb_index or nextsource == '0':\n # print(nextsource, verb_index)\n if nextsource != verb_index: # NOT RELATED TO TARGET VERB\n return sent_lines[int(prevsource) - 1].split()[7]\n else:\n return sent_lines[int(prevsource) - 1].split()[7]\n else:\n nextsource = sent_lines[int(source) - 1].split()[6]\n return back_to_verb(nextsource, source, sent_lines, verb_index)\n\n\ndef task22_to_df_withFrameArgsDependencies(task22_file, udfile):\n # open the txt files\n tree_dict = ud_to_dict(udfile)\n context_dict = ud_sentence_to_dict(udfile)\n\n fp = open(task22_file, 'r')\n lines_args = fp.readlines()\n\n labels = ['context_id', 'word', 'gold_sense_id', 'predict_sense_id', 'context',\n 'verb', 'verb_index', 'role_index', 'inbound_dependency', 'outbound_dependency']#, 'dependency_to_verb']\n\n labels.append('position_to_verb')\n labels.append('boolean_position_to_verb')\n\n mr = max_arguments(task22_file)\n\n df = pd.DataFrame(columns=labels)\n\n for i in range(len(lines_args)):\n\n frame_tokens = lines_args[i].split(' ')\n s_id = frame_tokens[0]\n sent_lines = tree_dict[s_id]\n\n sentence = context_dict[s_id]\n word_tokens = sentence.split(' ')\n\n # example--> #20064023 2 begin.na Ford-:-1-:-Agent install-:-3-:-Theme\n verb_index, verb = frame_tokens[1], frame_tokens[2].split('.')[0]\n roles = frame_tokens[3:] # 0 is s_id, 1 is verb_position and 2 is verb.Frame\n s = 0\n for role in roles:\n\n role_lemma, role_index, gold_label = role.replace('\\n', '').split('-:-')\n row = ['{}_{}'.format(s_id, s), role_lemma, gold_label, '', sentence, verb, verb_index, 
role_index]\n inLabel = 'None'\n outLabel = 'None'\n backtoVerb = 'None'\n for index in role_index.split('_'):\n for token in sent_lines:\n nodes = token.split()\n # inbound grammatical dependency\n if nodes[0] == index:\n if inLabel == 'None':\n inLabel = nodes[7]\n else:\n inLabel = inLabel + '_' + nodes[7]\n\n # source = nodes[6]\n # prevsource = nodes[0]\n\n # outbound grammatical dependency\n if nodes[6] == index:\n if outLabel == 'None':\n outLabel = nodes[7]\n else:\n outLabel = outLabel + '_' + nodes[7]\n\n # lastLabeltoVerb = back_to_verb(source, prevsource, sent_lines, verb_index.split('_')[0])\n\n indicies = role_index.split('_')\n pos = s + 1\n if int(indicies[0]) > int(verb_index.split('_')[0]): # in test set format of index is changed\n bool_pos = 0\n else:\n bool_pos = 1\n\n row.append(inLabel)\n row.append(outLabel)\n # row.append(lastLabeltoVerb)\n\n row.append(pos)\n row.append(bool_pos)\n\n df.loc[len(df)] = row\n s = s + 1\n\n return df\n\n\ndef getLabelNames(df_task22, columns):\n labels = []\n for column in columns:\n for label in df_task22[column]:\n if label != 'None':\n for lbl in label.split('_'):\n labels.append(lbl.split(':')[0])\n return list(set(labels))\n\n\ndef getGrammaticalBaseline(df_task22, gr='in'):\n # df_task22=task22_to_df_withFrameArgsDependencies(task22_file, udfile)\n grLabels=[]\n if gr == 'in':\n grLabels = getLabelNames(df_task22, ['inbound_dependency'])\n\n if gr == 'out':\n grLabels = getLabelNames(df_task22, ['outbound_dependency'])\n\n if gr == 'inout':\n grLabels = getLabelNames(df_task22, ['inbound_dependency', 'outbound_dependency'])\n\n columns = list(df_task22.columns)\n for lb in grLabels:\n columns.append(lb)\n\n df = pd.DataFrame(columns=columns)\n # print(columns)\n\n for index, inLabel, outLabel in zip(df_task22.index, df_task22['inbound_dependency'],\n df_task22['outbound_dependency']):\n\n row = list(df_task22.loc[index])\n for lb in grLabels:\n row.append(0)\n\n if gr == 'in' or gr == 'inout':\n if inLabel != 'None':\n multiLabels = inLabel.split('_')\n for label in multiLabels:\n column = label.split(':')[0]\n row[columns.index(column)] = -1\n\n if gr == 'out' or gr == 'inout':\n if outLabel != 'None':\n multiLabels = outLabel.split('_')\n for label in multiLabels:\n column = label.split(':')[0]\n if row[columns.index(column)] == -1: # also appear in inbound_dependency\n row[columns.index(column)] = -1\n else:\n row[columns.index(column)] = 1\n\n df.loc[len(df)] = row\n\n return df\n\n#-------------------------------------------------------------------------\n"
] | [
[
"pandas.read_csv"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
qinliuliuqin/active-mri-acquisition | [
"b561f838667f4bc7753b1f89dfbdd545d0f00ada",
"b561f838667f4bc7753b1f89dfbdd545d0f00ada",
"b561f838667f4bc7753b1f89dfbdd545d0f00ada"
] | [
"activemri/experimental/cvpr19_models/data/masking_utils.py",
"activemri/experimental/cvpr19_models/data/raw_data_loader.py",
"activemri/data/singlecoil_knee_data.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\n\nimport numpy as np\nimport torch\n\n\ndef get_mask_func(mask_type, which_dataset, rnl_params=None):\n # Whether the number of lines is random or not\n random_num_lines = mask_type[-4:] == \"_rnl\"\n if \"symmetric_basic\" in mask_type:\n logging.info(\n f\"Mask is symmetric uniform choice with random_num_lines={random_num_lines}.\"\n )\n return SymmetricUniformChoiceMaskFunc(\n [0.125],\n [4],\n which_dataset,\n random_num_lines=random_num_lines,\n rnl_params=rnl_params,\n )\n if \"basic\" in mask_type:\n # First two parameters are ignored if `random_num_lines` is True\n logging.info(\n f\"Mask is fixed acceleration mask with random_num_lines={random_num_lines}.\"\n )\n return BasicMaskFunc(\n [0.125],\n [4],\n which_dataset,\n random_num_lines=random_num_lines,\n rnl_params=rnl_params,\n )\n if \"low_to_high\" in mask_type:\n logging.info(\n f\"Mask is symmetric low to high with random_num_lines={random_num_lines}.\"\n )\n return SymmetricLowToHighMaskFunc(\n [0.125],\n [4],\n which_dataset,\n random_num_lines=random_num_lines,\n rnl_params=rnl_params,\n )\n if \"symmetric_grid\" in mask_type:\n logging.info(\"Mask is symmetric grid.\")\n return SymmetricUniformGridMaskFunc(\n [], [], which_dataset, random_num_lines=True, rnl_params=rnl_params\n )\n if \"grid\" in mask_type:\n logging.info(\"Mask is grid (not symmetric).\")\n return UniformGridMaskFunc(\n [], [], which_dataset, random_num_lines=True, rnl_params=rnl_params\n )\n raise ValueError(f\"Invalid mask type: {mask_type}.\")\n\n\nclass MaskFunc:\n def __init__(\n self,\n center_fractions,\n accelerations,\n which_dataset,\n random_num_lines=False,\n rnl_params=None,\n ):\n if len(center_fractions) != len(accelerations):\n raise ValueError(\n \"Number of center fractions should match number of accelerations\"\n )\n\n self.center_fractions = center_fractions\n self.accelerations = accelerations\n self.random_num_lines = random_num_lines\n\n if rnl_params is None:\n # The lines below give approx. 
4x acceleration on average.\n self.min_lowf_lines = 10 if which_dataset != \"KNEE_RAW\" else 30\n self.max_lowf_lines = 12 if which_dataset != \"KNEE_RAW\" else 32\n self.highf_beta_alpha = 1\n self.highf_beta_beta = 5\n else:\n params = [int(x) for x in rnl_params.split(\",\")]\n assert len(params) == 4\n self.min_lowf_lines = params[0]\n self.max_lowf_lines = params[1]\n self.highf_beta_alpha = params[2]\n self.highf_beta_beta = params[3]\n\n self.rng = np.random.RandomState()\n\n def __call__(self, shape, seed=None):\n if len(shape) < 3:\n raise ValueError(\"Shape should have 3 or more dimensions\")\n\n self.rng.seed(seed)\n num_cols = shape[-2]\n\n # Determine number of low and high frequency lines to scan\n if self.random_num_lines:\n # These are guaranteed to be an even number (useful for symmetric masks)\n num_low_freqs = self.rng.choice(\n range(self.min_lowf_lines, self.max_lowf_lines, 2)\n )\n num_high_freqs = (\n int(\n self.rng.beta(self.highf_beta_alpha, self.highf_beta_beta)\n * (num_cols - num_low_freqs)\n // 2\n )\n * 2\n )\n else:\n choice = self.rng.randint(0, len(self.accelerations))\n center_fraction = self.center_fractions[choice]\n acceleration = self.accelerations[choice]\n\n num_low_freqs = int(round(num_cols * center_fraction))\n num_high_freqs = int(num_cols // acceleration - num_low_freqs)\n\n # Create the mask\n mask = self.create_lf_focused_mask(num_cols, num_high_freqs, num_low_freqs)\n\n # Reshape the mask\n mask_shape = [1 for _ in shape]\n mask_shape[-1] = num_cols\n mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))\n return mask\n\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n p = num_high_freqs / (num_cols - num_low_freqs)\n mask = self.rng.uniform(size=num_cols) < p\n pad = (num_cols - num_low_freqs + 1) // 2\n mask[pad : pad + num_low_freqs] = True\n return mask\n\n\nclass BasicMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n hf_cols = self.rng.choice(\n np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False\n )\n hf_cols[hf_cols >= (num_cols - num_low_freqs + 1) // 2] += num_low_freqs\n mask[hf_cols] = True\n pad = (num_cols - num_low_freqs + 1) // 2\n mask[pad : pad + num_low_freqs] = True\n mask = np.fft.ifftshift(mask, axes=0)\n return mask\n\n\nclass SymmetricUniformChoiceMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n num_cols //= 2\n num_low_freqs //= 2\n num_high_freqs //= 2\n hf_cols = self.rng.choice(\n np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False\n )\n mask[hf_cols] = True\n pad = num_cols - num_low_freqs\n mask[pad:num_cols] = True\n mask[: -(num_cols + 1) : -1] = mask[:num_cols]\n mask = np.fft.ifftshift(mask, axes=0)\n return mask\n\n\nclass UniformGridMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n acceleration = self.rng.choice([4, 8, 16])\n hf_cols = np.arange(acceleration, num_cols, acceleration)\n mask[hf_cols] = True\n mask[: num_low_freqs // 2] = mask[-(num_low_freqs // 2) :] = True\n return mask\n\n\nclass SymmetricLowToHighMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n num_cols //= 2\n num_low_freqs //= 2\n num_high_freqs //= 2\n num_low_freqs += num_high_freqs\n pad = num_cols - num_low_freqs\n mask[pad:num_cols] = True\n 
mask[: -(num_cols + 1) : -1] = mask[:num_cols]\n mask = np.fft.ifftshift(mask, axes=0)\n return mask\n\n\nclass SymmetricUniformGridMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n acceleration = self.rng.choice([4, 8, 16])\n num_cols //= 2\n num_low_freqs //= 2\n hf_cols = np.arange(acceleration, num_cols, acceleration)\n mask[hf_cols] = True\n mask[:num_low_freqs] = True\n mask[: -(num_cols + 1) : -1] = mask[:num_cols]\n return mask\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport pathlib\n\nimport h5py\nimport numpy as np\nimport torch\nimport torch.utils.data\n\n\ndef ifftshift(x, dim=None):\n if dim is None:\n dim = tuple(range(x.dim()))\n shift = [(dim + 1) // 2 for dim in x.shape]\n elif isinstance(dim, int):\n shift = (x.shape[dim] + 1) // 2\n else:\n shift = [(x.shape[i] + 1) // 2 for i in dim]\n return roll(x, shift, dim)\n\n\ndef fftshift(x, dim=None):\n if dim is None:\n dim = tuple(range(x.dim()))\n shift = [dim // 2 for dim in x.shape]\n elif isinstance(dim, int):\n shift = x.shape[dim] // 2\n else:\n shift = [x.shape[i] // 2 for i in dim]\n return roll(x, shift, dim)\n\n\ndef roll(x, shift, dim):\n if isinstance(shift, (tuple, list)):\n assert len(shift) == len(dim)\n for s, d in zip(shift, dim):\n x = roll(x, s, d)\n return x\n shift = shift % x.size(dim)\n if shift == 0:\n return x\n left = x.narrow(dim, 0, x.size(dim) - shift)\n right = x.narrow(dim, x.size(dim) - shift, shift)\n return torch.cat((right, left), dim=dim)\n\n\nclass RawSliceData(torch.utils.data.Dataset):\n def __init__(\n self,\n root,\n transform,\n num_cols=None,\n num_volumes=None,\n num_rand_slices=None,\n custom_split=None,\n ):\n self.transform = transform\n self.examples = []\n\n self.num_rand_slices = num_rand_slices\n self.rng = np.random.RandomState(1234)\n\n files = []\n for fname in list(pathlib.Path(root).iterdir()):\n data = h5py.File(fname, \"r\")\n if num_cols is not None and data[\"kspace\"].shape[2] != num_cols:\n continue\n files.append(fname)\n\n if custom_split is not None:\n split_info = []\n with open(f\"data/splits/raw_{custom_split}.txt\") as f:\n for line in f:\n split_info.append(line.rsplit(\"\\n\")[0])\n files = [f for f in files if f.name in split_info]\n\n if num_volumes is not None:\n self.rng.shuffle(files)\n files = files[:num_volumes]\n\n for volume_i, fname in enumerate(sorted(files)):\n data = h5py.File(fname, \"r\")\n kspace = data[\"kspace\"]\n\n if num_rand_slices is None:\n num_slices = kspace.shape[0]\n self.examples += [(fname, slice) for slice in range(num_slices)]\n else:\n slice_ids = list(range(kspace.shape[0]))\n self.rng.seed(seed=volume_i)\n self.rng.shuffle(slice_ids)\n self.examples += [\n (fname, slice) for slice in slice_ids[:num_rand_slices]\n ]\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n fname, slice = self.examples[i]\n with h5py.File(fname, \"r\") as data:\n kspace = data[\"kspace\"][slice]\n return self.transform(kspace, data.attrs)\n\n\nclass RawDataTransform:\n def __init__(self, mask_func, fixed_seed=None, seed_per_image=False):\n self.mask_func = mask_func\n self.fixed_seed = fixed_seed\n self.seed_per_image = seed_per_image\n\n def __call__(self, kspace, attrs):\n kspace = torch.from_numpy(np.stack([kspace.real, kspace.imag], axis=-1))\n kspace = ifftshift(kspace, dim=(0, 1))\n image = torch.ifft(kspace, 2, normalized=False)\n image = ifftshift(image, dim=(0, 1))\n # norm = torch.sqrt(image[..., 0] ** 2 + image[..., 1] ** 2).max()\n # 5.637766165023095e-08, 7.072103529760345e-07, 5.471710210258607e-06\n # normalize by the mean norm of training images.\n image /= 7.072103529760345e-07\n kspace /= 7.072103529760345e-07\n shape = np.array(kspace.shape)\n seed = (\n int(1009 * image.sum().abs())\n if self.fixed_seed is None and self.seed_per_image\n else self.fixed_seed\n )\n mask = 
self.mask_func(shape, seed)\n return mask, image, kspace\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport pathlib\nfrom typing import Callable, List, Optional, Tuple\n\nimport fastmri\nimport h5py\nimport numpy as np\nimport torch.utils.data\n\n\n# -----------------------------------------------------------------------------\n# Single coil knee dataset (as used in MICCAI'20)\n# -----------------------------------------------------------------------------\nclass MICCAI2020Data(torch.utils.data.Dataset):\n # This is the same as fastMRI singlecoil_knee, except we provide a custom test split\n # and also normalize images by the mean norm of the k-space over training data\n KSPACE_WIDTH = 368\n KSPACE_HEIGHT = 640\n START_PADDING = 166\n END_PADDING = 202\n CENTER_CROP_SIZE = 320\n\n def __init__(\n self,\n root: pathlib.Path,\n transform: Callable,\n num_cols: Optional[int] = None,\n num_volumes: Optional[int] = None,\n num_rand_slices: Optional[int] = None,\n custom_split: Optional[str] = None,\n ):\n self.transform = transform\n self.examples: List[Tuple[pathlib.PurePath, int]] = []\n\n self.num_rand_slices = num_rand_slices\n self.rng = np.random.RandomState(1234)\n\n files = []\n for fname in list(pathlib.Path(root).iterdir()):\n data = h5py.File(fname, \"r\")\n if num_cols is not None and data[\"kspace\"].shape[2] != num_cols:\n continue\n files.append(fname)\n\n if custom_split is not None:\n split_info = []\n with open(f\"activemri/data/splits/knee_singlecoil/{custom_split}.txt\") as f:\n for line in f:\n split_info.append(line.rsplit(\"\\n\")[0])\n files = [f for f in files if f.name in split_info]\n\n if num_volumes is not None:\n self.rng.shuffle(files)\n files = files[:num_volumes]\n\n for volume_i, fname in enumerate(sorted(files)):\n data = h5py.File(fname, \"r\")\n kspace = data[\"kspace\"]\n\n if num_rand_slices is None:\n num_slices = kspace.shape[0]\n self.examples += [(fname, slice_id) for slice_id in range(num_slices)]\n else:\n slice_ids = list(range(kspace.shape[0]))\n self.rng.seed(seed=volume_i)\n self.rng.shuffle(slice_ids)\n self.examples += [\n (fname, slice_id) for slice_id in slice_ids[:num_rand_slices]\n ]\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n fname, slice_id = self.examples[i]\n with h5py.File(fname, \"r\") as data:\n kspace = data[\"kspace\"][slice_id]\n kspace = torch.from_numpy(np.stack([kspace.real, kspace.imag], axis=-1))\n kspace = fastmri.ifftshift(kspace, dim=(0, 1))\n target = torch.fft.ifft(kspace, 2, norm=None)\n target = fastmri.ifftshift(target, dim=(0, 1))\n # Normalize using mean of k-space in training data\n target /= 7.072103529760345e-07\n kspace /= 7.072103529760345e-07\n\n # Environment expects numpy arrays. The code above was used with an older\n # version of the environment to generate the results of the MICCAI'20 paper.\n # So, to keep this consistent with the version in the paper, we convert\n # the tensors back to numpy rather than changing the original code.\n kspace = kspace.numpy()\n target = target.numpy()\n return self.transform(\n kspace,\n torch.zeros(kspace.shape[1]),\n target,\n dict(data.attrs),\n fname.name,\n slice_id,\n )\n"
] | [
[
"numpy.arange",
"numpy.fft.ifftshift",
"numpy.random.RandomState",
"numpy.zeros"
],
[
"torch.cat",
"numpy.stack",
"numpy.array",
"torch.ifft",
"numpy.random.RandomState"
],
[
"numpy.random.RandomState",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
izhorvath/MetGAN | [
"aca85fb3306d2515a65c8d525cd78e1147ba7e1b"
] | [
"models/networks.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.optim import lr_scheduler\nfrom math import floor, log2\nfrom functools import partial\nfrom linear_attention_transformer import ImageLinearAttention\n\n###\n\nfrom random import random\n\n\nimport numpy as np\nimport torch.nn.functional as F\n\n\n###\n\nfrom models.networks_SPADE.base_network import BaseNetwork\nfrom models.networks_SPADE.architecture import ResnetBlock as ResnetBlock\nfrom models.networks_SPADE.architecture import SPADEResnetBlock as SPADEResnetBlock\n\n\n###############################################################################\n# Helper Functions\n###############################################################################\n\n\nclass Identity(nn.Module):\n def forward(self, x):\n return x\n\n\ndef get_norm_layer(norm_type='instance'):\n \"\"\"Return a normalization layer\n\n Parameters:\n norm_type (str) -- the name of the normalization layer: batch | instance | none\n\n For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).\n For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.\n \"\"\"\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n elif norm_type == 'none':\n def norm_layer(x): return Identity()\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\n\ndef get_scheduler(optimizer, opt):\n \"\"\"Return a learning rate scheduler\n\n Parameters:\n optimizer -- the optimizer of the network\n opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
\n opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine\n\n For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs\n and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.\n For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.\n See https://pytorch.org/docs/stable/optim.html for more details.\n \"\"\"\n if opt.lr_policy == 'linear':\n def lambda_rule(epoch):\n lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)\n return lr_l\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n elif opt.lr_policy == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)\n elif opt.lr_policy == 'plateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\n elif opt.lr_policy == 'cosine':\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)\n else:\n return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)\n return scheduler\n\ndef define_SPADE(opt,gpu_ids):\n if('spade8' in opt.netG):\n net = SPADE8Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)\n elif('spade6' in opt.netG):\n net = SPADE6Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)\n else:\n net = SPADEGenerator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)\n if len(gpu_ids) > 0:\n assert(torch.cuda.is_available())\n net.to(gpu_ids[0])\n #net = torch.nn.DataParallel(net, gpu_ids) \n net.init_weights()\n return net\n\ndef init_weights(net, init_type='normal', init_gain=0.02):\n \"\"\"Initialize network weights.\n\n Parameters:\n net (network) -- network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n\n We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\n work better for some applications. Feel free to try yourself.\n \"\"\"\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n init.normal_(m.weight.data, 1.0, init_gain)\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network with %s' % init_type)\n net.apply(init_func) # apply the initialization function <init_func>\n\n\ndef init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):\n \"\"\"Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. 
initialize the network weights\n Parameters:\n net (network) -- the network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Return an initialized network.\n \"\"\"\n if len(gpu_ids) > 0:\n assert(torch.cuda.is_available())\n net.to(gpu_ids[0])\n #net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs\n init_weights(net, init_type, init_gain=init_gain)\n return net\n\n\ndef define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):\n \"\"\"Create a generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128\n norm (str) -- the name of normalization layers used in the network: batch | instance | none\n use_dropout (bool) -- if use dropout layers.\n init_type (str) -- the name of our initialization method.\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Returns a generator\n\n Our current implementation provides two types of generators:\n U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)\n The original U-Net paper: https://arxiv.org/abs/1505.04597\n\n Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)\n Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.\n We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).\n\n\n The generator has been initialized by <init_net>. 
It uses RELU for non-linearity.\n \"\"\"\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n if netG == 'resnet_9blocks':\n net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)\n elif netG == 'resnet_9blocksup':\n net = ResnetGeneratorUp(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)\n elif netG == 'resnet_6blocks':\n net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)\n elif netG == 'unet_128':\n net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_256':\n net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_768':\n net = UNet768(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_768_sigm':\n net = UNet768Sigm(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_spade':\n net = UNet768PIXSPADE(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_spade8sm':\n net = UNet768PIXSPADE8SM(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n else:\n raise NotImplementedError('Generator model name [%s] is not recognized' % netG)\n return init_net(net, init_type, init_gain, gpu_ids)\n\n\ndef define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):\n \"\"\"Create a discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the first conv layer\n netD (str) -- the architecture's name: basic | n_layers | pixel\n n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'\n norm (str) -- the type of normalization layers used in the network.\n init_type (str) -- the name of the initialization method.\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Returns a discriminator\n\n Our current implementation provides three types of discriminators:\n [basic]: 'PatchGAN' classifier described in the original pix2pix paper.\n It can classify whether 70×70 overlapping patches are real or fake.\n Such a patch-level discriminator architecture has fewer parameters\n than a full-image discriminator and can work on arbitrarily-sized images\n in a fully convolutional fashion.\n\n [n_layers]: With this mode, you can specify the number of conv layers in the discriminator\n with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)\n\n [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.\n It encourages greater color diversity but has no effect on spatial statistics.\n\n The discriminator has been initialized by <init_net>. 
It uses Leakly RELU for non-linearity.\n \"\"\"\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n if netD == 'basic': # default PatchGAN classifier\n net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)\n elif netD == 'n_layers': # more options\n net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)\n elif netD == 'pixel': # classify if each pixel is real or fake\n net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)\n elif netD == 'conditional': #conditional patchGAN\n net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)\n elif netD == 'unet':\n net = UnetDiscriminator()\n else:\n raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)\n return init_net(net, init_type, init_gain, gpu_ids)\n\n\n##############################################################################\n# Classes\n##############################################################################\nclass GANLoss(nn.Module):\n \"\"\"Define different GAN objectives.\n\n The GANLoss class abstracts away the need to create the target label tensor\n that has the same size as the input.\n \"\"\"\n\n def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):\n \"\"\" Initialize the GANLoss class.\n\n Parameters:\n gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode in ['wgangp']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n A label tensor filled with ground truth label, and with the size of the input\n \"\"\"\n\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(prediction)\n\n def __call__(self, prediction, target_is_real):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction output from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n the calculated loss.\n \"\"\"\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\n return loss\n \nclass UnetGANLoss(nn.Module):\n \"\"\"Define different GAN objectives.\n\n The GANLoss class abstracts away the need to create the target label tensor\n that has the same size as the input.\n \"\"\"\n\n def __init__(self, 
gan_mode, target_real_label=1.0, target_fake_label=0.0):\n \"\"\" Initialize the GANLoss class.\n\n Parameters:\n gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(UnetGANLoss, self).__init__()\n self.register_buffer('real_label_1', torch.tensor(target_real_label))\n self.register_buffer('real_label_2', torch.tensor(np.ones((1,256,256))))\n self.register_buffer('fake_label_1', torch.tensor(target_fake_label))\n self.register_buffer('fake_label_2', torch.tensor(np.zeros((1,256,256))))\n\n self.loss_1 = nn.BCEWithLogitsLoss()\n self.loss_2 = nn.BCEWithLogitsLoss()\n\n def get_target_tensor(self, prediction_1, prediction_2, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n A label tensor filled with ground truth label, and with the size of the input\n \"\"\"\n\n if target_is_real:\n target_tensor_1 = self.real_label_1\n target_tensor_2 = self.real_label_2\n else:\n target_tensor_1 = self.fake_label_1\n target_tensor_2 = self.fake_label_2\n return target_tensor_1.expand_as(prediction_1), target_tensor_2.expand_as(prediction_2)\n\n def __call__(self, prediction_1, prediction_2, target_is_real):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction output from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n the calculated loss.\n \"\"\"\n\n target_tensor_1, target_tensor_2 = self.get_target_tensor(prediction_1, prediction_2, target_is_real)\n loss_1 = self.loss_1(prediction_1, target_tensor_1)\n loss_2 = self.loss_2(prediction_2, target_tensor_2)\n\n \n loss = loss_1.mean()+loss_2.mean()\n return loss\n\n\ndef cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):\n \"\"\"Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028\n\n Arguments:\n netD (network) -- discriminator network\n real_data (tensor array) -- real images\n fake_data (tensor array) -- generated images from the generator\n device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n type (str) -- if we mix real and fake data or not [real | fake | mixed].\n constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2\n lambda_gp (float) -- weight for this loss\n\n Returns the gradient penalty loss\n \"\"\"\n if lambda_gp > 0.0:\n if type == 'real': # either use real images, fake images, or a linear interpolation of two.\n interpolatesv = real_data\n elif type == 'fake':\n interpolatesv = fake_data\n elif type == 'mixed':\n alpha = torch.rand(real_data.shape[0], 1, device=device)\n alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)\n interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)\n else:\n raise NotImplementedError('{} not implemented'.format(type))\n interpolatesv.requires_grad_(True)\n 
disc_interpolates = netD(interpolatesv)\n gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,\n grad_outputs=torch.ones(disc_interpolates.size()).to(device),\n create_graph=True, retain_graph=True, only_inputs=True)\n gradients = gradients[0].view(real_data.size(0), -1) # flat the data\n gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps\n return gradient_penalty, gradients\n else:\n return 0.0, None\n\n\nclass ResnetGenerator(nn.Module):\n \"\"\"Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.\n\n We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)\n \"\"\"\n\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):\n \"\"\"Construct a Resnet-based generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers\n n_blocks (int) -- the number of ResNet blocks\n padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero\n \"\"\"\n assert(n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n for i in range(n_blocks): # add ResNet blocks\n\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n \"\"\"Standard forward\"\"\"\n return self.model(input)\n \nclass ResnetGeneratorUp(nn.Module):\n \"\"\"Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.\n\n We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)\n \"\"\"\n\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):\n \"\"\"Construct a Resnet-based generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers\n n_blocks (int) -- the number of ResNet blocks\n padding_type 
(str) -- the name of padding layer in conv layers: reflect | replicate | zero\n \"\"\"\n assert(n_blocks >= 0)\n super(ResnetGeneratorUp, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n for i in range(n_blocks): # add ResNet blocks\n\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.Upsample(scale_factor = 2, mode='nearest'),\n nn.ReflectionPad2d(1),\n nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0),]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n \"\"\"Standard forward\"\"\"\n return self.model(input)\n\n\nclass ResnetBlock(nn.Module):\n \"\"\"Define a Resnet block\"\"\"\n\n def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n \"\"\"Initialize the Resnet block\n\n A resnet block is a conv block with skip connections\n We construct a conv block with build_conv_block function,\n and implement skip connections in <forward> function.\n Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf\n \"\"\"\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\n\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n \"\"\"Construct a convolutional block.\n\n Parameters:\n dim (int) -- the number of channels in the conv layer.\n padding_type (str) -- the name of padding layer: reflect | replicate | zero\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers.\n use_bias (bool) -- if the conv layer uses bias or not\n\n Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))\n \"\"\"\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n \"\"\"Forward function (with skip connections)\"\"\"\n out = x + self.conv_block(x) # add skip connections\n return out\n\n\nclass 
UnetGenerator(nn.Module):\n \"\"\"Create a Unet-based generator\"\"\"\n\n def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n \"\"\"Construct a Unet generator\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7,\n image of size 128x128 will become of size 1x1 # at the bottleneck\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n\n We construct the U-Net from the innermost layer to the outermost layer.\n It is a recursive process.\n \"\"\"\n super(UnetGenerator, self).__init__()\n # construct unet structure\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer\n for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)\n # gradually reduce the number of filters from ngf * 8 to ngf\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer\n\n def forward(self, input):\n \"\"\"Standard forward\"\"\"\n return self.model(input)\n\n\nclass UnetSkipConnectionBlock(nn.Module):\n \"\"\"Defines the Unet submodule with skip connection.\n X -------------------identity----------------------\n |-- downsampling -- |submodule| -- upsampling --|\n \"\"\"\n\n def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n \"\"\"Construct a Unet submodule with skip connections.\n\n Parameters:\n outer_nc (int) -- the number of filters in the outer conv layer\n inner_nc (int) -- the number of filters in the inner conv layer\n input_nc (int) -- the number of channels in input images/features\n submodule (UnetSkipConnectionBlock) -- previously defined submodules\n outermost (bool) -- if this module is the outermost module\n innermost (bool) -- if this module is the innermost module\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers.\n \"\"\"\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n 
padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n if self.outermost:\n return self.model(x)\n else: # add skip connections\n return torch.cat([x, self.model(x)], 1)\n \n#%%% Unet from DeepMact\n \n \nclass ConvBnRelu2d(torch.nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, output_padding=1, dilation=1, stride=1, groups=1, is_bn=True, is_relu=True, is_decoder=False):\n super(ConvBnRelu2d, self).__init__()\n if is_decoder:\n self.transpConv = torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, output_padding=output_padding, stride=stride, dilation=dilation, groups=groups, bias=False)\n self.conv = None\n else:\n self.transpConv = None\n self.conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, groups=groups, bias=False)\n self.bn = torch.nn.BatchNorm2d(out_channels, eps=1e-4)\n self.relu = torch.nn.ReLU(inplace=True)\n if is_bn is False: self.bn = None\n if is_relu is False: self.relu = None\n\n def forward(self, x):\n if self.conv is None:\n x = self.transpConv(x)\n elif self.transpConv is None:\n x = self.conv(x)\n \n if self.bn is not None:\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n return x\n\n \nclass StackEncoder(torch.nn.Module):\n def __init__(self, x_channels, y_channels, kernel_size=3, stride=1):\n super(StackEncoder, self).__init__()\n padding = (kernel_size - 1) // 2\n self.encode = torch.nn.Sequential(\n ConvBnRelu2d(x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n )\n\n def forward(self, x):\n y = self.encode(x)\n y_small = torch.nn.functional.max_pool2d(y, kernel_size=2, stride=2)\n return y, y_small\n\n\nclass StackDecoder(torch.nn.Module):\n def __init__(self, x_big_channels, x_channels, y_channels, kernel_size=3, stride=1):\n super(StackDecoder, self).__init__()\n padding = (kernel_size - 1) // 2\n\n self.decode = torch.nn.Sequential(\n ConvBnRelu2d(x_big_channels + x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n )\n\n def forward(self, x_big, x):\n N, C, H, W = x_big.size()\n y = torch.nn.functional.upsample(x, size=(H, W), mode='bilinear', align_corners=True)\n y = torch.cat([y, x_big], 1)\n y = self.decode(y)\n return y\n# 768\nclass UNet768(torch.nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UNet768, self).__init__()\n # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n # C, H, W = in_shape\n # assert(C==3)\n self.output_nc = output_nc\n\n # 1024\n self.down1 = 
StackEncoder(input_nc, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out\n self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out\n self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out\n self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out\n self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out\n self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out\n\n self.center = torch.nn.Sequential(\n ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out\n )\n\n # x_big_channels, x_channels, y_channels\n self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out\n self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out\n self.up4 = StackDecoder(256, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out\n self.up3 = StackDecoder(128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out\n self.up2 = StackDecoder(64, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out\n self.up1 = StackDecoder(24, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out\n self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out\n self.final_out = torch.nn.Tanh()\n\n def _crop_concat(self, upsampled, bypass):\n \"\"\"\n Crop y to the (h, w) of x and concat them.\n Used for the expansive path.\n Returns:\n The concatenated tensor\n \"\"\"\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)\n\n def forward(self, x):\n out = x # ;print('x ',x.size())\n #\n down1, out = self.down1(out) ##;\n #print('down1',down1.shape) #256\n down2, out = self.down2(out) # ;\n #print('down2',down2.shape) #128\n down3, out = self.down3(out) # ;\n #print('down3',down3.shape) #64\n down4, out = self.down4(out) # ;\n #print('down4',down4.shape) #32\n down5, out = self.down5(out) # ;\n #print('down5',down5.shape) #16\n down6, out = self.down6(out) # ;\n #print('down6',down6.shape) #8\n pass # ;\n #print('out ',out.shape)\n\n out = self.center(out)\n #print('0',out.shape)\n out = self.up6(down6, out)\n #print('1',out.shape)\n out = self.up5(down5, out)\n #print('2',out.shape)\n out = self.up4(down4, out)\n #print('3',out.shape)\n out = self.up3(down3, out)\n #print('4',out.shape)\n out = self.up2(down2, out)\n #print('5',out.shape)\n out = self.up1(down1, out)\n # 1024\n #print('6',out.shape)\n out = self.final_out(self.classify(out))\n out = torch.reshape(out,(-1, self.output_nc, x.shape[2],x.shape[3]))#, dim=1)\n return out\n \n#%%Unet_spade_768_300\n \n \n \n#%%sigm\n\n\nclass UNet768Sigm(torch.nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UNet768Sigm, self).__init__()\n # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n # C, H, W = in_shape\n # assert(C==3)\n self.output_nc = output_nc\n\n # 1024\n self.down1 = 
StackEncoder(input_nc, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out\n self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out\n self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out\n self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out\n self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out\n self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out\n\n self.center = torch.nn.Sequential(\n ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out\n )\n\n # x_big_channels, x_channels, y_channels\n self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out\n self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out\n self.up4 = StackDecoder(256, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out\n self.up3 = StackDecoder(128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out\n self.up2 = StackDecoder(64, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out\n self.up1 = StackDecoder(24, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out\n self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out\n self.final_out = torch.nn.Sigmoid()\n\n def _crop_concat(self, upsampled, bypass):\n \"\"\"\n Crop y to the (h, w) of x and concat them.\n Used for the expansive path.\n Returns:\n The concatenated tensor\n \"\"\"\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)\n\n def forward(self, x):\n out = x # ;print('x ',x.size())\n #\n down1, out = self.down1(out) ##;print('down1',down1.size()) #256\n down2, out = self.down2(out) # ;print('down2',down2.size()) #128\n down3, out = self.down3(out) # ;print('down3',down3.size()) #64\n down4, out = self.down4(out) # ;print('down4',down4.size()) #32\n down5, out = self.down5(out) # ;print('down5',down5.size()) #16\n down6, out = self.down6(out) # ;print('down6',down6.size()) #8\n pass # ;print('out ',out.size())\n\n out = self.center(out)\n out = self.up6(down6, out)\n out = self.up5(down5, out)\n out = self.up4(down4, out)\n out = self.up3(down3, out)\n out = self.up2(down2, out)\n out = self.up1(down1, out)\n # 1024\n\n out = self.final_out(self.classify(out))\n out = torch.reshape(out,(1, self.output_nc, 256,256))#, dim=1)\n return out\n\n\n\n\n\nclass NLayerDiscriminator(nn.Module):\n \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n \"\"\"Construct a PatchGAN discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n n_layers (int) -- the number of conv layers in the discriminator\n norm_layer -- normalization layer\n \"\"\"\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == 
nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.model(input)\n\n\nclass PixelDiscriminator(nn.Module):\n \"\"\"Defines a 1x1 PatchGAN discriminator (pixelGAN)\"\"\"\n\n def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):\n \"\"\"Construct a 1x1 PatchGAN discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n \"\"\"\n super(PixelDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.net = [\n nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),\n norm_layer(ndf * 2),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]\n\n self.net = nn.Sequential(*self.net)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.net(input)\n\n\n#%% Unet as Disdef random_hflip(tensor, prob):\n \n\ndef DiffAugment(x, types=[]):\n for p in types:\n for f in AUGMENT_FNS[p]:\n x = f(x)\n return x.contiguous(memory_format = torch.contiguous_format)\n\ndef rand_brightness(x):\n x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)\n return x\n\ndef rand_saturation(x):\n x_mean = x.mean(dim=1, keepdim=True)\n x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean\n return x\n\ndef rand_contrast(x):\n x_mean = x.mean(dim=[1, 2, 3], keepdim=True)\n x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean\n return x\n\ndef rand_translation(x, ratio=0.125):\n shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)\n translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)\n translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)\n grid_batch, grid_x, grid_y = torch.meshgrid(\n torch.arange(x.size(0), dtype=torch.long, device=x.device),\n torch.arange(x.size(2), dtype=torch.long, device=x.device),\n torch.arange(x.size(3), dtype=torch.long, device=x.device),\n )\n grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)\n grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)\n x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])\n x = x_pad.permute(0, 
2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2).contiguous(memory_format = torch.contiguous_format)\n return x\n\ndef rand_cutout(x, ratio=0.5):\n cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)\n offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)\n offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)\n grid_batch, grid_x, grid_y = torch.meshgrid(\n torch.arange(x.size(0), dtype=torch.long, device=x.device),\n torch.arange(cutout_size[0], dtype=torch.long, device=x.device),\n torch.arange(cutout_size[1], dtype=torch.long, device=x.device),\n )\n grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)\n grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)\n mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)\n mask[grid_batch, grid_x, grid_y] = 0\n x = x * mask.unsqueeze(1)\n return x\n\nAUGMENT_FNS = {\n 'color': [rand_brightness, rand_saturation, rand_contrast],\n 'translation': [rand_translation],\n 'cutout': [rand_cutout],\n}\n \ndef random_float(lo, hi):\n return lo + (hi - lo) * random()\n\ndef random_crop_and_resize(tensor, scale):\n b, c, h, _ = tensor.shape\n new_width = int(h * scale)\n delta = h - new_width\n h_delta = int(random() * delta)\n w_delta = int(random() * delta)\n cropped = tensor[:, :, h_delta:(h_delta + new_width), w_delta:(w_delta + new_width)].clone()\n return F.interpolate(cropped, size=(h, h), mode='bilinear')\n\ndef random_hflip(tensor, prob):\n if prob > random():\n return tensor\n return torch.flip(tensor, dims=(3,))\n\nclass AugWrapper(nn.Module):\n def __init__(self, D, image_size, types):\n super().__init__()\n self.D = D\n self.types = types\n\n def forward(self, images, prob = 0., detach = False):\n if random() < prob:\n images = random_hflip(images, prob=0.5)\n images = DiffAugment(images, types=self.types)\n\n if detach:\n images.detach_()\n\n return self.D(images), images\n \n \ndef leaky_relu(p=0.2):\n return nn.LeakyReLU(p)\n\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n def forward(self, x):\n return self.fn(x) + x\n\nclass Flatten(nn.Module):\n def __init__(self, index):\n super().__init__()\n self.index = index\n def forward(self, x):\n return x.flatten(self.index)\n\nclass Rezero(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n self.g = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return self.fn(x) * self.g \n \ndef double_conv(chan_in, chan_out):\n return nn.Sequential(\n nn.Conv2d(chan_in, chan_out, 3, padding=1),\n leaky_relu(),\n nn.Conv2d(chan_out, chan_out, 3, padding=1),\n leaky_relu()\n )\n \nclass DownBlock(nn.Module):\n def __init__(self, input_channels, filters, downsample=True):\n super().__init__()\n self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))\n\n self.net = double_conv(input_channels, filters)\n self.down = nn.Conv2d(filters, filters, 3, padding = 1, stride = 2) if downsample else None\n\n def forward(self, x):\n res = self.conv_res(x)\n x = self.net(x)\n unet_res = x\n\n if self.down is not None:\n x = self.down(x)\n\n x = x + res\n return x, unet_res\n \n\n# one layer of self-attention and feedforward, for images\n\nattn_and_ff = lambda chan: nn.Sequential(*[\n Residual(Rezero(ImageLinearAttention(chan, norm_queries = True))),\n 
Residual(Rezero(nn.Sequential(nn.Conv2d(chan, chan * 2, 1), leaky_relu(), nn.Conv2d(chan * 2, chan, 1))))\n])\n \nclass UpBlock(nn.Module):\n def __init__(self, input_channels, filters):\n super().__init__()\n self.conv_res = nn.ConvTranspose2d(input_channels // 2, filters, 1, stride = 2)\n self.net = double_conv(input_channels, filters)\n self.up = nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False)\n self.input_channels = input_channels\n self.filters = filters\n\n def forward(self, x, res):\n *_, h, w = x.shape\n conv_res = self.conv_res(x, output_size = (h * 2, w * 2))\n x = self.up(x)\n x = torch.cat((x, res), dim=1)\n x = self.net(x)\n x = x + conv_res\n return x\n \nclass UnetDiscriminator(nn.Module):\n def __init__(self, image_size=256, network_capacity = 16, transparent = False, fmap_max = 256):\n super().__init__()\n num_layers = int(log2(image_size) - 3)\n num_init_filters = 2# if not transparent else 4\n\n blocks = []\n filters = [num_init_filters] + [(network_capacity) * (2 ** i) for i in range(num_layers + 1)]\n\n set_fmap_max = partial(min, fmap_max)\n filters = list(map(set_fmap_max, filters))\n filters[-1] = filters[-2]\n\n chan_in_out = list(zip(filters[:-1], filters[1:]))\n chan_in_out = list(map(list, chan_in_out))\n\n print('Channels',chan_in_out)\n down_blocks = []\n attn_blocks = []\n\n for ind, (in_chan, out_chan) in enumerate(chan_in_out):\n num_layer = ind + 1\n is_not_last = ind != (len(chan_in_out) - 1)\n\n block = DownBlock(in_chan, out_chan, downsample = is_not_last)\n down_blocks.append(block)\n\n attn_fn = attn_and_ff(out_chan)\n attn_blocks.append(attn_fn)\n\n self.down_blocks = nn.ModuleList(down_blocks)\n self.attn_blocks = nn.ModuleList(attn_blocks)\n\n last_chan = filters[-1]\n\n self.to_logit = nn.Sequential(\n leaky_relu(),\n nn.AvgPool2d(image_size // (2 ** num_layers)),\n Flatten(1),\n nn.Linear(last_chan, 1)\n )\n\n self.conv = double_conv(last_chan, last_chan)\n\n dec_chan_in_out = chan_in_out[:-1][::-1]\n self.up_blocks = nn.ModuleList(list(map(lambda c: UpBlock(c[1] * 2, c[0]), dec_chan_in_out)))\n self.conv_out = nn.Conv2d(2, 1, 1)\n\n def forward(self, x):\n \n #print('Input shape:', x.shape)\n b, *_ = x.shape\n\n residuals = []\n i=0\n for (down_block, attn_block) in zip(self.down_blocks, self.attn_blocks):\n #print('Step', i, x.shape)\n i=i+1\n x, unet_res = down_block(x)\n residuals.append(unet_res)\n\n if attn_block is not None:\n x = attn_block(x)\n\n x = self.conv(x) + x\n enc_out = self.to_logit(x)\n\n for (up_block, res) in zip(self.up_blocks, residuals[:-1][::-1]):\n #print('in up blocK', x.shape)\n x = up_block(x, res)\n\n dec_out = self.conv_out(x)\n return enc_out.squeeze(), dec_out\n\n\n#%% SPADE RESNET\n \n \nclass SPADEGenerator(BaseNetwork):\n\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):\n super(SPADEGenerator, self).__init__()\n self.opt = opt\n self.opt.num_upsampling_layers = 'normal'\n self.opt.norm_G = 'spectralspadesyncbatch3x3'\n self.opt.ngf = 64\n self.opt.semantic_nc = 2\n self.opt.use_vae = False\n self.opt.crop_size = 256\n self.opt.normG = 'spectralinstance'\n self.opt.aspect_ratio = 1.0\n nf = self.opt.ngf\n opt = self.opt\n\n self.sw, self.sh = self.compute_latent_vector_size(opt)\n\n\n self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)\n\n self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, 
opt)\n\n self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)\n self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)\n self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)\n self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)\n\n final_nc = nf\n\n if opt.num_upsampling_layers == 'most':\n self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)\n final_nc = nf // 2\n\n self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)\n\n self.up = nn.Upsample(scale_factor=2)\n\n def compute_latent_vector_size(self, opt):\n if opt.num_upsampling_layers == 'normal':\n num_up_layers = 5\n elif opt.num_upsampling_layers == 'more':\n num_up_layers = 6\n elif opt.num_upsampling_layers == 'most':\n num_up_layers = 7\n else:\n raise ValueError('opt.num_upsampling_layers [%s] not recognized' %\n opt.num_upsampling_layers)\n\n sw = self.opt.crop_size // (2**num_up_layers)\n sh = round(sw / opt.aspect_ratio)\n\n return sw, sh\n\n def forward(self, input, z=None):\n seg = input\n\n if self.opt.use_vae:\n # we sample z from unit normal and reshape the tensor\n if z is None:\n z = torch.randn(input.size(0), self.opt.z_dim,\n dtype=torch.float32, device=input.get_device())\n x = self.fc(z)\n x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)\n else:\n # we downsample segmap and run convolution\n x = F.interpolate(seg, size=(self.sh, self.sw))\n x = self.fc(x)\n\n #print('0,', x.shape)\n x = self.head_0(x, seg)\n #print('1,', x.shape)\n x = self.up(x)\n #print('2', x.shape)\n x = self.G_middle_0(x, seg)\n #print('3,', x.shape)\n if self.opt.num_upsampling_layers == 'more' or \\\n self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n #print('4,', x.shape)\n #x = self.G_middle_1(x, seg)\n output_5 = x\n #print('5,', x.shape)\n x = self.up(x)\n output_6 = x\n #print('6,', x.shape)\n x = self.up_0(x, seg)\n #print('7,', x.shape)\n x = self.up(x)\n #print('8,', x.shape)\n x = self.up_1(x, seg)\n output_9 = x\n #print('9,', x.shape)\n x = self.up(x)\n #print('10,', x.shape)\n x = self.up_2(x, seg)\n #print('11,', x.shape)\n output_11 = x\n x = self.up(x)\n # print('12,', x.shape)\n x = self.up_3(x, seg)\n #print('13,', x.shape)\n\n if self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n x = self.up_4(x, seg)\n #print('14,', x.shape)\n x = self.conv_img(F.leaky_relu(x, 2e-1))\n # print('15,', x.shape)\n output_15 = x\n #x = F.tanh(x)\n #print('16,', x.shape)\n\n return output_5,output_6,output_9,output_11,output_15\n \n#%% spade8\n \nclass SPADE8Generator(BaseNetwork):\n\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):\n super(SPADE8Generator, self).__init__()\n self.opt = opt\n self.opt.num_upsampling_layers = 'normal'\n self.opt.norm_G = 'spectralspadesyncbatch3x3'\n self.opt.ngf = 8\n self.opt.semantic_nc = 2\n self.opt.use_vae = False\n self.opt.crop_size = 256\n self.opt.normG = 'spectralinstance'\n self.opt.aspect_ratio = 1.0\n nf = self.opt.ngf\n opt = self.opt\n\n self.sw, self.sh = self.compute_latent_vector_size(opt)\n\n\n self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)\n\n self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)\n self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)\n self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)\n self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)\n\n final_nc = nf\n\n if opt.num_upsampling_layers == 'most':\n self.up_4 = 
SPADEResnetBlock(1 * nf, nf // 2, opt)\n final_nc = nf // 2\n\n self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)\n\n self.up = nn.Upsample(scale_factor=2)\n\n def compute_latent_vector_size(self, opt):\n if opt.num_upsampling_layers == 'normal':\n num_up_layers = 5\n elif opt.num_upsampling_layers == 'more':\n num_up_layers = 6\n elif opt.num_upsampling_layers == 'most':\n num_up_layers = 7\n else:\n raise ValueError('opt.num_upsampling_layers [%s] not recognized' %\n opt.num_upsampling_layers)\n\n sw = self.opt.crop_size // (2**num_up_layers)\n sh = round(sw / opt.aspect_ratio)\n\n return sw, sh\n\n def forward(self, input, z=None):\n seg = input\n\n if self.opt.use_vae:\n # we sample z from unit normal and reshape the tensor\n if z is None:\n z = torch.randn(input.size(0), self.opt.z_dim,\n dtype=torch.float32, device=input.get_device())\n x = self.fc(z)\n x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)\n else:\n # we downsample segmap and run convolution\n x = F.interpolate(seg, size=(self.sh, self.sw))\n x = self.fc(x)\n\n #print('0,', x.shape)\n x = self.head_0(x, seg)\n #print('1,', x.shape)\n x = self.up(x)\n #print('2', x.shape)\n x = self.G_middle_0(x, seg)\n #print('3,', x.shape)\n if self.opt.num_upsampling_layers == 'more' or \\\n self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n #print('4,', x.shape)\n x = self.G_middle_1(x, seg)\n output_5 = x\n #print('5,', x.shape)\n x = self.up(x)\n output_6 = x\n #print('6,', x.shape)\n x = self.up_0(x, seg)\n #print('7,', x.shape)\n x = self.up(x)\n #print('8,', x.shape)\n x = self.up_1(x, seg)\n output_9 = x\n #print('9,', x.shape)\n x = self.up(x)\n #print('10,', x.shape)\n x = self.up_2(x, seg)\n #print('11,', x.shape)\n output_11 = x\n '''this can be removed'''\n x = self.up(x)\n #print('12,', x.shape)\n x = self.up_3(x, seg)\n #print('13,', x.shape)\n\n if self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n x = self.up_4(x, seg)\n #print('14,', x.shape)\n x = self.conv_img(F.leaky_relu(x, 2e-1))\n #print('15,', x.shape)\n output_15 = x\n #x = F.tanh(x)\n #print('16,', x.shape)\n '''til here'''\n return output_5,output_6,output_9,output_11,output_15\n \n#%%\nclass SPADE6Generator(BaseNetwork):\n\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):\n super(SPADE6Generator, self).__init__()\n self.opt = opt\n self.opt.num_upsampling_layers = 'normal'\n self.opt.norm_G = 'spectralspadesyncbatch3x3'\n self.opt.ngf = 6\n self.opt.semantic_nc = 2\n self.opt.use_vae = False\n self.opt.crop_size = 300\n self.opt.normG = 'spectralinstance'\n self.opt.aspect_ratio = 1.0\n nf = self.opt.ngf\n opt = self.opt\n\n self.sw, self.sh = self.compute_latent_vector_size(opt)\n\n\n self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)\n\n self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)\n self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)\n self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)\n self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)\n\n final_nc = nf\n\n if opt.num_upsampling_layers == 'most':\n self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)\n final_nc = nf // 2\n\n self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)\n\n self.up = nn.Upsample(scale_factor=2)\n\n def compute_latent_vector_size(self, opt):\n if opt.num_upsampling_layers == 'normal':\n num_up_layers = 
5\n elif opt.num_upsampling_layers == 'more':\n num_up_layers = 6\n elif opt.num_upsampling_layers == 'most':\n num_up_layers = 7\n else:\n raise ValueError('opt.num_upsampling_layers [%s] not recognized' %\n opt.num_upsampling_layers)\n\n sw = 10#self.opt.crop_size // (2**num_up_layers)\n sh = round(sw / opt.aspect_ratio)\n\n return sw, sh\n\n def forward(self, input, z=None):\n seg = input\n\n if self.opt.use_vae:\n # we sample z from unit normal and reshape the tensor\n if z is None:\n z = torch.randn(input.size(0), self.opt.z_dim,\n dtype=torch.float32, device=input.get_device())\n x = self.fc(z)\n x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)\n else:\n # we downsample segmap and run convolution\n x = F.interpolate(seg, size=(self.sh, self.sw))\n x = self.fc(x)\n\n print('0,', x.shape)\n x = self.head_0(x, seg)\n print('1,', x.shape)\n x = self.up(x)\n print('2', x.shape)\n x = self.G_middle_0(x, seg)\n print('3,', x.shape)\n if self.opt.num_upsampling_layers == 'more' or \\\n self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n print('4,', x.shape)\n x = self.G_middle_1(x, seg)\n output_5 = x\n print('5,', x.shape)\n x = self.up(x)\n output_6 = x\n print('6,', x.shape)\n x = self.up_0(x, seg)\n print('7,', x.shape)\n x = self.up(x)\n print('8,', x.shape)\n x = self.up_1(x, seg)\n output_9 = x\n print('9,', x.shape)\n x = self.up(x)\n print('10,', x.shape)\n x = self.up_2(x, seg)\n print('11,', x.shape)\n output_11 = x\n x = self.up(x)\n print('12,', x.shape)\n x = self.up_3(x, seg)\n print('13,', x.shape)\n\n if self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n x = self.up_4(x, seg)\n print('14,', x.shape)\n x = self.conv_img(F.leaky_relu(x, 2e-1))\n print('15,', x.shape)\n output_15 = x\n #x = F.tanh(x)\n print('16,', x.shape)\n\n return output_5,output_6,output_9,output_11,output_15\n\n#%% For the PIX2SPADE\n \nclass UNet768PIXSPADE(torch.nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UNet768PIXSPADE, self).__init__()\n # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n # C, H, W = in_shape\n # assert(C==3)\n print('UNET 768 SPADE')\n self.output_nc = output_nc\n\n # 1024\n self.down1 = StackEncoder(1, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out\n self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out\n self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out\n self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out\n self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out\n self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out\n\n self.center = torch.nn.Sequential(\n ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out\n )\n\n # x_big_channels, x_channels, y_channels\n self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out\n self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out\n self.up4 = StackDecoder(256+1024, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out\n self.up3 = StackDecoder(128+1024, 128, 64, kernel_size=3) # 
Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out\n self.up2 = StackDecoder(64+256, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out\n self.up1 = StackDecoder(24+128, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out\n self.classify = torch.nn.Conv2d(24+3, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out\n self.final_out = torch.nn.Tanh()\n\n def _crop_concat(self, upsampled, bypass):\n \"\"\"\n Crop y to the (h, w) of x and concat them.\n Used for the expansive path.\n Returns:\n The concatenated tensor\n \"\"\"\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)\n\n def forward(self,x, input_to_net):\n #print(input_to_net.shape)\n output_5,output_6,output_9,output_11,output_15 = input_to_net\n \n #print(x.shape)\n \n out = x # ;print('x ',x.size())\n #\n down1, out = self.down1(out) ##;\n #print('down1',down1.shape) #256\n down2, out = self.down2(out) # ;\n #print('down2',down2.shape) #128\n down3, out = self.down3(out) # ;\n #print('down3',down3.shape) #64\n down4, out = self.down4(out) # ;\n #print('down4',down4.shape) #32\n down5, out = self.down5(out) # ;\n #print('down5',down5.shape) #16\n down6, out = self.down6(out) # ;\n #print('down6',down6.shape) #8\n pass # ;\n #print('out ',out.shape)\n\n out = self.center(out)\n #print('0',out.shape)\n out = self.up6(down6, out)\n #print('1',out.shape)\n out = self.up5(down5, out)\n out = torch.cat((out,output_5 ),1 )\n #print('2',out.shape)\n out = self.up4(down4, out)\n out = torch.cat((out,output_6 ),1 )\n #print('3',out.shape)\n out = self.up3(down3, out)\n out = torch.cat((out,output_9 ),1 )\n #print('4',out.shape)\n out = self.up2(down2, out)\n out = torch.cat((out,output_11 ),1 )\n #print('5',out.shape)\n out = self.up1(down1, out)\n # 1024\n out = torch.cat((out,output_15 ),1 )\n #print('6',out.shape)\n out = self.final_out(self.classify(out))\n out = torch.reshape(out,(-1, self.output_nc, 256,256))#, dim=1)\n return out\n\n#%%Unet for spade8\n \nclass UNet768PIXSPADE8SM(torch.nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UNet768PIXSPADE8SM, self).__init__()\n # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n # C, H, W = in_shape\n # assert(C==3)\n print('UNET 768 SPADE')\n self.output_nc = output_nc\n\n # 1024\n self.down1 = StackEncoder(1, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out\n self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out\n self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out\n self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out\n self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out\n self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out\n\n self.center = torch.nn.Sequential(\n ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out\n )\n\n # x_big_channels, x_channels, y_channels\n self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 
10 out\n self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out\n self.up4 = StackDecoder(256+128, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out\n self.up3 = StackDecoder(128+128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out\n self.up2 = StackDecoder(64+32, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out\n self.up1 = StackDecoder(24+16, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out\n self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out\n self.final_out = torch.nn.Tanh()\n\n def _crop_concat(self, upsampled, bypass):\n \"\"\"\n Crop y to the (h, w) of x and concat them.\n Used for the expansive path.\n Returns:\n The concatenated tensor\n \"\"\"\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)\n\n def forward(self,x, input_to_net):\n #print(input_to_net.shape)\n output_5,output_6,output_9,output_11,output_15 = input_to_net\n \n #print(x.shape)\n \n out = x # ;print('x ',x.size())\n #\n down1, out = self.down1(out) ##;\n #print('down1',down1.shape) #256\n down2, out = self.down2(out) # ;\n #print('down2',down2.shape) #128\n down3, out = self.down3(out) # ;\n #print('down3',down3.shape) #64\n down4, out = self.down4(out) # ;\n #print('down4',down4.shape) #32\n down5, out = self.down5(out) # ;\n #print('down5',down5.shape) #16\n down6, out = self.down6(out) # ;\n #print('down6',down6.shape) #8\n pass # ;\n #print('out ',out.shape)\n\n out = self.center(out)\n #print('0',out.shape)\n out = self.up6(down6, out)\n #print('1',out.shape)\n out = self.up5(down5, out)\n out = torch.cat((out,output_5 ),1 )\n #print('2',out.shape)\n out = self.up4(down4, out)\n out = torch.cat((out,output_6 ),1 )\n #print('3',out.shape)\n out = self.up3(down3, out)\n out = torch.cat((out,output_9 ),1 )\n #print('4',out.shape)\n out = self.up2(down2, out)\n out = torch.cat((out,output_11 ),1 )\n #print('5',out.shape)\n out = self.up1(down1, out)\n # 1024\n #out = torch.cat((out,output_15 ),1 )\n #print('6',out.shape)\n out = self.final_out(self.classify(out))\n out = torch.reshape(out,(-1, self.output_nc, 256,256))#, dim=1)\n return out\n\n \n\n \n\n"
] | [
[
"torch.nn.functional.upsample",
"torch.optim.lr_scheduler.LambdaLR",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.cat",
"torch.zeros",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.interpolate",
"torch.cuda.is_available",
"torch.nn.ReplicationPad2d",
"torch.nn.Dropout",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.reshape",
"torch.nn.Sigmoid",
"torch.tensor",
"torch.rand",
"torch.arange",
"torch.nn.functional.max_pool2d",
"numpy.zeros",
"torch.nn.functional.pad",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.init.xavier_normal_",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU",
"torch.nn.functional.leaky_relu",
"torch.nn.BatchNorm2d",
"torch.flip",
"torch.nn.ReflectionPad2d",
"torch.nn.Tanh",
"numpy.ones",
"torch.nn.Upsample",
"torch.nn.init.orthogonal_",
"torch.nn.ReLU",
"torch.nn.MSELoss",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
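
The networks.py row above opens with a WGAN-GP style gradient penalty (cal_gradient_penalty). Below is a minimal, self-contained PyTorch sketch of that idea, assuming a generic convolutional critic; critic, real, fake and lambda_gp are illustrative names, not identifiers taken from the repository row.

import torch
import torch.nn as nn

def gradient_penalty(critic, real, fake, lambda_gp=10.0):
    # Sample points on straight lines between real and fake images.
    alpha = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    interp = (alpha * real + (1.0 - alpha) * fake).requires_grad_(True)
    scores = critic(interp)
    # d(scores)/d(interp), kept in the graph so the penalty itself is differentiable.
    grads = torch.autograd.grad(
        outputs=scores, inputs=interp,
        grad_outputs=torch.ones_like(scores),
        create_graph=True, retain_graph=True, only_inputs=True,
    )[0]
    grads = grads.view(real.size(0), -1)
    # Push the gradient norm towards 1 at the interpolated points.
    return lambda_gp * ((grads.norm(2, dim=1) - 1.0) ** 2).mean()

if __name__ == "__main__":
    critic = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1), nn.LeakyReLU(0.2),
        nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 1),
    )
    real = torch.randn(4, 3, 16, 16)
    fake = torch.randn(4, 3, 16, 16)
    print(gradient_penalty(critic, real, fake).item())

In a WGAN-GP setup this term would simply be added to the critic loss, e.g. d_loss = fake_scores.mean() - real_scores.mean() + gradient_penalty(critic, real, fake).
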
xxia-kathy/models | [
"157faae1af5d89c53a5699b601dc68fee274ef09"
] | [
"official/core/base_trainer_test.py"
] | [
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow_models.core.trainers.trainer.\"\"\"\n# pylint: disable=g-direct-tensorflow-import\n\nimport os\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import strategy_combinations\nfrom official.core import base_trainer as trainer_lib\nfrom official.core import train_lib\nfrom official.modeling.hyperparams import config_definitions as cfg\nfrom official.utils.testing import mock_task\n\n\ndef all_strategy_combinations():\n return combinations.combine(\n distribution=[\n strategy_combinations.default_strategy,\n strategy_combinations.tpu_strategy,\n strategy_combinations.one_device_strategy_gpu,\n ],\n mode='eager',\n )\n\n\nclass TrainerTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super().setUp()\n self._config = cfg.ExperimentConfig(\n trainer=cfg.TrainerConfig(\n optimizer_config=cfg.OptimizationConfig({\n 'optimizer': {\n 'type': 'sgd'\n },\n 'learning_rate': {\n 'type': 'constant'\n }\n })))\n\n def create_test_trainer(self, config, model_dir=None):\n task = mock_task.MockTask(config.task, logging_dir=model_dir)\n ckpt_exporter = train_lib.maybe_create_best_ckpt_exporter(config, model_dir)\n trainer = trainer_lib.Trainer(\n config,\n task,\n model=task.build_model(),\n optimizer=trainer_lib.create_optimizer(config.trainer, config.runtime),\n checkpoint_exporter=ckpt_exporter)\n return trainer\n\n @combinations.generate(all_strategy_combinations())\n def test_trainer_train(self, distribution):\n with distribution.scope():\n trainer = self.create_test_trainer(self._config)\n logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))\n self.assertIn('training_loss', logs)\n self.assertIn('learning_rate', logs)\n\n @combinations.generate(all_strategy_combinations())\n def test_trainer_validate(self, distribution):\n with distribution.scope():\n trainer = self.create_test_trainer(self._config)\n logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))\n self.assertIn('validation_loss', logs)\n self.assertEqual(logs['acc'], 5. 
* distribution.num_replicas_in_sync)\n\n @combinations.generate(\n combinations.combine(\n mixed_precision_dtype=['float32', 'bfloat16', 'float16'],\n loss_scale=[None, 'dynamic', 128, 256],\n ))\n def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):\n config = cfg.ExperimentConfig(\n runtime=cfg.RuntimeConfig(\n mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),\n trainer=cfg.TrainerConfig(\n optimizer_config=cfg.OptimizationConfig({\n 'optimizer': {\n 'type': 'sgd'\n },\n 'learning_rate': {\n 'type': 'constant'\n }\n })))\n trainer = self.create_test_trainer(config)\n if mixed_precision_dtype != 'float16':\n self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)\n elif mixed_precision_dtype == 'float16' and loss_scale is None:\n self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)\n else:\n self.assertIsInstance(\n trainer.optimizer,\n tf.keras.mixed_precision.experimental.LossScaleOptimizer)\n\n metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))\n self.assertIn('training_loss', metrics)\n\n @combinations.generate(all_strategy_combinations())\n def test_export_best_ckpt(self, distribution):\n config = cfg.ExperimentConfig(\n trainer=cfg.TrainerConfig(\n best_checkpoint_export_subdir='best_ckpt',\n best_checkpoint_eval_metric='acc',\n optimizer_config=cfg.OptimizationConfig({\n 'optimizer': {\n 'type': 'sgd'\n },\n 'learning_rate': {\n 'type': 'constant'\n }\n })))\n model_dir = self.get_temp_dir()\n trainer = self.create_test_trainer(config, model_dir=model_dir)\n trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))\n trainer.evaluate(tf.convert_to_tensor(1, dtype=tf.int32))\n self.assertTrue(\n tf.io.gfile.exists(os.path.join(model_dir, 'best_ckpt', 'info.json')))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
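
The base_trainer_test.py row above checks that a float16 run with a loss scale wraps the optimizer in a LossScaleOptimizer while every other configuration keeps plain SGD. The following small absl-parameterized sketch illustrates that check, assuming a recent TF 2.x where tf.keras.mixed_precision.LossScaleOptimizer is available (the repository code uses the older experimental variant); make_optimizer and the test class are illustrative, not part of the repository row.

from absl.testing import parameterized
import tensorflow as tf

def make_optimizer(mixed_precision_dtype, loss_scale):
    # Hypothetical helper mirroring the behavior exercised by test_configure_optimizer.
    opt = tf.keras.optimizers.SGD(learning_rate=0.01)
    if mixed_precision_dtype == 'float16' and loss_scale is not None:
        opt = tf.keras.mixed_precision.LossScaleOptimizer(opt)
    return opt

class OptimizerConfigTest(tf.test.TestCase, parameterized.TestCase):

    @parameterized.parameters(
        ('float32', None), ('float16', None), ('float16', 'dynamic'))
    def test_optimizer_type(self, dtype, loss_scale):
        opt = make_optimizer(dtype, loss_scale)
        if dtype == 'float16' and loss_scale is not None:
            self.assertIsInstance(opt, tf.keras.mixed_precision.LossScaleOptimizer)
        else:
            self.assertIsInstance(opt, tf.keras.optimizers.SGD)

if __name__ == '__main__':
    tf.test.main()
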
prkriley/neurips2019_intrus | [
"3e36930246347e6b80a583d2ab378054ea3b9f7a"
] | [
"lib/models.py"
] | [
"\"\"\"\nTransformer encoder / decoder layer chain\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nimport lib.layers\nfrom . import layers, ops\nfrom .data import linelen\n\n\nclass Transformer:\n\n def __init__(\n self, name, inp_voc, out_voc,\n logits_bias=False, share_emb=False, dst_rand_offset=False,\n rescale_emb=True, inp_emb_bias=False, emb_inp_device='', emb_out_device='',\n **kwargs\n ):\n \"\"\"\n Transformer-based model that predicts logp(insert(i, token) | x, y)\n :type inp_voc: lib.voc.Voc\n :type out_voc: lib.voc.Voc\n :param logits_bias: if True, final logits layer has bias term.\n :param share_emb: if True, input and output embeddings will use the same matrix.\n Useful for in case of shared vocabularies or when there is a\n :param dst_rand_offset: if True, adds a random offset to output embeddings, same for all positions\n :param kwargs: other hyperparameters - see TransformerChain and TransformerEmbedding\n \"\"\"\n self.name = name\n self.inp_voc, self.out_voc = inp_voc, out_voc\n self.dst_rand_offset = dst_rand_offset\n self.hp = kwargs\n\n emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512))\n max_voc_size = max(len(inp_voc), len(out_voc))\n\n with tf.variable_scope(self.name) as self.scope:\n # Embeddings\n self.emb_inp = layers.TransformerEmbedding(\n 'emb_inp', max_voc_size if share_emb else len(inp_voc), emb_size,\n bias=inp_emb_bias, rescale=rescale_emb, device=emb_inp_device)\n\n self.emb_out = layers.TransformerEmbedding(\n 'emb_out', max_voc_size if share_emb else len(out_voc), emb_size,\n matrix=self.emb_inp.emb.mat if share_emb else None,\n rescale=rescale_emb, device=emb_out_device)\n\n # Model body\n self.encoder = layers.TransformerChain('enc', **kwargs)\n self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'], **kwargs)\n\n # logits: token insertions plus one extra logit to predict position where to insert\n self.logits = layers.Dense(\n 'logits', kwargs['hid_size'], len(out_voc) + 1,\n matrix=tf.transpose(self.emb_out.emb.mat) if kwargs.get('dwwt', False) else None,\n bias=None if logits_bias else 0\n )\n\n def _get_batch_sample(self):\n \"\"\" A minimal example of model input data \"\"\"\n return [(\"i saw a cat\", \"i write the code\")]\n\n def make_encoder_batch_ph(self):\n return {\n 'inp': tf.placeholder('int32', [None, None]),\n 'inp_len': tf.placeholder('int32', [None])\n }\n\n def make_feed_dict(self, batch, **kwargs):\n \"\"\" Take input data strings, return a dict { key: np.array(value) } \"\"\"\n inp_lines, out_lines = zip(*batch)\n inp_len = [linelen(line) for line in inp_lines]\n out_len = [linelen(line) for line in out_lines]\n return {\n 'inp': self.inp_voc.to_matrix(inp_lines),\n 'inp_len': np.array(inp_len, 'int32'),\n 'out': self.out_voc.to_matrix(out_lines),\n 'out_len': np.array(out_len, 'int32')\n }\n\n def encode(self, batch, is_train):\n \"\"\" Take placeholders for data batch, return encoder state \"\"\"\n with tf.name_scope(self.name), ops.dropout_scope(is_train):\n inp = batch['inp'] # [batch_size * ninp]\n inp_len = batch.get('inp_len', ops.infer_length(inp, self.inp_voc.eos)) # [batch]\n attn_mask = ops.make_attn_mask(inp, inp_len) # [batch_size, 1, 1, ninp]\n out, _ = self.encoder(self.emb_inp(inp), self_attn_mask=attn_mask)\n # ^-- [batch_size, ninp, hid_size]\n return dict(out=out, attn_mask=attn_mask)\n\n def compute_action_logprobs(self, batch, is_train, enc=None, temperature=None):\n \"\"\"\n Compute log-probabilities for all possible actions (aka agent policy)\n :param batch: a dict with\n - 
token matrix 'out'[batch_size, output_length]\n - optional length vector out_len[batch_size]\n :param is_train: whether or not to use training behavior (e.g. dropout)\n :returns: {'insert':logp(insert(i, c) | x, y), 'finish':logp(terminate| x, y)}\n \"\"\"\n enc = self.encode(batch, is_train) if enc is None else enc\n with tf.name_scope(self.name), ops.dropout_scope(is_train):\n out = batch['out'] # partial translation, shape: [batch_size * nout]\n out_len = batch.get('out_len', ops.infer_length(out, self.out_voc.eos)) # [batch]\n\n # embedding. Note: at this point, a special \"zero\" vector is added\n # to the first position hence length is increased by 1\n\n out_padded = tf.concat([tf.zeros_like(out[:, :1]), out], axis=1) # [batch_size, nout+1]\n dec_emb = self.emb_out(out_padded, offset='random' if self.dst_rand_offset else 0)\n # ^-- shape: [batch_size, nout + 1]\n\n # run decoder\n attn_mask = ops.make_attn_mask(out_padded, out_len + 1) # [batch_size, 1, 1, nout + 1]\n dec_out, _ = self.decoder(dec_emb, self_attn_mask=attn_mask,\n enc_out=enc['out'], enc_attn_mask=enc['attn_mask'])\n # ^-- [batch_size, nout + 1, hid_size]\n\n logits = self.logits(dec_out) # [batch_size, nout + 1, voc_size + 1]\n if temperature is not None:\n logits /= temperature\n\n # compute log-probabilities for actions\n\n # position log-probabilities, logP(insert(pos, *) | ...)\n # used to predict position of next insert and termination condition (EOS)\n position_logits = logits[:, :, -1] # [batch_size, nout + 1]\n\n position_mask = tf.cast(attn_mask, tf.bool)[:, 0, 0, :] # [batch_size, nout + 1]\n position_logits = tf.where(position_mask, position_logits,\n tf.fill(tf.shape(position_logits), -1e9))\n position_logp = tf.nn.log_softmax(position_logits, axis=-1) # [batch_size, n_out]\n\n # two actions: insert - at any non-EOS position - or finish - defined as inserting at EOS\n finish_logp = tf.gather_nd(position_logp,\n tf.stack([tf.range(tf.shape(out_len)[0]), out_len], axis=1))\n # ^-- [batch_size]\n\n insert_position_logp = tf.where(position_mask[:, 1:], position_logp[:, :-1],\n tf.fill(tf.shape(position_logp[:, :-1]), -1e9))\n # ^-- [batch_size, nout]\n\n # insertion log-probabilities:\n # logP(insert(pos, tok) | ...) = logP(insert(pos, *) | ...) 
+ logP(insert(pos, tok) | insert(pos, *), ...)\n\n token_logits = logits[:, :-1, :len(self.out_voc)] # [batch_size, n_out, voc_size]\n token_logp_given_position = tf.nn.log_softmax(token_logits, axis=-1)\n # note: we do not need mask on token_logp_given_position cuz mask is already applied to insert_position_logp\n\n insert_logp = insert_position_logp[:, :, None] + token_logp_given_position\n\n return {\n # group 1 (exps sum to 1)\n 'insert': insert_logp, # [batch_size, nout, voc_size]\n 'finish': finish_logp, # [batch_size]\n }\n\n\nclass ImgToSeqTransformer(Transformer):\n def __init__(\n self, name, out_voc, inp_w, inp_h, inp_channels=3, make_encoder=lib.layers.ImageEncoder,\n logits_bias=False, share_emb=False, dst_rand_offset=False,\n rescale_emb=True, emb_out_device='',\n **kwargs\n ):\n \"\"\"\n Transformer-based model that predicts logp(insert(i, token) | x, y)\n :type out_voc: lib.voc.Voc\n :param logits_bias: if True, final logits layer has bias term.\n :param dst_rand_offset: if True, adds a random offset to output embeddings, same for all positions\n :param kwargs: other hyperparameters - see TransformerChain and TransformerEmbedding\n \"\"\"\n self.name = name\n self.inp_voc, self.out_voc = out_voc, out_voc # inp voc is a stub, the same as out_voc\n self.dst_rand_offset = dst_rand_offset\n self.hp = kwargs\n self.w = inp_w\n self.h = inp_h\n self.inp_channels = inp_channels\n\n emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512))\n max_voc_size = len(out_voc)\n\n with tf.variable_scope(self.name) as self.scope:\n # Embeddings\n\n self.emb_out = layers.TransformerEmbedding(\n 'emb_out', max_voc_size if share_emb else len(out_voc), emb_size,\n matrix=self.emb_inp.emb.mat if share_emb else None,\n rescale=rescale_emb, device=emb_out_device)\n\n # Model body\n self.encoder = make_encoder('enc', inp_h=inp_w, inp_w=inp_h, inp_channels=inp_channels, **kwargs)\n\n enc_out_shape = self.encode(self.make_encoder_batch_ph(), True)['out'].shape\n assert enc_out_shape.ndims == 3 and enc_out_shape[-1].value is not None, \\\n \"encoder output shape must be a 3d tensor with fixed num units, \" \\\n \"got shape {}\".format(enc_out_shape)\n\n self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'],\n attn_input_sizes={'enc': enc_out_shape[-1].value},\n **kwargs)\n\n # logits: token insertions plus one extra logit to predict position where to insert\n self.logits = layers.Dense(\n 'logits', kwargs['hid_size'], len(out_voc) + 1,\n bias=None if logits_bias else 0\n )\n\n\n def _get_batch_sample(self):\n \"\"\" A minimal example of model input data \"\"\"\n return [(np.zeros((self.h, self.w, self.inp_channels)), 'A cat sat')]\n\n def make_feed_dict(self, batch, **kwargs):\n \"\"\" Take input data strings, return a dict { key: np.array(value) } \"\"\"\n inp_imgs, out_lines = zip(*batch)\n\n out_len = [linelen(line) for line in out_lines]\n return {\n 'inp': np.array(inp_imgs, 'float32'),\n 'out': self.out_voc.to_matrix(out_lines),\n 'out_len': np.array(out_len, 'int32')\n }\n\n def make_encoder_batch_ph(self):\n return {\n 'inp': tf.placeholder('float32', [None, self.h, self.w, self.inp_channels]),\n }\n\n def encode(self, batch, is_train):\n \"\"\" Take placeholders for data batch, return encoder state \"\"\"\n with tf.name_scope(self.name), ops.dropout_scope(is_train):\n inp = batch['inp'] # [batch_size * ninp]\n\n out = self.encoder(inp)\n assert out.shape[-1] is not None\n out_shape = tf.shape(out)\n\n out = tf.reshape(out, [out_shape[0], -1, out.shape[-1]])\n\n attn_mask = 
tf.ones((out_shape[0], 1, 1, out_shape[1] * out_shape[2])) # [batch_size, 1, 1, ninp]\n\n return dict(out=out, attn_mask=attn_mask)\n"
] | [
[
"tensorflow.transpose",
"tensorflow.nn.log_softmax",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.ones",
"tensorflow.zeros_like",
"tensorflow.name_scope",
"tensorflow.variable_scope",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Prtfw/PySyft | [
"35012f5bf55628bb19761d5f40d03181fbbb1766"
] | [
"test/torch/pointers/test_pointer_tensor.py"
] | [
"import torch\nimport torch as th\nimport syft\n\nfrom syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor\nfrom syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor\nfrom syft.generic.pointers.pointer_tensor import PointerTensor\nimport pytest\n\n\ndef test_init(workers):\n alice, me = workers[\"alice\"], workers[\"me\"]\n pointer = PointerTensor(id=1000, location=alice, owner=me)\n pointer.__str__()\n\n\ndef test_create_pointer():\n x = torch.Tensor([1, 2])\n x.create_pointer()\n\n\ndef test_send_default_garbage_collector_true(workers):\n \"\"\"\n Remote tensor should be garbage collected by default on\n deletion of the Pointer tensor pointing to remote tensor\n \"\"\"\n alice = workers[\"alice\"]\n\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice)\n assert x_ptr.child.garbage_collect_data\n\n\ndef test_send_garbage_collect_data_false(workers):\n \"\"\"\n Remote tensor should be not garbage collected on\n deletion of the Pointer tensor pointing to remote tensor\n \"\"\"\n alice = workers[\"alice\"]\n\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice)\n x_ptr.garbage_collection = False\n assert x_ptr.child.garbage_collect_data == False\n\n\ndef test_send_gc_false(workers):\n \"\"\"\n Remote tensor should be not garbage collected on\n deletion of the Pointer tensor pointing to remote tensor\n \"\"\"\n alice = workers[\"alice\"]\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice)\n x_ptr.gc = False\n assert x_ptr.child.garbage_collect_data == False\n assert x_ptr.gc == False, \"property GC is not in sync\"\n assert x_ptr.garbage_collection == False, \"property garbage_collection is not in sync\"\n\n\ndef test_send_gc_true(workers):\n \"\"\"\n Remote tensor by default is garbage collected on\n deletion of Pointer Tensor\n \"\"\"\n alice = workers[\"alice\"]\n\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice)\n\n assert x_ptr.gc == True\n\n\ndef test_send_disable_gc(workers):\n \"\"\"Pointer tensor should be not garbage collected.\"\"\"\n alice = workers[\"alice\"]\n\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice).disable_gc\n assert x_ptr.child.garbage_collect_data == False\n assert x_ptr.gc == False, \"property GC is not in sync\"\n assert x_ptr.garbage_collection == False, \"property garbage_collection is not in sync\"\n\n\ndef test_send_get(workers):\n \"\"\"Test several send get usages\"\"\"\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n # simple send\n x = torch.Tensor([1, 2])\n x_ptr = x.send(bob)\n x_back = x_ptr.get()\n assert (x == x_back).all()\n\n # send with variable overwriting\n x = torch.Tensor([1, 2])\n x = x.send(bob)\n x_back = x.get()\n assert (torch.Tensor([1, 2]) == x_back).all()\n\n # double send\n x = torch.Tensor([1, 2])\n x_ptr = x.send(bob)\n x_ptr_ptr = x_ptr.send(alice)\n x_ptr_back = x_ptr_ptr.get()\n x_back_back = x_ptr_back.get()\n assert (x == x_back_back).all()\n\n # double send with variable overwriting\n x = torch.Tensor([1, 2])\n x = x.send(bob)\n x = x.send(alice)\n x = x.get()\n x_back = x.get()\n assert (torch.Tensor([1, 2]) == x_back).all()\n\n # chained double send\n x = torch.Tensor([1, 2])\n x = x.send(bob).send(alice)\n x_back = x.get().get()\n assert (torch.Tensor([1, 2]) == x_back).all()\n\n\ndef test_inplace_send_get(workers):\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1.0, -1.0, 3.0, 4.0])\n tensor_ptr = tensor.send_(bob)\n\n assert tensor_ptr.id == tensor.id\n assert id(tensor_ptr) == id(tensor)\n\n tensor_back = tensor_ptr.get_()\n\n assert 
tensor_back.id == tensor_ptr.id\n assert tensor_back.id == tensor.id\n assert id(tensor_back) == id(tensor)\n assert id(tensor_back) == id(tensor)\n\n assert (tensor_back == tensor).all()\n\n\ndef test_repeated_send(workers):\n \"\"\"Tests that repeated calls to .send(bob) works gracefully.\n Previously garbage collection deleted the remote object\n when .send() was called twice. This test ensures the fix still\n works.\"\"\"\n\n bob = workers[\"bob\"]\n\n # create tensor\n x = torch.Tensor([1, 2])\n\n # send tensor to bob\n x_ptr = x.send(bob)\n\n # send tensor again\n x_ptr = x.send(bob)\n\n # ensure bob has tensor\n assert x.id in bob._objects\n\n\ndef test_remote_autograd(workers):\n \"\"\"Tests the ability to backpropagate gradients on a remote\n worker.\"\"\"\n\n bob = workers[\"bob\"]\n\n # TEST: simple remote grad calculation\n\n # create a tensor\n x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)\n\n # send tensor to bob\n x = x.send(bob)\n\n # do some calculation\n y = (x + x).sum()\n\n # backpropagate on remote machine\n y.backward()\n\n # check that remote gradient is correct\n x_grad = bob._objects[x.id_at_location].grad\n x_grad_target = torch.ones(4).float() + 1\n assert (x_grad == x_grad_target).all()\n\n # TEST: Ensure remote grad calculation gets properly serded\n\n # create tensor\n x = torch.tensor([1, 2, 3, 4.0], requires_grad=True).send(bob)\n\n # compute function\n y = x.sum()\n\n # backpropagate\n y.backward()\n\n # get the gradient created from backpropagation manually\n x_grad = bob._objects[x.id_at_location].grad\n\n # get the entire x tensor (should bring the grad too)\n x = x.get()\n\n # make sure that the grads match\n assert (x.grad == x_grad).all()\n\n\ndef test_gradient_send_recv(workers):\n \"\"\"Tests that gradients are properly sent and received along\n with their tensors.\"\"\"\n\n bob = workers[\"bob\"]\n\n # create a tensor\n x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)\n\n # create gradient on tensor\n x.sum().backward(th.tensor(1.0))\n\n # save gradient\n orig_grad = x.grad\n\n # send and get back\n t = x.send(bob).get()\n\n # check that gradient was properly serde\n assert (t.grad == orig_grad).all()\n\n\ndef test_method_on_attribute(workers):\n\n bob = workers[\"bob\"]\n\n # create remote object with children\n x = torch.Tensor([1, 2, 3])\n x = syft.LoggingTensor().on(x).send(bob)\n\n # call method on data tensor directly\n x.child.point_to_attr = \"child.child\"\n y = x.add(x)\n assert isinstance(y.get(), torch.Tensor)\n\n # call method on loggingtensor directly\n x.child.point_to_attr = \"child\"\n y = x.add(x)\n y = y.get()\n assert isinstance(y.child, syft.LoggingTensor)\n\n # # call method on zeroth attribute\n # x.child.point_to_attr = \"\"\n # y = x.add(x)\n # y = y.get()\n #\n # assert isinstance(y, torch.Tensor)\n # assert isinstance(y.child, syft.LoggingTensor)\n # assert isinstance(y.child.child, torch.Tensor)\n\n # call .get() on pinter to attribute (should error)\n x.child.point_to_attr = \"child\"\n try:\n x.get()\n except syft.exceptions.CannotRequestObjectAttribute as e:\n assert True\n\n\ndef test_grad_pointer(workers):\n \"\"\"Tests the automatic creation of a .grad pointer when\n calling .send() on a tensor with requires_grad==True\"\"\"\n\n bob = workers[\"bob\"]\n\n x = torch.tensor([1, 2, 3.0], requires_grad=True).send(bob)\n y = (x + x).sum()\n y.backward()\n\n assert (bob._objects[x.id_at_location].grad == torch.tensor([2, 2, 2.0])).all()\n\n\ndef test_move(workers):\n alice, bob, james, me = workers[\"alice\"], 
workers[\"bob\"], workers[\"james\"], workers[\"me\"]\n\n x = torch.tensor([1, 2, 3, 4, 5]).send(bob)\n\n assert x.id_at_location in bob._objects\n assert x.id_at_location not in alice._objects\n\n x.move(alice)\n\n assert x.id_at_location in bob._objects\n assert x.id_at_location in alice._objects\n\n x = torch.tensor([1.0, 2, 3, 4, 5], requires_grad=True).send(bob)\n\n assert x.id_at_location in bob._objects\n assert x.id_at_location not in alice._objects\n\n x.move(alice)\n\n assert x.id_at_location in bob._objects\n assert x.id_at_location in alice._objects\n\n alice.clear_objects()\n bob.clear_objects()\n x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)\n x.move(alice)\n\n assert len(alice._objects) == 1\n\n # Test .move on remote objects\n\n james.clear_objects()\n x = th.tensor([1.0]).send(james)\n remote_x = james._objects[x.id_at_location]\n remote_ptr = remote_x.send(bob)\n assert remote_ptr.id in james._objects.keys()\n remote_ptr2 = remote_ptr.move(alice)\n assert remote_ptr2.id in james._objects.keys()\n\n # Test .move back to myself\n\n alice.clear_objects()\n bob.clear_objects()\n x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)\n y = x.move(alice)\n z = y.move(me)\n assert (z == x).all()\n\n\ndef test_combine_pointers(workers):\n \"\"\"\n Ensure that the sy.combine_pointers works as expected\n \"\"\"\n\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n x = th.tensor([1, 2, 3, 4, 5]).send(bob)\n y = th.tensor([1, 2, 3, 4, 5]).send(alice)\n\n a = x.combine(y)\n b = a + a\n\n c = b.get(sum_results=True)\n assert (c == th.tensor([4, 8, 12, 16, 20])).all()\n\n b = a + a\n c = b.get(sum_results=False)\n assert len(c) == 2\n assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all\n\n\ndef test_remote_to_cpu_device(workers):\n \"\"\"Ensure remote .to cpu works\"\"\"\n device = torch.device(\"cpu\")\n bob = workers[\"bob\"]\n\n x = th.tensor([1, 2, 3, 4, 5]).send(bob)\n x.to(device)\n\n\ndef test_get_remote_shape(workers):\n \"\"\"Test pointer.shape functionality\"\"\"\n bob = workers[\"bob\"]\n # tensor directly sent: shape stored at sending\n x = th.tensor([1, 2, 3, 4, 5]).send(bob)\n assert x.shape == torch.Size([5])\n # result of an operation: need to make a call to the remote worker\n y = x + x\n assert y.shape == torch.Size([5])\n\n\ndef test_remote_function_with_multi_ouput(workers):\n \"\"\"\n Functions like .split return several tensors, registration and response\n must be made carefully in this case\n \"\"\"\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n r_ptr = torch.split(ptr, 2)\n assert (r_ptr[0].get() == torch.tensor([1, 2.0])).all()\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n max_value, argmax_idx = torch.max(ptr, 0)\n\n assert max_value.get().item() == 4.0\n assert argmax_idx.get().item() == 3\n\n\ndef test_raising_error_when_item_func_called(workers):\n pointer = PointerTensor(id=1000, location=workers[\"alice\"], owner=workers[\"me\"])\n with pytest.raises(RuntimeError):\n pointer.item()\n\n\ndef test_fix_prec_on_pointer_tensor(workers):\n \"\"\"\n Ensure .fix_precision() works as expected.\n Also check that fix_precision() is not inplace.\n \"\"\"\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n\n ptr_fp = ptr.fix_precision()\n\n remote_tensor = bob._objects[ptr.id_at_location]\n remote_fp_tensor = bob._objects[ptr_fp.id_at_location]\n\n # check that fix_precision is not inplace\n assert (remote_tensor == tensor).all()\n\n assert isinstance(ptr.child, 
PointerTensor)\n assert isinstance(remote_fp_tensor.child, FixedPrecisionTensor)\n\n\ndef test_fix_prec_on_pointer_of_pointer(workers):\n \"\"\"\n Ensure .fix_precision() works along a chain of pointers.\n \"\"\"\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n ptr = ptr.send(alice)\n\n ptr = ptr.fix_precision()\n\n alice_tensor = alice._objects[ptr.id_at_location]\n remote_tensor = bob._objects[alice_tensor.id_at_location]\n\n assert isinstance(ptr.child, PointerTensor)\n assert isinstance(remote_tensor.child, FixedPrecisionTensor)\n\n\ndef test_float_prec_on_pointer_tensor(workers):\n \"\"\"\n Ensure .float_precision() works as expected.\n \"\"\"\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n ptr = ptr.fix_precision()\n\n ptr = ptr.float_precision()\n remote_tensor = bob._objects[ptr.id_at_location]\n\n assert isinstance(ptr.child, PointerTensor)\n assert isinstance(remote_tensor, torch.Tensor)\n\n\ndef test_float_prec_on_pointer_of_pointer(workers):\n \"\"\"\n Ensure .float_precision() works along a chain of pointers.\n \"\"\"\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n ptr = ptr.send(alice)\n ptr = ptr.fix_precision()\n\n ptr = ptr.float_precision()\n\n alice_tensor = alice._objects[ptr.id_at_location]\n remote_tensor = bob._objects[alice_tensor.id_at_location]\n\n assert isinstance(ptr.child, PointerTensor)\n assert isinstance(remote_tensor, torch.Tensor)\n\n\ndef test_share_get(workers):\n \"\"\"\n Ensure .share() works as expected.\n \"\"\"\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1, 2, 3])\n ptr = tensor.send(bob)\n\n ptr = ptr.share()\n remote_tensor = bob._objects[ptr.id_at_location]\n\n assert isinstance(ptr.child, PointerTensor)\n assert isinstance(remote_tensor.child, AdditiveSharingTensor)\n\n\ndef test_registration_of_action_on_pointer_of_pointer(workers):\n \"\"\"\n Ensure actions along a chain of pointers are registered as expected.\n \"\"\"\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n ptr = ptr.send(alice)\n ptr_action = ptr + ptr\n\n assert len(alice._objects) == 2\n assert len(bob._objects) == 2\n\n\ndef test_setting_back_grad_to_origin_after_send(workers):\n \"\"\"\n Calling .backward() on a tensor sent using `.send(..., requires_grad=True)`\n should update the origin tensor gradient\n \"\"\"\n me = workers[\"me\"]\n alice = workers[\"alice\"]\n\n with me.registration_enabled():\n x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)\n y = x + x\n me.register_obj(y) # registration on the local worker is sometimes buggy\n\n y_ptr = y.send(alice, requires_grad=True)\n z_ptr = y_ptr * 2\n\n z = z_ptr.sum()\n z.backward()\n\n assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()\n\n\ndef test_setting_back_grad_to_origin_after_move(workers):\n \"\"\"\n Calling .backward() on a tensor moved using `.move(..., requires_grad=True)`\n should update the origin tensor gradient\n \"\"\"\n me = workers[\"me\"]\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n with me.registration_enabled():\n x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)\n y = x + x\n me.register_obj(y) # registration on the local worker is sometimes buggy\n\n y_ptr = y.send(alice, requires_grad=True)\n z_ptr = y_ptr * 2\n\n z_ptr2 = z_ptr.move(bob, requires_grad=True)\n z = z_ptr2.sum()\n z.backward()\n\n 
assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()\n"
] | [
[
"torch.Size",
"torch.ones",
"torch.max",
"torch.Tensor",
"torch.tensor",
"torch.split",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MissPenguin/Paddle | [
"266fcbe0aed3e566c167ea8de5114f62c428c013"
] | [
"python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport unittest\nimport abc\nimport os\nimport enum\nimport logging\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.initializer import NumpyArrayInitializer\nimport paddle.fluid.core as core\nfrom paddle import compat as cpt\nimport paddle.inference as paddle_infer\nfrom typing import Optional, List, Callable, Dict, Any, Set\nfrom program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\n\nclass SkipReasons(enum.Enum):\n # Paddle not support, but trt support, we need to add the feature.\n TRT_NOT_IMPLEMENTED = 0\n # TRT not support.\n TRT_NOT_SUPPORT = 1\n\n\nclass AutoScanTest(unittest.TestCase):\n def __init__(self, methodName='runTest'):\n np.random.seed(1024)\n paddle.enable_static()\n super(AutoScanTest, self).__init__(methodName)\n self.skip_cases = []\n\n @abc.abstractmethod\n def sample_program_configs(self) -> List[ProgramConfig]:\n '''\n Generate all config with the combination of different Input tensor shape and\n different Attr values.\n '''\n raise NotImplementedError\n\n @abc.abstractmethod\n def sample_predictor_configs(self) -> List[paddle_infer.Config]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def add_skip_case(\n self,\n teller: [Callable[[ProgramConfig, paddle_infer.Config], bool]],\n reason: SkipReasons,\n note: str):\n self.skip_cases.append((teller, reason, note))\n\n @abc.abstractmethod\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n raise NotImplementedError\n\n def run_test_config(self, model, params, prog_config, pred_config,\n feed_data) -> Dict[str, np.ndarray]:\n '''\n Test a single case.\n '''\n pred_config.set_model_buffer(model, len(model), params, len(params))\n predictor = paddle_infer.create_predictor(pred_config)\n\n for name, _ in prog_config.inputs.items():\n input_tensor = predictor.get_input_handle(name)\n input_tensor.copy_from_cpu(feed_data[name]['data'])\n if feed_data[name]['lod'] is not None:\n input_tensor.set_lod(feed_data[name]['lod'])\n predictor.run()\n result = {}\n for out_name, o_name in zip(prog_config.outputs,\n predictor.get_output_names()):\n result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu()\n return result\n\n def assert_tensors_near(self,\n threshold: float,\n tensors: List[Dict[str, np.array]]):\n assert len(tensors) > 1\n first = tensors[0]\n for group in tensors[1:]:\n for key, arr in group.items():\n self.assertTrue(\n np.allclose(\n first[key], arr, atol=threshold),\n \"Output has diff between GPU and TensorRT. \")\n\n @abc.abstractmethod\n def run_test(self, quant=False):\n raise NotImplementedError\n"
] | [
[
"numpy.allclose",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
funkchaser/compas | [
"b58de8771484aa0c6068d43df78b1679503215de"
] | [
"src/compas_plotters/artists/pointartist.py"
] | [
"from typing import Tuple\nfrom typing import List\nfrom typing import Any\n\nfrom matplotlib.patches import Circle\nfrom matplotlib.transforms import ScaledTranslation\nfrom compas.geometry import Point\n\nfrom compas.artists import PrimitiveArtist\nfrom .artist import PlotterArtist\n\nColor = Tuple[float, float, float]\n\n\nclass PointArtist(PlotterArtist, PrimitiveArtist):\n \"\"\"Artist for COMPAS points.\"\"\"\n\n def __init__(self,\n point: Point,\n size: int = 5,\n facecolor: Color = (1.0, 1.0, 1.0),\n edgecolor: Color = (0, 0, 0),\n zorder: int = 9000,\n **kwargs: Any):\n\n super().__init__(primitive=point, **kwargs)\n\n self._mpl_circle = None\n self._size = None\n self.size = size\n self.facecolor = facecolor\n self.edgecolor = edgecolor\n self.zorder = zorder\n\n @property\n def point(self):\n return self.primitive\n\n @point.setter\n def point(self, point):\n self.primitive = point\n\n @property\n def _T(self):\n F = self.plotter.figure.dpi_scale_trans\n S = ScaledTranslation(self.point[0], self.point[1], self.plotter.axes.transData)\n T = F + S\n return T\n\n @property\n def size(self) -> float:\n return self._size / self.plotter.dpi\n\n @size.setter\n def size(self, size: int):\n self._size = size\n\n @property\n def data(self) -> List[List[float]]:\n return [self.point[:2]]\n\n def draw(self) -> None:\n circle = Circle(\n [0, 0],\n radius=self.size,\n facecolor=self.facecolor,\n edgecolor=self.edgecolor,\n transform=self._T,\n zorder=self.zorder\n )\n self._mpl_circle = self.plotter.axes.add_artist(circle)\n self.update_data()\n\n def redraw(self) -> None:\n self._mpl_circle.set_radius(self.size)\n self._mpl_circle.set_edgecolor(self.edgecolor)\n self._mpl_circle.set_facecolor(self.facecolor)\n self._mpl_circle.set_transform(self._T)\n self.update_data()\n"
] | [
[
"matplotlib.patches.Circle",
"matplotlib.transforms.ScaledTranslation"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adozier/pymatgen | [
"f1cc4d8db24ec11063be2fd84b4ea911f006eeb7",
"f1cc4d8db24ec11063be2fd84b4ea911f006eeb7"
] | [
"pymatgen/core/units.py",
"pymatgen/transformations/tests/test_site_transformations.py"
] | [
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\n\n\"\"\"\nThis module implements a FloatWithUnit, which is a subclass of float. It\nalso defines supported units for some commonly used units for energy, length,\ntemperature, time and charge. FloatWithUnit also support conversion to one\nanother, and additions and subtractions perform automatic conversion if\nunits are detected. An ArrayWithUnit is also implemented, which is a subclass\nof numpy's ndarray with similar unit features.\n\"\"\"\n\nfrom six.moves import filter, zip\n\n__author__ = \"Shyue Ping Ong, Matteo Giantomassi\"\n__copyright__ = \"Copyright 2011, The Materials Project\"\n__version__ = \"1.0\"\n__maintainer__ = \"Shyue Ping Ong, Matteo Giantomassi\"\n__status__ = \"Production\"\n__date__ = \"Aug 30, 2013\"\n\nimport numpy as np\nimport six\n\nimport collections\nfrom numbers import Number\nimport numbers\nfrom functools import partial\n\nimport re\n\nimport scipy.constants as const\n\n\"\"\"\nSome conversion factors\n\"\"\"\nHa_to_eV = 1/const.physical_constants[\"electron volt-hartree relationship\"][0]\neV_to_Ha = 1 / Ha_to_eV\nRy_to_eV = Ha_to_eV / 2\namu_to_kg = const.physical_constants[\"atomic mass unit-kilogram relationship\"][0]\nmile_to_meters = const.mile\nbohr_to_angstrom = const.physical_constants[\"Bohr radius\"][0] * 1e10\nbohr_to_ang = bohr_to_angstrom\n\n\"\"\"\nDefinitions of supported units. Values below are essentially scaling and\nconversion factors. What matters is the relative values, not the absolute.\nThe SI units must have factor 1.\n\"\"\"\nBASE_UNITS = {\n \"length\": {\n \"m\": 1,\n \"km\": 1000,\n \"mile\": mile_to_meters,\n \"ang\": 1e-10,\n \"cm\": 1e-2,\n \"pm\": 1e-12,\n \"bohr\": bohr_to_angstrom * 1e-10,\n },\n \"mass\": {\n \"kg\": 1,\n \"g\": 1e-3,\n \"amu\": amu_to_kg,\n },\n \"time\": {\n \"s\": 1,\n \"min\": 60,\n \"h\": 3600,\n },\n \"current\": {\n \"A\": 1\n },\n \"temperature\": {\n \"K\": 1,\n },\n \"amount\": {\n \"mol\": 1,\n \"atom\": 1 / const.N_A\n },\n \"intensity\": {\n \"cd\": 1\n },\n \"memory\": {\n \"byte\": 1,\n \"Kb\": 1024,\n \"Mb\": 1024**2,\n \"Gb\": 1024**3,\n \"Tb\": 1024**4,\n },\n}\n\n# Accept kb, mb, gb ... 
as well.\nBASE_UNITS[\"memory\"].update({k.lower(): v\n for k, v in BASE_UNITS[\"memory\"].items()})\n\n\n# This current list are supported derived units defined in terms of powers of\n# SI base units and constants.\nDERIVED_UNITS = {\n \"energy\": {\n \"eV\": {\"kg\": 1, \"m\": 2, \"s\": -2, const.e: 1},\n \"meV\": {\"kg\": 1, \"m\": 2, \"s\": -2, const.e * 1e-3: 1},\n \"Ha\": {\"kg\": 1, \"m\": 2, \"s\": -2, const.e * Ha_to_eV: 1},\n \"Ry\": {\"kg\": 1, \"m\": 2, \"s\": -2, const.e * Ry_to_eV: 1},\n \"J\": {\"kg\": 1, \"m\": 2, \"s\": -2},\n \"kJ\": {\"kg\": 1, \"m\": 2, \"s\": -2, 1000: 1}\n },\n \"charge\": {\n \"C\": {\"A\": 1, \"s\": 1},\n \"e\": {\"A\": 1, \"s\": 1, const.e: 1},\n },\n \"force\": {\n \"N\": {\"kg\": 1, \"m\": 1, \"s\": -2},\n \"KN\": {\"kg\": 1, \"m\": 1, \"s\": -2, 1000: 1},\n \"MN\": {\"kg\": 1, \"m\": 1, \"s\": -2, 1e6: 1},\n \"GN\": {\"kg\": 1, \"m\": 1, \"s\": -2, 1e9: 1},\n },\n \"pressure\": {\n \"Pa\": {\"kg\": 1, \"m\": -1, \"s\": -2},\n \"KPa\": {\"kg\": 1, \"m\": -1, \"s\": -2, 1000: 1},\n \"MPa\": {\"kg\": 1, \"m\": -1, \"s\": -2, 1e6: 1},\n \"GPa\": {\"kg\": 1, \"m\": -1, \"s\": -2, 1e9: 1}\n },\n \"power\": {\n \"W\": {\"m\": 2, \"kg\": 1, \"s\": -3},\n \"KW\": {\"m\": 2, \"kg\": 1, \"s\": -3, 1000: 1},\n \"MW\": {\"m\": 2, \"kg\": 1, \"s\": -3, 1e6: 1},\n \"GW\": {\"m\": 2, \"kg\": 1, \"s\": -3, 1e9: 1}\n },\n \"emf\": {\n \"V\": {\"m\": 2, \"kg\": 1, \"s\": -3, \"A\": -1}\n },\n \"capacitance\": {\n \"F\": {\"m\": -2, \"kg\": -1, \"s\": 4, \"A\": 2}\n },\n \"resistance\": {\n \"ohm\": {\"m\": 2, \"kg\": 1, \"s\": -3, \"A\": -2}\n },\n \"conductance\": {\n \"S\": {\"m\": -2, \"kg\": -1, \"s\": 3, \"A\": 2}\n },\n \"magnetic_flux\": {\n \"Wb\": {\"m\": 2, \"kg\": 1, \"s\": -2, \"A\": -1}\n }\n}\n\n\nALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items()))\nSUPPORTED_UNIT_NAMES = tuple([i for d in ALL_UNITS.values() for i in d.keys()])\n\n# Mapping unit name --> unit type (unit names must be unique).\n_UNAME2UTYPE = {}\nfor utype, d in ALL_UNITS.items():\n assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())\n _UNAME2UTYPE.update({uname: utype for uname in d})\ndel utype, d\n\n\ndef _get_si_unit(unit):\n unit_type = _UNAME2UTYPE[unit]\n si_unit = filter(lambda k: BASE_UNITS[unit_type][k] == 1,\n BASE_UNITS[unit_type].keys())\n return list(si_unit)[0], BASE_UNITS[unit_type][unit]\n\n\nclass UnitError(BaseException):\n \"\"\"\n Exception class for unit errors.\n \"\"\"\n\n\ndef check_mappings(u):\n for v in DERIVED_UNITS.values():\n for k2, v2 in v.items():\n if all([v2.get(ku, 0) == vu for ku, vu in u.items()]) and \\\n all([u.get(kv2, 0) == vv2 for kv2, vv2 in v2.items()]):\n return {k2: 1}\n return u\n\n\nclass Unit(collections.Mapping):\n \"\"\"\n Represents a unit, e.g., \"m\" for meters, etc. Supports compound units.\n Only integer powers are supported for units.\n \"\"\"\n Error = UnitError\n\n def __init__(self, unit_def):\n \"\"\"\n Constructs a unit.\n\n Args:\n unit_def: A definition for the unit. Either a mapping of unit to\n powers, e.g., {\"m\": 2, \"s\": -1} represents \"m^2 s^-1\",\n or simply as a string \"kg m^2 s^-1\". 
Note that the supported\n format uses \"^\" as the power operator and all units must be\n space-separated.\n \"\"\"\n\n if isinstance(unit_def, six.string_types):\n unit = collections.defaultdict(int)\n for m in re.finditer(\"([A-Za-z]+)\\s*\\^*\\s*([\\-0-9]*)\", unit_def):\n p = m.group(2)\n p = 1 if not p else int(p)\n k = m.group(1)\n unit[k] += p\n else:\n unit = {k: v for k, v in dict(unit_def).items() if v != 0}\n self._unit = check_mappings(unit)\n\n def __mul__(self, other):\n new_units = collections.defaultdict(int)\n for k, v in self.items():\n new_units[k] += v\n for k, v in other.items():\n new_units[k] += v\n return Unit(new_units)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __div__(self, other):\n new_units = collections.defaultdict(int)\n for k, v in self.items():\n new_units[k] += v\n for k, v in other.items():\n new_units[k] -= v\n return Unit(new_units)\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __pow__(self, i):\n return Unit({k: v * i for k, v in self.items()})\n\n def __iter__(self):\n return self._unit.__iter__()\n\n def __getitem__(self, i):\n return self._unit[i]\n\n def __len__(self):\n return len(self._unit)\n\n def __repr__(self):\n sorted_keys = sorted(self._unit.keys(),\n key=lambda k: (-self._unit[k], k))\n return \" \".join([\"{}^{}\".format(k, self._unit[k])\n if self._unit[k] != 1 else k\n for k in sorted_keys if self._unit[k] != 0])\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def as_base_units(self):\n \"\"\"\n Converts all units to base SI units, including derived units.\n\n Returns:\n (base_units_dict, scaling factor). base_units_dict will not\n contain any constants, which are gathered in the scaling factor.\n \"\"\"\n b = collections.defaultdict(int)\n factor = 1\n for k, v in self.items():\n derived = False\n for d in DERIVED_UNITS.values():\n if k in d:\n for k2, v2 in d[k].items():\n if isinstance(k2, Number):\n factor *= k2 ** (v2 * v)\n else:\n b[k2] += v2 * v\n derived = True\n break\n if not derived:\n si, f = _get_si_unit(k)\n b[si] += v\n factor *= f ** v\n return {k: v for k, v in b.items() if v != 0}, factor\n\n def get_conversion_factor(self, new_unit):\n \"\"\"\n Returns a conversion factor between this unit and a new unit.\n Compound units are supported, but must have the same powers in each\n unit type.\n\n Args:\n new_unit: The new unit.\n \"\"\"\n uo_base, ofactor = self.as_base_units\n un_base, nfactor = Unit(new_unit).as_base_units\n units_new = sorted(un_base.items(),\n key=lambda d: _UNAME2UTYPE[d[0]])\n units_old = sorted(uo_base.items(),\n key=lambda d: _UNAME2UTYPE[d[0]])\n factor = ofactor / nfactor\n for uo, un in zip(units_old, units_new):\n if uo[1] != un[1]:\n raise UnitError(\"Units %s and %s are not compatible!\" % (uo, un))\n c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]\n factor *= (c[uo[0]] / c[un[0]]) ** uo[1]\n return factor\n\n\nclass FloatWithUnit(float):\n \"\"\"\n Subclasses float to attach a unit type. Typically, you should use the\n pre-defined unit type subclasses such as Energy, Length, etc. instead of\n using FloatWithUnit directly.\n\n Supports conversion, addition and subtraction of the same unit type. E.g.,\n 1 m + 20 cm will be automatically converted to 1.2 m (units follow the\n leftmost quantity). 
Note that FloatWithUnit does not override the eq\n method for float, i.e., units are not checked when testing for equality.\n The reason is to allow this class to be used transparently wherever floats\n are expected.\n\n >>> e = Energy(1.1, \"Ha\")\n >>> a = Energy(1.1, \"Ha\")\n >>> b = Energy(3, \"eV\")\n >>> c = a + b\n >>> print(c)\n 1.2102479761938871 Ha\n >>> c.to(\"eV\")\n 32.932522246000005 eV\n \"\"\"\n Error = UnitError\n\n @classmethod\n def from_string(cls, s):\n \"\"\"\n Initialize a FloatWithUnit from a string. Example Memory.from_string(\"1. Mb\")\n \"\"\"\n # Extract num and unit string. \n s = s.strip()\n for i, char in enumerate(s):\n if char.isalpha() or char.isspace():\n break\n else:\n raise Exception(\"Unit is missing in string %s\" % s)\n num, unit = float(s[:i]), s[i:]\n\n # Find unit type (set it to None if it cannot be detected)\n for unit_type, d in BASE_UNITS.items():\n if unit in d:\n break\n else:\n unit_type = None\n\n return cls(num, unit, unit_type=unit_type)\n\n def __new__(cls, val, unit, unit_type=None):\n new = float.__new__(cls, val)\n new._unit = Unit(unit)\n new._unit_type = unit_type\n return new\n\n def __init__(self, val, unit, unit_type=None):\n \"\"\"\n Initializes a float with unit.\n\n Args:\n val (float): Value\n unit (Unit): A unit. E.g., \"C\".\n unit_type (str): A type of unit. E.g., \"charge\"\n \"\"\"\n if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:\n raise UnitError(\n \"{} is not a supported unit for {}\".format(unit, unit_type))\n self._unit = Unit(unit)\n self._unit_type = unit_type\n\n def __repr__(self):\n return super(FloatWithUnit, self).__repr__()\n\n def __str__(self):\n s = super(FloatWithUnit, self).__str__()\n return \"{} {}\".format(s, self._unit)\n\n def __add__(self, other):\n if not hasattr(other, \"unit_type\"):\n return super(FloatWithUnit, self).__add__(other)\n if other.unit_type != self._unit_type:\n raise UnitError(\"Adding different types of units is not allowed\")\n val = other\n if other.unit != self._unit:\n val = other.to(self._unit)\n return FloatWithUnit(float(self) + val, unit_type=self._unit_type,\n unit=self._unit)\n\n def __sub__(self, other):\n if not hasattr(other, \"unit_type\"):\n return super(FloatWithUnit, self).__sub__(other)\n if other.unit_type != self._unit_type:\n raise UnitError(\"Subtracting different units is not allowed\")\n val = other\n if other.unit != self._unit:\n val = other.to(self._unit)\n return FloatWithUnit(float(self) - val, unit_type=self._unit_type,\n unit=self._unit)\n\n def __mul__(self, other):\n if not isinstance(other, FloatWithUnit):\n return FloatWithUnit(float(self) * other,\n unit_type=self._unit_type,\n unit=self._unit)\n return FloatWithUnit(float(self) * other, unit_type=None,\n unit=self._unit * other._unit)\n\n def __rmul__(self, other):\n if not isinstance(other, FloatWithUnit):\n return FloatWithUnit(float(self) * other,\n unit_type=self._unit_type,\n unit=self._unit)\n return FloatWithUnit(float(self) * other, unit_type=None,\n unit=self._unit * other._unit)\n\n def __pow__(self, i):\n return FloatWithUnit(float(self) ** i, unit_type=None,\n unit=self._unit ** i)\n\n def __div__(self, other):\n val = super(FloatWithUnit, self).__div__(other)\n if not isinstance(other, FloatWithUnit):\n return FloatWithUnit(val, unit_type=self._unit_type,\n unit=self._unit)\n return FloatWithUnit(val, unit_type=None,\n unit=self._unit / other._unit)\n\n def __truediv__(self, other):\n val = super(FloatWithUnit, self).__truediv__(other)\n if not 
isinstance(other, FloatWithUnit):\n return FloatWithUnit(val, unit_type=self._unit_type,\n unit=self._unit)\n return FloatWithUnit(val, unit_type=None,\n unit=self._unit / other._unit)\n\n def __neg__(self):\n return FloatWithUnit(super(FloatWithUnit, self).__neg__(),\n unit_type=self._unit_type,\n unit=self._unit)\n\n def __getnewargs__(self):\n \"\"\"Function used by pickle to recreate object.\"\"\"\n #print(self.__dict__)\n # FIXME\n # There's a problem with _unit_type if we try to unpickle objects from file.\n # since self._unit_type might not be defined. I think this is due to\n # the use of decorators (property and unitized). In particular I have problems with \"amu\"\n # likely due to weight in core.composition\n if hasattr(self, \"_unit_type\"):\n args = float(self), self._unit, self._unit_type\n else:\n args = float(self), self._unit, None\n\n return args\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"val\"] = float(self)\n #print(\"in getstate %s\" % state)\n return state\n\n def __setstate__(self, state):\n #print(\"in setstate %s\" % state)\n self._unit = state[\"_unit\"]\n\n @property\n def unit_type(self):\n return self._unit_type\n\n @property\n def unit(self):\n return self._unit\n\n def to(self, new_unit):\n \"\"\"\n Conversion to a new_unit. Right now, only supports 1 to 1 mapping of\n units of each type.\n\n Args:\n new_unit: New unit type.\n\n Returns:\n A FloatWithUnit object in the new units.\n\n Example usage:\n >>> e = Energy(1.1, \"eV\")\n >>> e = Energy(1.1, \"Ha\")\n >>> e.to(\"eV\")\n 29.932522246 eV\n \"\"\"\n return FloatWithUnit(\n self * self.unit.get_conversion_factor(new_unit),\n unit_type=self._unit_type,\n unit=new_unit)\n\n @property\n def as_base_units(self):\n \"\"\"\n Returns this FloatWithUnit in base SI units, including derived units.\n\n Returns:\n A FloatWithUnit object in base SI units\n \"\"\"\n return self.to(self.unit.as_base_units[0])\n\n\n @property\n def supported_units(self):\n \"\"\"\n Supported units for specific unit type.\n \"\"\"\n return tuple(ALL_UNITS[self._unit_type].keys())\n\n\nclass ArrayWithUnit(np.ndarray):\n \"\"\"\n Subclasses `numpy.ndarray` to attach a unit type. Typically, you should\n use the pre-defined unit type subclasses such as EnergyArray,\n LengthArray, etc. instead of using ArrayWithFloatWithUnit directly.\n\n Supports conversion, addition and subtraction of the same unit type. 
E.g.,\n 1 m + 20 cm will be automatically converted to 1.2 m (units follow the\n leftmost quantity).\n\n >>> a = EnergyArray([1, 2], \"Ha\")\n >>> b = EnergyArray([1, 2], \"eV\")\n >>> c = a + b\n >>> print(c)\n [ 1.03674933 2.07349865] Ha\n >>> c.to(\"eV\")\n array([ 28.21138386, 56.42276772]) eV\n \"\"\"\n Error = UnitError\n\n def __new__(cls, input_array, unit, unit_type=None):\n # Input array is an already formed ndarray instance\n # We first cast to be our class type\n obj = np.asarray(input_array).view(cls)\n # add the new attributes to the created instance\n obj._unit = Unit(unit)\n obj._unit_type = unit_type\n return obj\n\n def __array_finalize__(self, obj):\n \"\"\"\n See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for\n comments.\n \"\"\"\n if obj is None:\n return\n self._unit = getattr(obj, \"_unit\", None)\n self._unit_type = getattr(obj, \"_unit_type\", None)\n\n #TODO abstract base class property?\n @property\n def unit_type(self):\n return self._unit_type\n\n #TODO abstract base class property?\n @property\n def unit(self):\n return self._unit\n\n def __reduce__(self):\n #print(\"in reduce\")\n reduce = list(super(ArrayWithUnit, self).__reduce__())\n #print(\"unit\",self._unit)\n #print(reduce[2])\n reduce[2] = {\"np_state\": reduce[2], \"_unit\": self._unit}\n return tuple(reduce)\n\n def __setstate__(self, state):\n #print(\"in setstate %s\" % str(state))\n super(ArrayWithUnit, self).__setstate__(state[\"np_state\"])\n self._unit = state[\"_unit\"]\n\n def __repr__(self):\n return \"{} {}\".format(np.array(self).__repr__(), self.unit)\n\n def __str__(self):\n return \"{} {}\".format(np.array(self).__str__(), self.unit)\n\n def __add__(self, other):\n if hasattr(other, \"unit_type\"):\n if other.unit_type != self.unit_type:\n raise UnitError(\"Adding different types of units is\"\n \" not allowed\")\n\n if other.unit != self.unit:\n other = other.to(self.unit)\n\n return self.__class__(np.array(self) + np.array(other),\n unit_type=self.unit_type, unit=self.unit)\n\n def __sub__(self, other):\n if hasattr(other, \"unit_type\"):\n if other.unit_type != self.unit_type:\n raise UnitError(\"Subtracting different units is not allowed\")\n\n if other.unit != self.unit:\n other = other.to(self.unit)\n\n return self.__class__(np.array(self) - np.array(other),\n unit_type=self.unit_type, unit=self.unit)\n\n def __mul__(self, other):\n # FIXME\n # Here we have the most important difference between FloatWithUnit and\n # ArrayWithFloatWithUnit:\n # If other does not have units, I return an object with the same units\n # as self.\n # if other *has* units, I return an object *without* units since\n # taking into account all the possible derived quantities would be\n # too difficult.\n # Moreover Energy(1.0) * Time(1.0, \"s\") returns 1.0 Ha that is a\n # bit misleading.\n # Same protocol for __div__\n if not hasattr(other, \"unit_type\"):\n return self.__class__(np.array(self).__mul__(np.array(other)),\n unit_type=self._unit_type, unit=self._unit)\n else:\n # Cannot use super since it returns an instance of self.__class__\n # while here we want a bare numpy array.\n return self.__class__(\n np.array(self).__mul__(np.array(other)),\n unit=self.unit * other.unit)\n\n def __rmul__(self, other):\n if not hasattr(other, \"unit_type\"):\n return self.__class__(np.array(self).__rmul__(np.array(other)),\n unit_type=self._unit_type, unit=self._unit)\n else:\n return self.__class__(\n np.array(self).__rmul__(np.array(other)),\n unit=self.unit * other.unit)\n\n def __div__(self, 
other):\n if not hasattr(other, \"unit_type\"):\n return self.__class__(np.array(self).__div__(np.array(other)),\n unit_type=self._unit_type, unit=self._unit)\n else:\n return self.__class__(\n np.array(self).__div__(np.array(other)),\n unit=self.unit/other.unit)\n\n def __truediv__(self, other):\n if not hasattr(other, \"unit_type\"):\n return self.__class__(np.array(self).__truediv__(np.array(other)),\n unit_type=self._unit_type, unit=self._unit)\n else:\n return self.__class__(\n np.array(self).__truediv__(np.array(other)),\n unit=self.unit / other.unit)\n\n def __neg__(self):\n return self.__class__(np.array(self).__neg__(),\n unit_type=self.unit_type, unit=self.unit)\n\n def to(self, new_unit):\n \"\"\"\n Conversion to a new_unit.\n\n Args:\n new_unit:\n New unit type.\n\n Returns:\n A ArrayWithFloatWithUnit object in the new units.\n\n Example usage:\n >>> e = EnergyArray([1, 1.1], \"Ha\")\n >>> e.to(\"eV\")\n array([ 27.21138386, 29.93252225]) eV\n \"\"\"\n return self.__class__(\n np.array(self) * self.unit.get_conversion_factor(new_unit),\n unit_type=self.unit_type, unit=new_unit)\n\n @property\n def as_base_units(self):\n \"\"\"\n Returns this ArrayWithUnit in base SI units, including derived units.\n\n Returns:\n An ArrayWithUnit object in base SI units\n \"\"\"\n return self.to(self.unit.as_base_units[0])\n\n #TODO abstract base class property?\n @property\n def supported_units(self):\n \"\"\"\n Supported units for specific unit type.\n \"\"\"\n return ALL_UNITS[self.unit_type]\n\n #TODO abstract base class method?\n def conversions(self):\n \"\"\"\n Returns a string showing the available conversions.\n Useful tool in interactive mode.\n \"\"\"\n return \"\\n\".join(str(self.to(unit)) for unit in self.supported_units)\n\n\ndef _my_partial(func, *args, **kwargs):\n \"\"\"\n Partial returns a partial object and therefore we cannot inherit class\n methods defined in FloatWithUnit. This function calls partial and patches\n the new class before returning.\n \"\"\"\n newobj = partial(func, *args, **kwargs)\n # monkey patch\n newobj.from_string = FloatWithUnit.from_string\n return newobj\n\n\nEnergy = partial(FloatWithUnit, unit_type=\"energy\")\n\"\"\"\nA float with an energy unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.\n\"\"\"\nEnergyArray = partial(ArrayWithUnit, unit_type=\"energy\")\n\nLength = partial(FloatWithUnit, unit_type=\"length\")\n\"\"\"\nA float with a length unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is\n raised.\n\"\"\"\nLengthArray = partial(ArrayWithUnit, unit_type=\"length\")\n\nMass = partial(FloatWithUnit, unit_type=\"mass\")\n\"\"\"\nA float with a mass unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is\n raised.\n\"\"\"\nMassArray = partial(ArrayWithUnit, unit_type=\"mass\")\n\nTemp = partial(FloatWithUnit, unit_type=\"temperature\")\n\"\"\"\nA float with a temperature unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., K. Only K (kelvin) is supported.\n\"\"\"\nTempArray = partial(ArrayWithUnit, unit_type=\"temperature\")\n\nTime = partial(FloatWithUnit, unit_type=\"time\")\n\"\"\"\nA float with a time unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., s, min, h. 
Must be valid unit or UnitError is\n raised.\n\"\"\"\nTimeArray = partial(ArrayWithUnit, unit_type=\"time\")\n\nCharge = partial(FloatWithUnit, unit_type=\"charge\")\n\"\"\"\nA float with a charge unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError\n is raised.\n\"\"\"\nChargeArray = partial(ArrayWithUnit, unit_type=\"charge\")\n\n\nMemory = _my_partial(FloatWithUnit, unit_type=\"memory\")\n\"\"\"\nA float with a memory unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError\n is raised.\n\"\"\"\n\n\ndef obj_with_unit(obj, unit):\n \"\"\"\n Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of\n objects with units if obj is a dict, else an instance of\n `ArrayWithFloatWithUnit`.\n\n Args:\n unit: Specific units (eV, Ha, m, ang, etc.).\n \"\"\"\n unit_type = _UNAME2UTYPE[unit]\n\n if isinstance(obj, numbers.Number):\n return FloatWithUnit(obj, unit=unit, unit_type=unit_type)\n elif isinstance(obj, collections.Mapping):\n return {k: obj_with_unit(v, unit) for k,v in obj.items()}\n else:\n return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)\n\n\ndef unitized(unit):\n \"\"\"\n Useful decorator to assign units to the output of a function. You can also\n use it to standardize the output units of a function that already returns\n a FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences\n are assigned the same unit. It works with Python sequences only. The creation\n of numpy arrays loses all unit information. For mapping types, the values\n are assigned units.\n\n Args:\n unit: Specific unit (eV, Ha, m, ang, etc.).\n\n Example usage::\n\n @unitized(unit=\"kg\")\n def get_mass():\n return 123.45\n\n \"\"\"\n def wrap(f):\n def wrapped_f(*args, **kwargs):\n val = f(*args, **kwargs)\n unit_type = _UNAME2UTYPE[unit]\n\n if isinstance(val, FloatWithUnit) or isinstance(val, ArrayWithUnit):\n return val.to(unit)\n\n elif isinstance(val, collections.Sequence):\n # TODO: why don't we return a ArrayWithUnit?\n # This complicated way is to ensure the sequence type is\n # preserved (list or tuple).\n return val.__class__([FloatWithUnit(i, unit_type=unit_type,\n unit=unit) for i in val])\n elif isinstance(val, collections.Mapping):\n for k, v in val.items():\n val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)\n elif isinstance(val, numbers.Number):\n return FloatWithUnit(val, unit_type=unit_type, unit=unit)\n elif val is None:\n pass\n else:\n raise TypeError(\"Don't know how to assign units to %s\" % str(val))\n return val\n return wrapped_f\n return wrap\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n",
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\n\n\"\"\"\nCreated on Mar 15, 2012\n\"\"\"\n\n\n__author__ = \"Shyue Ping Ong\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__version__ = \"0.1\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"[email protected]\"\n__date__ = \"Mar 15, 2012\"\n\nimport unittest2 as unittest\n\nimport numpy as np\n\nfrom pymatgen import Lattice, Structure\nfrom pymatgen.transformations.site_transformations import \\\n InsertSitesTransformation, TranslateSitesTransformation, \\\n ReplaceSiteSpeciesTransformation, RemoveSitesTransformation, \\\n PartialRemoveSitesTransformation\n\nfrom monty.os.path import which\n\n\nenumlib_present = which('multienum.x') and which('makestr.x')\n\n\nclass TranslateSitesTransformationTest(unittest.TestCase):\n\n def setUp(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.375, 0.375, 0.375])\n coords.append([.5, .5, .5])\n coords.append([0.875, 0.875, 0.875])\n coords.append([0.125, 0.125, 0.125])\n coords.append([0.25, 0.25, 0.25])\n coords.append([0.625, 0.625, 0.625])\n coords.append([0.75, 0.75, 0.75])\n\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n self.struct = Structure(lattice, [\"Li+\", \"Li+\", \"Li+\", \"Li+\", \"O2-\",\n \"O2-\", \"O2-\", \"O2-\"], coords)\n\n def test_apply_transformation(self):\n t = TranslateSitesTransformation([0, 1], [0.1, 0.2, 0.3])\n s = t.apply_transformation(self.struct)\n self.assertTrue(np.allclose(s[0].frac_coords, [0.1, 0.2, 0.3]))\n self.assertTrue(np.allclose(s[1].frac_coords, [0.475, 0.575, 0.675]))\n inv_t = t.inverse\n s = inv_t.apply_transformation(s)\n self.assertTrue(np.allclose(s[0].frac_coords, [0, 0, 0]))\n self.assertTrue(np.allclose(s[1].frac_coords, [0.375, 0.375, 0.375]))\n str(t)\n\n def test_apply_transformation_site_by_site(self):\n t = TranslateSitesTransformation([0, 1], [[0.1, 0.2, 0.3],\n [-0.075, -0.075, -0.075]])\n s = t.apply_transformation(self.struct)\n self.assertTrue(np.allclose(s[0].frac_coords, [0.1, 0.2, 0.3]))\n self.assertTrue(np.allclose(s[1].frac_coords, [0.3, 0.3, 0.3]))\n inv_t = t.inverse\n s = inv_t.apply_transformation(s)\n self.assertTrue(np.allclose(s[0].frac_coords, [0, 0, 0]))\n self.assertTrue(np.allclose(s[1].frac_coords, [0.375, 0.375, 0.375]))\n str(t)\n\n def test_to_from_dict(self):\n d1 = TranslateSitesTransformation([0], [0.1, 0.2, 0.3]).as_dict()\n d2 = TranslateSitesTransformation([0, 1], [[0.1, 0.2, 0.3],\n [-0.075, -0.075, -0.075]]).as_dict()\n t1 = TranslateSitesTransformation.from_dict(d1)\n t2 = TranslateSitesTransformation.from_dict(d2)\n s1 = t1.apply_transformation(self.struct)\n s2 = t2.apply_transformation(self.struct)\n self.assertTrue(np.allclose(s1[0].frac_coords, [0.1, 0.2, 0.3]))\n self.assertTrue(np.allclose(s2[0].frac_coords, [0.1, 0.2, 0.3]))\n self.assertTrue(np.allclose(s2[1].frac_coords, [0.3, 0.3, 0.3]))\n str(t1)\n str(t2)\n\n\nclass ReplaceSiteSpeciesTransformationTest(unittest.TestCase):\n\n def setUp(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.375, 0.375, 0.375])\n coords.append([.5, .5, .5])\n coords.append([0.875, 0.875, 0.875])\n coords.append([0.125, 0.125, 0.125])\n coords.append([0.25, 0.25, 0.25])\n coords.append([0.625, 0.625, 0.625])\n coords.append([0.75, 0.75, 0.75])\n\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 
3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n self.struct = Structure(lattice, [\"Li+\", \"Li+\", \"Li+\", \"Li+\", \"O2-\",\n \"O2-\", \"O2-\", \"O2-\"], coords)\n\n def test_apply_transformation(self):\n t = ReplaceSiteSpeciesTransformation({0: \"Na\"})\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Na1 Li3 O4\")\n str(t)\n\n def test_to_from_dict(self):\n d = ReplaceSiteSpeciesTransformation({0: \"Na\"}).as_dict()\n t = ReplaceSiteSpeciesTransformation.from_dict(d)\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Na1 Li3 O4\")\n\n\nclass RemoveSitesTransformationTest(unittest.TestCase):\n\n def setUp(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.375, 0.375, 0.375])\n coords.append([.5, .5, .5])\n coords.append([0.875, 0.875, 0.875])\n coords.append([0.125, 0.125, 0.125])\n coords.append([0.25, 0.25, 0.25])\n coords.append([0.625, 0.625, 0.625])\n coords.append([0.75, 0.75, 0.75])\n\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n self.struct = Structure(lattice, [\"Li+\", \"Li+\", \"Li+\", \"Li+\", \"O2-\",\n \"O2-\", \"O2-\", \"O2-\"], coords)\n\n def test_apply_transformation(self):\n t = RemoveSitesTransformation(range(2))\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li2 O4\")\n str(t)\n\n def test_to_from_dict(self):\n d = RemoveSitesTransformation(range(2)).as_dict()\n t = RemoveSitesTransformation.from_dict(d)\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li2 O4\")\n\n\nclass InsertSitesTransformationTest(unittest.TestCase):\n\n def setUp(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.375, 0.375, 0.375])\n coords.append([.5, .5, .5])\n coords.append([0.875, 0.875, 0.875])\n coords.append([0.125, 0.125, 0.125])\n coords.append([0.25, 0.25, 0.25])\n coords.append([0.625, 0.625, 0.625])\n coords.append([0.75, 0.75, 0.75])\n\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n self.struct = Structure(lattice, [\"Li+\", \"Li+\", \"Li+\", \"Li+\", \"O2-\",\n \"O2-\", \"O2-\", \"O2-\"], coords)\n\n def test_apply_transformation(self):\n t = InsertSitesTransformation([\"Fe\", \"Mn\"], [[0., 0.5, 0],\n [0.5, 0.2, 0.2]])\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li4 Mn1 Fe1 O4\")\n t = InsertSitesTransformation([\"Fe\", \"Mn\"], [[0.001, 0, 0],\n [0.1, 0.2, 0.2]])\n #Test validate proximity\n self.assertRaises(ValueError, t.apply_transformation, self.struct)\n\n def test_to_from_dict(self):\n d = InsertSitesTransformation([\"Fe\", \"Mn\"],\n [[0.5, 0, 0], [0.1, 0.5, 0.2]]).as_dict()\n t = InsertSitesTransformation.from_dict(d)\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li4 Mn1 Fe1 O4\")\n\n\nclass PartialRemoveSitesTransformationTest(unittest.TestCase):\n\n def setUp(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.375, 0.375, 0.375])\n coords.append([.5, .5, .5])\n coords.append([0.875, 0.875, 0.875])\n coords.append([0.125, 0.125, 0.125])\n coords.append([0.25, 0.25, 0.25])\n coords.append([0.625, 0.625, 0.625])\n coords.append([0.75, 0.75, 0.75])\n\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n self.struct = Structure(lattice, [\"Li+\", \"Li+\", \"Li+\", \"Li+\", \"O2-\",\n \"O2-\", \"O2-\", 
\"O2-\"], coords)\n\n def test_apply_transformation_complete(self):\n t = PartialRemoveSitesTransformation(\n [tuple(range(4)), tuple(range(4, 8))],\n [0.5, 0.5],\n PartialRemoveSitesTransformation.ALGO_COMPLETE\n )\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li2 O2\")\n s = t.apply_transformation(self.struct, 12)\n self.assertEqual(len(s), 12)\n\n @unittest.skipIf(not enumlib_present, \"enum_lib not present.\")\n def test_apply_transformation_enumerate(self):\n t = PartialRemoveSitesTransformation(\n [tuple(range(4)), tuple(range(4, 8))],\n [0.5, 0.5],\n PartialRemoveSitesTransformation.ALGO_ENUMERATE\n )\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li2 O2\")\n s = t.apply_transformation(self.struct, 12)\n self.assertEqual(len(s), 12)\n\n def test_apply_transformation_best_first(self):\n t = PartialRemoveSitesTransformation(\n [tuple(range(4)), tuple(range(4, 8))],\n [0.5, 0.5],\n PartialRemoveSitesTransformation.ALGO_BEST_FIRST\n )\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li2 O2\")\n\n def test_apply_transformation_fast(self):\n t = PartialRemoveSitesTransformation(\n [tuple(range(4)), tuple(range(4, 8))],\n [0.5, 0.5],\n PartialRemoveSitesTransformation.ALGO_FAST\n )\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li2 O2\")\n t = PartialRemoveSitesTransformation(\n [tuple(range(8))], [0.5],\n PartialRemoveSitesTransformation.ALGO_FAST\n )\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li2 O2\")\n\n def test_to_from_dict(self):\n d = PartialRemoveSitesTransformation([tuple(range(4))], [0.5]).as_dict()\n t = PartialRemoveSitesTransformation.from_dict(d)\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.formula, \"Li2 O4\")\n\n def test_str(self):\n d = PartialRemoveSitesTransformation([tuple(range(4))], [0.5]).as_dict()\n self.assertIsNotNone(str(d))\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n"
] | [
[
"numpy.asarray",
"numpy.array"
],
[
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rossbar/scipy-lecture-notes | [
"7f74e6925721c43bd81bf0bee34b4805ac4a3b57",
"7f74e6925721c43bd81bf0bee34b4805ac4a3b57",
"7f74e6925721c43bd81bf0bee34b4805ac4a3b57",
"7f74e6925721c43bd81bf0bee34b4805ac4a3b57"
] | [
"advanced/image_processing/examples/plot_numpy_array.py",
"intro/numpy/examples/plot_elephant.py",
"intro/summary-exercises/examples/plot_gumbell_wind_speed_prediction.py",
"packages/scikit-image/examples/plot_labels.py"
] | [
"\"\"\"\nImage manipulation and numpy arrays\n====================================\n\nThis example shows how to do image manipulation using common numpy arrays\ntricks.\n\n\"\"\"\n\nimport numpy as np\nimport scipy\nimport scipy.misc\nimport matplotlib.pyplot as plt\n\nface = scipy.misc.face(gray=True)\nface[10:13, 20:23]\nface[100:120] = 255\n\nlx, ly = face.shape\nX, Y = np.ogrid[0:lx, 0:ly]\nmask = (X - lx/2)**2 + (Y - ly/2)**2 > lx*ly/4\nface[mask] = 0\nface[range(400), range(400)] = 255\n\nplt.figure(figsize=(3, 3))\nplt.axes([0, 0, 1, 1])\nplt.imshow(face, cmap=plt.cm.gray)\nplt.axis('off')\n\nplt.show()\n",
"\"\"\"\nReading and writing an elephant\n===============================\n\nRead and write images\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#################################\n# original figure\n#################################\n\nplt.figure()\nimg = plt.imread('../data/elephant.png')\nplt.imshow(img)\n\n#################################\n# red channel displayed in grey\n#################################\n\nplt.figure()\nimg_red = img[:, :, 0]\nplt.imshow(img_red, cmap=plt.cm.gray)\n\n#################################\n# lower resolution\n#################################\n\nplt.figure()\nimg_tiny = img[::6, ::6]\nplt.imshow(img_tiny, interpolation='nearest') \n\nplt.show()\n",
"\"\"\"\nThe Gumbell distribution\n=========================\n\nGenerate the exercise results on the Gumbell distribution\n\"\"\"\nimport numpy as np\nfrom scipy.interpolate import UnivariateSpline\nimport matplotlib.pyplot as plt\n\n\ndef gumbell_dist(arr):\n return -np.log(-np.log(arr))\n\nyears_nb = 21\nwspeeds = np.load('sprog-windspeeds.npy')\nmax_speeds = np.array([arr.max() for arr in np.array_split(wspeeds, years_nb)])\nsorted_max_speeds = np.sort(max_speeds)\n\ncprob = (np.arange(years_nb, dtype=np.float32) + 1)/(years_nb + 1)\ngprob = gumbell_dist(cprob)\nspeed_spline = UnivariateSpline(gprob, sorted_max_speeds, k=1)\nnprob = gumbell_dist(np.linspace(1e-3, 1-1e-3, 1e2))\nfitted_max_speeds = speed_spline(nprob)\n\nfifty_prob = gumbell_dist(49./50.)\nfifty_wind = speed_spline(fifty_prob)\n\nplt.figure()\nplt.plot(sorted_max_speeds, gprob, 'o')\nplt.plot(fitted_max_speeds, nprob, 'g--')\nplt.plot([fifty_wind], [fifty_prob], 'o', ms=8., mfc='y', mec='y')\nplt.plot([fifty_wind, fifty_wind], [plt.axis()[2], fifty_prob], 'k--')\nplt.text(35, -1, r'$V_{50} = %.2f \\, m/s$' % fifty_wind)\nplt.xlabel('Annual wind speed maxima [$m/s$]')\nplt.ylabel('Gumbell cumulative probability')\nplt.show()\n",
"\"\"\"\nLabelling connected components of an image\n===========================================\n\nThis example shows how to label connected components of a binary image, using\nthe dedicated skimage.measure.label function.\n\"\"\"\n\nfrom skimage import measure\nfrom skimage import filters\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nn = 12\nl = 256\nnp.random.seed(1)\nim = np.zeros((l, l))\npoints = l * np.random.random((2, n ** 2))\nim[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1\nim = filters.gaussian(im, sigma= l / (4. * n))\nblobs = im > 0.7 * im.mean()\n\nall_labels = measure.label(blobs)\nblobs_labels = measure.label(blobs, background=0)\n\nplt.figure(figsize=(9, 3.5))\nplt.subplot(131)\nplt.imshow(blobs, cmap='gray')\nplt.axis('off')\nplt.subplot(132)\nplt.imshow(all_labels, cmap='nipy_spectral')\nplt.axis('off')\nplt.subplot(133)\nplt.imshow(blobs_labels, cmap='nipy_spectral')\nplt.axis('off')\n\nplt.tight_layout()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.imshow",
"scipy.misc.face",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imread",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"scipy.interpolate.UnivariateSpline",
"numpy.log",
"numpy.linspace",
"numpy.arange",
"numpy.sort",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"numpy.array_split",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.axis",
"numpy.load",
"matplotlib.pyplot.text",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"numpy.random.random",
"numpy.random.seed",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Jacob-Barhak/panel | [
"04cad38ea703e4e69fb76f063a27f4ffe40688e8"
] | [
"panel/reactive.py"
] | [
"\"\"\"\nDeclares Syncable and Reactive classes which provides baseclasses\nfor Panel components which sync their state with one or more bokeh\nmodels rendered on the frontend.\n\"\"\"\n\nimport difflib\nimport sys\nimport threading\n\nfrom collections import namedtuple\nfrom functools import partial\n\nimport numpy as np\nimport param\n\nfrom bokeh.models import LayoutDOM\nfrom tornado import gen\n\nfrom .config import config\nfrom .io.callbacks import PeriodicCallback\nfrom .io.model import hold\nfrom .io.notebook import push, push_on_root\nfrom .io.server import unlocked\nfrom .io.state import state\nfrom .util import edit_readonly, updating\nfrom .viewable import Renderable, Viewable\n\nLinkWatcher = namedtuple(\"Watcher\",\"inst cls fn mode onlychanged parameter_names what queued target links transformed bidirectional_watcher\")\n\n\nclass Syncable(Renderable):\n \"\"\"\n Syncable is an extension of the Renderable object which can not\n only render to a bokeh model but also sync the parameters on the\n object with the properties on the model.\n\n In order to bi-directionally link parameters with bokeh model\n instances the _link_params and _link_props methods define\n callbacks triggered when either the parameter or bokeh property\n values change. Since there may not be a 1-to-1 mapping between\n parameter and the model property the _process_property_change and\n _process_param_change may be overridden to apply any necessary\n transformations.\n \"\"\"\n\n # Timeout if a notebook comm message is swallowed\n _timeout = 20000\n\n # Timeout before the first event is processed\n _debounce = 50\n\n # Any parameters that require manual updates handling for the models\n # e.g. parameters which affect some sub-model\n _manual_params = []\n\n # Mapping from parameter name to bokeh model property name\n _rename = {}\n\n # Allows defining a mapping from model property name to a JS code\n # snippet that transforms the object before serialization\n _js_transforms = {}\n\n # Transforms from input value to bokeh property value\n _source_transforms = {}\n _target_transforms = {}\n\n __abstract = True\n\n def __init__(self, **params):\n super().__init__(**params)\n\n # Useful when updating model properties which trigger potentially\n # recursive events\n self._updating = False\n\n # A dictionary of current property change events\n self._events = {}\n\n # Any watchers associated with links between two objects\n self._links = []\n self._link_params()\n\n # A dictionary of bokeh property changes being processed\n self._changing = {}\n\n # Sets up watchers to process manual updates to models\n if self._manual_params:\n self.param.watch(self._update_manual, self._manual_params)\n\n #----------------------------------------------------------------\n # Model API\n #----------------------------------------------------------------\n\n def _process_property_change(self, msg):\n \"\"\"\n Transform bokeh model property changes into parameter updates.\n Should be overridden to provide appropriate mapping between\n parameter value and bokeh model change. By default uses the\n _rename class level attribute to map between parameter and\n property names.\n \"\"\"\n inverted = {v: k for k, v in self._rename.items()}\n return {inverted.get(k, k): v for k, v in msg.items()}\n\n def _process_param_change(self, msg):\n \"\"\"\n Transform parameter changes into bokeh model property updates.\n Should be overridden to provide appropriate mapping between\n parameter value and bokeh model change. 
By default uses the\n _rename class level attribute to map between parameter and\n property names.\n \"\"\"\n properties = {self._rename.get(k, k): v for k, v in msg.items()\n if self._rename.get(k, False) is not None}\n if 'width' in properties and self.sizing_mode is None:\n properties['min_width'] = properties['width']\n if 'height' in properties and self.sizing_mode is None:\n properties['min_height'] = properties['height']\n return properties\n\n @property\n def _linkable_params(self):\n \"\"\"\n Parameters that can be linked in JavaScript via source\n transforms.\n \"\"\"\n return [p for p in self._synced_params if self._rename.get(p, False) is not None\n and self._source_transforms.get(p, False) is not None] + ['loading']\n\n @property\n def _synced_params(self):\n \"\"\"\n Parameters which are synced with properties using transforms\n applied in the _process_param_change method.\n \"\"\"\n ignored = ['default_layout', 'loading']\n return [p for p in self.param if p not in self._manual_params+ignored]\n\n def _init_params(self):\n return {k: v for k, v in self.param.get_param_values()\n if k in self._synced_params and v is not None}\n\n def _link_params(self):\n params = self._synced_params\n if params:\n watcher = self.param.watch(self._param_change, params)\n self._callbacks.append(watcher)\n\n def _link_props(self, model, properties, doc, root, comm=None):\n ref = root.ref['id']\n if config.embed:\n return\n\n for p in properties:\n if isinstance(p, tuple):\n _, p = p\n if comm:\n model.on_change(p, partial(self._comm_change, doc, ref, comm))\n else:\n model.on_change(p, partial(self._server_change, doc, ref))\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n \"\"\"\n Method for handling any manual update events, i.e. 
events triggered\n by changes in the manual params.\n \"\"\"\n\n def _update_manual(self, *events):\n for ref, (model, parent) in self._models.items():\n if ref not in state._views or ref in state._fake_roots:\n continue\n viewable, root, doc, comm = state._views[ref]\n if comm or state._unblocked(doc):\n with unlocked():\n self._manual_update(events, model, doc, root, parent, comm)\n if comm and 'embedded' not in root.tags:\n push(doc, comm)\n else:\n cb = partial(self._manual_update, events, model, doc, root, parent, comm)\n if doc.session_context:\n doc.add_next_tick_callback(cb)\n else:\n cb()\n\n def _update_model(self, events, msg, root, model, doc, comm):\n self._changing[root.ref['id']] = [\n attr for attr, value in msg.items()\n if not model.lookup(attr).property.matches(getattr(model, attr), value)\n ]\n try:\n model.update(**msg)\n finally:\n del self._changing[root.ref['id']]\n\n def _cleanup(self, root):\n super()._cleanup(root)\n ref = root.ref['id']\n self._models.pop(ref, None)\n comm, client_comm = self._comms.pop(ref, (None, None))\n if comm:\n try:\n comm.close()\n except Exception:\n pass\n if client_comm:\n try:\n client_comm.close()\n except Exception:\n pass\n\n def _param_change(self, *events):\n msgs = []\n for event in events:\n msg = self._process_param_change({event.name: event.new})\n if msg:\n msgs.append(msg)\n\n events = {event.name: event for event in events}\n msg = {k: v for msg in msgs for k, v in msg.items()}\n if not msg:\n return\n\n for ref, (model, parent) in self._models.items():\n if ref not in state._views or ref in state._fake_roots:\n continue\n viewable, root, doc, comm = state._views[ref]\n if comm or not doc.session_context or state._unblocked(doc):\n with unlocked():\n self._update_model(events, msg, root, model, doc, comm)\n if comm and 'embedded' not in root.tags:\n push(doc, comm)\n else:\n cb = partial(self._update_model, events, msg, root, model, doc, comm)\n doc.add_next_tick_callback(cb)\n\n def _process_events(self, events):\n with edit_readonly(state):\n state.busy = True\n try:\n with edit_readonly(self):\n self.param.set_param(**self._process_property_change(events))\n finally:\n with edit_readonly(state):\n state.busy = False\n\n @gen.coroutine\n def _change_coroutine(self, doc=None):\n self._change_event(doc)\n\n def _change_event(self, doc=None):\n try:\n state.curdoc = doc\n thread = threading.current_thread()\n thread_id = thread.ident if thread else None\n state._thread_id = thread_id\n events = self._events\n self._events = {}\n self._process_events(events)\n finally:\n state.curdoc = None\n state._thread_id = None\n\n def _comm_change(self, doc, ref, comm, attr, old, new):\n if attr in self._changing.get(ref, []):\n self._changing[ref].remove(attr)\n return\n\n with hold(doc, comm=comm):\n self._process_events({attr: new})\n\n def _server_change(self, doc, ref, attr, old, new):\n if attr in self._changing.get(ref, []):\n self._changing[ref].remove(attr)\n return\n\n state._locks.clear()\n processing = bool(self._events)\n self._events.update({attr: new})\n if not processing:\n if doc.session_context:\n doc.add_timeout_callback(partial(self._change_coroutine, doc), self._debounce)\n else:\n self._change_event(doc)\n\n\nclass Reactive(Syncable, Viewable):\n \"\"\"\n Reactive is a Viewable object that also supports syncing between\n the objects parameters and the underlying bokeh model either via\n the defined pyviz_comms.Comm type or using bokeh server.\n\n In addition it defines various methods which make it easy to link\n 
the parameters to other objects.\n \"\"\"\n\n #----------------------------------------------------------------\n # Public API\n #----------------------------------------------------------------\n\n def add_periodic_callback(self, callback, period=500, count=None,\n timeout=None, start=True):\n \"\"\"\n Schedules a periodic callback to be run at an interval set by\n the period. Returns a PeriodicCallback object with the option\n to stop and start the callback.\n\n Arguments\n ---------\n callback: callable\n Callable function to be executed at periodic interval.\n period: int\n Interval in milliseconds at which callback will be executed.\n count: int\n Maximum number of times callback will be invoked.\n timeout: int\n Timeout in seconds when the callback should be stopped.\n start: boolean (default=True)\n Whether to start callback immediately.\n\n Returns\n -------\n Return a PeriodicCallback object with start and stop methods.\n \"\"\"\n self.param.warning(\n \"Calling add_periodic_callback on a Panel component is \"\n \"deprecated and will be removed in the next minor release. \"\n \"Use the pn.state.add_periodic_callback API instead.\"\n )\n cb = PeriodicCallback(callback=callback, period=period,\n count=count, timeout=timeout)\n if start:\n cb.start()\n return cb\n\n def link(self, target, callbacks=None, bidirectional=False, **links):\n \"\"\"\n Links the parameters on this object to attributes on another\n object in Python. Supports two modes, either specify a mapping\n between the source and target object parameters as keywords or\n provide a dictionary of callbacks which maps from the source\n parameter to a callback which is triggered when the parameter\n changes.\n\n Arguments\n ---------\n target: object\n The target object of the link.\n callbacks: dict\n Maps from a parameter in the source object to a callback.\n bidirectional: boolean\n Whether to link source and target bi-directionally\n **links: dict\n Maps between parameters on this object to the parameters\n on the supplied object.\n \"\"\"\n if links and callbacks:\n raise ValueError('Either supply a set of parameters to '\n 'link as keywords or a set of callbacks, '\n 'not both.')\n elif not links and not callbacks:\n raise ValueError('Declare parameters to link or a set of '\n 'callbacks, neither was defined.')\n elif callbacks and bidirectional:\n raise ValueError('Bidirectional linking not supported for '\n 'explicit callbacks. 
You must define '\n 'separate callbacks for each direction.')\n\n _updating = []\n def link(*events):\n for event in events:\n if event.name in _updating: continue\n _updating.append(event.name)\n try:\n if callbacks:\n callbacks[event.name](target, event)\n else:\n setattr(target, links[event.name], event.new)\n finally:\n _updating.pop(_updating.index(event.name))\n params = list(callbacks) if callbacks else list(links)\n cb = self.param.watch(link, params)\n\n bidirectional_watcher = None\n if bidirectional:\n _reverse_updating = []\n reverse_links = {v: k for k, v in links.items()}\n def reverse_link(*events):\n for event in events:\n if event.name in _reverse_updating: continue\n _reverse_updating.append(event.name)\n try:\n setattr(self, reverse_links[event.name], event.new)\n finally:\n _reverse_updating.remove(event.name)\n bidirectional_watcher = target.param.watch(reverse_link, list(reverse_links))\n\n link = LinkWatcher(*tuple(cb)+(target, links, callbacks is not None, bidirectional_watcher))\n self._links.append(link)\n return cb\n\n def controls(self, parameters=[], jslink=True):\n \"\"\"\n Creates a set of widgets which allow manipulating the parameters\n on this instance. By default all parameters which support\n linking are exposed, but an explicit list of parameters can\n be provided.\n\n Arguments\n ---------\n parameters: list(str)\n An explicit list of parameters to return controls for.\n jslink: bool\n Whether to use jslinks instead of Python based links.\n This does not allow using all types of parameters.\n\n Returns\n -------\n A layout of the controls\n \"\"\"\n from .param import Param\n from .layout import Tabs, WidgetBox\n from .widgets import LiteralInput\n\n if parameters:\n linkable = parameters\n elif jslink:\n linkable = self._linkable_params + ['loading']\n else:\n linkable = list(self.param)\n\n params = [p for p in linkable if p not in Viewable.param]\n controls = Param(self.param, parameters=params, default_layout=WidgetBox,\n name='Controls')\n layout_params = [p for p in linkable if p in Viewable.param]\n if 'name' not in layout_params and self._rename.get('name', False) is not None and not parameters:\n layout_params.insert(0, 'name')\n style = Param(self.param, parameters=layout_params, default_layout=WidgetBox,\n name='Layout')\n if jslink:\n for p in params:\n widget = controls._widgets[p]\n widget.jslink(self, value=p, bidirectional=True)\n if isinstance(widget, LiteralInput):\n widget.serializer = 'json'\n for p in layout_params:\n widget = style._widgets[p]\n widget.jslink(self, value=p, bidirectional=p != 'loading')\n if isinstance(widget, LiteralInput):\n widget.serializer = 'json'\n\n if params and layout_params:\n return Tabs(controls.layout[0], style.layout[0])\n elif params:\n return controls.layout[0]\n return style.layout[0]\n\n def jscallback(self, args={}, **callbacks):\n \"\"\"\n Allows defining a JS callback to be triggered when a property\n changes on the source object. 
The keyword arguments define the\n properties that trigger a callback and the JS code that gets\n executed.\n\n Arguments\n ----------\n args: dict\n A mapping of objects to make available to the JS callback\n **callbacks: dict\n A mapping between properties on the source model and the code\n to execute when that property changes\n\n Returns\n -------\n callback: Callback\n The Callback which can be used to disable the callback.\n \"\"\"\n\n from .links import Callback\n for k, v in list(callbacks.items()):\n callbacks[k] = self._rename.get(v, v)\n return Callback(self, code=callbacks, args=args)\n\n def jslink(self, target, code=None, args=None, bidirectional=False, **links):\n \"\"\"\n Links properties on the source object to those on the target\n object in JS code. Supports two modes, either specify a\n mapping between the source and target model properties as\n keywords or provide a dictionary of JS code snippets which\n maps from the source parameter to a JS code snippet which is\n executed when the property changes.\n\n Arguments\n ----------\n target: HoloViews object or bokeh Model or panel Viewable\n The target to link the value to.\n code: dict\n Custom code which will be executed when the widget value\n changes.\n bidirectional: boolean\n Whether to link source and target bi-directionally\n **links: dict\n A mapping between properties on the source model and the\n target model property to link it to.\n\n Returns\n -------\n link: GenericLink\n The GenericLink which can be used unlink the widget and\n the target model.\n \"\"\"\n if links and code:\n raise ValueError('Either supply a set of properties to '\n 'link as keywords or a set of JS code '\n 'callbacks, not both.')\n elif not links and not code:\n raise ValueError('Declare parameters to link or a set of '\n 'callbacks, neither was defined.')\n if args is None:\n args = {}\n\n mapping = code or links\n for k in mapping:\n if k.startswith('event:'):\n continue\n elif hasattr(self, 'object') and isinstance(self.object, LayoutDOM):\n current = self.object\n for attr in k.split('.'):\n if not hasattr(current, attr):\n raise ValueError(f\"Could not resolve {k} on \"\n f\"{self.object} model. 
Ensure \"\n \"you jslink an attribute that \"\n \"exists on the bokeh model.\")\n current = getattr(current, attr)\n elif (k not in self.param and k not in list(self._rename.values())):\n matches = difflib.get_close_matches(k, list(self.param))\n if matches:\n matches = ' Similar parameters include: %r' % matches\n else:\n matches = ''\n raise ValueError(\"Could not jslink %r parameter (or property) \"\n \"on %s object because it was not found.%s\"\n % (k, type(self).__name__, matches))\n elif (self._source_transforms.get(k, False) is None or\n self._rename.get(k, False) is None):\n raise ValueError(\"Cannot jslink %r parameter on %s object, \"\n \"the parameter requires a live Python kernel \"\n \"to have an effect.\" % (k, type(self).__name__))\n\n if isinstance(target, Syncable) and code is None:\n for k, p in mapping.items():\n if k.startswith('event:'):\n continue\n elif p not in target.param and p not in list(target._rename.values()):\n matches = difflib.get_close_matches(p, list(target.param))\n if matches:\n matches = ' Similar parameters include: %r' % matches\n else:\n matches = ''\n raise ValueError(\"Could not jslink %r parameter (or property) \"\n \"on %s object because it was not found.%s\"\n % (p, type(self).__name__, matches))\n elif (target._source_transforms.get(p, False) is None or\n target._rename.get(p, False) is None):\n raise ValueError(\"Cannot jslink %r parameter on %s object \"\n \"to %r parameter on %s object. It requires \"\n \"a live Python kernel to have an effect.\"\n % (k, type(self).__name__, p, type(target).__name__))\n\n from .links import Link\n return Link(self, target, properties=links, code=code, args=args,\n bidirectional=bidirectional)\n\n\n\nclass SyncableData(Reactive):\n \"\"\"\n A baseclass for components which sync one or more data parameters\n with the frontend via a ColumnDataSource.\n \"\"\"\n\n selection = param.List(default=[], doc=\"\"\"\n The currently selected rows in the data.\"\"\")\n\n # Parameters which when changed require an update of the data \n _data_params = []\n\n _rename = {'selection': None}\n\n __abstract = True\n\n def __init__(self, **params):\n super().__init__(**params)\n self._data = None\n self._processed = None\n self.param.watch(self._validate, self._data_params)\n if self._data_params:\n self.param.watch(self._update_cds, self._data_params)\n self.param.watch(self._update_selected, 'selection')\n self._validate(None)\n self._update_cds()\n\n def _validate(self, event):\n \"\"\"\n Allows implementing validation for the data parameters.\n \"\"\"\n\n def _get_data(self):\n \"\"\"\n Implemented by subclasses converting data parameter(s) into\n a ColumnDataSource compatible data dictionary.\n\n Returns\n -------\n processed: object\n Raw data after pre-processing (e.g. 
after filtering)\n data: dict\n Dictionary of columns used to instantiate and update the\n ColumnDataSource\n \"\"\"\n\n def _update_column(self, column, array):\n \"\"\"\n Implemented by subclasses converting changes in columns to\n changes in the data parameter.\n\n Parameters\n ----------\n column: str\n The name of the column to update.\n array: numpy.ndarray\n The array data to update the column with.\n \"\"\"\n data = getattr(self, self._data_params[0])\n data[column] = array\n\n def _update_data(self, data):\n self.param.set_param(**{self._data_params[0]: data})\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n for event in events:\n if event.type == 'triggered' and self._updating:\n continue\n elif hasattr(self, '_update_' + event.name):\n getattr(self, '_update_' + event.name)(model)\n\n def _update_cds(self, *events):\n if self._updating:\n return\n self._processed, self._data = self._get_data()\n for ref, (m, _) in self._models.items():\n m.source.data = self._data\n push_on_root(ref)\n\n def _update_selected(self, *events, indices=None):\n if self._updating:\n return\n indices = self.selection if indices is None else indices\n for ref, (m, _) in self._models.items():\n m.source.selected.indices = indices\n push_on_root(ref)\n\n @updating\n def _stream(self, stream, rollover=None):\n for ref, (m, _) in self._models.items():\n m.source.stream(stream, rollover)\n push_on_root(ref)\n\n @updating\n def _patch(self, patch):\n for ref, (m, _) in self._models.items():\n m.source.patch(patch)\n push_on_root(ref)\n\n def stream(self, stream_value, rollover=None, reset_index=True):\n \"\"\"\n Streams (appends) the `stream_value` provided to the existing\n value in an efficient manner.\n\n Arguments\n ---------\n stream_value: (Union[pd.DataFrame, pd.Series, Dict])\n The new value(s) to append to the existing value.\n rollover: int\n A maximum column size, above which data from the start of\n the column begins to be discarded. If None, then columns\n will continue to grow unbounded.\n reset_index (bool, default=True):\n If True and the stream_value is a DataFrame, then its index\n is reset. 
Helps to keep the index unique and named `index`.\n\n Raises\n ------\n ValueError: Raised if the stream_value is not a supported type.\n\n Examples\n --------\n\n Stream a Series to a DataFrame\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> stream_value = pd.Series({\"x\": 4, \"y\": \"d\"})\n >>> obj.stream(stream_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 2, 4], 'y': ['a', 'b', 'd']}\n\n Stream a Dataframe to a Dataframe\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> stream_value = pd.DataFrame({\"x\": [3, 4], \"y\": [\"c\", \"d\"]})\n >>> obj.stream(stream_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 2, 3, 4], 'y': ['a', 'b', 'c', 'd']}\n\n Stream a Dictionary row to a DataFrame\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> tabulator = DataComponent(value)\n >>> stream_value = {\"x\": 4, \"y\": \"d\"}\n >>> obj.stream(stream_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 2, 4], 'y': ['a', 'b', 'd']}\n\n Stream a Dictionary of Columns to a Dataframe\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> stream_value = {\"x\": [3, 4], \"y\": [\"c\", \"d\"]}\n >>> obj.stream(stream_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 2, 3, 4], 'y': ['a', 'b', 'c', 'd']}\n \"\"\"\n if 'pandas' in sys.modules:\n import pandas as pd\n else:\n pd = None\n if pd and isinstance(stream_value, pd.DataFrame):\n if isinstance(self._processed, dict):\n self.stream(stream_value.to_dict(), rollover)\n return\n if reset_index:\n value_index_start = self._processed.index.max() + 1\n stream_value = stream_value.reset_index(drop=True)\n stream_value.index += value_index_start\n combined = pd.concat([self._processed, stream_value])\n if rollover is not None:\n combined = combined.iloc[-rollover:]\n with param.discard_events(self):\n self._update_data(combined)\n try:\n self._updating = True\n self.param.trigger(self._data_params[0])\n finally:\n self._updating = False\n try:\n self._updating = True\n self._stream(stream_value, rollover)\n finally:\n self._updating = False\n elif pd and isinstance(stream_value, pd.Series):\n if isinstance(self._processed, dict):\n self.stream({k: [v] for k, v in stream_value.to_dict().items()}, rollover)\n return\n value_index_start = self._processed.index.max() + 1\n self._processed.loc[value_index_start] = stream_value\n with param.discard_events(self):\n self._update_data(self._processed)\n self._updating = True\n try:\n self._stream(self._processed.iloc[-1:], rollover)\n finally:\n self._updating = False\n elif isinstance(stream_value, dict):\n if isinstance(self._processed, dict):\n if not all(col in stream_value for col in self._data):\n raise ValueError(\"Stream update must append to all columns.\")\n for col, array in stream_value.items():\n combined = np.concatenate([self._data[col], array])\n if rollover is not None:\n combined = combined[-rollover:]\n self._update_column(col, combined)\n self._updating = True\n try:\n self._stream(stream_value, rollover)\n finally:\n self._updating = False\n else:\n try:\n stream_value = pd.DataFrame(stream_value)\n except ValueError:\n stream_value = pd.Series(stream_value)\n self.stream(stream_value)\n else:\n raise ValueError(\"The stream value provided is not a DataFrame, Series or Dict!\")\n\n def patch(self, patch_value):\n \"\"\"\n Efficiently patches (updates) the existing value with the `patch_value`.\n\n 
Arguments\n ---------\n patch_value: (Union[pd.DataFrame, pd.Series, Dict])\n The value(s) to patch the existing value with.\n\n Raises\n ------\n ValueError: Raised if the patch_value is not a supported type.\n\n Examples\n --------\n\n Patch a DataFrame with a Dictionary row.\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> patch_value = {\"x\": [(0, 3)]}\n >>> obj.patch(patch_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [3, 2], 'y': ['a', 'b']}\n\n Patch a Dataframe with a Dictionary of Columns.\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> patch_value = {\"x\": [(slice(2), (3,4))], \"y\": [(1,'d')]}\n >>> obj.patch(patch_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [3, 4], 'y': ['a', 'd']}\n\n Patch a DataFrame with a Series. Please note the index is used in the update.\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> patch_value = pd.Series({\"index\": 1, \"x\": 4, \"y\": \"d\"})\n >>> obj.patch(patch_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 4], 'y': ['a', 'd']}\n\n Patch a Dataframe with a Dataframe. Please note the index is used in the update.\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> patch_value = pd.DataFrame({\"x\": [3, 4], \"y\": [\"c\", \"d\"]})\n >>> obj.patch(patch_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [3, 4], 'y': ['c', 'd']}\n \"\"\"\n if self._processed is None or isinstance(patch_value, dict):\n self._patch(patch_value)\n return\n\n if 'pandas' in sys.modules:\n import pandas as pd\n else:\n pd = None\n data = getattr(self, self._data_params[0])\n if pd and isinstance(patch_value, pd.DataFrame):\n patch_value_dict = {}\n for column in patch_value.columns:\n patch_value_dict[column] = []\n for index in patch_value.index:\n patch_value_dict[column].append((index, patch_value.loc[index, column]))\n self.patch(patch_value_dict)\n elif pd and isinstance(patch_value, pd.Series):\n if \"index\" in patch_value: # Series orient is row\n patch_value_dict = {\n k: [(patch_value[\"index\"], v)] for k, v in patch_value.items()\n }\n patch_value_dict.pop(\"index\")\n else: # Series orient is column\n patch_value_dict = {\n patch_value.name: [(index, value) for index, value in patch_value.items()]\n }\n self.patch(patch_value_dict)\n elif isinstance(patch_value, dict):\n for k, v in patch_value.items():\n for index, patch in v:\n if pd and isinstance(self._processed, pd.DataFrame):\n data.loc[index, k] = patch\n else:\n data[k][index] = patch\n self._updating = True\n try:\n self._patch(patch_value)\n finally:\n self._updating = False\n else:\n raise ValueError(\n f\"Patching with a patch_value of type {type(patch_value).__name__} \"\n \"is not supported. 
Please provide a DataFrame, Series or Dict.\"\n )\n\n\nclass ReactiveData(SyncableData):\n \"\"\"\n An extension of SyncableData which bi-directionally syncs a data\n parameter between frontend and backend using a ColumnDataSource.\n \"\"\"\n\n def _update_selection(self, indices):\n self.selection = indices\n\n def _process_events(self, events):\n if 'data' in events:\n data = events.pop('data')\n if self._updating:\n data = {}\n _, old_data = self._get_data()\n updated = False\n for k, v in data.items():\n if k in self.indexes:\n continue\n k = self._renamed_cols.get(k, k)\n if isinstance(v, dict):\n v = [v for _, v in sorted(v.items(), key=lambda it: int(it[0]))]\n try:\n isequal = (old_data[k] == np.asarray(v)).all()\n except Exception:\n isequal = False\n if not isequal:\n self._update_column(k, v)\n updated = True\n if updated:\n self._updating = True\n try:\n self.param.trigger('value')\n finally:\n self._updating = False\n if 'indices' in events:\n self._updating = True\n try:\n self._update_selection(events.pop('indices'))\n finally:\n self._updating = False\n super(ReactiveData, self)._process_events(events)\n"
] | [
[
"pandas.concat",
"pandas.Series",
"numpy.asarray",
"pandas.DataFrame",
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
trhallam/digirock | [
"05b1199d741a384345a4930605be97369c9ec270",
"05b1199d741a384345a4930605be97369c9ec270"
] | [
"docs/examples/batzle_wang_1992.py",
"tests/test_fluids_ecl.py"
] | [
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.13.6\n# kernelspec:\n# display_name: Python 3 (ipykernel)\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# __Recreate the work by Batzle and Wang 1992 to check `digirock.fluids.bw92` functionality.__\n#\n# Tony Hallam 2022\n\n# %% [markdown]\n# This notebook contains working code to test the functionality of `bw98.py` in `fluids` module of `digirock`, ensuring that the functions honor the work by B&W 1992.\n#\n# _Batzle, M., and Wang, Z. [1992]. Seismic properties of pore fluids. Geophysics, 57(11), 1396–1408._\n# [Available from the SEG](https://library.seg.org/doi/10.1190/1.1443207).\n\n# %%\nimport numpy as np\nfrom digirock.fluids import bw92\n\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\nrc(\"font\", size=14)\nfigsize = (15, 5)\n\n# %%\n# Input parameters has defined by B&W 1992 for plotting purporses\n\ntemp_ar = np.arange(10, 350, 5) # degC\npres_ar = np.arange(1, 100, 0.1) # Mpa\nsal_ar = np.arange(0, 0.3, 0.01)\npres = np.array([0.1, 10, 25, 50]) # Mpa\ntemps = np.array([10, 100, 200, 350]) # degC\ngsg = [0.6, 1.2] # gas Gravity\nor0 = [1.0, 0.88, 0.78] # oil density re 15.6degC\n\n# %% [markdown]\n# ## GAS\n#\n# Hydrocarbon density as a function of temperature and pressure using `bw92.gas_oga_density`, BW92 Eq 10a.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n\nfor G in gsg:\n for p in pres:\n ax[0].plot(temp_ar, bw92.gas_oga_density(temp_ar, p, G), label=f'G={G}, P={p}')\n \n for t in temps:\n ax[1].plot(pres_ar, bw92.gas_oga_density(t, pres_ar, G), label=f'G={G}, T={t}')\n \nax[0].set_xlim(0, 350)\nax[0].set_ylim(0, 0.6)\nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Density (g/cc)')\nax[0].legend()\n_ = ax[0].set_title('B&W 1992, Figure 2')\n\nax[1].set_xlim(0, 50)\nax[1].set_ylim(0, 0.6)\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_ylabel('Density (g/cc)')\n_ = ax[1].legend()\n\n\n# %% [markdown]\n# Gas adibatic bulk modulus using `bw92.gas_adiabatic_bulkmod`.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\n\nfor G in gsg:\n for p in pres:\n ax[0].plot(temp_ar, bw92.gas_adiabatic_bulkmod(temp_ar, p, G)*1000, label=f'G={G}, P={p}')\n \n for t in temps:\n ax[1].plot(pres_ar, bw92.gas_adiabatic_bulkmod(t, pres_ar, G)*1000, label=f'G={G}, T={t}')\n\nax[0].set_xlim(0, 350)\nax[0].set_ylim(0, 650)\nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Bulk Modulus (MPa)')\nax[0].legend()\nax[0].set_title('B&W 1992 - Figure 3')\n\nax[1].set_xlim(0, 50)\nax[1].set_xlabel('Pressure (MPa)')\n_ = ax[1].legend()\n\n# %% [markdown]\n# Gas viscosity using `bw92.gas_adiabatic_viscosity` using equations 12 and 13.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\n\n\nfor G in gsg:\n for p in pres:\n ax[0].plot(temp_ar, bw92.gas_adiabatic_viscosity(temp_ar, p, G), label=f'G={G}, P={p}')\n \n for t in temps:\n ax[1].plot(pres_ar, bw92.gas_adiabatic_viscosity(t, pres_ar, G), label=f'G={G}, T={t}')\n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Viscosity (centipoise)')\nax[0].set_xlim(0, 350)\nax[0].set_ylim(0, 0.09)\nax[0].set_title('B&W 1992 - Figure 4')\n\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_xlim(0, 50)\n_ = ax[1].legend()\n\n# %% [markdown]\n# ## OIL\n#\n# Dead oil density using `bw92.oil_density`, BW92 eq19.\n\n# %%\nfig, ax = 
plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\n\n\nfor p in pres:\n for r0 in or0:\n ax[0].plot(temp_ar, bw92.oil_density(r0, p, temp_ar), label=f'r0={r0}, P={p}')\n \n for t in temps:\n ax[1].plot(pres_ar, bw92.oil_density(r0, pres_ar, t), label=f'r0={r0}, T={t}')\n\nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Oil Density (g/cc)')\nax[0].set_xlim(0, 350)\nax[0].set_ylim(0.55, 1.05)\nax[0].set_title('B&W 1992 - Figure 5')\nax[0].legend()\n\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_xlim(0, 50)\n_ = ax[1].legend(loc=[1.1, 0])\n\n\n# %% [markdown]\n# Oil acoustic velocity using `bw92.oil_velocity`, BW92 eq 20a.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.5,5))\n\napi_ar = np.arange(0,70) # oil api\nrho0_ar = 141/ (api_ar + 131.5)\n\nax.plot(api_ar, bw92.oil_velocity(rho0_ar, 15.6, 1E-4, 0.6, 50))\nax.set_xlim(0, 70)\nax.set_ylim(1100, 1800)\n\nax.set_xlabel('Oil API')\nax.set_ylabel('Oil Velocity (m/s)')\nax.set_title('B&W 1992 - Figure 6')\n\n\n# %% [markdown]\n# Oil bulk modulus using `bw92.bulkmod`.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\nax[0].set_xlim(0, 350)\nax[0].set_ylim(0, 30)\n\nfor r0 in or0:\n for p in pres:\n oil_rho = bw92.oil_density(r0, p, temp_ar)\n oil_vp = bw92.oil_velocity(r0, p, temp_ar, 0.6, 50)\n ax[0].plot(temp_ar, bw92.bulkmod(oil_rho*10, oil_vp),label=f\"{r0} {p}MPa\")\n \n for t in temps:\n oil_rho = bw92.oil_density(r0, pres_ar, t)\n oil_vp = bw92.oil_velocity(r0, pres_ar, t, 0.6, 50)\n ax[1].plot(pres_ar, bw92.bulkmod(oil_rho*10, oil_vp),label=f\"{r0} {t}degC\")\n \n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Oil Bulk Modlus (MPa)')\nax[0].set_title('B&W 1992 - Figure 7')\nax[0].legend()#cols=2)\n\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_xlim(0, 50)\n_ = ax[1].legend()\n\n# %% [markdown]\n# ## WATER\n#\n# Set up some parameters for plotting water.\n\n# %%\npresv = [50, 100, 110] # pressure MPa for velocity plots\npresrho = [9.81, 49, 98.1] # pressure MPa for density plots\npresk = [0.1, 50, 100] # pressure MPa for modulus plots\nsal = np.array([20000, 150000, 240000])/1000000 # ppm to weigh fraction\nsalk = np.array([0, 150000, 300000])/1000000 # ppm to weigh fraction\n\n\n\n\n# %% [markdown]\n# Pure water sonic velocity using `bw92.wat_velocity_pure` and pure water density using `bw92.wat_density_pure`. The parameters Batzle and Wang use from Wilson for pure water velocity were only calibrated to 100degC and 100MPa. So the behaviour above that is a bit odd, even though the plot in the 1992 paper looks good.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharex=True)\n\npresv = [50, 100, 130] # pressure MPa\n\ntvp_mesh, pvvt_mesh = np.meshgrid(temp_ar, presv)\nwvp_mesh = bw92.wat_velocity_pure(tvp_mesh, pvvt_mesh)\nwdp_mesh = bw92.wat_density_pure(tvp_mesh, pvvt_mesh)\n\nfor i, p in enumerate(presv):\n ax[0].plot(temp_ar, wvp_mesh[i, :], label=f\"{p}MPa\")\n ax[1].plot(temp_ar, wdp_mesh[i, :], label=f\"{p}MPa\")\n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Velocity (m/s)')\nax[0].set_title('B&W 1992 - Figure 12')\nax[0].legend()#cols=2)\nax[0].set_xlim(0, 350)\nax[0].set_ylim(500, 2000)\n\nax[1].set_xlabel('Temp (C)')\nax[1].set_ylabel('Density (g/cc)')\n_ = ax[1].legend()\n\n# %% [markdown]\n# Brine sonic velocity using `bw92.wat_velocity_brine` and `bw92.wat_density_brine`. 
Again, odd behaviour due to the influence of the pure water function on the brine velocity.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharex=True)\n\npresv = [50, 100, 130] # pressure MPa\n\ndb1, db2, db3 = np.meshgrid(temp_ar, presrho, sal)\nwdb_mesh = bw92.wat_density_brine(db1, db2, db3)\nvb1, vb2, vb3 = np.meshgrid(temp_ar, presv, sal)\nwvb_mesh = bw92.wat_velocity_brine(vb1, vb2, vb3)\n\nfor i, p in enumerate(presv):\n ax[0].plot(temp_ar, wvb_mesh[i, :], label=f\"{p}MPa\")\n ax[1].plot(temp_ar, wdb_mesh[i, :], label=f\"{p}MPa\")\n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Velocity (m/s)')\nax[0].set_title('B&W 1992 - Figure 13')\nax[0].legend()#cols=2)\nax[0].set_xlim(0, 350)\nax[0].set_ylim(1000, 2500)\n\nax[1].set_xlabel('Temp (C)')\nax[1].set_ylabel('Density (g/cc)')\n_ = ax[1].legend()\n\n# %% [markdown]\n# Brine bulk modulus using `bw92.wat_bulkmod`. This relies on calculating the velocity and density first.\n\n# %% tags=[]\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n\nkb1, kb2, kb3 = np.meshgrid(temp_ar, presk, salk)\nkr = bw92.wat_density_brine(kb1, kb2, kb3)\nkv = bw92.wat_velocity_brine(kb1, kb2, kb3)\nwkb_mesh = bw92.wat_bulkmod(kr, kv)\n\nfor i, p in enumerate(presv):\n ax[0].plot(temp_ar, wkb_mesh[i, :], label=f\"{p}MPa\")\n \nkb1, kb2, kb3 = np.meshgrid(pres_ar, temps, salk)\nkr = bw92.wat_density_brine(kb2, kb1, kb3)\nkv = bw92.wat_velocity_brine(kb2, kb1, kb3)\nwkb_mesh = bw92.wat_bulkmod(kr, kv) \n\nfor i, t in enumerate(temps):\n ax[1].plot(pres_ar, wkb_mesh[i, :], label=f\"{t}degC\")\n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Bulk Modulus (GPa)')\nax[0].set_ylim(0.5, 5.5)\nax[0].set_title('B&W 1992 - Figure 14')\nax[0].legend()#cols=2)\n\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_ylabel('Bulk Modulus (GPa)')\n_ = ax[1].legend()\n\n# %% [markdown]\n# ## Other Methods\n#\n# For a full list of the BW92 equations available with `digirock` see the [`digirock.fluids.bw92` api](../api/fluid_methods.html#batzle-and-wang-92).\n",
"\"\"\"Test functions for pem.fluid.ecl module\r\n\"\"\"\r\n\r\nimport pytest\r\nfrom pytest import approx\r\nimport numpy as np\r\nimport digirock.fluids.ecl as fluid_ecl\r\nfrom inspect import getmembers, isfunction\r\n\r\n\r\[email protected]\r\ndef tol():\r\n return {\r\n \"rel\": 0.05, # relative testing tolerance in percent\r\n \"abs\": 0.00001, # absolute testing tolerance\r\n }\r\n\r\n\r\[email protected](\r\n \"pres, extrap, ans\",\r\n [\r\n (325, \"const\", 1.4615),\r\n (325, \"pchip\", 1.4615),\r\n (np.r_[325, 375], \"const\", np.r_[1.4615, 1.4505]),\r\n (np.r_[325, 375], \"pchip\", np.r_[1.4615, 1.4505]),\r\n ],\r\n)\r\ndef test_oil_fvf_table(test_data, pres, ans, extrap, tol):\r\n tab = np.loadtxt(test_data / \"PVT_BO.inc\")\r\n assert np.allclose(\r\n fluid_ecl.oil_fvf_table(tab[:, 0], tab[:, 1], pres, extrap=extrap),\r\n ans,\r\n rtol=tol[\"rel\"],\r\n )\r\n\r\n\r\ndef test_oil_fvf_table_bad_pchi(test_data):\r\n tab = np.loadtxt(test_data / \"PVT_BO.inc\")\r\n # test bad extrap\r\n with pytest.raises(ValueError):\r\n assert fluid_ecl.oil_fvf_table(\r\n tab[:, 0], tab[:, 1], 235, extrap=\"Unknown Extrap\"\r\n )\r\n\r\n\r\[email protected](\r\n \"pres, extrap, ans\",\r\n [\r\n (325, \"const\", 1.4615),\r\n (325, \"pchip\", 1.4615),\r\n (np.r_[325, 375], \"const\", np.r_[1.4615, 1.4505]),\r\n (np.r_[325, 375], \"pchip\", np.r_[1.4615, 1.4505]),\r\n ],\r\n)\r\ndef test_oil_fvf_table(test_data, pres, ans, extrap, tol):\r\n tab = np.loadtxt(test_data / \"PVT_BO.inc\")\r\n assert np.allclose(\r\n fluid_ecl.oil_fvf_table(tab[:, 0], tab[:, 1], pres, extrap=extrap),\r\n ans,\r\n rtol=tol[\"rel\"],\r\n )\r\n\r\n\r\[email protected](\"api,ans\", ((20, 0.933993399339934), (45, 0.8016997167138812)))\r\ndef test_e100_oil_density(api, ans, tol):\r\n assert fluid_ecl.e100_oil_density(api) == approx(ans)\r\n assert np.allclose(\r\n fluid_ecl.e100_oil_density(np.r_[api, api]), np.r_[ans, ans], atol=tol[\"abs\"]\r\n )\r\n"
] | [
[
"numpy.meshgrid",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.array",
"matplotlib.rc"
],
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iamgroot42/opacus | [
"51708309e71c030aa2bf15d6dccc7bcbbe9ed570",
"51708309e71c030aa2bf15d6dccc7bcbbe9ed570"
] | [
"examples/char-lstm-classification.py",
"opacus/tests/grad_samples/embedding_test.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nfrom collections import Counter\nfrom pathlib import Path\nfrom statistics import mean\n\nimport torch\nimport torch.nn as nn\nfrom opacus import PrivacyEngine\nfrom opacus.layers import DPGRU, DPLSTM, DPRNN\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset\nfrom tqdm import tqdm\n\n\nparser = argparse.ArgumentParser(\n description=\"PyTorch Name language classification DP Training\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n)\nparser.add_argument(\n \"--data-root\",\n required=True,\n type=str,\n help=\"Path to training set of names (ie. ~/data/names/)\",\n)\nparser.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\",\n help=\"GPU ID for this process\",\n)\nparser.add_argument(\n \"-b\",\n \"--batch-size\",\n default=800,\n type=int,\n metavar=\"N\",\n help=\"mini-batch size\",\n)\nparser.add_argument(\n \"--mode\",\n default=\"lstm\",\n choices=[\"lstm\", \"gru\", \"rnn\"],\n help=\"recursive network type\",\n)\nparser.add_argument(\n \"--embedding-size\", default=64, type=int, help=\"Character embedding dimension\"\n)\nparser.add_argument(\n \"--hidden-size\", default=128, type=int, help=\"hidden state dimensions\"\n)\nparser.add_argument(\"--n-layers\", default=1, type=int, help=\"How many layers to use\")\nparser.add_argument(\n \"--test-every\",\n default=0,\n type=int,\n help=\"Run evaluation on the test every these many epochs\",\n)\nparser.add_argument(\n \"--bidirectional\",\n action=\"store_true\",\n default=False,\n help=\"If turned on, makes the RNN bidirectional\",\n)\nparser.add_argument(\n \"--learning-rate\",\n default=2.0,\n type=float,\n metavar=\"LR\",\n help=\"initial learning rate\",\n)\nparser.add_argument(\"--epochs\", type=int, default=10, help=\"Number of training epochs\")\nparser.add_argument(\n \"--train-split\",\n type=float,\n default=0.8,\n help=\"Fraction of data to utilize for training (rest for evaluation)\",\n)\nparser.add_argument(\n \"--sigma\",\n type=float,\n default=1.0,\n metavar=\"S\",\n help=\"Noise multiplier\",\n)\nparser.add_argument(\n \"-c\",\n \"--max-per-sample-grad-norm\",\n type=float,\n default=1.5,\n metavar=\"C\",\n help=\"Clip per-sample gradients to this norm\",\n)\nparser.add_argument(\n \"--disable-dp\",\n action=\"store_true\",\n default=False,\n help=\"Disable privacy training and just train with vanilla SGD\",\n)\nparser.add_argument(\n \"--secure-rng\",\n action=\"store_true\",\n default=False,\n help=\"Enable Secure RNG to have trustworthy privacy guarantees. 
Comes at a performance cost\",\n)\nparser.add_argument(\n \"--delta\",\n type=float,\n default=8e-5,\n metavar=\"D\",\n help=\"Target delta\",\n)\nparser.add_argument(\n \"--print-every\",\n type=int,\n default=5,\n help=\"Print the evaluation accuracy every these many iterations\",\n)\n\n\nclass CharByteEncoder(nn.Module):\n \"\"\"\n This encoder takes a UTF-8 string and encodes its bytes into a Tensor. It can also\n perform the opposite operation to check a result.\n\n Examples:\n\n >>> encoder = CharByteEncoder()\n >>> t = encoder('Ślusàrski') # returns tensor([256, 197, 154, 108, 117, 115, 195, 160, 114, 115, 107, 105, 257])\n >>> encoder.decode(t) # returns \"<s>Ślusàrski</s>\"\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.start_token = \"<s>\"\n self.end_token = \"</s>\"\n self.pad_token = \"<pad>\"\n\n self.start_idx = 256\n self.end_idx = 257\n self.pad_idx = 258\n\n def forward(self, s: str, pad_to=0) -> torch.LongTensor:\n \"\"\"\n Encodes a string. It will append a start token <s> (id=self.start_idx) and an end token </s>\n (id=self.end_idx).\n\n Args:\n s: The string to encode.\n pad_to: If not zero, pad by appending self.pad_idx until string is of length `pad_to`.\n Defaults to 0.\n\n Returns:\n The encoded LongTensor of indices.\n \"\"\"\n encoded = s.encode()\n n_pad = pad_to - len(encoded) if pad_to > len(encoded) else 0\n return torch.LongTensor(\n [self.start_idx]\n + [c for c in encoded] # noqa\n + [self.end_idx]\n + [self.pad_idx for _ in range(n_pad)]\n )\n\n def decode(self, char_ids_tensor: torch.LongTensor) -> str:\n \"\"\"\n The inverse of `forward`. Keeps the start, end and pad indices.\n \"\"\"\n char_ids = char_ids_tensor.cpu().detach().tolist()\n\n out = []\n buf = []\n for c in char_ids:\n if c < 256:\n buf.append(c)\n else:\n if buf:\n out.append(bytes(buf).decode())\n buf = []\n if c == self.start_idx:\n out.append(self.start_token)\n elif c == self.end_idx:\n out.append(self.end_token)\n elif c == self.pad_idx:\n out.append(self.pad_token)\n\n if buf: # in case some are left\n out.append(bytes(buf).decode())\n return \"\".join(out)\n\n def __len__(self):\n \"\"\"\n The length of our encoder space. 
This is fixed to 256 (one byte) + 3 special chars\n (start, end, pad).\n\n Returns:\n 259\n \"\"\"\n return 259\n\n\nclass NamesDataset(Dataset):\n def __init__(self, root):\n self.root = Path(root)\n\n self.labels = list({langfile.stem for langfile in self.root.iterdir()})\n self.labels_dict = {label: i for i, label in enumerate(self.labels)}\n self.encoder = CharByteEncoder()\n self.samples = self.construct_samples()\n\n def __getitem__(self, i):\n return self.samples[i]\n\n def __len__(self):\n return len(self.samples)\n\n def construct_samples(self):\n samples = []\n for langfile in self.root.iterdir():\n label_name = langfile.stem\n label_id = self.labels_dict[label_name]\n with open(langfile, \"r\") as fin:\n for row in fin:\n samples.append(\n (self.encoder(row.strip()), torch.tensor(label_id).long())\n )\n return samples\n\n def label_count(self):\n cnt = Counter()\n for _x, y in self.samples:\n label = self.labels[int(y)]\n cnt[label] += 1\n return cnt\n\n\nVOCAB_SIZE = 256 + 3 # 256 alternatives in one byte, plus 3 special characters.\n\n\nclass CharNNClassifier(nn.Module):\n def __init__(\n self,\n rnn_type,\n embedding_size,\n hidden_size,\n output_size,\n num_layers=1,\n bidirectional=False,\n vocab_size=VOCAB_SIZE,\n ):\n super().__init__()\n\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.vocab_size = vocab_size\n\n self.embedding = nn.Embedding(vocab_size, embedding_size)\n self.rnn = rnn_type(\n embedding_size,\n hidden_size,\n num_layers=num_layers,\n bidirectional=bidirectional,\n batch_first=True,\n )\n self.out_layer = nn.Linear(hidden_size, output_size)\n\n def forward(self, x, hidden=None):\n x = self.embedding(x) # -> [B, T, D]\n x, _ = self.rnn(x, hidden) # -> [B, T, H]\n x = x[:, -1, :] # -> [B, H]\n x = self.out_layer(x) # -> [B, C]\n return x\n\n\ndef padded_collate(batch, padding_idx=0):\n x = pad_sequence(\n [elem[0] for elem in batch], batch_first=True, padding_value=padding_idx\n )\n y = torch.stack([elem[1] for elem in batch]).long()\n\n return x, y\n\n\ndef train(\n model,\n criterion,\n optimizer,\n train_loader,\n epoch,\n privacy_engine,\n target_delta,\n device=\"cuda:0\",\n):\n model.train()\n\n accs = []\n losses = []\n for x, y in tqdm(train_loader):\n x = x.to(device)\n y = y.to(device)\n\n logits = model(x)\n loss = criterion(logits, y)\n loss.backward()\n\n optimizer.step()\n optimizer.zero_grad()\n\n preds = logits.argmax(-1)\n n_correct = float(preds.eq(y).sum())\n batch_accuracy = n_correct / len(y)\n\n accs.append(batch_accuracy)\n losses.append(float(loss))\n\n printstr = (\n f\"\\t Epoch {epoch}. 
Accuracy: {mean(accs):.6f} | Loss: {mean(losses):.6f}\"\n )\n try:\n epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(\n delta=target_delta\n )\n printstr += f\" | (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}\"\n except AttributeError:\n pass\n print(printstr)\n return\n\n\ndef test(model, test_loader, privacy_engine, target_delta, device=\"cuda:0\"):\n model.eval()\n\n accs = []\n with torch.no_grad():\n for x, y in tqdm(test_loader):\n x = x.to(device)\n y = y.to(device)\n\n preds = model(x).argmax(-1)\n n_correct = float(preds.eq(y).sum())\n batch_accuracy = n_correct / len(y)\n\n accs.append(batch_accuracy)\n mean_acc = mean(accs)\n printstr = \"\\n----------------------------\\n\" f\"Test Accuracy: {mean_acc:.6f}\"\n if privacy_engine:\n epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(\n delta=target_delta\n )\n printstr += f\" (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}\"\n print(printstr + \"\\n----------------------------\\n\")\n return mean_acc\n\n\ndef main():\n args = parser.parse_args()\n device = torch.device(args.device)\n ds = NamesDataset(args.data_root)\n train_len = int(args.train_split * len(ds))\n test_len = len(ds) - train_len\n\n print(f\"{train_len} samples for training, {test_len} for testing\")\n\n if args.secure_rng:\n try:\n import torchcsprng as prng\n except ImportError as e:\n msg = (\n \"To use secure RNG, you must install the torchcsprng package! \"\n \"Check out the instructions here: https://github.com/pytorch/csprng#installation\"\n )\n raise ImportError(msg) from e\n\n generator = prng.create_random_device_generator(\"/dev/urandom\")\n\n else:\n generator = None\n\n train_ds, test_ds = torch.utils.data.random_split(\n ds, [train_len, test_len], generator=generator\n )\n\n if args.mode == \"rnn\":\n rnn_type = DPRNN\n elif args.mode == \"gru\":\n rnn_type = DPGRU\n elif args.mode == \"lstm\":\n rnn_type = DPLSTM\n else:\n raise ValueError(f\"Invalid network type: {args.mode}\")\n\n model = CharNNClassifier(\n rnn_type,\n args.embedding_size,\n args.hidden_size,\n len(ds.labels),\n args.n_layers,\n args.bidirectional,\n )\n model = model.to(device)\n\n train_ds, test_ds = torch.utils.data.random_split(\n ds, [train_len, test_len], generator=generator\n )\n\n train_loader = DataLoader(\n train_ds,\n batch_size=args.batch_size,\n num_workers=1,\n pin_memory=True,\n collate_fn=padded_collate,\n )\n\n test_loader = DataLoader(\n test_ds,\n batch_size=2 * args.batch_size,\n shuffle=False,\n num_workers=1,\n pin_memory=True,\n collate_fn=padded_collate,\n )\n\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)\n\n if not args.disable_dp:\n privacy_engine = PrivacyEngine(secure_mode=args.secure_rng)\n model, optimizer, train_loader = privacy_engine.make_private(\n module=model,\n optimizer=optimizer,\n data_loader=train_loader,\n noise_multiplier=args.sigma,\n max_grad_norm=args.max_per_sample_grad_norm,\n )\n else:\n privacy_engine = None\n\n print(f\"Train stats ({args.mode}): \\n\")\n for epoch in tqdm(range(args.epochs)):\n train(\n model,\n criterion,\n optimizer,\n train_loader,\n epoch,\n privacy_engine,\n args.delta,\n device=device,\n )\n if args.test_every:\n if epoch % args.test_every == 0:\n test(model, test_loader, privacy_engine, args.delta, device=device)\n\n mean_acc = test(model, test_loader, privacy_engine, args.delta, device=device)\n torch.save(mean_acc, f\"run_results_chr_{args.mode}_classification.pt\")\n\n\nif 
__name__ == \"__main__\":\n main()\n",
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport hypothesis.strategies as st\nimport torch\nimport torch.nn as nn\nfrom hypothesis import given, settings\n\nfrom .common import GradSampleHooks_test\n\n\nclass Embedding_test(GradSampleHooks_test):\n @given(\n N=st.integers(1, 4),\n T=st.integers(1, 5),\n Q=st.integers(1, 4),\n R=st.integers(1, 2),\n V=st.integers(2, 32),\n D=st.integers(10, 17),\n dim=st.integers(2, 4),\n batch_first=st.booleans(),\n )\n @settings(deadline=10000)\n def test_input_across_dims(\n self,\n N: int,\n T: int,\n Q: int,\n R: int,\n V: int,\n D: int,\n dim: int,\n batch_first: bool,\n ):\n\n if dim == 1: # TODO: fix when dim is 1\n size = [T]\n elif dim == 2:\n size = [N, T]\n elif dim == 3:\n size = [N, T, Q]\n elif dim == 4:\n size = [N, T, Q, R]\n\n emb = nn.Embedding(V, D)\n x = torch.randint(low=0, high=V - 1, size=size)\n self.run_test(x, emb, batch_first=batch_first)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.utils.rnn.pad_sequence",
"torch.utils.data.DataLoader",
"torch.stack",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.Linear",
"torch.utils.data.random_split",
"torch.no_grad",
"torch.device",
"torch.save"
],
[
"torch.randint",
"torch.nn.Embedding"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |