repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
Will03/NVSM_pytorch | [
"45e91efa6e4571a955c0f76807f2d6b5d7ffa66a"
] | [
"src/models/vectorTraining.py"
] | [
"import numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport os\n\n\n\ndataPath = '../../Willll/' # Relative path of homework data\n\n\n# r=root, d=directories, f = files\n\nDocList = []\nQueryList = []\nDocData = []\nQueryData = []\n\ndef articleParser(myPath):\n with open(myPath, 'r') as fp:\n docData = fp.read().replace('\\n', '')\n return docData\n\n# read Query List\nwith open(dataPath+'test/query_list.txt', 'r') as fp:\n tmpLine = fp.readline()\n while tmpLine:\n tmpLine = tmpLine.strip('\\n')\n if tmpLine != '':\n QueryList.append(tmpLine)\n tmpLine = fp.readline()\n\n# Read query data\nfor eachQ in QueryList:\n QueryData.append(articleParser(dataPath+'test/query/%s'%eachQ))\n\n\nfor r, d, f in os.walk(dataPath+'doc'):\n for file in f:\n DocList.append(file)\n\nfor eachD in DocList:\n DocData.append(articleParser(dataPath+'doc/'+eachD))\n\n\n# TF-IDF\nmax_df = 0.95 # Ignore words with high df. (Similar effect to stopword filtering)\nmin_df = 5 # Ignore words with low df.\nsmooth_idf = True # Smooth idf weights by adding 1 to df.\nsublinear_tf = True # Replace tf with 1 + log(tf).\n\n# Rocchio (Below is a param set called Ide Dec-Hi)\nalpha = 1\nbeta = 0.75\ngamma = 0.15\nrel_count = 5 # Use top-5 relevant documents to update query vector.\nnrel_count = 1 # Use only the most non-relevant document to update query vector.\niters = 5\nprint('start train')\n# Build TF-IDF vectors of docs and queries\nvectorizer = TfidfVectorizer(max_df=max_df, min_df=min_df,\n smooth_idf=smooth_idf, sublinear_tf=sublinear_tf)\ndoc_tfidfs = vectorizer.fit_transform(DocData).toarray()\nquery_vecs = vectorizer.transform(QueryData).toarray()\nprint('start count simi')\n# Rank documents based on cosine similarity\ncos_sim = cosine_similarity(query_vecs, doc_tfidfs)\nrankings = np.flip(cos_sim.argsort(), axis=1)\n\nprint('start write file')\nlimit = 600\nfor query_name, ranking in zip(QueryList, rankings):\n ranked_docs=''\n index = 0\n for idx in ranking:\n if index >=600:\n break\n ranked_docs += '%s,'%DocList[idx]\n with open('../../Willll/%s.txt'%query_name, mode='w') as file:\n file.write('%s' % (ranked_docs))\n"
] | [
[
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
faiyazsamin/FaceRecognition | [
"9c0bd65f300784910a923f446cf33bacfc502b52"
] | [
"Final.min.py"
] | [
"import cv2\nimport numpy as np\nimport os\n\nsubjects = [\"\",\"Mama\",\"Samin\",\"Delwar\"]\n\n\ndef detect_faces(colored_img, scaleFactor=1.06):\n\n img_copy = colored_img.copy()\n gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)\n f_cascade = cv2.CascadeClassifier('data/lbpcascade_frontalface.xml')\n faces = f_cascade.detectMultiScale(gray, scaleFactor=scaleFactor, minNeighbors=5);\n if len(faces) == 0:\n return None, None\n (x, y, w, h) = faces[0]\n return gray[y:y+w, x:x+h], faces[0]\n\n\ndef prepare_training_data(data_folder_path):\n\n dirs = os.listdir(data_folder_path)\n faces = []\n labels = []\n\n for dir_name in dirs:\n\n if not dir_name.startswith(\"s\"):\n continue\n\n label = int(dir_name.replace(\"s\", \"\"))\n subject_dir_path = data_folder_path + \"/\" + dir_name\n subject_images_names = os.listdir(subject_dir_path)\n\n for image_name in subject_images_names:\n if image_name.startswith(\".\"):\n continue\n\n image_path = subject_dir_path + \"/\" + image_name\n image = cv2.imread(image_path)\n cv2.imshow(\"Training on image...\", cv2.resize(image, (400, 500)))\n cv2.waitKey(10)\n\n face, rect = detect_faces(image)\n if face is not None:\n faces.append(face)\n labels.append(label)\n\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n print(\"Total faces: \", len(faces))\n print(\"Total labels: \", len(labels))\n\n return faces, labels\n\n\ndef trainData(trainingDataPath, output_path):\n face_recognizer = cv2.face.LBPHFaceRecognizer_create()\n faces, labels = prepare_training_data(trainingDataPath)\n\n face_recognizer.train(faces, np.array(labels))\n face_recognizer.write(output_path)\n\n\ndef loadTrainedData(path):\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n recognizer.read(path)\n return recognizer\n\n\ndef predictStaticImage(test_img,trainer_file):\n img = test_img.copy()\n face, rect = detect_faces(img)\n lt = loadTrainedData(trainer_file)\n label, confidence = lt.predict(face)\n label_text = subjects[label]\n (x, y, w, h) = rect\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.putText(img, label_text, (rect[0], rect[1] - 5), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)\n print(\"Confidence =\",confidence)\n return img\n\ndef showImage(image):\n cv2.imshow('Frame', image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef camToFile(framesToCapture,output_dir):\n cam = cv2.VideoCapture(1)\n detector = cv2.CascadeClassifier('data/haarcascade_frontalface_alt.xml')\n sampleNum = 0\n\n while True:\n ret, img = cam.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n face = detector.detectMultiScale(gray, 1.5, 5)\n for (x, y, w, h) in face:\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n\n sampleNum = sampleNum + 1\n if sampleNum%(100/framesToCapture) == 0:\n print(\"Frames Captured:\", int(sampleNum/(100/framesToCapture)))\n cv2.imwrite(output_dir+\"/\"+ str(int(sampleNum/(100/framesToCapture))) + \".jpg\", gray[y:y + h, x:x + w])\n\n cv2.imshow('frame', img)\n if cv2.waitKey(100) & 0xFF == ord('q'):\n break\n elif sampleNum >= 100:\n break\n\n\ndef detectFace(trainer_file):\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n recognizer.read(trainer_file)\n faceCascade = cv2.CascadeClassifier(\"data/haarcascade_frontalface_alt.xml\")\n\n cam = cv2.VideoCapture(1)\n font = cv2.FONT_HERSHEY_DUPLEX\n while True:\n ret, im = cam.read()\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(100, 100),\n flags=cv2.CASCADE_SCALE_IMAGE)\n 
for (x, y, w, h) in faces:\n nbr_predicted, conf = recognizer.predict(gray[y:y + h, x:x + w])\n cv2.rectangle(im, (x - 50, y - 50), (x + w + 50, y + h + 50), (0, 225, 0), 2)\n nbr_predicted = subjects[nbr_predicted]\n cv2.putText(im, str(nbr_predicted), (x + 30, y + h + 30), font, 1, (0, 0, 225)) # Draw the text\n cv2.imshow('FaceDetector', im)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cam.release()\n cv2.destroyAllWindows()\n\n\n#trainData('training-data','test.yml')\ndetectFace('test.yml')\n#showImage(predictStaticImage(cv2.imread(\"test-data/4.jpg\"),'test3.yml'))\n#camToFile(20,'training-data/s7')"
] | [
[
"numpy.array"
]
] |
ankitdobhal/analytics-zoo | [
"b8374bcd6c73bba49fe0b0ab075528cdd94cf2af"
] | [
"pyzoo/test/zoo/zouwu/autots/test_auto_ts.py"
] | [
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nimport numpy as np\nfrom test.zoo.pipeline.utils.test_utils import ZooTestCase\n\nfrom zoo.automl.config.recipe import LSTMGridRandomRecipe, MTNetGridRandomRecipe\nfrom zoo.zouwu.autots.forecast import AutoTSTrainer\nfrom zoo.zouwu.autots.forecast import TSPipeline\n\nimport pandas as pd\n\n\[email protected](\"init_ray_context_fixture\")\nclass TestZouwuAutoTS(ZooTestCase):\n\n def setup_method(self, method):\n # super(TestZouwuAutoTS, self).setup_method(method)\n self.create_data()\n\n def teardown_method(self, method):\n pass\n\n def create_data(self):\n sample_num = np.random.randint(100, 200)\n self.train_df = pd.DataFrame({\"datetime\": pd.date_range(\n '1/1/2019', periods=sample_num), \"value\": np.random.randn(sample_num)})\n val_sample_num = np.random.randint(20, 30)\n self.validation_df = pd.DataFrame({\"datetime\": pd.date_range(\n '1/1/2019', periods=val_sample_num), \"value\": np.random.randn(val_sample_num)})\n\n def test_AutoTSTrainer_smoke(self):\n horizon = np.random.randint(1, 6)\n tsp = AutoTSTrainer(dt_col=\"datetime\",\n target_col=\"value\",\n horizon=horizon,\n extra_features_col=None\n )\n pipeline = tsp.fit(self.train_df)\n assert isinstance(pipeline, TSPipeline)\n assert pipeline.internal.config is not None\n evaluate_result = pipeline.evaluate(self.validation_df)\n if horizon > 1:\n assert evaluate_result[0].shape[0] == horizon\n else:\n assert evaluate_result[0]\n predict_df = pipeline.predict(self.validation_df)\n assert not predict_df.empty\n\n def test_AutoTrainer_LstmRecipe(self):\n horizon = np.random.randint(1, 6)\n tsp = AutoTSTrainer(dt_col=\"datetime\",\n target_col=\"value\",\n horizon=horizon,\n extra_features_col=None\n )\n pipeline = tsp.fit(self.train_df,\n self.validation_df,\n recipe=LSTMGridRandomRecipe(\n num_rand_samples=5,\n batch_size=[1024],\n lstm_2_units=[8],\n training_iteration=1,\n epochs=1\n ))\n assert isinstance(pipeline, TSPipeline)\n assert pipeline.internal.config is not None\n evaluate_result = pipeline.evaluate(self.validation_df)\n if horizon > 1:\n assert evaluate_result[0].shape[0] == horizon\n else:\n assert evaluate_result[0]\n predict_df = pipeline.predict(self.validation_df)\n assert not predict_df.empty\n\n def test_AutoTrainer_MTNetRecipe(self):\n horizon = np.random.randint(1, 6)\n tsp = AutoTSTrainer(dt_col=\"datetime\",\n target_col=\"value\",\n horizon=horizon,\n extra_features_col=None\n )\n pipeline = tsp.fit(self.train_df,\n self.validation_df,\n recipe=MTNetGridRandomRecipe(\n num_rand_samples=5,\n time_step=[5],\n long_num=[2],\n batch_size=[1024],\n cnn_hid_size=[32, 50],\n training_iteration=1,\n epochs=1\n ))\n assert isinstance(pipeline, TSPipeline)\n assert pipeline.internal.config is not None\n evaluate_result = pipeline.evaluate(self.validation_df)\n if horizon > 1:\n assert evaluate_result[0].shape[0] == horizon\n else:\n assert evaluate_result[0]\n predict_df = 
pipeline.predict(self.validation_df)\n assert not predict_df.empty\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n"
] | [
[
"numpy.random.randint",
"numpy.random.randn",
"pandas.date_range"
]
] |
hzm2016/assistive-gym-robosuite | [
"5c529f4444cc386383618bfa584341740a8468f9",
"5c529f4444cc386383618bfa584341740a8468f9"
] | [
"code/pytorch/methods/SSAC.py",
"envs/mujoco/utils/play_model.py"
] | [
"import os\nimport torch\nimport torch.nn.functional as F\nimport glob\nimport numpy as np\nfrom torch.optim import Adam\nfrom utils.utils import soft_update, hard_update\nfrom utils.model import GaussianPolicy, QNetwork, DeterministicPolicy\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Input, merge, Lambda, Activation\nfrom keras.layers.merge import Add, Multiply, Concatenate, concatenate\nfrom keras.initializers import RandomUniform\nfrom keras.optimizers import Adam\nimport keras.backend as K\nfrom keras import metrics\n\n\ndef weighted_entropy(p, w_norm):\n # w = tf.divide(tf.exp(A - np.max(A)), prob)\n # w_norm = w / K.sum(w)\n return K.sum(w_norm * p * K.log(p + 1e-8))\n\n\ndef weighted_mean(p, w_norm):\n # w = tf.exp(A- np.max(A))\n # w_norm = w / K.sum(w)\n p_weighted = np.multiply(w_norm, p)\n return K.mean(p_weighted, axis=0)\n\n\ndef weighted_mse(Q_target, Q_pred, w_norm):\n # w = tf.exp(A- np.max(A))\n # w_norm = w / K.sum(w)\n error = K.square(Q_target - Q_pred)\n return K.mean(w_norm * error)\n\n\ndef softmax(x):\n col = x.shape[1]\n x_max = np.reshape(np.amax(x, axis=1), (-1, 1))\n e_x = np.exp(x - np.matlib.repmat(x_max, 1, col) )\n e_x_sum = np.reshape( np.sum(e_x, axis=1), (-1, 1))\n out = e_x / np.matlib.repmat(e_x_sum, 1, col)\n return out\n\n\ndef weighted_mean_array(x, weights):\n weights_mean = np.mean(weights, axis=1)\n x_weighted = np.multiply(x, weights)\n mean_weighted = np.divide(np.mean(x_weighted, axis=1), weights_mean)\n return np.reshape(mean_weighted, (-1, 1))\n\n\ndef p_sample(p):\n row, col = p.shape\n p_sum = np.reshape(np.sum(p, axis=1), (row, 1))\n p_normalized = p/np.matlib.repmat(p_sum, 1, col)\n p_cumsum = np.matrix(np.cumsum( p_normalized, axis=1))\n # print(p_cumsum[0])\n rand = np.matlib.repmat(np.random.random((row, 1)), 1, col)\n # print(rand[0])\n o_softmax = np.argmax(p_cumsum >= rand, axis=1)\n return o_softmax\n\n\ndef entropy(p):\n return K.sum(p * K.log((p + 1e-8)))\n\n\ndef add_normal(x_input, outshape, at_eps):\n \"\"\"\n add normal noise to the input\n \"\"\"\n epsilon = K.random_normal(shape=outshape, mean=0., stddev=1.)\n x_out = x_input + at_eps * np.multiply(epsilon, np.absolute(x_input))\n return x_out\n\n\ndef kl(p, q):\n return K.sum(p * K.log((p + 1e-8) / (q + 1e-8)))\n\n\nclass Multi_SAC(object):\n def __init__(self, state_dim, action_dim, option_dim, max_action, action_space):\n\n self.alpha = 0.2\n self.lr = 0.0003\n self.option_num = option_dim\n\n self.policy_type = \"Gaussian\"\n self.target_update_interval = 1\n self.automatic_entropy_tuning = True\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n \"\"\" critic network \"\"\"\n self.critic = QNetwork(state_dim, action_dim, 400).to(device=self.device)\n self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)\n\n self.critic_target = QNetwork(state_dim, action_dim, 400).to(self.device)\n hard_update(self.critic_target, self.critic)\n\n self.sampling_prob = torch.FloatTensor(state).to(self.device)\n # ===================================================================== #\n # Option Model #\n # ===================================================================== #\n self.option_state_input, self.option_action_input, self.option_input_concat, self.option_out_dec, \\\n self.option_out, self.option_out_noise, self.option_model = self.create_option_model()\n Advantage = np.stop_gradient(self.target_q_value - self.predicted_v_value)\n Weight = np.divide(np.exp(Advantage - np.max(Advantage)), 
self.sampling_prob)\n W_norm = Weight/K.mean(Weight)\n\n critic_conditional_entropy = weighted_entropy(self.option_out, tf.stop_gradient(W_norm))\n p_weighted_ave = weighted_mean(self.option_out, tf.stop_gradient(W_norm))\n self.critic_entropy = critic_conditional_entropy - self.c_ent * entropy(p_weighted_ave)\n\n self.vat_loss = kl(self.option_out, self.option_out_noise)\n self.reg_loss = metrics.mean_absolute_error(self.option_input_concat, self.option_out_dec)\n self.option_loss = self.reg_loss + self.entropy_coeff * (self.critic_entropy) + self.c_reg * self.vat_loss\n self.option_optimize = tf.train.AdamOptimizer(self.option_lr).minimize(self.option_loss)\n\n \"\"\" option network \"\"\"\n self.it = 0\n\n if self.policy_type == \"Gaussian\":\n # Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper\n if self.automatic_entropy_tuning == True:\n self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()\n self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)\n self.alpha_optim = Adam([self.log_alpha], lr=self.lr)\n\n self.policy = GaussianPolicy(state_dim, action_dim, 400, max_action).to(self.device)\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n\n elif self.policy_type == \"Multi_Gaussian\":\n if self.automatic_entropy_tuning == True:\n self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()\n self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)\n self.alpha_optim = Adam([self.log_alpha], lr=self.lr)\n\n self.policy = GaussianPolicy(state_dim, action_dim, 400, max_action).to(self.device)\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n\n else:\n self.alpha = 0\n self.automatic_entropy_tuning = False\n self.policy = DeterministicPolicy(state_dim, action_dim, 400, max_action).to(self.device)\n self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)\n\n def select_action(self, state, eval=True):\n\n state = torch.FloatTensor(state).to(self.device).unsqueeze(0)\n\n if eval == False:\n action, _, _ = self.policy.sample(state)\n else:\n _, _, action = self.policy.sample(state)\n return action.detach().cpu().numpy()[0]\n\n def train_actor_option(self, inputs, a_gradient, option):\n self.sess.run(self.actor_optimizer_list[option], feed_dict={\n self.actor_state_input_list[option]: inputs,\n self.action_gradient_list[option]: a_gradient\n })\n\n def train_critic(self, inputs, action, target_q_value, predicted_v_value, sampling_prob):\n return self.sess.run([self.critic_optimize], feed_dict={\n self.critic_state_input: inputs,\n self.critic_action_input: action,\n self.target_q_value: target_q_value,\n self.predicted_v_value: predicted_v_value,\n self.sampling_prob: sampling_prob\n })\n\n def train_option(self, inputs, action, target_q_value, predicted_v_value, sampling_prob):\n return self.sess.run([self.option_optimize], feed_dict={\n self.option_state_input: inputs,\n self.option_action_input: action,\n self.target_q_value: target_q_value,\n self.predicted_v_value: predicted_v_value,\n self.sampling_prob: sampling_prob\n })\n\n def max_option(self, inputs):\n Q_predict = []\n n = inputs.shape[0]\n for o in range(int(self.option_num)):\n action_i = self.predict_actor_target(inputs, o)\n Q_predict_i, _ = self.predict_critic_target(inputs, action_i)\n if o == 0:\n Q_predict = np.reshape(Q_predict_i, (-1, 1))\n else:\n Q_predict = np.concatenate((Q_predict, np.reshape(Q_predict_i, (-1, 1))), axis=1)\n\n o_max = 
np.argmax(Q_predict, axis=1)\n Q_max = np.max(Q_predict, axis=1)\n return o_max, Q_max, Q_predict\n\n def softmax_option_target(self, inputs):\n Q_predict = []\n n = inputs.shape[0]\n for o in range(int(self.option_num)):\n action_i = self.predict_actor_target(inputs, o)\n Q_predict_i, _ = self.predict_critic_target(inputs, action_i)\n\n if o == 0:\n Q_predict = np.reshape( Q_predict_i, (-1, 1) )\n else:\n Q_predict = np.concatenate((Q_predict, np.reshape(Q_predict_i, (-1, 1)) ), axis= 1)\n\n p = softmax(Q_predict)\n o_softmax = p_sample(p)\n n = Q_predict.shape[0]\n Q_softmax = Q_predict[np.arange(n), o_softmax.flatten()]\n\n return o_softmax, np.reshape(Q_softmax, (n, 1)), Q_predict\n\n def predict_actor_option(self, inputs, option):\n return self.sess.run(self.actor_out_list[option], feed_dict={self.actor_state_input_list[option]: inputs})\n\n def predict_actor(self, inputs, options):\n action_list = []\n for o in range(self.option_num):\n action_o = self.predict_actor_option(inputs, o)\n action_list.append(action_o)\n\n n = inputs.shape[0]\n action = 0\n if n == 1 or np.isscalar(options):\n action = action_list[options]\n # calculate the action\n else:\n for i in range(n):\n if i == 0:\n action = action_list[int(options[i])][i, :]\n else:\n action = np.vstack((action, action_list[int(options[i])][i, :]))\n\n return action\n",
"import argparse\n\nimport numpy as np\n\nfrom envs.mujoco.utils.experiment_files import (get_latest_experiment_dir, get_model,\n get_latest_checkpoint, get_params)\nfrom envs.mujoco.utils.load_model import load_params, load_model\n\n\n# def load_params(params_path):\n# with open(params_path) as f:\n# data = json.load(f)\n# return data\n\n\n# def load_model(model_path, params):\n# env_cls = globals()[params['env']]\n# orig_env = env_cls(**params['env_options'])\n# env = DummyVecEnv([lambda: orig_env])\n\n# if params['alg'] == 'PPO2':\n# model = PPO2.load(model_path, env=env)\n# elif params['alg'] == 'SAC':\n# model = SAC.load(model_path, env=env)\n# else:\n# raise NotImplementedError\n\n# return orig_env, model\n\ndef replay_model(env, model, deterministic=True, num_episodes=None, record=False, render=True):\n # Don't record data forever.\n assert (not record) or (num_episodes is not None), \\\n \"there must be a finite number of episodes to record the data\"\n \n # Initialize counts and data.\n num_episodes = num_episodes if num_episodes else np.inf\n episode_count = 0\n infos = []\n\n # Simulate forward.\n obs = env.reset()\n while episode_count < num_episodes:\n # import pdb; pdb.set_trace()\n action, _states = model.predict(obs, deterministic=deterministic)\n clipped_action = np.clip(action, env.action_space.low,\n env.action_space.high)\n obs, reward, done, info = env.step(clipped_action, render=render)\n if record:\n infos.append(info)\n if done:\n obs = env.reset()\n episode_count += 1\n\n return infos\n\n\nif __name__ == '__main__':\n # Parse command line arguments.\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'directory', type=str, help='The directory of the experiment.')\n parser.add_argument(\n '--deterministic', action='store_true', help='Optionally simulate the deterministic system.')\n\n args = parser.parse_args()\n\n # Load the model if it's availeble, otherwise that latest checkpoint.\n experiment_dir = get_latest_experiment_dir(args.directory)\n params_path = get_params(experiment_dir)\n params = load_params(params_path)\n\n model_path = get_model(experiment_dir)\n if model_path is None:\n model_path = get_latest_checkpoint(experiment_dir)\n\n env, model = load_model(model_path, params)\n\n # Replay model.\n replay_model(env, model, deterministic=args.deterministic)\n"
] | [
[
"numpy.sum",
"numpy.multiply",
"torch.FloatTensor",
"numpy.cumsum",
"torch.Tensor",
"numpy.reshape",
"torch.zeros",
"numpy.stop_gradient",
"numpy.argmax",
"numpy.matlib.repmat",
"numpy.random.random",
"numpy.arange",
"numpy.max",
"torch.cuda.is_available",
"numpy.absolute",
"numpy.amax",
"numpy.isscalar",
"numpy.mean"
],
[
"numpy.clip"
]
] |
erprashu/Metal_erning | [
"79d1a6a457be37258df50a9194946caeb86845a2"
] | [
"test.py"
] | [
"# -*- coding: utf-8 -*-\nimport argparse\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nfrom torch.autograd import Variable\n\nfrom tqdm import tqdm\n\nfrom models.protonet_embedding import ProtoNetEmbedding\nfrom models.R2D2_embedding import R2D2Embedding\nfrom models.ResNet12_embedding import resnet12\n\nfrom models.classification_heads import ClassificationHead, R2D2Head\n\nfrom utils import pprint, set_gpu, Timer, count_accuracy, log\n\nimport random\nimport numpy as np\nimport os\nimport pdb\n\ndef get_model(options):\n # Choose the embedding network\n if options.network == 'ProtoNet':\n network = ProtoNetEmbedding().cuda()\n elif options.network == 'R2D2':\n network = R2D2Embedding().cuda()\n elif options.network == 'ResNet':\n if options.dataset == 'miniImageNet' or options.dataset == 'tieredImageNet':\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=5).cuda()\n network = torch.nn.DataParallel(network)\n else:\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2).cuda()\n network = torch.nn.DataParallel(network)\n else:\n print (\"Cannot recognize the network type\")\n assert(False)\n \n # Choose the classification head\n if opt.head == 'ProtoNet':\n cls_head = ClassificationHead(base_learner='ProtoNet').cuda() \n elif opt.head == 'Ridge':\n cls_head = ClassificationHead(base_learner='Ridge').cuda()\n elif opt.head == 'R2D2':\n cls_head = R2D2Head().cuda()\n elif opt.head == 'SVM':\n cls_head = ClassificationHead(base_learner='SVM-CS').cuda()\n else:\n print (\"Cannot recognize the classification head type\")\n assert(False)\n \n return (network, cls_head)\n\ndef get_dataset(options):\n # Choose the embedding network\n if options.dataset == 'miniImageNet':\n from data.mini_imagenet import MiniImageNet, FewShotDataloader\n dataset_test = MiniImageNet(phase='test')\n data_loader = FewShotDataloader\n elif options.dataset == 'tieredImageNet':\n from data.tiered_imagenet import tieredImageNet, FewShotDataloader\n dataset_test = tieredImageNet(phase='test')\n data_loader = FewShotDataloader\n elif options.dataset == 'CIFAR_FS':\n from data.CIFAR_FS import CIFAR_FS, FewShotDataloader\n dataset_test = CIFAR_FS(phase='test')\n data_loader = FewShotDataloader\n elif options.dataset == 'FC100':\n from data.FC100 import FC100, FewShotDataloader\n dataset_test = FC100(phase='test')\n data_loader = FewShotDataloader\n else:\n print (\"Cannot recognize the dataset type\")\n assert(False)\n \n return (dataset_test, data_loader)\n\n\ndef self_mix(data):\n size = data.size()\n W = size[-1]\n H = size[-2]\n # uniform\n cx = np.random.randint(W)\n cy = np.random.randint(H)\n\n cut_w = W//2\n cut_h = H//2\n\n bbx1 = np.clip(cx - cut_w // 2, 0, W)\n bby1 = np.clip(cy - cut_h // 2, 0, H)\n bbx2 = np.clip(cx + cut_w // 2, 0, W)\n bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n while True:\n bbxn = np.random.randint(0, W-(bbx2-bbx1))\n bbyn = np.random.randint(0, H-(bby2-bby1))\n\n if bbxn != bbx1 or bbyn != bby1:\n break\n if (bbx2 - bbx1) == (bby2 - bby1):\n k = random.sample([0, 1, 2, 3], 1)[0]\n else:\n k = 0\n data[:, :, bbx1:bbx2, bby1:bby2] = torch.rot90(data[:, :, bbxn:bbxn + (bbx2-bbx1), bbyn:bbyn + (bby2-bby1)], k, [2,3])\n #data[:, :, bbx1:bbx2, bby1:bby2] = data[:, :, bbxn:bbxn + (bbx2-bbx1), bbyn:bbyn + (bby2-bby1)]\n\n return data\n\ndef flip(x, dim):\n indices = [slice(None)] * x.dim()\n indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,\n dtype=torch.long, device=x.device)\n return x[tuple(indices)]\n\ndef 
build_grid(source_size,target_size):\n k = float(target_size)/float(source_size)\n direct = torch.linspace(-k,k,target_size).unsqueeze(0).repeat(target_size,1).unsqueeze(-1)\n full = torch.cat([direct,direct.transpose(1,0)],dim=2).unsqueeze(0)\n\n return full.cuda()\n\ndef random_crop_grid(x,grid):\n delta = x.size(2)-grid.size(1)\n grid = grid.repeat(x.size(0),1,1,1).cuda()\n #Add random shifts by x\n grid[:,:,:,0] = grid[:,:,:,0]+ torch.FloatTensor(x.size(0)).cuda().random_(0, delta).unsqueeze(-1).unsqueeze(-1).expand(-1, grid.size(1), grid.size(2)) /x.size(2)\n #Add random shifts by y\n grid[:,:,:,1] = grid[:,:,:,1]+ torch.FloatTensor(x.size(0)).cuda().random_(0, delta).unsqueeze(-1).unsqueeze(-1).expand(-1, grid.size(1), grid.size(2)) /x.size(2)\n\n return grid\n\ndef random_cropping(batch, t):\n #Building central crop of t pixel size\n grid_source = build_grid(batch.size(-1),t)\n #Make radom shift for each batch\n grid_shifted = random_crop_grid(batch,grid_source)\n #Sample using grid sample\n sampled_batch = F.grid_sample(batch, grid_shifted, mode='nearest')\n\n return sampled_batch\n\ndef shot_aug(data_support, labels_support, n_support, method, opt):\n size = data_support.shape\n if method == \"fliplr\":\n n_support = opt.s_du * n_support\n data_shot = flip(data_support, -1)\n data_support = torch.cat((data_support, data_shot), dim = 1)\n labels_support = torch.cat((labels_support, labels_support), dim = 1)\n elif method == \"random_crop\":\n n_support = opt.s_du * n_support\n data_shot = F.pad(data_support.view([-1] + list(data_support.shape[-3:])), (4,4,4,4))\n data_shot = random_cropping(data_shot, 32)\n data_support = torch.cat((data_support, data_shot.view([size[0], -1] + list(data_support.shape[-3:]))), dim = 1)\n labels_support = torch.cat((labels_support, labels_support), dim = 1)\n return data_support, labels_support, n_support\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', default='0')\n parser.add_argument('--load', default='./experiments/exp_1/best_model.pth',\n help='path of the checkpoint file')\n parser.add_argument('--episode', type=int, default=1000,\n help='number of episodes to test')\n parser.add_argument('--way', type=int, default=5,\n help='number of classes in one test episode')\n parser.add_argument('--shot', type=int, default=1,\n help='number of support examples per training class')\n parser.add_argument('--shot_aug', '-shotaug', default=[], nargs='+', type=str,\n help='If use shot level data augmentation.')\n parser.add_argument('--s_du', type=int, default=1,\n help='number of support examples augmented by shot')\n parser.add_argument('--query', type=int, default=15,\n help='number of query examples per training class')\n parser.add_argument('--network', type=str, default='ProtoNet',\n help='choose which embedding network to use. ProtoNet, R2D2, ResNet')\n parser.add_argument('--head', type=str, default='ProtoNet',\n help='choose which embedding network to use. ProtoNet, Ridge, R2D2, SVM')\n parser.add_argument('--dataset', type=str, default='miniImageNet',\n help='choose which classification head to use. 
miniImageNet, tieredImageNet, CIFAR_FS, FC100')\n\n opt = parser.parse_args()\n (dataset_test, data_loader) = get_dataset(opt)\n\n dloader_test = data_loader(\n dataset=dataset_test,\n nKnovel=opt.way,\n nKbase=0,\n nExemplars=opt.shot, # num training examples per novel category\n nTestNovel=opt.query * opt.way, # num test examples for all the novel categories\n nTestBase=0, # num test examples for all the base categories\n batch_size=1,\n num_workers=1,\n epoch_size=opt.episode, # num of batches per epoch\n )\n\n set_gpu(opt.gpu)\n \n # Define the models\n (embedding_net, cls_head) = get_model(opt)\n \n # Load saved model checkpoints\n saved_models = torch.load(opt.load)\n embedding_net.load_state_dict(saved_models['embedding'])\n embedding_net.eval()\n cls_head.load_state_dict(saved_models['head'])\n cls_head.eval()\n \n # Evaluate on test set\n test_accuracies = []\n for i, batch in enumerate(tqdm(dloader_test()), 1):\n data_support, labels_support, data_query, labels_query, _, _ = [x.cuda() for x in batch]\n n_support = opt.way * opt.shot\n n_query = opt.way * opt.query\n \n for method in opt.shot_aug:\n data_support, labels_support, n_support = shot_aug(data_support, labels_support, n_support, method, opt)\n\n with torch.no_grad():\n emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))\n emb_support = emb_support.reshape(1, n_support, -1)\n \n emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))\n emb_query = emb_query.reshape(1, n_query, -1)\n\n if opt.head == 'SVM':\n logits = cls_head(emb_query, emb_support, labels_support, opt.way, opt.shot, maxIter=3)\n else:\n logits = cls_head(emb_query, emb_support, labels_support, opt.way, opt.shot)\n\n acc = count_accuracy(logits.reshape(-1, opt.way), labels_query.reshape(-1))\n test_accuracies.append(acc.item())\n \n avg = np.mean(np.array(test_accuracies))\n std = np.std(np.array(test_accuracies))\n ci = std / np.sqrt(i + 1)\n \n if i % 50 == 0:\n print('Episode [{}/{}]:\\t\\t\\tAccuracy: {:.2f} ± {:.2f} % ({:.2f} %)'\\\n .format(i, opt.episode, avg, ci, acc))\n"
] | [
[
"numpy.sqrt",
"torch.load",
"torch.linspace",
"torch.no_grad",
"torch.nn.DataParallel",
"numpy.clip",
"torch.nn.functional.grid_sample",
"numpy.array",
"torch.rot90",
"numpy.random.randint",
"torch.cat"
]
] |
mdeegen/pb_bss | [
"e8c380e27d82707e8d2b2d83c5c918d47ea5d89f"
] | [
"tests/test_distribution/test_von_mises_fisher.py"
] | [
"import numpy as np\nfrom numpy.testing import assert_allclose, assert_equal\nimport unittest\nfrom pb_bss.distribution import VonMisesFisher\nfrom pb_bss.distribution import VonMisesFisherTrainer\n\n\nclass TestGaussian(unittest.TestCase):\n def test_shapes(self):\n samples = 10000\n mean = np.ones((3,))\n covariance = np.eye(3)\n x = np.random.multivariate_normal(mean, covariance, size=(samples,))\n model = VonMisesFisherTrainer().fit(x)\n assert_equal(model.mean.shape, mean.shape)\n assert_equal(model.concentration.shape, ())\n\n def test_shapes_independent_dims(self):\n samples = 10000\n mean = np.ones((3,))\n covariance = np.eye(3)\n x = np.random.multivariate_normal(mean, covariance, size=(13, samples,))\n model = VonMisesFisherTrainer().fit(x)\n assert_equal(model.mean.shape, np.tile(mean, (13, 1)).shape)\n assert_equal(model.concentration.shape, (13,))\n\n def test_von_mises_fisher(self):\n samples = 10000\n mean = np.ones((3,))\n mean /= np.linalg.norm(mean, axis=-1)\n concentration = 50\n\n # ToDo: Implement VonMisesFisher(...).sample(...)\n return\n\n x = VonMisesFisher(mean, concentration).sample(size=(samples,))\n model = VonMisesFisherTrainer().fit(x)\n assert_allclose(model.mean, mean, atol=0.1)\n assert_allclose(model.covariance, concentration, atol=0.1)\n"
] | [
[
"numpy.ones",
"numpy.eye",
"numpy.tile",
"numpy.testing.assert_equal",
"numpy.random.multivariate_normal",
"numpy.testing.assert_allclose",
"numpy.linalg.norm"
]
] |
vamships/RelationPrediction | [
"45f48e8d09331e7244a7fe8d2d9d0fefa7e1f76b"
] | [
"code/extras/highway_layer.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom model import Model\nfrom common.shared_functions import glorot_variance, make_tf_variable, make_tf_bias\n\nclass HighwayLayer(Model):\n vertex_embedding_function = {'train': None, 'test': None}\n\n def __init__(self, shape, next_component=None, next_component_2=None):\n self.next_component = next_component\n self.next_component_2 = next_component_2\n self.shape = shape\n\n def compute_vertex_embeddings(self, mode='train'):\n if self.vertex_embedding_function[mode] is None:\n code_1 = self.next_component.get_all_codes(mode=mode)[0]\n code_2 = self.next_component_2.get_all_codes(mode=mode)[0]\n\n gates = self.get_gates(mode=mode)\n\n self.vertex_embedding_function[mode] = gates * code_1 + (1-gates) * code_2\n\n return self.vertex_embedding_function[mode]\n\n def local_initialize_train(self):\n variance = glorot_variance(self.shape)\n\n self.W = make_tf_variable(0, variance, self.shape)\n self.b = make_tf_bias(self.shape[1], init=1)\n\n def local_get_weights(self):\n return [self.W, self.b]\n\n def get_gates(self, mode='train'):\n code = self.next_component_2.get_all_codes(mode=mode)[0]\n hidden = tf.matmul(code, self.W) + self.b\n\n return tf.nn.sigmoid(hidden)\n\n def get_all_codes(self, mode='train'):\n collected_messages = self.compute_vertex_embeddings(mode=mode)\n\n return collected_messages, None, collected_messages\n\n def get_all_subject_codes(self, mode='train'):\n return self.compute_vertex_embeddings(mode=mode)\n\n def get_all_object_codes(self, mode='train'):\n return self.compute_vertex_embeddings(mode=mode)\n"
] | [
[
"tensorflow.nn.sigmoid",
"tensorflow.matmul"
]
] |
seba-1511/gsoc15-demo | [
"c57d5cce7903511edd4048f8bfed2ad0dc6f6b6b"
] | [
"keras/preprocessing/sequence.py"
] | [
"import numpy as np\n\ndef pad_sequences(seqs, maxlen=None, dtype='int32'):\n \"\"\"\n Pad each sequence to the same lenght: \n the lenght of the longuest sequence.\n\n If maxlen is provided, any sequence longer\n than maxlen is truncated to maxlen.\n \"\"\"\n lengths = [len(s) for s in seqs]\n\n nb_samples = len(seqs)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n x = np.zeros((nb_samples, maxlen)).astype(dtype)\n for idx, s in enumerate(seqs):\n x[idx, :lengths[idx]] = s[:maxlen]\n\n return x"
] | [
[
"numpy.max",
"numpy.zeros"
]
] |
wxy1988/ASR | [
"8ef3ef347523044c89c46c263ecc7b8e9b2c06d1"
] | [
"transformer/third_party/feat_convert/kaldi_io/batchmk.py"
] | [
"#!/usr/bin/python\r\n# coding=utf-8\r\n\r\n\"\"\"\r\n@version:\r\n@author: Dong Linhao\r\n@license: Apache Licence\r\n@contact: [email protected]\r\n@site:\r\n@software: PyCharm Community Edition\r\n@file: batchmk.py\r\n@time: 09/04/17 21:10\r\n\"\"\"\r\n\r\nimport src.io.fea as fea\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport time\r\n\r\nLONGEST_FRMS = 2000\r\n\r\nclass lstm_batch(object):\r\n def __init__(self, num_streams, num_steps, input_dim):\r\n self.sample_feat_list = [np.zeros([LONGEST_FRMS, input_dim]) for _ in range(num_streams)]\r\n self.sample_label_list = [np.zeros([LONGEST_FRMS]) for _ in range(num_streams)]\r\n self.sample_mask_list = [np.zeros([LONGEST_FRMS]) for _ in range(num_streams)]\r\n\r\n self.curt = np.zeros(num_streams, dtype=int)\r\n self.lent = np.zeros(num_streams, dtype=int)\r\n self.reset_flag = np.zeros(num_streams, dtype=bool)\r\n\r\n self.num_streams = num_streams\r\n self.num_steps = num_steps\r\n self.input_dim = input_dim\r\n self.handled_utt_num = 0\r\n self.handled_frm_num = 0\r\n self.cur_epoch_finish = False\r\n\r\n def set_stream_num(self, num_streams):\r\n self.num_streams = num_streams\r\n\r\n self.sample_feat_list = [np.zeros([LONGEST_FRMS, self.input_dim]) for _ in range(num_streams)]\r\n self.sample_label_list = [np.zeros([LONGEST_FRMS]) for _ in range(num_streams)]\r\n self.sample_mask_list = [np.zeros([LONGEST_FRMS]) for _ in range(num_streams)]\r\n\r\n self.curt = np.zeros(num_streams, dtype=int)\r\n self.lent = np.zeros(num_streams, dtype=int)\r\n self.reset_flag = np.zeros(num_streams, dtype=bool)\r\n\r\n def reset(self):\r\n self.sample_feat_list = [np.zeros([LONGEST_FRMS, self.input_dim]) for _ in range(self.num_streams)]\r\n self.sample_label_list = [np.zeros([LONGEST_FRMS]) for _ in range(self.num_streams)]\r\n self.sample_mask_list = [np.zeros([LONGEST_FRMS]) for _ in range(self.num_streams)]\r\n\r\n self.curt = np.zeros(self.num_streams, dtype=int)\r\n self.lent = np.zeros(self.num_streams, dtype=int)\r\n self.reset_flag = np.zeros(self.num_streams, dtype=bool)\r\n\r\n self.handled_utt_num = 0\r\n self.handled_frm_num = 0\r\n self.cur_epoch_finish = False\r\n\r\n def make_batch(self, sess, sample, run_device, total_utt_num):\r\n with tf.device(run_device):\r\n multistream_feat_batch = [np.zeros([self.num_steps, self.input_dim]) for _ in range(self.num_streams)]\r\n multistream_label_batch = [np.zeros([self.num_steps]) for _ in range(self.num_streams)]\r\n multistream_mask_batch = [np.zeros([self.num_steps]) for _ in range(self.num_streams)]\r\n reset_flag = np.zeros(self.num_streams, dtype=bool)\r\n\r\n for s in range(self.num_streams):\r\n if self.curt[s] < self.lent[s]:\r\n reset_flag[s] = False\r\n continue\r\n\r\n if self.handled_utt_num < total_utt_num:\r\n sample_feats, sample_labels, sample_masks = sess.run(sample)\r\n self.handled_utt_num += 1\r\n self.sample_feat_list[s] = sample_feats\r\n self.sample_label_list[s] = sample_labels\r\n self.sample_mask_list[s] = sample_masks\r\n self.lent[s] = np.shape(sample_feats)[0]\r\n self.curt[s] = 0\r\n reset_flag[s] = True\r\n\r\n for s in range(self.num_streams):\r\n if self.curt[s] < self.lent[s]:\r\n multistream_feat_batch[s] = self.sample_feat_list[s][self.curt[s]:self.curt[s]+self.num_steps, :]\r\n multistream_label_batch[s] = self.sample_label_list[s][self.curt[s]:self.curt[s]+self.num_steps]\r\n multistream_mask_batch[s] = self.sample_mask_list[s][self.curt[s]:self.curt[s]+self.num_steps]\r\n\r\n self.curt[s] += self.num_steps\r\n self.handled_frm_num += 
np.sum(multistream_mask_batch[s])\r\n else:\r\n multistream_mask_batch[s] = np.zeros([self.num_steps])\r\n\r\n final_feat_batch = np.stack(multistream_feat_batch, axis=1)\r\n final_label_batch = np.stack(multistream_label_batch, axis=1)\r\n final_mask_batch = np.stack(multistream_mask_batch, axis=1)\r\n\r\n done = True\r\n for s in range(self.num_streams):\r\n if self.curt[s] < self.lent[s]:\r\n done = False\r\n if done:\r\n self.cur_epoch_finish = True\r\n\r\n return final_feat_batch, final_label_batch, final_mask_batch, reset_flag\r\n\r\n\r\ndef getfilelst(scp_file_path):\r\n # get tf list\r\n tf_list = []\r\n with open(scp_file_path) as list_file:\r\n for line in list_file:\r\n tf_list.append(line.strip())\r\n return tf_list\r\n\r\n\r\ndef process_my_feature(feature, label, flags):\r\n # Add delta\r\n if flags.add_delta:\r\n feature = fea.tf_fea_add_delt(feature)\r\n # CMVN\r\n feature = fea.tf_fea_cmvn_global(feature, flags.feat_mean, flags.feat_var)\r\n # Splice\r\n feature = fea.tf_fea_splice(feature, flags.l_splice, flags.r_splice)\r\n feature = tf.reshape(feature, [-1, flags.input_dim])\r\n\r\n return feature[:], label[:]\r\n\r\n\r\ndef read_my_file_format(filename_queue, org_feat_dim):\r\n # build reader\r\n reader = tf.TFRecordReader()\r\n _, serialized_example = reader.read(filename_queue)\r\n raw_example = tf.parse_single_example(\r\n serialized_example,\r\n # Defaults are not specified since both keys are required.\r\n features={\r\n 'feat': tf.FixedLenFeature([], tf.string),\r\n 'label': tf.FixedLenFeature([], tf.string),\r\n })\r\n example = tf.decode_raw(raw_example['feat'], tf.float32)\r\n example = tf.reshape(example, [-1, org_feat_dim])\r\n label = tf.decode_raw(raw_example['label'], tf.int32)\r\n\r\n return example, label\r\n\r\n\r\ndef lstm_input_pipeline(flags, is_training, num_epochs=None, shuffle_state = True):\r\n with tf.device(flags.default_device):\r\n if is_training:\r\n filenames = getfilelst(flags.trn_data_dir + '/tf.lst')\r\n else:\r\n filenames = getfilelst(flags.cv_data_dir + '/tf.lst')\r\n\r\n # generate file queue\r\n filename_queue = tf.train.string_input_producer(\r\n filenames, num_epochs = num_epochs, shuffle = shuffle_state)\r\n\r\n # read from file queue\r\n sample = read_my_file_format(filename_queue, flags.org_feat_dim)\r\n\r\n # handle sample\r\n sample_feats, sample_labels = process_my_feature(sample[0], sample[1], flags)\r\n sample_length = tf.shape(sample_feats)[0]\r\n sample_masks = tf.ones([sample_length], dtype=tf.float32)\r\n\r\n # add target delay\r\n if flags.target_delay > 0:\r\n feats_part1 = tf.slice(sample_feats, [flags.target_delay, 0], [sample_length-flags.target_delay, -1])\r\n last_frm_feats = tf.slice(sample_feats, [sample_length-1, 0], [1, -1])\r\n feats_part2 = tf.concat([last_frm_feats for _ in range(flags.target_delay)], axis=0)\r\n sample_feats = tf.concat([feats_part1, feats_part2], axis=0)\r\n\r\n padding_length = flags.num_steps - sample_length % flags.num_steps\r\n padding_feats = tf.zeros([padding_length, flags.input_dim], dtype=tf.float32)\r\n feats = tf.concat(axis=0, values=[sample_feats, padding_feats])\r\n padding_labels = tf.zeros([padding_length], dtype=tf.int32)\r\n labels = tf.concat(axis=0, values=[sample_labels, padding_labels])\r\n padding_masks = tf.zeros([padding_length], dtype=tf.float32)\r\n frame_masks = tf.concat(axis=0, values=[sample_masks, padding_masks])\r\n\r\n return feats, labels, frame_masks\r\n\r\n"
] | [
[
"numpy.sum",
"tensorflow.zeros",
"tensorflow.TFRecordReader",
"tensorflow.shape",
"numpy.zeros",
"tensorflow.reshape",
"tensorflow.decode_raw",
"tensorflow.device",
"tensorflow.ones",
"tensorflow.train.string_input_producer",
"numpy.shape",
"tensorflow.FixedLenFeature",
"tensorflow.concat",
"tensorflow.slice",
"numpy.stack"
]
] |
lferraz/VideoProcessingFramework | [
"19b87eddc0539d90ae4025629bac7c93c1387d56"
] | [
"SampleEncodeMultiThread.py"
] | [
"#\n# Copyright 2020 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Starting from Python 3.8 DLL search policy has changed.\n# We need to add path to CUDA DLLs explicitly.\nimport sys\nimport os\n\nif os.name == 'nt':\n # Add CUDA_PATH env variable\n cuda_path = os.environ[\"CUDA_PATH\"]\n if cuda_path:\n os.add_dll_directory(cuda_path)\n else:\n print(\"CUDA_PATH environment variable is not set.\", file = sys.stderr)\n print(\"Can't set CUDA DLLs search path.\", file = sys.stderr)\n exit(1)\n\n # Add PATH as well for minor CUDA releases\n sys_path = os.environ[\"PATH\"]\n if sys_path:\n paths = sys_path.split(';')\n for path in paths:\n if os.path.isdir(path):\n os.add_dll_directory(path)\n else:\n print(\"PATH environment variable is not set.\", file = sys.stderr)\n exit(1)\n\nimport PyNvCodec as nvc\nimport numpy as np\n\nfrom threading import Thread\n \nclass Worker(Thread):\n def __init__(self, gpuID, width, height, rawFilePath, encFilePath):\n Thread.__init__(self)\n\n res = width + 'x' + height\n \n self.nvUpl = nvc.PyFrameUploader(int(width), int(height), nvc.PixelFormat.YUV420, gpuID)\n self.nvCvt = nvc.PySurfaceConverter(int(width), int(height), nvc.PixelFormat.YUV420, nvc.PixelFormat.NV12, gpuID)\n self.nvEnc = nvc.PyNvEncoder({'preset': 'hq', 'codec': 'h264', 's': res}, gpuID)\n\n self.encFile = open(encFilePath, \"wb\")\n self.rawFile = open(rawFilePath, \"rb\")\n \n def run(self):\n try:\n while True:\n frameSize = self.nvEnc.Width() * self.nvEnc.Height() * 3 / 2\n rawFrame = np.fromfile(self.rawFile, np.uint8, count = int(frameSize))\n if not (rawFrame.size):\n print('No more video frames')\n break\n\n rawSurface = self.nvUpl.UploadSingleFrame(rawFrame)\n if (rawSurface.Empty()):\n print('Failed to upload video frame to GPU')\n break\n \n cvtSurface = self.nvCvt.Execute(rawSurface)\n if (cvtSurface.Empty()):\n print('Failed to do color conversion')\n break\n\n encFrame = np.ndarray(shape=(0), dtype=np.uint8)\n success = self.nvEnc.EncodeSingleSurface(cvtSurface, encFrame)\n if(success):\n bits = bytearray(encFrame)\n self.encFile.write(bits)\n\n #Encoder is asynchronous, so we need to flush it\n encFrame = np.ndarray(shape=(0), dtype=np.uint8)\n success = self.nvEnc.Flush(encFrame)\n if(success):\n bits = bytearray(encFrame)\n self.encFile.write(bits)\n \n except Exception as e:\n print(getattr(e, 'message', str(e)))\n decFile.close()\n \ndef create_threads(gpu_id1, width_1, height_1, input_file1, output_file1,\n gpu_id2, width_2, height_2, input_file2, output_file2):\n \n th1 = Worker(gpu_id1, width_1, height_1, input_file1, output_file1)\n th2 = Worker(gpu_id2, width_2, height_2, input_file2, output_file2)\n \n th1.start()\n th2.start()\n \n th1.join()\n th2.join()\n \nif __name__ == \"__main__\":\n print(\"This sample encodes 2 videos simultaneously from YUV files into 1/4 of initial size.\")\n print(\"Usage: SampleDecode.py $gpu_id1 $width_1 $height_1 $input_file1 $output_file_1 $gpu_id2 $width_2 $height_2 $input_file2 $output_file2\")\n \n 
if(len(sys.argv) < 11):\n print(\"Provide input CLI arguments as shown above\")\n exit(1)\n \n gpu_1 = int(sys.argv[1])\n width_1 = sys.argv[2]\n height_1 = sys.argv[3]\n input_1 = sys.argv[4]\n output_1 = sys.argv[5]\n \n gpu_2 = int(sys.argv[6])\n width_2 = sys.argv[7]\n height_2 = sys.argv[8]\n input_2 = sys.argv[9]\n output_2 = sys.argv[10]\n \n create_threads(gpu_1, width_1, height_1, input_1, output_1, gpu_2, width_2, height_2, input_2, output_2)\n"
] | [
[
"numpy.ndarray"
]
] |
haoxingchen/SSFormers | [
"3c2ea14db6a453d3345e03a790dd452af5fde8d8"
] | [
"modules/fsl_semi_query.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .encoder import make_encoder\nfrom .semi_query import make_query\n\n\nclass FSLSemiQuery(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n\n self.encoder = make_encoder(cfg)\n self.query = make_query(self.encoder.out_channels, cfg)\n self.forward_encoding = cfg.model.forward_encoding\n self.pyramid_list = self._parse_encoding_params()\n\n def _parse_encoding_params(self):\n idx = self.forward_encoding.find('-')\n if idx < 0:\n return []\n blocks = self.forward_encoding[idx + 1:].split(',')\n blocks = [int(s) for s in blocks]\n return blocks\n\n def _pyramid_encoding(self, x):\n b, n, c, h, w = x.shape\n x = x.view(-1, c, h, w)\n feature_list = []\n for size_ in self.pyramid_list:\n feature_list.append(F.adaptive_avg_pool2d(x, size_).view(b, n, c, 1, -1))\n\n if not feature_list:\n out = x.view(b, n, c, 1, -1)\n else:\n out = torch.cat(feature_list, dim=-1)\n return out\n\n def forward_Grid(self, support_x, support_y, query_x, query_y, unlabeled_x):\n b, s, grids_sc, h, w = support_x.shape\n grids_s = grids_sc // 3\n _, q, grids_qc = query_x.shape[:3]\n grids_q = grids_qc // 3\n\n support_xf = F.adaptive_avg_pool2d(self.encoder(support_x.view(-1, 3, h, w)), 1)\n support_xf = support_xf.view(b, s, grids_s, -1).permute(0, 1, 3, 2).unsqueeze(-1)\n query_xf = F.adaptive_avg_pool2d(self.encoder(query_x.view(-1, 3, h, w)), 1)\n query_xf = query_xf.view(b, q, grids_q, -1).permute(0, 1, 3, 2).unsqueeze(-1)\n unlabeled_xf = F.adaptive_avg_pool2d(self.encoder(unlabeled_x.view(-1, 3, h, w)), 1)\n unlabeled_xf = unlabeled_xf.view(b, q, grids_q, -1).permute(0, 1, 3, 2).unsqueeze(-1)\n\n query = self.query(support_xf, support_y, query_xf, query_y, unlabeled_xf)\n return query\n\n def forward_FCN(self, support_x, support_y, query_x, query_y, unlabeled_x):\n b, s, c, h, w = support_x.shape\n q = query_x.shape[1]\n\n support_xf = self.encoder(support_x.view(-1, c, h, w))\n query_xf = self.encoder(query_x.view(-1, c, h, w))\n unlabeled_xf = self.encoder(unlabeled_x.view(-1, c, h, w))\n\n fc, fh, fw = support_xf.shape[-3:]\n support_xf = support_xf.view(b, s, fc, fh, fw)\n query_xf = query_xf.view(b, q, fc, fh, fw)\n\n query = self.query(support_xf, support_y, query_xf, query_y, unlabeled_xf)\n return query\n\n def forward(self, support_x, support_y, query_x, query_y, unlabeled_x):\n if self.forward_encoding == \"FCN\":\n query = self.forward_FCN(support_x, support_y, query_x, query_y, unlabeled_x)\n elif self.forward_encoding.startswith(\"Grid\"):\n query = self.forward_Grid(support_x, support_y, query_x, query_y, unlabeled_x)\n else:\n raise NotImplementedError\n return query\n\n\ndef make_semi_fsl(cfg):\n return FSLSemiQuery(cfg)\n\n"
] | [
[
"torch.nn.functional.adaptive_avg_pool2d",
"torch.cat"
]
] |
wryoung412/CS294_Deep_RL_fall2017 | [
"077167de524157cc5f85f40232e5bcf6933ab2f5"
] | [
"hw2/train_pg.py"
] | [
"import numpy as np\nimport tensorflow as tf\nimport gym\nimport logz\nimport scipy.signal\nimport os\nimport time\nimport inspect\nfrom multiprocessing import Process\n\n#============================================================================================#\n# Utilities\n#============================================================================================#\n\ndef build_mlp(\n input_placeholder, \n output_size,\n scope, \n n_layers=2, \n size=64, \n activation=tf.tanh,\n output_activation=None\n ):\n #========================================================================================#\n # ----------SECTION 3----------\n # Network building\n #\n # Your code should make a feedforward neural network (also called a multilayer perceptron)\n # with 'n_layers' hidden layers of size 'size' units. \n # \n # The output layer should have size 'output_size' and activation 'output_activation'.\n #\n # Hint: use tf.layers.dense\n #========================================================================================#\n\n with tf.variable_scope(scope):\n # MY_CODE_HERE\n hidden = input_placeholder\n for i in range(n_layers):\n hidden = tf.layers.dense(hidden, size, activation, name='blah' + str(i))\n return tf.layers.dense(hidden, output_size, output_activation)\n\ndef pathlength(path):\n return len(path[\"reward\"])\n\ndef reward_to_q(rewards, gamma, reward_to_go):\n q = np.zeros_like(rewards)\n T = len(rewards)\n if reward_to_go:\n q += rewards\n for i in range(1, T):\n q[:(T - i)] += gamma * q[i:T]\n else:\n r = 0\n for i in range(T - 1, -1, -1):\n r = rewards[i] + gamma * r\n q = r * np.ones_like(q)\n return q\n \n\n#============================================================================================#\n# Policy Gradient\n#============================================================================================#\n\n# batch_size is more natural for PG as we need to take average over paths. \n# timesteps_per_batch is more relevant for Q-learning as learning is done step by step.\n\n# CartPole\n# Here is a good run\n# python train_pg.py CartPole-v0 --n_layers 4 --target_reward 200 --learning_rate 1e-2 --nn_baseline --batch_size 10\n# ********** Iteration 8 ************\n# total trials: 90\n# ----------------------------------------\n# | Time | 31.1 |\n# | Iteration | 8 |\n# | AverageReturn | 200 |\n# | StdReturn | 0 |\n# | MaxReturn | 200 |\n# | MinReturn | 200 |\n# | EpLenMean | 200 |\n# | EpLenStd | 0 |\n# | TimestepsThisBatch | 2e+03 |\n# | TimestepsSoFar | 1.15e+04 |\n# ----------------------------------------\n#\n# MountainCar\n# Working poorly. It seems some good exploration is needed to get any positive path.\n# \n# Acrobot\n# Similar to MountainCar, but it is possible to randomly get a positive path,\n# and then the model starts to learn.\n# I can get to about 90 steps. What is the \"solve\" criterion?\n# https://github.com/jonholifield/Acrobot-v1\n\n# Box2D\n# https://github.com/pybox2d/pybox2d/blob/master/INSTALL.md\n# 'sudo' python setup.py install: should not use sudo in venv, it complains about setuptools not found\n# LunarLander\n# It does not do that well but works to some extent. 
\n\n\ndef train_PG(exp_name='',\n env_name='CartPole-v0',\n n_iter=100, \n gamma=1.0, \n # min_timesteps_per_batch=1000,\n batch_size=20,\n max_path_length=None,\n learning_rate=5e-3, \n reward_to_go=True, \n animate=True, \n logdir=None, \n normalize_advantages=True,\n nn_baseline=False, \n seed=0,\n # network arguments\n n_layers=1,\n size=32,\n target_reward=None\n ):\n\n start = time.time()\n\n TODO = 1\n\n # Configure output directory for logging\n logz.configure_output_dir(logdir)\n\n # Log experimental parameters\n args = inspect.getargspec(train_PG)[0]\n locals_ = locals()\n params = {k: locals_[k] if k in locals_ else None for k in args}\n logz.save_params(params)\n\n # Set random seeds\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n # Make the gym environment\n env = gym.make(env_name)\n \n # Is this env continuous, or discrete?\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n assert discrete, 'only discrete is implemented'\n\n # Maximum length for episodes\n max_path_length = max_path_length or env.spec.max_episode_steps\n\n #========================================================================================#\n # Notes on notation:\n # \n # Symbolic variables have the prefix sy_, to distinguish them from the numerical values\n # that are computed later in the function\n # \n # Prefixes and suffixes:\n # ob - observation \n # ac - action\n # _no - this tensor should have shape (batch size /n/, observation dim)\n # _na - this tensor should have shape (batch size /n/, action dim)\n # _n - this tensor should have shape (batch size /n/)\n # \n # Note: batch size /n/ is defined at runtime, and until then, the shape for that axis\n # is None\n #========================================================================================#\n\n # Observation and action sizes\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n\n #========================================================================================#\n # ----------SECTION 4----------\n # Placeholders\n # \n # Need these for batch observations / actions / advantages in policy gradient loss function.\n #========================================================================================#\n\n sy_ob_no = tf.placeholder(shape=[None, ob_dim], name=\"ob\", dtype=tf.float32)\n if discrete:\n sy_ac_na = tf.placeholder(shape=[None], name=\"ac\", dtype=tf.int32) \n else:\n sy_ac_na = tf.placeholder(shape=[None, ac_dim], name=\"ac\", dtype=tf.float32) \n\n # Define a placeholder for advantages\n sy_adv_n = tf.placeholder(shape=[None], name=\"adv\", dtype=tf.float32) \n\n\n #========================================================================================#\n # ----------SECTION 4----------\n # Networks\n # \n # Make symbolic operations for\n # 1. Policy network outputs which describe the policy distribution.\n # a. For the discrete case, just logits for each action.\n #\n # b. For the continuous case, the mean / log std of a Gaussian distribution over \n # actions.\n #\n # Hint: use the 'build_mlp' function you defined in utilities.\n #\n # Note: these ops should be functions of the placeholder 'sy_ob_no'\n #\n # 2. Producing samples stochastically from the policy distribution.\n # a. For the discrete case, an op that takes in logits and produces actions.\n #\n # Should have shape [None]\n #\n # b. 
For the continuous case, use the reparameterization trick:\n # The output from a Gaussian distribution with mean 'mu' and std 'sigma' is\n #\n # mu + sigma * z, z ~ N(0, I)\n #\n # This reduces the problem to just sampling z. (Hint: use tf.random_normal!)\n #\n # Should have shape [None, ac_dim]\n #\n # Note: these ops should be functions of the policy network output ops.\n #\n # 3. Computing the log probability of a set of actions that were actually taken, \n # according to the policy.\n #\n # Note: these ops should be functions of the placeholder 'sy_ac_na', and the \n # policy network output ops.\n # \n #========================================================================================#\n\n if discrete:\n # MY_CODE_HERE\n sy_logits_na = build_mlp(\n sy_ob_no,\n ac_dim,\n \"nn_policy\",\n n_layers=n_layers,\n size=size)\n sy_sampled_ac = tf.multinomial(sy_logits_na, 1) # Hint: Use the tf.multinomial op\n sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=sy_logits_na, labels=sy_ac_na)\n\n else:\n # YOUR_CODE_HERE\n sy_mean = TODO\n sy_logstd = TODO # logstd should just be a trainable variable, not a network output.\n sy_sampled_ac = TODO\n sy_logprob_n = TODO # Hint: Use the log probability under a multivariate gaussian. \n\n\n\n #========================================================================================#\n # ----------SECTION 4----------\n # Loss Function and Training Operation\n #========================================================================================#\n\n # MY_CODE_HERE\n # Loss function that we'll differentiate to get the policy gradient.\n # TODO: reduce_mean is not really correct here\n loss = tf.reduce_mean(sy_logprob_n * sy_adv_n)\n update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n\n #========================================================================================#\n # ----------SECTION 5----------\n # Optional Baseline\n #========================================================================================#\n\n if nn_baseline:\n baseline_prediction = tf.squeeze(build_mlp(\n sy_ob_no, \n 1, \n \"nn_baseline\",\n n_layers=n_layers,\n size=size))\n # Define placeholders for targets, a loss function and an update op for fitting a \n # neural network baseline. These will be used to fit the neural network baseline. 
\n # MY_CODE_HERE\n sy_q_n = tf.placeholder(shape=[None], name='q', dtype=tf.float32)\n baseline_loss = tf.nn.l2_loss(baseline_prediction - sy_q_n)\n baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)\n\n\n #========================================================================================#\n # Tensorflow Engineering: Config, Session, Variable initialization\n #========================================================================================#\n\n tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1) \n\n sess = tf.Session(config=tf_config)\n sess.__enter__() # equivalent to `with sess:`\n tf.global_variables_initializer().run() #pylint: disable=E1101\n\n tf_board = os.path.join('/tmp/gube/hw2')\n writer = tf.summary.FileWriter(os.path.join(tf_board, str(int(time.time()))))\n writer.add_graph(sess.graph)\n merged_summary = tf.summary.merge_all()\n\n #========================================================================================#\n # Training Loop\n #========================================================================================#\n\n total_timesteps = 0\n total_trials = 0\n\n for itr in range(n_iter):\n print(\"********** Iteration %i ************\"%itr)\n\n # Collect paths until we have enough timesteps\n timesteps_this_batch = 0\n trials_this_batch = 0\n paths = []\n while True:\n ob = env.reset()\n obs, acs, rewards = [], [], []\n animate_this_episode=(len(paths)==0 and (itr % 5 == 0) and animate)\n steps = 0\n while True:\n if animate_this_episode:\n env.render()\n time.sleep(0.05)\n obs.append(ob)\n ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})\n ac = ac[0][0] # was ac[0]\n acs.append(ac)\n ob, rew, done, _ = env.step(ac)\n rewards.append(rew)\n steps += 1\n if done or steps > max_path_length:\n break\n total_trials += 1\n trials_this_batch += 1\n path = {\"observation\" : np.array(obs), \n \"reward\" : np.array(rewards), \n \"action\" : np.array(acs)}\n paths.append(path)\n timesteps_this_batch += pathlength(path)\n # if timesteps_this_batch > min_timesteps_per_batch:\n # break\n if trials_this_batch == batch_size:\n break\n total_timesteps += timesteps_this_batch\n print('total trials:', total_trials)\n\n # Build arrays for observation, action for the policy gradient update by concatenating \n # across paths\n ob_no = np.concatenate([path[\"observation\"] for path in paths])\n ac_na = np.concatenate([path[\"action\"] for path in paths])\n\n #====================================================================================#\n # ----------SECTION 4----------\n # Computing Q-values\n #\n # Your code should construct numpy arrays for Q-values which will be used to compute\n # advantages (which will in turn be fed to the placeholder you defined above). \n #\n # Recall that the expression for the policy gradient PG is\n #\n # PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]\n #\n # where \n #\n # tau=(s_0, a_0, ...) is a trajectory,\n # Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),\n # and b_t is a baseline which may depend on s_t. \n #\n # You will write code for two cases, controlled by the flag 'reward_to_go':\n #\n # Case 1: trajectory-based PG \n #\n # (reward_to_go = False)\n #\n # Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over \n # entire trajectory (regardless of which time step the Q-value should be for). 
\n #\n # For this case, the policy gradient estimator is\n #\n # E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]\n #\n # where\n #\n # Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.\n #\n # Thus, you should compute\n #\n # Q_t = Ret(tau)\n #\n # Case 2: reward-to-go PG \n #\n # (reward_to_go = True)\n #\n # Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting\n # from time step t. Thus, you should compute\n #\n # Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}\n #\n #\n # Store the Q-values for all timesteps and all trajectories in a variable 'q_n',\n # like the 'ob_no' and 'ac_na' above. \n #\n #====================================================================================#\n\n # MY_CODE_HERE\n q_n = np.concatenate([reward_to_q(path['reward'], gamma, reward_to_go) for path in paths])\n\n #====================================================================================#\n # ----------SECTION 5----------\n # Computing Baselines\n #====================================================================================#\n\n if nn_baseline:\n # If nn_baseline is True, use your neural network to predict reward-to-go\n # at each timestep for each trajectory, and save the result in a variable 'b_n'\n # like 'ob_no', 'ac_na', and 'q_n'.\n #\n # Hint #bl1: rescale the output from the nn_baseline to match the statistics\n # (mean and std) of the current or previous batch of Q-values. (Goes with Hint\n # #bl2 below.)\n\n # MY_CODE_HERE\n # The bootstrap version uses r_t + v(s_{t+1}) - v(s_t), which is biased\n b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})\n adv_n = q_n - b_n\n else:\n adv_n = q_n.copy()\n\n #====================================================================================#\n # ----------SECTION 4----------\n # Advantage Normalization\n #====================================================================================#\n\n if normalize_advantages:\n # On the next line, implement a trick which is known empirically to reduce variance\n # in policy gradient methods: normalize adv_n to have mean zero and std=1. \n # MY_CODE_HERE\n adv_mu = np.mean(adv_n)\n adv_std = np.std(adv_n)\n # Could be more robust than this\n if adv_std == 0.0:\n return\n # The normalization could be problematic.\n # For environments like CartPole, the reward is an integer and is capped at 200.\n # When not using base, adv_n could all be 200 and adv_std = 0. \n adv_n = (adv_n - adv_mu) / adv_std\n\n\n #====================================================================================#\n # ----------SECTION 5----------\n # Optimizing Neural Network Baseline\n #====================================================================================#\n if nn_baseline:\n # ----------SECTION 5----------\n # If a neural network baseline is used, set up the targets and the inputs for the \n # baseline. \n # \n # Fit it to the current batch in order to use for the next iteration. Use the \n # baseline_update_op you defined earlier.\n #\n # Hint #bl2: Instead of trying to target raw Q-values directly, rescale the \n # targets to have mean zero and std=1. (Goes with Hint #bl1 above.)\n\n # MY_CODE_HERE\n # TODO: what is the right way to fit?\n # 1. Using fixed number of steps.\n # It might not balance the good vs bad paths well, but 100 seems pretty good. \n # 2. Using timesteps as number of steps. 
This is CartPole specific.\n print('timesteps:', timesteps_this_batch)\n for i in range(100):\n sess.run(baseline_update_op, feed_dict={sy_ob_no: ob_no, sy_q_n: q_n})\n\n #====================================================================================#\n # ----------SECTION 4----------\n # Performing the Policy Update\n #====================================================================================#\n\n # Call the update operation necessary to perform the policy gradient update based on \n # the current batch of rollouts.\n # \n # For debug purposes, you may wish to save the value of the loss function before\n # and after an update, and then log them below. \n\n # MY_CODE_HERE\n sess.run(update_op, feed_dict={sy_ob_no: ob_no,\n sy_ac_na: ac_na,\n sy_adv_n: adv_n})\n\n\n # Log diagnostics\n returns = [path[\"reward\"].sum() for path in paths]\n ep_lengths = [pathlength(path) for path in paths]\n logz.log_tabular(\"Time\", time.time() - start)\n logz.log_tabular(\"Iteration\", itr)\n logz.log_tabular(\"AverageReturn\", np.mean(returns))\n logz.log_tabular(\"StdReturn\", np.std(returns))\n logz.log_tabular(\"MaxReturn\", np.max(returns))\n logz.log_tabular(\"MinReturn\", np.min(returns))\n logz.log_tabular(\"EpLenMean\", np.mean(ep_lengths))\n logz.log_tabular(\"EpLenStd\", np.std(ep_lengths))\n logz.log_tabular(\"TimestepsThisBatch\", timesteps_this_batch)\n logz.log_tabular(\"TimestepsSoFar\", total_timesteps)\n logz.dump_tabular()\n logz.pickle_tf_vars()\n \n # This stopping criterion is not robust when the batch size is small.\n if target_reward is not None:\n if np.mean([path[\"reward\"].sum() for path in paths]) >= target_reward:\n return\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('env_name', type=str)\n parser.add_argument('--exp_name', type=str, default='vpg')\n parser.add_argument('--render', action='store_true')\n parser.add_argument('--discount', type=float, default=1.0)\n parser.add_argument('--n_iter', '-n', type=int, default=100)\n parser.add_argument('--batch_size', '-b', type=int, default=1000)\n parser.add_argument('--ep_len', '-ep', type=float, default=-1.)\n parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)\n parser.add_argument('--reward_to_go', '-rtg', action='store_true')\n parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')\n parser.add_argument('--nn_baseline', '-bl', action='store_true')\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--n_experiments', '-e', type=int, default=1)\n parser.add_argument('--n_layers', '-l', type=int, default=1)\n parser.add_argument('--size', '-s', type=int, default=32)\n parser.add_argument('--target_reward', type=float, default=None)\n args = parser.parse_args()\n\n if not(os.path.exists('data')):\n os.makedirs('data')\n logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime(\"%d-%m-%Y_%H-%M-%S\")\n logdir = os.path.join('data', logdir)\n if not(os.path.exists(logdir)):\n os.makedirs(logdir)\n\n max_path_length = args.ep_len if args.ep_len > 0 else None\n\n for e in range(args.n_experiments):\n seed = args.seed + 10*e\n print('Running experiment with seed %d'%seed)\n def train_func():\n train_PG(\n exp_name=args.exp_name,\n env_name=args.env_name,\n n_iter=args.n_iter,\n gamma=args.discount,\n # min_timesteps_per_batch=args.batch_size,\n batch_size=args.batch_size,\n max_path_length=max_path_length,\n learning_rate=args.learning_rate,\n reward_to_go=args.reward_to_go,\n 
animate=args.render,\n logdir=os.path.join(logdir,'%d'%seed),\n normalize_advantages=not(args.dont_normalize_advantages),\n nn_baseline=args.nn_baseline, \n seed=seed,\n n_layers=args.n_layers,\n size=args.size,\n target_reward=args.target_reward\n )\n # Awkward hacky process runs, because Tensorflow does not like\n # repeatedly calling train_PG in the same thread.\n p = Process(target=train_func, args=tuple())\n p.start()\n p.join()\n \n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.random.seed",
"tensorflow.variable_scope",
"tensorflow.nn.l2_loss",
"numpy.ones_like",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.multinomial",
"tensorflow.global_variables_initializer",
"numpy.mean",
"numpy.max",
"tensorflow.set_random_seed",
"numpy.min",
"tensorflow.Session",
"numpy.std",
"tensorflow.layers.dense",
"tensorflow.ConfigProto",
"numpy.zeros_like",
"tensorflow.placeholder",
"tensorflow.summary.merge_all",
"tensorflow.train.AdamOptimizer",
"tensorflow.reduce_mean",
"numpy.array",
"numpy.concatenate"
]
] |
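The comment block in the train_PG row above derives the two ways the code forms Q-values for the policy-gradient estimator: the full discounted return Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'} (used for every timestep when reward_to_go is False) and the reward-to-go sum Q_t = sum_{t'=t}^T gamma^(t'-t) r_{t'}. The training loop then calls a helper named reward_to_q that is defined outside this excerpt; the sketch below is a minimal, hypothetical version of such a helper, written to be consistent with how it is called but not taken from the repository.

import numpy as np

def reward_to_q(rewards, gamma, reward_to_go):
    # Per-timestep Q estimates for a single trajectory.
    # reward_to_go=True  -> Q_t = sum_{t'>=t} gamma^(t'-t) * r_{t'}
    # reward_to_go=False -> every timestep gets the full return Ret(tau).
    rewards = np.asarray(rewards, dtype=np.float64)
    q = np.zeros_like(rewards)
    running = 0.0
    for t in reversed(range(len(rewards))):   # suffix sums, computed backwards
        running = rewards[t] + gamma * running
        q[t] = running
    if not reward_to_go:
        q[:] = q[0]                           # broadcast Ret(tau) to all timesteps
    return q

# rewards [1, 1, 1] with gamma = 0.5:
print(reward_to_q([1.0, 1.0, 1.0], 0.5, True))   # [1.75 1.5  1.  ]
print(reward_to_q([1.0, 1.0, 1.0], 0.5, False))  # [1.75 1.75 1.75]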
andi611/Mockingjay-Speech-Representation | [
"e77df17a7f63a983c3757140c7a1e8c199cac614"
] | [
"runner_mockingjay.py"
] | [
"# -*- coding: utf-8 -*- #\n\"\"\"*********************************************************************************************\"\"\"\n# FileName [ runner_mockingjay.py ]\n# Synopsis [ runner for the mockingjay model ]\n# Author [ Andy T. Liu (Andi611) ]\n# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]\n\"\"\"*********************************************************************************************\"\"\"\n\n\n###############\n# IMPORTATION #\n###############\nimport yaml\nimport torch\nimport random\nimport argparse\nimport numpy as np\nfrom utility.timer import Timer\n\n\n#############################\n# MOCKINGJAY CONFIGURATIONS #\n#############################\ndef get_mockingjay_args():\n \n parser = argparse.ArgumentParser(description='Argument Parser for the mockingjay project.')\n \n # setting\n parser.add_argument('--config', default='config/mockingjay_libri.yaml', type=str, help='Path to experiment config.')\n parser.add_argument('--seed', default=1337, type=int, help='Random seed for reproducable results.', required=False)\n\n # Logging\n parser.add_argument('--logdir', default='log/log_mockingjay/', type=str, help='Logging path.', required=False)\n parser.add_argument('--name', default=None, type=str, help='Name for logging.', required=False)\n\n # model ckpt\n parser.add_argument('--load', action='store_true', help='Load pre-trained model to restore training, no need to specify this during testing.')\n parser.add_argument('--ckpdir', default='result/result_mockingjay/', type=str, help='Checkpoint/Result path.', required=False)\n parser.add_argument('--ckpt', default='mockingjay_libri_sd1337_LinearLarge/mockingjay-500000.ckpt', type=str, help='path to mockingjay model checkpoint.', required=False)\n # parser.add_argument('--ckpt', default='mockingjay_libri_sd1337_MelBase/mockingjay-500000.ckpt', type=str, help='path to mockingjay model checkpoint.', required=False)\n parser.add_argument('--dckpt', default='baseline_sentiment_libri_sd1337/baseline_sentiment-500000.ckpt', type=str, help='path to downstream checkpoint.', required=False)\n parser.add_argument('--apc_path', default='./result/result_apc/apc_libri_sd1337_standard/apc-500000.ckpt', type=str, help='path to the apc model checkpoint.', required=False)\n\n # mockingjay\n parser.add_argument('--train', action='store_true', help='Train the model.')\n parser.add_argument('--run_mockingjay', action='store_true', help='train and test the downstream tasks using mockingjay representations.')\n parser.add_argument('--run_apc', action='store_true', help='train and test the downstream tasks using apc representations.')\n parser.add_argument('--fine_tune', action='store_true', help='fine tune the mockingjay model with downstream task.')\n parser.add_argument('--plot', action='store_true', help='Plot model generated results during testing.')\n \n # phone task\n parser.add_argument('--train_phone', action='store_true', help='Train the phone classifier on mel or mockingjay representations.')\n parser.add_argument('--test_phone', action='store_true', help='Test mel or mockingjay representations using the trained phone classifier.')\n \n # sentiment task\n parser.add_argument('--train_sentiment', action='store_true', help='Train the sentiment classifier on mel or mockingjay representations.')\n parser.add_argument('--test_sentiment', action='store_true', help='Test mel or mockingjay representations using the trained sentiment classifier.')\n \n # speaker verification task\n parser.add_argument('--train_speaker', 
action='store_true', help='Train the speaker classifier on mel or mockingjay representations.')\n parser.add_argument('--test_speaker', action='store_true', help='Test mel or mockingjay representations using the trained speaker classifier.')\n \n # Options\n parser.add_argument('--with_head', action='store_true', help='inference with the spectrogram head, the model outputs spectrogram.')\n parser.add_argument('--output_attention', action='store_true', help='plot attention')\n parser.add_argument('--load_ws', default='result/result_mockingjay_sentiment/10111754-10170300-weight_sum/best_val.ckpt', help='load weighted-sum weights from trained downstream model')\n parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')\n parser.add_argument('--no-msg', action='store_true', help='Hide all messages.')\n\n\n args = parser.parse_args()\n setattr(args,'gpu', not args.cpu)\n setattr(args,'verbose', not args.no_msg)\n config = yaml.load(open(args.config,'r'))\n config['timer'] = Timer()\n \n return config, args\n\n\n########\n# MAIN #\n########\ndef main():\n \n # get arguments\n config, args = get_mockingjay_args()\n \n # Fix seed and make backends deterministic\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n\n # Train Mockingjay\n if args.train:\n from mockingjay.solver import Trainer\n trainer = Trainer(config, args)\n trainer.load_data(split='train')\n trainer.set_model(inference=False)\n trainer.exec()\n\n ##################################################################################\n \n # Train Phone Task\n elif args.train_phone:\n from downstream.solver import Downstream_Trainer\n task = 'mockingjay_phone' if args.run_mockingjay \\\n else 'apc_phone' if args.run_apc else 'baseline_phone'\n trainer = Downstream_Trainer(config, args, task=task)\n trainer.load_data(split='train', load='phone')\n trainer.set_model(inference=False)\n trainer.exec()\n\n # Test Phone Task\n elif args.test_phone:\n from downstream.solver import Downstream_Tester\n task = 'mockingjay_phone' if args.run_mockingjay \\\n else 'apc_phone' if args.run_apc else 'baseline_phone'\n tester = Downstream_Tester(config, args, task=task)\n tester.load_data(split='test', load='phone')\n tester.set_model(inference=True)\n tester.exec()\n\n ##################################################################################\n\n # Train Sentiment Task\n elif args.train_sentiment:\n from downstream.solver import Downstream_Trainer\n task = 'mockingjay_sentiment' if args.run_mockingjay \\\n else 'apc_sentiment' if args.run_apc else 'baseline_sentiment'\n trainer = Downstream_Trainer(config, args, task=task)\n trainer.load_data(split='train', load='sentiment')\n trainer.set_model(inference=False)\n trainer.exec()\n\n # Test Sentiment Task\n elif args.test_sentiment:\n from downstream.solver import Downstream_Tester\n task = 'mockingjay_sentiment' if args.run_mockingjay \\\n else 'apc_sentiment' if args.run_apc else 'baseline_sentiment'\n tester = Downstream_Tester(config, args, task=task)\n tester.load_data(split='test', load='sentiment')\n tester.set_model(inference=True)\n tester.exec()\n\n ##################################################################################\n \n # Train Speaker Task\n elif args.train_speaker:\n from downstream.solver import Downstream_Trainer\n task = 'mockingjay_speaker' if 
args.run_mockingjay \\\n else 'apc_speaker' if args.run_apc else 'baseline_speaker'\n trainer = Downstream_Trainer(config, args, task=task)\n trainer.load_data(split='train', load='speaker')\n # trainer.load_data(split='train', load='speaker_large') # Deprecated\n trainer.set_model(inference=False)\n trainer.exec()\n\n # Test Speaker Task\n elif args.test_speaker:\n from downstream.solver import Downstream_Tester\n task = 'mockingjay_speaker' if args.run_mockingjay \\\n else 'apc_speaker' if args.run_apc else 'baseline_speaker'\n tester = Downstream_Tester(config, args, task=task)\n tester.load_data(split='test', load='speaker')\n # tester.load_data(split='test', load='speaker_large') # Deprecated\n tester.set_model(inference=True)\n tester.exec()\n\n ##################################################################################\n\n # Visualize Mockingjay\n elif args.plot:\n from mockingjay.solver import Tester\n tester = Tester(config, args)\n tester.load_data(split='test', load_mel_only=True)\n tester.set_model(inference=True, with_head=args.with_head, output_attention=args.output_attention)\n tester.plot(with_head=args.with_head)\n\n config['timer'].report()\n\n\n########################\n# GET MOCKINGJAY MODEL #\n########################\ndef get_mockingjay_model(from_path='result/result_mockingjay/mockingjay_libri_sd1337_best/mockingjay-500000.ckpt', display_settings=False):\n ''' Wrapper that loads the mockingjay model from checkpoint path '''\n\n # load config and paras\n all_states = torch.load(from_path, map_location='cpu')\n config = all_states['Settings']['Config']\n paras = all_states['Settings']['Paras']\n\n # display checkpoint settings\n if display_settings:\n for cluster in config:\n print(cluster + ':')\n for item in config[cluster]:\n print('\\t' + str(item) + ': ', config[cluster][item])\n print('paras:')\n v_paras = vars(paras)\n for item in v_paras:\n print('\\t' + str(item) + ': ', v_paras[item])\n\n # load model with Tester\n from mockingjay.solver import Tester\n mockingjay = Tester(config, paras)\n mockingjay.set_model(inference=True, with_head=False, from_path=from_path)\n return mockingjay\n\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"torch.cuda.manual_seed_all",
"torch.load",
"torch.manual_seed",
"numpy.random.seed",
"torch.cuda.is_available"
]
] |
creaiter/Classification-PyTorch | [
"2feabf4b3d0d561420399bdf65840a58af76069d"
] | [
"models/wideresnet.py"
] | [
"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\ndef relu(inplace=False):\n \"\"\"ReLU activation\"\"\"\n return nn.ReLU(inplace=inplace)\n\n\ndef bn(num_features):\n \"\"\"Batch normalization 2D\"\"\"\n return nn.BatchNorm2d(num_features)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1):\n super(BasicBlock, self).__init__()\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = bn(planes)\n self.relu1 = relu(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = bn(planes)\n self.relu2 = relu(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu2(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1):\n super(Bottleneck, self).__init__()\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = bn(width)\n self.relu1 = relu(inplace=True)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = bn(width)\n self.relu2 = relu(inplace=True)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = bn(planes * self.expansion)\n self.relu3 = relu(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu2(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu3(out)\n\n return out\n\n\nclass WideResNet_Cifar(nn.Module):\n def __init__(self, block, layers, width_mult=1, num_classes=10, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None):\n super(WideResNet_Cifar, self).__init__()\n self.block_name = str(block.__name__)\n \n self.inplanes = 16\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3: \n raise 
ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n \n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,\n bias=False)\n self.bn1 = bn(self.inplanes)\n self.relu1 = relu(inplace=False)\n\n self.layer1 = self._make_layer(block, 16 * width_mult, layers[0])\n self.layer2 = self._make_layer(block, 32 * width_mult, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 64 * width_mult, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(64 * block.expansion * width_mult, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n #nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n bn(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation))\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x):\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n return x\n\n def forward(self, x):\n return self._forward_impl(x)\n\n\n# Model configurations\n'''\nmodel_cfgs = {\n 18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3]),\n}\n'''\nmodel_cfgs_cifar = {\n 16: (BasicBlock, [2, 2, 2]),\n 22: (BasicBlock, [3, 3, 3]),\n 28: (BasicBlock, [4, 4, 4]),\n 40: (BasicBlock, [6, 6, 6]),\n 52: (BasicBlock, [8, 8, 8]),\n}\n\n\ndef set_model(cfg):\n r\"\"\"\n Args:\n cfg: configuration\n \"\"\"\n # set model configurations\n if data in ['cifar10', 'cifar100']:\n assert (cfg.layers - 4) % 6 == 0, \"The number of layers should be 16, 22, 28, 40, 52, etc.\"\n assert cfg.width_mult == int(cfg.width_mult), \"The width multiplier should be an integer value.\"\n n = int((cfg.layers - 4) / 6)\n layers = [n, n, n]\n image_size = 32\n num_classes = int(cfg.dataset[5:])\n model = WideResNet_Cifar(BasicBlock, layers, cfg.width_mult, num_classes)\n \n elif data == 
'imagenet':\n model = None\n image_size = None\n raise Exception('Undefined dataset for WideResNet architecture.')\n\n else:\n raise Exception('Undefined dataset for WideResNet architecture.')\n \n\n return model, image_size"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.AdaptiveAvgPool2d",
"torch.flatten",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.ReLU"
]
] |
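In set_model of the WideResNet row above, the branching tests a name `data` that is never defined in the function; from context it presumably stands for `cfg.dataset`. The depth bookkeeping itself follows the row's own convention, n = (layers - 4) / 6 basic blocks per stage, which matches the model_cfgs_cifar table (16 -> [2, 2, 2], 28 -> [4, 4, 4]). A tiny stand-alone sketch of that mapping; the helper name is hypothetical, not part of the repository.

def wrn_blocks_per_stage(depth):
    # Map a WideResNet-for-CIFAR depth (16, 22, 28, 40, 52, ...) to [n, n, n] block counts.
    assert (depth - 4) % 6 == 0, "depth should satisfy depth = 6n + 4"
    n = (depth - 4) // 6
    return [n, n, n]

print(wrn_blocks_per_stage(16))  # [2, 2, 2]
print(wrn_blocks_per_stage(28))  # [4, 4, 4]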
myutman/contracode | [
"f2a589e1efd2788874fd0468d1ecc30d6a14c396"
] | [
"scripts/hf_per_train_shard_tokenize.py"
] | [
"import sys\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\nfrom transformers import BertTokenizerFast\nfrom tqdm import tqdm\n\nif __name__ == \"__main__\":\n assert len(sys.argv) == 2\n data_shard_idx = int(sys.argv[1])\n data_shard_path = f\"/data/ajay/contracode/data/hf_data/train_chunks/augmented_pretrain_df.{data_shard_idx:04d}.train.pickle.gz\"\n data_shard_path_out = (\n f\"/data/ajay/contracode/data/hf_data/train_chunks_tokenized/augmented_pretrain_tokenized_df.{data_shard_idx:04d}.train.pickle.gz\"\n )\n\n def load_tokenizer(path=\"data/vocab/8k_bpe/8k_bpe-vocab.txt\"):\n return BertTokenizerFast(path, clean_text=True, lowercase=False, strip_accents=True, unk_token=\"<unk>\")\n\n def load_data(path):\n return pd.read_pickle(path)\n\n tokenizer = load_tokenizer()\n df_shard = load_data(data_shard_path)\n tqdm.pandas()\n df_shard[\"toks\"] = df_shard[\"text\"].progress_apply(lambda x: np.asarray(tokenizer.encode(x)))\n df_shard = df_shard[[\"data_idx\", \"toks\"]]\n\n df_shard.to_pickle(data_shard_path_out)\n"
] | [
[
"pandas.read_pickle"
]
] |
penguinflys/imgviz | [
"3deadced1fcce8ca51716c705d07a058b1839514"
] | [
"examples/resize.py"
] | [
"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\n\nimport imgviz\n\n\ndef resize():\n data = imgviz.data.arc2017()\n\n rgb = data[\"rgb\"]\n\n H, W = rgb.shape[:2]\n rgb_resized = imgviz.resize(rgb, height=0.1)\n\n # -------------------------------------------------------------------------\n\n plt.figure(dpi=200)\n\n plt.subplot(121)\n plt.title(\"rgb:\\n{}\".format(rgb.shape))\n plt.imshow(rgb)\n plt.axis(\"off\")\n\n plt.subplot(122)\n plt.title(\"rgb_resized:\\n{}\".format(rgb_resized.shape))\n plt.imshow(rgb_resized)\n plt.axis(\"off\")\n\n img = imgviz.io.pyplot_to_numpy()\n plt.close()\n\n return img\n\n\nif __name__ == \"__main__\":\n from base import run_example\n\n run_example(resize)\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close"
]
] |
Nathaniel-Rodriguez/neuralnetsim | [
"c353af92fb3f44539370220963b07bdfd9822149"
] | [
"src/neuralnetsim/simulation.py"
] | [
"__all__ = [\"simulate_model\",\n \"simulate_grid\",\n \"simulate_orig\"]\n\n\nimport neuralnetsim\nimport networkx as nx\nimport numpy as np\nfrom distributed import Client\nfrom pathlib import Path\nfrom typing import Type\nfrom typing import Dict\nfrom typing import Any\nfrom typing import List\nfrom typing import Union\n\n\ndef simulation_worker(\n graph: nx.DiGraph,\n rng: np.random.RandomState,\n x0: np.ndarray,\n parameter_path: Path,\n circuit_type: Union[Type[neuralnetsim.DistributionCircuit],\n Type[neuralnetsim.NeuralCircuit]],\n duration: float,\n kernel_parameters: Dict\n) -> Dict[int, np.ndarray]:\n \"\"\"\n\n\n :param x0:\n :param parameter_path:\n :param circuit_type:\n :param graph:\n :param rng:\n :param duration:\n :param kernel_parameters:\n :return:\n \"\"\"\n circuit_parameters = neuralnetsim.load(parameter_path)\n circuit_parameters.network = graph\n circuit_parameters.from_optimizer(x0)\n with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,\n circuit_parameters, rng) as circuit:\n circuit.run(duration)\n return circuit.get_spike_trains()\n\n\ndef simulate_model(\n x0,\n parameter_path: Path,\n fitted_graph_path: Path,\n name: str,\n client: Client,\n duration: float,\n seed: int,\n circuit_type: Type,\n save_path: Path,\n kernel_parameters: Dict[str, Any] = None,\n):\n \"\"\"\n Data in list is matched to the order of the graphs in the fitted graph\n result file.\n\n :param x0:\n :param parameter_path:\n :param fitted_graph_path:\n :param name:\n :param client:\n :param duration:\n :param seed:\n :param circuit_type:\n :param save_path:\n :param kernel_parameters:\n :return:\n \"\"\"\n if kernel_parameters is None:\n kernel_parameters = {}\n fitted_graph_results = neuralnetsim.load(fitted_graph_path)\n rng = np.random.RandomState(seed)\n sims = client.map(\n simulation_worker,\n [graph for graph in fitted_graph_results['graphs']],\n [np.random.RandomState(rng.randint(1, 2**31))\n for _ in range(len(fitted_graph_results['graphs']))],\n pure=False,\n x0=x0,\n parameter_path=parameter_path,\n circuit_type=circuit_type,\n duration=duration,\n kernel_parameters=kernel_parameters\n )\n data = client.gather(sims)\n neuralnetsim.save(\n {\n 'spike_data': data,\n 'seed': seed,\n 'name': name,\n 'duration': duration,\n 'kernel_parameters': kernel_parameters\n },\n save_path\n )\n\n\ndef grid_worker(\n graph: nx.DiGraph,\n rng: np.random.RandomState,\n par: float,\n x0: np.ndarray,\n par_key: str,\n parameter_path: Path,\n circuit_type: Union[Type[neuralnetsim.DistributionCircuit],\n Type[neuralnetsim.NeuralCircuit]],\n duration: float,\n kernel_parameters: Dict\n) -> Dict[int, np.ndarray]:\n \"\"\"\n\n :param graph:\n :param rng:\n :param par:\n :param x0:\n :param par_key:\n :param parameter_path:\n :param circuit_type:\n :param duration:\n :param kernel_parameters:\n :return:\n \"\"\"\n kernel_parameters.update({'grng_seed': rng.randint(1, 2e5),\n **kernel_parameters})\n circuit_parameters = neuralnetsim.load(parameter_path)\n circuit_parameters.network = graph\n circuit_parameters.from_optimizer(x0)\n circuit_parameters.set_par(par_key, par)\n with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,\n circuit_parameters, rng) as circuit:\n circuit.run(duration)\n # if not circuit.run(duration,\n # memory_guard={\n # 'duration': 1000.0,\n # 'max_spikes': 8000 # ~10 spikes/ms\n # }):\n # return {node: np.ndarray([])\n # for node in circuit_parameters.network.nodes()}\n return circuit.get_spike_trains()\n\n\ndef simulate_grid(\n x0,\n par_range: 
Union[List[float], np.ndarray],\n par_key: str,\n parameter_path: Path,\n fitted_graph_path: Path,\n name: str,\n client: Client,\n duration: float,\n seed: int,\n circuit_type: Type,\n save_path: Path,\n kernel_parameters: Dict[str, Any] = None,\n):\n \"\"\"\n\n :param x0:\n :param par_range:\n :param par_key:\n :param parameter_path:\n :param fitted_graph_path:\n :param name:\n :param client:\n :param duration:\n :param seed:\n :param circuit_type:\n :param save_path:\n :param kernel_parameters:\n :return:\n \"\"\"\n if kernel_parameters is None:\n kernel_parameters = {}\n fitted_graph_results = neuralnetsim.load(fitted_graph_path)\n rng = np.random.RandomState(seed)\n num_graphs = range(len(fitted_graph_results['graphs']))\n sims = client.map(\n grid_worker,\n [graph\n for _ in par_range\n for graph in fitted_graph_results['graphs']],\n [np.random.RandomState(rng.randint(1, 2**31))\n for _ in par_range\n for _ in num_graphs],\n [par for par in par_range\n for _ in num_graphs],\n pure=False,\n x0=x0,\n par_key=par_key,\n parameter_path=parameter_path,\n circuit_type=circuit_type,\n duration=duration,\n kernel_parameters=kernel_parameters\n )\n data = client.gather(sims)\n neuralnetsim.save(\n {\n 'spike_data': data,\n 'original_graph': fitted_graph_results['original'],\n 'graphs': [graph for _ in par_range\n for graph in fitted_graph_results['graphs']],\n 'target_modularities':\n [mu for _ in par_range\n for mu in fitted_graph_results['target_modularities']],\n 'grid_par': [par for par in par_range for _ in num_graphs],\n 'par_key': par_key,\n 'seed': seed,\n 'name': name,\n 'duration': duration,\n 'kernel_parameters': kernel_parameters\n },\n save_path\n )\n\n\ndef orig_worker(\n rng: np.random.RandomState,\n par: float,\n graph: nx.DiGraph,\n x0: np.ndarray,\n par_key: str,\n parameter_path: Path,\n circuit_type: Union[Type[neuralnetsim.DistributionCircuit],\n Type[neuralnetsim.NeuralCircuit]],\n duration: float,\n kernel_parameters: Dict\n):\n kernel_parameters.update({'grng_seed': rng.randint(1, 2e5),\n **kernel_parameters})\n circuit_parameters = neuralnetsim.load(parameter_path)\n circuit_parameters.network = graph\n circuit_parameters.from_optimizer(x0)\n circuit_parameters.set_par(par_key, par)\n with neuralnetsim.CircuitManager(circuit_type, kernel_parameters,\n circuit_parameters, rng) as circuit:\n if not circuit.run(duration,\n memory_guard={\n 'duration': 1000.0,\n 'max_spikes': 8000 # ~10 spikes/ms\n }):\n return {node: np.ndarray([])\n for node in circuit_parameters.network.nodes()}\n return circuit.get_spike_trains()\n\n\ndef simulate_orig(\n x0,\n par_range: Union[List[float], np.ndarray],\n par_key: str,\n parameter_path: Path,\n orig_graph_path: Path,\n n_trials: int,\n client: Client,\n duration: float,\n seed: int,\n circuit_type: Type,\n save_path: Path,\n kernel_parameters: Dict[str, Any] = None,\n):\n \"\"\"\n\n :param x0:\n :param par_range:\n :param par_key:\n :param parameter_path:\n :param fitted_graph_path:\n :param name:\n :param client:\n :param duration:\n :param seed:\n :param circuit_type:\n :param save_path:\n :param kernel_parameters:\n :return:\n \"\"\"\n if kernel_parameters is None:\n kernel_parameters = {}\n graph = neuralnetsim.load(orig_graph_path)\n rng = np.random.RandomState(seed)\n sims = client.map(\n orig_worker,\n [np.random.RandomState(rng.randint(1, 2**31))\n for _ in par_range\n for _ in range(n_trials)],\n [par for par in par_range\n for _ in range(n_trials)],\n pure=False,\n x0=x0,\n graph=graph,\n par_key=par_key,\n 
parameter_path=parameter_path,\n circuit_type=circuit_type,\n duration=duration,\n kernel_parameters=kernel_parameters\n )\n data = client.gather(sims)\n neuralnetsim.save(\n {\n 'spike_data': data,\n 'control_var': [par for par in par_range for _ in range(n_trials)],\n 'control_key': par_key,\n 'seed': seed,\n 'duration': duration\n },\n save_path\n )\n"
] | [
[
"numpy.random.RandomState",
"numpy.ndarray"
]
] |
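In simulate_grid above, the per-task argument lists are built with nested comprehensions so that the grid parameter is the slow index and the graph the fast one: task i receives par_range[i // n_graphs] together with graphs[i % n_graphs]. A short, pure-Python illustration of that ordering with toy values (not the simulation's inputs):

par_range = [0.1, 0.2]           # slow index: one value per grid point
graphs = ["g0", "g1", "g2"]      # fast index: cycles within each grid point

pairs = [(par, g) for par in par_range for g in graphs]
print(pairs)
# [(0.1, 'g0'), (0.1, 'g1'), (0.1, 'g2'), (0.2, 'g0'), (0.2, 'g1'), (0.2, 'g2')]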
geneti/courseworkproj | [
"5843cc14c2ce01172420befca5d2683f1123096a"
] | [
"pattern_recognition/code/DataSplit.py"
] | [
"import pandas as pd\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport copy\n\nraw_data = pd.read_csv('./raw_data.csv', header = 0, index_col = 0)\nsample_num = raw_data.shape[0]\n\n# sort features by nominal or non-nominal \ndtypes = {}\nfor j in range(raw_data.shape[1]):\n if isinstance(raw_data.iloc[0,j], str) or pd.isna(raw_data.iloc[0,j]):\n dtypes[raw_data.columns[j]] = str\n else:\n dtypes[raw_data.columns[j]] = np.float64\n\ndata = pd.read_csv('./raw_data.csv',sep = ',', header = 0, index_col = 0, dtype = dtypes)\n\n# separate the housing prices into several zones\ndata['PriceLevel'] = 'level'\n\nfor i in range(sample_num):\n if data.iloc[i,79] <= 135000:\n data.iloc[i,80] = 'level_1'\n elif data.iloc[i,79] <= 165000:\n data.iloc[i,80] = 'level_2'\n elif data.iloc[i,79] <= 200000:\n data.iloc[i,80] = 'level_3'\n else:\n data.iloc[i,80] = 'level_4'\ndata = data.drop(columns = 'SalePrice')\n\n#shuffle the data\ndata = data.sample(frac=1).reset_index(drop=True)\nprint('data: ',data)\n\n\ntmp = sample_num*9/10\nprint(data.shape)\ntrain = data.iloc[0:int(tmp),:]\ntest = data.iloc[int(tmp)+1:sample_num,:]\n\ntrain.to_csv('./train.csv')\ntest.to_csv('./test.csv')"
] | [
[
"pandas.read_csv",
"pandas.isna"
]
] |
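The split script above bins SalePrice into four levels with a row-by-row iloc loop over hard-coded thresholds (135k, 165k, 200k); note also that `test = data.iloc[int(tmp)+1:sample_num,:]` starts one row past where `train` ends, so the sample at index int(tmp) lands in neither split. An equivalent, vectorized way to express the same thresholds is pandas.cut; the snippet below uses toy prices rather than the project's CSV and is illustrative only.

import numpy as np
import pandas as pd

prices = pd.Series([100_000, 150_000, 180_000, 250_000])
levels = pd.cut(
    prices,
    bins=[-np.inf, 135_000, 165_000, 200_000, np.inf],  # right-closed bins match the <= checks
    labels=["level_1", "level_2", "level_3", "level_4"],
)
print(levels.tolist())  # ['level_1', 'level_2', 'level_3', 'level_4']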
GyroscopeHQ/keras-rl | [
"35f9b50c3b35f52722d740e8ee42e33c1750e44a"
] | [
"rl/policy.py"
] | [
"from __future__ import division\nimport numpy as np\n\nfrom rl.util import *\n\n\nclass Policy(object):\n def _set_agent(self, agent):\n self.agent = agent\n\n @property\n def metrics_names(self):\n return []\n\n @property\n def metrics(self):\n return []\n\n def select_action(self, **kwargs):\n raise NotImplementedError()\n\n def get_config(self):\n return {}\n\n\nclass LinearAnnealedPolicy(Policy):\n def __init__(self, inner_policy, attr, value_max, value_min, value_test, nb_steps):\n if not hasattr(inner_policy, attr):\n raise ValueError('Policy \"{}\" does not have attribute \"{}\".'.format(attr))\n\n super(LinearAnnealedPolicy, self).__init__()\n\n self.inner_policy = inner_policy\n self.attr = attr\n self.value_max = value_max\n self.value_min = value_min\n self.value_test = value_test\n self.nb_steps = nb_steps\n\n def get_current_value(self):\n if self.agent.training:\n # Linear annealed: f(x) = ax + b.\n a = -float(self.value_max - self.value_min) / float(self.nb_steps)\n b = float(self.value_max)\n value = max(self.value_min, a * float(self.agent.step) + b)\n else:\n value = self.value_test\n return value\n\n def select_action(self, **kwargs):\n setattr(self.inner_policy, self.attr, self.get_current_value())\n return self.inner_policy.select_action(**kwargs)\n\n @property\n def metrics_names(self):\n return ['mean_{}'.format(self.attr)]\n\n @property\n def metrics(self):\n return [getattr(self.inner_policy, self.attr)]\n\n def get_config(self):\n config = super(LinearAnnealedPolicy, self).get_config()\n config['attr'] = self.attr\n config['value_max'] = self.value_max\n config['value_min'] = self.value_min\n config['value_test'] = self.value_test\n config['nb_steps'] = self.nb_steps\n config['inner_policy'] = get_object_config(self.inner_policy)\n return config\n\n\nclass EpsGreedyQPolicy(Policy):\n def __init__(self, eps=.1):\n super(EpsGreedyQPolicy, self).__init__()\n self.eps = eps\n\n def select_action(self, q_values):\n assert q_values.ndim == 1\n nb_actions = q_values.shape[0]\n\n if np.random.uniform() < self.eps:\n action = np.random.random_integers(0, nb_actions-1)\n else:\n action = np.argmax(q_values)\n return action\n\n def get_config(self):\n config = super(EpsGreedyQPolicy, self).get_config()\n config['eps'] = self.eps\n return config\n\n\nclass GreedyQPolicy(Policy):\n def select_action(self, q_values):\n assert q_values.ndim == 1\n action = np.argmax(q_values)\n return action\n\n\nclass BoltzmannQPolicy(Policy):\n def __init__(self, tau=1., clip=(-500., 500.)):\n super(BoltzmannQPolicy, self).__init__()\n self.tau = tau\n self.clip = clip\n\n def select_action(self, q_values):\n assert q_values.ndim == 1\n q_values = q_values.astype('float64')\n nb_actions = q_values.shape[0]\n\n exp_values = np.exp(np.clip(q_values / self.tau, self.clip[0], self.clip[1]))\n probs = exp_values / np.sum(exp_values)\n action = np.random.choice(range(nb_actions), p=probs)\n return action\n\n def get_config(self):\n config = super(BoltzmannQPolicy, self).get_config()\n config['tau'] = self.tau\n config['clip'] = self.clip\n return config\n"
] | [
[
"numpy.random.uniform",
"numpy.sum",
"numpy.argmax",
"numpy.clip",
"numpy.random.random_integers"
]
] |
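Two small observations on the keras-rl policies above. First, the ValueError in LinearAnnealedPolicy.__init__ formats two placeholders but passes only `attr` to .format, so the error path itself fails with an IndexError; passing the inner policy's class name as the first argument would fix it. Second, BoltzmannQPolicy samples from a temperature-scaled softmax of clipped Q-values, and LinearAnnealedPolicy anneals an attribute linearly as f(x) = ax + b. The sketch below mirrors that sampling and annealing logic on toy values; it is illustrative only, not the library's API.

import numpy as np

def boltzmann_action(q_values, tau=1.0, clip=(-500.0, 500.0)):
    # Sample an action index from a temperature-scaled softmax of q_values.
    q = np.asarray(q_values, dtype=np.float64)
    exp_q = np.exp(np.clip(q / tau, clip[0], clip[1]))  # clipping avoids overflow in exp()
    probs = exp_q / np.sum(exp_q)
    return np.random.choice(len(q), p=probs)

def linear_annealed_value(step, value_max=1.0, value_min=0.1, nb_steps=10000):
    # f(x) = ax + b, clamped at value_min once the schedule finishes.
    a = -float(value_max - value_min) / float(nb_steps)
    return max(value_min, a * float(step) + value_max)

print(boltzmann_action([1.0, 2.0, 0.5], tau=0.1))  # almost always 1 at low temperature
print(linear_annealed_value(5000))                 # 0.55, halfway through the schedule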
efthymis-mcl/algomorphism | [
"69a41e98e10458ac333da1350fc39da8a00b80d3"
] | [
"algomorphism/datasets/graph_base.py"
] | [
"from typing import List\nimport numpy as np\n\n\nclass GraphBaseDataset(object):\n def __int__(self):\n pass\n\n @staticmethod\n def numpy_to_mega_batch(x_list, a_list):\n \"\"\"\n List of numpy arrays to mega batch array.\n\n Args:\n x_list (`list[np.ndarray]`): feature matrixes.\n a_list (`list[np.ndarray]`): adjency matrixes.\n Returns:\n `tuple[np.ndarray, np.ndarray]`: batched x, a lists\n Examples:\n\n >>> graph_base = GraphBaseDataset()\n >>> x_list = [np.random.rand(6,4) for _ in range(6)]+[np.random.rand(3,4) for _ in range(6)]\n >>> a_list = [np.random.rand(6,6) for _ in range(6)]+[np.random.rand(3,3) for _ in range(6)]\n >>> x, a = graph_base.numpy_to_mega_batch(x,a)\n >>> print(a.shape)\n (12, 6, 6)\n >>> print(x.shape)\n (12, 6, 4)\n \"\"\"\n\n def a_post_concat(a):\n a_con = np.concatenate([a, np.zeros((a.shape[0], max_d - a.shape[1]))], axis=1)\n a_con = np.concatenate([a_con, np.zeros((max_d - a_con.shape[0], a_con.shape[1]))], axis=0)\n return a_con\n\n def x_post_concat(x):\n x_con = np.concatenate([x, np.zeros((max_d - x.shape[0], x.shape[1]))], axis=0)\n return x_con\n\n max_d = max([a.shape[0] for a in a_list])\n mega_batch_a = []\n mega_batch_x = []\n for (x, a) in zip(x_list, a_list):\n if a.shape[0] < max_d:\n a = a_post_concat(a)\n x = x_post_concat(x)\n mega_batch_a.append(a)\n mega_batch_x.append(x)\n mega_batch_a = np.array(mega_batch_a)\n mega_batch_x = np.stack(mega_batch_x, axis=0)\n\n return mega_batch_x, mega_batch_a\n\n @staticmethod\n def numpy_to_disjoint(x_list, a_list):\n \"\"\"\n Args:\n x_list (`List[np.ndarray]`): feature matrixes,\n a_list (`List[np.ndarray]`): adajence matrixes.\n\n Returns:\n `tuple[np.ndarray, np.ndarray]`: disjoint matrixes of x_list, a_list.\n\n Examples:\n >>> x_list = [np.random.rand(6,4) for _ in range(6)]+[np.random.rand(3,4) for _ in range(6)]\n >>> a_list = [np.random.rand(6,6) for _ in range(6)]+[np.random.rand(3,3) for _ in range(6)]\n >>> gbd = GraphBaseDataset()\n >>> x, a = gbd.numpy_to_disjoint(x_list,a_list)\n >>> print(a.shape)\n (54, 54)\n >>> print(x.shape)\n (54, 48)\n \"\"\"\n def zero_padding_concat(x, x_disjoint, nx, ndx):\n x_disjoint = np.concatenate([x_disjoint, np.zeros((x_disjoint.shape[0], nx))], axis=1)\n x = np.concatenate([np.zeros((x.shape[0], ndx)), x], axis=1)\n x_disjoint = np.concatenate([x_disjoint, x], axis=0)\n return x_disjoint\n\n a_disjoint = a_list[0]\n x_disjoint = x_list[0]\n for a, x in zip(a_list[1:], x_list[1:]):\n na = a.shape[1]\n nda = a_disjoint.shape[1]\n nx = x.shape[1]\n ndx = x_disjoint.shape[1]\n a_disjoint = zero_padding_concat(a, a_disjoint, na, nda)\n x_disjoint = zero_padding_concat(x, x_disjoint, nx, ndx)\n\n return x_disjoint, a_disjoint\n\n @staticmethod\n def renormalization(a):\n \"\"\"\n Give an adjacency matrix and returns the renormalized.\n\n Args:\n a: A ndarray, adjacency matrix.\n\n Returns:\n atld: A ndarray, renormalized adjacency matrix.\n\n Examples:\n >>> grapbase = GraphBaseDataset()\n >>> a = np.array([[[0,1,1], [1,0,0], [1,0,0]]])\n >>> atld = grapbase.renormalization(a)\n >>> print(atld)\n [[[0.33333333 0.40824829 0.40824829]\n [0.40824829 0.5 0. ]\n [0.40824829 0. 0.5 ]]]\n\n References:\n Thomas N. Kipf, Max Welling. 
Semi-supervised classification with graph convolutional networks,\n https://arxiv.org/pdf/1609.02907.pdf\n \"\"\"\n\n ai = a + np.eye(a.shape[-1])\n degree = np.sum(ai, axis=-1)\n degree = np.eye(a.shape[-1]) * degree\n degree_inv = np.linalg.inv(degree)\n degree_inv = np.power(degree_inv, 0.5)\n\n atld = np.matmul(degree_inv, ai)\n atld = np.matmul(atld, degree_inv)\n return atld\n\n\n"
] | [
[
"numpy.sum",
"numpy.matmul",
"numpy.eye",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.stack",
"numpy.power",
"numpy.array",
"numpy.concatenate"
]
] |
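The renormalization docstring above implements the renormalization trick from Kipf & Welling, atld = D^{-1/2} (A + I) D^{-1/2}, where D is the degree matrix of A + I. The stand-alone check below recomputes the 3-node example from that docstring directly with NumPy (it does not call the repository's class) and reproduces the printed values.

import numpy as np

a = np.array([[0., 1., 1.],
              [1., 0., 0.],
              [1., 0., 0.]])               # node 0 linked to nodes 1 and 2

a_tilde = a + np.eye(3)                    # add self-loops: A + I
d_inv_sqrt = np.diag(1.0 / np.sqrt(a_tilde.sum(axis=1)))
a_hat = d_inv_sqrt @ a_tilde @ d_inv_sqrt  # D^-1/2 (A + I) D^-1/2

print(np.round(a_hat, 8))
# [[0.33333333 0.40824829 0.40824829]
#  [0.40824829 0.5        0.        ]
#  [0.40824829 0.         0.5       ]]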
goncalovalverde/seshat | [
"deff5cdd985f81ac2b4ebd077eea11f7c4f4118f"
] | [
"reader/gitlab.py"
] | [
"import gitlab\nimport dateutil.parser\nimport reader.cache\nimport hashlib\nimport logging\nfrom pandas import DataFrame, NaT\nfrom datetime import datetime\n\n\nclass Gitlab:\n def __init__(self, gitlab_config: dict, workflow: dict):\n self.gitlab_config = gitlab_config\n self.workflow = workflow\n\n def cache_name(self):\n token = self.gitlab_config[\"token\"]\n workflow = str(self.workflow)\n url = self.gitlab_config[\"url\"]\n project_id = (\n self.gitlab_config.get(\"project_id\")\n if self.gitlab_config.get(\"project_id\")\n else self.gitlab_config.get(\"group_id\")\n )\n name_hashed = hashlib.md5(\n (token + url + workflow + str(project_id)).encode(\"utf-8\")\n )\n return name_hashed.hexdigest()\n\n self.cache = reader.cache.Cache(cache_name(self))\n\n def get_gitlab_instance(self):\n gl = gitlab.Gitlab(\n self.gitlab_config[\"url\"], private_token=self.gitlab_config[\"token\"]\n )\n gl.auth()\n\n return gl\n\n def get_issue_data(self, issue):\n issue_data = {\n \"Key\": issue.id,\n \"Type\": \"issue\",\n \"Creator\": issue.author[\"name\"],\n \"Created\": dateutil.parser.parse(issue.created_at).replace(tzinfo=None),\n \"Done\": (\n dateutil.parser.parse(issue.created_at).replace(tzinfo=None)\n if issue.created_at\n else NaT\n ),\n }\n return issue_data\n\n def get_issues(self):\n gl = self.get_gitlab_instance()\n\n if self.gitlab_config.get(\"project_id\"):\n project = gl.projects.get(self.gitlab_config[\"project_id\"])\n issues = project.issues.list()\n\n elif self.gitlab_config.get(\"group_id\"):\n group = gl.groups.get(self.gitlab_config[\"group_id\"])\n issues = group.issues.list()\n\n else:\n raise Exception(\"No valid project_id or group_id found!\")\n\n return issues\n\n def get_data(self) -> DataFrame:\n\n if self.gitlab_config[\"cache\"] and self.cache.is_valid():\n logging.debug(\"Getting gitlab data from cache\")\n df_issue_data = self.cache.read()\n return df_issue_data\n\n issues = self.get_issues()\n\n # issue_data = {\"Key\": [], \"Type\": [], \"Creator\": [], \"Created\": [], \"Done\": []}\n issues_data = [self.get_issue_data(issue) for issue in issues]\n\n df_issues_data = DataFrame(issues_data)\n\n if self.gitlab_config[\"cache\"]:\n logging.debug(\"Storing gitlab issue data in cache\")\n self.cache.write(df_issues_data)\n\n return df_issues_data\n\n def refresh_data(self, date: datetime) -> DataFrame:\n if self.gitlab_config[\"cache\"] and self.cache.is_valid():\n self.cache.clean()\n\n return self.get_data()\n"
] | [
[
"pandas.DataFrame"
]
] |
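In the Gitlab reader above, the line `self.cache = reader.cache.Cache(cache_name(self))` sits inside cache_name after its return, so it can never execute and self.cache is never assigned before get_data uses it. A conventional arrangement builds the cache in __init__ from the instance method. The sketch below shows that shape with a stand-in Cache class, since reader.cache is not part of this excerpt; it is a sketch of the intent, not the project's code.

import hashlib

class Cache:
    # Stand-in for reader.cache.Cache: only records the name it was given.
    def __init__(self, name):
        self.name = name

class Gitlab:
    def __init__(self, gitlab_config, workflow):
        self.gitlab_config = gitlab_config
        self.workflow = workflow
        # Construct the cache once the instance exists, via the instance method.
        self.cache = Cache(self.cache_name())

    def cache_name(self):
        token = self.gitlab_config["token"]
        url = self.gitlab_config["url"]
        project_id = str(self.gitlab_config.get("project_id") or self.gitlab_config.get("group_id"))
        return hashlib.md5((token + url + str(self.workflow) + project_id).encode("utf-8")).hexdigest()

gl = Gitlab({"token": "t", "url": "https://gitlab.example.com", "project_id": 42}, {"workflow": []})
print(gl.cache.name)   # 32-character md5 hex digest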
ahkarimi/MMTOD | [
"d8160f643a0ee1943630b45fa094617dd2237c7e"
] | [
"main.py"
] | [
"from flask import Flask, request, jsonify, render_template, session\nimport os\nimport pickle\nimport datetime\nimport time\nimport pandas as pd\nimport numpy as np\nimport random\nimport logging\n\n##__________________________________ GPT-3 code __________________________________________##\nfrom colorama import Fore, Back, Style\nimport torch\nfrom transformers import GPT2Tokenizer, GPT2LMHeadModel, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer\nimport sys, os\nimport pprint\nimport numpy as np\nimport torch\nfrom image_handler import Handler\n\nimg_handler_obj = Handler()\n# args = ArgsParser().parse()\n\n# device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n# n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n\npp = pprint.PrettyPrinter(indent=4)\nprev_beliefs = {}\ndomain_queue = []\n\n# sys.stdout.flush()\n\nmodel_checkpoint = \"./output/checkpoint-108420\"\n\ndecoding = \"DECODING METHOD HERE\"\n\n## if decoding == 'nucleus':\n## TOP_P = float(sys.argv[3])\n\ndelay = 0.5\n\n## multiwoz_db = MultiWozDB()\n\nprint('\\nLoading Model', end=\"\")\n\nif 'openai' in model_checkpoint:\n tokenizer = OpenAIGPTTokenizer.from_pretrained(model_checkpoint)\n model = OpenAIGPTLMHeadModel.from_pretrained(model_checkpoint)\nelse:\n tokenizer = GPT2Tokenizer.from_pretrained(model_checkpoint)\n model = GPT2LMHeadModel.from_pretrained(model_checkpoint)\n\n# model.load_state_dict(torch.load(model_checkpoint))\nmodel.eval()\nmodel.to('cpu')\n\nbreak_tokens = tokenizer.encode(tokenizer.eos_token) + tokenizer.encode('?') + tokenizer.encode('!')\n# break_tokens = tokenizer.encode(tokenizer.eos_token)\nMAX_LEN = model.config.n_ctx\n\nif 'openai-gpt' in model_checkpoint:\n tokenizer.add_special_tokens({'bos_token': '<|endoftext|>'})\n tokenizer.add_special_tokens({'eos_token': '<|endoftext|>'})\n\nsample = 1\n#print()\n#print('\\n What would you like to ask?')\n# history = []\ncontext = ''\ninput_text = ''\nturn = 0\n\n\n# dbmatch = 0\n\ndef get_belief_new_dbsearch(sent):\n if '<|belief|>' in sent:\n tmp = sent.strip(' ').split('<|belief|>')[-1].split('<|endofbelief|>')[0]\n # elif 'belief.' 
in sent:\n # tmp = sent.strip(' ').split('<belief>')[-1].split('<action>')[0]\n # elif 'belief' not in sent:\n # return []\n else:\n return []\n # else:\n # raise TypeError('unknown belief separator')\n tmp = tmp.strip(' .,')\n # assert tmp.endswith('<endofbelief>')\n tmp = tmp.replace('<|endofbelief|>', '')\n tmp = tmp.replace('<|endoftext|>', '')\n belief = tmp.split(',')\n new_belief = []\n for bs in belief:\n bs = bs.strip(' .,')\n if bs not in new_belief:\n new_belief.append(bs)\n return new_belief\n\n\ndef convert_belief(belief):\n dic = {}\n for bs in belief:\n if bs in [' ', '']:\n continue\n domain = bs.split(' ')[0]\n slot = bs.split(' ')[1]\n if slot == 'book':\n slot = ' '.join(bs.split(' ')[1:3])\n value = ' '.join(bs.split(' ')[3:])\n else:\n value = ' '.join(bs.split(' ')[2:])\n if domain not in dic:\n dic[domain] = {}\n try:\n dic[domain][slot] = value\n except:\n print(domain)\n print(slot)\n return dic\n\ndef get_turn_domain(beliefs, q):\n for k in beliefs.keys():\n if k not in q:\n q.append(k)\n turn_domain = k\n return turn_domain\n return q[-1]\n\n\n\n\n\ndef get_action_new(sent):\n if '<|action|>' not in sent:\n return []\n elif '<|belief|>' in sent:\n tmp = sent.split('<|belief|>')[-1].split('<|response|>')[0].split('<|action|>')[-1].strip()\n elif '<|action|>' in sent:\n tmp = sent.split('<|response|>')[0].split('<|action|>')[-1].strip()\n else:\n return []\n tmp = tmp.strip(' .,')\n # if not tmp.endswith('<endofaction>'):\n # ipdb.set_trace()\n tmp = tmp.replace('<|endofaction|>', '')\n tmp = tmp.replace('<|endoftext|>', '')\n action = tmp.split(',')\n new_action = []\n for act in action:\n if act == '':\n continue\n act = act.strip(' .,')\n if act not in new_action:\n new_action.append(act)\n return new_action\n\n\n\ndef get_response_new(sent, venuename):\n if '<|response|>' in sent:\n tmp = sent.split('<|belief|>')[-1].split('<|action|>')[-1].split('<|response|>')[-1]\n else:\n return ''\n # if '<belief>' in sent:\n # tmp = sent.split('<belief>')[-1].split('<action>')[-1].split('<response>')[-1]\n # elif '<action>' in sent:\n # tmp = sent.split('<action>')[-1].split('<response>')[-1]\n # elif '<response>' in sent:\n # tmp = sent.split('<response>')[-1]\n # else:\n # tmp = sent\n tmp = tmp.strip(' .,')\n # assert tmp.endswith('<endofresponse>')\n tmp = tmp.replace('<|endofresponse|>', '')\n tmp = tmp.replace('<|endoftext|>', '')\n tokens = tokenizer.encode(tmp)\n new_tokens = []\n for tok in tokens:\n # if tok in break_tokens:\n if tok in tokenizer.encode(tokenizer.eos_token):\n continue\n new_tokens.append(tok)\n # ipdb.set_trace()\n response = tokenizer.decode(new_tokens).strip(' ,.')\n response = response.replace('[venuename]', '{}'.format(venuename))\n return response\n\n\ndef get_venuename(bs):\n name = ''\n if 'venuename' in bs[0]:\n tmp_list = bs[0].split('venuename')[-1].split(' ')\n #action = tmp_list[-1]\n name = ' '. 
join(tmp_list[:-1])\n return name\n\n\ndef get_open_span(bs):\n action_names = []\n for tmp in bs[0].split(';'):\n if 'open span' in tmp:\n action = tmp.split('open span')[-1].split(' ')[-1]\n name = tmp.split('open span')[-1].split(action)[0]\n action_names.append((name, action))\n return action_names\n\n\n##____________________________ End of GPT-3 code __________________________________________##\n\n\nlogging.basicConfig(level=logging.DEBUG)\n\napp = Flask(__name__)\napp.secret_key = 'MY_SECRET_KEY'\n\n\ndef label_Message(message):\n logging.warning('In label_Message')\n # load the model from disk\n model_filename = 'model/model.pkl'\n tfidf_filename = 'model/tfidf.pkl'\n \n model = pickle.load(open(model_filename, 'rb'))\n tfidf = pickle.load(open(tfidf_filename, 'rb'))\n \n pred = model.predict(tfidf.transform([message]))\n message_label = pred[0]\n \n \n logging.warning('Out label_Message')\n return message_label\n\ndef label_to_persian(label):\n res = ''\n if label == 'HAPPY':\n res = 'خوشحال'\n elif label == 'SAD':\n res = 'ناراحت'\n\n return\n\ndef Create_message(message):\n global context\n global turn\n logging.warning('In create message')\n global result\n label = session['label']\n state = session['state']\n result = session['result']\n result['response'] = ''\n result['status'] = 'on'\n result['has_image'] = 'False'\n\n raw_text = message\n input_text = raw_text.replace('you> ', '')\n if input_text in ['q', 'quit']:\n return \"Ok, bye. Just for now!\"\n\n user = '<|user|> {}'.format(input_text)\n context = context + ' ' + user\n text = '<|endoftext|> <|context|> {} <|endofcontext|>'.format(context)\n\n # print(context)\n\n text = text.strip()\n indexed_tokens = tokenizer.encode(text)\n\n if len(indexed_tokens) > MAX_LEN:\n indexed_tokens = indexed_tokens[-1 * MAX_LEN:]\n\n # Convert indexed tokens in a PyTorch tensor\n tokens_tensor = torch.tensor([indexed_tokens])\n\n # If you have a GPU, put everything on cuda\n tokens_tensor = tokens_tensor.to('cpu')\n predicted_index = indexed_tokens[-1]\n\n with torch.no_grad():\n # Greedy decoding\n\n while predicted_index not in break_tokens:\n outputs = model(tokens_tensor)\n predictions = outputs[0]\n predicted_index = torch.argmax(predictions[0, -1, :]).item()\n indexed_tokens += [predicted_index]\n tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')\n if len(indexed_tokens) > MAX_LEN:\n break\n if tokenizer.decode(indexed_tokens).endswith('<|endofbelief|>'):\n break\n\n tmp_pred = tokenizer.decode(indexed_tokens)\n\n print('\\ntmp_pred:\\n', tmp_pred)\n\n belief_text = get_belief_new_dbsearch(tmp_pred)\n print('\\nbelief_text:\\n', belief_text)\n\n beliefs = convert_belief(belief_text)\n # domain = list(beliefs.keys())[0]\n domain = get_turn_domain(beliefs, domain_queue)\n\n # Convert indexed tokens in a PyTorch tensor\n tokens_tensor = torch.tensor([indexed_tokens])\n\n # If you have a GPU, put everything on cuda\n tokens_tensor = tokens_tensor.to('cpu')\n predicted_index = indexed_tokens[-1]\n\n truncate_action = False\n # Predict all tokens\n with torch.no_grad():\n while predicted_index not in break_tokens:\n outputs = model(tokens_tensor)\n predictions = outputs[0]\n predicted_index = torch.argmax(predictions[0, -1, :]).item()\n indexed_tokens += [predicted_index]\n if len(indexed_tokens) > MAX_LEN:\n break\n\n predicted_text = tokenizer.decode(indexed_tokens)\n if '<|action|>' in predicted_text:\n generated_actions = predicted_text.split('<|action|>')[-1].split('<|endofaction|>')[0].split(',')\n new_actions = []\n for a 
in generated_actions:\n if a in ['', ' ']:\n continue\n new_actions.append(a.strip())\n len_actions = len(new_actions)\n if len(list(set(new_actions))) > len(new_actions) or (len_actions > 10 and not truncate_action):\n # ipdb.set_trace()\n actions = '<|action|> {} <|endofaction|>'.format(' , '.join(list(set(new_actions))))\n indexed_tokens = tokenizer.encode('{} {}'.format(predicted_text.split('<|action|>')[0], actions))\n # print('action truncated')\n truncate_action = True\n tokens_tensor = torch.tensor([indexed_tokens]).to('cpu')\n\n predicted_text = tokenizer.decode(indexed_tokens)\n print('\\npredicted_text:\\n', predicted_text)\n\n action_text = get_action_new(predicted_text)\n print('\\naction_text:\\n', action_text)\n\n venuename = get_venuename(action_text)\n #print('\\nVenuename:\\n', venuename)\n\n response_text = get_response_new(predicted_text, venuename)\n print('\\nresponse_text:\\n', response_text)\n #print(predicted_text)\n\n\n\n open_spans = get_open_span(action_text)\n print('\\open_spans:\\n', open_spans)\n\n # handling images\n\n if venuename:\n result['has_image'] = 'True'\n images = img_handler_obj.get_imgs_url(query=venuename + \"in Singapore\", num_of_img=5)\n result['image'] = images[0]\n print(images)\n\n delex_system = '{}'.format(response_text)\n context = context + ' ' + delex_system\n\n turn += 1\n prev_beliefs = beliefs\n\n result['response'] = response_text\n session['result'] = result\n return result\n\n\n \[email protected]('/')\ndef index():\n session['state'] = 'start'\n session['label'] = ''\n session['result'] = {}\n return render_template('index2.html')\n\[email protected]('/send_message', methods=['POST'])\ndef send_message():\n message = request.form['message']\n response_text = Create_message(message)\n\n \n #print('\\nRESPONSE TEXT ', response_text)\n return jsonify(response_text)\n\n\n\n\n"
] | [
[
"torch.no_grad",
"torch.tensor",
"torch.argmax"
]
] |
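
The `apis` column of the record above lists the three torch calls its dialogue code relies on. As an illustrative sketch only (the `gpt2` checkpoint name, `MAX_LEN` value, and the `break_tokens` stand-in are assumptions, not the repository's actual fine-tuned model or special tokens), the greedy-decoding loop those calls implement looks roughly like this:

```python
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')      # assumed base checkpoint
model = GPT2LMHeadModel.from_pretrained('gpt2')
model.eval()

MAX_LEN = 128                                   # assumed cap, mirrors the record's length check
break_tokens = tokenizer.encode('.')            # stand-in for the record's end-of-segment tokens

indexed_tokens = tokenizer.encode('<|context|> hello there <|endofcontext|>')
tokens_tensor = torch.tensor([indexed_tokens])  # torch.tensor
predicted_index = indexed_tokens[-1]

with torch.no_grad():                           # torch.no_grad
    while predicted_index not in break_tokens and len(indexed_tokens) < MAX_LEN:
        outputs = model(tokens_tensor)
        logits = outputs[0]                     # (1, seq_len, vocab_size)
        predicted_index = torch.argmax(logits[0, -1, :]).item()   # torch.argmax
        indexed_tokens.append(predicted_index)
        tokens_tensor = torch.tensor([indexed_tokens])

print(tokenizer.decode(indexed_tokens))
```

The record's own code runs this loop twice per turn: once until the belief-state marker is generated, and once more to generate the actions and the delexicalized response.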
jssprz/attentive-visual-semantic-specialized-network-for-video-captioning | [
"00815884ba892c00db2d3778bd0083618ff6d2d7"
] | [
"test.py"
] | [
"import os\nimport argparse\nimport pickle\n\nfrom utils import decode_from_tokens\nfrom vocabulary import Vocabulary\nfrom configuration_file import ConfigurationFile\nfrom model.encoder import Encoder\nfrom model.decoder import AVSSNDecoder\n\nimport h5py\nimport torch\nimport numpy as np\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate captions por test samples')\n parser.add_argument('-chckpt', '--checkpoint_path', type=str, default='pretrain/chckpt.pt',\n help='Set the path to pre-trained model (default is pretrain/chckpt.pt).')\n parser.add_argument('-data', '--dataset_folder', type=str, default='data/MSVD',\n help='Set the path to dataset folder (default is data/MSVD).')\n parser.add_argument('-out', '--output_folder', type=str, default='results/MSVD',\n help='Set the path to output folder (default is results/MSVD).')\n\n args = parser.parse_args()\n\n # load vocabulary\n with open(os.path.join(args.dataset_folder, 'corpus.pkl'), \"rb\") as f:\n corpus = pickle.load(f)\n idx2word_dict = corpus[4]\n vocab = Vocabulary.from_idx2word_dict(idx2word_dict, False)\n print('Size of vocabulary: {}'.format(len(vocab)))\n\n # Pretrained Embedding\n pretrained_embedding = torch.Tensor(corpus[5])\n\n #max_frames = 20 #30\n cnn_feature_size = 2048\n c3d_feature_size = 4096\n i3d_feature_size = 400\n res_eco_features_size = 3584\n projected_size = 512\n hidden_size = 1024 # Number of hidden layer units of the cyclic network\n mid_size = 128 # The middle of the boundary detection layer represents the dimension\n\n n_tags = 300\n global_tagger_hidden_size = 1024\n specific_tagger_hidden_size = 128\n hidden_size = 1024\n embedding_size = 300 #1024\n rnn_in_size = 300 #1024\n rnn_hidden_size = 1024\n\n config = ConfigurationFile(os.path.join(args.dataset_folder, 'config.ini'), 'attn-vscn-max')\n\n # Models\n encoder = Encoder(cnn_feature_size=cnn_feature_size,\n c3d_feature_size=c3d_feature_size,\n i3d_feature_size=i3d_feature_size,\n n_tags=n_tags,\n hidden_size=hidden_size,\n global_tagger_hidden_size=global_tagger_hidden_size,\n specific_tagger_hidden_size=specific_tagger_hidden_size,\n n_layers=config.encoder_num_layers,\n input_dropout_p=config.encoder_dropout_p,\n rnn_dropout_p=config.encoder_dropout_p,\n bidirectional=config.encoder_bidirectional,\n rnn_cell=config.encoder_rnn_cell,\n device='cpu')\n\n decoder = AVSSNDecoder(in_seq_length=config.max_frames, \n out_seq_length=config.max_words,\n n_feats=res_eco_features_size + 512,\n n_tags=n_tags,\n embedding_size=embedding_size,\n pretrained_embedding=pretrained_embedding,\n hidden_size=hidden_size, \n rnn_in_size=rnn_in_size, \n rnn_hidden_size=rnn_hidden_size,\n vocab=vocab,\n device='cpu',\n rnn_cell=config.decoder_rnn_cell,\n encoder_num_layers=config.encoder_num_layers,\n encoder_bidirectional=config.encoder_bidirectional,\n num_layers=config.decoder_num_layers,\n dropout_p=config.decoder_dropout_p,\n beam_size=config.decoder_beam_size,\n temperature=config.decoder_temperature, \n train_sample_max=config.decoder_train_sample_max,\n test_sample_max=config.decoder_test_sample_max,\n beam_search_logic = config.decoder_beam_search_logic)\n\n # Checkpoint\n checkpoint = torch.load(args.checkpoint_path, map_location='cpu')\n\n # 1. 
filter out unnecessary keys for encoder\n chckpt_dict = {k: v for k, v in checkpoint['encoder'].items() if k not in ['fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias']}\n encoder_dict = encoder.state_dict()\n encoder_dict.update(chckpt_dict)\n\n encoder.load_state_dict(encoder_dict)\n decoder.load_state_dict(checkpoint['decoder'])\n\n #load test set features\n test_vidxs = sorted(list(set(corpus[2][1])))\n\n with h5py.File(os.path.join(args.dataset_folder, config.features_path), 'r') as feats_file:\n print('loading visual feats...')\n dataset = feats_file[config.dataset_name]\n cnn_feats = torch.from_numpy(dataset['cnn_features'][test_vidxs]).float()\n c3d_feats = torch.from_numpy(dataset['c3d_features'][test_vidxs]).float()\n cnn_globals = torch.zeros(cnn_feats.size(0), 512) # torch.from_numpy(dataset['cnn_globals'][test_vidxs]).float()\n cnn_sem_globals = torch.from_numpy(dataset['cnn_sem_globals'][test_vidxs]).float()\n f_counts = dataset['count_features'][test_vidxs]\n print('visual feats loaded')\n\n res_eco_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'resnext_eco.npy'))[test_vidxs])\n tags_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'tag_feats.npy'))[test_vidxs])\n\n encoder.eval()\n decoder.eval()\n\n with torch.no_grad():\n video_encoded = encoder(cnn_feats, c3d_feats, cnn_globals, tags_globals, res_eco_globals)\n logits, tokens = decoder(video_encoded, None, teacher_forcing_ratio=0)\n\n scores = logits.max(dim=2)[0].mean(dim=1)\n\n confidences, sentences = [], []\n for score, seq in zip(scores, tokens):\n s = decode_from_tokens(seq, vocab)\n print(score, s)\n sentences.append(s)\n confidences.append(score)\n\n if not os.path.exists(args.output_folder):\n os.makedirs(args.output_folder)\n\n with open(os.path.join(args.output_folder, 'predictions.txt'), 'w') as fo:\n for vidx, sentence in zip(test_vidxs, sentences):\n fo.write(f'{vidx}\\t{sentence}\\n')\n"
] | [
[
"torch.from_numpy",
"torch.no_grad",
"torch.load",
"torch.Tensor"
]
] |
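
For the video-captioning record above, the listed torch calls cover checkpoint loading and feature-tensor inference. A minimal sketch of that pattern follows, assuming the checkpoint path from the record exists and substituting a placeholder `torch.nn.Linear` for the repository's `Encoder`/`AVSSNDecoder` classes:

```python
import numpy as np
import torch

# torch.Tensor, as in the record's pretrained-embedding setup (shape here is a placeholder)
pretrained_embedding = torch.Tensor(np.zeros((100, 300)))

encoder = torch.nn.Linear(2048, 512)        # placeholder, NOT the repository's Encoder class

# torch.load: read the pre-trained weights on CPU (path taken from the record's default)
checkpoint = torch.load('pretrain/chckpt.pt', map_location='cpu')

# keep only checkpoint entries that exist in the placeholder model with matching shapes,
# mirroring how test.py drops unneeded keys before load_state_dict
encoder_dict = encoder.state_dict()
chckpt_dict = {k: v for k, v in checkpoint['encoder'].items()
               if k in encoder_dict and v.shape == encoder_dict[k].shape}
encoder_dict.update(chckpt_dict)
encoder.load_state_dict(encoder_dict)

encoder.eval()
feats = torch.from_numpy(np.random.rand(4, 2048)).float()   # torch.from_numpy
with torch.no_grad():                                        # torch.no_grad
    out = encoder(feats)
print(out.shape)                                             # torch.Size([4, 512])
```

Filtering the checkpoint dictionary before `load_state_dict` is what lets the record's test.py drop the `fc1.*`/`fc2.*` keys that are absent from the inference-time encoder.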
dnidever/apogee | [
"83ad7496a0b4193df9e2c01b06dc36cb879ea6c1"
] | [
"external/synple/synple.py"
] | [
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"Python wrapper for synspec \n\nCalculation of synthetic spectra of stars and convolution with a rotational/Gaussian kernel.\nMakes the use of synspec simpler, and retains the main functionalities (when used from\npython). The command line interface is even simpler but fairly limited. \n\nFor information on\nsynspec visit http://nova.astro.umd.edu/Synspec43/synspec.html.\n\nExample\n-------\n\nTo compute the solar spectrum between 6160 and 6164 angstroms, using a model atmosphere in\nthe file sun.mod (provided with the distribution), with the output going into the file\nsun.syn\n\n $synple.py sun.mod 6160. 6164. \n\nTo force a micro of 1.1 km/s, and convolve the spectrum with a Gaussian kernel with a fwhm \nof 0.1 angstroms\n\n $synple.py sun.mod 6160. 6164. 1.1 0.1\n\nTo perform the calculations above in python and compare the emergent normalized profiles\n\n >>> from synple import syn\n >>> x, y, z = syn('sun.mod', (6160.,6164.))\n >>> x2, y2, z2 = syn('sun.mod', (6160.,6164.), vmicro=1.1, fwhm=0.1)\n\n in plain python\n >>> import matplotlib.pyplot as plt\n >>> plt.ion()\n >>> plt.plot(x,y/z, x2, y2/z2)\n\n or ipython\n In [1]: %pylab\n In [2]: plot(x,y/z, x2, y2/z2)\n\n\n\"\"\"\nimport os\nimport sys\nimport subprocess\nimport numpy as np\nimport glob\nimport time\nimport copy\nimport gzip\nfrom scipy import interpolate\nimport matplotlib.pyplot as plt\nfrom itertools import product\n\n\n#configuration\n#synpledir = /home/callende/synple\nsynpledir = os.path.dirname(os.path.realpath(__file__))\n\n\n#relative paths\nmodeldir = synpledir + \"/models\"\nmodelatomdir = synpledir + \"/data\"\nlinelistdir = synpledir + \"/linelists\"\nbindir = synpledir + \"/bin\"\nsynspec = bindir + \"/s54d\"\nrotin = bindir + \"/rotin3\"\n\n\n#other stuff\nclight = 299792.458\nepsilon = 0.6 #clv coeff.\nbolk = 1.38054e-16 # erg/ K\nzero = \" 0 \"\none = \" 1 \"\ntwo = \" 2 \"\n\ndef syn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None, \n compute=True, tmpdir=None):\n\n \"\"\"Computes a synthetic spectrum\n\n Interface to the fortran codes synspec/rotin that only requires two mandatory inputs: \n a model atmosphere (modelfile) and the limits of the spectral range (wrange). The code \n recognizes Kurucz, MARCS and Phoenix LTE model atmospheres. The sampling of the frequency \n grid is chosen internally, but can also be set by adding a constant wavelength step (dw).\n The abundances and microturbulence velocity can be set through the abu and vmicro \n parameters, but default values will be taken from the model atmosphere. Rotational and \n Gaussian broadening can be introduced (vrot and fwhm parameters). The computed spectrum \n can be written to a file (save == True). 
\n\n\n Parameters\n ----------\n modelfile : str\n file with a model atmosphere\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float, optional\n wavelength step for the output fluxes\n this will be the maximum interval for the radiative \n transfer, and will trigger interpolation at the end\n (default is None for automatic selection)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n vmicro: float, optional\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen (N(X)/N(H))\n (default taken from input model atmosphere)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n vrot: float\n projected rotational velocity (km/s)\n (default 0.)\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n fwhm: float\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectrum to a file (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n but see the parameter synfile to change that\n synfile: str\n when save is True, this can be used to set the name of the output file\n (default None)\n compute: bool\n set to False to skip the actual synspec run, triggering clean=False\n (default True)\n tmpdir: string\n when is not None a temporary directory with this name will be created to store\n the temporary synspec input/output files, and the synple log file (usually named\n syn.log) will be named as tmpdir_syn.log.\n\n Returns\n -------\n wave: numpy array of floats\n wavelengths (angstroms)\n flux: numpy array of floats\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats\n continuum flux (same units as flux)\n\n \"\"\"\n \n #basic checks on the line list and model atmosphere\n checksynspec(linelist,modelfile)\n\n #read model atmosphere\n atmostype, teff, logg, vmicro2, abu2, nd, atmos = read_model(modelfile)\n\n if vmicro == None: vmicro = vmicro2\n if abu == None: abu = abu2\n if dw == None: \n #space = 1e-2 \n space = np.mean(wrange) * np.sqrt(9.12e-15 * np.min(atmos['t']) + vmicro** 2) / clight / 3.\n else: \n space = dw\n\n\n #check input parameters are valid\n imode = checkinput(wrange, vmicro, linelist)\n \n\n print ('teff,logg,vmicro=',teff,logg,vmicro)\n #print ('abu=',abu)\n #print (len(abu))\n #print ('nd=',nd)\n #print ('linelist=',linelist)\n #print ('wrange=',wrange)\n\n logfile = 'syn.log'\n if tmpdir is not None:\n startdir = os.getcwd()\n logfile = os.path.join(startdir,os.path.split(tmpdir)[-1]) + \"_\" + logfile\n try:\n os.mkdir(tmpdir)\n except OSError:\n print( \"cannot create tmpdir %s \" % (tmpdir) )\n 
try:\n os.chdir(tmpdir)\n except OSError:\n print(\"cannot enter tmpdir %s \" % (tmpdir) )\n\n\n cleanup()\n\n writetas('tas',nd,linelist) #non-std param. file\n write5(teff,logg,abu,atom) #abundance/opacity file\n write8(teff,logg,nd,atmos,atmostype) #model atmosphere\n write55(wrange,space,imode,2,strength,vmicro,linelist,atmostype) #synspec control file\n create_links(linelist) #auxiliary data\n\n if compute == False:\n\n wave = None\n flux = None \n cont = None\n\n else:\n\n synin = open('fort.5')\n synout = open(logfile,'w')\n\n start = time.time()\n p = subprocess.Popen([synspec], stdin=synin, stdout = synout, stderr= synout, shell=True)\n p.wait()\n\n synout.flush()\n synout.close()\n synin.close()\n\n assert (os.path.isfile('fort.7')), 'Error: I cannot read the file *fort.7* in '+tmpdir+' -- looks like synspec has crashed, please look at syn.log'\n\n assert (os.path.isfile('fort.17')), 'Error: I cannot read the file *fort.17* in '+tmpdir+' -- looks like synspec has crashed, please look at syn.log'\n\n\n wave, flux = np.loadtxt('fort.7', unpack=True)\n wave2, flux2 = np.loadtxt('fort.17', unpack=True)\n if dw == None and fwhm <= 0. and vrot <= 0.: cont = np.interp(wave, wave2, flux2)\n end = time.time()\n print('syn ellapsed time ',end - start, 'seconds')\n\n if fwhm > 0. or vrot > 0.:\n start = time.time()\n print( vrot, fwhm, space, steprot, stepfwhm)\n wave, flux = call_rotin (wave, flux, vrot, fwhm, space, steprot, stepfwhm, clean=False, reuseinputfiles=True)\n if dw == None: cont = np.interp(wave, wave2, flux2)\n end = time.time()\n print('convol ellapsed time ',end - start, 'seconds')\n\n if (dw != None): \n nsamples = int((wrange[1] - wrange[0])/dw) + 1\n wave3 = np.arange(nsamples)*dw + wrange[0]\n #flux = np.interp(wave3, wave, flux)\n flux = interp_spl(wave3, wave, flux) \n cont = np.interp(wave3, wave2, flux2)\n wave = wave3\n\n if clean == True: cleanup()\n\n if tmpdir is not None:\n try:\n os.chdir(startdir)\n except OSError:\n print(\"cannot change directory from tmpdir %s to startdir %s\" % (tmpdir,startdir) ) \n if clean == True:\n try:\n os.rmdir(tmpdir)\n except OSError:\n print(\"cannot remove directory tmpdir %s\" % (tmpdir) )\n \n\n if save == True:\n if synfile == None: \n tmpstr = os.path.split(modelfile)[-1]\n synfile = tmpstr[:tmpstr.rfind('.')]+'.syn'\n np.savetxt(synfile,(wave,flux,cont))\n\n\n return(wave, flux, cont)\n\n\ndef mpsyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', vrot=0.0, fwhm=0.0, \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None, \n compute=True, nthreads=1):\n\n \"\"\"Computes a synthetic spectrum, splitting the spectral range in nthreads parallel calculations\n\n Wrapper for syn, using multiprocessing, to speed-up the calculation of a broad spectral range\n\n Parameters\n ----------\n modelfile : str\n file with a model atmosphere\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float, optional\n wavelength step for the output fluxes\n this will be the maximum interval for the radiative \n transfer, and will trigger interpolation at the end\n (default is None for automatic selection)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n vmicro: float, optional\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen 
(N(X)/N(H))\n (default taken from input model atmosphere)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n vrot: float\n projected rotational velocity (km/s)\n (default 0.)\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n fwhm: float\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectrum to a file (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n but see the parameter synfile to change that\n synfile: str\n when save is True, this can be used to set the name of the output file\n (default None)\n compute: bool\n set to False to skip the actual synspec run, triggering clean=False\n (default True)\n nthreads: int\n choose the number of cores to use in the calculation\n (default 1, 0 has the meaning that the code should take all the cores available)\n\n Returns\n -------\n wave: numpy array of floats\n wavelengths (angstroms)\n flux: numpy array of floats\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats\n continuum flux (same units as flux)\n\n \"\"\"\n\n from multiprocessing import Pool,cpu_count\n\n\n if nthreads == 0: \n nthreads = cpu_count()\n\n delta = (wrange[1]-wrange[0])/nthreads\n pars = []\n for i in range(nthreads):\n\n wrange1 = (wrange[0]+delta*i,wrange[0]+delta*(i+1))\n\n pararr = [modelfile, wrange1, dw, strength, vmicro, abu, \\\n linelist, atom, vrot, fwhm, \\\n steprot, stepfwhm, clean, save, synfile, \n compute, 'par'+str(i) ]\n pars.append(pararr)\n\n pool = Pool(nthreads)\n results = pool.starmap(syn,pars)\n pool.close()\n pool.join()\n\n x = results[0][0]\n y = results[0][1]\n z = results[0][2]\n\n if len(results) > 1:\n for i in range(len(results)-1):\n x = np.concatenate((x, results[i+1][0][1:]) )\n y = np.concatenate((y, results[i+1][1][1:]) )\n z = np.concatenate((z, results[i+1][2][1:]) )\n\n return(x,y,z)\n\ndef raysyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None, \n compute=True, nthreads=1):\n\n \"\"\"Computes a synthetic spectrum, splitting the spectral range in nthreads parallel calculations \n\n Wrapper for syn, using ray, to speed-up the calculation of a broad spectral range\n\n Parameters\n ----------\n modelfile : str\n file with a model atmosphere\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float, optional\n wavelength step for the output fluxes\n this will be the maximum interval for the radiative \n transfer, and will trigger interpolation at the end\n (default is None for automatic selection)\n strength: float, optional\n threshold in the 
line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n vmicro: float, optional\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen (N(X)/N(H))\n (default taken from input model atmosphere)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n vrot: float\n projected rotational velocity (km/s)\n (default 0.)\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n fwhm: float\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectrum to a file (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n but see the parameter synfile to change that\n synfile: str\n when save is True, this can be used to set the name of the output file\n (default None)\n compute: bool\n set to False to skip the actual synspec run, triggering clean=False\n (default True)\n nthreads: int\n choose the number of cores to use in the calculation\n (default 1, 0 has the meaning that the code should take all the cores available)\n\n Returns\n -------\n wave: numpy array of floats\n wavelengths (angstroms)\n flux: numpy array of floats\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats\n continuum flux (same units as flux)\n\n \"\"\"\n\n import psutil\n import ray\n\n @ray.remote\n def fun(vari,cons):\n\n wrange,tmpdir = vari\n\n modelfile,dw,strength,vmicro,abu,linelist, \\\n atom,vrot,fwhm,steprot,stepfwhm,clean,save,synfile,compute = cons\n\n x, y, z = syn(modelfile, wrange, dw, strength, vmicro, abu, \\\n linelist, atom, vrot, fwhm, \\\n steprot, stepfwhm, clean, save, synfile, \n compute, tmpdir)\n\n return(x,y,z)\n\n\n if nthreads == 0: \n nthreads = psutil.cpu_count(logical=False)\n\n print('nthreads=',nthreads)\n\n ray.init(num_cpus=nthreads)\n\n rest = [ modelfile,dw,strength,vmicro,abu,linelist, \\\n atom,vrot,fwhm,steprot,stepfwhm,clean,save,synfile,compute ]\n\n constants = ray.put(rest)\n\n delta = (wrange[1]-wrange[0])/nthreads\n pars = []\n for i in range(nthreads):\n\n wrange1 = (wrange[0]+delta*i,wrange[0]+delta*(i+1))\n folder = 'par'+str(i)\n\n pararr = [wrange1, 'par'+str(i) ]\n pars.append(pararr)\n\n results = ray.get([fun.remote(pars[i],constants) for i in range(nthreads)])\n\n x = results[0][0]\n y = results[0][1]\n z = results[0][2]\n\n if len(results) > 1:\n for i in range(len(results)-1):\n x = np.concatenate((x, results[i+1][0][1:]) )\n y = np.concatenate((y, results[i+1][1][1:]) )\n z = np.concatenate((z, results[i+1][2][1:]) )\n\n return(x,y,z)\n\n\n\ndef multisyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \\\n vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \\\n 
linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=None, nthreads=1):\n\n \"\"\"Computes synthetic spectra for a list of files. The values of vmicro, vrot, \n fwhm, and nfe can be iterables. Whether or not dw is specified the results will be \n placed on a common wavelength scale by interpolation. When not specified, dw will be \n chosen as appropriate for the first model in modelfiles.\n\n\n Parameters\n ----------\n modelfiles : list of str\n files with model atmospheres\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float\n wavelength step for the output fluxes.\n Unlike in 'syn' this will not be used to set the maximum wavelength step for \n synthesizing any of the spectra; the appropriate step will be chosen dynamically.\n Unlike in 'syn', interpolation to a constant step will always be done\n (default is None for automatic selection based on the first model of the list)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen (N(X)/N(H))\n (default taken from input model atmosphere)\n vmicro: float, optional, can be an iterable\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n vrot: float, can be an iterable\n projected rotational velocity (km/s)\n (default 0.)\n fwhm: float, can be an iterable\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n nfe: float, can be an iterable\n [N/Fe] nitrogen abundance change from the one specified in the array 'abu' (dex)\n (default 0.)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. 
for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectra to files (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n if multiple values of vmicro, vrot, fwhm or nfe are used, their values are\n prepended to the file names \n (default None)\n nthreads: int\n choose the number of cores to use in the calculation\n (default 1, 0 has the meaning that the code should take all the cores available)\n\n\n\n Returns\n -------\n wave: numpy array of floats (1D)\n wavelengths (angstroms)\n flux: numpy array of floats (2D -- as many rows as models input)\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats (2D -- as many rows as models input)\n continuum flux (same units as flux)\n\n \"\"\"\n\n\n #when vmicro, vrot, fwhm or nitrogen are not iterables, we create ones, otherwise we copy them\n try: \n nvmicro = len(vmicro)\n vmicros = vmicro\n except TypeError:\n nvmicro = 1\n vmicros = [ vmicro ] \n try: \n nvrot = len(vrot)\n vrots = vrots\n except TypeError:\n nvrot = 1\n vrots = [ vrot ] \n try: \n nfwhm = len(fwhm)\n fwhms = fwhm\n except TypeError:\n nfwhm = 1\n fwhms = [ fwhm ] \n try: \n nnfe = len(nfe)\n nnfes = nfe\n except TypeError:\n nnfe = 1\n nfes = [ nfe ] \n\n assert (len(modelfiles) > 0), 'multisyn needs at least one model to work with'\n wave = None\n flux = None\n cont = None\n\n for entry in modelfiles:\n for vmicro1 in vmicros:\n for nfe1 in nfes:\n\n abu1 = copy.copy(abu) \n\n #if need be, adjust nitrogen abundance according to nfe\n if (abs(nfe1) > 1e-7):\n if (abu1 == None):\n checksynspec(linelist,entry)\n atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)\n abu1[6] = abu1[6] * 10.**nfe1\n\n x, y, z = mpsyn(entry, wrange, dw=None, strength=strength, \\\n vmicro=vmicro1, abu=abu1, linelist=linelist, atom=atom, \\\n clean=clean, save=save, nthreads=nthreads)\n\n space = np.mean(np.diff(x))\n \n for vrot1 in vrots:\n for fwhm1 in fwhms:\n\n if fwhm1> 0. or vrot1 > 0.:\n start = time.time()\n print( entry, vmicro1, nfe1, vrot1, fwhm1, space)\n x2, y2 = call_rotin (x, y, vrot, fwhm, space, steprot, stepfwhm, \\\n clean=False, reuseinputfiles=True)\n z2 = np.interp(x2, x, z)\n end = time.time()\n print('convol ellapsed time ',end - start, 'seconds')\n else:\n x2, y2, z2 = x, y, z\n\n\n if entry == modelfiles[0] and vmicro1 == vmicros[0] and vrot1 == vrots[0] and fwhm1 == fwhms[0] and nfe1 == nfes[0]:\n if dw == None: dw = np.median(np.diff(x2))\n nsamples = int((wrange[1] - wrange[0])/dw) + 1\n wave = np.arange(nsamples)*dw + wrange[0]\n #flux = np.interp(wave, x2, y2)\n flux = interp_spl(wave, x2, y2)\n cont = np.interp(wave, x2, z2)\n else:\n #flux = np.vstack ( (flux, np.interp(wave, x, y) ) )\n flux = np.vstack ( (flux, interp_spl(wave, x, y) ) )\n cont = np.vstack ( (cont, np.interp(wave, x, z) ) )\n\n\n return(wave, flux, cont)\n\n\n\ndef polysyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \\\n vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', \\\n steprot=0.0, stepfwhm=0.0, clean=True, save=None):\n\n \"\"\"Sets up a directory tree for computing synthetic spectra for a list of files in \n parallel. The values of vmicro, vrot, fwhm, and nfe can be iterables. 
Whether or not \n dw is specified the results will be placed on a common wavelength scale by interpolation.\n When not specified, dw will be chosen as appropriate for the first model in modelfiles.\n\n\n Parameters\n ----------\n modelfiles : list of str\n files with model atmospheres\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float\n Unlike in 'syn' this will not be used to set the maximum wavelength step for \n synthesizing any of the spectra; the appropriate step will be chosen dynamically.\n Unlike in 'syn', interpolation to a constant step will always be done\n (default is None for automatic selection based on the first model of the list)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n abu: array of floats (99 elements), optional\n chemical abundances relative to hydrogen (N(X)/N(H))\n (default taken from input model atmosphere)\n vmicro: float, optional, can be an iterable\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n vrot: float, can be an iterable\n projected rotational velocity (km/s)\n (default 0.)\n fwhm: float, can be an iterable\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n nfe: float, can be an iterable\n [N/Fe] nitrogen abundance change from the one specified in the array 'abu' (dex)\n (default 0.)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. 
for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the synspec\n temporary files/links (default True)\n save: bool\n set to True to save the computed spectra to files (default False)\n the root of the model atmosphere file, with an extension \".syn\" will be used\n if multiple values of vmicro, vrot, fwhm or nfe are used, their values are\n prepended to the file names \n (default None)\n\n\n Returns\n -------\n wave: numpy array of floats (1D)\n wavelengths (angstroms)\n flux: numpy array of floats (2D -- as many rows as models input)\n flux (H_lambda in ergs/s/cm2/A)\n cont: numpy array of floats (2D -- as many rows as models input)\n continuum flux (same units as flux)\n\n \"\"\"\n\n #synspec does not currently run in parallel\n nthreads = 1\n\n\n #when vmicro, vrot, fwhm or nitrogen are not iterables, we create ones, otherwise we copy them\n try: \n nvmicro = len(vmicro)\n vmicros = vmicro\n except TypeError:\n nvmicro = 1\n vmicros = [ vmicro ] \n try: \n nvrot = len(vrot)\n vrots = vrots\n except TypeError:\n nvrot = 1\n vrots = [ vrot ] \n try: \n nfwhm = len(fwhm)\n fwhms = fwhm\n except TypeError:\n nfwhm = 1\n fwhms = [ fwhm ] \n try: \n nnfe = len(nfe)\n nnfes = nfe\n except TypeError:\n nnfe = 1\n nfes = [ nfe ] \n\n\n idir = 0\n for entry in modelfiles:\n for vmicro1 in vmicros:\n for nfe1 in nfes:\n\n idir = idir + 1\n dir = ( \"hyd%07d\" % (idir) )\n try:\n os.mkdir(dir)\n except OSError:\n print( \"cannot create dir hyd%07d\" % (idir) )\n try:\n os.chdir(dir)\n except OSError:\n print( \"cannot change dir to hyd%07d\" % (idir) )\n\n if entry == 'missing':\n pass\n else:\n #setup the slurm script\n sfile = dir+\".job\"\n now=time.strftime(\"%c\")\n s = open(sfile ,\"w\")\n s.write(\"#!/bin/bash \\n\")\n s.write(\"#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \\n\")\n s.write(\"#This script was written by synple on \"+now+\" \\n\") \n s.write(\"#SBATCH -J \"+dir+\" \\n\")\n s.write(\"#SBATCH -o \"+dir+\"_%j.out\"+\" \\n\")\n s.write(\"#SBATCH -e \"+dir+\"_%j.err\"+\" \\n\")\n s.write(\"#SBATCH -n \"+str(nthreads)+\" \\n\")\n s.write(\"#SBATCH -t 04:00:00\"+\" \\n\") #hh:mm:ss\n s.write(\"#SBATCH -D \"+os.path.abspath(os.curdir)+\" \\n\")\n s.write(\"#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \\n\\n\\n\")\n\n\n abu1 = copy.copy(abu)\n\n #if need be, adjust nitrogen abundance according to nfe\n if (abs(nfe1) > 1e-7):\n if (abu1 == None):\n checksynspec(linelist,entry)\n atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)\n abu1[6] = abu1[6] * 10.**nfe1\n\n x, y, z = syn(entry, wrange, dw=None, strength=strength, vmicro=vmicro1, \\\n abu=abu1, linelist=linelist, atom=atom, compute=False)\n\n s.write(synspec+\" < \"+\"fort.5\"+\"\\n\")\n\n si = open(\"fort.55\",'r')\n for i in range(6): line = si.readline()\n entries = line.split()\n space = float(entries[5])\n si.close()\n \n iconv = 0\n for vrot1 in vrots:\n for fwhm1 in fwhms:\n\n print('iconv=',iconv)\n\n iconv = iconv + 1\n inconv = (\"%07dfort.5\" % (iconv) )\n outconv = (\"'%07dfort.7'\" % (iconv) )\n if fwhm1> 0. 
or vrot1 > 0.:\n f = open(inconv,'w')\n f.write( ' %s %s %s \\n' % (\"'fort.7'\", \"'fort.17'\", outconv) )\n f.write( ' %f %f %f \\n' % (vrot1, space, steprot) )\n f.write( ' %f %f \\n' % (fwhm1, stepfwhm) )\n print('stepfwhm=',stepfwhm)\n f.write( ' %f %f %i \\n' % (wrange[0], wrange[1], 0) )\n f.close()\n s.write(rotin+\" < \"+inconv+\"\\n\")\n else:\n s.write(\"cp \"+\" fort.7 \"+outconv[1:-1]+\"\\n\")\n\n s.close()\n os.chmod(sfile ,0o755)\n\n try:\n os.chdir('..')\n except OSError:\n print( \"cannot exit dir hyd%07d\" % (idir) )\n\n\n return(None,None,None)\n\n\n\ndef polyopt(wrange=(9.e2,1.e5),dw=0.1,strength=1e-3, linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], \\\n tlt = (20,3.08,0.068), tlrho = (20,-14.0,0.59), \\\n tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), \\\n tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), tvmicro=(1,1.0,0.0), \\\n zexclude=None, atom='ap18'):\n\n \"\"\"Sets up a directory tree for computing opacity tables for TLUSTY. The table collection forms \n a regular grid defined by triads in various parameters. Each triad has three values (n, llimit, step)\n that define an array x = np.range(n)*step + llimit. Triads in teff (tteff) and logg\n (tlogg) are mandatory. Triads in [Fe/H] (tfeh), [alpha/Fe] (tafe), [C/Fe] (tcfe), \n [N/Fe] (tnfe), [O/Fe] (tofe), [r/Fe] (rfe), and [s/Fe] (sfe) are optional since \n arrays with just one 0.0 are included by default.\n\n Parameters\n ----------\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n dw: float\n Unlike in 'syn' this will not be used to set the maximum wavelength step for \n synthesizing any of the spectra; the appropriate step will be chosen dynamically.\n Unlike in 'syn', interpolation to a constant step will always be done\n (default is None for automatic selection based on the first model of the list)\n strength: float, optional\n threshold in the line-to-continuum opacity ratio for \n selecting lines (default is 1e-4)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n atom: str\n 'ap18' -- generic opacities used in Allende Prieto+ 2018\n 'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)\n 'hhm' -- continuum opacity is simplified to H and H-\n (default 'ap18')\n tlt: tuple\n log10(T) triad (n, llimit, step) for opacity grid\n (default values chosen for grid lt = np.arange(20)*0.068 + 3.08,\n to cover the range in the DR16 APOGEE MARCS grids)\n tlrho: tuple\n log10(rho) triad (n, llimit, step) for opacity grid\n (default values chosen for grid lrho = np.arange(20)*0.59 -14.0,\n to cover the range in the DR16 APOGEE MARCS grids)\n tteff: tuple\n Teff triad (n, llimit, step)\n tlogg: tuple\n logg triad (n, llimit, step)\n tfeh: tuple\n [Fe/H] triad\n tafe: tuple\n [alpha/Fe] triad \n tcfe: tuple\n [C/Fe] triad\n tnfe: tuple\n [N/Fe] triad\n tofe: tuple\n [O/Fe] triad\n rfeh: tuple\n [r/Fe] triad (r-elements abundance ratio)\n sfeh: tuple\n [s.Fe] triad (s-elements abundance ratio)\n zexclude: list\n atomic numbers of the elements whose opacity is NOT to be\n included in the table\n (default None)\n\n \"\"\"\n\n #pynspec does not currently run in parallel\n nthreads = 1\n\n #expanding the triads t* into iterables\n try: \n nfeh = len(tfeh)\n assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'\n fehs = 
np.arange(tfeh[0])*tfeh[2] + tfeh[1]\n except TypeError:\n print('Error: feh triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nafe = len(tafe)\n assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'\n afes = np.arange(tafe[0])*tafe[2] + tafe[1]\n except TypeError:\n print('Error: afe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n ncfe = len(tcfe)\n assert (ncfe == 3), 'Error: cfe triad must have three elements (n, llimit, step)'\n cfes = np.arange(tcfe[0])*tcfe[2] + tcfe[1]\n except TypeError:\n print('Error: cfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nnfe = len(tnfe)\n assert (nnfe == 3), 'Error: nfe triad must have three elements (n, llimit, step)'\n nfes = np.arange(tnfe[0])*tnfe[2] + tnfe[1]\n except TypeError:\n print('Error: nfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nofe = len(tofe)\n assert (nofe == 3), 'Error: ofe triad must have three elements (n, llimit, step)'\n ofes = np.arange(tofe[0])*tofe[2] + tofe[1]\n except TypeError:\n print('Error: ofe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nrfe = len(trfe)\n assert (nrfe == 3), 'Error: rfe triad must have three elements (n, llimit, step)'\n rfes = np.arange(trfe[0])*trfe[2] + trfe[1]\n except TypeError:\n print('Error: rfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nsfe = len(tsfe)\n assert (nsfe == 3), 'Error: sfe triad must have three elements (n, llimit, step)'\n sfes = np.arange(tsfe[0])*tsfe[2] + tsfe[1]\n except TypeError:\n print('Error: sfe triad must have three elements (n, llimit, step)')\n return ()\n \n try: \n nvmicro = len(tvmicro)\n assert (nvmicro == 3), 'Error: vmicro triad must have three elements (n, llimit, step)'\n vmicros = np.arange(tvmicro[0])*tvmicro[2] + tvmicro[1]\n except TypeError:\n print('Error: vmicro triad must have three elements (n, llimit, step)')\n return ()\n \n\n #ranges for the opacity table\n try: \n nlt = len(tlt)\n assert (nlt == 3), 'Error: lt triad must have three elements (n, llimit, step)'\n lt = np.arange(tlt[0])*tlt[2] + tlt[1] #log10(T)\n except TypeError:\n print('Error: tlt triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nlrho = len(tlrho)\n assert (nlrho == 3), 'Error: lrho triad must have three elements (n, llimit, step)'\n lrho = np.arange(tlrho[0])*tlrho[2] + tlrho[1] #log10(density)\n except TypeError:\n print('Error: tlrho triad must have three elements (n, llimit, step)')\n return ()\n\n \n symbol, mass, sol = elements()\n z_metals = np.arange(97,dtype=int) + 3\n #Ar usually included among alphas in MARCS and not in Kurucz/Meszaros\n z_alphas = np.array([8,10,12,14,16,18,20,22],dtype=int) \n # rs increases: notes and data below from comments in the MARCS code (provided by B.Edvardsson) \n # Fractional r-process abundance for Ga-Bi (r+s simply assumed == 100%) | Date 2000-01-18\n # (Note: Ga-Sr (31-38) was just copied from Kaeppeler et al. 1989, below)\n # s-process from Stellar models: Arlandini C., Kaeppeler F., Wisshak K.,\n # Gallino R., Busso M., Straniero O., 1999, Astrophys J. 525, 886-900\n # Fractions corrected to the revised meteoritic abundances\n # of Grevesse N., Sauval A.J. 
1998, Space Science Review 85, 161-174 \n # -0.99 is assigned to unstable elements\n z_rs = np.arange(62,dtype=int) + 31\n rfrac= np.array([.43, .47, .81, .85, .39, .47, \n .41, .11, .08, .17, .15, .50,-.99, .68, .86, \n .54, .80, .48, .65, .35, .75, .83, .80, .80, \n .85, .19, .38, .23, .51, .44,-.99, .71, .93, \n .85, .93, .85, .92, .83, .87, .67, .80, .44, \n .59, .44, .91, .91, .99, .95, .94, .41, .24, \n .54, .95,-.99,-.99,-.99,-.99,-.99,-.99, 1.0, \n -.99, 1.0], dtype=float) \n\n\n\n idir = 0\n for feh in fehs:\n for afe in afes:\n for cfe in cfes:\n for nfe in nfes:\n for ofe in ofes:\n for rfe in rfes:\n for sfe in sfes: \n for vmicro in vmicros:\n \n print(feh,afe,cfe,nfe,ofe,rfe,sfe)\n\n idir = idir + 1\n dir = ( \"hyd%07d\" % (idir) )\n try:\n os.mkdir(dir)\n except OSError:\n print( \"cannot create dir hyd%07d\" % (idir) )\n try:\n os.chdir(dir)\n except OSError:\n print( \"cannot change dir to hyd%07d\" % (idir) )\n\n #check input parameters are valid\n imode = checkinput(wrange, vmicro, linelist)\n\n #setup the slurm script\n sfile = dir+\".job\"\n now=time.strftime(\"%c\")\n s = open(sfile ,\"w\")\n s.write(\"#!/bin/bash \\n\")\n s.write(\"#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \\n\")\n s.write(\"#This script was written by synple on \"+now+\" \\n\") \n s.write(\"#SBATCH -J \"+dir+\" \\n\")\n s.write(\"#SBATCH -o \"+dir+\"_%j.out\"+\" \\n\")\n s.write(\"#SBATCH -e \"+dir+\"_%j.err\"+\" \\n\")\n s.write(\"#SBATCH -n \"+str(nthreads)+\" \\n\")\n s.write(\"#SBATCH --ntasks-per-node \"+str(4)+\" \\n\")\n s.write(\"#SBATCH -t 48:00:00\"+\" \\n\") #hh:mm:ss\n s.write(\"#SBATCH -D \"+os.path.abspath(os.curdir)+\" \\n\")\n s.write(\"#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \\n\\n\\n\")\n\n \n abu = copy.copy(sol)\n\n if (abs(feh) > 1e-7): \n for i in range(len(z_metals)): \n abu[z_metals[i] - 1] = abu[z_metals[i] - 1] * 10.**feh\n if (abs(afe) > 1e-7): \n for i in range(len(z_alphas)):\n abu[z_alphas[i] - 1] = abu[z_alphas[i] - 1] * 10.**afe\n if (abs(cfe) > 1e-7): abu[5] = abu[5] * 10.**cfe\n if (abs(nfe) > 1e-7): abu[6] = abu[6] * 10.**nfe\n if (abs(ofe) > 1e-7): abu[7] = abu[7] * 10.**ofe\n if (abs(rfe) > 1e-7): \n for i in range(len(z_rs)): \n if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * rfrac[i] * 10.**rfe\n if (abs(sfe) > 1e-7): \n for i in range(len(z_rs)): \n if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * (1.0 - rfrac[i]) * 10.**sfe\n\n\n write55(wrange,dw=dw,imode=-3,hydprf=0, strength=strength, vmicro=vmicro, linelist=linelist)\n\n write5(9999.,9.9,abu,atom)\n \n writetas('tas',1,linelist)\n\n write2(lt,lrho,wrange,filename='opt.dat', \\\n strength=strength,inttab=1)\n\n if zexclude != None: \n write3(zexclude)\n \n create_links(linelist)\n \n s.write('time ' + synspec + \" < \"+\"fort.5\"+\"\\n\")\n s.close()\n os.chmod(sfile ,0o755)\n \n try:\n os.chdir('..')\n except OSError:\n print( \"cannot exit dir hyd%07d\" % (idir) )\t\t \n\n return()\n\n\n\n\ndef collect_marcs(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \\\n tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), \\\n ignore_missing_models=False):\n\n \"\"\"Collects all the MARCS models in modeldir that are part of a regular grid defined\n by triads in various parameters. Each triad has three values (n, llimit, step)\n that define an array x = np.range(n)*step + llimit. Triads in teff (tteff) and logg\n (tlogg) are mandatory. 
Triads in [Fe/H] (tfeh), [alpha/Fe] (tafe), [C/Fe] (tcfe), \n [N/Fe] (tnfe), [O/Fe] (tofe), [r/Fe] (rfe), and [s/Fe] (sfe) are optional since \n arrays with just one 0.0 are included by default.\n\n Parameters\n ----------\n modeldir: str\n directory where model atmosphere files are\n tteff: tuple\n Teff triad (n, llimit, step)\n tlogg: tuple\n logg triad (n, llimit, step)\n tfeh: tuple\n [Fe/H] triad\n tafe: tuple\n [alpha/Fe] triad \n tcfe: tuple\n [C/Fe] triad\n tnfe: tuple\n [N/Fe] triad\n tofe: tuple\n [O/Fe] triad\n rfeh: tuple\n [r/Fe] triad (r-elements abundance ratio)\n sfeh: tuple\n [s.Fe] triad (s-elements abundance ratio)\n ignore_missing_models: bool\n set to True to avoid stopping when a model is missing,\n in which case a None is entered in the returning list\n \n Returns\n -------\n files: list of str\n file names with MARCS models that are in modeldir and match\n the parameters in the requested grid\n\n \"\"\"\n\n #expanding the triads t* into iterables\n try: \n nteff = len(tteff)\n assert (nteff == 3), 'Error: Teff triad must have three elements (n, llimit, step)'\n teffs = np.arange(tteff[0])*tteff[2] + tteff[1]\n except TypeError:\n print('Error: Teff triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nlogg = len(tlogg)\n assert (nlogg == 3), 'Error: logg triad must have three elements (n, llimit, step)'\n loggs = np.arange(tlogg[0])*tlogg[2] + tlogg[1]\n except TypeError:\n print('Error: logg triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nfeh = len(tfeh)\n assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'\n fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]\n except TypeError:\n print('Error: feh triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nafe = len(tafe)\n assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'\n afes = np.arange(tafe[0])*tafe[2] + tafe[1]\n except TypeError:\n print('Error: afe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n ncfe = len(tcfe)\n assert (ncfe == 3), 'Error: cfe triad must have three elements (n, llimit, step)'\n cfes = np.arange(tcfe[0])*tcfe[2] + tcfe[1]\n except TypeError:\n print('Error: cfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nnfe = len(tnfe)\n assert (nnfe == 3), 'Error: nfe triad must have three elements (n, llimit, step)'\n nfes = np.arange(tnfe[0])*tnfe[2] + tnfe[1]\n except TypeError:\n print('Error: nfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nofe = len(tofe)\n assert (nofe == 3), 'Error: ofe triad must have three elements (n, llimit, step)'\n ofes = np.arange(tofe[0])*tofe[2] + tofe[1]\n except TypeError:\n print('Error: ofe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nrfe = len(trfe)\n assert (nrfe == 3), 'Error: rfe triad must have three elements (n, llimit, step)'\n rfes = np.arange(trfe[0])*trfe[2] + trfe[1]\n except TypeError:\n print('Error: rfe triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nsfe = len(tsfe)\n assert (nsfe == 3), 'Error: sfe triad must have three elements (n, llimit, step)'\n sfes = np.arange(tsfe[0])*tsfe[2] + tsfe[1]\n except TypeError:\n print('Error: sfe triad must have three elements (n, llimit, step)')\n return ()\n\n files = []\n\n fi = open('files.txt','w')\n\n for teff in teffs:\n for logg in loggs:\n for feh in fehs:\n for afe in afes:\n for cfe in cfes:\n for nfe in nfes:\n for ofe in ofes:\n for rfe 
in rfes:\n for sfe in sfes: \n \n print(teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe)\n code = 'm*_t*_x3'\n\n if logg >= 3.5: \n a1 = 'p' \n else: \n a1 = 's'\n\n filename = (\"%s%4i_g%+.1f_%s_z%+.2f_a%+.2f_c%+.2f_n%+.2f_o%+.2f_r%+.2f_s%+.2f.mod*\" % (a1,teff,logg,code,feh,afe,cfe,nfe,ofe,rfe,sfe) )\n\n file = glob.glob(os.path.join(modeldir,filename))\n\n if ignore_missing_models == False:\n assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir \n assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir\n else:\n if (len(file) == 0): files.append('missing')\n \n if (len(file) == 1): files.append(file[0])\n\n fi.write( \"%s %4i %+.1f %s %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f\\n\" % (files[-1],teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe) )\n\n\n\n fi.close()\n\n return(files)\n\ndef collect_k2odfnew(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \\\n ignore_missing_models=False):\n\n \"\"\"Collects all the ODFNEW Castelli/Kurucz models in modeldir that are part of a regular grid defined\n by triads in various parameters. Each triad has three values (n, llimit, step)\n that define an array x = np.range(n)*step + llimit. Triads in teff (tteff) and logg\n (tlogg) are mandatory. Triads in [Fe/H] (tfeh), and [alpha/Fe] (tafe) are optional since \n arrays with just one 0.0 are included by default. \n\n NOTE: There are ODFNEW models with only afe=[alpha/Fe]=0.0 or 0.4. The latter are used whenever\n afe takes values > 0.0, while the afe=0.0 models are used otherwise.\n\n Parameters\n ----------\n modeldir: str\n directory where model atmosphere files are\n tteff: tuple\n Teff triad (n, llimit, step)\n tlogg: tuple\n logg triad (n, llimit, step)\n tfeh: tuple\n [Fe/H] triad\n tafe: tuple\n [alpha/Fe] triad \n ignore_missing_models: bool\n set to True to avoid stopping when a model is missing,\n in which case a None is entered in the returning list\n \n Returns\n -------\n files: list of str\n file names with Kurucz ODFNEWS models that are in modeldir and match\n the parameters in the requested grid\n\n \"\"\"\n\n #expanding the triads t* into iterables\n try: \n nteff = len(tteff)\n assert (nteff == 3), 'Error: Teff triad must have three elements (n, llimit, step)'\n teffs = np.arange(tteff[0])*tteff[2] + tteff[1]\n except TypeError:\n print('Error: Teff triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nlogg = len(tlogg)\n assert (nlogg == 3), 'Error: logg triad must have three elements (n, llimit, step)'\n loggs = np.arange(tlogg[0])*tlogg[2] + tlogg[1]\n except TypeError:\n print('Error: logg triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nfeh = len(tfeh)\n assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'\n fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]\n except TypeError:\n print('Error: feh triad must have three elements (n, llimit, step)')\n return ()\n\n try: \n nafe = len(tafe)\n assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'\n afes = np.arange(tafe[0])*tafe[2] + tafe[1]\n except TypeError:\n print('Error: afe triad must have three elements (n, llimit, step)')\n return ()\n\n\n files = []\n\n fi = open('files.txt','w')\n\n for teff in teffs:\n for logg in loggs:\n for feh in fehs:\n for afe in afes:\n \n print(teff,logg,feh,afe)\n code = 'k2odfnew.dat'\n\n if afe > 0.0: \n a1 = 'a' \n else: \n a1 = ''\n\n if feh < 0.0:\n a2 = 'am'\n else:\n a2 = 'ap'\n\n filename = (\"t%05ig%.1f%s%02i%s\" % 
(teff,logg,a2,int(abs(feh)*10),a1+code) )\n\n file = glob.glob(os.path.join(modeldir,filename))\n\n\n if ignore_missing_models == False:\n assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir \n assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir\n else:\n if (len(file) == 0): files.append('missing')\n \n if (len(file) == 1): files.append(file[0])\n\n fi.write( \"%s %4i %+.1f %+.2f %+.2f \\n\" % (files[-1],teff,logg,feh,afe) )\n\n fi.close()\n\n return(files)\n\n\n\ndef getallt(modelfiles):\n\n \"\"\"Collects all the values for temperature, density and electron number density\n in a list of files with model atmospheres\n\n Parameters\n ----------\n modelfiles : list of str\n files with model atmospheres\n\n Returns\n -------\n t: list\n list of all temperatures in all the layers of the input model atmospheres \n rho: list\n list of all values of gas pressure in all the layers of the input model atmospheres\n \n ne: list\n list of all values of electron number density in all the layers of the input model atmospheres\n\n \"\"\"\n\n t = []\n rho = []\n ne = []\n\n for entry in modelfiles:\n print('reading ',entry)\n teff, logg, vmicro, abu, nd, atmos = read_marcs_model2(entry)\n #atmostype,teff,logg,vmicro,abu,nd,atmos = read_model(entry)\n for value in atmos['t']: t.append(value)\n for value in atmos['rho']: rho.append(value)\n for value in atmos['ne']: ne.append(value)\n\n return(t,rho,ne)\n\n\n\ndef call_rotin(wave=None, flux=None, vrot=0.0, fwhm=0.0, space=1e-2, steprot=0.0, stepfwhm=0.0, clean=True, reuseinputfiles=False):\n\n\n \"\"\"Convolves a synthetic spectrum with a rotation and/or Gaussian kernel\n\n Interface to the fortran code rotin.\n\n Parameters\n ----------\n wave: numpy array of floats\n wavelengths (angstroms)\n flux: numpy array of floats\n flux \n vrot: float\n projected rotational velocity (km/s)\n (default 0.)\n space: float, optional\n characteristic wavelength scale for variations in the spectrum (angstroms)\n (default is 1e-2)\n steprot: float\n wavelength step for convolution with rotational kernel (angstroms)\n set to 0. for automatic adjustment (default 0.)\n fwhm: float\n Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)\n (default 0.)\n stepfwhm: float\n wavelength step for Gaussian convolution (angstroms)\n set to 0. 
for automatic adjustment (default 0.)\n clean: bool\n True by the default, set to False to avoid the removal of the rotin\n temporary files (default Tr<ue)\n reuseinputfiles: bool\n set to take the input data from the output synspec file (fort.7) rather than \n from the input arrays (wave, flux)\n\n Returns\n -------\n wave2: numpy array of floats\n wavelengths (angstroms)\n flux2: numpy array of floats\n flux \n\n\n \"\"\"\n if reuseinputfiles == False:\n f = open('fort.7','w')\n f2 = open('fort.17','w')\n maxflux = np.max(flux)\n for i in range(len(wave)):\n f.write( ' %f %f \\n' % (wave[i], flux[i]) )\n f2.write( ' %f %f \\n' % (wave[i], maxflux) )\n f.close()\n f2.close()\n\n f = open('fort.5','w')\n f.write( ' %s %s %s \\n' % (\"'fort.7'\", \"'fort.17'\", \"'fort.11'\") )\n f.write( ' %f %f %f \\n' % (vrot, space, steprot) )\n f.write( ' %f %f \\n' % (fwhm, stepfwhm) )\n print('stepfwhm=',stepfwhm)\n f.write( ' %f %f %i \\n' % (np.min(wave), np.max(wave), 0) )\n f.close()\n\n synin = open('fort.5')\n synout = open('syn.log','a')\n p = subprocess.Popen([rotin], stdin=synin, stdout = synout, stderr = synout)\n p.wait()\n synout.flush()\n synout.close()\n synin.close()\n \n assert (os.path.isfile('fort.11')), 'Error: I cannot read the file *fort.11* in '+tmpdir+' -- looks like rotin has crashed, please look at syn.log'\n\n wave2, flux2 = np.loadtxt('fort.11', unpack=True)\n print(len(wave),len(wave2))\n \n if clean == True: cleanup()\n\n return(wave2, flux2)\n\ndef read_model(modelfile):\n \n \"\"\"Reads a model atmosphere into a structure\n \n Parameters\n ---------- \n modelfile : str\n file with a model atmosphere\n \n Returns\n -------\n atmostype : str\n type of model atmosphere (kurucz/marcs/phoenix)\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density\n \"\"\"\n\n #check\n if not os.path.isfile(modelfile):\n mf = os.path.join(modeldir,modelfile)\n if os.path.isfile(mf): modelfile = mf\n\n atmostype = identify_atmostype(modelfile)\n\n if atmostype == 'kurucz':\n teff, logg, vmicro, abu, nd, atmos = read_kurucz_model(modelfile) \n if atmostype == 'marcs':\n teff, logg, vmicro, abu, nd, atmos = read_marcs_model2(modelfile)\n if atmostype == 'phoenix':\n teff, logg, vmicro, abu, nd, atmos = read_phoenix_model(modelfile)\n\n return (atmostype,teff,logg,vmicro,abu,nd,atmos)\n\ndef identify_atmostype(modelfile):\n\n \"\"\"Idenfies the type of model atmosphere in an input file\n\n Valid options are kurucz, marcs or phoenix\n\n Parameters\n ----------\n modelfile: str\n file with a model atmosphere\n\n Returns\n -------\n atmostype: str\n can take the value 'kurucz', 'marcs' or 'phoenix' ('tlusty' soon to be added!)\n\n \"\"\"\n\n if ('PHOENIX' in modelfile and 'fits' in modelfile): atmostype = 'phoenix'\n else: \n if modelfile[-3:] == '.gz':\n f = gzip.open(modelfile,'rt')\n else:\n f = open(modelfile,'r')\n line = f.readline()\n print('modelfile / line=',modelfile,line)\n type(line)\n if ('TEFF' in line): atmostype = 'kurucz'\n else: atmostype = 'marcs'\n f.close()\n \n return(atmostype)\n\ndef checksynspec(linelist,modelfile):\n\n \"\"\"checking that executables and data are where it should be\n\n 
Parameters\n ----------\n linelist: array of str\n file names of the line lists to be used. The first string should correspond\n to the atomic line list and is mandatory. The remainder are optional and\n correspond to molecular line lists. All files should be in synspec format.\n (see documentation at http://nova.astro.umd.edu/Synspec43/synspec.html)\n\n \"\"\"\n\n dirs = [synpledir,modelatomdir,linelistdir,bindir]\n for entry in dirs: assert (os.path.isdir(entry)), 'dir '+entry+' missing'\n\n files = [synspec,rotin]\n for entry in linelist: \n if not os.path.isfile(entry):\n ll = os.path.join(linelistdir,entry)\n if os.path.isfile(ll): files.append(ll)\n for entry in files: assert (os.path.isfile(entry)), 'file '+entry+' missing'\n\n if not os.path.isfile(modelfile):\n mf = os.path.join(modeldir,modelfile)\n if os.path.isfile(mf): modelfile = mf\n\n print(modeldir)\n print(modelfile)\n assert (os.path.isfile(modelfile)),'model atmosphere file '+modelfile+' missing'\n\n\n return(True)\n\n\ndef checkinput(wrange, vmicro, linelist):\n\n \"\"\"checking input parameters from user\n\n\n Parameters\n ----------\n wrange: tuple or list of two floats\n initial and ending wavelengths (angstroms)\n vmicro: float, optional\n microturbulence (km/s) \n (default is taken from the model atmosphere)\n linelist: array of str\n filenames of the line lists, the first one corresponds to \n the atomic lines and all the following ones (optional) to\n molecular lines\n (default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)\n\n Returns\n ------\n imode: int\n appropriate value for the variable imode, which specifies whether\n one will use many atomic lines (imode=0), just a few (imode=1),\n or none (H lines are an exception; imode=2)\n\n \"\"\"\n\n\n #determine imode\n # imode = 0 is default, atoms and molecules, at least 2 line lists \n # synple sets IFMOL = 1 in 'tas' when an input molecular line list is used\n # but does not set it when only an atomic line list is given\n # imode = 2 for pure continuum\n # imode = 1 for few-lines mode\n # imode = -3 for regular opacity tables (TLUSTY)\n\n if len(linelist) == 0: \n imode = 2 # no atomic or molecular line list -> pure continuum and no molecules\n else:\n\n #find range of atomic line list\n if not os.path.isfile(linelist[0]):\n ll = os.path.join(linelistdir,linelist[0])\n if os.path.isfile(ll): linelist[0] = ll\n\n nlines, minlambda, maxlambda = getlinelistrange(linelist[0])\n\n #check\n if nlines > 10:\n assert (wrange[0] > minlambda-1 and wrange[1] < maxlambda+1),'wrange exceeds the allow range ('+str(minlambda)+' to '+str(maxlambda)+')'\n imode = 0\n else:\n imode = 1\n\n assert (vmicro >= 0.0),'vmicro = '+str(vmicro)+' but cannot < 0.'\n \n return(imode)\n\ndef getlinelistrange(atomiclinelist):\n#finds out min and max wavelengths for a line list\n\n f = open(atomiclinelist,'r')\n line = f.readline()\n entries = line.split()\n minlambda = float(entries[0])*10.\n fsize = os.path.getsize(atomiclinelist)\n f.seek(fsize-103)\n line = f.readline()\n f.close()\n entries = line.split()\n maxlambda = float(entries[0])*10.\n nlines = int(0.01 * fsize)\n\n return(nlines, minlambda,maxlambda)\n\n\n\ndef writetas(filename,nd,linelist):\n#write non-std input parameters\n# input: filename -- str -- name of the non-std. param. 
file to print\n# nd -- int -- number of layers in the model\n# nd -- list -- names of the linelist files (atomic first, then one \n#\t\t\t\tor more molecular ones\n \n f = open(filename,'w')\n f.write(\"ND= \"+str(nd)+\" \\n\")\n if len(linelist) > 1: f.write(\"IFMOL= \"+one+\" \\n\")\n f.write(\"TMOLIM= 8000. \\n\")\n\n f.close()\n\n return()\n\ndef write3(zexclude):\n \n f = open('fort.3','w')\n for z in zexclude:\n f.write( \" %d %10.4e \\n\" % (z, 0.0) )\n f.close()\n \n return()\n\n\ndef write2(lt,lrho,wrange, filename='opt.data', dlw=2e-5, binary=False,strength=1e-4,inttab=1):\n#write fort.2 file for creating opacity tables for TLUSTY\n\n f = open('fort.2','w')\n f.write( \" %d %10.4e %10.4e \\n\" % (len(lt),10.**lt[0],10.**lt[-1]) )\n f.write( \" %d \\n\" % (1) )\n f.write( \" %d %10.4e %10.4e \\n\" % (len(lrho),10.**lrho[0],10.**lrho[-1]) )\n \n nsamples = int( (np.log10(wrange[1]) - np.log10(wrange[0]) )/dlw) + 1 \n f.write( \" %d %d %10.4e %10.4e \\n\" % (nsamples,inttab,wrange[0],wrange[1]) ) \n if binary == True: \n ibingr = 1\n else:\n ibingr = 0\n filename = \"'\"+filename+\"'\"\n f.write( \" %s %d \\n\" % (filename,ibingr) )\n f.close()\n\n return()\n\n\ndef write55(wrange,dw=1e-2,imode=0,hydprf=2,strength=1e-4,vmicro=0.0, \\\n linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atmostype='kurucz'):\n\n\n #imode,idst,iprin\n #inmod,zero,ichang,ichemc\n #lyman,zero,zero,zero,zero\n #one,nlte,icontl,zero,ifhe2\n #ihydpr,ihe1pr,ihe2pr\n #wstart,wend,cutoff,zero,strength,wdist \n\n if (atmostype == 'tlusty' or atmostype == 'marcs'): inmod = 1 \n else: inmod = 0\n\n f = open('fort.55','w')\n f.write(\" \"+str(imode)+\" \"+2*zero+\"\\n\")\n f.write(\" \"+str(inmod)+3*zero+\"\\n\")\n f.write(5*zero+\"\\n\")\n f.write(one+4*zero+\"\\n\")\n f.write(str(hydprf)+2*zero+\"\\n\")\n if imode == -3:\n f.write( ' %f %f %f %i %e %f \\n ' % (wrange[0], -wrange[1], 100., 2000, strength, dw) )\n else:\n f.write( ' %f %f %f %i %e %f \\n ' % (wrange[0], wrange[1], 200., 2000, strength, dw) )\n ll = len(linelist)\n if ll < 2: f.write(2*zero)\n else: f.write(str(ll-1) + ' ' + ' '.join(map(str,np.arange(ll-1)+20)))\n f.write(\"\\n\")\n f.write( ' %f \\n' % (vmicro) )\n f.close()\n\ndef write5(teff,logg,abu, atom='ap18', ofile='fort.5', nlte=False, tl=False):\n\n symbol, mass, sol = elements()\n\n f = open(ofile,'w')\n f.write(' '+str(teff)+\" \"+str(logg).format('%7.4f')+\" ! TEFF, GRAV \\n\")\n if nlte:\n f.write(\" F F ! LTE, GRAY \\n\")\n else:\n f.write(\" T F ! LTE, GRAY \\n\")\n f.write(\" 'tas' ! name of non-standard flags \\n\")\n f.write(\" 50 ! frequencies \\n\")\n\n if tl: \n natom = 30\n else:\n natom = len(abu)\n\n f.write(\" \"+str(natom)+\" ! NATOMS \\n\") \n\n assert (atom == 'hhm' or atom == 'ap18' or atom == 'yo19'), 'atom must be one of: hhm/ap18/yo19!'\n ex = np.ones(natom)\n if atom == 'hhm' : \n zex = [1] #atomic numbers of elements included explicitly (contributing cont. opacity)\n elif atom == 'yo19':\n zex = [1,11,12,19,20]\n elif atom == 'ap18': \n zex = [1,2,6,7,8,11,12,13,14,20,26]\n\n for i in zex: ex[i-1] = 2\n if nlte: ex[0] = -3\n\n for i in range(natom):\n f.write(' %2d %e %i %s\\n' % (ex[i], abu[i], 0, ' ! 
' +symbol[i]) )\n\n for i in range(3): f.write(\"* \\n\")\n \n if atom == 'hhm': # highly simplified continuum opacities -- just H and H-\n f.write(\" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \\n\" )\n f.write(\" 0 0 3 0 \\n\")\n f.write(\" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \\n\")\n f.write(\" 1 1 1 1 0 0 ' H 2' ' ' \\n\")\n f.write(\" 0 0 0 -1 0 0 ' ' ' ' \\n\")\n elif atom == \"yo19\": # set for NLTE calculations for APOGEE (see Osorio+ 2019 A&A paper)\n f.write(\"* ../data_atom for ions \\n\")\n f.write(\" 1 -1 1 0 0 1 ' H 0' 'data_atom/hm.dat' \\n\")\n f.write(\" 0 0 3 0 \\n\")\n f.write(\" 1 0 16 0 0 0 ' H 1' 'data_atom/h1_16lev2.dat' \\n\")\n f.write(\" 1 1 1 1 0 0 ' H 2' ' ' \\n\")\n f.write(\" 11 0 42 0 0 0 'Na 1' 'data_atom/NaIkas.tl' \\n\")\n f.write(\" 11 1 1 1 0 0 'Na 2' '' \\n\")\n f.write(\" 12 0 96 0 0 0 'Mg 1' 'data_atom/Mg1kas_F_ccc.tl' \\n\")\n f.write(\" 12 1 29 0 0 0 'Mg 2' 'data_atom/Mg2kas_F_ccc.tl' \\n\")\n f.write(\" 12 2 1 1 0 0 'Mg 3' ' ' \\n\")\n f.write(\" 19 0 31 0 0 0 'K 1' 'data_atom/KIkas.tl' \\n\")\n f.write(\" 19 1 1 1 0 0 'K 2' '' \\n\")\n f.write(\" 20 0 66 0 0 0 'Ca 1' 'data_atom/Ca1kas_F_zat.tl' \\n\")\n f.write(\" 20 1 24 0 0 0 'Ca 2' 'data_atom/Ca2kas_F_zat.tl' \\n\")\n f.write(\" 20 2 1 1 0 0 'Ca 3' ' ' \\n\")\n f.write(\" 0 0 0 -1 0 0 ' ' ' ' \\n\")\n elif atom == 'ap18': # generic set used in Allende Prieto+ (2018) A&A paper\n f.write(\"* ../data for ions \\n\")\n f.write(\" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \\n\")\n f.write(\" 0 0 3 0 \\n\")\n f.write(\" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \\n\")\n f.write(\" 1 1 1 1 0 0 ' H 2' ' ' \\n\")\n f.write(\" 2 0 14 0 0 0 'He 1' 'data/he1.dat' \\n\")\n f.write(\" 2 1 14 0 0 0 'He 2' 'data/he2.dat ' \\n\")\n f.write(\" 2 2 1 1 0 0 'He 3' ' ' \\n\")\n f.write(\" 6 0 104 0 0 0 ' C 1' 'data/c1.t' \\n\")\n f.write(\" 6 1 40 0 0 0 ' C 2' 'data/c2.t' \\n\")\n f.write(\" 6 2 1 1 0 0 ' C 3' ' ' \\n\")\n f.write(\" 7 0 89 0 0 0 ' N 1' 'data/n1.t' \\n\")\n f.write(\" 7 1 51 0 0 0 ' N 2' 'data/n2.t' \\n\")\n f.write(\" 7 2 1 1 0 0 ' N 3' ' ' \\n\")\n f.write(\" 8 0 54 0 0 0 ' O 1' 'data/o1.t' \\n\")\n f.write(\" 8 1 74 0 0 0 ' O 2' 'data/o2.t' \\n\")\n f.write(\" 8 2 1 1 0 0 ' O 3' ' ' \\n\")\n f.write(\" 11 0 32 0 0 0 'Na 1' 'data/na1.t' \\n\")\n f.write(\" 11 1 8 0 0 0 'Na 2' 'data/na2.t' \\n\")\n f.write(\" 11 2 1 1 0 0 'Na 3' ' ' \\n\")\n f.write(\" 12 0 71 0 0 0 'Mg 1' 'data/mg1.t' \\n\")\n f.write(\" 12 1 31 0 0 0 'Mg 2' 'data/mg2.t' \\n\")\n f.write(\" 12 2 1 1 0 0 'Mg 3' ' ' \\n\")\n f.write(\" 13 0 33 0 0 0 'Al 1' 'data/al1.t' \\n\")\n f.write(\" 13 1 81 0 0 0 'Al 2' 'data/al2.t' \\n\")\n f.write(\" 13 2 1 1 0 0 'Al 3' ' ' \\n\")\n f.write(\" 14 0 57 0 0 0 'Si 1' 'data/si1.t' \\n\")\n f.write(\" 14 1 46 0 0 0 'Si 2' 'data/si2.t' \\n\")\n f.write(\" 14 2 1 1 0 0 'Si 3' ' ' \\n\")\n f.write(\" 20 0 79 0 0 0 'Ca 1' 'data/ca1.t' \\n\")\n f.write(\" 20 1 32 0 0 0 'Ca 2' 'data/ca2.t' \\n\")\n f.write(\" 20 2 1 1 0 0 'Ca 3' ' ' \\n\")\n f.write(\" 26 0 49 0 0 0 'Fe 1' 'data/tlusty_fe1_topmod.dat' \\n\")\n f.write(\" 26 1 41 0 0 0 'Fe 2' 'data/tlusty_fe2_topmod.dat' \\n\")\n f.write(\" 26 2 1 1 0 0 'Fe 3' ' ' \\n\")\n f.write(\" 0 0 0 -1 0 0 ' ' ' ' \\n\")\n f.write(\"* \\n\")\n f.write(\"* end \\n\")\n f.close()\n\ndef write8(teff, logg, nd, atmos, atmostype, ofile='fort.8'):\n\n f = open(ofile,'w')\n if atmostype == 'tlusty':\n f.write(\" \"+str(nd)+\" \"+str(3)+\"\\n\")\n for i in range(nd):\n f.write(' %e ' % atmos['dm'][i])\n f.write(\"\\n\")\n for i in range(nd):\n f.write( '%f %e %e \\n' % (atmos['t'][i], 
atmos['ne'][i], atmos['rho'][i] ) )\n f.close()\n\n else:\n\n if atmostype == 'marcs':\n f.write(\" \"+str(nd)+\" \"+str(-4)+\"\\n\")\n for i in range(nd):\n f.write(' %e ' % atmos['dm'][i])\n f.write(\"\\n\")\n for i in range(nd):\n f.write( '%f %e %e %e \\n' % (atmos['t'][i], atmos['ne'][i], atmos['rho'][i], atmos['rho'][i]/atmos['mmw'][i]/1.67333e-24 + atmos['ne'][i] ) )\n f.close()\n\n else:\n f.write( 'TEFF %7.0f GRAVITY %7.5f LTE \\n' % (teff, logg) )\n for i in range(21): f.write('\\n')\n f.write( 'READ DECK6%3i RHOX,T,P,XNE \\n' % nd )\n for i in range(nd): \n f.write( '%e %f %e %e \\n' % (atmos['dm'][i], atmos['t'][i], atmos['p'][i], atmos['ne'][i]) )\n f.close()\n\n return()\n \n\ndef create_links(linelist):\n#create soft links for line lists, mand odel atom dir \n\n for i in range(len(linelist)):\n if not os.path.isfile(linelist[i]):\n ll = os.path.join(linelistdir,linelist[i])\n if os.path.isfile(ll): linelist[i] = ll\n if i == 0: os.symlink(linelist[0],'fort.19')\n else: os.symlink(linelist[i],'fort.'+str(20-1+i))\n\n os.symlink(modelatomdir,'./data')\n\n return()\n\ndef cleanup():\n#cleanup all temporary files\n\n files = os.listdir('.')\n for entry in files: \n if os.path.islink(entry) and entry.startswith('fort'): os.unlink(entry)\n if os.path.isfile(entry) and entry.startswith('fort'): os.remove(entry)\n\n if os.path.islink('data'): os.unlink('data')\n if os.path.isfile('tas'): os.remove('tas')\n assert (not os.path.isdir('data')), 'A subdirectory *data* exists in this folder, and that prevents the creation of a link to the data directory for synple'\n\n\n return()\n\n\ndef read_kurucz_model(modelfile):\n \n \"\"\"Reads a Kurucz model atmospheres\n \n Parameters\n ----------\n modelfile: str\n file name \n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density \n \n \"\"\"\n\n f = open(modelfile,'r')\n line = f.readline()\n entries = line.split()\n assert (entries[0] == 'TEFF' and entries[2] == 'GRAVITY'), 'Cannot find Teff and logg in the file header'\n teff = float(entries[1])\n logg = float(entries[3])\n\n while entries[0] != 'ABUNDANCE': \n line = f.readline()\n entries = line.split()\n\n abu = []\n\n if entries[1] == 'SCALE': \n scale = float(entries[2])\n \n\n while entries[0] == 'ABUNDANCE':\n i = 0\n for word in entries: \n if (word == 'CHANGE'): w = i\n i = i + 1 \n for i in range(int((len(entries)-w-1)/2)):\n z = int(entries[w+1+2*i])\n if (z == 1): nhntot = float(entries[w+2+2*i])\n if (z < 3): abu.append(float(entries[w+2+2*i]) / nhntot) \n else: abu.append(scale*10.**(float(entries[w+2+2*i])) / nhntot)\n\n line = f.readline()\n entries = line.split() \n\n assert (entries[0] == 'READ'), 'I cannot find the header of the atmospheric table in the input Kurucz model'\n\n nd = int(entries[2]) - 1\n line = f.readline()\n entries = line.split()\n line = f.readline()\n entries = line.split()\n vmicro = float(entries[6])/1e5\n\n dm = [ float(entries[0]) ]\n t = [ float(entries[1]) ]\n p = [ float(entries[2]) ]\n ne = [ float(entries[3]) ] \n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n dm.append( float(entries[0]))\n t.append( 
float(entries[1]))\n p.append( float(entries[2]))\n ne.append( float(entries[3]))\n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),\n 'formats':('f', 'f', 'f','f')}) \n atmos['dm'] = dm\n atmos['t'] = t\n atmos['p'] = p\n atmos['ne'] = ne\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\n\ndef read_marcs_model(modelfile):\n \n \"\"\"Reads a MARCS model atmospheres\n \n Parameters\n ----------\n modelfile: str\n file name. It can be a gzipped (.gz) file\n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density \n \n \"\"\" \n\n if modelfile[-3:] == '.gz':\n f = gzip.open(modelfile,'rt')\n else:\n f = open(modelfile,'r')\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'\n teff = float(entries[0])\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'\n logg = np.log10(float(entries[0]))\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'\n vmicro = float(entries[0])\n\n while entries[0] != 'Logarithmic': \n line = f.readline()\n entries = line.split()\n\n abu = []\n line = f.readline()\n entries = line.split()\n\n i = 0\n while entries[1] != 'Number':\n for word in entries: \n abu.append( 10.**(float(word)-12.0) )\n i = i + 1 \n line = f.readline()\n entries = line.split() \n\n if i < 99: \n for j in range(99-i):\n abu.append(1e-111)\n i = i + 1\n\n nd = int(entries[0])\n line = f.readline()\n entries = line.split()\n\n assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'\n\n line = f.readline()\n line = f.readline()\n entries = line.split()\n\n t = [ float(entries[4]) ]\n p = [ float(entries[6]) ]\n ne = [ float(entries[5]) / bolk / float(entries[4]) ] \n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n t.append( float(entries[4]))\n p.append( float(entries[6]))\n ne.append( float(entries[5]) / bolk / float(entries[4]))\n\n line = f.readline()\n line = f.readline()\n entries = line.split()\n\n dm = [ float(entries[-1]) ]\n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n dm.append( float(entries[7]))\n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),\n 'formats':('f', 'f', 'f','f')}) \n atmos['dm'] = dm\n atmos['t'] = t\n atmos['p'] = p\n atmos['ne'] = ne\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\ndef read_marcs_model2(modelfile):\n \n \"\"\"Reads a MARCS model atmospheres. \n While read_marcs_model returns T, Pg and Ne in the structure 'atmos'\n read_marcs_model2 returns T, rho, mmw, and Ne.\n \n Parameters\n ----------\n modelfile: str\n file name. 
It can be a gzipped (.gz) file\n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, density, \n mean molecular weight and electron number density \n \n \"\"\" \n\n if modelfile[-3:] == '.gz':\n f = gzip.open(modelfile,'rt')\n else:\n f = open(modelfile,'r')\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'\n teff = float(entries[0])\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'\n logg = np.log10(float(entries[0]))\n line = f.readline()\n entries = line.split()\n assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'\n vmicro = float(entries[0])\n\n while entries[0] != 'Logarithmic': \n line = f.readline()\n entries = line.split()\n\n abu = []\n line = f.readline()\n entries = line.split()\n\n i = 0\n while entries[1] != 'Number':\n for word in entries: \n abu.append( 10.**(float(word)-12.0) )\n i = i + 1 \n line = f.readline()\n entries = line.split() \n\n if i < 99: \n for j in range(99-i):\n abu.append(1e-111)\n i = i + 1\n\n nd = int(entries[0])\n line = f.readline()\n entries = line.split()\n\n assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'\n\n line = f.readline()\n line = f.readline()\n entries = line.split()\n\n t = [ float(entries[4]) ]\n p = [ float(entries[6]) ]\n ne = [ float(entries[5]) / bolk / float(entries[4]) ] \n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n t.append( float(entries[4]))\n p.append( float(entries[6]))\n ne.append( float(entries[5]) / bolk / float(entries[4]))\n\n line = f.readline()\n line = f.readline()\n entries = line.split()\n\n rho = [ float(entries[3]) ]\n dm = [ float(entries[7]) ]\n mmw = [ float(entries[4]) ]\n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n rho.append( float(entries[3]))\n dm.append( float(entries[7]))\n mmw.append( float(entries[4]))\n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'rho','mmw','ne'),\n 'formats':('f', 'f', 'f','f','f')}) \n atmos['dm'] = dm\n atmos['t'] = t\n atmos['rho'] = rho\n atmos['mmw'] = mmw\n atmos['ne'] = ne\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\n\ndef read_phoenix_model(modelfile):\n\n \"\"\"Reads a FITS Phoenix model atmospheres\n \n Parameters\n ----------\n modelfile: str\n file name \n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density \n \n \"\"\" \n\n from astropy.io import fits\n\n h = fits.open(modelfile)[0].header\n f = fits.open(modelfile)[1].data\n\n nd = len(f['temp'])\n\n teff = float(h['PHXTEFF'])\n logg = float(h['PHXLOGG'])\n vmicro = 
float(h['PHXXI_L'])\n\n m_h = float(h['PHXM_H'])\n alpha = float(h['PHXALPHA'])\n \n symbol, mass,sol = elements(husser=True) \n abu = sol \n z_metals = np.arange(97,dtype=int) + 3\n z_alphas = np.array([8,10,12,14,16,20,22],dtype=int)\n for i in range(len(z_metals)): abu[z_metals[i] - 1] = abu[z_metals[i] - 1] + m_h\n for i in range(len(z_alphas)): abu[z_alphas[i] - 1] = abu[z_alphas[i] - 1] + alpha\n \n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),\n 'formats':('f', 'f', 'f','f')}) \n\n atmos['dm'] = f['pgas'] / 10.**logg\n atmos['t'] = f['temp']\n atmos['p'] = f['pgas']\n atmos['ne'] = f['pe']/ bolk / f['temp']\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\n\ndef read_phoenix_text_model(modelfile):\n \n \n \"\"\"Reads a plain-text Phoenix model atmospheres\n \n Parameters\n ----------\n modelfile: str\n file name \n \n Returns\n -------\n\n teff : float\n effective temperature (K)\n logg : float\n log10 of the surface gravity (cm s-2)\n vmicro : float\n microturbulence velocity (km/s)\n abu : list\n abundances, number densities of nuclei relative to hydrogen N(X)/N(H)\n for elements Z=1,99 (H to Es)\n nd: int\n number of depths (layers) of the model\n atmos: numpy structured array\n array with the run with depth of column mass, temperature, gas pressure \n and electron density \n \n \"\"\" \n\n\n f = open(modelfile,'r')\n line = f.readline()\n while line[0:4] != \" no.\":\n line = f.readline()\n entries = line.split()\n nd = int(entries[5])\n print('nd=',nd)\n while line[0:14] != \" model: teff\":\n line = f.readline()\n entries = line.split()\n teff = float(entries[3])\n print('teff=',teff)\n line = f.readline()\n line = f.readline()\n entries = line.split()\n assert (entries[0] == 'log(g):' and entries[2] == '[cm/s**2]'), 'Cannot find logg in the file header'\n logg = float(entries[1])\n print('logg=',logg)\n line = f.readline()\n while line[0:22] != \" Element abundances :\": \n line = f.readline()\n\n\n symbol,mass,sol = elements()\n\n sy = []\n ab = []\n\n while line[0:29] != \" Element abundances relative\": \n line = f.readline()\n #print(line)\n if line[0:9] == ' element:':\n entries = line.split()\n for word in entries[1:]: sy.append(word)\n if line[0:11] == ' abundance:':\n entries = line.split()\n for word in entries[1:]: ab.append(word)\n\n assert (len(sy) == len(ab)), 'different elements in arrays sy (elemental symbols) and ab (abundances)'\n\n abu = np.ones(99)*1e-99\n i = 0\n for item in sy:\n try:\n index = symbol.index(item)\n abu[index] = 10.**(float(ab[i])-12.) \n except ValueError:\n print(\"the symbol \",item,\" is not recognized as a valid element\")\n i = i + 1\n\n print('abu=',abu)\n\n while line[0:72] != \" l tstd temperature pgas pe density mu\": \n line = f.readline()\n\n line = f.readline()\n entries = line.split()\n\n t = [ float(entries[2].replace('D','E')) ]\n p = [ float(entries[3].replace('D','E')) ]\n ne = [ float(entries[4].replace('D','E')) / bolk / float(entries[2].replace('D','E')) ] \n dm = [ float(entries[3].replace('D','E')) / 10.**logg ] #assuming hydrostatic equil. and negliglible radiation and turb. 
pressure\n\n for i in range(nd-1):\n line = f.readline()\n entries = line.split()\n\n t.append( float(entries[2].replace('D','E')))\n p.append( float(entries[3].replace('D','E')))\n ne.append( float(entries[4].replace('D','E')) / bolk / float(entries[2]))\n dm.append ( float(entries[3].replace('D','E')) / 10.**logg )\n\n vmicro = 0.0\n while (line[0:6] != \" greli\"):\n line = f.readline()\n if line == '':\n print('Cannot find a value for vmicro (vturb) in the model atmosphere file ',modelfile)\n break\n \n if line != '':\n entries = line.split()\n vmicro = float(entries[5])\n\n atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),\n 'formats':('f', 'f', 'f','f')}) \n atmos['dm'] = dm\n atmos['t'] = t\n atmos['p'] = p\n atmos['ne'] = ne\n\n return (teff,logg,vmicro,abu,nd,atmos)\n\ndef interp_spl(xout, x, y):\n\n \"\"\"Interpolates in 1D using cubic splines\n\n Parameters\n ----------\n x: numpy array or list\n input abscissae\n y: numpy array or list\n input ordinates \n xout: numpy array or list\n array of abscissae to interpolate to\n\n Returns\n -------\n yout: numpy array or list\n array of interpolated values\n\n \"\"\"\n\n tck = interpolate.splrep(x, y, s=0)\n yout = interpolate.splev(xout, tck, der=0)\n\n return(yout)\n\n\ndef elements(husser=False):\n \n \"\"\"Reads the solar elemental abundances\n \n Parameters\n ----------\n husser: bool, optional\n when set the abundances adopted for Phoenix models by Huser et al. (2013)\n are adopted. Otherwise Asplund et al. (2005) are used -- consistent with\n the MARCS (Gustafsson et al. 2008) models and and Kurucz (Meszaros et al. 2012)\n Kurucz model atmospheres.\n \n Returns\n -------\n symbol: numpy array of str\n element symbols\n mass: numpy array of floats\n atomic masses (elements Z=1-99)\n sol: numpy array of floats\n solar abundances N/N(H)\n \n \"\"\"\n\n symbol = [\n 'H' ,'He','Li','Be','B' ,'C' ,'N' ,'O' ,'F' ,'Ne', \n 'Na','Mg','Al','Si','P' ,'S' ,'Cl','Ar','K' ,'Ca', \n 'Sc','Ti','V' ,'Cr','Mn','Fe','Co','Ni','Cu','Zn', \n 'Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y' ,'Zr', \n 'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn', \n 'Sb','Te','I' ,'Xe','Cs','Ba','La','Ce','Pr','Nd', \n 'Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb', \n 'Lu','Hf','Ta','W' ,'Re','Os','Ir','Pt','Au','Hg', \n 'Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th', \n 'Pa','U' ,'Np','Pu','Am','Cm','Bk','Cf','Es' ]\n\n mass = [ 1.00794, 4.00260, 6.941, 9.01218, 10.811, 12.0107, 14.00674, 15.9994,\n 18.99840, 20.1797, 22.98977, 24.3050, 26.98154, 28.0855, 30.97376, \n 32.066, 35.4527, 39.948, 39.0983, 40.078, 44.95591, 47.867, 50.9415, \n 51.9961, 54.93805, 55.845, 58.93320, 58.6934, 63.546, 65.39, 69.723, \n 72.61, 74.92160, 78.96, 79.904, 83.80, 85.4678, 87.62, 88.90585, \n 91.224, 92.90638, 95.94, 98., 101.07, 102.90550, 106.42, 107.8682, \n 112.411, 114.818, 118.710, 121.760, 127.60, 126.90447, 131.29, \n 132.90545, 137.327, 138.9055, 140.116, 140.90765, 144.24, 145, 150.36, \n 151.964, 157.25, 158.92534, 162.50, 164.93032, 167.26, 168.93421, \n 173.04, 174.967, 178.49, 180.9479, 183.84, 186.207, 190.23, 192.217, \n 195.078, 196.96655, 200.59, 204.3833, 207.2, 208.98038, 209., 210., \n 222., 223., 226., 227., 232.0381, 231.03588, 238.0289, 237., 244., \n 243., 247., 247., 251., 252. ]\n\n if not husser:\n #Asplund, Grevesse and Sauval (2005), basically the same as \n #Grevesse N., Asplund M., Sauval A.J. 
2007, Space Science Review 130, 205\n sol = [ 0.911, 10.93, 1.05, 1.38, 2.70, 8.39, 7.78, 8.66, 4.56, 7.84, \n 6.17, 7.53, 6.37, 7.51, 5.36, 7.14, 5.50, 6.18, 5.08, 6.31, \n 3.05, 4.90, 4.00, 5.64, 5.39, 7.45, 4.92, 6.23, 4.21, 4.60, \n 2.88, 3.58, 2.29, 3.33, 2.56, 3.28, 2.60, 2.92, 2.21, 2.59, \n 1.42, 1.92, -9.99, 1.84, 1.12, 1.69, 0.94, 1.77, 1.60, 2.00, \n 1.00, 2.19, 1.51, 2.27, 1.07, 2.17, 1.13, 1.58, 0.71, 1.45, \n -9.99, 1.01, 0.52, 1.12, 0.28, 1.14, 0.51, 0.93, 0.00, 1.08, \n 0.06, 0.88, -0.17, 1.11, 0.23, 1.45, 1.38, 1.64, 1.01, 1.13,\n 0.90, 2.00, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06, \n -9.99, -0.52, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]\n\t \n sol[0] = 1.\n\n else:\n #a combination of meteoritic/photospheric abundances from Asplund et al. 2009\n #chosen for the Husser et al. (2013) Phoenix model atmospheres\n sol = [ 12.00, 10.93, 3.26, 1.38, 2.79, 8.43, 7.83, 8.69, 4.56, 7.93, \n 6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.08, 6.34, \n 3.15, 4.95, 3.93, 5.64, 5.43, 7.50, 4.99, 6.22, 4.19, 4.56, \n 3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.36, 2.87, 2.21, 2.58, \n 1.46, 1.88, -9.99, 1.75, 1.06, 1.65, 1.20, 1.71, 0.76, 2.04, \n 1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42, \n -9.99, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.92, \n 0.10, 0.85, -0.12, 0.65, 0.26, 1.40, 1.38, 1.62, 0.80, 1.17,\n 0.77, 2.04, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06, \n -9.99, -0.54, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]\n\n sol[0] = 1.\n for i in range(len(sol)-1): sol[i+1] = 10.**(sol[i+1]-12.0)\n\n return (symbol,mass,sol)\n\n\ndef lgconv(xinput, yinput, fwhm, ppr=None):\n\n \"\"\"convolution with a Gaussian in linear lambda scale\n for a constant resolution\n\n Parameters\n ----------\n xinput: numpy float array\n wavelengths \n yinput: numpy array of floats\n fluxes\n fwhm: float\n FWHM of the Gaussian (same units as for xinput)\n ppr: float, optional\n Points per resolution element to downsample the convolved spectrum\n (default None, to keep the original sampling)\n\n Returns\n -------\n x: numpy float array\n wavelengths after convolution, will be a subset of xinput when that is linear, \n otherwise a subset of the linearly resampled version\n y: numpy array of floats\n fluxes after convolution\n\n \"\"\"\n\n #resampling to a linear lambda wavelength scale if need be\n xx = np.diff(xinput)\n if max(xx) - min(xx) > 1.e-7: #input not linearly sampled\n nel = len(xinput)\n minx = np.min(xinput)\n maxx = np.max(xinput)\n x = np.linspace(minx,maxx,nel)\n #y = np.interp( x, xinput, yinput)\n y = interp_spl( x, xinput, yinput)\n else: #input linearly sampled\n x = xinput\n y = yinput\n\n step = x[1] - x[0]\n sigma=fwhm/2.0/np.sqrt(-2.0*np.log(0.5))\n npoints = 2*int(3*fwhm/2./step)+1\n half = npoints * step /2.\n xx = np.linspace(-half,half,npoints)\n kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)\n kernel = kernel/np.sum(kernel)\n\n y = np.convolve(y,kernel,'valid')\n #y = ss.fftconvolve(y,kernel,'valid')\n print(npoints)\n edge = int(npoints/2)\n x = x[edge:-edge]\n\n print(xinput.size,x.size,y.size)\n\n if ppr != None:\n fac = int(fwhm / step / ppr)\n subset = np.arange(x.size / fac, dtype=int) * fac \n x = x[subset]\n y = y[subset]\n\n return(x,y)\n\ndef vgconv(xinput,yinput,fwhm, ppr=None):\n\n\n \"\"\"convolution with a Gaussian in log lambda scale\n for a constant resolving power\n\n Parameters\n ----------\n xinput: numpy float array\n wavelengths \n yinput: numpy array of floats\n fluxes\n fwhm: float\n FWHM of 
the Gaussian (km/s)\n ppr: float, optional\n Points per resolution element to downsample the convolved spectrum\n (default None, to keep the original sampling)\n\n Returns\n -------\n x: numpy float array\n wavelengths after convolution, will be a subset of xinput when that is equidistant\n in log lambda, otherwise a subset of the resampled version\n y: numpy array of floats\n fluxes after convolution\n\n \"\"\"\n #resampling to ln(lambda) if need be\n xx = np.diff(np.log(xinput))\n if max(xx) - min(xx) > 1.e-7: #input not equidist in loglambda\n nel = len(xinput)\n minx = np.log(xinput[0])\n maxx = np.log(xinput[-1])\n x = np.linspace(minx,maxx,nel)\n step = x[1] - x[0]\n x = np.exp(x)\n #y = np.interp( x, xinput, yinput)\n y = interp_spl( x, xinput, yinput)\n else:\n x = xinput\n y = yinput\n step = np.log(xinput[1])-np.log(xinput[0])\n\n fwhm = fwhm/clight # inverse of the resolving power\n sigma=fwhm/2.0/np.sqrt(-2.0*np.log(0.5))\n npoints = 2*int(3*fwhm/2./step)+1\n half = npoints * step /2.\n xx = np.linspace(-half,half,npoints)\n kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)\n kernel = kernel/np.sum(kernel)\n\n y = np.convolve(y,kernel,'valid')\n edge = int(npoints/2)\n x = x[edge:-edge]\n\n #print(xinput.size,x.size,y.size)\n\n if ppr != None:\n fac = int(fwhm / step / ppr)\n print(fwhm,step,ppr,fac)\n subset = np.arange(x.size / fac, dtype=int) * fac \n x = x[subset]\n y = y[subset]\n\n\n return(x,y)\n\ndef rotconv(xinput,yinput,vsini, ppr=None):\n\n\n \"\"\"convolution with a Rotation profile \n\n Parameters\n ----------\n xinput: numpy float array\n wavelengths \n yinput: numpy array of floats\n fluxes\n vsini: float\n projected rotational velocity (km/s)\n ppr: float, optional\n Points per resolution element to downsample the convolved spectrum\n (default None, to keep the original sampling)\n\n Returns\n -------\n x: numpy float array\n wavelengths after convolution, will be a subset of xinput when that is equidistant\n in log lambda, otherwise a subset of the resampled version\n y: numpy array of floats\n fluxes after convolution\n\n \"\"\"\n\n #resampling to ln(lambda) if need be\n xx = np.diff(np.log(xinput))\n if max(xx) - min(xx) > 1.e-7: #input not equidist in loglambda\n nel = len(xinput)\n minx = np.min(np.log(xinput))\n maxx = np.max(np.log(xinput))\n x = np.linspace(minx,maxx,nel)\n step = x[1] - x[0]\n x = np.exp(x)\n #y = np.interp( x, xinput, yinput)\n y = interp_spl( x, xinput, yinput)\n else:\n x = xinput\n y = yinput\n\n deltamax=vsini/clight\n npoints = 2*int(deltamax/step)+1\n xx = np.linspace(-deltamax,deltamax,npoints)\n c1=2.0*(1.0-epsilon)/np.pi/(1.0-epsilon/3.0)/deltamax\n c2=0.5*epsilon/(1.0-epsilon/3.0)/deltamax\n r2=(xx/deltamax)**2\n kernel = c1*np.sqrt(1.0-r2)+c2*(1.0-r2)\n kernel = kernel/np.sum(kernel)\n\n\n y = np.convolve(y,kernel,'valid')\n print(xinput.size,x.size,y.size)\n edge = int(npoints/2)\n x = x[edge:-edge]\n\n if ppr != None:\n fac = int(deltamax / step / ppr)\n subset = np.arange(x.size / fac, dtype=int) * fac \n x = x[subset]\n y = y[subset]\n\n return(x,y)\n\ndef gsynth(synthfile,fwhm=0.0,outsynthfile=None,ppr=5,wrange=None,freeze=None):\n\n \"\"\"Smooth the spectra in a FERRE grid by Gaussian convolution\n\n Parameters\n ----------\n synthfile: str\n name of the input FERRE synth file \n fwhm: float\n FWHM of the Gaussian kernel (km/s) \n (default is 0.0, which means no convolution is performed)\n outsynthfile: str\n name of the output FERRE synth file\n (default is the same as synth file, but starting with 'n')\n ppr: float, 
optional\n Points per resolution element to downsample the convolved spectrum\n (default is 5, set to None to keep the original sampling)\n wrange: tuple\n Starting and ending wavelengths (if a smaller range that \n the input's is desired)\n (default None, to keep the original range)\n freeze: dictionary\n Allows to reduce the dimensionality of the grid. The keys are the labels\n of the dimensions to freeze (as given in in the header of the input grid) \n with the values that should be adopted for those 'frozen' dimensions. \n Example: set freeze = {'TEFF': 5000.} to fix that value for the Teff dimension\n in a grid.\n (default None, to retain all the original dimensions)\n Returns\n -------\n writes outsynthfile with the smooth spectra\n\n \"\"\"\n\n if outsynthfile is None: outsynthfile='n'+synthfile[1:]\n logw=0\n\n #read header, update and write out\n fin = open(synthfile,'r')\n fout = open(outsynthfile,'w')\n hd = []\n labels = []\n line = fin.readline()\n hd.append(line)\n while line[1] != \"/\":\n line = fin.readline()\n if \"N_P\" in line: n_p = np.array(line.split()[2:],dtype=int)\n if \"STEPS\" in line: steps = np.array(line.split()[2:],dtype=float)\n if \"LLIMITS\" in line: llimits = np.array(line.split()[2:],dtype=float)\n if \"LABEL\" in line: labels.append(line.split()[-1][1:-1])\n if \"NPIX\" in line: npix = int(line.split()[2])\n if \"N_OF_DIM\" in line: ndim = int(line.split()[2])\n if \"WAVE\" in line: wave = np.array(line.split()[2:],dtype=float)\n if \"LOGW\" in line: logw = int(line.split()[2]) \n if \"RESOLUTION\" in line: resolution = float(line.split()[2])\n hd.append(line)\n\n assert (len(n_p) == len(steps) & len(n_p) == len(llimits) & len(n_p) == len(labels) & len(n_p) == ndim), 'The dimension of the parameters from the header are inconsistent'\n\n #update header parameters\n x = np.arange(npix)*wave[1]+wave[0]\n if logw == 1: x=10.**x\n if logw == 2: x=np.exp(x)\n \n #define indices for grid loops\n ll = []\n ind_n_p = []\n i = 0\n for entry in labels:\n if freeze is not None: \n lfkeys = list(freeze.keys())\n if entry not in lfkeys: ind_n_p.append(i)\n else:\n ind_n_p.append(i)\n ll.append(np.arange(n_p[i]))\n i = i + 1\n ind = list(product(*ll))\n \n if wrange is not None:\n assert (len(wrange) == 2), 'Error: wrange must have two elements'\n section1 = np.where( (x >= wrange[0]*(1.-10.*fwhm/clight)) & (x <= wrange[1]*(1.+10.*fwhm/clight)) )\n x = x[section1]\n npix = len(x)\n \n if fwhm > 1.e-7:\n y = np.ones(npix)\n xx,yy = vgconv(x,y,fwhm,ppr=ppr)\n else:\n print('Warning -- fwhm <= 1.e-7, no convolution will be performed, ppr will be ignored')\n xx = x\n \n print(len(x),len(xx))\n \n if wrange is not None: \n section2 = np.where( (xx >= wrange[0]) & (xx <= wrange[1]) ) \n xx = xx [section2]\n \n #print(x,xx)\n #print(len(x),len(xx))\n \n jlabel = 0\n for line in hd:\n if \"N_OF_DIM\" in line: line = \" N_OF_DIM = \"+str(len(ind_n_p))+\"\\n\" \n if \"N_P\" in line: line = \" N_P = \"+' '.join(map(str,n_p[ind_n_p]))+\"\\n\" \n if \"STEPS\" in line: line = \" STEPS = \"+' '.join(map(str,steps[ind_n_p]))+\"\\n\" \n if \"LLIMITS\" in line: line = \" LLIMITS = \"+' '.join(map(str,llimits[ind_n_p]))+\"\\n\"\n if freeze is not None:\n if \"LABEL\" in line:\n ilabel = line.split()[-1][1:-1] #drop starting/ending quotes\n if ilabel in lfkeys:\n continue\n else:\n jlabel = jlabel + 1\n line = \" LABEL(\"+str(jlabel)+\") = \"+ilabel+\"\\n\"\n if \"NPIX\" in line: line = \" NPIX = \"+str(len(xx))+\"\\n\"\n if \"WAVE\" in line: line = \" WAVE = \"+str(np.log10(xx[0]))+\" 
\"+str(np.log10(xx[1])-np.log10(xx[0]))+\"\\n\"\n if \"LOGW\" in line: line = \" LOGW = 1 \\n\"\n if \"RESOLUTION\" in line: line = \" RESOLUTION = \"+str(clight/np.sqrt(clight**2/resolution**2 + fwhm**2))+\"\\n\"\n fout.write(line)\n\n #smooth and write data\n k = 0\n j = 0\n ntot = np.prod(n_p)\n for i in ind:\n j = j + 1\n print('line ',j,' of ',ntot)\n #print(k,ntot,i)\n #print(i,steps,llimits)\n par = i*steps+llimits\n line = fin.readline()\n if freeze is not None:\n skip = True\n for entry in lfkeys: \n if (abs(freeze[entry] - par[labels.index(entry)]) < 1e-6): skip = False\n if skip: continue\n y = np.array(line.split(),dtype=float)\n if wrange is not None: y = y [section1]\n if fwhm > 1.e-7:\n xx,yy = vgconv(x,y,fwhm,ppr=ppr)\n else:\n xx,yy = x, y \n if wrange is not None: yy = yy[section2]\n yy.tofile(fout,sep=\" \",format=\"%0.4e\")\n fout.write(\"\\n\")\n k = k + 1\n\n fin.close()\n fout.close()\n\nif __name__ == \"__main__\":\n\n npar = len(sys.argv)\n assert (npar >= 4), 'Synple requires at least 3 input parameters (modelfile wstart wend)'\n assert (npar <= 7), 'Synple requires at maximum 6 input parameters (modelfile wstart wend vmicro vrot fwhm)'\n vmicro = None\n vrot = 0.0\n fwhm = 0.0\n modelfile = sys.argv[1]\n wstart = float(sys.argv[2])\n wend = float(sys.argv[3])\n if (npar > 4): \n vmicro = float(sys.argv[4])\n if (npar > 5):\n fwhm = float(sys.argv[5])\n if (npar > 6):\n vrot = float(sys.argv[6])\n\n #symbol, mass, sol = elements()\n x, y, z = syn(modelfile, (wstart,wend), save=True, vmicro=vmicro, vrot=vrot, fwhm=fwhm)\n\n\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.diff",
"numpy.savetxt",
"numpy.log",
"numpy.concatenate",
"scipy.interpolate.splev",
"numpy.log10",
"numpy.where",
"numpy.linspace",
"numpy.mean",
"numpy.sqrt",
"numpy.zeros",
"numpy.arange",
"numpy.max",
"scipy.interpolate.splrep",
"numpy.min",
"numpy.prod",
"numpy.interp",
"numpy.exp",
"numpy.array",
"numpy.convolve",
"numpy.loadtxt"
]
] |
phil-lo/pyportlib | [
"3fbe7460c809a80e48615e934990dcd2d1f5003b"
] | [
"pyportlib/services/cash_manager.py"
] | [
"from datetime import datetime\nfrom typing import List, Union\nimport pandas as pd\n\nfrom pyportlib.services.cash_change import CashChange\nfrom pyportlib.utils import df_utils, files_utils\nfrom pyportlib.utils import logger\n\n\nclass CashManager:\n NAME = \"Cash Account\"\n ACCOUNTS_DIRECTORY = files_utils.get_accounts_dir()\n CASH_INFO = ['Date', 'Direction', 'Amount']\n CASH_FILENAME = \"cash.csv\"\n\n def __init__(self, account):\n self.account = account\n self.directory = f\"{self.ACCOUNTS_DIRECTORY}{self.account}\"\n self._cash_changes = pd.DataFrame()\n self.load()\n\n def __repr__(self):\n return self.NAME\n\n def load(self) -> None:\n \"\"\"\n Loads account cash changes from .csv of creates empty one if it is a new account\n :return:\n \"\"\"\n if files_utils.check_file(self.directory, self.CASH_FILENAME):\n cash = pd.read_csv(f\"{self.directory}/{self.CASH_FILENAME}\")\n try:\n cash.drop(columns='Unnamed: 0', inplace=True)\n except KeyError:\n pass\n finally:\n if df_utils.check_df_columns(df=cash, columns=self.CASH_INFO):\n cash.set_index('Date', inplace=True)\n cash.index.name = 'Date'\n cash.index = pd.to_datetime(cash.index)\n self._cash_changes = cash\n else:\n logger.logging.info(f'cash file does not match requirements: {self.account}')\n else:\n # if new ptf, create required files to use it\n if not files_utils.check_dir(self.directory):\n files_utils.make_dir(self.directory)\n # create empty transaction file in new directory\n empty_cash = self._empty_cash()\n empty_cash.to_csv(f\"{self.directory}/{self.CASH_FILENAME}\")\n self._cash_changes = empty_cash\n\n @property\n def cash_changes(self):\n return self._cash_changes\n\n def get_cash_change(self, date: datetime):\n c_ch = self.cash_changes\n return c_ch.loc[self.cash_changes.index <= date, 'Amount'].sum()\n\n def _write(self, date: datetime, direction: str, amount: float):\n direction = direction.title()\n if direction not in ['Deposit', 'Withdrawal']:\n raise Exception(f'cash direction type not supported {direction}')\n\n self.cash_changes.loc[date, \"Direction\"] = direction\n self.cash_changes.loc[date, \"Amount\"] = amount\n\n self.cash_changes.to_csv(f\"{self.directory}/{self.CASH_FILENAME}\")\n self.load()\n\n def add(self, cash_changes: Union[List[CashChange], CashChange]):\n if cash_changes:\n if not hasattr(cash_changes, '__iter__'):\n cash_changes = [cash_changes]\n\n for cc in cash_changes:\n cc = cc.info\n self._write(date=cc[\"Date\"], direction=cc['Direction'], amount=cc['Amount'])\n\n def reset(self):\n empty_cash = self._empty_cash()\n empty_cash.to_csv(f\"{self.directory}/{self.CASH_FILENAME}\")\n self._cash_changes = empty_cash\n\n def _empty_cash(self):\n return pd.DataFrame(columns=self.CASH_INFO).set_index('Date')\n\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
covid-19-impact-lab/sid | [
"d867f55d4d005b01c672bd2edd0e1dc974cb182b"
] | [
"tests/test_parse_model.py"
] | [
"from contextlib import ExitStack as does_not_raise # noqa: N813\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom sid.config import DEFAULT_VIRUS_STRAINS\nfrom sid.config import INITIAL_CONDITIONS\nfrom sid.parse_model import parse_duration\nfrom sid.parse_model import parse_initial_conditions\nfrom sid.parse_model import parse_virus_strains\n\n\[email protected]\[email protected](\n \"duration, expectation, expected\",\n [\n (\n {\"start\": \"2020-01-01\", \"end\": \"2020-01-02\"},\n does_not_raise(),\n {\n \"start\": pd.Timestamp(\"2020-01-01\"),\n \"end\": pd.Timestamp(\"2020-01-02\"),\n \"dates\": pd.DatetimeIndex(pd.to_datetime([\"2020-01-01\", \"2020-01-02\"])),\n },\n ),\n (\n {\"start\": \"2020-01-01\", \"periods\": 2},\n does_not_raise(),\n {\n \"start\": pd.Timestamp(\"2020-01-01\"),\n \"end\": pd.Timestamp(\"2020-01-02\"),\n \"dates\": pd.DatetimeIndex(pd.to_datetime([\"2020-01-01\", \"2020-01-02\"])),\n },\n ),\n (\n {\"start\": \"2020-01-01\", \"periods\": 2, \"freq\": \"s\"},\n pytest.warns(UserWarning, match=\"Only 'start', 'end', and 'periods'\"),\n {\n \"start\": pd.Timestamp(\"2020-01-01\"),\n \"end\": pd.Timestamp(\"2020-01-02\"),\n \"dates\": pd.DatetimeIndex(pd.to_datetime([\"2020-01-01\", \"2020-01-02\"])),\n },\n ),\n ({\"periods\": 2}, pytest.raises(ValueError, match=\"Of the four\"), None),\n ],\n)\ndef test_parse_duration(duration, expectation, expected):\n with expectation:\n result = parse_duration(duration)\n for k in result:\n if k == \"dates\":\n assert np.all(result[k] == expected[k])\n else:\n assert result[k] == expected[k]\n\n\[email protected]\[email protected](\n (\n \"initial_conditions\",\n \"start_date_simulation\",\n \"virus_strains\",\n \"expectation\",\n \"expected\",\n ),\n [\n (\n None,\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n does_not_raise(),\n {**INITIAL_CONDITIONS, \"virus_shares\": {\"base_strain\": 1.0}},\n ),\n (\n {\"assort_by\": [\"region\"]},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n does_not_raise(),\n {\n **INITIAL_CONDITIONS,\n \"assort_by\": [\"region\"],\n \"virus_shares\": {\"base_strain\": 1.0},\n },\n ),\n (\n {\"assort_by\": \"region\"},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n does_not_raise(),\n {\n **INITIAL_CONDITIONS,\n \"assort_by\": [\"region\"],\n \"virus_shares\": {\"base_strain\": 1.0},\n },\n ),\n (\n {\"growth_rate\": 0},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n pytest.raises(ValueError, match=\"'growth_rate' must be greater than or\"),\n None,\n ),\n (\n {\"burn_in_periods\": 0},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n pytest.raises(ValueError, match=\"'burn_in_periods' must be greater or\"),\n None,\n ),\n (\n {\"burn_in_periods\": 2.0},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n pytest.raises(ValueError, match=\"'burn_in_periods' must be an integer\"),\n None,\n ),\n (\n {\"initial_infections\": None},\n pd.Timestamp(\"2020-01-02\"),\n {\"names\": [\"base_strain\"], \"factors\": np.ones(1)},\n pytest.raises(ValueError, match=\"'initial_infections' must be a\"),\n None,\n ),\n ],\n)\ndef test_parse_initial_conditions(\n initial_conditions, start_date_simulation, virus_strains, expectation, expected\n):\n with expectation:\n result = parse_initial_conditions(\n initial_conditions, 
start_date_simulation, virus_strains\n )\n expected[\"burn_in_periods\"] = pd.DatetimeIndex([pd.Timestamp(\"2020-01-01\")])\n assert result == expected\n\n\[email protected]\[email protected](\n \"virus_strains, params, expectation, expected\",\n [\n pytest.param(None, None, does_not_raise(), DEFAULT_VIRUS_STRAINS, id=\"default\"),\n pytest.param(\n [],\n None,\n pytest.raises(ValueError, match=\"The list of\"),\n None,\n id=\"empty list\",\n ),\n pytest.param(\n [\"b117\"],\n pd.DataFrame(\n index=pd.MultiIndex.from_tuples(\n [], names=[\"category\", \"subcategory\", \"value\"]\n )\n ),\n pytest.raises(ValueError, match=\"Some factors for the infectiousness\"),\n None,\n id=\"missing param\",\n ),\n pytest.param(\n [\"wild_strain\", \"b117\"],\n pd.DataFrame(\n index=pd.MultiIndex.from_tuples(\n [\n (\"virus_strains\", \"wild_strain\", \"factor\"),\n (\"virus_strains\", \"b117\", \"factor\"),\n ],\n names=[\"category\", \"subcategory\", \"value\"],\n ),\n ),\n pytest.raises(ValueError, match=\"Some factors for the infectiousness\"),\n {\"names\": [\"b117\", \"wild_strain\"]},\n id=\"usual parsing\",\n ),\n pytest.param(\n set(),\n None,\n pytest.raises(ValueError, match=\"'virus_strains' is not 'None'\"),\n None,\n id=\"wrong input\",\n ),\n ],\n)\ndef test_parse_virus_strains(virus_strains, params, expectation, expected):\n with expectation:\n result = parse_virus_strains(virus_strains, params)\n\n assert result[\"names\"] == expected[\"names\"]\n assert \"factors\" not in result\n"
] | [
[
"numpy.ones",
"pandas.to_datetime",
"numpy.all",
"pandas.MultiIndex.from_tuples",
"pandas.Timestamp"
]
] |
hamhochoisg/moneydetection | [
"32a02f54a4a0c1a6f41a232fa30a3f0f15bdab13"
] | [
"main.py"
] | [
"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport os\nfrom tensorflow.keras.preprocessing import image \n\nst.title('Banknotes Classification')\nmenu = ['Home','Up Load & Predict', 'Capture From Webcam']\n\n#========================#\n#==== Function #=========#\nModel_Path = 'model\\my_model_checkpoint.h5'\nclass_names = ['1000', '10000', '100000', '2000', '20000', '200000', '5000', '50000', '500000']\n\ndef get_saved_model(Model_Path):\n # Learning Rate maybe decrease so quick => start with 0.01\n restored_model = tf.keras.models.load_model(Model_Path)\n\n # Show the model architecture\n # restored_model.summary() #print in terminal\n return restored_model\n\ndef predict_image(image_path): #input and image show prediction label, reutrn string value of prediction\n model = get_saved_model(Model_Path)\n #Preprocess image:\n img = image.load_img(image_path, target_size=(224, 224))\n img_array = image.img_to_array(img)\n img_array = np.expand_dims(img_array, axis=0) #predict nhận theo batch (1,224,224,3)\n\n #Prediction:\n \n prediction = model.predict(img_array)\n index = prediction.argmax()\n l = list(prediction)\n tmp_percent = l[0][index]*100\n\n pred = class_names[index]\n st.write('model prediction:')\n st.write(pred)\n st.write('Model Propotion:')\n st.write(tmp_percent)\n\ndef predict_image_array(img_array): #input and image array with shape = (1,224,224,3) show prediction label, reutrn string value of prediction\n model = get_saved_model(Model_Path)\n \n prediction = model.predict(img_array)\n index = prediction.argmax()\n l = list(prediction)\n tmp_percent = l[0][index]*100\n\n pred = class_names[index]\n st.write('model prediction:')\n st.write(pred)\n st.write('Model Propotion:')\n st.write(tmp_percent)\n \n print(l)\n\n return l,index\n\n#========================#\n\nchoice = st.sidebar.selectbox('Danh mục', menu)\n\nif choice == 'Home':\n st.title('This is Home Page')\n st.write('Xin chào, đây là ứng dụng phân loại tiền')\n \n # Get The current Path\n current_path = os.getcwd()\n st.write('current path:')\n st.write(current_path)\n\n #Load Model\n st.write('This is our model:')\n # model = get_saved_model(Model_Path) \n test_image_path = \"media\\\\test\\\\500000\\\\Sự-thật-về-cách-đoán-3-số-Seri-tiền-500k-200k-100k-50k-20k-10k.jpg\"\n \n #Show Image\n st.write('For Example Below Image')\n st.image(test_image_path,use_column_width='auto')\n st.write(\"Model Can Understand This Value\") \n\n #Prediction: \n # predict_image(test_image_path)\n \n\nelif choice == 'Up Load & Predict':\n st.title('Please Upload Your Banknotes Image, I Can Understand it:')\n photo_uploaded = st.file_uploader('Choose your banknotes photo', ['png', 'jpg', 'jpeg'])\n if photo_uploaded != None:\n \n image_np = np.asarray(bytearray(photo_uploaded.read()), dtype=np.uint8)\n # print(image_np)\n # print(image_np.shape)\n img = cv2.imdecode(image_np, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) \n print(img.shape)\n\n st.image(img)\n st.write(photo_uploaded.size)\n st.write(photo_uploaded.type)\n\n #Then Predict it\n img = cv2.resize(img, (224,224), interpolation = cv2.INTER_AREA)\n img_array = np.expand_dims(img, axis=0)\n # print(img_array.shape)\n print(type(img))\n\n predict_image_array(img_array)\n\nelif choice == 'Capture From Webcam':\n cap = cv2.VideoCapture(0) # device 0\n run = st.checkbox('Show Webcam')\n capture_button = st.checkbox('Campture')\n quit_button = st.checkbox('Quit')\n # Check if the webcam 
is opened correctly\n if not cap.isOpened():\n raise IOError(\"Cannot open webcam\")\n\n \n FRAME_WINDOW = st.image([])\n\n # Keep reading images from webcam until press 'q'\n while run:\n ret, frame = cap.read() \n \n # Display Webcam\n # cv2.imshow('My App!', frame)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB ) #Convert màu cho đúng\n\n FRAME_WINDOW.image(frame)\n\n if capture_button: # press \"c\" => capture\n # save the current frame and predict\n cap.release() # Thử release ra liền để lấy cái hình hiện tại\n cv2.destroyAllWindows()\n print('Frame shape',frame.shape)\n captured_image = frame\n # captured_image = cv2.cvtColor(captured_image, cv2.COLOR_BGR2RGB ) #Đã convert ở trên rồi \n \n st.image(captured_image)\n st.write('Model is predicting it:')\n captured_image = cv2.resize(captured_image, (224,224))\n img_array = np.expand_dims(captured_image, axis=0)\n predict_image_array(img_array)\n\n run = False\n capture_button = False\n\n if quit_button: # press \"q\" => quit\n run = False\n capture_button = False\n quit_button = False\n # break\n\n cap.release()\n cv2.destroyAllWindows()\n\n # if captured_image.shape != None:\n # captured_image = cv2.cvtColor(captured_image, cv2.COLOR_BGR2RGB ) \n # st.write('Image That Captured')\n # st.image(captured_image)\n # captured_image = cv2.resize(captured_image, (224,224))\n\n # if captured_image.shape != None:\n # st.write('Image That Captured')\n # st.image(captured_image)\n # captured_image = cv2.resize(captured_image, (224,224))\n # print('Captured Image Shape:',captured_image.shape)\n # print('Captured Image Type:',type(captured_image)) \n # img_array = np.expand_dims(captured_image, axis=0)\n # predict_image_array(img_array)\n\n\n\n"
] | [
[
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.preprocessing.image.load_img",
"numpy.expand_dims",
"tensorflow.keras.models.load_model"
]
] |
ssawwqdf/-project-stock_info_dashboard | [
"f14a462d915d2207db1da12307aefdef4b6921e1"
] | [
"code_cr.py"
] | [
"import re\nimport numpy as np\nimport pandas as pd\nimport requests #웹통신\nimport json\nfrom pmdarima.arima import ndiffs\nimport pmdarima as pm\nfrom pykrx import stock\nfrom bs4 import BeautifulSoup\nimport html5lib\n\n\n# ==============\n# 업종 분류\n# ==============\n# -------- 동일 업종 기업 출력\n# TODO(미완성) 동일 업종 선택\ndef select_same_industry(corp_name):\n indus=com_df[com_df['nm']==corp_name]['industry'].values[0] # TODO(df 확인)\n\n # print(com_df.groupby(by='industry')['nm'].nunique().max()) # 동종업계 최대 151개 -> 151개 재무제표 크롤링?\n\n list_com=com_df[com_df['industry']==indus]['corp_name'].values.tolist()\n return list_com\n\n\n\n# -------- 네이버증권 연관기업 코드(hjh)\ndef relate_code_crawl(co):\n #연관 종목코드 있는 페이지 불러오기\n url='https://finance.naver.com/item/main.naver?code='+str(co)\n page=pd.read_html(url,encoding='CP949')\n #연관 종목명과 종목코드 뽑아내기(code_list[0]은 '종목명'이어서 제외)\n code_list=page[4].columns.tolist()\n code_list=code_list[1:]\n #종목코드 리스트 반환\n codes=[]\n for word in (code_list):\n codes.append(word[-6:])\n #print(codes)\n return codes\n\n#relate_code_crawl('000660')\n\n\n\n# ==============\n# 기업 이름 코드 변환\n# ==============\n\n# -------- 네이버 재무제표 크롤링 용 gicode로 변환\ndef nm_to_bs_gicode(corp_name):\n gi=com_df[com_df['nm']==corp_name]['cd']\n gi=gi.values[0]\n return gi\n\n\n\ndef stc_code_to_bs_gicode(stock_code):\n gi = com_df[com_df['stock_code'] == stock_code]['cd']\n gi = gi.values[0]\n return gi\n\n\n\ndef yh_code_to_bs_gicode(yh_code):\n gi = com_df[com_df['yh_code'] == yhcode]['cd']\n gi = gi.values[0]\n return gi\n\n\n\n# -------- 네이버 금융 크롤링 용 gicode로 변환\ndef nm_to_fn_gicode(corp_name):\n gi=com_df[com_df['nm']==corp_name]['stock_code']\n gi=gi.values[0]\n return gi\n\n\n\ndef yh_code_to_fn_gicode(yh_code):\n gi=com_df[com_df['yh_code']==yh_code]['stock_code']\n gi=gi.values[0]\n return gi\n\n\n\n# -------- 코드를 기업이름으로 변환\ndef stc_code_to_nm(stock_code):\n gi = com_df[com_df['stock_code'] == stock_code]['nm']\n gi = gi.values[0]\n return gi\n\n\n\ndef yh_code_to_nm(yh_code):\n gi = com_df[com_df['yh_code'] == yh_code]['nm']\n gi = gi.values[0]\n return gi\n\n\n\n# ==============\n# 데이터 수집\n# ==============\n\n\n# -------- Balance Sheets API call\n# def bs_api(corp_name=None, yh_code=None, stock_code=None):\n# print('haha')\n\n\n\n\n# -------- Balance Sheets Crawling(재무제표 크롤링)\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) kind로 특정 테이블 지정하는 대신 데이터프레임 리스트 전체 반환\n# 3) '~계산에 참여한 계정 펼치기' 제거는 선택사항으로 둠\n\ndef bs_craw(stock_code, clear_name=False): # ------- 검색과 연동해서 입력 변수 설정\n \"\"\"\n # kind\n : 0 (연간 포괄손익계산서), 1 (분기별 포괄손익계산서)\n 2 (연간 재무상태표), 3 (분기별 재무상태표)\n 4 (연간 현금흐름표), 5 (분기별 현금프름표)\n \"\"\"\n\n # ------- 검색과 연동해서 입력되는 변수 따라 gicode(네이버에서 분류하는 기업 코드)로 변환\n gcode = stc_code_to_bs_gicode(stock_code)\n\n url = f\"http://comp.fnguide.com/SVO2/ASP/SVD_Finance.asp?NewMenuID=103&gicode={gcode}\"\n\n table_list = pd.read_html(url, encoding='UTF-8')\n\n # 항목에서 불필요한 부분 제거('계산에 참여한 계정 펼치기')\n if clear_name == False:\n return table_list\n\n else:\n new_table_list = []\n for tbl in table_list:\n for i, idx in enumerate(tbl.iloc[:, 0]):\n m = idx.replace('계산에 참여한 계정 펼치기', '')\n tbl.iloc[i, 0] = m\n new_table_list.append(tbl)\n return new_table_list\n\n\n# ------- 네이버 금융\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) kind로 특정 테이블 지정하는 대신 데이터프레임 리스트 전체 반환\ndef fn_craw(stock_code):\n \"\"\"\n # kind\n : 0 (전일&당일 상한가, 하한가, 거래량 등) #TODO 가공 필요\n 1 (증권사 별 매도 매수 정보) #TODO 가공 필요(컬럼이름)\n 2 (외국인, 기관 거래 정보) #TODO 가공 필요\n 3 (기업실적분석(연도별 분기별 주요재무 정보)) #TODO 가공 필요?\n 4 (동일업종비교) #TODO 가공 필요?\n 5 (시가총액, 주식수, 액면가 정보) #TODO 가공 필요\n 6 (외국인 주식 
한도, 보유 정보)\n 7 (목표주가 정보) #TODO 가공 필요\n 8 (PER, PBR 배당수익률 정보) (주가 따라 변동) #TODO 가공 필요\n 9 (동일업종 PER, 등락률 정보) #TODO 가공 필요\n 10 (호가 10단계)\n 11 (인기 검색 종목: 코스피) #TODO 가공 필요\n 12 (인기 검색 종목: 코스닥) #TODO 가공 필요\n \"\"\"\n\n gcode = str(stock_code)\n\n url = f\"https://finance.naver.com/item/main.naver?code={gcode}\"\n table_list = pd.read_html(url, encoding='euc-kr')\n\n return table_list\n\n# ==============\n# 지표 선정\n# ==============\n\n# 220222 날씨 수정 시작 ---------------------------------------------\n\n# -------- 지표 선정\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) 데이터프레임 하나가 아닌 리스트로 받아오기때문에 kind 제거하고 직접 선택해줌\n# 3) sli_df_y, sil_df_q 에서 '-' 가공 시 if 조건에 따라 처리하는 대신 lambda와 re.sub 이용\n# 4) dict 대신 array로 반환, 기업 이름(nm도 반환)\ndef idv_radar_weather_data(stock_code):\n \"\"\"\n # <지표 설명>\n # 1. 배당 분석 -> 배당성향(배당 커버리지의 역수.)\n # 2. 유동성 분석(단기채무지급능력) -> 당좌비율(당좌자산 / 유동부채)\n # 3. 재무건전성 분석(레버리지 비율) -> 부채비율(총부채 / 자기자본)의 역수\n # 4. 수익성분석 -> 매출수익성(당기순이익/매출액))\n # 5. 성장성분석 -> 순이익성장률\n \"\"\"\n\n gcode = stock_code\n nm = stc_code_to_nm(stock_code)\n\n sil_df = fn_craw(gcode)[3] # 3: 기업실적정보 재무제표 (220220 수정)\n foreign_ms = fn_craw(gcode)[2].loc[1, '외국인'] # 2 : 외국인, 기관 거래 정보\n giguan_ms = fn_craw(gcode)[2].loc[1, '기관'] # 2 : 외국인, 기관 거래 정보\n\n if (sil_df.iloc[0:8, 3].isna().sum()) > 0: # 표 안 가르고 계산하는 건 신규 상장 기업은 정보가 아예 없기 때문\n pass\n elif (sil_df.iloc[0:8, 9].isna().sum()) > 0: # 표 안 가르고 계산하는 건 신규 상장 기업은 정보가 아예 없기 때문\n pass\n\n\n else:\n # 0. 재무정보는 최신 분기 실공시 기준\n # 0. 단, 배당은 1년에 한 번 이루어지기 때문에 최신 년도 공시 기준임\n sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2] # 느리지만 .iloc으로 하는 이유는 공시 날짜가 다른 기업이 있기 때문\n sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]\n\n sil_df_y = sil_df_y.fillna(0)\n sil_df_q = sil_df_q.fillna(0)\n\n if sil_df_y.dtype == 'O':\n sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))\n sil_df_y = sil_df_y.astype('float')\n\n if sil_df_q.dtype == 'O':\n sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))\n sil_df_q = sil_df_q.astype('float')\n\n # 1. 배당성향(bd_tend)\n bd_tend = sil_df_y[15] # 실제 배당 성향\n\n # 2. 유동성 분석 - 당좌비율(당좌자산/유동부채)\n # 당좌자산 = (유동자산 - 재고자산)\n dj_rate = sil_df_q[7] # 당좌비율\n\n # 3. 재무건전성 분석 - 부채비율(총부채/자기자본)의 역수\n bch_rate = sil_df_q[6] / 100 # 부채비율\n bch_rate = round((1 / bch_rate) * 100, 2)\n\n # 4. 수익성 분석 - 매출수익성(당기순이익/매출액) # TODO 매출액 0인 애들은?\n\n dg_bene = sil_df_q[2]\n mch = sil_df_q[0]\n\n suyk = round((dg_bene / mch) * 100, 2)\n\n # 5. 
성장성 분석 - 순이익성장률(지속성장 가능률)\n # (1-배당성향)*자기자본순이익률(ROE)\n # 유보율\n\n roe = sil_df_y[5] / 100\n ubo = (100 - bd_tend) / 100\n grth = round(roe * ubo * 100, 2)\n\n data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])\n\n # weather part----------------\n # PER?\n weather_per = sil_df_y[10]\n\n # PBR\n weather_pbr = sil_df_y[12]\n\n # ROE\n weather_roe = sil_df_y[5]\n\n # EPS\n weather_eps = sil_df_y[9]\n\n # BPS\n weather_bps = sil_df_y[11]\n\n # array\n weather_arr = np.array([weather_per, weather_pbr, weather_roe, weather_eps, weather_bps])\n\n return data_arr, weather_arr, nm, foreign_ms, giguan_ms\n\n# 수정수정수정\n\n# -------- 관련 기업 지표 선정(상대적 비율 기준)\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) dict 대신 array로 반환, 기업 이름(nm도 반환)\n\n# 220222 날씨\n\ndef relate_radar_weather_data(stock_code):\n label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성']\n arr_list = []\n\n # 주식 코드,이름으로 변환\n\n gcode = stock_code\n\n relate_corp = relate_code_crawl(co=gcode)\n\n # 다섯 개 회사가 안에 있다\n arr_list = [idv_radar_weather_data(stock_code=stcd) for stcd in relate_corp]\n\n # arr_list에서 데이터 분리\n radar_list = [x[0] for x in arr_list if x is not None]\n weather_list = [x[1] for x in arr_list if x is not None]\n nm_list = [x[2] for x in arr_list if x is not None]\n\n # 외인 매수, 기관 매수\n try:\n foreign_ms = arr_list[0][3]\n except TypeError:\n foreign_ms=0.01\n\n try:\n giguan_ms = arr_list[0][4]\n except TypeError:\n giguan_ms=0.01\n\n # radar_chart_data\n radar_list = np.array(radar_list)\n\n radar_list[:, 0] = (radar_list[:, 0] / radar_list[:, 0].mean()) * 100\n radar_list[:, 1] = (radar_list[:, 1] / radar_list[:, 1].mean()) * 100\n radar_list[:, 2] = (radar_list[:, 2] / radar_list[:, 2].mean()) * 100\n radar_list[:, 3] = (radar_list[:, 3] / radar_list[:, 3].mean()) * 100\n radar_list[:, 4] = (radar_list[:, 4] / radar_list[:, 4].mean()) * 100\n\n # radar_chart_dict\n radar_dict_list = []\n\n for i, nm in enumerate(nm_list):\n dic = {}\n dic[nm] = radar_list[i, :].tolist()\n radar_dict_list.append(dic)\n\n # weather_chart_data\n weather_list = np.array(weather_list)\n\n weather_list[:, 0] = (weather_list[:, 0] / weather_list[:, 0].mean()) # 각 기업의 평균 대비 PER\n weather_list[:, 1] = (weather_list[:, 1] / weather_list[:, 1].mean()) # 각 기업의 평균 대비 PBR\n weather_list[:, 2] = (weather_list[:, 2] / weather_list[:, 2].mean()) # 각 기업의 평균 대비 ROE\n weather_list[:, 3] = (weather_list[:, 3] / weather_list[:, 3].mean()) # 각 기업의 평균 대비 EPS\n weather_list[:, 4] = (weather_list[:, 4] / weather_list[:, 4].mean()) # 각 기업의 평균 대비 BPS\n weather_list=np.round(weather_list, 2)\n\n return label_list, radar_dict_list, weather_list[0], foreign_ms, giguan_ms\n\n\n# 220222 날씨 수정 끝 ---------------------------------------------\n\n# ==============\n# 지표 선정\n# ==============\n\n# -------- 지표 선정\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) 데이터프레임 하나가 아닌 리스트로 받아오기때문에 kind 제거하고 직접 선택해줌\n# 3) sli_df_y, sil_df_q 에서 '-' 가공 시 if 조건에 따라 처리하는 대신 lambda와 re.sub 이용\n# 4) dict 대신 array로 반환, 기업 이름(nm도 반환)\ndef idv_radar_data(stock_code):\n \"\"\"\n # <지표 설명>\n # 1. 배당 분석 -> 배당성향(배당 커버리지의 역수.)\n # 2. 유동성 분석(단기채무지급능력) -> 당좌비율(당좌자산 / 유동부채)\n # 3. 재무건전성 분석(레버리지 비율) -> 부채비율(총부채 / 자기자본)의 역수\n # 4. 수익성분석 -> 매출수익성(당기순이익/매출액))\n # 5. 성장성분석 -> 순이익성장률\n \"\"\"\n\n gcode = stock_code\n nm = stc_code_to_nm(stock_code)\n\n sil_df = fn_craw(gcode)[3] # 3: 기업실적정보 재무제표 (220220 수정)\n\n if (sil_df.iloc[0:8, 3].isna().sum()) > 0: # 표 안 가르고 계산하는 건 신규 상장 기업은 정보가 아예 없기 때문\n pass\n elif (sil_df.iloc[0:8, 9].isna().sum()) > 0: # 표 안 가르고 계산하는 건 신규 상장 기업은 정보가 아예 없기 때문\n pass\n\n\n else:\n # 0. 
재무정보는 최신 분기 실공시 기준\n # 0. 단, 배당은 1년에 한 번 이루어지기 때문에 최신 년도 공시 기준임\n sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2] # 느리지만 .iloc으로 하는 이유는 공시 날짜가 다른 기업이 있기 때문\n sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]\n\n sil_df_y = sil_df_y.fillna(0)\n sil_df_q = sil_df_q.fillna(0)\n\n if sil_df_y.dtype == 'O':\n sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))\n sil_df_y = sil_df_y.astype('float')\n\n if sil_df_q.dtype == 'O':\n sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))\n sil_df_q = sil_df_q.astype('float')\n\n # 1. 배당성향(bd_tend)\n bd_tend = sil_df_y[15] # 실제 배당 성향\n\n # 2. 유동성 분석 - 당좌비율(당좌자산/유동부채)\n # 당좌자산 = (유동자산 - 재고자산)\n dj_rate = sil_df_q[7] # 당좌비율\n\n # 3. 재무건전성 분석 - 부채비율(총부채/자기자본)의 역수\n bch_rate = sil_df_q[6] / 100 # 부채비율\n bch_rate = round((1 / bch_rate) * 100, 2)\n\n # 4. 수익성 분석 - 매출수익성(당기순이익/매출액) # TODO 매출액 0인 애들은?\n\n dg_bene = sil_df_q[2]\n mch = sil_df_q[0]\n\n suyk = round((dg_bene / mch) * 100, 2)\n\n # 5. 성장성 분석 - 순이익성장률(지속성장 가능률)\n # (1-배당성향)*자기자본순이익률(ROE)\n # 유보율\n\n roe = sil_df_y[5] / 100\n ubo = (100 - bd_tend) / 100\n grth = round(roe * ubo * 100, 2)\n\n data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])\n\n return data_arr, nm\n\n\n# -------- 관련 기업 지표 선정(상대적 비율 기준)\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) dict 대신 array로 반환, 기업 이름(nm도 반환)\ndef relate_radar_data(stock_code):\n label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성']\n arr_list = []\n\n # 주식 코드,이름으로 변환\n\n gcode = stock_code\n\n relate_corp = relate_code_crawl(co=gcode)\n\n arr_list = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]\n nm_list = [x[1] for x in arr_list if x is not None]\n arr_list = [x[0] for x in arr_list if x is not None]\n\n arr_list = np.array(arr_list)\n\n arr_list[:, 0] = (arr_list[:, 0] / arr_list[:, 0].mean()) * 100\n arr_list[:, 1] = (arr_list[:, 1] / arr_list[:, 1].mean()) * 100\n arr_list[:, 2] = (arr_list[:, 2] / arr_list[:, 2].mean()) * 100\n arr_list[:, 3] = (arr_list[:, 3] / arr_list[:, 3].mean()) * 100\n arr_list[:, 4] = (arr_list[:, 4] / arr_list[:, 4].mean()) * 100\n\n dict_list = []\n\n for i, nm in enumerate(nm_list):\n dic = {}\n dic[nm] = arr_list[i, :].tolist()\n dict_list.append(dic)\n\n return label_list, dict_list\n\n\n# -------- 관련 기업 지표 선정(원본)\n\n# def relate_radar_data(yh_code=None, corp_name=None, stock_code=None):\n# label_list=['배당성향', '유동성', '건전성', '수익성', '성장성']\n# dict_list = []\n#\n# # 주식 코드로 변환\n# gcode = 0\n# if yh_code != None:\n# gcode = yh_code_to_fn_gicode(yh_code)\n# elif corp_name != None:\n# gcode = nm_to_fn_gicode(corp_name)\n# elif stock_code != None:\n# gcode = stock_code\n#\n# relate_corp = relate_code_crawl(co=gcode)\n#\n# dict_list = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]\n#\n# dict_list = [x for x in dict_list if x is not None]\n#\n#\n# return label_list, dict_list\n\n\n# ==============\n# 시각화\n# ==============\n\n# -------- 매출, 당기순이익 추이 그래프\n# 220220 수정\n# 1) 매개변수 stock_code로 축약\n# 2) 크롤링한 데이터는 list로 받아오므로 kind 없애고 직접 인덱스 처리\n\ndef mch_dg(stock_code):\n gcode = stock_code\n nm = stc_code_to_nm(stock_code)\n\n bs_df = bs_craw(stock_code=gcode)[0]\n label_list = bs_df.columns[1:6].tolist() # 네 분기 + 전년동기\n mch_list = bs_df.loc[0, label_list].tolist() # 매출액\n dg_list = bs_df.loc[15, label_list].tolist() # 당기순이익\n\n return label_list, mch_list, dg_list\n\n\ndef icon_selection(index_array):\n res=[]\n for idx in index_array:\n if 3<idx :\n res.append(\"RAIN\")\n elif ( 1.2<idx and idx<=3 ):\n res.append(\"CLOUDY\")\n elif ( 0.8<idx and idx<=1.2 ):\n 
res.append(\"PARTLY_CLOUDY_DAY\")\n elif ( 0<idx and idx<=0.8 ):\n res.append(\"CLEAR_DAY\")\n else:\n res.append(\"SNOW\")\n\n return res\n\ndef foreign_giguan(index_array):\n res = []\n for idx in index_array:\n if idx >0:\n res.append(\"CLEAR_DAY\")\n elif idx==0:\n res.append(\"CLOUDY\")\n else:\n res.append(\"RAIN\")\n\n return res\n\n\n\n\n\n# ====================================================\n# 데이터\n# ====================================================\n\n# -------- 병합 파일 불러오기\ncom_df=pd.read_csv('com_df.csv',\n dtype={'stock_code': 'str', '표준코드': 'str', '단축코드': 'str', 'stock_code_ori':'str'},\n parse_dates=['listed_date', '상장일'])\n\n\n\n# -------- 뉴스 크롤링\ndef news_crawl(gi):\n\n\n tot_list = []\n\n for p in range(1):\n # 뉴스 기사 모인 페이지\n url = 'https://m.stock.naver.com/domestic/stock/' + str(gi) + '/news/title' # https://m.stock.naver.com/domestic/stock/003550/total\n #F12누르면 나오는 네트워크상에서 찾아온 경로\n #https://m.stock.naver.com/api/news/stock/005930?pageSize=20&page=1&searchMethod=title_entity_id.basic\n url = \"https://m.stock.naver.com/api/news/stock/\"+str(gi)+\"?pageSize=5&searchMethod=title_entity_id.basic&page=1\"\n res = requests.get(url)\n\n news_list = json.loads(res.text)\n #페이지에서 가져온 전체 뉴스기사를 for문으로 분리\n #print(news_list[0])\n for i, news in enumerate(news_list) :\n #신문사 id\n a=news['items'][0]['officeId']\n #기사 id\n b=news['items'][0]['articleId']\n list = []\n list.append(news['items'][0]['officeName']) #신문사\n list.append(news['items'][0]['datetime'][:8]) #날짜\n list.append(news['items'][0]['title'].replace('"','\\\"')) #제목\n list.append(news['items'][0]['imageOriginLink']) #이미지\n list.append(news['items'][0]['body'].replace('"','\\\"')) # 기사 내용\n list.append('https://m.stock.naver.com/domestic/stock/005930/news/view/'+str(a)+'/'+str(b)) #기사 url\n tot_list.append(list)\n\n news_df = pd.DataFrame(data=tot_list, columns=['offname','rdate','title','imgsrc','content','url'])\n news_df['title'] = news_df['title'].str.replace('&', '&')\n news_df['content'] = news_df['content'].str.replace('&', '&')\n\n #news_df['title'] = [re.sub('[^A-Za-z0-9가-힣]', '' ,s) for s in news_df['title']]\n\n\n #news_df.to_csv('css.csv',index=False)\n return news_df\n\n#co-종목코드\ndef relate_code_crawl(co):\n #연관 종목코드 있는 페이지 불러오기\n url='https://finance.naver.com/item/main.naver?code='+str(co)\n page=pd.read_html(url,encoding='CP949')\n #연관 종목명과 종목코드 뽑아내기(code_list[0]은 '종목명'이어서 제외)\n code_list=page[4].columns.tolist()\n code_list=code_list[1:]\n #종목코드 리스트 반환\n codes=[]\n for word in (code_list):\n codes.append(word[-6:])\n #print(codes)\n return codes\n\n\n# def before_1w_kospi(date):\n# before1w=date-timedelta(days=7)\n# return fdr.DataReader('KS11',before1w)[['Close']]#, fdr.DataReader('KQ11',before1w)\n\ndef invest_opinion(gcode):\n url='https://finance.naver.com/item/coinfo.naver?code='+str(gcode)\n page=pd.read_html(url,encoding='CP949')\n try:\n a,b=page[3][1].tolist()[0][:4].split('.')\n return ((int(a)+int(b)/100)/5)*100 #의견 점수 구한 후 백분율로 다시 변환\n except ValueError:\n return 0.1\n#최상현 함수\ndef crawl_ifrs(gcode):\n url = \"http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A\"+gcode+\"&cID=&MenuYn=Y&ReportGB=&NewMenuID=11&stkGb=701\"\n table_list = pd.read_html(url, encoding='UTF-8')\n\n ifrs = table_list[10]\n\n ifrs = ifrs.fillna('9999999999')\n for i in range(1, 5):\n if ifrs.iloc[:, i].dtype == 'O':\n ifrs.iloc[:, i] = ifrs.iloc[:, i].apply(lambda x: '9999999999' if type(x) == str else x)\n print(ifrs.iloc[:, i])\n ifrs.iloc[:, i] = ifrs.iloc[:, i].astype('float')\n ifrs.iloc[:, i] = 
ifrs.iloc[:, i].apply(lambda x: format(float(x), ','))\n\n ifrs = pd.concat([ifrs.iloc[:, 0], ifrs['Annual']], axis=1)\n ifrs = ifrs.astype(str)\n\n for i in range(1, 5):\n ifrs.iloc[:12, i] = ifrs.iloc[:12, i].apply(lambda x: x[:-2])\n ifrs.iloc[18:21, i] = ifrs.iloc[18:21, i].apply(lambda x: x[:-2])\n ifrs.iloc[23:24, i] = ifrs.iloc[23:24, i].apply(lambda x: x[:-2])\n ifrs = ifrs.replace(['9,999,999,999', '9,999,999,999.0'], ['-', '-'])\n\n ifrs.rename(columns={'IFRS(연결)': ''}, inplace=True)\n ifrs = ifrs.to_html(justify=\"right\", index=False, classes=\"table\")\n ifrs = ifrs.replace('border=\"1\"', 'border=\"0\"')\n pd.options.display.float_format = '{:,.0f}'.format\n ifrs = ifrs.replace('<td>', '<td align=\"right\">')\n ifrs = ifrs.replace('<th>', '<th style=\"text-align: right;\">')\n ifrs = ifrs.replace('halign=\"left\"', 'style=\"text-align: center;\"')\n ifrs = ifrs.replace('class =\"dataframe table\"',\n 'class =\"dataframe table\" style = \"table-layout:fixed;word-break:break-all;\"')\n\n return (ifrs)\n\n\ndef ori_code(yh_code):\n origin_stock=com_df[com_df['yh_code']==yh_code]['stock_code_ori'].values[0]\n return origin_stock\n\n\n\n# 아리마 모델\ndef stock_predict(code,ptype):\n data = stock.get_market_ohlcv_by_date(fromdate=\"20220101\", todate=\"20220222\", ticker=str(code))\n print(data.head())\n data=data[[ptype]]\n y_train=data\n y_test=data\n kpss_diffs = ndiffs(y_train, alpha=0.05, test='kpss', max_d=6)\n adf_diffs = ndiffs(y_train, alpha=0.05, test='adf', max_d=6)\n n_diffs = max(adf_diffs, kpss_diffs)\n\n print(f\"추정된 차수 d = {n_diffs}\")\n model=pm.auto_arima(y_train,d=n_diffs,seasonal=False,trace=True)\n model.fit(y_train)\n print(model.summary())\n def forecast_one_step():\n fc, conf_int = model.predict(n_periods=1 # 한 스텝씩!\n , return_conf_int=True) # 신뢰구간 출력\n return (\n fc.tolist()[0],\n np.asarray(conf_int).tolist()[0]\n )\n forecasts = []\n y_pred = []\n pred_upper = []\n pred_lower = []\n\n for new_ob in y_test[ptype]:\n\n fc, conf = forecast_one_step()\n y_pred.append(int(fc))\n pred_upper.append(conf[1])\n pred_lower.append(conf[0])\n\n ## 모형 업데이트 !!\n model.update(new_ob)\n fc_last = model.predict(n_periods=1 # 한 스텝씩!\n )\n df=pd.DataFrame({\"test\": y_test[ptype], \"pred\": y_pred})\n print(df.tail())\n def MAE(y_test, y_pred):\n return np.mean(np.abs((df['test']-df['pred'])/df['test']))*100\n mae=np.round(MAE(y_test, y_pred).astype('float'),4)\n print(f\"MAE: {MAE(y_test, y_pred):.3f}\")\n price_list=[]\n return int(fc_last),mae\n\n\n\n\n\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"pandas.read_html",
"numpy.abs",
"numpy.asarray",
"pandas.concat",
"numpy.round",
"numpy.array"
]
] |
donnyyou/centerX | [
"1073753533f26483c3ab053a7d8753708fcacde7"
] | [
"projects/speedup/centerX2onnx.py"
] | [
"from types import MethodType\nimport onnx\nimport torch\nfrom torch.onnx import OperatorExportTypes\nfrom onnxsim import simplify\nimport argparse\nimport io\nimport sys\nimport torch.nn as nn\n\nsys.path.insert(0, '.')\nfrom configs import add_centernet_config\nfrom detectron2.config import get_cfg\nfrom inference.centernet import build_model\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom fvcore.common.file_io import PathManager\n\ndef centerX_forward(self, x):\n x = self.normalizer(x / 255.)\n y = self._forward(x)\n fmap_max = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)(y['cls'])\n keep = (y['cls'] - fmap_max).float() + 1e-9\n keep = nn.ReLU()(keep)\n keep = keep * 1e9\n result = y['cls'] * keep\n ret = [result,y['reg'],y['wh']] ## change dict to list\n return ret\n\ndef load_model(config_file,model_path):\n cfg = get_cfg()\n add_centernet_config(cfg)\n cfg.merge_from_file(config_file)\n forward = {'centerX': centerX_forward}\n\n # model\n model = build_model(cfg)\n model.forward = MethodType(forward['centerX'], model)\n DetectionCheckpointer(model).load(model_path)\n model.eval()\n model.cuda()\n return model\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Convert Pytorch to ONNX model\")\n\n parser.add_argument(\n \"--config-file\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\n \"--model-path\",\n metavar=\"FILE\",\n help=\"path to model\",\n )\n parser.add_argument(\n \"--name\",\n default=\"baseline\",\n help=\"name for converted model\"\n )\n parser.add_argument(\n \"--output\",\n default='onnx_model',\n help='path to save converted onnx model'\n )\n parser.add_argument(\n \"--input_w\",\n default=640,\n type=int,\n help='image_width'\n )\n parser.add_argument(\n \"--input_h\",\n default=384,\n type=int,\n help='image_height'\n )\n return parser\n\n\ndef remove_initializer_from_input(model):\n if model.ir_version < 4:\n print(\n 'Model with ir_version below 4 requires to include initilizer in graph input'\n )\n return\n\n inputs = model.graph.input\n name_to_input = {}\n for input in inputs:\n name_to_input[input.name] = input\n\n for initializer in model.graph.initializer:\n if initializer.name in name_to_input:\n inputs.remove(name_to_input[initializer.name])\n\n return model\n\n\ndef export_onnx_model(model, inputs):\n \"\"\"\n Trace and export a model to onnx format.\n Args:\n model (nn.Module):\n inputs (torch.Tensor): the model will be called by `model(*inputs)`\n Returns:\n an onnx model\n \"\"\"\n assert isinstance(model, torch.nn.Module)\n\n # make sure all modules are in eval mode, onnx may change the training state\n # of the module if the states are not consistent\n def _check_eval(module):\n assert not module.training\n\n model.apply(_check_eval)\n\n # Export the model to ONNX\n with torch.no_grad():\n with io.BytesIO() as f:\n torch.onnx.export(\n model,\n inputs,\n f,\n operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,\n # verbose=True, # NOTE: uncomment this for debugging\n # export_params=True,\n )\n onnx_model = onnx.load_from_string(f.getvalue())\n\n # Apply ONNX's Optimization\n all_passes = onnx.optimizer.get_available_passes()\n passes = [\"extract_constant_to_initializer\", \"eliminate_unused_initializer\", \"fuse_bn_into_conv\"]\n assert all(p in all_passes for p in passes)\n onnx_model = onnx.optimizer.optimize(onnx_model, passes)\n return onnx_model\n\nif __name__ == '__main__':\n args = get_parser().parse_args()\n model = load_model(args.config_file, 
args.model_path)\n\n inputs = torch.randn(1, 3, args.input_h, args.input_w).cuda()\n onnx_model = export_onnx_model(model, inputs)\n\n model_simp, check = simplify(onnx_model)\n\n model_simp = remove_initializer_from_input(model_simp)\n\n assert check, \"Simplified ONNX model could not be validated\"\n\n PathManager.mkdirs(args.output)\n\n onnx.save_model(model_simp, f\"{args.output}/{args.name}.onnx\")\n\n print(f\"Export onnx model in {args.output} successfully!\")\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.onnx.export",
"torch.randn",
"torch.no_grad",
"torch.nn.ReLU"
]
] |
WHATDOESTHEFOXSAY2U/Colab_Train | [
"30fdf2f9f72fbef51447ecc91070189ccca301b2"
] | [
"callbacks.py"
] | [
"\"\"\"\nContains custom callbacks.\n\"\"\"\n\nfrom constants import minimum_scores, maximum_scores\nimport constants\nimport datetime\nimport json\nfrom keras.callbacks import Callback, ModelCheckpoint\nimport numpy as np\nimport os\nfrom sklearn.metrics import cohen_kappa_score\nfrom util import process_data, create_folder\n\nclass QWKScore(Callback):\n def __init__(self, essays, save_to_file=True, print_to_screen=True):\n super()\n self.essays = essays\n self.save_to_file = save_to_file\n self.print_to_screen = print_to_screen\n\n def on_epoch_end(self, epoch, logs={}):\n # for each essay set calculate the QWK scores\n qwk_scores = []\n number_essays = []\n\n if self.print_to_screen:\n print(\"\\nQWK Scores\")\n\n for essay_set in range(1, 9):\n essays_in_set = self.essays[self.essays['essay_set'] == essay_set]\n X, y = process_data(essays_in_set)\n y_true = essays_in_set['domain1_score'].values\n\n normalised_prediction = self.model.predict(X)\n normalised_prediction = np.array(normalised_prediction)\n y_pred = np.around((normalised_prediction * (maximum_scores[essay_set] - minimum_scores[essay_set])) + minimum_scores[essay_set])\n\n qwk_score = cohen_kappa_score(y_true, y_pred, weights='quadratic')\n qwk_scores.append(qwk_score)\n number_essays.append(len(essays_in_set))\n\n if self.print_to_screen:\n print(\"Set {}: {:.2f}\".format(essay_set, qwk_score), end=' ')\n\n qwk_scores = np.array(qwk_scores)\n number_essays = np.array(number_essays)\n\n weighted_qwk_score = np.sum(qwk_scores * number_essays) / np.sum(number_essays)\n if self.print_to_screen:\n print('\\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score))\n\n if self.save_to_file:\n summary = \"Epoch \" + str(epoch + 1)\n log_values = \"\\n\"\n for key, value in logs.items():\n log_values += \"{}: {:.4f} \".format(key, value)\n individual_qwk_scores = \"\\n\"\n for essay_set in range(8):\n individual_qwk_scores += \"Set {}: {:.2f} \".format(essay_set + 1, qwk_scores[essay_set])\n summary = summary + log_values + individual_qwk_scores\n summary += '\\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score)\n summary += '\\n\\n'\n with open(os.path.join(constants.SAVE_DIR, \"scores.txt\"), \"a\") as f:\n f.write(summary)\n\nclass SaveModel(ModelCheckpoint):\n \"\"\"\n Wrapper of Model Checkpoint class.\n \"\"\"\n def __init__(self, directory, filename, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1):\n \n # make folder with the current time as name\n now = datetime.datetime.now()\n current_time = \"{}_{}_{}_{}_{}_{}\".format(now.day, now.month, now.year, now.hour, now.minute, now.second)\n constants.SAVE_DIR = os.path.join(directory, current_time)\n\n create_folder(constants.SAVE_DIR)\n\n ModelCheckpoint.__init__(self, os.path.join(constants.SAVE_DIR, filename), monitor=monitor, save_best_only=save_best_only, save_weights_only=save_weights_only, mode=mode, period=period)\n\n def on_train_begin(self, logs=None):\n # save model architecture.\n parsed = json.loads(self.model.to_json())\n with open(os.path.join(constants.SAVE_DIR, 'model.txt'), 'w') as file:\n file.write(json.dumps(parsed, indent=4))\n"
] | [
[
"numpy.array",
"numpy.around",
"sklearn.metrics.cohen_kappa_score",
"numpy.sum"
]
] |
hujunxianligong/Graph-U-Nets | [
"d1a483400131fbe75a55cff27439585c62c4a575"
] | [
"main.py"
] | [
"import sys\nimport os\nimport torch\nimport random\nimport numpy as np\nfrom tqdm import tqdm\nimport torch.nn as nn\nimport torch.optim as optim\nimport math\nfrom network import GUNet\nfrom mlp_dropout import MLPClassifier\nfrom sklearn import metrics\nfrom util import cmd_args, load_data\n\n\nsys.path.append(\n '%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname(\n os.path.realpath(__file__)))\n\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n model = GUNet\n\n self.s2v = model(\n latent_dim=cmd_args.latent_dim,\n output_dim=cmd_args.out_dim,\n num_node_feats=cmd_args.feat_dim+cmd_args.attr_dim,\n num_edge_feats=0,\n k=cmd_args.sortpooling_k)\n out_dim = cmd_args.out_dim\n if out_dim == 0:\n out_dim = self.s2v.dense_dim\n self.mlp = MLPClassifier(\n input_size=out_dim, hidden_size=cmd_args.hidden,\n num_class=cmd_args.num_class, with_dropout=cmd_args.dropout)\n\n def PrepareFeatureLabel(self, batch_graph):\n labels = torch.LongTensor(len(batch_graph))\n n_nodes = 0\n\n if batch_graph[0].node_tags is not None:\n node_tag_flag = True\n concat_tag = []\n else:\n node_tag_flag = False\n\n if batch_graph[0].node_features is not None:\n node_feat_flag = True\n concat_feat = []\n else:\n node_feat_flag = False\n\n for i in range(len(batch_graph)):\n labels[i] = batch_graph[i].label\n n_nodes += batch_graph[i].num_nodes\n if node_tag_flag:\n concat_tag += batch_graph[i].node_tags\n if node_feat_flag:\n tmp = torch.from_numpy(\n batch_graph[i].node_features).type('torch.FloatTensor')\n concat_feat.append(tmp)\n\n if node_tag_flag:\n concat_tag = torch.LongTensor(concat_tag).view(-1, 1)\n node_tag = torch.zeros(n_nodes, cmd_args.feat_dim)\n node_tag.scatter_(1, concat_tag, 1)\n\n if node_feat_flag:\n node_feat = torch.cat(concat_feat, 0)\n\n if node_feat_flag and node_tag_flag:\n # concatenate one-hot embedding of node tags (node labels)\n # with continuous node features\n node_feat = torch.cat([node_tag.type_as(node_feat), node_feat], 1)\n elif node_feat_flag is False and node_tag_flag:\n node_feat = node_tag\n elif node_feat_flag and node_tag_flag is False:\n pass\n else:\n node_feat = torch.ones(n_nodes, 1)\n\n if cmd_args.mode == 'gpu':\n node_feat = node_feat.cuda()\n labels = labels.cuda()\n\n return node_feat, labels\n\n def forward(self, batch_graph):\n node_feat, labels = self.PrepareFeatureLabel(batch_graph)\n embed = self.s2v(batch_graph, node_feat, None)\n\n return self.mlp(embed, labels)\n\n def output_features(self, batch_graph):\n node_feat, labels = self.PrepareFeatureLabel(batch_graph)\n embed = self.s2v(batch_graph, node_feat, None)\n return embed, labels\n\n\ndef loop_dataset(g_list, classifier, sample_idxes, optimizer=None,\n bsize=cmd_args.batch_size):\n total_loss = []\n total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize # noqa\n pbar = tqdm(range(total_iters), unit='batch')\n all_targets = []\n all_scores = []\n\n n_samples = 0\n for pos in pbar:\n selected_idx = sample_idxes[pos * bsize: (pos + 1) * bsize]\n\n batch_graph = [g_list[idx] for idx in selected_idx]\n targets = [g_list[idx].label for idx in selected_idx]\n all_targets += targets\n logits, loss, acc = classifier(batch_graph)\n all_scores.append(logits[:, 1].detach()) # for binary classification\n\n if optimizer is not None:\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss = loss.data.cpu().numpy()\n pbar.set_description('loss: %0.5f acc: %0.5f' % (loss, acc))\n\n total_loss.append(np.array([loss, acc]) * 
len(selected_idx))\n\n n_samples += len(selected_idx)\n if optimizer is None:\n assert n_samples == len(sample_idxes)\n total_loss = np.array(total_loss)\n avg_loss = np.sum(total_loss, 0) / n_samples\n all_scores = torch.cat(all_scores).cpu().numpy()\n\n # np.savetxt('test_scores.txt', all_scores) # output test predictions\n\n all_targets = np.array(all_targets)\n fpr, tpr, _ = metrics.roc_curve(all_targets, all_scores, pos_label=1)\n auc = metrics.auc(fpr, tpr)\n avg_loss = np.concatenate((avg_loss, [auc]))\n\n return avg_loss\n\n\nif __name__ == '__main__':\n print(cmd_args)\n random.seed(cmd_args.seed)\n np.random.seed(cmd_args.seed)\n torch.manual_seed(cmd_args.seed)\n\n train_graphs, test_graphs = load_data()\n print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))\n\n if cmd_args.sortpooling_k <= 1:\n num_nodes_list = sorted([\n g.num_nodes for g in train_graphs + test_graphs])\n cmd_args.sortpooling_k = num_nodes_list[\n int(math.ceil(cmd_args.sortpooling_k * len(num_nodes_list))) - 1]\n cmd_args.sortpooling_k = max(10, cmd_args.sortpooling_k)\n print('k used in SortPooling is: ' + str(cmd_args.sortpooling_k))\n\n classifier = Classifier()\n if cmd_args.mode == 'gpu':\n classifier = classifier.cuda()\n\n optimizer = optim.Adam(\n classifier.parameters(), lr=cmd_args.learning_rate, amsgrad=True,\n weight_decay=0.0008)\n\n train_idxes = list(range(len(train_graphs)))\n best_loss = None\n max_acc = 0.0\n for epoch in range(cmd_args.num_epochs):\n random.shuffle(train_idxes)\n classifier.train()\n avg_loss = loop_dataset(\n train_graphs, classifier, train_idxes, optimizer=optimizer)\n if not cmd_args.printAUC:\n avg_loss[2] = 0.0\n print('\\033[92maverage training of epoch %d: loss %.5f acc %.5f auc %.5f\\033[0m' % (epoch, avg_loss[0], avg_loss[1], avg_loss[2])) # noqa\n\n classifier.eval()\n test_loss = loop_dataset(\n test_graphs, classifier, list(range(len(test_graphs))))\n if not cmd_args.printAUC:\n test_loss[2] = 0.0\n print('\\033[93maverage test of epoch %d: loss %.5f acc %.5f auc %.5f\\033[0m' % (epoch, test_loss[0], test_loss[1], test_loss[2])) # noqa\n max_acc = max(max_acc, test_loss[1])\n\n with open('acc_result_%s.txt' % cmd_args.data, 'a+') as f:\n # f.write(str(test_loss[1]) + '\\n')\n f.write(str(max_acc) + '\\n')\n\n if cmd_args.printAUC:\n with open('auc_results.txt', 'a+') as f:\n f.write(str(test_loss[2]) + '\\n')\n\n if cmd_args.extract_features:\n features, labels = classifier.output_features(train_graphs)\n labels = labels.type('torch.FloatTensor')\n np.savetxt('extracted_features_train.txt', torch.cat(\n [labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(),\n '%.4f')\n features, labels = classifier.output_features(test_graphs)\n labels = labels.type('torch.FloatTensor')\n np.savetxt('extracted_features_test.txt', torch.cat(\n [labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(),\n '%.4f')\n"
] | [
[
"numpy.sum",
"torch.ones",
"sklearn.metrics.roc_curve",
"sklearn.metrics.auc",
"torch.manual_seed",
"numpy.random.seed",
"torch.zeros",
"torch.from_numpy",
"numpy.array",
"numpy.concatenate",
"torch.LongTensor",
"torch.cat"
]
] |
carlsummer/python_developer_tools | [
"fc0dcf5c4ef088e2e535206dc82f09bbfd01f280"
] | [
"python_developer_tools/cv/classes/ResNeXt.py"
] | [
"import torch\nimport torch.nn as nn\nimport torchvision\n\n\nclass ResNeXtBlock(nn.Module):\n def __init__(self,in_places,places, stride=1,downsampling=False, expansion = 2, cardinality=32):\n super(ResNeXtBlock,self).__init__()\n self.expansion = expansion\n self.downsampling = downsampling\n\n self.bottleneck = nn.Sequential(\n nn.Conv2d(in_channels=in_places, out_channels=places, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(places),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=places, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False, groups=cardinality),\n nn.BatchNorm2d(places),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=places, out_channels=places * self.expansion, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(places * self.expansion),\n )\n\n if self.downsampling:\n self.downsample = nn.Sequential(\n nn.Conv2d(in_channels=in_places, out_channels=places * self.expansion, kernel_size=1, stride=stride,bias=False),\n nn.BatchNorm2d(places * self.expansion)\n )\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n residual = x\n out = self.bottleneck(x)\n\n if self.downsampling:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n return out\n\n\nif __name__ =='__main__':\n model = ResNeXtBlock(in_places=256, places=128)\n print(model)\n\n input = torch.randn(1,256,64,64)\n out = model(input)\n print(out.shape)"
] | [
[
"torch.randn",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
edornd/multimodal-icl | [
"f79bfa73665db471c12ee9cb57bbee1bcabb0467"
] | [
"saticl/training.py"
] | [
"from itertools import chain\nfrom pathlib import Path\nfrom typing import Tuple\n\nimport torch\nfrom accelerate import Accelerator\nfrom torch.utils.data import DataLoader\n\nfrom saticl.config import Configuration, SSLConfiguration\nfrom saticl.datasets.icl import ICLDataset\nfrom saticl.datasets.transforms import invariance_transforms, inverse_transform, ssl_transforms\nfrom saticl.datasets.wrappers import SSLDataset\nfrom saticl.logging.tensorboard import TensorBoardLogger\nfrom saticl.losses.regularization import AugmentationInvariance\nfrom saticl.models.icl import ICLSegmenter\nfrom saticl.prepare import prepare_dataset, prepare_metrics, prepare_metrics_ssl, prepare_model, prepare_model_ssl\nfrom saticl.tasks import Task\nfrom saticl.trainer.base import Trainer\nfrom saticl.trainer.callbacks import Checkpoint, DisplaySamples, EarlyStopping, EarlyStoppingCriterion\nfrom saticl.trainer.invariance import AugInvarianceTrainer\nfrom saticl.trainer.ssl import SSLStage, SSLTrainer\nfrom saticl.utils.common import flatten_config, get_logger, git_revision_hash, store_config\nfrom saticl.utils.ml import checkpoint_path, init_experiment, seed_everything, seed_worker\n\n\nLOG = get_logger(__name__)\n\n\ndef init_from_previous_step(config: Configuration, new_model: ICLSegmenter, old_model: ICLSegmenter,\n model_folder: Path, task: Task) -> Tuple[ICLSegmenter, ICLSegmenter]:\n if task.step == 0:\n LOG.info(\"Step 0: training from scratch without old model\")\n return new_model, old_model\n\n LOG.info(\"Loading checkpoint from step: %d\", task.step - 1)\n if config.task.step_checkpoint is not None:\n ckpt_path = Path(config.task.step_checkpoint)\n else:\n ckpt_path = checkpoint_path(model_folder, task_name=task.name, step=task.step - 1)\n assert ckpt_path.exists() and ckpt_path.is_file(), f\"Checkpoint for step {task.step-1} not found at {str(ckpt_path)}\"\n\n checkpoint = torch.load(str(ckpt_path), map_location=\"cpu\")\n # load checkpoint into the new model, without strict matching because of ICL heads\n new_model.load_state_dict(checkpoint, strict=False)\n if config.model.init_balanced:\n new_model.init_classifier()\n # load the same checkpoint into the old model, this time strict since it's the very same\n old_model.load_state_dict(checkpoint, strict=True)\n old_model.freeze()\n old_model.eval()\n del checkpoint\n return new_model, old_model\n\n\ndef train(config: Configuration):\n # assertions before starting\n assert config.name is not None or config.task.step == 0, \"Specify the experiment name with ICL steps >= 1!\"\n assert torch.backends.cudnn.enabled, \"AMP requires CUDNN backend to be enabled.\"\n\n # prepare accelerator ASAP\n accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)\n\n # Create the directory tree:\n # outputs\n # |-- dataset\n # |--task_name\n # |-- exp_name\n # |-- models\n # |-- logs\n accelerator.wait_for_everyone()\n log_name = f\"output-{config.task.step}.log\"\n exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)\n config_path = out_folder / f\"segmenter-config-s{config.task.step}.yaml\"\n LOG.info(\"Run started\")\n LOG.info(\"Experiment ID: %s\", exp_id)\n LOG.info(\"Output folder: %s\", out_folder)\n LOG.info(\"Models folder: %s\", model_folder)\n LOG.info(\"Logs folder: %s\", logs_folder)\n LOG.info(\"Configuration: %s\", config_path)\n\n # seeding everything\n LOG.info(\"Using seed: %d\", config.seed)\n seed_everything(config.seed)\n # prepare datasets\n LOG.info(\"Loading datasets...\")\n 
train_set, valid_set = prepare_dataset(config=config, partial_transforms=False)\n LOG.info(\"Full sets - train set: %d samples, validation set: %d samples\", len(train_set), len(valid_set))\n\n add_background = not train_set.has_background()\n task = Task(dataset=config.dataset,\n name=config.task.name,\n step=config.task.step,\n add_background=add_background)\n train_mask, valid_mask = 0, 255\n train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)\n valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)\n # construct data loaders\n train_loader = DataLoader(dataset=train_set,\n batch_size=config.trainer.batch_size,\n shuffle=True,\n num_workers=config.trainer.num_workers,\n worker_init_fn=seed_worker,\n drop_last=True)\n valid_loader = DataLoader(dataset=valid_set,\n batch_size=config.trainer.batch_size,\n shuffle=False,\n num_workers=config.trainer.num_workers,\n worker_init_fn=seed_worker)\n LOG.info(\"ICL sets - Train set: %d samples, validation set: %d samples\", len(train_set), len(valid_set))\n\n # prepare models\n LOG.info(\"Preparing model...\")\n new_model = prepare_model(config=config, task=task)\n new_model = new_model.to(accelerator.device)\n if task.step > 0:\n old_task = Task(dataset=config.dataset,\n name=config.task.name,\n step=task.step - 1,\n add_background=add_background)\n old_model = prepare_model(config=config, task=old_task)\n old_model = old_model.to(accelerator.device)\n else:\n old_model = None\n new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)\n LOG.info(\"Done preparing models\")\n\n # prepare optimizer and scheduler\n optimizer = config.optimizer.instantiate(new_model.parameters())\n scheduler = config.scheduler.instantiate(optimizer)\n # prepare losses\n weights = None\n if config.class_weights:\n weights = train_set.load_class_weights(Path(config.class_weights),\n device=accelerator.device,\n normalize=config.ce.tversky)\n LOG.info(\"Using class weights: %s\", str(weights))\n segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count(), weight=weights)\n distill_loss = config.kd.instantiate()\n if task.step > 0 and config.ce.unbiased:\n seg_loss_name = str(type(segment_loss))\n kdd_loss_name = str(type(distill_loss))\n if \"Unbiased\" not in seg_loss_name:\n LOG.warn(f\"Non-ubiased segmentation loss '{seg_loss_name}' for step {task.step}!\")\n if \"Unbiased\" not in kdd_loss_name:\n LOG.warn(f\"Non-unbiased KD loss '{kdd_loss_name}' for step {task.step}\")\n # prepare metrics and logger\n monitored = config.trainer.monitor.name\n train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)\n logger = TensorBoardLogger(log_folder=logs_folder,\n filename_suffix=f\"step-{task.step}\",\n icl_step=task.step,\n comment=config.comment)\n # logging configuration to tensorboard\n LOG.debug(\"Logging flattened config. to TensorBoard\")\n logger.log_table(\"config\", flatten_config(config.dict()))\n\n # prepare trainer\n LOG.info(\"Visualize: %s, num. 
batches for visualization: %s\", str(config.visualize), str(config.num_samples))\n num_samples = int(config.visualize) * config.num_samples\n # choose trainer class depending on task or regularization\n trainer_class = Trainer\n kwargs = dict()\n if config.aug.apply:\n inv_transforms = invariance_transforms(config.aug)\n LOG.info(\"Invariance transforms: \")\n LOG.info(str(inv_transforms))\n kwargs.update(aug_criterion=AugmentationInvariance(transform=inv_transforms),\n aug_lambda=config.aug.factor,\n aug_lambda_icl=config.aug.factor_icl,\n temperature=config.trainer.temperature,\n temp_epochs=config.trainer.temp_epochs)\n trainer_class = AugInvarianceTrainer\n trainer = trainer_class(accelerator=accelerator,\n task=task,\n new_model=new_model,\n old_model=old_model,\n optimizer=optimizer,\n scheduler=scheduler,\n train_metrics=train_metrics,\n val_metrics=valid_metrics,\n old_classes=train_set.old_categories(),\n new_classes=train_set.new_categories(),\n seg_criterion=segment_loss,\n kdd_criterion=distill_loss,\n kde_criterion=None,\n kdd_lambda=config.kd.decoder_factor,\n kde_lambda=config.kd.encoder_factor,\n logger=logger,\n samples=num_samples,\n debug=config.debug,\n **kwargs)\n trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,\n criterion=EarlyStoppingCriterion.maximum,\n patience=config.trainer.patience)) \\\n .add_callback(Checkpoint(call_every=1,\n model_folder=model_folder,\n name_format=f\"task{task.name}_step-{task.step}\",\n save_best=True)) \\\n .add_callback(DisplaySamples(inverse_transform=inverse_transform(),\n color_palette=train_set.palette()))\n # storing config and starting training\n config.version = git_revision_hash()\n store_config(config, path=config_path)\n trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)\n LOG.info(f\"Training completed at epoch {trainer.current_epoch:<2d} \"\n f\"(best {monitored}: {trainer.best_score:.4f})\")\n LOG.info(\"Experiment %s (step %d) completed!\", exp_id, task.step)\n\n\ndef train_ssl(config: SSLConfiguration):\n # assertions before starting\n assert config.name is not None or config.task.step == 0, \"Specify the experiment name with ICL steps >= 1!\"\n assert torch.backends.cudnn.enabled, \"AMP requires CUDNN backend to be enabled.\"\n if config.in_channels != 4:\n LOG.warn(\"Forcing input channels to 4 (previous value: %d)\", config.in_channels)\n config.in_channels = 4\n # prepare accelerator ASAP\n accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)\n\n # Create the directory tree:\n # outputs\n # |-- dataset\n # |--task_name\n # |-- exp_name\n # |-- models\n # |-- logs\n accelerator.wait_for_everyone()\n log_name = f\"output-{config.task.step}.log\"\n exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)\n config_path = out_folder / f\"segmenter-config-s{config.task.step}.yaml\"\n store_config(config, path=config_path)\n LOG.info(\"Run started\")\n LOG.info(\"Experiment ID: %s\", exp_id)\n LOG.info(\"Output folder: %s\", out_folder)\n LOG.info(\"Models folder: %s\", model_folder)\n LOG.info(\"Logs folder: %s\", logs_folder)\n LOG.info(\"Configuration: %s\", config_path)\n\n # seeding everything\n LOG.info(\"Using seed: %d\", config.seed)\n seed_everything(config.seed)\n # prepare datasets\n LOG.info(\"Loading datasets...\")\n train_set, valid_set = prepare_dataset(config=config)\n train_set = SSLDataset(train_set, transform=ssl_transforms())\n LOG.info(\"Full sets - train set: %d 
samples, validation set: %d samples\", len(train_set), len(valid_set))\n\n add_background = not train_set.has_background()\n task = Task(dataset=config.dataset,\n name=config.task.name,\n step=config.task.step,\n add_background=add_background)\n train_mask, valid_mask = 0, 255\n train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)\n valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)\n train_loader = DataLoader(dataset=train_set,\n batch_size=config.trainer.batch_size,\n shuffle=True,\n num_workers=config.trainer.num_workers,\n worker_init_fn=seed_worker,\n drop_last=True)\n valid_loader = DataLoader(dataset=valid_set,\n batch_size=config.trainer.batch_size,\n shuffle=False,\n num_workers=config.trainer.num_workers,\n worker_init_fn=seed_worker)\n LOG.info(\"ICL sets - Train set: %d samples, validation set: %d samples\", len(train_set), len(valid_set))\n\n # prepare models\n LOG.info(\"Preparing model...\")\n new_model, ssl_model = prepare_model_ssl(config=config, task=task)\n new_model = new_model.to(accelerator.device)\n ssl_model = ssl_model.to(accelerator.device)\n if task.step > 0:\n old_task = Task(dataset=config.dataset,\n name=config.task.name,\n step=task.step - 1,\n add_background=add_background)\n old_model = prepare_model(config=config, task=old_task)\n old_model = old_model.to(accelerator.device)\n else:\n old_model = None\n new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)\n LOG.info(\"Done preparing models\")\n\n # prepare optimizer and scheduler\n parameters = chain(new_model.parameters(), ssl_model.head.parameters())\n optimizer = config.optimizer.instantiate(parameters)\n scheduler = config.scheduler.instantiate(optimizer)\n # prepare losses, including SSL\n segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count())\n distill_loss = config.kd.instantiate()\n pretext_loss = config.ssl_loss()\n # asserts to verify their validity\n if task.step > 0 and config.ce.unbiased:\n seg_loss_name = str(type(segment_loss))\n kdd_loss_name = str(type(distill_loss))\n assert \"Unbiased\" in seg_loss_name, f\"Wrong loss '{seg_loss_name}' for step {task.step}\"\n assert \"Unbiased\" in kdd_loss_name, f\"Wrong loss '{kdd_loss_name}' for step {task.step}\"\n # prepare metrics and logger\n monitored = config.trainer.monitor.name\n train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)\n ssl_metrics = prepare_metrics_ssl(num_classes=config.model.pretext_classes, device=accelerator.device)\n logger = TensorBoardLogger(log_folder=logs_folder,\n filename_suffix=f\"step-{task.step}\",\n icl_step=task.step,\n comment=config.comment)\n # logging configuration to tensorboard\n LOG.debug(\"Logging flattened config. to TensorBoard\")\n logger.log_table(\"config\", flatten_config(config.dict()))\n\n # prepare trainer\n LOG.info(\"Visualize: %s, num. 
batches for visualization: %s\", str(config.visualize), str(config.num_samples))\n num_samples = int(config.visualize) * config.num_samples\n trainer = SSLTrainer(accelerator=accelerator,\n task=task,\n new_model=new_model,\n old_model=old_model,\n ssl_model=ssl_model,\n optimizer=optimizer,\n scheduler=scheduler,\n train_metrics=train_metrics,\n val_metrics=valid_metrics,\n old_classes=train_set.old_categories(),\n new_classes=train_set.new_categories(),\n seg_criterion=segment_loss,\n ssl_criterion=pretext_loss,\n kdd_criterion=distill_loss,\n kde_criterion=None,\n kdd_lambda=config.kd.decoder_factor,\n kde_lambda=config.kd.encoder_factor,\n logger=logger,\n samples=num_samples,\n debug=config.debug)\n trainer.add_metrics(SSLStage.ssl, metrics=ssl_metrics)\n trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,\n criterion=EarlyStoppingCriterion.maximum,\n patience=config.trainer.patience)) \\\n .add_callback(Checkpoint(call_every=1,\n model_folder=model_folder,\n name_format=f\"task{task.name}_step-{task.step}\",\n save_best=True)) \\\n .add_callback(DisplaySamples(inverse_transform=inverse_transform(),\n color_palette=train_set.palette()))\n trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)\n LOG.info(f\"Training completed at epoch {trainer.current_epoch:<2d} \"\n f\"(best {monitored}: {trainer.best_score:.4f})\")\n LOG.info(\"Experiment %s (step %d) completed!\", exp_id, task.step)\n"
] | [
[
"torch.utils.data.DataLoader"
]
] |
frank20a/collaborative-sats | [
"9d26d3c8f66cf43bbd514f02434851439e746797"
] | [
"src/slider_experiment/slider_experiment/thruster_pwm_tsl.py"
] | [
"import rclpy\nfrom rclpy.node import Node\nfrom geometry_msgs.msg import Vector3\nfrom std_msgs.msg import Int16\nfrom rclpy.qos import QoSPresetProfiles\nfrom ament_index_python import get_package_share_directory\n\nimport numpy as np\nimport sys, os\n\nfrom .parameters import force\nfrom .flags import flags\n\n\ndef create_pwm(value, resolution):\n if value < 0.0: \n value = -value\n if value > 1.0:\n value = 1.0\n \n return np.concatenate((np.ones(np.floor(resolution * value).astype(np.int32)), np.zeros(np.ceil(resolution * (1 - value)).astype(np.int32))))\n\n\nclass ThrustController(Node):\n def __init__(self):\n super().__init__('thrust_controller')\n \n self.declare_parameter('verbose', 0)\n self.declare_parameter('frequency', 10)\n self.declare_parameter('resolution', 100)\n\n self.verbose = self.get_parameter('verbose').get_parameter_value().integer_value\n self.frequency = self.get_parameter('frequency').get_parameter_value().integer_value\n self.resolution = self.get_parameter('resolution').get_parameter_value().integer_value\n\n sys.path.insert(1, os.path.join(get_package_share_directory('slider_experiment'), 'python_build/tsl_optimizer'))\n import tsl_optimizer as optimizer\n self.solver = optimizer.solver()\n \n self.signals = [create_pwm(0, self.resolution) for i in range(8)]\n self.i = 0\n \n self.create_subscription(Vector3, 'thrust_cmd', self.callback, QoSPresetProfiles.get_from_short_key('system_default'))\n self.pub = self.create_publisher(Int16, 'thruster_flags', QoSPresetProfiles.get_from_short_key('sensor_data'))\n\n self.create_timer(1/(self.frequency * self.resolution), self.send_signals)\n \n def callback(self, msg: Vector3):\n \n T = self.solver.run(p = [msg.x, msg.y, msg.z]).solution\n\n if self.verbose > 0: \n self.get_logger().info(f'\\n Fx = {msg.x: 2.2f}\\n Fy = {msg.y: 2.2f}\\ntau = {msg.z: 2.2f}')\n self.get_logger().info(f'cmd: {T}')\n\n self.signals = [create_pwm(T[i] / force, self.resolution) for i in range(8)]\n \n def send_signals(self):\n req = Int16()\n \n tmp = 0\n for i in range(8):\n if self.signals[i][self.i] == 1:\n tmp ^= flags[i]\n try:\n req.data = tmp\n except AssertionError:\n print(tmp)\n\n \n self.i += 1\n self.i %= self.resolution\n \n self.pub.publish(req)\n \n \ndef main(args=None):\n rclpy.init(args=args)\n node = ThrustController()\n rclpy.spin(node) \n node.destroy_node()\n rclpy.shutdown()\n \n\nif __name__ == '__main__':\n main()"
] | [
[
"numpy.floor",
"numpy.ceil"
]
] |
lucaskolson/ddd | [
"f273c61856bca27a40b9691b2a9842d8705a3503"
] | [
"app.py"
] | [
"import dash\nfrom dash import dcc\nfrom dash import html\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\nimport pandas as pd\nfrom dash import callback_context\n\ndf = px.data.election()\ngeojson = px.data.election_geojson()\ncandidates = df.winner.unique()\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\n\napp.title = \"ICE Detention Data Dashboard\"\n\nfy = ['2015-10-01', '2016-10-01', '2017-10-01', '2018-10-01']\n\nloc = [\"East Coast\", \"West Coast\", \"Southwest\", \"Midwest\", \"All\"]\n\napp.layout = html.Div(\n children=[\n html.Div(\n children=[\n html.H1(\n children=\"ICE Detention Analytics\", className=\"header-title\"\n ),\n html.P(\n children=\"A dashboard and data repository of\"\n \" ICE detention trends and facilities across the US\"\n \" between 2010 and 2020\",\n className=\"header-description\",\n ),\n ],\n className=\"header\",\n ),\n html.Div(\n children=[\n dcc.RadioItems(\n id='candidate', \n options=[{'value': x, 'label': x} \n for x in candidates],\n value=candidates[0],\n labelStyle={'display': 'inline-block'}\n ),\n html.Div(\n children=[dcc.Graph(\n id=\"choropleth\", config={\"displayModeBar\": False},\n ),\n html.Button(\"Download CSV\", id=\"btn_csv\"),\n dcc.Download(id=\"download-dataframe-csv\"),\n html.Button(\"Download Image\", id=\"btn_image\"),\n dcc.Download(id=\"download-image\")],\n className=\"card\",\n ),\n dcc.RadioItems(\n id='us_loc', \n options=[{'value': x, 'label': x} \n for x in loc],\n value=loc[0],\n labelStyle={'display': 'inline-block'}\n ),\n html.Div(\n children=dcc.Graph(\n id=\"fy_arrests\", config={\"displayModeBar\": False},\n ),\n className=\"card\",\n ),\n ],\n className=\"wrapper\",\n ),\n ]\n)\n\n\[email protected](\n Output(\"choropleth\", \"figure\"), \n [Input(\"candidate\", \"value\")])\n\ndef display_choropleth(candidate):\n fig = px.choropleth(\n df, geojson=geojson, color=candidate,\n locations=\"district\", featureidkey=\"properties.district\",\n projection=\"mercator\", range_color=[0, 6500])\n fig.update_geos(fitbounds=\"locations\", visible=False)\n fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\n\n return fig\n\[email protected](\n Output(\"download-dataframe-csv\", \"data\"),\n Input(\"btn_csv\", \"n_clicks\"),\n prevent_initial_call=True,\n)\ndef func(n_clicks):\n return dcc.send_data_frame(df.to_csv, \"mydf.csv\")\n\[email protected](\n Output(\"download-image\", \"data\"),\n Input(\"btn_image\", \"n_clicks\"),\n prevent_initial_call=True,\n)\ndef func(n_clicks):\n return dcc.send_file(\n \"./plot_downloads/test.png\"\n )\n\[email protected](\n Output(\"fy_arrests\", \"figure\"),\n [Input(\"us_loc\", \"value\")])\n\ndef display_arrest_fy(us_loc):\n arrests_by_fy = pd.read_csv(\"./data/arrests_by_fy.csv\")\n if us_loc == \"West Coast\":\n aor = ['LOS', 'SEA', 'SFR', 'SND']\n elif us_loc == \"East Coast\":\n aor = ['ATL', 'BAL', 'BOS', 'BUF', 'DET', 'MIA', 'NEW', 'NOL', 'NYC', 'PHI', 'WAS', 'HQ']\n elif us_loc == \"Midwest\":\n aor = ['CHI', 'SPM']\n elif us_loc == \"Southwest\":\n aor = ['DAL', 'DEN', 'ELP', 'HOU', 'PHO', 'SLC', 'SNA']\n elif us_loc == \"All\":\n aor = ['ATL', 'BAL', 'BOS', 'BUF', 'CHI', 'DAL', 'DEN', 'DET', 'ELP', 'HOU', 'HQ', 'LOS', 'MIA', 'NEW', 'NOL','NYC', 'PHI', 'PHO', 'SEA', 'SFR', 'SLC', 'SNA', 'SND', 'SPM', 'WAS']\n else:\n aor = ['ATL', 'BAL', 'BOS', 'BUF', 'CHI', 'DAL', 'DEN', 'DET', 'ELP', 'HOU', 'HQ', 'LOS', 'MIA', 'NEW', 
'NOL','NYC', 'PHI', 'PHO', 'SEA', 'SFR', 'SLC', 'SNA', 'SND', 'SPM', 'WAS']\n\n\n\n fig = px.line(arrests_by_fy, x=fy, \n y=aor, \n title = \"Arrests in AOR per FY\",\n labels=dict(x=\"Fiscal Year\", y=\"Number of Arrests\"))\n fig.update_xaxes(title=\"Fiscal Year\", nticks = 4)\n fig.update_yaxes(title=\"Number of Arrests\")\n fig.update_layout(legend_title_text='AOR')\n\n return fig\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n"
] | [
[
"pandas.read_csv"
]
] |
Roshan-Thomas/qiskit-terra | [
"77219b5c7b7146b1545c5e5190739b36f4064b2f",
"77219b5c7b7146b1545c5e5190739b36f4064b2f"
] | [
"qiskit/visualization/timeline/plotters/matplotlib.py",
"qiskit/algorithms/optimizers/qnspsa.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=invalid-name\n\n\"\"\"Matplotlib plotter API.\"\"\"\n\nfrom typing import Optional, Tuple\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Rectangle\n\nfrom qiskit.visualization.exceptions import VisualizationError\nfrom qiskit.visualization.timeline import core, types, drawings\nfrom qiskit.visualization.timeline.plotters.base_plotter import BasePlotter\nfrom qiskit.visualization.utils import matplotlib_close_if_inline\n\n\nclass MplPlotter(BasePlotter):\n \"\"\"Matplotlib API for pulse drawer.\n\n This plotter arranges bits along y axis of 2D canvas with vertical offset.\n \"\"\"\n\n def __init__(self, canvas: core.DrawerCanvas, axis: Optional[plt.Axes] = None):\n \"\"\"Create new plotter.\n\n Args:\n canvas: Configured drawer canvas object. Canvas object should be updated\n with `.update` method before initializing the plotter.\n axis: Matplotlib axis object. When `axis` is provided, the plotter updates\n given axis instead of creating and returning new matplotlib figure.\n \"\"\"\n super().__init__(canvas=canvas)\n\n if axis is None:\n fig_height = self.canvas.vmax - self.canvas.vmin\n fig_h = self.canvas.formatter[\"general.fig_unit_height\"] * fig_height\n fig_w = self.canvas.formatter[\"general.fig_width\"]\n\n self.figure = plt.figure(figsize=(fig_w, fig_h))\n self.ax = self.figure.add_subplot(1, 1, 1)\n else:\n self.figure = axis.figure\n self.ax = axis\n\n self.initialize_canvas()\n\n def initialize_canvas(self):\n \"\"\"Format appearance of matplotlib canvas.\"\"\"\n self.ax.set_facecolor(self.canvas.formatter[\"color.background\"])\n\n # axis lines\n self.ax.spines[\"right\"].set_color(\"none\")\n self.ax.spines[\"left\"].set_color(\"none\")\n self.ax.spines[\"top\"].set_color(\"none\")\n\n # axis labels\n self.ax.set_yticks([])\n axis_config = self.canvas.layout[\"time_axis_map\"](time_window=self.canvas.time_range)\n\n self.ax.set_xticks(list(axis_config.axis_map.keys()))\n self.ax.set_xticklabels(\n list(axis_config.axis_map.values()),\n fontsize=self.canvas.formatter[\"text_size.axis_label\"],\n )\n self.ax.set_xlabel(\n axis_config.label, fontsize=self.canvas.formatter[\"text_size.axis_label\"]\n )\n\n # boundary\n self.ax.set_xlim(*self.canvas.time_range)\n self.ax.set_ylim(self.canvas.vmin, self.canvas.vmax)\n\n def draw(self):\n \"\"\"Output drawings stored in canvas object.\"\"\"\n\n for _, data in self.canvas.collections:\n xvals = np.asarray(data.xvals, dtype=float)\n yvals = np.asarray(data.yvals, dtype=float)\n offsets = [self.canvas.assigned_coordinates[bit] for bit in data.bits]\n\n if isinstance(data, drawings.BoxData):\n # box data\n if data.data_type in [\n str(types.BoxType.SCHED_GATE.value),\n str(types.BoxType.DELAY.value),\n ]:\n # draw a smoothly rounded rectangle\n xs, ys1, ys2 = self._time_bucket_outline(xvals, yvals)\n self.ax.fill_between(\n x=xs, y1=ys1 + offsets[0], y2=ys2 + offsets[0], **data.styles\n )\n\n else:\n # draw a rectangle\n x0, x1 
= xvals\n y0, y1 = yvals + offsets[0]\n\n rect = Rectangle(xy=(x0, y0), width=x1 - x0, height=y1 - y0)\n pc = PatchCollection([rect], **data.styles)\n self.ax.add_collection(pc)\n\n elif isinstance(data, drawings.LineData):\n # line data\n self.ax.plot(xvals, yvals + offsets[0], **data.styles)\n\n elif isinstance(data, drawings.TextData):\n # text data\n if data.latex is not None:\n s = rf\"${data.latex}$\"\n else:\n s = data.text\n\n self.ax.text(x=xvals[0], y=yvals[0] + offsets[0], s=s, **data.styles)\n\n elif isinstance(data, drawings.GateLinkData):\n # gate link data\n self.ax.plot(xvals.repeat(len(offsets)), offsets, **data.styles)\n\n else:\n VisualizationError(\n \"Data {name} is not supported by {plotter}\"\n \"\".format(name=data, plotter=self.__class__.__name__)\n )\n\n def _time_bucket_outline(\n self, xvals: np.ndarray, yvals: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Generate outline of time bucket. Edges are smoothly faded.\n\n Args:\n xvals: Left and right point coordinates.\n yvals: Bottom and top point coordinates.\n\n Returns:\n Coordinate vectors of time bucket fringe.\n \"\"\"\n x0, x1 = xvals\n y0, y1 = yvals\n\n width = x1 - x0\n y_mid = 0.5 * (y0 + y1)\n\n risefall = int(min(self.canvas.formatter[\"time_bucket.edge_dt\"], max(width / 2 - 2, 0)))\n edge = np.sin(np.pi / 2 * np.arange(0, risefall) / risefall)\n\n xs = np.concatenate(\n [\n np.arange(x0, x0 + risefall),\n [x0 + risefall, x1 - risefall],\n np.arange(x1 - risefall + 1, x1 + 1),\n ]\n )\n\n l1 = (y1 - y_mid) * np.concatenate([edge, [1, 1], edge[::-1]])\n l2 = (y0 - y_mid) * np.concatenate([edge, [1, 1], edge[::-1]])\n\n return xs, l1, l2\n\n def save_file(self, filename: str):\n \"\"\"Save image to file.\n Args:\n filename: File path to output image data.\n \"\"\"\n plt.savefig(filename, bbox_inches=\"tight\", dpi=self.canvas.formatter[\"general.dpi\"])\n\n def get_image(self, interactive: bool = False) -> matplotlib.pyplot.Figure:\n \"\"\"Get image data to return.\n Args:\n interactive: When set `True` show the circuit in a new window.\n This depends on the matplotlib backend being used supporting this.\n Returns:\n Matplotlib figure data.\n \"\"\"\n matplotlib_close_if_inline(self.figure)\n\n if self.figure and interactive:\n self.figure.show()\n\n return self.figure\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"The QN-SPSA optimizer.\"\"\"\n\nfrom typing import Any, Iterator, Optional, Union, Callable, Dict\n\nimport numpy as np\nfrom qiskit.providers import Backend\nfrom qiskit.circuit import ParameterVector, QuantumCircuit\nfrom qiskit.opflow import StateFn, CircuitSampler, ExpectationBase\nfrom qiskit.utils import QuantumInstance\n\nfrom .spsa import SPSA, CALLBACK, TERMINATIONCHECKER, _batch_evaluate\n\n# the function to compute the fidelity\nFIDELITY = Callable[[np.ndarray, np.ndarray], float]\n\n\nclass QNSPSA(SPSA):\n r\"\"\"The Quantum Natural SPSA (QN-SPSA) optimizer.\n\n The QN-SPSA optimizer [1] is a stochastic optimizer that belongs to the family of gradient\n descent methods. This optimizer is based on SPSA but attempts to improve the convergence by\n sampling the **natural gradient** instead of the vanilla, first-order gradient. It achieves\n this by approximating Hessian of the ``fidelity`` of the ansatz circuit.\n\n Compared to natural gradients, which require :math:`\\mathcal{O}(d^2)` expectation value\n evaluations for a circuit with :math:`d` parameters, QN-SPSA only requires\n :math:`\\mathcal{O}(1)` and can therefore significantly speed up the natural gradient calculation\n by sacrificing some accuracy. Compared to SPSA, QN-SPSA requires 4 additional function\n evaluations of the fidelity.\n\n The stochastic approximation of the natural gradient can be systematically improved by\n increasing the number of ``resamplings``. This leads to a Monte Carlo-style convergence to\n the exact, analytic value.\n\n .. note::\n\n This component has some function that is normally random. If you want to reproduce behavior\n then you should set the random number generator seed in the algorithm_globals\n (``qiskit.utils.algorithm_globals.random_seed = seed``).\n\n Examples:\n\n This short example runs QN-SPSA for the ground state calculation of the ``Z ^ Z``\n observable where the ansatz is a ``PauliTwoDesign`` circuit.\n\n .. code-block:: python\n\n import numpy as np\n from qiskit.algorithms.optimizers import QNSPSA\n from qiskit.circuit.library import PauliTwoDesign\n from qiskit.opflow import Z, StateFn\n\n ansatz = PauliTwoDesign(2, reps=1, seed=2)\n observable = Z ^ Z\n initial_point = np.random.random(ansatz.num_parameters)\n\n def loss(x):\n bound = ansatz.bind_parameters(x)\n return np.real((StateFn(observable, is_measurement=True) @ StateFn(bound)).eval())\n\n fidelity = QNSPSA.get_fidelity(ansatz)\n qnspsa = QNSPSA(fidelity, maxiter=300)\n result = qnspsa.optimize(ansatz.num_parameters, loss, initial_point=initial_point)\n\n\n References:\n\n [1] J. 
Gacon et al, \"Simultaneous Perturbation Stochastic Approximation of the Quantum\n Fisher Information\", `arXiv:2103.09232 <https://arxiv.org/abs/2103.09232>`_\n\n \"\"\"\n\n def __init__(\n self,\n fidelity: FIDELITY,\n maxiter: int = 100,\n blocking: bool = True,\n allowed_increase: Optional[float] = None,\n learning_rate: Optional[Union[float, Callable[[], Iterator]]] = None,\n perturbation: Optional[Union[float, Callable[[], Iterator]]] = None,\n last_avg: int = 1,\n resamplings: Union[int, Dict[int, int]] = 1,\n perturbation_dims: Optional[int] = None,\n regularization: Optional[float] = None,\n hessian_delay: int = 0,\n lse_solver: Optional[Callable[[np.ndarray, np.ndarray], np.ndarray]] = None,\n initial_hessian: Optional[np.ndarray] = None,\n callback: Optional[CALLBACK] = None,\n termination_checker: Optional[TERMINATIONCHECKER] = None,\n ) -> None:\n r\"\"\"\n Args:\n fidelity: A function to compute the fidelity of the ansatz state with itself for\n two different sets of parameters.\n maxiter: The maximum number of iterations. Note that this is not the maximal number\n of function evaluations.\n blocking: If True, only accepts updates that improve the loss (up to some allowed\n increase, see next argument).\n allowed_increase: If ``blocking`` is ``True``, this argument determines by how much\n the loss can increase with the proposed parameters and still be accepted.\n If ``None``, the allowed increases is calibrated automatically to be twice the\n approximated standard deviation of the loss function.\n learning_rate: The update step is the learning rate is multiplied with the gradient.\n If the learning rate is a float, it remains constant over the course of the\n optimization. It can also be a callable returning an iterator which yields the\n learning rates for each optimization step.\n If ``learning_rate`` is set ``perturbation`` must also be provided.\n perturbation: Specifies the magnitude of the perturbation for the finite difference\n approximation of the gradients. Can be either a float or a generator yielding\n the perturbation magnitudes per step.\n If ``perturbation`` is set ``learning_rate`` must also be provided.\n last_avg: Return the average of the ``last_avg`` parameters instead of just the\n last parameter values.\n resamplings: The number of times the gradient (and Hessian) is sampled using a random\n direction to construct a gradient estimate. Per default the gradient is estimated\n using only one random direction. If an integer, all iterations use the same number\n of resamplings. If a dictionary, this is interpreted as\n ``{iteration: number of resamplings per iteration}``.\n perturbation_dims: The number of perturbed dimensions. Per default, all dimensions\n are perturbed, but a smaller, fixed number can be perturbed. If set, the perturbed\n dimensions are chosen uniformly at random.\n regularization: To ensure the preconditioner is symmetric and positive definite, the\n identity times a small coefficient is added to it. This generator yields that\n coefficient.\n hessian_delay: Start multiplying the gradient with the inverse Hessian only after a\n certain number of iterations. The Hessian is still evaluated and therefore this\n argument can be useful to first get a stable average over the last iterations before\n using it as preconditioner.\n lse_solver: The method to solve for the inverse of the Hessian. Per default an\n exact LSE solver is used, but can e.g. be overwritten by a minimization routine.\n initial_hessian: The initial guess for the Hessian. 
By default the identity matrix\n is used.\n callback: A callback function passed information in each iteration step. The\n information is, in this order: the parameters, the function value, the number\n of function evaluations, the stepsize, whether the step was accepted.\n termination_checker: A callback function executed at the end of each iteration step. The\n arguments are, in this order: the parameters, the function value, the number\n of function evaluations, the stepsize, whether the step was accepted. If the callback\n returns True, the optimization is terminated.\n To prevent additional evaluations of the objective method, if the objective has not yet\n been evaluated, the objective is estimated by taking the mean of the objective\n evaluations used in the estimate of the gradient.\n\n\n \"\"\"\n super().__init__(\n maxiter,\n blocking,\n allowed_increase,\n # trust region *must* be false for natural gradients to work\n trust_region=False,\n learning_rate=learning_rate,\n perturbation=perturbation,\n resamplings=resamplings,\n callback=callback,\n second_order=True,\n hessian_delay=hessian_delay,\n lse_solver=lse_solver,\n regularization=regularization,\n perturbation_dims=perturbation_dims,\n initial_hessian=initial_hessian,\n termination_checker=termination_checker,\n )\n\n self.fidelity = fidelity\n\n def _point_sample(self, loss, x, eps, delta1, delta2):\n loss_points = [x + eps * delta1, x - eps * delta1]\n fidelity_points = [\n (x, x + eps * delta1),\n (x, x - eps * delta1),\n (x, x + eps * (delta1 + delta2)),\n (x, x + eps * (-delta1 + delta2)),\n ]\n self._nfev += 6\n\n loss_values = _batch_evaluate(loss, loss_points, self._max_evals_grouped)\n fidelity_values = _batch_evaluate(self.fidelity, fidelity_points, self._max_evals_grouped)\n\n # compute the gradient approximation and additionally return the loss function evaluations\n gradient_estimate = (loss_values[0] - loss_values[1]) / (2 * eps) * delta1\n\n # compute the preconditioner point estimate\n diff = fidelity_values[2] - fidelity_values[0]\n diff -= fidelity_values[3] - fidelity_values[1]\n diff /= 2 * eps**2\n\n rank_one = np.outer(delta1, delta2)\n # -0.5 factor comes from the fact that we need -0.5 * fidelity\n hessian_estimate = -0.5 * diff * (rank_one + rank_one.T) / 2\n\n return np.mean(loss_values), gradient_estimate, hessian_estimate\n\n @property\n def settings(self) -> Dict[str, Any]:\n \"\"\"The optimizer settings in a dictionary format.\"\"\"\n # re-use serialization from SPSA\n settings = super().settings\n settings.update({\"fidelity\": self.fidelity})\n\n # remove SPSA-specific arguments not in QNSPSA\n settings.pop(\"trust_region\")\n settings.pop(\"second_order\")\n\n return settings\n\n @staticmethod\n def get_fidelity(\n circuit: QuantumCircuit,\n backend: Optional[Union[Backend, QuantumInstance]] = None,\n expectation: Optional[ExpectationBase] = None,\n ) -> Callable[[np.ndarray, np.ndarray], float]:\n r\"\"\"Get a function to compute the fidelity of ``circuit`` with itself.\n\n Let ``circuit`` be a parameterized quantum circuit performing the operation\n :math:`U(\\theta)` given a set of parameters :math:`\\theta`. Then this method returns\n a function to evaluate\n\n .. 
math::\n\n F(\\theta, \\phi) = \\big|\\langle 0 | U^\\dagger(\\theta) U(\\phi) |0\\rangle \\big|^2.\n\n The output of this function can be used as input for the ``fidelity`` to the\n :class:~`qiskit.algorithms.optimizers.QNSPSA` optimizer.\n\n Args:\n circuit: The circuit preparing the parameterized ansatz.\n backend: A backend of quantum instance to evaluate the circuits. If None, plain\n matrix multiplication will be used.\n expectation: An expectation converter to specify how the expected value is computed.\n If a shot-based readout is used this should be set to ``PauliExpectation``.\n\n Returns:\n A handle to the function :math:`F`.\n\n \"\"\"\n params_x = ParameterVector(\"x\", circuit.num_parameters)\n params_y = ParameterVector(\"y\", circuit.num_parameters)\n\n expression = ~StateFn(circuit.assign_parameters(params_x)) @ StateFn(\n circuit.assign_parameters(params_y)\n )\n\n if expectation is not None:\n expression = expectation.convert(expression)\n\n if backend is None:\n\n def fidelity(values_x, values_y):\n value_dict = dict(\n zip(params_x[:] + params_y[:], values_x.tolist() + values_y.tolist())\n )\n return np.abs(expression.bind_parameters(value_dict).eval()) ** 2\n\n else:\n sampler = CircuitSampler(backend)\n\n def fidelity(values_x, values_y=None):\n if values_y is not None: # no batches\n value_dict = dict(\n zip(params_x[:] + params_y[:], values_x.tolist() + values_y.tolist())\n )\n else:\n value_dict = {p: [] for p in params_x[:] + params_y[:]}\n for values_xy in values_x:\n for value_x, param_x in zip(values_xy[0, :], params_x):\n value_dict[param_x].append(value_x)\n\n for value_y, param_y in zip(values_xy[1, :], params_y):\n value_dict[param_y].append(value_y)\n\n return np.abs(sampler.convert(expression, params=value_dict).eval()) ** 2\n\n return fidelity\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"numpy.asarray",
"numpy.arange",
"matplotlib.patches.Rectangle",
"matplotlib.collections.PatchCollection",
"numpy.concatenate"
],
[
"numpy.outer",
"numpy.mean"
]
] |
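The `_point_sample` method in the QN-SPSA row above combines a two-point SPSA gradient estimate with a four-point fidelity difference for the curvature term. Below is a minimal numpy-only sketch of that estimator; the quadratic loss and the Gaussian-overlap stand-in for the state fidelity are assumptions for illustration, not part of the Qiskit code in this row.

import numpy as np

rng = np.random.default_rng(42)

def loss(x):
    # toy objective (assumption): minimum at x = 1
    return float(np.sum((x - 1.0) ** 2))

def fidelity(x, y):
    # toy overlap standing in for |<psi(x)|psi(y)>|**2 (assumption)
    return float(np.exp(-np.sum((x - y) ** 2)))

def qnspsa_point_sample(x, eps=0.01):
    d1 = rng.choice([-1.0, 1.0], size=x.shape)
    d2 = rng.choice([-1.0, 1.0], size=x.shape)
    # two loss evaluations -> SPSA gradient estimate
    grad = (loss(x + eps * d1) - loss(x - eps * d1)) / (2 * eps) * d1
    # four fidelity evaluations -> curvature point estimate
    diff = fidelity(x, x + eps * (d1 + d2)) - fidelity(x, x + eps * d1)
    diff -= fidelity(x, x + eps * (-d1 + d2)) - fidelity(x, x - eps * d1)
    diff /= 2 * eps ** 2
    rank_one = np.outer(d1, d2)
    hessian = -0.5 * diff * (rank_one + rank_one.T) / 2
    return grad, hessian

g, h = qnspsa_point_sample(np.zeros(3))
print(g.shape, h.shape)   # (3,) (3, 3)

The returned hessian is the symmetrized rank-one estimate -0.5 * diff * (outer(d1, d2) + outer(d2, d1)) / 2, exactly as in the row above; averaging it over iterations (the resamplings argument) is what turns it into a usable natural-gradient preconditioner.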
limberc/mindspore | [
"59a277756eb4faad9ac9afcc7fd526e8277d4994"
] | [
"tests/ut/python/dataset/test_compose.py"
] | [
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\nimport pytest\nimport mindspore.common.dtype as mstype\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.c_transforms as c_transforms\nimport mindspore.dataset.transforms.py_transforms as py_transforms\n\nimport mindspore.dataset.vision.c_transforms as c_vision\nimport mindspore.dataset.vision.py_transforms as py_vision\n\nfrom util import visualize_list, save_and_check_md5, config_get_set_seed, config_get_set_num_parallel_workers\n\nGENERATE_GOLDEN = False\n\n\ndef test_compose():\n \"\"\"\n Test C++ and Python Compose Op\n \"\"\"\n ds.config.set_seed(0)\n\n def test_config(arr, op_list):\n try:\n data = ds.NumpySlicesDataset(arr, column_names=\"col\", shuffle=False)\n data = data.map(input_columns=[\"col\"], operations=op_list)\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n res.append(i[\"col\"].tolist())\n return res\n except (TypeError, ValueError) as e:\n return str(e)\n\n # Test simple compose with only 1 op, this would generate a warning\n assert test_config([[1, 0], [3, 4]], c_transforms.Compose([c_transforms.Fill(2)])) == [[2, 2], [2, 2]]\n\n # Test 1 column -> 2 columns -> 1 -> 2 -> 1\n assert test_config([[1, 0]],\n c_transforms.Compose(\n [c_transforms.Duplicate(), c_transforms.Concatenate(), c_transforms.Duplicate(),\n c_transforms.Concatenate()])) \\\n == [[1, 0] * 4]\n\n # Test one Python transform followed by a C++ transform. 
Type after OneHot is a float (mixed use-case)\n assert test_config([1, 0],\n c_transforms.Compose([py_transforms.OneHotOp(2), c_transforms.TypeCast(mstype.int32)])) \\\n == [[[0, 1]], [[1, 0]]]\n\n # Test exceptions.\n with pytest.raises(TypeError) as error_info:\n c_transforms.Compose([1, c_transforms.TypeCast(mstype.int32)])\n assert \"op_list[0] is neither a c_transform op (TensorOperation) nor a callable pyfunc.\" in str(error_info.value)\n\n # Test empty op list\n with pytest.raises(ValueError) as error_info:\n test_config([1, 0], c_transforms.Compose([]))\n assert \"op_list can not be empty.\" in str(error_info.value)\n\n # Test Python compose op\n assert test_config([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2)])) == [[[0, 1]], [[1, 0]]]\n assert test_config([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2), (lambda x: x + x)])) == [[[0, 2]],\n [[2, 0]]]\n\n # Test nested Python compose op\n assert test_config([1, 0],\n py_transforms.Compose([py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)])) \\\n == [[[0, 2]], [[2, 0]]]\n\n # Test passing a list of Python ops without Compose wrapper\n assert test_config([1, 0],\n [py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)]) \\\n == [[[0, 2]], [[2, 0]]]\n assert test_config([1, 0], [py_transforms.OneHotOp(2), (lambda x: x + x)]) == [[[0, 2]], [[2, 0]]]\n\n # Test a non callable function\n with pytest.raises(ValueError) as error_info:\n py_transforms.Compose([1])\n assert \"transforms[0] is not callable.\" in str(error_info.value)\n\n # Test empty Python op list\n with pytest.raises(ValueError) as error_info:\n test_config([1, 0], py_transforms.Compose([]))\n assert \"transforms list is empty.\" in str(error_info.value)\n\n # Pass in extra brackets\n with pytest.raises(TypeError) as error_info:\n py_transforms.Compose([(lambda x: x + x)])()\n assert \"Compose was called without an image. 
Fix invocation (avoid it being invoked as Compose([...])()).\" in str(\n error_info.value)\n\n\ndef test_lambdas():\n \"\"\"\n Test Multi Column Python Compose Op\n \"\"\"\n ds.config.set_seed(0)\n\n def test_config(arr, input_columns, output_cols, op_list):\n data = ds.NumpySlicesDataset(arr, column_names=input_columns, shuffle=False)\n data = data.map(operations=op_list, input_columns=input_columns, output_columns=output_cols,\n column_order=output_cols)\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n for col_name in output_cols:\n res.append(i[col_name].tolist())\n return res\n\n arr = ([[1]], [[3]])\n\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\"], py_transforms.Compose([(lambda x, y: x)])) == [[1]]\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\"], py_transforms.Compose([lambda x, y: x, lambda x: x])) == [[1]]\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\", \"b\"],\n py_transforms.Compose([lambda x, y: x, lambda x: (x, x * 2)])) == \\\n [[1], [2]]\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\", \"b\"],\n [lambda x, y: (x, x + y), lambda x, y: (x, y * 2)]) == [[1], [8]]\n\n\ndef test_c_py_compose_transforms_module():\n \"\"\"\n Test combining Python and C++ transforms\n \"\"\"\n ds.config.set_seed(0)\n\n def test_config(arr, input_columns, output_cols, op_list):\n data = ds.NumpySlicesDataset(arr, column_names=input_columns, shuffle=False)\n data = data.map(operations=op_list, input_columns=input_columns, output_columns=output_cols,\n column_order=output_cols)\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n for col_name in output_cols:\n res.append(i[col_name].tolist())\n return res\n\n arr = [1, 0]\n assert test_config(arr, [\"cols\"], [\"cols\"],\n [py_transforms.OneHotOp(2), c_transforms.Mask(c_transforms.Relational.EQ, 1)]) == \\\n [[[False, True]],\n [[True, False]]]\n assert test_config(arr, [\"cols\"], [\"cols\"],\n [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1)]) \\\n == [[[1, 1]], [[1, 1]]]\n assert test_config(arr, [\"cols\"], [\"cols\"],\n [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1), (lambda x: x + x)]) \\\n == [[[2, 2]], [[2, 2]]]\n assert test_config([[1, 3]], [\"cols\"], [\"cols\"],\n [c_transforms.PadEnd([3], -1), (lambda x: x + x)]) \\\n == [[2, 6, -2]]\n\n arr = ([[1]], [[3]])\n assert test_config(arr, [\"col0\", \"col1\"], [\"a\"], [(lambda x, y: x + y), c_transforms.PadEnd([2], -1)]) == [[4, -1]]\n\n\ndef test_c_py_compose_vision_module(plot=False, run_golden=True):\n \"\"\"\n Test combining Python and C++ vision transforms\n \"\"\"\n original_seed = config_get_set_seed(10)\n original_num_parallel_workers = config_get_set_num_parallel_workers(1)\n\n def test_config(plot, file_name, op_list):\n data_dir = \"../data/dataset/testImageNetData/train/\"\n data1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data1 = data1.map(operations=op_list, input_columns=[\"image\"])\n data2 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data2 = data2.map(operations=c_vision.Decode(), input_columns=[\"image\"])\n original_images = []\n transformed_images = []\n\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):\n transformed_images.append(item[\"image\"])\n for item in data2.create_dict_iterator(num_epochs=1, output_numpy=True):\n original_images.append(item[\"image\"])\n\n if run_golden:\n # Compare with expected md5 from images\n save_and_check_md5(data1, file_name, generate_golden=GENERATE_GOLDEN)\n\n if 
plot:\n visualize_list(original_images, transformed_images)\n\n test_config(op_list=[c_vision.Decode(),\n py_vision.ToPIL(),\n py_vision.Resize((224, 224)),\n np.array],\n plot=plot, file_name=\"compose_c_py_1.npz\")\n\n test_config(op_list=[c_vision.Decode(),\n c_vision.Resize((224, 244)),\n py_vision.ToPIL(),\n np.array,\n c_vision.Resize((24, 24))],\n plot=plot, file_name=\"compose_c_py_2.npz\")\n\n test_config(op_list=[py_vision.Decode(),\n py_vision.Resize((224, 224)),\n np.array,\n c_vision.RandomColor()],\n plot=plot, file_name=\"compose_c_py_3.npz\")\n\n # Restore configuration\n ds.config.set_seed(original_seed)\n ds.config.set_num_parallel_workers((original_num_parallel_workers))\n\n\ndef test_py_transforms_with_c_vision():\n \"\"\"\n These examples will fail, as py_transforms.Random(Apply/Choice/Order) expect callable functions\n \"\"\"\n\n ds.config.set_seed(0)\n\n def test_config(op_list):\n data_dir = \"../data/dataset/testImageNetData/train/\"\n data = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data = data.map(operations=op_list)\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n for col_name in output_cols:\n res.append(i[col_name].tolist())\n return res\n\n with pytest.raises(ValueError) as error_info:\n test_config(py_transforms.RandomApply([c_vision.RandomResizedCrop(200)]))\n assert \"transforms[0] is not callable.\" in str(error_info.value)\n\n with pytest.raises(ValueError) as error_info:\n test_config(py_transforms.RandomChoice([c_vision.RandomResizedCrop(200)]))\n assert \"transforms[0] is not callable.\" in str(error_info.value)\n\n with pytest.raises(ValueError) as error_info:\n test_config(py_transforms.RandomOrder([np.array, c_vision.RandomResizedCrop(200)]))\n assert \"transforms[1] is not callable.\" in str(error_info.value)\n\n with pytest.raises(RuntimeError) as error_info:\n test_config([py_transforms.OneHotOp(20, 0.1)])\n assert \"The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()\" in str(\n error_info.value)\n\n\ndef test_py_vision_with_c_transforms():\n \"\"\"\n Test combining Python vision operations with C++ transforms operations\n \"\"\"\n\n ds.config.set_seed(0)\n\n def test_config(op_list):\n data_dir = \"../data/dataset/testImageNetData/train/\"\n data1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data1 = data1.map(operations=op_list, input_columns=[\"image\"])\n transformed_images = []\n\n for item in data1.create_dict_iterator(num_epochs=1, output_numpy=True):\n transformed_images.append(item[\"image\"])\n return transformed_images\n\n # Test with Mask Op\n output_arr = test_config([py_vision.Decode(),\n py_vision.CenterCrop((2)), np.array,\n c_transforms.Mask(c_transforms.Relational.GE, 100)])\n\n exp_arr = [np.array([[[True, False, False],\n [True, False, False]],\n [[True, False, False],\n [True, False, False]]]),\n np.array([[[True, False, False],\n [True, False, False]],\n [[True, False, False],\n [True, False, False]]])]\n\n for exp_a, output in zip(exp_arr, output_arr):\n np.testing.assert_array_equal(exp_a, output)\n\n # Test with Fill Op\n output_arr = test_config([py_vision.Decode(),\n py_vision.CenterCrop((4)), np.array,\n c_transforms.Fill(10)])\n\n exp_arr = [np.ones((4, 4, 3)) * 10] * 2\n for exp_a, output in zip(exp_arr, output_arr):\n np.testing.assert_array_equal(exp_a, output)\n\n # Test with Concatenate Op, which will raise an error since ConcatenateOp only supports rank 1 tensors.\n with pytest.raises(RuntimeError) as error_info:\n test_config([py_vision.Decode(),\n py_vision.CenterCrop((2)), np.array,\n c_transforms.Concatenate(0)])\n assert \"Only 1D tensors supported\" in str(error_info.value)\n\n\ndef test_compose_with_custom_function():\n \"\"\"\n Test Python Compose with custom function\n \"\"\"\n\n def custom_function(x):\n return (x, x * x)\n\n # First dataset\n op_list = [\n lambda x: x * 3,\n custom_function,\n # convert two column output to one\n lambda *images: np.stack(images)\n ]\n\n data = ds.NumpySlicesDataset([[1, 2]], column_names=[\"col0\"], shuffle=False)\n data = data.map(input_columns=[\"col0\"], operations=op_list)\n #\n\n res = []\n for i in data.create_dict_iterator(output_numpy=True):\n res.append(i[\"col0\"].tolist())\n assert res == [[[3, 6], [9, 36]]]\n\n\nif __name__ == \"__main__\":\n test_compose()\n test_lambdas()\n test_c_py_compose_transforms_module()\n test_c_py_compose_vision_module(plot=True)\n test_py_transforms_with_c_vision()\n test_py_vision_with_c_transforms()\n test_compose_with_custom_function()\n"
] | [
[
"numpy.array",
"numpy.stack",
"numpy.testing.assert_array_equal",
"numpy.ones"
]
] |
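A pared-down, standalone version of the first assertion in test_compose above; it uses only operations that already appear in this file (NumpySlicesDataset, Compose, Fill, create_dict_iterator) and assumes the same MindSpore version the test targets.

import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c_transforms

data = ds.NumpySlicesDataset([[1, 0], [3, 4]], column_names="col", shuffle=False)
data = data.map(input_columns=["col"], operations=c_transforms.Compose([c_transforms.Fill(2)]))
for row in data.create_dict_iterator(output_numpy=True):
    print(row["col"].tolist())   # [2, 2] for both rows, matching the assert in test_compose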
vbelissen/packnet-sfm | [
"dfba692596b08ccff17abb9423c1958cecc75b0f"
] | [
"packnet_sfm/models/SemiSupModel_fisheye.py"
] | [
"# Copyright 2020 Toyota Research Institute. All rights reserved.\n\nimport torch\n\nfrom packnet_sfm.models.SelfSupModel_fisheye import SfmModel, SelfSupModel_fisheye\nfrom packnet_sfm.losses.supervised_loss_valeo import SupervisedLoss\nfrom packnet_sfm.models.model_utils import merge_outputs\nfrom packnet_sfm.utils.depth import depth2inv\n\n\nclass SemiSupModel_fisheye(SelfSupModel_fisheye):\n \"\"\"\n Model that inherits a depth and pose networks, plus the self-supervised loss from\n SelfSupModel and includes a supervised loss for semi-supervision.\n\n Parameters\n ----------\n supervised_loss_weight : float\n Weight for the supervised loss\n kwargs : dict\n Extra parameters\n \"\"\"\n def __init__(self, supervised_loss_weight=0.9, **kwargs):\n # Initializes SelfSupModel\n super().__init__(**kwargs)\n # If supervision weight is 0.0, use SelfSupModel directly\n assert 0. < supervised_loss_weight <= 1., \"Model requires (0, 1] supervision\"\n # Store weight and initializes supervised loss\n self.supervised_loss_weight = supervised_loss_weight\n self._supervised_loss = SupervisedLoss(**kwargs)\n\n # Pose network is only required if there is self-supervision\n self._network_requirements['pose_net'] = self.supervised_loss_weight < 1\n # GT depth is only required if there is supervision\n self._train_requirements['gt_depth'] = self.supervised_loss_weight > 0\n\n @property\n def logs(self):\n \"\"\"Return logs.\"\"\"\n return {\n **super().logs,\n **self._supervised_loss.logs\n }\n\n def supervised_loss(self, inv_depths, gt_inv_depths,\n path_to_ego_mask,\n return_logs=False, progress=0.0):\n \"\"\"\n Calculates the supervised loss.\n\n Parameters\n ----------\n inv_depths : torch.Tensor [B,1,H,W]\n Predicted inverse depth maps from the original image\n gt_inv_depths : torch.Tensor [B,1,H,W]\n Ground-truth inverse depth maps from the original image\n return_logs : bool\n True if logs are stored\n progress :\n Training progress percentage\n\n Returns\n -------\n output : dict\n Dictionary containing a \"loss\" scalar a \"metrics\" dictionary\n \"\"\"\n return self._supervised_loss(\n inv_depths, gt_inv_depths,\n path_to_ego_mask,\n return_logs=return_logs, progress=progress)\n\n def forward(self, batch, return_logs=False, progress=0.0):\n \"\"\"\n Processes a batch.\n\n Parameters\n ----------\n batch : dict\n Input batch\n return_logs : bool\n True if logs are stored\n progress :\n Training progress percentage\n\n Returns\n -------\n output : dict\n Dictionary containing a \"loss\" scalar and different metrics and predictions\n for logging and downstream usage.\n \"\"\"\n if not self.training:\n # If not training, no need for self-supervised loss\n return SfmModel.forward(self, batch)\n else:\n if self.supervised_loss_weight == 1.:\n # If no self-supervision, no need to calculate loss\n self_sup_output = SfmModel.forward(self, batch)\n loss = torch.tensor([0.]).type_as(batch['rgb'])\n else:\n # Otherwise, calculate and weight self-supervised loss\n self_sup_output = SelfSupModel_fisheye.forward(self, batch)\n loss = (1.0 - self.supervised_loss_weight) * self_sup_output['loss']\n # Calculate and weight supervised loss\n sup_output = self.supervised_loss(\n self_sup_output['inv_depths'], depth2inv(batch['depth']),\n batch['path_to_ego_mask'],\n return_logs=return_logs, progress=progress)\n loss += self.supervised_loss_weight * sup_output['loss']\n # Merge and return outputs\n return {\n 'loss': loss,\n **merge_outputs(self_sup_output, sup_output),\n }\n"
] | [
[
"torch.tensor"
]
] |
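The forward pass above blends the two objectives as (1 - w) * self-supervised loss + w * supervised loss. A small hedged illustration of just that weighting, with placeholder scalar losses (assumptions) standing in for the real model outputs:

import torch

def blend_losses(self_sup_loss, sup_loss, supervised_loss_weight=0.9):
    # mirrors the weighting in SemiSupModel_fisheye.forward
    loss = (1.0 - supervised_loss_weight) * self_sup_loss
    return loss + supervised_loss_weight * sup_loss

print(float(blend_losses(torch.tensor(0.42), torch.tensor(0.10))))  # 0.1*0.42 + 0.9*0.10 = 0.132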
terasakisatoshi/pythonCodes | [
"baee095ecee96f6b5ec6431267cdc6c40512a542"
] | [
"cythonTest/onlineSample/combination/jit_combination.py"
] | [
"import time\nfrom numba import jit\nimport numpy as np \n\n@jit()\ndef jit_sum_conbination(N):\n xs = [i for i in range(N)]\n ys = [i for i in range(N)]\n total = 0\n for x in xs:\n for y in ys:\n total += x+y\n return total\n\ndef py_sum_conbination(N):\n xs = np.arange(N)\n ys = np.arange(N)\n total = 0\n for x in xs:\n for y in ys:\n total += x+y\n return total\n\ndef main():\n N = 10000\n start = time.time()\n total = jit_sum_conbination(N)\n end = time.time()\n print(total)\n print('elapsed time=', end-start)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.arange"
]
] |
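The nested loops above compute the sum over all x, y < N of (x + y), which has the closed form N**2 * (N - 1). That identity gives a quick, dependency-free correctness check for either implementation; the snippet below re-derives the sum in pure Python rather than calling the numba version, so it runs on its own.

N = 1000
total = sum(x + y for x in range(N) for y in range(N))
assert total == N**2 * (N - 1)
print(total)   # 999000000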
alexshuang/onnxruntime | [
"771a6d235b8495d05bcf6a906107df1bd6e81744"
] | [
"orttraining/orttraining/python/training/_ortmodule_utils.py"
] | [
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# --------------------------------------------------------------------------\n\nfrom . import _utils\n\nfrom onnxruntime.capi.onnxruntime_inference_collection import OrtValue\nfrom onnxruntime.capi import _pybind_state as C\n\nimport torch\nfrom torch.utils.dlpack import from_dlpack, to_dlpack\nfrom torch.utils.cpp_extension import load_inline\n\n\ndef _ortvalue_to_torch_tensor(ortvalue):\n # PyTorch's to_dlpack() uses same config for both torch.bool and torch.uint8,\n # and convert the config to torch.uint8 tensor duing from_dlpack().\n # So we need to convert the torch tensor to torch.bool type if OrtValue is bool tensor.\n torch_tensor = from_dlpack(ortvalue._ortvalue.to_dlpack())\n return torch_tensor.to(torch.bool) if ortvalue.data_type() == 'tensor(bool)' else torch_tensor\n\n\ndef _ortvalue_from_torch_tensor(torch_tensor):\n return OrtValue(C.OrtValue.from_dlpack(to_dlpack(torch_tensor), torch_tensor.dtype == torch.bool))\n\n\ndef _load_torch_gpu_allocator_cpp_extension(verbosity, is_rocm_pytorch):\n gpu_identifier = \"hip\" if is_rocm_pytorch else \"cuda\"\n gpu_allocator_header = \"HIPCachingAllocator\" if is_rocm_pytorch else \"CUDACachingAllocator\"\n torch_gpu_allocator_addresses_cpp_source = f'''\n #include <torch/extension.h>\n #include <c10/{gpu_identifier}/{gpu_allocator_header}.h>\n\n size_t gpu_caching_allocator_raw_alloc_address() {{\n return reinterpret_cast<size_t>(&c10::{gpu_identifier}::{gpu_allocator_header}::raw_alloc);\n }}\n\n size_t gpu_caching_allocator_raw_delete_address() {{\n return reinterpret_cast<size_t>(&c10::{gpu_identifier}::{gpu_allocator_header}::raw_delete);\n }}\n '''\n\n return load_inline(name='inline_extension',\n cpp_sources=[torch_gpu_allocator_addresses_cpp_source],\n extra_cflags=['-D__HIP_PLATFORM_HCC__=1' if is_rocm_pytorch else ''],\n functions=['gpu_caching_allocator_raw_alloc_address',\n 'gpu_caching_allocator_raw_delete_address'],\n verbose=verbosity,\n with_cuda=True)\n\n\ndef _check_same_device(device, argument_str, *args):\n '''Check that all tensor arguments in *args reside on the same device as the input device'''\n\n assert isinstance(device, torch.device), '`device` must be a valid `torch.device` object'\n for arg in args:\n if arg is not None and isinstance(arg, torch.Tensor):\n arg_device = torch.device(arg.device)\n if arg_device != device:\n raise RuntimeError(\n f\"{argument_str} found on device {arg_device}, but expected it to be on module device {device}.\")\n\n\ndef get_device_from_module(module):\n '''Returns the first device found in the `module`'s parameters or None'''\n device = None\n try:\n device = next(module.parameters()).device\n for param in module.parameters():\n if param.device != device:\n raise RuntimeError('ORTModule supports a single device per model for now')\n except StopIteration:\n # Model doesn't have a device set to any of the model parameters\n pass\n return device\n\n\ndef _create_iobinding(io_binding, inputs, model, device):\n '''Creates IO binding for a `model` inputs and output'''\n for idx, value_info in enumerate(model.graph.input):\n io_binding.bind_ortvalue_input(value_info.name, _ortvalue_from_torch_tensor(inputs[idx]))\n\n for value_info in model.graph.output:\n io_binding.bind_output(value_info.name, device.type, device_id=_utils.get_device_index(device))\n"
] | [
[
"torch.utils.cpp_extension.load_inline",
"torch.device",
"torch.utils.dlpack.to_dlpack"
]
] |
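The OrtValue helpers above hinge on DLPack being a zero-copy exchange format. A torch-only sketch of that round trip (no OrtValue involved, so it runs without onnxruntime installed):

import torch
from torch.utils.dlpack import from_dlpack, to_dlpack

t = torch.arange(6, dtype=torch.float32).reshape(2, 3)
roundtrip = from_dlpack(to_dlpack(t))
assert roundtrip.data_ptr() == t.data_ptr()   # same underlying storage, no copy
print(roundtrip.shape)                        # torch.Size([2, 3])

Boolean tensors are the exception handled explicitly above: depending on the torch version they travel through DLPack as uint8, hence the cast back to torch.bool in _ortvalue_to_torch_tensor.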
Paul-31415/soundplay | [
"0e7ea27c6d4bdf5f94e5034c7775a10c62d1583e"
] | [
"fmsynth.py"
] | [
"\n\nfrom itools import lmbdWr,lm\n\nimport itertools\n\nfrom bisect import bisect_right\n\nimport brailleG as gr\n\ndef abs2(n):\n return (n*n.conjugate()).real\n \n\ndef fsample(buf,m=1,b=0):\n index = 0\n y = 0\n while 1:\n index = (index+b+m*y)%len(buf)\n y = yield buf[(int(index)+1)%len(buf)]*(index%1)+buf[int(index)]*(1-(index%1))\n\ndef fsine(a=1,m=1/48000,b=0):\n s = 0\n c = a\n y = 0\n while 1:\n amt = b+m*y\n s += c*amt\n c -= s*amt\n y = yield s\n\nimport math\npi = math.pi\neone = math.exp(2*pi)\nbuffer_size = 8192\n\nsinBuffer = [math.sin(i*2*math.pi/4/buffer_size) for i in range(buffer_size+1)]\n \ndef nsin(a):\n a = 4*buffer_size*(a%1)\n if a<=buffer_size:\n return sinBuffer[math.floor(a)]\n elif a<=buffer_size*2:\n return sinBuffer[math.floor(buffer_size-a)-1]\n elif a<=buffer_size*3:\n return -sinBuffer[math.floor(a-buffer_size*2)]\n else:\n return -sinBuffer[math.floor(buffer_size*3-a)-1]\ndef nsaw(a):\n return (a%1)*2-1\ndef ntri(a):\n return abs((a%1)-.5)*4-1\ndef nsquare(a,p=.5):\n return ((a%1)<p)*2-1\n\nlsin = lm(nsin)\nlsaw = lm(nsaw)\nltri = lm(ntri)\nlsqr = lm(nsquare)\n\ndef pulse(w=.5):\n def p(v):\n return ((v%1)<w)*2-1\n return p\ndef tri(w=.5):\n def t(v):\n v %= 1\n return 2*v/w-1 if v < w else 2*(w-v)/(1-w)+1\n return t\n\n_reserved = []\ndef polyvars(varstr):\n return [Polynomial(i) for i in varstr]\nclass Polynomial:\n def __init__(self,coef,var=\"x\"):\n if type(coef) == str:\n var = coef\n coef = [0,1]\n self.a = coef\n self.var = var\n self.trim()\n def trim(self):\n while len(self.a):\n if self.a[-1] == 0:\n self.a = self.a[:-1]\n else:\n break\n def simplified(self,tv=None):\n if tv == None:\n tv = self.var\n if tv == self.var:\n r = Polynomial([],tv)\n x = Polynomial(tv)\n xa = 1\n for t in self.a:\n if type(t) == Polynomial:\n r += t.simplified(tv)*xa\n else:\n r += t*xa\n xa *= x\n else:\n r = Polynomial([],tv)\n x = Polynomial(self.var)\n xa = 1\n for t in self.a:\n if type(t) == Polynomial:\n r += t.simplified(tv).__mul__(xa)\n else:\n r += t*xa\n xa *= x\n return r\n def __call__(self,vrs):\n if type(vrs) != dict:\n vrs = {self.var:vrs}\n if self.var in vrs:\n x = vrs[self.var]\n v = 0\n xa = 1\n for t in self.a:\n if type(t) == Polynomial:\n t = t(vrs)\n v += xa*t\n xa *= x\n return v\n return Polynomial([t(vrs) if type(t) == Polynomial else t for t in self.a],self.var)\n def __getitem__(self,i):\n if i>=len(self):\n return 0\n return self.a[i]\n def __setitem__(self,i,v):\n if i>=len(self):\n self.a += [0]*(i-len(self))+[v]\n else:\n self.a[i] = v\n self.trim()\n def __neg__(self):\n return Polynomial([-i for i in self.a],self.var)\n def __radd__(self,o):\n return self.__add__(o)\n def __add__(self,o):\n if type(o) == Polynomial and o.var == self.var:\n return self.padd(o)\n return self.npadd(o)\n def padd(self,o):\n return Polynomial(sumPolyn(self.a,o.a),self.var)\n def npadd(self,o):\n if len(self.a):\n return Polynomial([self.a[0]+o]+self.a[1:],self.var)\n return Polynomial([o],self.var)\n def __rsub__(self,o):\n return -self.__sub__(o)\n def __sub__(self,o):\n if type(o) == Polynomial and o.var == self.var:\n return self.psub(o)\n return self.npsub(o)\n def psub(self,o):\n return self.padd(-o)\n def npsub(self,o):\n if len(self.a):\n return Polynomial([self.a[0]-o]+self.a[1:],self.var)\n return Polynomial([-o],self.var)\n def __rmul__(self,o):\n return self.__mul__(o)\n def __mul__(self,o):\n if type(o) == Polynomial and o.var == self.var:\n return self.pmul(o)\n return self.npmul(o)\n def pmul(self,o):\n return 
Polynomial(prodPolyn(self.a,o.a),self.var)\n def npmul(self,o):\n if len(self.a):\n return Polynomial([e*o for e in self.a],self.var)\n return Polynomial([],self.var)\n #def __divmod__(self,o):\n #def __repr__(self,var=None):\n # if var == None:\n # var = self.var\n # return f\"polyn({var}) = \"+\" + \".join((f\"({self.a[i]})\"+[\"\",f\"{var}\"][i>0]+[\"\",f\"**{i}\"][i>1] for i in range(len(self.a))))\n def __repr__(self,var=None):\n if var == None:\n var = self.var\n return f\"p({var})=\"*0+\" + \".join(((f\"({self.a[i]})\" if self.a[i] != 1 else [\"1\",\"\"][i>0])+[\"\",f\"{var}\"][i>0]+[\"\",f\"**{i}\"][i>1] for i in range(len(self.a)) if self.a[i] != 0))\n def deriv(self):\n return Polynomial([self.a[i+1]*(i+1) for i in range(len(self.a)-1)],self.var)\n def integ(self,k=0):\n return Polynomial([k]+[self.a[i]*(1/(i+1)) for i in range(len(self.a))],self.var)\n def convolve(self,o):\n #integ of self(t-x)o(t) dt\n #want first arg to be x, second to be bounds\n #so, \n x = Polynomial('x')\n t = Polynomial('t')\n integrand = Polynomial([self(t-x)*o(t)],'t')\n return integrand.simplified('t').integ()\n def __len__(self):\n return len(self.a)\n def __eq__(self,o):\n if type(o) == Polynomial:\n if len(o) != len(self) or o.var != self.var:\n return False\n for i in range(len(self)):\n if self.a[i] != o.a[i]:\n return False\n return True\n if type(o) == float or type(o) == int or type(o) == complex:\n return len(self) <= 1 and (self.a+[0])[0] == o\n def __matmul__(self,o):\n return self.convolve(o)\n def plot(self,*args):\n plotPoly(self.a,*args)\n \ndef evalPolyn(polyn,x):\n v = 0\n xa = 1\n for t in polyn:\n v += xa*t\n xa *= x\n return v\ndef sumPolyn(p1,p2):\n res = [0 for i in range(max(len(p1),len(p2)))]\n for i in range(len(res)):\n if i < len(p1):\n if i < len(p2):\n res[i] = p1[i] + p2[i]\n else:\n res[i] = p1[i]\n else:\n res[i] = p2[i]\n return res\ndef prodPolyn(p1,p2):\n if len(p1) == 0 or len(p2) == 0:\n return []\n res = [0 for i in range(len(p1)+len(p2)-1)]\n for i in range(len(p1)):\n for j in range(len(p2)):\n res[i+j] += p1[i]*p2[j]\n return res\ndef composePolyn(p1,p2): #retuns p1(p2(x))\n px = [1]\n pr = []\n for i in p1:\n pr = sumPolyn(pr,prodPolyn(px,[i]))\n px = prodPolyn(px,p2)\n return pr\n\ndef fourierPolyn(p,freq):\n factor = 1/(2j*math.pi*freq)\n mask = [factor]\n result = [0 for i in p]\n for i in range(len(p)):\n facacc = factor\n for j in range(i,-1,-1):\n result[j] += facacc*p[i]\n facacc *= -factor*j\n return result\ndef evalFourierPolyn(p,freq,phase,low,high):\n l = evalPolyn(p,low)\n h = evalPolyn(p,high)\n return h*(eone**(1j*(freq*high+phase)))-l*(eone**(1j*(freq*low+phase)))\n \n\ndef convolvePolyn(p1,p2):\n pass\n\n\n\ndef softGCD(a,b,f=.01):\n if abs(b)<=f:\n return a\n return softGCD(b,a%b,f)\n\ndef convPolyFrags(p0,p1,l0,l1):\n #convolves 2 polynomial fragments\n if l0 > l1:\n return convPolyFrags(p1,p0,l1,l0)\n #l0 ≤ l1\n times = [-l0,0,l1-l0]\n #moving = composePolyn(p0,[t-x])\n\n def xify(v):\n return Polynomial([v],'x').simplified('x')\n \n p_0 = Polynomial(p0,'x')\n p_1 = Polynomial(p1,'x')\n x = Polynomial('x')\n conv = p_0@p_1\n a,b,c = conv(l0+x)-conv(0),conv(l0+x)-conv(x),conv(l1)-conv(x)\n a,b,c = [xify(xify((a,b,c)[i])(x+times[i])) for i in range(3)]\n if l1 != l0:\n return PiecewizePoly([[],a.a,b.a,c.a,[]],[-math.inf]+times+[l1],0)\n return PiecewizePoly([[],a.a,c.a,[]],[-math.inf]+times[:2]+[l1],0)\n \n \n\ndef plotPoly(p,t0=0,t1=1,res=50):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(nrows=1, ncols=1)\n st = p[0]\n end = 
evalPolyn(p,t1-t0)\n mid = []\n ts = []\n if len(p) > 2:\n for j in range(1,res):\n t = (t1-t0)*j/res\n ts += [t+t0]\n mid += [evalPolyn(p,t)]\n ts = [t0]+ts+[t1]\n ys = [st]+mid+[end]\n plt.plot(ts,[i.real for i in ys],linestyle='-',color=(.3,.3,1), linewidth=2)\n plt.plot(ts,[i.imag for i in ys],linestyle='-',color=(1,.3,.3), linewidth=2)\n plt.show(block=0)\n\n#todo: closed form convolution\n# perhaps use @ (__matmul__)\n#todo: closed form composition? possible? not always: requires root finding\nclass PiecewizePoly:\n def __init__(self,polys = [[]],times=[0],mod=1):\n self.times = times\n self.polys = polys\n self.mod = mod\n def __call__(self,x):\n if self.mod != 0:\n x %= self.mod\n #binary search for correct polyn\n l = bisect_right(self.times,x)-1\n #eval polyn\n return evalPolyn(self.polys[l],x-self.times[l])\n def deriv(self):\n #do derivitive on self\n res_t = []\n res_p = []\n for p in range(len(self.polys)):\n res_t += [self.times[p]]\n res_p += [[]]\n for i in range(len(self.polys[p])-1):\n res_p[-1] += [self.polys[p][i+1]*(i+1)]\n return PiecewizePoly(res_p,res_t,self.mod)\n def integ(self,start=0,scale=1):\n #do integral on self\n res_t = []\n res_p = []\n for p in range(len(self.polys)):\n res_t += [self.times[p]]\n res_p += [[start]]\n for i in range(len(self.polys[p])):\n res_p[-1] += [self.polys[p][i]/(i+1)*scale]\n #continuize segments after first\n for i in range(1,len(res_t)):\n val = evalPolyn(res_p[i-1],res_t[i]-res_t[i-1])\n res_p[i][0] = val#-evalPolyn(res[i][1],res[i][0]) #not needed with new def\n return PiecewizePoly(res_p,res_t,self.mod)\n def timeshift(self,s):\n assert self.mod==0\n for i in range(len(self.times)):\n self.times[i] -= s\n return self\n def timescale(self,s):\n self.mod *= s\n for i in range(len(self.times)):\n self.times[i] *= s\n self.polys[i] = composePolyn(self.polys[i],[0,1/s])\n return self\n def convolve(self,o,fudgefactor = .001):\n ts = self.times + [self.mod if self.mod else math.inf]\n to = o.times + [o.mod if o.mod else math.inf]\n result = PiecewizePoly([[]],[-math.inf],0)\n for i in range(len(self.polys)):\n for j in range(len(o.polys)):\n pc = convPolyFrags(self.polys[i],o.polys[j],ts[i+1]-ts[i],to[j+1]-to[j])\n result += pc.timeshift(ts[i]-to[j])\n #now do moddy stuff\n return result\n \n \n def __matmul__(self,o,fudgefactor = .001):\n return self.convolve(o,fudgefactor)\n def __lmbdWr__(self):\n return lmbdWr(self)\n def __iterWr__(self):\n return iterWr(iter(lmbdWr(self)))\n def bias(self):\n intg = self.integ()\n return (intg.end()-intg(0))/self.mod\n def unbiased(self):\n #self shifted to have 0 dc bias\n bias = self.bias()\n res_t = []\n res_p = []\n for p in range(len(self.polys)):\n res_t += [self.times[p]]\n res_p += [sumPolyn([-bias],self.polys[p])]\n return PiecewizePoly(res_p,res_t,self.mod)\n def graph(self,w=40,h=20,lo=-2,hi=2):\n gr.graph(self,0,self.mod,lo,hi,w,h)\n def plot(self,res=50):\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(nrows=1, ncols=1)\n dash = 0\n for i in range(len(self.polys)):\n dash = 1-dash\n t0 = self.times[i]\n if t0 == -math.inf:\n t0 = self.times[i+1]-1\n t1 = (self.times+[self.mod if self.mod != 0 else self.times[-2]+1])[i+1]\n p = self.polys[i]\n st = (p+[0])[0]\n end = evalPolyn(p,t1-t0)\n mid = []\n ts = []\n if len(p) > 2:\n for j in range(1,res):\n t = (t1-t0)*j/res\n ts += [t+t0]\n mid += [evalPolyn(p,t)]\n ts = [t0]+ts+[t1]\n ys = [st]+mid+[end]\n plt.plot(ts,[i.real for i in ys],linestyle='-',color=(.3*dash,.3*dash,1), linewidth=2)\n plt.plot(ts,[i.imag for i in 
ys],linestyle='-',color=(1,.3*dash,.3*dash), linewidth=2)\n plt.show(block=0)\n \n def mag2(self):\n sqd = PiecewizePoly([prodPolyn(p,p) for p in self.polys],[t for t in self.times],self.mod+1).integ()\n return (sqd(self.mod)-sqd(0))/self.mod\n def norm(self,v=.5):\n #normalizes it so that integ(0,mod, of self^2) = v*mod\n target = v\n factor = target/self.mag2()**.5\n return PiecewizePoly([[i*factor for i in p] for p in self.polys],[t for t in self.times],self.mod)\n def __add__(self,o,fudgefactor = .001):\n if type(o) == PiecewizePoly:\n if self.mod == 0:\n assert o.mod == 0\n res_t = [-math.inf]\n res_p = [sumPolyn(self.polys[0],o.polys[0])]\n si = 0\n oi = 0\n sts = self.times + [math.inf]\n ots = o.times + [math.inf]\n sp = self.polys + [[]]\n op = o.polys + [[]]\n while si < len(self.times) and oi < len(o.times):\n st,ot = sts[si+1],ots[oi+1]\n if st < ot:\n si += 1\n res_t += [st]\n res_p += [sumPolyn(sp[si],\n composePolyn(op[oi],[st-ot,1]))]\n elif st > ot:\n oi += 1\n res_t += [ot]\n res_p += [sumPolyn(composePolyn(sp[si],[ot-st,1]),\n op[oi])]\n else:\n si += 1\n oi += 1\n res_t += [st]\n res_p += [sumPolyn(sp[si],op[oi])]\n return PiecewizePoly(res_p,res_t,0)\n \n else:\n assert o.mod != 0\n gcd = softGCD(self.mod,o.mod,fudgefactor*(self.mod*o.mod)**.5) \n lcm = self.mod*o.mod/gcd \n t = 0 \n res_t = []\n res_p = []\n sto = 0\n oto = 0\n si = 0\n oi = 0\n while t < lcm:\n res_t += [t]\n res_p += [sumPolyn(composePolyn(self.polys[si],[t-(self.times[si]+sto),1]),\n composePolyn(o.polys[oi],[t-(o.times[oi]+oto),1]))]\n \n st = sto+(self.times+[self.times[0]+self.mod])[si+1]\n ot = oto+(o.times+[o.times[0]+o.mod])[oi+1]\n t = min(st,ot)\n if st <= t:\n si += 1\n if si >= len(self.polys):\n si = 0\n sto += self.mod\n if ot <= t:\n oi += 1\n if oi >= len(o.polys):\n oi = 0\n oto += o.mod\n return PiecewizePoly(res_p,res_t,lcm)\n else:\n return PiecewizePoly([sumPolyn(p,[o]) for p in self.polys],[t for t in self.times],self.mod)\n def __mul__(self,o,fudgefactor = .001):\n if type(o) == PiecewizePoly:\n gcd = softGCD(self.mod,o.mod,fudgefactor*(self.mod*o.mod)**.5) \n lcm = self.mod*o.mod/gcd \n t = 0 \n res_t = []\n res_p = []\n sto = 0\n oto = 0\n si = 0\n oi = 0\n while t < lcm:\n res_t += [t]\n res_p += [prodPolyn(composePolyn(self.polys[si],[t-(self.times[si]+sto),1]),\n composePolyn(o.polys[oi],[t-(o.times[oi]+oto),1]))]\n \n st = sto+(self.times+[self.times[0]+self.mod])[si+1]\n ot = oto+(o.times+[o.times[0]+o.mod])[oi+1]\n t = min(st,ot)\n if st <= t:\n si += 1\n if si >= len(self.polys):\n si = 0\n sto += self.mod\n if ot <= t:\n oi += 1\n if oi >= len(o.polys):\n oi = 0\n oto += o.mod\n return PiecewizePoly(res_p,res_t,lcm)\n else:\n return PiecewizePoly([prodPolyn(p,[o]) for p in self.polys],[t for t in self.times],self.mod)\n \n def __radd__(self,o):\n return self.__add__(o)\n def __rmul__(self,o):\n return self.__mul__(o)\n def __sub__(self,o):\n return self.__add__(o.__mul__(-1))\n def __rsub__(self,o):\n return self.__mul__(-1).__add__(o)\n \n def t(self,v=1):\n return PiecewizePoly([[p[i]/(v**i) for i in range(len(p))] for p in self.polys],[t*v for t in self.times],self.mod*v)\n def isZero(self):\n for i in self.polys:\n for j in i:\n if j != 0:\n return False\n return True\n def end(self):\n x = self.mod\n l = -1\n #eval polyn\n return evalPolyn(self.polys[l],x-self.times[l])\n \n def freqComponent(self,f):\n if f == 0:\n return self.bias()\n result = 0\n f /= self.mod\n for i in range(len(self.polys)):\n p = fourierPolyn(self.polys[i],f)\n result += 
evalFourierPolyn(p,f,f*self.times[i],0,(self.times+[self.mod])[i+1]-self.times[i])\n return result\n def graphSpectrum(self,w=20,h=10,both=True):\n gr.graphLT(lambda x:abs(self.freqComponent(x)),both-h*2*both,h*(4-2*both)+both,0,1,w,h)\n def graphSpectrumLog(self,w=20,h=10,both = True,low=-10,hi=1):\n gr.graphLT(lambda x: (lambda v: (math.log(v) if v!=0 else -1e300))(abs(self.freqComponent(x))),both-h*2*both,h*(4-2*both)+both,low,hi,w,h)\n\n def bandLimit(self,t,bl=5,neg=False):\n tot = 0\n for i in range(neg*(1-bl),bl):\n tot += eone**(1j*i*t)*self.freqComponent(i)\n return tot\n\n def getBandlimitedBuffer(self,denominator,numerator = 1,ff=0,fnd=2,neg=False):\n #f_nyquist = .5\n # f_n = n*(num/den) < f_nyquist\n # n < .5*den/num\n d = softGCD(numerator,denominator,ff)\n numerator=int(round(numerator/d))\n denominator=int(round(denominator/d))\n return [self.bandLimit(numerator*i*self.mod/denominator,int(denominator/numerator/fnd),neg) for i in range(numerator*denominator)]\n \n def bandConj(self,t,bl=5):\n tot = 0\n re = self.real()\n im = self.imag()\n for i in range(0,bl):\n f = eone**(1j*i*t)\n tot += (f*re.freqComponent(i)).imag+(f*im.freqComponent(i)).imag*1j\n return tot\n \n \n \n def real(self):\n return PiecewizePoly([[i.real for i in j]for j in self.polys],[t for t in self.times],self.mod)\n def imag(self):\n return PiecewizePoly([[i.imag for i in j]for j in self.polys],[t for t in self.times],self.mod)\n def oscope(self,w=40,h=20,s=.5+.5j,m=.5,n=256):\n scrn = gr.brailleScreen(w*2,h*4)\n for i in range(n):\n t = i*self.mod/n\n v = self(t).conjugate()*m+s\n if 0<=int(v.real*w*2)<w*2 and 0<=int(v.imag*h*4) < h*4:\n gr.brailleScreenSet(scrn,int(v.real*w*2),int(v.imag*h*4))\n gr.brailleScreenPrint(scrn)\n\ndef forever(v):\n while 1:\n yield v\n#NEW BWLSYNTH IDEA:\n# sample the nth integral then derivitate the signal n times\n# the high harmonics are suppressed in the integrals which means\n# when they alias they are tiny\n# but the reconstruction filter doesn't amplify them a ton because they were aliased\n# thus cheap and easy bwl synthesis\ndef idbwlPoly(p,rate=440/48000,q=1,d=1):\n try:\n rate.__next__\n except:\n rate = forever(rate)\n ds = [[0]*d for i in range(q)]\n rates = [0]*d\n trate = 0\n for i in range(q):\n p = p.unbiased().integ()\n t = 0\n di = 0\n for i in range(q*d):\n di = (di+1)%d\n t += rate\n t %= 1\n r = p(t)\n trate -= rates[di]\n rates[di] = next(rate)\n trate += rates[di]\n for i in range(q):\n r,ds[i][di] = (r-ds[i][di]) / trate,r\n while 1:\n di = (di+1)%d\n t += rate\n t %= 1\n r = p(t)\n trate -= rates[di]\n rates[di] = next(rate)\n trate += rates[di]\n for i in range(q):\n r,ds[i][di] = (r-ds[i][di]) / trate,r\n yield r\n \n\ndef ditherPoly(p,rate=440/48000,dd=1):\n from random import random\n t = 0\n while 1:\n t += rate\n yield p(t+dd*rate*random())\n\n \ndef gaussApprox(mean=0,spread=1,iters=3):\n s = spread/iters\n blip = PiecewizePoly([[],[1/s],[]],[-math.inf,0,s],0)\n acc = blip\n for b in bin(iters)[3:]:\n acc.plot()\n acc @= acc\n if b == '1':\n acc @= blip\n return acc.timeshift(mean)\n \n\ndef plinConnectDots(dat,speed=1):\n polys = []\n times = []\n t = 0\n for i in range(len(dat)):\n leng = abs(dat[i-1]-dat[i])\n polys += [[dat[i-1],(dat[i]-dat[i-1])/leng]]\n times += [t]\n t += leng\n return PiecewizePoly(polys,times,t)\ndef pnlinConnectDots(dat,speed=1):\n r = plinConnectDots(dat,speed)\n return r.t(1/r.mod)\n \n \ndef papprox(dat,integ=2):\n #derivitive the freqs integ times\n dcs = []\n for intg in range(integ):\n dcs += 
[dat[-1]/(intg+1)]\n ddat = [(-dat[i-1]+dat[i])/(intg+1) for i in range(len(dat))]\n dat = ddat\n res = PiecewizePoly([[i] for i in dat],[i for i in range(len(dat))],len(dat))\n for i in range(integ):\n res = res.integ(dcs[-i-1])\n return res\n \"\"\"bl = len(dat)//2\n guess1 = PiecewizePoly([[i] for i in dat],[i/len(dat) for i in range(len(dat))],1)\n freqs = [guess1.freqComponent(i) for i in range(1-bl,bl)]\n dc = guess1.bias()\n #derivitive the freqs integ times\n for i in range(integ):\n for f in range(len(freqs)):\n freqs[f] *= (f+1-bl//2)*1j\n #come up with new samples to integrate repeatedly\n samps = []\n for t in range(len(dat)):\n tot = 0\n for i in range(1-bl,bl):\n tot += eone**(1j*i*t/len(dat))*freqs[i]\n samps += [tot]\n res = PiecewizePoly([[i] for i in samps],[i/len(samps) for i in range(len(samps))],1)\n for i in range(integ):\n res = res.unbiased().integ(0,1).unbiased()\n return res + dc\n \"\"\"\n \n\ndef ppulse(width=.5,amplitude=1):\n return PiecewizePoly([[0,[-1]],[width,[1]]]).unbiased()\n\n\npsqr = PiecewizePoly([[-1],[1]],[0,.5])\n\n#.5 -> 2\nptri = psqr.integ(0,4).unbiased()\n\n#.25*.5=1/8\nppar = ptri.integ(0,8)\n\n\npsaw = PiecewizePoly([[1,-2]],[0])\n\n\ncf = pnlinConnectDots([-.75+1.5j,-.5+1j,.5+1j,.75+1.5j,1+1j,1-1j,-1j-1,1j-1])*.5\n\ncfi = plinConnectDots([-.75+1.5j,-.5+1j,.5+1j,.75+1.5j,1+1j,1-1j,-1j-1,1j-1])\ncfi.polys += [[-1/3+.5j,-1j],[1/3+.5j,-1j]]\ncfi.times += [cfi.mod,cfi.mod+.75]\ncfi.mod += 1.5\ncfi = cfi.t(1/cfi.mod)*.5\n\n\n\ndef reorderTimes(times,order,top):\n newTimes = []\n t = 0\n for i in order:\n if i == len(times)-1:\n l = top-times[i]\n else:\n l = times[i+1]-times[i]\n newTimes += [t]\n t += l\n return newTimes\n\ndef reorder(wave,goal,fs=20,wfd = lambda f,a,b: abs(abs2(a)-abs2(b))):\n l = [i for i in range(len(wave.polys))]\n goalF = [goal.freqComponent(i) for i in range(1-fs,fs)]\n best = wave\n bestD = 1e300\n for p in itertools.permutations(l):\n guess = PiecewizePoly([wave.polys[i] for i in p],reorderTimes(wave.times,p,wave.mod),wave.mod)\n guessF = [guess.freqComponent(i) for i in range(1-fs,fs)]\n d = 0\n for i in range(len(goalF)):\n d += wfd(1-fs+i,goalF[i],guessF[i])\n if d < bestD:\n best = guess\n bestD = d\n return best\n\n\ndef quickStar(n,s=2):\n return pnlinConnectDots([eone**(1j*i*s/n) for i in range(n)])*.5\n\ndef prettyStar(n,rl=.5):\n return pnlinConnectDots([eone**(1j*(i+.5*j)/n)*[1,rl][j] for i in range(n) for j in range(2)])*.5\n\n\n\ndef getPSineApprox(sects=2,integs=12):\n offs = integs%4\n guess = PiecewizePoly([[math.sin(((i+.5)/sects+offs/4)*2*math.pi)] for i in range(sects)],[i/sects for i in range(sects)]).unbiased()\n for i in range(integs):\n guess = guess.integ(0,1).unbiased().norm()\n return guess\n\n\n\n\n\ndef c(f,g):\n for i in g:\n yield f(i)\n\ndef x(n,g):\n for i in g:\n yield n*i\ndef p(n,g):\n for i in g:\n yield n+i\ndef const(n):\n while 1:\n yield n\ndef integ(g,a=0):\n for i in g:\n a += i\n yield a\ndef deriv(g):\n p = next(g)\n for i in g:\n yield i-p\n p = i\ndef clamp(n,v=1):\n return min(max(n,-v),v)\ndef bderiv(g,b=1):\n p = next(g)\n d = 0\n for i in g:\n d += i-p\n p = i\n v = clamp(d,b)\n yield v\n d -= v\n\n\n\n\n \n \ndef send(g1,g2):\n next(g1)\n while 1:\n yield g1.send(next(g2))\n \nclass passFilter:\n def __init__(self):\n self.value = 0\n def send(self,val,time=1):\n self.value = val\n return val\nclass contRAvgFilt(passFilter):\n def __init__(self,a):\n self.alpha = math.log(a)\n self.value = 0\n def send(self,val,time=1):\n self.value = 
val+(self.value-val)*math.exp(self.alpha*time)\n return self.value\n\ndef getPerfSquareBuff(n,d=1):\n w = 1\n outbuf = [0 for i in range(n)]\n while w < n/d/2:\n for i in range(n):\n outbuf[i] += math.sin(i*2*pi*d/n*w)/w\n w += 2\n return outbuf\n\n\ndef nearestDownSample(g,r=1):\n a = 0\n for i in g:\n while a < 1:\n yield i\n a += r\n a -= 1\n \ndef linearDownSample(g,r=1):\n p = 0\n a = 0\n for i in g:\n while a < 1:\n yield a*i+(1-a)*p\n a += r\n p = i\n a -= 1\n \ndef fsamp(f,s=[(-1,.5),(1,.5)],filt=None,r=48000):\n if filt == None:\n filt = contRAvgFilt(1/r)\n a = 0\n i = 0\n if type(f)==int or type(f)==float:\n def g(v):\n while 1:\n yield v\n f = g(f)\n filtered = 0\n while 1:\n t = next(f)/r\n while t > 0:\n dt = min(t,s[i][1]-a)\n \n a += dt\n t -= dt\n filt.send(s[i][0],dt)\n\n if a>=s[i][1]:\n a -= s[i][1]\n i = (i+1)%len(s)\n\n \n yield filt.value\n \n \n\n\n\n\n\n#actual fm stuff\nfrom filters import IIR\nimport numpy as np\n\n\ndef phaseModulate(g,d=.1,f=10000,sr=48000):\n t = 0\n for i in g:\n t += f/sr\n yield nsin(t+i.real*d)+1j*(nsin(t+.25+i.imag*d))\ndef modulate(g,d=0.01,f=10000,sr=48000):\n t = .25\n for i in g:\n t += (d*i+1+1j)*f/sr\n yield (nsin(t.real)+1j*nsin(t.imag))\n\n\"\"\"def stereoEncode(g,c=10000,sr=48000):\n t = 0\n flt = IIR()\n flt.setPolys([1],\n for i in g:\n t += (c+i.imag)/sr\n yield nsin(t)+i.real\n\n\ndef stereoDecode(g,c=15000,sr=48000):\n for i in g:\n r = \n\"\"\"\n\n\n\n\n\n#def fm(\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
accordinglyto/dferte | [
"d4b8449c1633973dc538c9e72aca5d37802a4ee4"
] | [
"src/predict-binary.py"
] | [
"import os\nimport numpy as np\n#os.environ[\"KERAS_BACKEND\"] = \"plaidml.keras.backend\"\n\nfrom keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\nfrom keras.models import Sequential, load_model\n\n\nimg_width, img_height = 48, 48\nmodel_path = '../src/models/model.h5'\nweights_path = '../src/models/weights'\nmodel = load_model(model_path)\ntest_path = '../data/test'\n\ndef predict(file):\n x = load_img(file, target_size=(img_width,img_height))\n x = img_to_array(x)\n x = np.expand_dims(x, axis=0)\n array = model.predict(x)\n result = array[0]\n if result[0] > result[1]:\n if result[0] > 0.9:\n print(\"Predicted answer: Buy\")\n answer = 'buy'\n print(result)\n print(array)\n else:\n print(\"Predicted answer: Not confident\")\n answer = 'n/a'\n print(result)\n else:\n if result[1] > 0.9:\n print(\"Predicted answer: Sell\")\n answer = 'sell'\n print(result)\n else:\n print(\"Predicted answer: Not confident\")\n answer = 'n/a'\n print(result)\n\n return answer\n\n\ntb = 0\nts = 0\nfb = 0\nfs = 0\nna = 0\n\nfor i, ret in enumerate(os.walk(test_path + '/buy')):\n for i, filename in enumerate(ret[2]):\n if filename.startswith(\".\"):\n continue\n print(\"Label: buy\")\n result = predict(ret[0] + '/' + filename)\n if result == \"buy\":\n tb += 1\n elif result == 'n/a':\n print('no action')\n na += 1\n else:\n fb += 1\n\nfor i, ret in enumerate(os.walk(test_path + '/sell')):\n for i, filename in enumerate(ret[2]):\n if filename.startswith(\".\"):\n continue\n print(\"Label: sell\")\n result = predict(ret[0] + '/' + filename)\n if result == \"sell\":\n ts += 1\n elif result == 'n/a':\n print('no action')\n na += 1\n else:\n fs += 1\n\n\"\"\"\nCheck metrics\n\"\"\"\nprint(\"True buy: \", tb)\nprint(\"True sell: \", ts)\nprint(\"False buy: \", fb) # important\nprint(\"False sell: \", fs)\nprint(\"No action\", na)\n\nprecision = (tb+ts) / (tb + ts + fb + fs)\nrecall = tb / (tb + fs)\nprint(\"Precision: \", precision)\nprint(\"Recall: \", recall)\n\nf_measure = (2 * recall * precision) / (recall + precision)\nprint(\"F-measure: \", f_measure)\n"
] | [
[
"numpy.expand_dims"
]
] |
T3p/baselines | [
"5623c9160d1e86ebca3e673f142fe6b14a1db06c"
] | [
"sacred_mis/_sources/pomis2_57be95a71b575624c33c6ffe64e50d6e.py"
] | [
"import numpy as np\nimport warnings\nimport baselines.common.tf_util as U\nimport tensorflow as tf\nimport time\nfrom baselines.common import zipsame, colorize\nfrom contextlib import contextmanager\nfrom collections import deque\nfrom baselines import logger\nfrom baselines.common.cg import cg\nfrom baselines.pomis2.memory import Memory\nfrom baselines.common.centralized_sampler import traj_segment_generator\nfrom baselines.pois.utils import cluster_rewards\n\n@contextmanager\ndef timed(msg):\n print(colorize(msg, color='magenta'))\n tstart = time.time()\n yield\n print(colorize('done in %.3f seconds'%(time.time() - tstart), color='magenta'))\n\ndef update_epsilon(delta_bound, epsilon_old, max_increase=2.):\n if delta_bound > (1. - 1. / (2 * max_increase)) * epsilon_old:\n return epsilon_old * max_increase\n else:\n return epsilon_old ** 2 / (2 * (epsilon_old - delta_bound))\n\ndef line_search_parabola(theta_init, alpha, natural_gradient, set_parameter, evaluate_bound, delta_bound_tol=1e-4, max_line_search_ite=30):\n epsilon = 1.\n epsilon_old = 0.\n delta_bound_old = -np.inf\n bound_init = evaluate_bound()\n theta_old = theta_init\n\n for i in range(max_line_search_ite):\n\n theta = theta_init + epsilon * alpha * natural_gradient\n set_parameter(theta)\n\n bound = evaluate_bound()\n\n if np.isnan(bound):\n warnings.warn('Got NaN bound value: rolling back!')\n return theta_old, epsilon_old, delta_bound_old, i + 1\n\n delta_bound = bound - bound_init\n\n epsilon_old = epsilon\n epsilon = update_epsilon(delta_bound, epsilon_old)\n if delta_bound <= delta_bound_old + delta_bound_tol:\n if delta_bound_old < 0.:\n return theta_init, 0., 0., i+1\n else:\n return theta_old, epsilon_old, delta_bound_old, i+1\n\n delta_bound_old = delta_bound\n theta_old = theta\n\n return theta_old, epsilon_old, delta_bound_old, i+1\n\ndef line_search_binary(theta_init, alpha, natural_gradient, set_parameter, evaluate_loss, delta_bound_tol=1e-4, max_line_search_ite=30):\n low = 0.\n high = None\n bound_init = evaluate_loss()\n delta_bound_old = 0.\n theta_opt = theta_init\n i_opt = 0\n delta_bound_opt = 0.\n epsilon_opt = 0.\n\n epsilon = 1.\n\n for i in range(max_line_search_ite):\n\n theta = theta_init + epsilon * natural_gradient * alpha\n set_parameter(theta)\n\n bound = evaluate_loss()\n delta_bound = bound - bound_init\n\n if np.isnan(bound):\n warnings.warn('Got NaN bound value: rolling back!')\n\n if np.isnan(bound) or delta_bound <= delta_bound_opt:\n high = epsilon\n else:\n low = epsilon\n theta_opt = theta\n delta_bound_opt = delta_bound\n i_opt = i\n epsilon_opt = epsilon\n\n epsilon_old = epsilon\n\n if high is None:\n epsilon *= 2\n else:\n epsilon = (low + high) / 2.\n\n if abs(epsilon_old - epsilon) < 1e-12:\n break\n\n return theta_opt, epsilon_opt, delta_bound_opt, i_opt+1\n\ndef optimize_offline(theta_init, set_parameter, line_search, evaluate_loss, evaluate_gradient, evaluate_natural_gradient=None, gradient_tol=1e-4, bound_tol=1e-4, max_offline_ite=100):\n theta = theta_old = theta_init\n improvement = improvement_old = 0.\n set_parameter(theta)\n\n\n '''\n bound_init = evaluate_loss()\n import scipy.optimize as opt\n\n def func(x):\n set_parameter(x)\n return -evaluate_loss()\n\n def grad(x):\n set_parameter(x)\n return -evaluate_gradient().astype(np.float64)\n\n theta, bound, d = opt.fmin_l_bfgs_b(func=func,\n fprime=grad,\n x0=theta_init.astype(np.float64),\n maxiter=100,\n )\n print(bound_init, bound)\n\n print(d)\n\n set_parameter(theta)\n improvement = bound_init + bound\n return 
theta, improvement\n\n '''\n\n fmtstr = '%6i %10.3g %10.3g %18i %18.3g %18.3g %18.3g'\n titlestr = '%6s %10s %10s %18s %18s %18s %18s'\n print(titlestr % ('iter', 'epsilon', 'step size', 'num line search', 'gradient norm', 'delta bound ite', 'delta bound tot'))\n\n for i in range(max_offline_ite):\n bound = evaluate_loss()\n gradient = evaluate_gradient()\n\n if np.any(np.isnan(gradient)):\n warnings.warn('Got NaN gradient! Stopping!')\n set_parameter(theta_old)\n return theta_old, improvement\n\n if np.isnan(bound):\n warnings.warn('Got NaN bound! Stopping!')\n set_parameter(theta_old)\n return theta_old, improvement_old\n\n if evaluate_natural_gradient is not None:\n natural_gradient = evaluate_natural_gradient(gradient)\n else:\n natural_gradient = gradient\n\n if np.dot(gradient, natural_gradient) < 0:\n warnings.warn('NatGradient dot Gradient < 0! Using vanilla gradient')\n natural_gradient = gradient\n\n gradient_norm = np.sqrt(np.dot(gradient, natural_gradient))\n\n if gradient_norm < gradient_tol:\n print('stopping - gradient norm < gradient_tol')\n return theta, improvement\n\n alpha = 1. / gradient_norm ** 2\n\n theta_old = theta\n improvement_old = improvement\n theta, epsilon, delta_bound, num_line_search = line_search(theta, alpha, natural_gradient, set_parameter, evaluate_loss)\n set_parameter(theta)\n\n improvement += delta_bound\n print(fmtstr % (i+1, epsilon, alpha*epsilon, num_line_search, gradient_norm, delta_bound, improvement))\n\n if delta_bound < bound_tol:\n print('stopping - delta bound < bound_tol')\n return theta, improvement\n\n return theta, improvement\n\ndef learn(env, make_policy, *,\n n_episodes,\n horizon,\n delta,\n gamma,\n max_iters,\n sampler=None,\n use_natural_gradient=False, #can be 'exact', 'approximate'\n fisher_reg=1e-2,\n iw_method='is',\n iw_norm='none',\n bound='J',\n line_search_type='parabola',\n save_weights=0,\n improvement_tol=0.,\n center_return=False,\n render_after=None,\n max_offline_iters=100,\n callback=None,\n clipping=False,\n entropy='none',\n positive_return=False,\n reward_clustering='none',\n capacity=10,\n warm_start=True):\n\n np.set_printoptions(precision=3)\n max_samples = horizon * n_episodes\n\n if line_search_type == 'binary':\n line_search = line_search_binary\n elif line_search_type == 'parabola':\n line_search = line_search_parabola\n else:\n raise ValueError()\n\n # Building the environment\n ob_space = env.observation_space\n ac_space = env.action_space\n\n # Creating the memory buffer\n memory = Memory(capacity=capacity, batch_size=n_episodes, horizon=horizon,\n ob_space=ob_space, ac_space=ac_space)\n\n # Building the target policy and saving its parameters\n pi = make_policy('pi', ob_space, ac_space)\n all_var_list = pi.get_trainable_variables()\n var_list = [v for v in all_var_list if v.name.split('/')[1].startswith('pol')]\n shapes = [U.intprod(var.get_shape().as_list()) for var in var_list]\n n_parameters = sum(shapes)\n\n # Building a set of behavioral policies\n behavioral_policies = memory.build_policies(make_policy, pi)\n\n # Placeholders\n ob_ = ob = U.get_placeholder_cached(name='ob')\n ac_ = pi.pdtype.sample_placeholder([None], name='ac')\n mask_ = tf.placeholder(dtype=tf.float32, shape=(None), name='mask')\n rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='rew')\n disc_rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='disc_rew')\n clustered_rew_ = tf.placeholder(dtype=tf.float32, shape=(None))\n gradient_ = tf.placeholder(dtype=tf.float32, shape=(n_parameters, 1), 
name='gradient')\n iter_number_ = tf.placeholder(dtype=tf.int32, name='iter_number')\n active_policies = tf.placeholder(dtype=tf.float32, shape=(capacity), name='active_policies')\n losses_with_name = []\n\n # Total number of trajectories\n N_total = tf.reduce_sum(active_policies) * n_episodes\n\n # Split operations\n disc_rew_split = tf.reshape(disc_rew_ * mask_, [-1, horizon])\n rew_split = tf.reshape(rew_ * mask_, [-1, horizon])\n mask_split = tf.reshape(mask_, [-1, horizon])\n\n # Policy densities\n target_log_pdf = pi.pd.logp(ac_) * mask_\n target_log_pdf_split = tf.reshape(target_log_pdf, [-1, horizon])\n behavioral_log_pdfs = tf.stack([bpi.pd.logp(ac_) * mask_ for bpi in memory.policies]) # Shape is (capacity, ntraj*horizon)\n behavioral_log_pdfs_split = tf.reshape(behavioral_log_pdfs, [memory.capacity, -1, horizon])\n\n # Compute renyi divergencies and sum over time, then exponentiate\n emp_d2_split = tf.reshape(tf.stack([pi.pd.renyi(bpi.pd, 2) * mask_ for bpi in memory.policies]), [memory.capacity, -1, horizon])\n emp_d2_split_cum = tf.exp(tf.reduce_sum(emp_d2_split, axis=2))\n # Compute arithmetic and harmonic mean of emp_d2\n emp_d2_mean = tf.reduce_mean(emp_d2_split_cum, axis=1)\n emp_d2_arithmetic = tf.reduce_sum(emp_d2_mean * active_policies) / tf.reduce_sum(active_policies)\n emp_d2_harmonic = tf.reduce_sum(active_policies) / tf.reduce_sum(1 / emp_d2_mean)\n\n # Return processing: clipping, centering, discounting\n ep_return = clustered_rew_ #tf.reduce_sum(mask_split * disc_rew_split, axis=1)\n if clipping:\n rew_split = tf.clip_by_value(rew_split, -1, 1)\n if center_return:\n ep_return = ep_return - tf.reduce_mean(ep_return)\n rew_split = rew_split - (tf.reduce_sum(rew_split) / (tf.reduce_sum(mask_split) + 1e-24))\n discounter = [pow(gamma, i) for i in range(0, horizon)] # Decreasing gamma\n discounter_tf = tf.constant(discounter)\n disc_rew_split = rew_split * discounter_tf\n\n # Reward statistics\n return_mean = tf.reduce_mean(ep_return)\n return_std = U.reduce_std(ep_return)\n return_max = tf.reduce_max(ep_return)\n return_min = tf.reduce_min(ep_return)\n return_abs_max = tf.reduce_max(tf.abs(ep_return))\n return_step_max = tf.reduce_max(tf.abs(rew_split)) # Max step reward\n return_step_mean = tf.abs(tf.reduce_mean(rew_split))\n positive_step_return_max = tf.maximum(0.0, tf.reduce_max(rew_split))\n negative_step_return_max = tf.maximum(0.0, tf.reduce_max(-rew_split))\n return_step_maxmin = tf.abs(positive_step_return_max - negative_step_return_max)\n losses_with_name.extend([(return_mean, 'InitialReturnMean'),\n (return_max, 'InitialReturnMax'),\n (return_min, 'InitialReturnMin'),\n (return_std, 'InitialReturnStd'),\n (emp_d2_arithmetic, 'EmpiricalD2Arithmetic'),\n (emp_d2_harmonic, 'EmpiricalD2Harmonic'),\n (return_step_max, 'ReturnStepMax'),\n (return_step_maxmin, 'ReturnStepMaxmin')])\n\n if iw_method == 'is':\n # Sum the log prob over time. 
Shapes: target(Nep, H), behav (Cap, Nep, H)\n target_log_pdf_episode = tf.reduce_sum(target_log_pdf_split, axis=1)\n behavioral_log_pdf_episode = tf.reduce_sum(behavioral_log_pdfs_split, axis=2)\n # To avoid numerical instability, compute the inversed ratio\n log_ratio = target_log_pdf_split - behavioral_log_pdfs_split\n inverse_log_ratio_episode = - tf.reduce_sum(log_ratio, axis=2)\n\n iw = 1 / tf.reduce_sum(tf.exp(inverse_log_ratio_episode) * tf.expand_dims(active_policies, -1), axis=0)\n\n # Compute also the balance-heuristic weights\n iw_split = tf.reshape(iw, (memory.capacity, -1))\n iw_by_behavioral = tf.reduce_mean(iw_split, axis=1)\n losses_with_name.append((iw_by_behavioral[0] / tf.reduce_sum(iw_by_behavioral), 'MultiIWFirstRatio'))\n losses_with_name.append((tf.reduce_max(iw_by_behavioral), 'MultiIWMax'))\n losses_with_name.append((tf.reduce_sum(iw_by_behavioral), 'MultiIWSum'))\n losses_with_name.append((tf.reduce_min(iw_by_behavioral), 'MultiIWMin'))\n\n # Get the probability by exponentiation\n #target_pdf_episode = tf.exp(target_log_pdf_episode)\n #behavioral_pdf_episode = tf.exp(behavioral_log_pdf_episode)\n # Get the denominator by averaging over behavioral policies\n #behavioral_pdf_mixture = tf.reduce_mean(behavioral_pdf_episode, axis=0) + 1e-24\n #iw = target_pdf_episode / behavioral_pdf_mixture\n iwn = iw / n_episodes\n\n # Compute the J\n w_return_mean = tf.reduce_sum(ep_return * iwn)\n # Empirical D2 of the mixture and relative ESS\n ess_renyi_arithmetic = N_total / emp_d2_arithmetic\n ess_renyi_harmonic = N_total / emp_d2_harmonic\n # Log quantities\n losses_with_name.extend([(tf.reduce_max(iw), 'MaxIW'),\n (tf.reduce_min(iw), 'MinIW'),\n (tf.reduce_mean(iw), 'MeanIW'),\n (U.reduce_std(iw), 'StdIW'),\n (tf.reduce_min(target_log_pdf_episode), 'MinTargetPdf'),\n (tf.reduce_min(behavioral_log_pdf_episode), 'MinBehavPdf'),\n (ess_renyi_arithmetic, 'ESSRenyiArithmetic'),\n (ess_renyi_harmonic, 'ESSRenyiHarmonic')])\n else:\n raise NotImplementedError()\n\n if bound == 'J':\n bound_ = w_return_mean\n elif bound == 'max-d2-harmonic':\n bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_harmonic)) * return_abs_max\n elif bound == 'max-d2-arithmetic':\n bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_arithmetic)) * return_abs_max\n else:\n raise NotImplementedError()\n\n # Policy entropy for exploration\n ent = pi.pd.entropy()\n meanent = tf.reduce_mean(ent)\n losses_with_name.append((meanent, 'MeanEntropy'))\n # Add policy entropy bonus\n if entropy != 'none':\n scheme, v1, v2 = entropy.split(':')\n if scheme == 'step':\n entcoeff = tf.cond(iter_number_ < int(v2), lambda: float(v1), lambda: float(0.0))\n losses_with_name.append((entcoeff, 'EntropyCoefficient'))\n entbonus = entcoeff * meanent\n bound_ = bound_ + entbonus\n elif scheme == 'lin':\n ip = tf.cast(iter_number_ / max_iters, tf.float32)\n entcoeff_decay = tf.maximum(0.0, float(v2) + (float(v1) - float(v2)) * (1.0 - ip))\n losses_with_name.append((entcoeff_decay, 'EntropyCoefficient'))\n entbonus = entcoeff_decay * meanent\n bound_ = bound_ + entbonus\n elif scheme == 'exp':\n ent_f = tf.exp(-tf.abs(tf.reduce_mean(iw) - 1) * float(v2)) * float(v1)\n losses_with_name.append((ent_f, 'EntropyCoefficient'))\n bound_ = bound_ + ent_f * meanent\n else:\n raise Exception('Unrecognized entropy scheme.')\n\n losses_with_name.append((w_return_mean, 'ReturnMeanIW'))\n losses_with_name.append((bound_, 'Bound'))\n losses, loss_names = map(list, zip(*losses_with_name))\n\n '''\n if 
use_natural_gradient:\n p = tf.placeholder(dtype=tf.float32, shape=[None])\n target_logpdf_episode = tf.reduce_sum(target_log_pdf_split * mask_split, axis=1)\n grad_logprob = U.flatgrad(tf.stop_gradient(iwn) * target_logpdf_episode, var_list)\n dot_product = tf.reduce_sum(grad_logprob * p)\n hess_logprob = U.flatgrad(dot_product, var_list)\n compute_linear_operator = U.function([p, ob_, ac_, disc_rew_, mask_], [-hess_logprob])\n '''\n\n assert_ops = tf.group(*tf.get_collection('asserts'))\n print_ops = tf.group(*tf.get_collection('prints'))\n\n compute_lossandgrad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses + [U.flatgrad(bound_, var_list), assert_ops, print_ops])\n compute_grad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [U.flatgrad(bound_, var_list), assert_ops, print_ops])\n compute_bound = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [bound_, assert_ops, print_ops])\n compute_losses = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses)\n #compute_temp = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [log_inverse_ratio, abc, iw])\n\n set_parameter = U.SetFromFlat(var_list)\n get_parameter = U.GetFlat(var_list)\n policy_reinit = tf.variables_initializer(var_list)\n\n if sampler is None:\n seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=gamma)\n sampler = type(\"SequentialSampler\", (object,), {\"collect\": lambda self, _: seg_gen.__next__()})()\n\n U.initialize()\n\n # Starting optimizing\n episodes_so_far = 0\n timesteps_so_far = 0\n iters_so_far = 0\n tstart = time.time()\n lenbuffer = deque(maxlen=n_episodes)\n rewbuffer = deque(maxlen=n_episodes)\n\n while True:\n\n iters_so_far += 1\n if iters_so_far == 50:\n print('=== CHANGED GAMMA TO 1.0')\n seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=1.0)\n sampler = type(\"SequentialSampler\", (object,), {\"collect\": lambda self, _: seg_gen.__next__()})()\n\n if render_after is not None and iters_so_far % render_after == 0:\n if hasattr(env, 'render'):\n render(env, pi, horizon)\n\n if callback:\n callback(locals(), globals())\n\n if iters_so_far >= max_iters:\n print('Finished...')\n break\n\n logger.log('********** Iteration %i ************' % iters_so_far)\n\n theta = get_parameter()\n\n with timed('sampling'):\n seg = sampler.collect(theta)\n\n lens, rets = seg['ep_lens'], seg['ep_rets']\n lenbuffer.extend(lens)\n rewbuffer.extend(rets)\n episodes_so_far += len(lens)\n timesteps_so_far += sum(lens)\n\n # Adding batch of trajectories to memory\n memory.add_trajectory_batch(seg)\n\n # Get multiple batches from memory\n seg_with_memory = memory.get_trajectories()\n\n # Get clustered reward\n reward_matrix = np.reshape(seg_with_memory['disc_rew'] * seg_with_memory['mask'], (-1, horizon))\n ep_reward = np.sum(reward_matrix, axis=1)\n ep_reward = cluster_rewards(ep_reward, reward_clustering)\n\n args = ob, ac, rew, disc_rew, clustered_rew, mask, iter_number, active_policies = (seg_with_memory['ob'],\n seg_with_memory['ac'],\n seg_with_memory['rew'],\n seg_with_memory['disc_rew'],\n ep_reward,\n seg_with_memory['mask'],\n iters_so_far,\n memory.get_active_policies_mask())\n\n def evaluate_loss():\n loss = compute_bound(*args)\n return loss[0]\n\n def evaluate_gradient():\n gradient = compute_grad(*args)\n return 
gradient[0]\n\n if use_natural_gradient:\n def evaluate_fisher_vector_prod(x):\n return compute_linear_operator(x, *args)[0] + fisher_reg * x\n\n def evaluate_natural_gradient(g):\n return cg(evaluate_fisher_vector_prod, g, cg_iters=10, verbose=0)\n else:\n evaluate_natural_gradient = None\n\n with timed('summaries before'):\n logger.record_tabular(\"Iteration\", iters_so_far)\n logger.record_tabular(\"InitialBound\", evaluate_loss())\n logger.record_tabular(\"EpLenMean\", np.mean(lenbuffer))\n logger.record_tabular(\"EpRewMean\", np.mean(rewbuffer))\n logger.record_tabular(\"EpThisIter\", len(lens))\n logger.record_tabular(\"EpisodesSoFar\", episodes_so_far)\n logger.record_tabular(\"TimestepsSoFar\", timesteps_so_far)\n logger.record_tabular(\"TimeElapsed\", time.time() - tstart)\n\n if save_weights > 0 and iters_so_far % save_weights == 0:\n logger.record_tabular('Weights', str(get_parameter()))\n import pickle\n file = open('checkpoint' + str(iters_so_far) + '.pkl', 'wb')\n pickle.dump(theta, file)\n\n if not warm_start or memory.get_current_load() == capacity:\n # Optimize\n with timed(\"offline optimization\"):\n theta, improvement = optimize_offline(theta,\n set_parameter,\n line_search,\n evaluate_loss,\n evaluate_gradient,\n evaluate_natural_gradient,\n max_offline_ite=max_offline_iters)\n\n set_parameter(theta)\n print(theta)\n\n with timed('summaries after'):\n meanlosses = np.array(compute_losses(*args))\n for (lossname, lossval) in zip(loss_names, meanlosses):\n logger.record_tabular(lossname, lossval)\n else:\n # Reinitialize the policy\n tf.get_default_session().run(policy_reinit)\n\n logger.dump_tabular()\n\n env.close()\n"
] | [
[
"numpy.sum",
"tensorflow.reduce_max",
"tensorflow.reshape",
"tensorflow.abs",
"tensorflow.reduce_sum",
"numpy.reshape",
"numpy.set_printoptions",
"tensorflow.clip_by_value",
"numpy.isnan",
"tensorflow.reduce_min",
"tensorflow.constant",
"numpy.mean",
"tensorflow.variables_initializer",
"tensorflow.get_collection",
"tensorflow.expand_dims",
"tensorflow.get_default_session",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.sqrt",
"tensorflow.reduce_mean",
"tensorflow.exp",
"numpy.dot"
]
] |
NicolasHug/pyts | [
"29659fb09f568df2e7f8190f2d5a1c383dc7e9fa"
] | [
"pyts/transformation/boss.py"
] | [
"\"\"\"Code for Bag-of-SFA Symbols.\"\"\"\n\n# Author: Johann Faouzi <[email protected]>\n# License: BSD-3-Clause\n\nimport numpy as np\nfrom math import ceil\nfrom scipy.sparse import csr_matrix\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.utils.validation import check_array, check_is_fitted\nfrom sklearn.utils.multiclass import check_classification_targets\nfrom ..approximation import SymbolicFourierApproximation\nfrom ..utils import windowed_view\n\n\nclass BOSS(BaseEstimator, TransformerMixin):\n \"\"\"Bag of Symbolic Fourier Approximation Symbols.\n\n For each time series, subseries are extracted using a slidind window.\n Then the subseries are transformed into a word using the Symbolic\n Fourier Approximation (SFA) algorithm. For each time series, the words\n are grouped together and a histogram counting the occurences of each\n word is created.\n\n Parameters\n ----------\n word_size : int (default = 4)\n Size of each word.\n\n n_bins : int (default = 4)\n The number of bins to produce. It must be between 2 and 26.\n\n strategy : str (default = 'quantile')\n Strategy used to define the widths of the bins:\n\n - 'uniform': All bins in each sample have identical widths\n - 'quantile': All bins in each sample have the same number of points\n - 'normal': Bin edges are quantiles from a standard normal distribution\n - 'entropy': Bin edges are computed using information gain\n\n window_size : int or float (default = 10)\n Size of the sliding window. If float, it represents the percentage of\n the size of each time series and must be between 0 and 1. The window\n size will be computed as ``ceil(window_size * n_timestamps)``.\n\n window_step : int or float (default = 1)\n Step of the sliding window. If float, it represents the percentage of\n the size of each time series and must be between 0 and 1. The window\n size will be computed as ``ceil(window_step * n_timestamps)``.\n\n anova : bool (default = False)\n If True, the Fourier coefficient selection is done via a one-way\n ANOVA test. If False, the first Fourier coefficients are selected.\n\n drop_sum : bool (default = False)\n If True, the first Fourier coefficient (i.e. the sum of the subseries)\n is dropped. Otherwise, it is kept.\n\n norm_mean : bool (default = False)\n If True, center each subseries before scaling.\n\n norm_std : bool (default = False)\n If True, scale each subseries to unit variance.\n\n numerosity_reduction : bool (default = True)\n If True, delete sample-wise all but one occurence of back to back\n identical occurences of the same words.\n\n sparse : bool (default = True)\n Return a sparse matrix if True, else return an array.\n\n alphabet : None, 'ordinal' or array-like, shape = (n_bins,)\n Alphabet to use. If None, the first `n_bins` letters of the Latin\n alphabet are used.\n\n Attributes\n ----------\n vocabulary_ : dict\n A mapping of feature indices to terms.\n\n References\n ----------\n .. [1] P. Schäfer, \"The BOSS is concerned with time series classification\n in the presence of noise\". 
Data Mining and Knowledge Discovery,\n 29(6), 1505-1530 (2015).\n\n Examples\n --------\n >>> from pyts.datasets import load_gunpoint\n >>> from pyts.transformation import BOSS\n >>> X_train, X_test, _, _ = load_gunpoint(return_X_y=True)\n >>> boss = BOSS(word_size=2, n_bins=2, sparse=False)\n >>> boss.fit(X_train) # doctest: +ELLIPSIS\n BOSS(...)\n >>> sorted(boss.vocabulary_.values())\n ['aa', 'ab', 'ba', 'bb']\n >>> boss.transform(X_test) # doctest: +ELLIPSIS\n array(...)\n\n \"\"\"\n\n def __init__(self, word_size=4, n_bins=4, strategy='quantile',\n window_size=10, window_step=1, anova=False, drop_sum=False,\n norm_mean=False, norm_std=False, numerosity_reduction=True,\n sparse=True, alphabet=None):\n self.word_size = word_size\n self.n_bins = n_bins\n self.strategy = strategy\n self.window_size = window_size\n self.window_step = window_step\n self.anova = anova\n self.drop_sum = drop_sum\n self.norm_mean = norm_mean\n self.norm_std = norm_std\n self.numerosity_reduction = numerosity_reduction\n self.sparse = sparse\n self.alphabet = alphabet\n\n def fit(self, X, y=None):\n \"\"\"Fit the model according to the given training data.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_timestamps)\n Training vector.\n\n y : None or array-like, shape = (n_samples,)\n Class labels for each data sample.\n\n Returns\n -------\n self : object\n\n \"\"\"\n X = check_array(X)\n n_samples, n_timestamps = X.shape\n if y is not None:\n check_classification_targets(y)\n\n window_size, window_step = self._check_params(n_timestamps)\n n_windows = (n_timestamps - window_size + window_step) // window_step\n\n X_windowed = windowed_view(\n X, window_size=window_size, window_step=window_step\n )\n X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)\n\n sfa = SymbolicFourierApproximation(\n n_coefs=self.word_size, drop_sum=self.drop_sum, anova=self.anova,\n norm_mean=self.norm_mean, norm_std=self.norm_std,\n n_bins=self.n_bins, strategy=self.strategy, alphabet=self.alphabet\n )\n if y is None:\n y_repeated = None\n else:\n y_repeated = np.repeat(y, n_windows)\n X_sfa = sfa.fit_transform(X_windowed, y_repeated)\n\n X_word = np.asarray([''.join(X_sfa[i])\n for i in range(n_samples * n_windows)])\n X_word = X_word.reshape(n_samples, n_windows)\n\n if self.numerosity_reduction:\n not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],\n np.full(n_samples, True)]\n X_bow = np.asarray([' '.join(X_word[i, not_equal[i]])\n for i in range(n_samples)])\n else:\n X_bow = np.asarray([' '.join(X_word[i]) for i in range(n_samples)])\n\n vectorizer = CountVectorizer()\n vectorizer.fit(X_bow)\n self.vocabulary_ = {value: key for key, value in\n vectorizer.vocabulary_.items()}\n self._window_size = window_size\n self._window_step = window_step\n self._n_windows = n_windows\n self._sfa = sfa\n self._vectorizer = vectorizer\n return self\n\n def transform(self, X):\n \"\"\"Transform the provided data.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_timestamps)\n Test samples.\n\n Returns\n -------\n X_new : sparse matrix, shape = (n_samples, n_words)\n Document-term matrix.\n\n \"\"\"\n check_is_fitted(self, ['_sfa', '_vectorizer', 'vocabulary_'])\n X = check_array(X)\n n_samples, n_timestamps = X.shape\n\n X_windowed = windowed_view(\n X, window_size=self._window_size, window_step=self._window_step\n )\n X_windowed = X_windowed.reshape(-1, self._window_size)\n\n X_sfa = self._sfa.transform(X_windowed)\n X_word = np.asarray([''.join(X_sfa[i]) for i in range(X_sfa.shape[0])])\n 
X_word = X_word.reshape(n_samples, self._n_windows)\n\n if self.numerosity_reduction:\n not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],\n np.full(n_samples, True)]\n X_bow = np.asarray([' '.join(X_word[i, not_equal[i]])\n for i in range(n_samples)])\n else:\n X_bow = np.asarray([' '.join(X_word[i]) for i in range(n_samples)])\n\n X_boss = self._vectorizer.transform(X_bow)\n if not self.sparse:\n return X_boss.A\n return csr_matrix(X_boss)\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the data then transform it.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_timestamps)\n Training vector.\n\n y : None or array-like, shape = (n_samples,)\n Class labels for each data sample.\n\n Returns\n -------\n X_new : sparse matrix, shape = (n_samples, n_words)\n Document-term matrix.\n\n \"\"\"\n X = check_array(X)\n n_samples, n_timestamps = X.shape\n if y is not None:\n check_classification_targets(y)\n\n window_size, window_step = self._check_params(n_timestamps)\n n_windows = (n_timestamps - window_size + window_step) // window_step\n\n X_windowed = windowed_view(\n X, window_size=window_size, window_step=window_step\n )\n X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)\n\n sfa = SymbolicFourierApproximation(\n n_coefs=self.word_size, drop_sum=self.drop_sum, anova=self.anova,\n norm_mean=self.norm_mean, norm_std=self.norm_std,\n n_bins=self.n_bins, strategy=self.strategy, alphabet=self.alphabet\n )\n if y is None:\n y_repeated = None\n else:\n y_repeated = np.repeat(y, n_windows)\n X_sfa = sfa.fit_transform(X_windowed, y_repeated)\n\n X_word = np.asarray([''.join(X_sfa[i])\n for i in range(n_samples * n_windows)])\n X_word = X_word.reshape(n_samples, n_windows)\n\n if self.numerosity_reduction:\n not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],\n np.full(n_samples, True)]\n X_bow = np.asarray([' '.join(X_word[i, not_equal[i]])\n for i in range(n_samples)])\n else:\n X_bow = np.asarray([' '.join(X_word[i]) for i in range(n_samples)])\n\n vectorizer = CountVectorizer()\n X_boss = vectorizer.fit_transform(X_bow)\n self.vocabulary_ = {value: key for key, value in\n vectorizer.vocabulary_.items()}\n self._window_size = window_size\n self._window_step = window_step\n self._n_windows = n_windows\n self._sfa = sfa\n self._vectorizer = vectorizer\n if not self.sparse:\n return X_boss.A\n return csr_matrix(X_boss)\n\n def _check_params(self, n_timestamps):\n if not isinstance(self.word_size, (int, np.integer)):\n raise TypeError(\"'word_size' must be an integer.\")\n if not self.word_size >= 1:\n raise ValueError(\"'word_size' must be a positive integer.\")\n\n if not isinstance(self.window_size,\n (int, np.integer, float, np.floating)):\n raise TypeError(\"'window_size' must be an integer or a float.\")\n if isinstance(self.window_size, (int, np.integer)):\n if self.drop_sum:\n if not 1 <= self.window_size <= (n_timestamps - 1):\n raise ValueError(\n \"If 'window_size' is an integer, it must be greater \"\n \"than or equal to 1 and lower than or equal to \"\n \"(n_timestamps - 1) if 'drop_sum=True'.\"\n )\n else:\n if not 1 <= self.window_size <= n_timestamps:\n raise ValueError(\n \"If 'window_size' is an integer, it must be greater \"\n \"than or equal to 1 and lower than or equal to \"\n \"n_timestamps if 'drop_sum=False'.\"\n )\n window_size = self.window_size\n else:\n if not 0 < self.window_size <= 1:\n raise ValueError(\n \"If 'window_size' is a float, it must be greater \"\n \"than 0 and lower than or equal to 1.\"\n )\n window_size = 
ceil(self.window_size * n_timestamps)\n\n if not isinstance(self.window_step,\n (int, np.integer, float, np.floating)):\n raise TypeError(\"'window_step' must be an integer or a float.\")\n if isinstance(self.window_step, (int, np.integer)):\n if not 1 <= self.window_step <= n_timestamps:\n raise ValueError(\n \"If 'window_step' is an integer, it must be greater \"\n \"than or equal to 1 and lower than or equal to \"\n \"n_timestamps.\"\n )\n window_step = self.window_step\n else:\n if not 0 < self.window_step <= 1:\n raise ValueError(\n \"If 'window_step' is a float, it must be greater \"\n \"than 0 and lower than or equal to 1.\"\n )\n window_step = ceil(self.window_step * n_timestamps)\n if self.drop_sum:\n if not self.word_size <= (window_size - 1):\n raise ValueError(\n \"'word_size' must be lower than or equal to \"\n \"(window_size - 1) if 'drop_sum=True'.\"\n )\n else:\n if not self.word_size <= window_size:\n raise ValueError(\n \"'word_size' must be lower than or equal to \"\n \"window_size if 'drop_sum=False'.\"\n )\n return window_size, window_step\n"
] | [
[
"sklearn.utils.multiclass.check_classification_targets",
"sklearn.utils.validation.check_is_fitted",
"sklearn.feature_extraction.text.CountVectorizer",
"scipy.sparse.csr_matrix",
"numpy.repeat",
"sklearn.utils.validation.check_array",
"numpy.full"
]
] |
fishjojo/pydmfe | [
"93cfc655314933d3531b5733521a1f95a044f6cb"
] | [
"examples/research/proj/C3H6.py"
] | [
"from pydmfet import proj_ao\nfrom pydmfet.qcwrap.pyscf_rks_ao import rks_ao\nfrom pyscf import gto,scf\nimport numpy as np\nfrom pyscf.tools import molden\nfrom pyscf import lo\nfrom pyscf.lo import iao,orth\nfrom functools import reduce\nimport math\n\nbas ='ccpvdz'\ntemp = 0.01\n\nmol = gto.Mole()\nmol.atom = open('C3H6.xyz').read()\nmol.basis = bas\nmol.charge = 0\nmol.build(max_memory = 4000, verbose=4)\n\n\n#mf = scf.RKS(mol)\nmf = rks_ao(mol,smear_sigma = temp)\nmf.xc = \"pbe,pbe\"\nmf.max_cycle = 50\n\nDMguess = None\nmf.scf(dm0=DMguess)\n\n\nnatoms = mol.natm\nimpAtom = np.zeros([natoms], dtype=int)\nfor i in range(5):\n impAtom[i] = 1\n\n\nembed = proj_ao.proj_embed(mf,impAtom, Ne_env = 8)\nembed.pop_method = 'meta_lowdin'\nembed.make_frozen_orbs(norb = 11)\n#embed.embedding_potential()\n"
] | [
[
"numpy.zeros"
]
] |
Lilgabz/Quantum-Algorithm-Implementations | [
"2bb5df522d76e94b300275dfefff2869ff31bc2c"
] | [
"Quantum Key Distribution/Mutation Testing/QKD Mutation Testing Cirq/Remove_mutant_2.py"
] | [
"import unittest\n\nimport cirq\nfrom cirq.ops import H, X, I\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.random import randint\n\nimport hypothesis.strategies as st\nfrom hypothesis import given, settings\n\ndef generate_binary(len):\n return randint(2, size=len)\n\ndef encode_message(bits, bases, messageLen):\n message = []\n for i in range(messageLen):\n qubits = cirq.LineQubit.range(1)\n qc = cirq.Circuit()\n if bases[i] == 0: # Prepare qubit in Z-basis\n if bits[i] == 0:\n qc.append(cirq.I(qubits[0]))\n else:\n qc.append(cirq.X(qubits[0]))\n else: # Prepare qubit in X-basis\n if bits[i] == 0:\n ### mutant - remove ###\n qc.append(cirq.I(qubits[0]))\n else:\n qc.append(cirq.X(qubits[0]))\n qc.append(cirq.H(qubits[0]))\n message.append(qc)\n return message\n\ndef measure_message(message, bases, messageLen):\n measurements = []\n for q in range(messageLen):\n if bases[q] == 0: # measuring in Z-basis\n if (not message[q].has_measurements()):\n for qubit in message[q].all_qubits():\n message[q].append(cirq.measure(qubit))\n if bases[q] == 1: # measuring in X-basis\n if (not message[q].has_measurements()):\n for qubit in message[q].all_qubits():\n message[q].append(cirq.H(qubit))\n message[q].append(cirq.measure(qubit))\n simulator = cirq.Simulator()\n measured_bit = simulator.run(message[q])\n measurements.append((measured_bit.data.iat[0,0])) \n return measurements\n\ndef remove_garbage(a_bases, b_bases, bits, messageLen):\n good_bits = []\n for q in range(messageLen):\n if a_bases[q] == b_bases[q]:\n # If both used the same basis, add\n # this to the list of 'good' bits\n good_bits.append(bits[q])\n return good_bits\n\ndef sample_bits(bits, selection):\n sample = []\n for i in selection:\n # use np.mod to make sure the\n # bit we sample is always in \n # the list range\n i = np.mod(i, len(bits))\n # pop(i) removes the element of the\n # list at index 'i'\n sample.append(bits.pop(i))\n return sample"
] | [
[
"numpy.random.randint"
]
] |
YMandCL/Hands-On-Deep-Learning-for-Games | [
"0225661409c3bf59ae6b7996c254bb485ebd10cb",
"0225661409c3bf59ae6b7996c254bb485ebd10cb"
] | [
"Chapter03/Chapter_3/musegen/musegen.py",
"Chapter05/Chapter_5/Chapter_5_5.py"
] | [
"# Currently this script is configured to use the note-generator model.\n\nfrom config import sequence_length, output_dir, note_generator_dir\nfrom helper import loadChorales, loadModelAndWeights, createPitchSpecificVocabularies, createDurationVocabularySpecific\nfrom music21 import note, instrument, stream, duration\nimport numpy as np\nimport os\n\n# disable GPU processing\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n# ----------------------------------------------\n\nfrom keras.utils import to_categorical\n\n# select the epoch to use when loading the weights of the model generator\ngenerator_epoch = 43\n\n# how many notes to generate ('end' marks are created along the way and the result is split into pieces)\nnumber_of_notes = 200\n\n# load chorales to create the vocabularies\nprint('loading chorales...')\nnotes = loadChorales()\n\n# create the vocabulary\nnote_vocab, note_names_vocab, note_vocab_categorical = createPitchSpecificVocabularies([x[0] for (x, _) in notes])\nduration_vocab = createDurationVocabularySpecific([d for (_, d) in notes])\nduration_vocab_categorical = to_categorical(range(len(duration_vocab)))\n\nnote_to_int = dict((note, number) for number, note in enumerate(note_vocab))\nint_to_note = dict((number, note) for number, note in enumerate(note_vocab))\n\nduration_to_int = dict((dur, number) for number, dur in enumerate(duration_vocab))\n\nduration_dim = duration_vocab.shape[0]\npitch_dim = np.array(note_vocab).shape[0]\n\nprint('loading networks...')\ndir_path = os.path.dirname(os.path.realpath(__file__))\ngenerator = loadModelAndWeights(os.path.join(dir_path, note_generator_dir, 'model.json'),\n os.path.join(dir_path, note_generator_dir, 'weights-{:02d}.hdf5'.format(generator_epoch)))\n\n# make a melody!!!\npitch_input = np.eye(pitch_dim)[np.random.choice(pitch_dim, size=sequence_length)]\nduration_input = np.eye(duration_dim)[np.random.choice(duration_dim, size=sequence_length)]\n\nprint('generating output...')\n\n# generate notes\ngenerator_output = []\n\nfor _ in range(number_of_notes):\n # reshape inputs\n pi = np.reshape(pitch_input, (1, sequence_length, pitch_dim))\n di = np.reshape(duration_input, (1, sequence_length, duration_dim))\n\n # make prediction\n pitch_pred, dur_pred = generator.predict({'pitches_input': pi, 'durations_input': di}, verbose=0)\n\n generator_output.append((pitch_pred, dur_pred))\n\n pitch_input = np.vstack([pitch_input, pitch_pred])\n pitch_input = pitch_input[1:len(pitch_input)]\n\n duration_input = np.vstack([duration_input, dur_pred])\n duration_input = duration_input[1:len(duration_input)]\n\n\noutput_notes = [(int_to_note[np.argmax(n)], duration_vocab[np.argmax(d)]) for (n, d) in generator_output]\noutput_notes = np.array(output_notes)\noutput_notes = np.reshape(output_notes, (-1, 2))\n\n# output_notes contains: pitch values in midi format (integers), 'rest' marks, 'end' marks\n\n# split the generated notes into pieces based on 'end' marks\nindices = []\nfor (ind, (n, _)) in enumerate(output_notes):\n if n == 'end':\n indices.append(ind)\nindices = np.insert(np.reshape(indices, (-1)), 0, 0)\n \npieces = [output_notes]\nif len(indices) > 1:\n pieces = ([ output_notes[(indices[j] + 1):indices[j + 1] ] for j in range(len(indices) - 1)])\n\nprint('writing output to disk...')\n\nos.makedirs(os.path.join(dir_path, output_dir, 'note-generator'), exist_ok=True)\n\n# output pieces to midi files\nfor index, notes in enumerate(pieces):\n midi_notes = []\n offset = 0\n for n, d in notes:\n 
# since a duration of 0 is included in the vocabulary (for the 'end' marks), the network may generate a 0 duration for other notes\n # naively correct and report this erroneous behaviour\n if abs(float(d)) < 0.001:\n print('found zero duration')\n d = '1.0'\n if n == 'rest':\n new_note = note.Rest()\n new_note.duration = duration.Duration(float(d))\n new_note.offset = offset\n new_note.storedInstrument = instrument.Piano()\n midi_notes.append(new_note)\n else:\n new_note = note.Note(int(n))\n new_note.duration = duration.Duration(float(d))\n new_note.offset = offset\n new_note.storedInstrument = instrument.Piano()\n midi_notes.append(new_note)\n offset += float(d)\n \n midi_stream = stream.Stream(midi_notes)\n midi_stream.write('midi', fp=os.path.join(dir_path, output_dir, 'note-generator', 'sample-{}.mid'.format(index)))",
"# -*- coding: utf-8 -*-\n# source from https://github.com/keon/deep-q-learning/blob/master/dqn.py\nimport random\nimport gym\nimport numpy as np\nfrom collections import deque\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\n\nEPISODES = 1000\n\nclass DQNAgent:\n def __init__(self, state_size, action_size):\n self.state_size = state_size\n self.action_size = action_size\n self.memory = deque(maxlen=2000)\n self.gamma = 0.95 # discount rate\n self.epsilon = 1.0 # exploration rate\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.995\n self.learning_rate = 0.001\n self.model = self._build_model()\n\n def _build_model(self):\n # Neural Net for Deep-Q learning Model\n model = Sequential()\n model.add(Dense(24, input_dim=self.state_size, activation='relu'))\n model.add(Dense(24, activation='relu'))\n model.add(Dense(self.action_size, activation='linear'))\n model.compile(loss='mse',\n optimizer=Adam(lr=self.learning_rate))\n return model\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def act(self, state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_size)\n act_values = self.model.predict(state)\n return np.argmax(act_values[0]) # returns action\n\n def replay(self, batch_size):\n minibatch = random.sample(self.memory, batch_size)\n for state, action, reward, next_state, done in minibatch:\n target = reward\n if not done:\n target = (reward + self.gamma *\n np.amax(self.model.predict(next_state)[0]))\n target_f = self.model.predict(state)\n target_f[0][action] = target\n self.model.fit(state, target_f, epochs=1, verbose=0)\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n\n def load(self, name):\n self.model.load_weights(name)\n\n def save(self, name):\n self.model.save_weights(name)\n\n\nif __name__ == \"__main__\":\n env = gym.make('MountainCar-v0')\n state_size = env.observation_space.shape[0]\n action_size = env.action_space.n\n agent = DQNAgent(state_size, action_size)\n # agent.load(\"./save/cartpole-dqn.h5\")\n done = False\n batch_size = 32\n\n for e in range(EPISODES):\n state = env.reset()\n state = np.reshape(state, [1, state_size]) \n for time in range(500):\n # env.render()\n action = agent.act(state)\n env.render()\n next_state, reward, done, _ = env.step(action)\n reward = reward if not done else -10\n next_state = np.reshape(next_state, [1, state_size])\n agent.remember(state, action, reward, next_state, done)\n state = next_state\n if done:\n print(\"episode: {}/{}, score: {}, e: {:.2}\"\n .format(e, EPISODES, time, agent.epsilon))\n break\n if len(agent.memory) > batch_size:\n agent.replay(batch_size)\n # if e % 10 == 0:\n # agent.save(\"./save/cartpole-dqn.h5\")"
] | [
[
"numpy.vstack",
"numpy.eye",
"numpy.reshape",
"numpy.random.choice",
"numpy.argmax",
"numpy.array"
],
[
"numpy.reshape",
"numpy.random.rand",
"numpy.argmax"
]
] |
HassanDayoub/tfx | [
"dc9221abbb8dad991d1ae22fb91876da1290efae"
] | [
"tfx/orchestration/kubeflow/executor_wrappers.py"
] | [
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Wrappers for TFX executors running as part of a Kubeflow pipeline.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport argparse\nimport json\nimport os\nimport re\nfrom future import utils\nimport six\nimport tensorflow as tf\nfrom typing import Any, Dict, List, Text\n\nfrom tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import\n\nfrom tfx import version\nfrom tfx.components.base import base_executor\nfrom tfx.utils import import_utils\nfrom tfx.utils import types\n\n\ndef parse_tfx_type(json_str: Text):\n \"\"\"Parses a list of artifacts and their types from json.\"\"\"\n json_artifact_list = json.loads(json_str)\n\n tfx_types = []\n for json_artifact in json_artifact_list:\n tfx_type = types.TfxArtifact.parse_from_json_dict(json_artifact)\n tfx_types.append(tfx_type)\n\n return tfx_types\n\n\ndef to_snake_case(name: Text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\nclass KubeflowExecutorWrapper(utils.with_metaclass(abc.ABCMeta), object):\n \"\"\"Abstract base class for all Kubeflow Pipelines-based TFX components.\"\"\"\n\n def __init__(\n self,\n executor_class_path: Text,\n name: Text,\n input_dict: Dict[Text, List[types.TfxArtifact]],\n outputs: Text,\n exec_properties: Dict[Text, Any],\n ):\n self._input_dict = input_dict\n self._output_dict = types.parse_tfx_type_dict(outputs)\n self._component_name = to_snake_case(name)\n self._exec_properties = exec_properties\n self._output_dir = self._exec_properties['output_dir']\n self._workflow_id = os.environ['WORKFLOW_ID']\n\n raw_args = self._exec_properties.get('beam_pipeline_args', [])\n\n # Beam expects str types for it's pipeline args. 
Ensure unicode type is\n # converted to str if required.\n beam_pipeline_args = []\n for arg in raw_args:\n # In order to support both Py2 and Py3: Py3 doesn't have `unicode` type.\n if six.PY2 and isinstance(arg, unicode):\n arg = arg.encode('ascii', 'ignore')\n\n beam_pipeline_args.append(arg)\n\n # TODO(zhitaoli): Revisit usage of setup_file here.\n module_dir = os.path.dirname(os.path.dirname(version.__file__))\n setup_file = os.path.join(module_dir, 'setup.py')\n tf.logging.info('Using setup_file \\'%s\\' to capture TFX dependencies',\n setup_file)\n beam_pipeline_args.append('--setup_file={}'.format(setup_file))\n\n executor_cls = import_utils.import_class_by_path(executor_class_path)\n # TODO(swoonna): Switch to execution_id when available\n unique_id = '{}_{}'.format(self._component_name, self._workflow_id)\n # TODO(swoonna): Add tmp_dir to additional_pipeline_args\n executor_context = base_executor.BaseExecutor.Context(\n beam_pipeline_args=beam_pipeline_args,\n tmp_dir=os.path.join(self._output_dir, '.temp', ''),\n unique_id=unique_id)\n self._executor = executor_cls(executor_context)\n\n def _set_outputs(self):\n tf.logging.info('Using workflow id {}'.format(self._workflow_id))\n\n max_input_span = 0\n for input_list in self._input_dict.values():\n for single_input in input_list:\n max_input_span = max(max_input_span, single_input.span)\n for output_name, output_artifact_list in self._output_dict.items():\n for output_artifact in output_artifact_list:\n output_artifact.uri = os.path.join(self._output_dir,\n self._component_name, output_name,\n self._workflow_id,\n output_artifact.split, '')\n output_artifact.span = max_input_span\n\n def run(self, output_basedir: Text = '/'):\n \"\"\"Runs the wrapped Executor, and writes metadata of output artifacts.\n\n Args:\n output_basedir: Base directory to which output artifacts metadata\n is written. Useful for unit tests.\n \"\"\"\n self._executor.Do(self._input_dict, self._output_dict,\n self._exec_properties)\n\n output_dir = os.path.join(output_basedir, 'output/ml_metadata')\n tf.gfile.MakeDirs(output_dir)\n for output_name, output_artifact_list in self._output_dict.items():\n filename = os.path.join(output_dir, output_name)\n with file_io.FileIO(filename, 'w') as f:\n output_list = [x.json_dict() for x in output_artifact_list]\n f.write(json.dumps(output_list))\n\n\n# TODO(b/132197968): Get rid of all the individual wrapper classes below and\n# combine them into a single generic one that constructs the input dict from\n# the individual named arguments instead. 
In the future, the generic wrapper\n# can call into TFX drivers to handle component-specific logic as well.\nclass CsvExampleGenWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for CSVExampleGen component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(CsvExampleGenWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='CSVExampleGen',\n input_dict={\n 'input-base': parse_tfx_type(args.input_base),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_input_artifact_span()\n self._set_outputs()\n\n def _set_input_artifact_span(self):\n for input_artifact in self._input_dict['input-base']:\n matched = re.match(r'span_([0-9]+)', input_artifact.uri)\n span = matched.group(1) if matched else 1\n input_artifact.span = span\n\n\nclass BigQueryExampleGenWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for BigQueryExampleGen component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(BigQueryExampleGenWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='BigQueryExampleGen',\n input_dict={},\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass StatisticsGenWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for StatisticsGen component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(StatisticsGenWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='StatisticsGen',\n input_dict={\n 'input_data': parse_tfx_type(args.input_data),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass SchemaGenWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for SchemaGen component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(SchemaGenWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='SchemaGen',\n input_dict={\n 'stats': parse_tfx_type(args.stats),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass ExampleValidatorWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for ExampleValidator component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(ExampleValidatorWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='ExampleValidator',\n input_dict={\n 'stats': parse_tfx_type(args.stats),\n 'schema': parse_tfx_type(args.schema),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass TransformWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for Transform component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(TransformWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='Transform',\n input_dict={\n 'input_data': parse_tfx_type(args.input_data),\n 'schema': parse_tfx_type(args.schema),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass TrainerWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for Trainer component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(TrainerWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='Trainer',\n input_dict={\n 'transformed_examples': parse_tfx_type(args.transformed_examples),\n 'transform_output': parse_tfx_type(args.transform_output),\n 'schema': parse_tfx_type(args.schema),\n },\n outputs=args.outputs,\n 
exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n # TODO(ajaygopinathan): Implement warm starting.\n self._exec_properties['warm_starting'] = False\n self._exec_properties['warm_start_from'] = None\n\n\nclass EvaluatorWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for Evaluator component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(EvaluatorWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='Evaluator',\n input_dict={\n 'examples': parse_tfx_type(args.examples),\n 'model_exports': parse_tfx_type(args.model_exports),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n\nclass ModelValidatorWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for ModelValidator component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(ModelValidatorWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='ModelValidator',\n input_dict={\n 'examples': parse_tfx_type(args.examples),\n 'model': parse_tfx_type(args.model),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n # TODO(ajaygopinathan): Implement latest blessed model determination.\n self._exec_properties['latest_blessed_model'] = None\n self._exec_properties['latest_blessed_model_id'] = None\n\n\nclass PusherWrapper(KubeflowExecutorWrapper):\n \"\"\"Wrapper for Pusher component.\"\"\"\n\n def __init__(self, args: argparse.Namespace):\n super(PusherWrapper, self).__init__(\n executor_class_path=args.executor_class_path,\n name='Pusher',\n input_dict={\n 'model_export': parse_tfx_type(args.model_export),\n 'model_blessing': parse_tfx_type(args.model_blessing),\n },\n outputs=args.outputs,\n exec_properties=json.loads(args.exec_properties),\n )\n self._set_outputs()\n\n # TODO(ajaygopinathan): Implement latest pushed model\n self._exec_properties['latest_pushed_model'] = None\n"
] | [
[
"tensorflow.gfile.MakeDirs",
"tensorflow.python.lib.io.file_io.FileIO",
"tensorflow.logging.info"
]
] |
maptube/UMaaS | [
"0758d8352213f332546d728f3eb02411c16c97ac"
] | [
"benchmark/benchmark_SingleOrigin.py"
] | [
"import timeit\nimport os.path\nimport numpy as np\nfrom math import exp, fabs\nfrom sys import float_info\n\nfrom globals import *\nfrom utils import loadMatrix, resizeMatrix\n\nfrom models.SingleOrigin import SingleOrigin\n\n\"\"\"\nBenchmarks for the Single Origin Constrained model (models/SingleOrigin.py)\nAll code here is lifted from the original model code and changed to be\nself-contained (no setup) so that timings of various optimisations are easy.\nCode here is designed to be a test of timings, NOT necessarily a test of\nreturn values, although real data has been used wherever possible i.e. instead\nof an NxN matrix containing random values, I try to load in a real matrix\ninstead.\n\"\"\"\n\n#modelRunsDir = '../model-runs'\n#TObsFilename = 'TObs.bin' #1 mode\n#CijRoadMinFilename = 'Cij_road_min.bin'\n\n#load and init\nTij=loadMatrix(os.path.join(modelRunsDir,TObs31Filename))\ncij=loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))\n#end load and init\n\n###############################################################################\n\n\"\"\"\ncalculateCBar_slow\nMean trips calculation, straight conversion from original C# code, no python optimisation\n@returns float\n\"\"\"\ndef benchmark_calculateCBar_slow():\n #main code\n (M, N) = np.shape(Tij)\n CNumerator = 0.0\n CDenominator = 0.0\n for i in range(0,N):\n for j in range(0,N):\n CNumerator += Tij[i, j] * cij[i, j]\n CDenominator += Tij[i, j]\n CBar = CNumerator / CDenominator\n print(\"CBar=\",CBar)\n\n return CBar\n\n###############################################################################\n\n\"\"\"\ncalculateCBar_fast\nMean trips calculation, python optimised version of \"_slow\"\n@returns float (NOTE: the return value MUST be identical to the _slow version, to prove they're functionally identical)\n\"\"\"\ndef benchmark_calculateCBar_fast():\n #load and init\n Tij=loadMatrix(os.path.join(modelRunsDir,TObs31Filename))\n cij=loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))\n #end load and init\n\n #main code\n CNumerator2 = np.sum(Tij*cij)\n CDenominator2 = np.sum(Tij)\n CBar2=CNumerator2/CDenominator2\n print(\"CBar2=\",CBar2)\n\n return CBar2\n\n###############################################################################\n\n\"\"\"\nThis is a benchmark of the simple Python code for SingleOrigin using different matrix sizes.\nIt is a test for how long a single execution of the main loop takes. Timings are printed\nto the console based on 1000 runs of the model code i.e. 
the timing you see in seconds\nmust be divided by 1000.\nNOTE: this could take a VERY long time to run if you pass in a high number for Nfinish \n\"\"\"\ndef benchmarkSingleOriginMatrixSizes(Nstart,Nfinish,Nstep):\n print(\"benchmark_SingleDest running matrix Nstart=\",Nstart,\" Nfinish=\",Nfinish, \" Nstep=\",Nstep)\n\n #load testing matrices\n TObs1 = loadMatrix(os.path.join(modelRunsDir,TObs31Filename))\n Cij1 = loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))\n\n for N in range(Nstart,Nfinish,Nstep):\n #print(\"TPred runModel N=\",N)\n #set up the model\n testModel = SingleOrigin()\n (TPred, secs)=testModel.benchmarkRun(1000,resizeMatrix(TObs1,N),resizeMatrix(Cij1,N),1.0)\n #NOTE: timing printed to console based on 1000 iterations of the main loop in the above code\n #Should not contain any setup timings - only the actual algorithm run time.\n print(N,\",1000,\",secs) #all console logging from here - makes it nice and easy to import into excel\n\n###############################################################################\n\n"
] | [
[
"numpy.sum",
"numpy.shape"
]
] |
joe-jordan/picosdk-python-wrappers | [
"76f393b500200de168b4f2b74b74aad74d89fd92"
] | [
"ps3000aExamples/ps3000aBlockMSOExample.py"
] | [
"#\n# Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms.\n#\n# PS3000A BLOCK MODE MSO EXAMPLE\n# This example opens a 3000a driver device, sets up one digital port and a trigger to collect a block of data.\n# This data is then split into the indivual digital channels and plotted as the binary value against time in ns.\n\nimport ctypes\nfrom picosdk.ps3000a import ps3000a as ps\nfrom picosdk.functions import splitMSODataPort0, assert_pico_ok\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom array import *\n\n# Gives the device a handle\nstatus = {}\nchandle = ctypes.c_int16()\n\n# Opens the device/s\nstatus[\"openunit\"] = ps.ps3000aOpenUnit(ctypes.byref(chandle), None)\n\ntry:\n assert_pico_ok(status[\"openunit\"])\nexcept:\n # powerstate becomes the status number of openunit\n powerstate = status[\"openunit\"]\n\n # If powerstate is the same as 282 then it will run this if statement\n if powerstate == 282:\n # Changes the power input to \"PICO_POWER_SUPPLY_NOT_CONNECTED\"\n status[\"ChangePowerSource\"] = ps.ps3000aChangePowerSource(chandle, 282)\n # If the powerstate is the same as 286 then it will run this if statement\n elif powerstate == 286:\n # Changes the power input to \"PICO_USB3_0_DEVICE_NON_USB3_0_PORT\"\n status[\"ChangePowerSource\"] = ps.ps3000aChangePowerSource(chandle, 286)\n else:\n raise\n\n assert_pico_ok(status[\"ChangePowerSource\"])\n\n# set up digital port\n# handle = chandle\n# PS3000a_DIGITAL_PORT = 0x80\n# Enable = 1\n# logicLevel = 10000\nstatus[\"SetDigitalPort\"] = ps.ps3000aSetDigitalPort( chandle, 0x80, 1, 10000)\nassert_pico_ok(status[\"SetDigitalPort\"])\n\n# Setting the number of sample to be collected\npreTriggerSamples = 400\npostTriggerSamples = 400\nmaxsamples = preTriggerSamples + postTriggerSamples\n\n# Gets timebase innfomation\n# Handle = chandle\n# Timebase = 2 = timebase\n# Nosample = maxsamples\n# TimeIntervalNanoseconds = ctypes.byref(timeIntervalns)\n# MaxSamples = ctypes.byref(returnedMaxSamples)\n# Segement index = 0\ntimebase = 8\ntimeIntervalns = ctypes.c_float()\nreturnedMaxSamples = ctypes.c_int16()\nstatus[\"GetTimebase\"] = ps.ps3000aGetTimebase2(chandle, timebase, maxsamples, ctypes.byref(timeIntervalns), 1, ctypes.byref(returnedMaxSamples), 0)\nassert_pico_ok(status[\"GetTimebase\"])\n\n# Creates a overlow location for data\noverflow = ctypes.c_int16()\n# Creates converted types maxsamples\ncmaxSamples = ctypes.c_int32(maxsamples)\n\n# Create buffers ready for assigning pointers for data collection\nbufferAMax = (ctypes.c_int16 * maxsamples)()\nbufferAMin = (ctypes.c_int16 * maxsamples)()\n\n# Setting the data buffer location for data collection from PS3000A_DIGITAL_PORT0\n# Handle = Chandle\n# source = PS3000A_DIGITAL_PORT0 = 0x80\n# Buffer max = ctypes.byref(bufferAMax)\n# Buffer min = ctypes.byref(bufferAMin)\n# Buffer length = maxsamples\n# Segment index = 0\n# Ratio mode = ps3000A_Ratio_Mode_None = 0\nstatus[\"SetDataBuffers\"] = ps.ps3000aSetDataBuffers(chandle, 0x80, ctypes.byref(bufferAMax), ctypes.byref(bufferAMin), maxsamples, 0, 0)\nassert_pico_ok(status[\"SetDataBuffers\"])\n\n# Starts the block capture\n# Handle = chandle\n# Number of prTriggerSamples\n# Number of postTriggerSamples\n# Timebase = 2 = 4ns (see Programmer's guide for more information on timebases)\n# time indisposed ms = None (This is not needed within the example)\n# Segment index = 0\n# LpRead = None\n# pParameter = None\nstatus[\"runblock\"] = ps.ps3000aRunBlock(chandle, preTriggerSamples, postTriggerSamples, 
timebase, 1, None, 0, None, None)\nassert_pico_ok(status[\"runblock\"])\n\n# Creates a overlow location for data\noverflow = (ctypes.c_int16 * 10)()\n# Creates converted types maxsamples\ncmaxSamples = ctypes.c_int32(maxsamples)\n\n# Checks data collection to finish the capture\nready = ctypes.c_int16(0)\ncheck = ctypes.c_int16(0)\nwhile ready.value == check.value:\n status[\"isReady\"] = ps.ps3000aIsReady(chandle, ctypes.byref(ready))\n\n# Handle = chandle\n# start index = 0\n# noOfSamples = ctypes.byref(cmaxSamples)\n# DownSampleRatio = 0\n# DownSampleRatioMode = 0\n# SegmentIndex = 0\n# Overflow = ctypes.byref(overflow)\n\nstatus[\"GetValues\"] = ps.ps3000aGetValues(chandle, 0, ctypes.byref(cmaxSamples), 0, 0, 0, ctypes.byref(overflow))\nassert_pico_ok(status[\"GetValues\"])\n\nbufferAMaxBinaryD0, bufferAMaxBinaryD1, bufferAMaxBinaryD2, bufferAMaxBinaryD3, bufferAMaxBinaryD4, bufferAMaxBinaryD5, bufferAMaxBinaryD6, bufferAMaxBinaryD7 = splitMSODataPort0(cmaxSamples, bufferAMax)\n\n# Creates the time data\ntime = np.linspace(0, (cmaxSamples.value) * timeIntervalns.value, cmaxSamples.value)\n\n# Plots the data from digital channel onto a graph\nplt.plot(time, bufferAMaxBinaryD0[:])\nplt.plot(time, bufferAMaxBinaryD1[:])\nplt.plot(time, bufferAMaxBinaryD2[:])\nplt.plot(time, bufferAMaxBinaryD3[:])\nplt.plot(time, bufferAMaxBinaryD4[:])\nplt.plot(time, bufferAMaxBinaryD5[:])\nplt.plot(time, bufferAMaxBinaryD6[:])\nplt.plot(time, bufferAMaxBinaryD7[:])\nplt.xlabel('Time (ns)')\nplt.ylabel('Binary')\nplt.show()\n\n\n# Stops the scope\n# Handle = chandle\nstatus[\"stop\"] = ps.ps3000aStop(chandle)\nassert_pico_ok(status[\"stop\"])\n\n# Closes the unit\n# Handle = chandle\nstatus[\"stop\"] = ps.ps3000aCloseUnit(chandle)\nassert_pico_ok(status[\"stop\"])\n\n# Displays the staus returns\nprint(status)"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.xlabel"
]
] |
Kshitiz-Bansal/wavetorch | [
"927ad02dc9db83f72b8df1d91418a6681e60fd56"
] | [
"wavetorch/io.py"
] | [
"import copy\nimport os\n\nimport torch\n\nfrom . import geom\nfrom .cell import WaveCell\nfrom .probe import WaveIntensityProbe\nfrom .rnn import WaveRNN\nfrom .source import WaveSource\nfrom .utils import set_dtype\n\ndef save_model(model,\n\t\t\t name,\n\t\t\t savedir='./study/',\n\t\t\t history=None,\n\t\t\t history_geom_state=None,\n\t\t\t cfg=None,\n\t\t\t verbose=True):\n\t\"\"\"Save the model state and history to a file\n\t\"\"\"\n\tstr_filename = name + '.pt'\n\tif not os.path.exists(savedir):\n\t\tos.makedirs(savedir)\n\tstr_savepath = savedir + str_filename\n\n\tif history_geom_state is None:\n\t\thistory_geom_state = [model.cell.geom.state_reconstruction_args()]\n\n\tdata = {'model_geom_class_str': model.cell.geom.__class__.__name__,\n\t\t\t# Class name so we know which constructor to call in load()\n\t\t\t'model_state': model.state_dict(),\n\t\t\t# For now just store model state without history (only geom is likely to change)\n\t\t\t'history': history,\n\t\t\t'history_geom_state': history_geom_state, # Full history of the geometry state,\n\t\t\t'cfg': cfg}\n\n\tif verbose:\n\t\tprint(\"Saving model to %s\" % str_savepath)\n\ttorch.save(data, str_savepath)\n\n\ndef new_geometry(class_str, state):\n\tWaveGeometryClass = getattr(geom, class_str)\n\tgeom_state = copy.deepcopy(state)\n\treturn WaveGeometryClass(**geom_state)\n\n\ndef load_model(str_filename, which_iteration=-1):\n\t\"\"\"Load a previously saved model and its history from a file\n\t\"\"\"\n\n\tprint(\"Loading model from %s\" % str_filename)\n\n\tdata = torch.load(str_filename)\n\n\t# Set the type for floats from the save\n\tset_dtype(data['cfg']['dtype'])\n\n\t# Reconstruct Geometry\n\tnew_geom = new_geometry(data['model_geom_class_str'], data['history_geom_state'][which_iteration])\n\n\t# Get model state to recreate probes and sources\n\tmodel_state = copy.deepcopy(data['model_state'])\n\n\t# Parse out the probe and source coords\n\tpx = [model_state[k].item() for k in model_state if 'probes' in k and 'x' in k]\n\tpy = [model_state[k].item() for k in model_state if 'probes' in k and 'y' in k]\n\tsx = [model_state[k].item() for k in model_state if 'sources' in k and 'x' in k]\n\tsy = [model_state[k].item() for k in model_state if 'sources' in k and 'y' in k]\n\n\t# Manually add the probes and sources\n\tnew_probes = []\n\tfor (x, y) in zip(px, py):\n\t\tnew_probes.append(WaveIntensityProbe(x, y))\n\t\t# TODO(ian): here we should actually try to infer the type of probe (e.g. intensity or not)\n\n\tnew_sources = []\n\tfor (x, y) in zip(sx, sy):\n\t\tnew_sources.append(WaveSource(x, y))\n\n\tnew_cell = WaveCell(model_state['cell.dt'].item(), new_geom)\n\tnew_model = WaveRNN(new_cell, new_sources, new_probes)\n\t# Put into eval mode (doesn't really matter for us but whatever)\n\tnew_model.eval()\n\n\treturn new_model, data['history'], data['history_geom_state'], data['cfg']\n"
] | [
[
"torch.save",
"torch.load"
]
] |
philtrade/gQuant | [
"08b2a82a257c234b92f097b925f25cab16fd0926"
] | [
"tests/unit/test_indicator_node.py"
] | [
"'''\nTechnical Indicator Node Unit Tests\n\nTo run unittests:\n\n# Using standard library unittest\n\npython -m unittest -v\npython -m unittest tests/unit/test_indicator_node.py -v\n\nor\n\npython -m unittest discover <test_directory>\npython -m unittest discover -s <directory> -p 'test_*.py'\n\n# Using pytest\n# \"conda install pytest\" or \"pip install pytest\"\npytest -v tests\npytest -v tests/unit/test_indicator_node.py\n\n'''\nimport warnings\nimport unittest\nimport cudf\nimport gquant.cuindicator as gi\nfrom gquant.plugin_nodes.transform.indicatorNode import IndicatorNode\nfrom gquant.dataframe_flow.task import Task\nfrom .utils import make_orderer\nimport numpy as np\nimport copy\n\nordered, compare = make_orderer()\nunittest.defaultTestLoader.sortTestMethodsUsing = compare\n\n\nclass TestIndicatorNode(unittest.TestCase):\n\n def setUp(self):\n warnings.simplefilter('ignore', category=ImportWarning)\n warnings.simplefilter('ignore', category=DeprecationWarning)\n # ignore importlib warnings.\n size = 200\n half = size // 2\n self.size = size\n self.half = half\n np.random.seed(10)\n random_array = np.random.rand(size)\n open_array = np.random.rand(size)\n close_array = np.random.rand(size)\n high_array = np.random.rand(size)\n low_array = np.random.rand(size)\n volume_array = np.random.rand(size)\n indicator = np.zeros(size, dtype=np.int32)\n indicator[0] = 1\n indicator[half] = 1\n df = cudf.DataFrame()\n df['in'] = random_array\n df['open'] = open_array\n df['close'] = close_array\n df['high'] = high_array\n df['low'] = low_array\n df['volume'] = volume_array\n df['indicator'] = indicator\n self._cudf_data = df\n self.conf = {\n \"indicators\": [\n {\"function\": \"port_chaikin_oscillator\",\n \"columns\": [\"high\", \"low\", \"close\", \"volume\"],\n \"args\": [10, 20]},\n {\"function\": \"port_bollinger_bands\",\n \"columns\": [\"close\"],\n \"args\": [10],\n \"outputs\": [\"b1\", \"b2\"]}\n ],\n \"remove_na\": True\n }\n\n def tearDown(self):\n pass\n\n @ordered\n def test_colums(self):\n '''Test node columns requirments'''\n node_obj = {\"id\": \"abc\",\n \"type\": \"IndicatorNode\",\n \"conf\": self.conf,\n \"inputs\": []}\n task = Task(node_obj)\n inN = IndicatorNode(task)\n\n col = \"indicator\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n col = \"high\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n col = \"low\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n col = \"close\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n col = \"volume\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.required, msg)\n\n col = \"CH_OS_10_20\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.addition, msg)\n col = \"BO_BA_b1_10\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.addition, msg)\n col = \"BO_BA_b2_10\"\n msg = \"bad error: %s is missing\" % (col)\n self.assertTrue(col in inN.addition, msg)\n\n @ordered\n def test_drop(self):\n '''Test node columns drop'''\n node_obj = {\"id\": \"abc\",\n \"type\": \"IndicatorNode\",\n \"conf\": self.conf,\n \"inputs\": []}\n task = Task(node_obj)\n inN = IndicatorNode(task)\n o = inN.process([self._cudf_data])\n msg = \"bad error: df len %d is not right\" % (len(o))\n self.assertTrue(len(o) == 162, msg)\n\n newConf = copy.deepcopy(self.conf)\n newConf['remove_na'] = False\n node_obj = {\"id\": 
\"abc\",\n \"type\": \"IndicatorNode\",\n \"conf\": newConf,\n \"inputs\": []}\n task = Task(node_obj)\n inN = IndicatorNode(task)\n o = inN.process([self._cudf_data])\n msg = \"bad error: df len %d is not right\" % (len(o))\n self.assertTrue(len(o) == 200, msg)\n\n @ordered\n def test_signal(self):\n '''Test signal computation'''\n\n newConf = copy.deepcopy(self.conf)\n newConf['remove_na'] = False\n node_obj = {\"id\": \"abc\",\n \"type\": \"IndicatorNode\",\n \"conf\": newConf,\n \"inputs\": []}\n task = Task(node_obj)\n inN = IndicatorNode(task)\n o = inN.process([self._cudf_data])\n # check chaikin oscillator computation\n r_cudf = gi.chaikin_oscillator(self._cudf_data[:self.half]['high'],\n self._cudf_data[:self.half]['low'],\n self._cudf_data[:self.half]['close'],\n self._cudf_data[:self.half]['volume'],\n 10, 20)\n computed = o[:self.half]['CH_OS_10_20'].to_array('pandas')\n ref = r_cudf.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n r_cudf = gi.chaikin_oscillator(self._cudf_data[self.half:]['high'],\n self._cudf_data[self.half:]['low'],\n self._cudf_data[self.half:]['close'],\n self._cudf_data[self.half:]['volume'],\n 10, 20)\n computed = o[self.half:]['CH_OS_10_20'].to_array('pandas')\n ref = r_cudf.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n # check bollinger bands computation\n r_cudf = gi.bollinger_bands(self._cudf_data[:self.half]['close'], 10)\n computed = o[:self.half][\"BO_BA_b1_10\"].to_array('pandas')\n ref = r_cudf.b1.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n computed = o[:self.half][\"BO_BA_b2_10\"].to_array('pandas')\n ref = r_cudf.b2.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n r_cudf = gi.bollinger_bands(self._cudf_data[self.half:]['close'], 10)\n computed = o[self.half:][\"BO_BA_b1_10\"].to_array('pandas')\n ref = r_cudf.b1.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n computed = o[self.half:][\"BO_BA_b2_10\"].to_array('pandas')\n ref = r_cudf.b2.to_array('pandas')\n err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()\n msg = \"bad error %f\\n\" % (err,)\n self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.zeros",
"numpy.isclose",
"numpy.random.seed",
"numpy.random.rand",
"numpy.isnan"
]
] |
avivajpeyi/scipy | [
"dbfe06e6618232b26c241cbe8861e2ea1489b535",
"dbfe06e6618232b26c241cbe8861e2ea1489b535"
] | [
"scipy/fft/tests/test_real_transforms.py",
"tools/refguide_check.py"
] | [
"\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal\nimport pytest\n\nfrom scipy.fft import dct, idct, dctn, idctn, dst, idst, dstn, idstn\nimport scipy.fft as fft\nfrom scipy import fftpack\n\n# scipy.fft wraps the fftpack versions but with normalized inverse transforms.\n# So, the forward transforms and definitions are already thoroughly tested in\n# fftpack/test_real_transforms.py\n\n\[email protected](\"forward, backward\", [(dct, idct), (dst, idst)])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"n\", [2, 3, 4, 5, 10, 16])\[email protected](\"axis\", [0, 1])\[email protected](\"norm\", [None, 'ortho'])\ndef test_identity_1d(forward, backward, type, n, axis, norm):\n # Test the identity f^-1(f(x)) == x\n x = np.random.rand(n, n)\n\n y = forward(x, type, axis=axis, norm=norm)\n z = backward(y, type, axis=axis, norm=norm)\n assert_allclose(z, x)\n\n pad = [(0, 0)] * 2\n pad[axis] = (0, 4)\n\n y2 = np.pad(y, pad, mode='edge')\n z2 = backward(y2, type, n, axis, norm)\n assert_allclose(z2, x)\n\n\[email protected](\"forward, backward\", [(dct, idct), (dst, idst)])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"dtype\", [np.float16, np.float32, np.float64,\n np.complex64, np.complex128])\[email protected](\"axis\", [0, 1])\[email protected](\"norm\", [None, 'ortho'])\[email protected](\"overwrite_x\", [True, False])\ndef test_identity_1d_overwrite(forward, backward, type, dtype, axis, norm,\n overwrite_x):\n # Test the identity f^-1(f(x)) == x\n x = np.random.rand(7, 8)\n x_orig = x.copy()\n\n y = forward(x, type, axis=axis, norm=norm, overwrite_x=overwrite_x)\n y_orig = y.copy()\n z = backward(y, type, axis=axis, norm=norm, overwrite_x=overwrite_x)\n if not overwrite_x:\n assert_allclose(z, x, rtol=1e-6, atol=1e-6)\n assert_array_equal(x, x_orig)\n assert_array_equal(y, y_orig)\n else:\n assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)\n\n\[email protected](\"forward, backward\", [(dctn, idctn), (dstn, idstn)])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"shape, axes\",\n [\n ((4, 4), 0),\n ((4, 4), 1),\n ((4, 4), None),\n ((4, 4), (0, 1)),\n ((10, 12), None),\n ((10, 12), (0, 1)),\n ((4, 5, 6), None),\n ((4, 5, 6), 1),\n ((4, 5, 6), (0, 2)),\n ])\[email protected](\"norm\", [None, 'ortho'])\ndef test_identity_nd(forward, backward, type, shape, axes, norm):\n # Test the identity f^-1(f(x)) == x\n\n x = np.random.random(shape)\n\n if axes is not None:\n shape = np.take(shape, axes)\n\n y = forward(x, type, axes=axes, norm=norm)\n z = backward(y, type, axes=axes, norm=norm)\n assert_allclose(z, x)\n\n if axes is None:\n pad = [(0, 4)] * x.ndim\n elif isinstance(axes, int):\n pad = [(0, 0)] * x.ndim\n pad[axes] = (0, 4)\n else:\n pad = [(0, 0)] * x.ndim\n\n for a in axes:\n pad[a] = (0, 4)\n\n y2 = np.pad(y, pad, mode='edge')\n z2 = backward(y2, type, shape, axes, norm)\n assert_allclose(z2, x)\n\n\[email protected](\"forward, backward\", [(dctn, idctn), (dstn, idstn)])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"shape, axes\",\n [\n ((4, 5), 0),\n ((4, 5), 1),\n ((4, 5), None),\n ])\[email protected](\"dtype\", [np.float16, np.float32, np.float64,\n np.complex64, np.complex128])\[email protected](\"norm\", [None, 'ortho'])\[email protected](\"overwrite_x\", [False, True])\ndef test_identity_nd_overwrite(forward, backward, type, shape, axes, dtype,\n norm, overwrite_x):\n # Test the identity f^-1(f(x)) == x\n\n x = np.random.random(shape).astype(dtype)\n x_orig = x.copy()\n\n if axes is not 
None:\n shape = np.take(shape, axes)\n\n y = forward(x, type, axes=axes, norm=norm)\n y_orig = y.copy()\n z = backward(y, type, axes=axes, norm=norm)\n if overwrite_x:\n assert_allclose(z, x_orig, rtol=1e-6, atol=1e-6)\n else:\n assert_allclose(z, x, rtol=1e-6, atol=1e-6)\n assert_array_equal(x, x_orig)\n assert_array_equal(y, y_orig)\n\n\[email protected](\"func\", ['dct', 'dst', 'dctn', 'dstn'])\[email protected](\"type\", [1, 2, 3, 4])\[email protected](\"norm\", [None, 'ortho'])\ndef test_fftpack_equivalience(func, type, norm):\n x = np.random.rand(8, 16)\n fft_res = getattr(fft, func)(x, type, norm=norm)\n fftpack_res = getattr(fftpack, func)(x, type, norm=norm)\n\n assert_allclose(fft_res, fftpack_res)\n",
"#!/usr/bin/env python\n\"\"\"\nrefguide_check.py [OPTIONS] [-- ARGS]\n\nCheck for a Scipy submodule whether the objects in its __all__ dict\ncorrespond to the objects included in the reference guide.\n\nExample of usage::\n\n $ python refguide_check.py optimize\n\nNote that this is a helper script to be able to check if things are missing;\nthe output of this script does need to be checked manually. In some cases\nobjects are left out of the refguide for a good reason (it's an alias of\nanother function, or deprecated, or ...)\n\nAnother use of this helper script is to check validity of code samples\nin docstrings. This is different from doctesting [we do not aim to have\nscipy docstrings doctestable!], this is just to make sure that code in\ndocstrings is valid python::\n\n $ python refguide_check.py --doctests optimize\n\n\"\"\"\nimport copy\nimport doctest\nimport glob\nimport inspect\nimport io\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport warnings\nfrom argparse import ArgumentParser\nfrom contextlib import contextmanager, redirect_stderr\nfrom doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL\n\nimport docutils.core\nimport numpy as np\nimport sphinx\nfrom docutils.parsers.rst import directives\nfrom pkg_resources import parse_version\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))\nfrom numpydoc.docscrape_sphinx import get_doc_object\n\nif parse_version(sphinx.__version__) >= parse_version('1.5'):\n # Enable specific Sphinx directives\n from sphinx.directives import SeeAlso, Only\n directives.register_directive('seealso', SeeAlso)\n directives.register_directive('only', Only)\nelse:\n # Remove sphinx directives that don't run without Sphinx environment.\n # Sphinx < 1.5 installs all directives on import...\n directives._directives.pop('versionadded', None)\n directives._directives.pop('versionchanged', None)\n directives._directives.pop('moduleauthor', None)\n directives._directives.pop('sectionauthor', None)\n directives._directives.pop('codeauthor', None)\n directives._directives.pop('toctree', None)\n\n\nBASE_MODULE = \"scipy\"\n\nPUBLIC_SUBMODULES = [\n 'cluster',\n 'cluster.hierarchy',\n 'cluster.vq',\n 'constants',\n 'fft',\n 'fftpack',\n 'fftpack.convolve',\n 'integrate',\n 'interpolate',\n 'io',\n 'io.arff',\n 'io.wavfile',\n 'linalg',\n 'linalg.blas',\n 'linalg.lapack',\n 'linalg.interpolative',\n 'misc',\n 'ndimage',\n 'odr',\n 'optimize',\n 'signal',\n 'signal.windows',\n 'sparse',\n 'sparse.csgraph',\n 'sparse.linalg',\n 'spatial',\n 'spatial.distance',\n 'spatial.transform',\n 'special',\n 'stats',\n 'stats.mstats',\n 'stats.contingency',\n]\n\n# Docs for these modules are included in the parent module\nOTHER_MODULE_DOCS = {\n 'fftpack.convolve': 'fftpack',\n 'io.wavfile': 'io',\n 'io.arff': 'io',\n}\n\n# these names are known to fail doctesting and we like to keep it that way\n# e.g. 
sometimes pseudocode is acceptable etc\nDOCTEST_SKIPLIST = set([\n 'scipy.stats.kstwobign', # inaccurate cdf or ppf\n 'scipy.stats.levy_stable',\n 'scipy.special.sinc', # comes from numpy\n 'scipy.misc.who', # comes from numpy\n 'scipy.optimize.show_options',\n 'io.rst', # XXX: need to figure out how to deal w/ mat files\n])\n\n# these names are not required to be present in ALL despite being in\n# autosummary:: listing\nREFGUIDE_ALL_SKIPLIST = [\n r'scipy\\.sparse\\.csgraph',\n r'scipy\\.sparse\\.linalg',\n r'scipy\\.spatial\\.distance',\n r'scipy\\.linalg\\.blas\\.[sdczi].*',\n r'scipy\\.linalg\\.lapack\\.[sdczi].*',\n]\n\n# these names are not required to be in an autosummary:: listing\n# despite being in ALL\nREFGUIDE_AUTOSUMMARY_SKIPLIST = [\n r'scipy\\.special\\..*_roots', # old aliases for scipy.special.*_roots\n r'scipy\\.special\\.jn', # alias for jv\n r'scipy\\.ndimage\\.sum', # alias for sum_labels\n r'scipy\\.linalg\\.solve_lyapunov', # deprecated name\n r'scipy\\.stats\\.contingency\\.chi2_contingency',\n r'scipy\\.stats\\.contingency\\.expected_freq',\n r'scipy\\.stats\\.contingency\\.margins',\n r'scipy\\.stats\\.reciprocal',\n]\n# deprecated windows in scipy.signal namespace\nfor name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',\n 'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',\n 'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',\n 'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):\n REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\\.signal\\.' + name)\n\nHAVE_MATPLOTLIB = False\n\n\ndef short_path(path, cwd=None):\n \"\"\"\n Return relative or absolute path name, whichever is shortest.\n \"\"\"\n if not isinstance(path, str):\n return path\n if cwd is None:\n cwd = os.getcwd()\n abspath = os.path.abspath(path)\n relpath = os.path.relpath(path, cwd)\n if len(abspath) <= len(relpath):\n return abspath\n return relpath\n\n\ndef find_names(module, names_dict):\n # Refguide entries:\n #\n # - 3 spaces followed by function name, and maybe some spaces, some\n # dashes, and an explanation; only function names listed in\n # refguide are formatted like this (mostly, there may be some false\n # positives)\n #\n # - special directives, such as data and function\n #\n # - (scipy.constants only): quoted list\n #\n patterns = [\n r\"^\\s\\s\\s([a-z_0-9A-Z]+)(\\s+-+.*)?$\",\n r\"^\\.\\. (?:data|function)::\\s*([a-z_0-9A-Z]+)\\s*$\"\n ]\n\n if module.__name__ == 'scipy.constants':\n patterns += [\"^``([a-z_0-9A-Z]+)``\"]\n\n patterns = [re.compile(pattern) for pattern in patterns]\n module_name = module.__name__\n\n for line in module.__doc__.splitlines():\n res = re.search(r\"^\\s*\\.\\. 
(?:currentmodule|module):: ([a-z0-9A-Z_.]+)\\s*$\", line)\n if res:\n module_name = res.group(1)\n continue\n\n for pattern in patterns:\n res = re.match(pattern, line)\n if res is not None:\n name = res.group(1)\n entry = '.'.join([module_name, name])\n names_dict.setdefault(module_name, set()).add(name)\n break\n\n\ndef get_all_dict(module):\n \"\"\"Return a copy of the __all__ dict with irrelevant items removed.\"\"\"\n if hasattr(module, \"__all__\"):\n all_dict = copy.deepcopy(module.__all__)\n else:\n all_dict = copy.deepcopy(dir(module))\n all_dict = [name for name in all_dict\n if not name.startswith(\"_\")]\n for name in ['absolute_import', 'division', 'print_function']:\n try:\n all_dict.remove(name)\n except ValueError:\n pass\n\n # Modules are almost always private; real submodules need a separate\n # run of refguide_check.\n all_dict = [name for name in all_dict\n if not inspect.ismodule(getattr(module, name, None))]\n\n deprecated = []\n not_deprecated = []\n for name in all_dict:\n f = getattr(module, name, None)\n if callable(f) and is_deprecated(f):\n deprecated.append(name)\n else:\n not_deprecated.append(name)\n\n others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))\n\n return not_deprecated, deprecated, others\n\n\ndef compare(all_dict, others, names, module_name):\n \"\"\"Return sets of objects only in __all__, refguide, or completely missing.\"\"\"\n only_all = set()\n for name in all_dict:\n if name not in names:\n for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:\n if re.match(pat, module_name + '.' + name):\n break\n else:\n only_all.add(name)\n\n only_ref = set()\n missing = set()\n for name in names:\n if name not in all_dict:\n for pat in REFGUIDE_ALL_SKIPLIST:\n if re.match(pat, module_name + '.' 
+ name):\n if name not in others:\n missing.add(name)\n break\n else:\n only_ref.add(name)\n\n return only_all, only_ref, missing\n\ndef is_deprecated(f):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"error\")\n try:\n f(**{\"not a kwarg\":None})\n except DeprecationWarning:\n return True\n except Exception:\n pass\n return False\n\ndef check_items(all_dict, names, deprecated, others, module_name, dots=True):\n num_all = len(all_dict)\n num_ref = len(names)\n\n output = \"\"\n\n output += \"Non-deprecated objects in __all__: %i\\n\" % num_all\n output += \"Objects in refguide: %i\\n\\n\" % num_ref\n\n only_all, only_ref, missing = compare(all_dict, others, names, module_name)\n dep_in_ref = set(only_ref).intersection(deprecated)\n only_ref = set(only_ref).difference(deprecated)\n\n if len(dep_in_ref) > 0:\n output += \"Deprecated objects in refguide::\\n\\n\"\n for name in sorted(deprecated):\n output += \" \" + name + \"\\n\"\n\n if len(only_all) == len(only_ref) == len(missing) == 0:\n if dots:\n output_dot('.')\n return [(None, True, output)]\n else:\n if len(only_all) > 0:\n output += \"ERROR: objects in %s.__all__ but not in refguide::\\n\\n\" % module_name\n for name in sorted(only_all):\n output += \" \" + name + \"\\n\"\n\n output += \"\\nThis issue can be fixed by adding these objects to\\n\"\n output += \"the function listing in __init__.py for this module\\n\"\n\n if len(only_ref) > 0:\n output += \"ERROR: objects in refguide but not in %s.__all__::\\n\\n\" % module_name\n for name in sorted(only_ref):\n output += \" \" + name + \"\\n\"\n\n output += \"\\nThis issue should likely be fixed by removing these objects\\n\"\n output += \"from the function listing in __init__.py for this module\\n\"\n output += \"or adding them to __all__.\\n\"\n\n if len(missing) > 0:\n output += \"ERROR: missing objects::\\n\\n\"\n for name in sorted(missing):\n output += \" \" + name + \"\\n\"\n\n if dots:\n output_dot('F')\n return [(None, False, output)]\n\n\ndef validate_rst_syntax(text, name, dots=True):\n if text is None:\n if dots:\n output_dot('E')\n return False, \"ERROR: %s: no documentation\" % (name,)\n\n ok_unknown_items = set([\n 'mod', 'currentmodule', 'autosummary', 'data',\n 'obj', 'versionadded', 'versionchanged', 'module', 'class', 'meth',\n 'ref', 'func', 'toctree', 'moduleauthor',\n 'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'\n ])\n\n # Run through docutils\n error_stream = io.StringIO()\n\n def resolve(name, is_label=False):\n return (\"http://foo\", name)\n\n token = '<RST-VALIDATE-SYNTAX-CHECK>'\n\n docutils.core.publish_doctree(\n text, token,\n settings_overrides = dict(halt_level=5,\n traceback=True,\n default_reference_context='title-reference',\n default_role='emphasis',\n link_base='',\n resolve_name=resolve,\n stylesheet_path='',\n raw_enabled=0,\n file_insertion_enabled=0,\n warning_stream=error_stream))\n\n # Print errors, disregarding unimportant ones\n error_msg = error_stream.getvalue()\n errors = error_msg.split(token)\n success = True\n output = \"\"\n\n for error in errors:\n lines = error.splitlines()\n if not lines:\n continue\n\n m = re.match(r'.*Unknown (?:interpreted text role|directive type) \"(.*)\".*$', lines[0])\n if m:\n if m.group(1) in ok_unknown_items:\n continue\n\n m = re.match(r'.*Error in \"math\" directive:.*unknown option: \"label\"', \" \".join(lines), re.S)\n if m:\n continue\n\n output += name + lines[0] + \"::\\n \" + \"\\n \".join(lines[1:]).rstrip() + \"\\n\"\n success = False\n\n if 
not success:\n output += \" \" + \"-\"*72 + \"\\n\"\n for lineno, line in enumerate(text.splitlines()):\n output += \" %-4d %s\\n\" % (lineno+1, line)\n output += \" \" + \"-\"*72 + \"\\n\\n\"\n\n if dots:\n output_dot('.' if success else 'F')\n return success, output\n\n\ndef output_dot(msg='.', stream=sys.stderr):\n stream.write(msg)\n stream.flush()\n\n\ndef check_rest(module, names, dots=True):\n \"\"\"\n Check reStructuredText formatting of docstrings\n\n Returns: [(name, success_flag, output), ...]\n \"\"\"\n\n try:\n skip_types = (dict, str, unicode, float, int)\n except NameError:\n # python 3\n skip_types = (dict, str, float, int)\n\n results = []\n\n if module.__name__[6:] not in OTHER_MODULE_DOCS:\n results += [(module.__name__,) +\n validate_rst_syntax(inspect.getdoc(module),\n module.__name__, dots=dots)]\n\n for name in names:\n full_name = module.__name__ + '.' + name\n obj = getattr(module, name, None)\n\n if obj is None:\n results.append((full_name, False, \"%s has no docstring\" % (full_name,)))\n continue\n elif isinstance(obj, skip_types):\n continue\n\n if inspect.ismodule(obj):\n text = inspect.getdoc(obj)\n else:\n try:\n text = str(get_doc_object(obj))\n except Exception:\n import traceback\n results.append((full_name, False,\n \"Error in docstring format!\\n\" +\n traceback.format_exc()))\n continue\n\n m = re.search(\"([\\x00-\\x09\\x0b-\\x1f])\", text)\n if m:\n msg = (\"Docstring contains a non-printable character %r! \"\n \"Maybe forgot r\\\"\\\"\\\"?\" % (m.group(1),))\n results.append((full_name, False, msg))\n continue\n\n try:\n src_file = short_path(inspect.getsourcefile(obj))\n except TypeError:\n src_file = None\n\n if src_file:\n file_full_name = src_file + ':' + full_name\n else:\n file_full_name = full_name\n\n results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))\n\n return results\n\n\n### Doctest helpers ####\n\n# the namespace to run examples in\nDEFAULT_NAMESPACE = {'np': np}\n\n# the namespace to do checks in\nCHECK_NAMESPACE = {\n 'np': np,\n 'assert_allclose': np.testing.assert_allclose,\n 'assert_equal': np.testing.assert_equal,\n # recognize numpy repr's\n 'array': np.array,\n 'matrix': np.matrix,\n 'int64': np.int64,\n 'uint64': np.uint64,\n 'int8': np.int8,\n 'int32': np.int32,\n 'float32': np.float32,\n 'float64': np.float64,\n 'dtype': np.dtype,\n 'nan': np.nan,\n 'NaN': np.nan,\n 'inf': np.inf,\n 'Inf': np.inf,}\n\n\nclass DTRunner(doctest.DocTestRunner):\n DIVIDER = \"\\n\"\n\n def __init__(self, item_name, checker=None, verbose=None, optionflags=0):\n self._item_name = item_name\n doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,\n optionflags=optionflags)\n\n def _report_item_name(self, out, new_line=False):\n if self._item_name is not None:\n if new_line:\n out(\"\\n\")\n self._item_name = None\n\n def report_start(self, out, test, example):\n self._checker._source = example.source\n return doctest.DocTestRunner.report_start(self, out, test, example)\n\n def report_success(self, out, test, example, got):\n if self._verbose:\n self._report_item_name(out, new_line=True)\n return doctest.DocTestRunner.report_success(self, out, test, example, got)\n\n def report_unexpected_exception(self, out, test, example, exc_info):\n self._report_item_name(out)\n return doctest.DocTestRunner.report_unexpected_exception(\n self, out, test, example, exc_info)\n\n def report_failure(self, out, test, example, got):\n self._report_item_name(out)\n return doctest.DocTestRunner.report_failure(self, out, 
test,\n example, got)\n\nclass Checker(doctest.OutputChecker):\n obj_pattern = re.compile(r'at 0x[0-9a-fA-F]+>')\n vanilla = doctest.OutputChecker()\n rndm_markers = {'# random', '# Random', '#random', '#Random', \"# may vary\"}\n stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',\n 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',\n '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',\n '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',\n '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}\n\n def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):\n self.parse_namedtuples = parse_namedtuples\n self.atol, self.rtol = atol, rtol\n if ns is None:\n self.ns = dict(CHECK_NAMESPACE)\n else:\n self.ns = ns\n\n def check_output(self, want, got, optionflags):\n # cut it short if they are equal\n if want == got:\n return True\n\n # skip stopwords in source\n if any(word in self._source for word in self.stopwords):\n return True\n\n # skip random stuff\n if any(word in want for word in self.rndm_markers):\n return True\n\n # skip function/object addresses\n if self.obj_pattern.search(got):\n return True\n\n # ignore comments (e.g. signal.freqresp)\n if want.lstrip().startswith(\"#\"):\n return True\n\n # try the standard doctest\n try:\n if self.vanilla.check_output(want, got, optionflags):\n return True\n except Exception:\n pass\n\n # OK then, convert strings to objects\n try:\n a_want = eval(want, dict(self.ns))\n a_got = eval(got, dict(self.ns))\n except Exception:\n # Maybe we're printing a numpy array? This produces invalid python\n # code: `print(np.arange(3))` produces \"[0 1 2]\" w/o commas between\n # values. So, reinsert commas and retry.\n # TODO: handle (1) abberivation (`print(np.arange(10000))`), and\n # (2) n-dim arrays with n > 1\n s_want = want.strip()\n s_got = got.strip()\n cond = (s_want.startswith(\"[\") and s_want.endswith(\"]\") and\n s_got.startswith(\"[\") and s_got.endswith(\"]\"))\n if cond:\n s_want = \", \".join(s_want[1:-1].split())\n s_got = \", \".join(s_got[1:-1].split())\n return self.check_output(s_want, s_got, optionflags)\n\n if not self.parse_namedtuples:\n return False\n # suppose that \"want\" is a tuple, and \"got\" is smth like\n # MoodResult(statistic=10, pvalue=0.1).\n # Then convert the latter to the tuple (10, 0.1),\n # and then compare the tuples.\n try:\n num = len(a_want)\n regex = (r'[\\w\\d_]+\\(' +\n ', '.join([r'[\\w\\d_]+=(.+)']*num) +\n r'\\)')\n grp = re.findall(regex, got.replace('\\n', ' '))\n if len(grp) > 1: # no more than one for now\n return False\n # fold it back to a tuple\n got_again = '(' + ', '.join(grp[0]) + ')'\n return self.check_output(want, got_again, optionflags)\n except Exception:\n return False\n\n # ... 
and defer to numpy\n try:\n return self._do_check(a_want, a_got)\n except Exception:\n # heterog tuple, eg (1, np.array([1., 2.]))\n try:\n return all(self._do_check(w, g) for w, g in zip(a_want, a_got))\n except (TypeError, ValueError):\n return False\n\n def _do_check(self, want, got):\n # This should be done exactly as written to correctly handle all of\n # numpy-comparable objects, strings, and heterogeneous tuples\n try:\n if want == got:\n return True\n except Exception:\n pass\n return np.allclose(want, got, atol=self.atol, rtol=self.rtol)\n\n\ndef _run_doctests(tests, full_name, verbose, doctest_warnings):\n \"\"\"Run modified doctests for the set of `tests`.\n\n Returns: list of [(success_flag, output), ...]\n \"\"\"\n flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL\n runner = DTRunner(full_name, checker=Checker(), optionflags=flags,\n verbose=verbose)\n\n output = io.StringIO(newline='')\n success = True\n # Redirect stderr to the stdout or output\n tmp_stderr = sys.stdout if doctest_warnings else output\n\n @contextmanager\n def temp_cwd():\n cwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n try:\n os.chdir(tmpdir)\n yield tmpdir\n finally:\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n\n # Run tests, trying to restore global state afterward\n cwd = os.getcwd()\n with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \\\n redirect_stderr(tmp_stderr):\n # try to ensure random seed is NOT reproducible\n np.random.seed(None)\n\n for t in tests:\n t.filename = short_path(t.filename, cwd)\n fails, successes = runner.run(t, out=output.write)\n if fails > 0:\n success = False\n\n output.seek(0)\n return success, output.read()\n\n\ndef check_doctests(module, verbose, ns=None,\n dots=True, doctest_warnings=False):\n \"\"\"Check code in docstrings of the module's public symbols.\n\n Returns: list of [(item_name, success_flag, output), ...]\n \"\"\"\n if ns is None:\n ns = dict(DEFAULT_NAMESPACE)\n\n # Loop over non-deprecated items\n results = []\n\n for name in get_all_dict(module)[0]:\n full_name = module.__name__ + '.' + name\n\n if full_name in DOCTEST_SKIPLIST:\n continue\n\n try:\n obj = getattr(module, name)\n except AttributeError:\n import traceback\n results.append((full_name, False,\n \"Missing item!\\n\" +\n traceback.format_exc()))\n continue\n\n finder = doctest.DocTestFinder()\n try:\n tests = finder.find(obj, name, globs=dict(ns))\n except Exception:\n import traceback\n results.append((full_name, False,\n \"Failed to get doctests!\\n\" +\n traceback.format_exc()))\n continue\n\n success, output = _run_doctests(tests, full_name, verbose,\n doctest_warnings)\n\n if dots:\n output_dot('.' if success else 'F')\n\n results.append((full_name, success, output))\n\n if HAVE_MATPLOTLIB:\n import matplotlib.pyplot as plt\n plt.close('all')\n\n return results\n\n\ndef check_doctests_testfile(fname, verbose, ns=None,\n dots=True, doctest_warnings=False):\n \"\"\"Check code in a text file.\n\n Mimic `check_doctests` above, differing mostly in test discovery.\n (which is borrowed from stdlib's doctest.testfile here,\n https://github.com/python-git/python/blob/master/Lib/doctest.py)\n\n Returns: list of [(item_name, success_flag, output), ...]\n\n Notes\n -----\n\n refguide can be signalled to skip testing code by adding\n ``#doctest: +SKIP`` to the end of the line. If the output varies or is\n random, add ``# may vary`` or ``# random`` to the comment. for example\n\n >>> plt.plot(...) 
# doctest: +SKIP\n >>> random.randint(0,10)\n 5 # random\n\n We also try to weed out pseudocode:\n * We maintain a list of exceptions which signal pseudocode,\n * We split the text file into \"blocks\" of code separated by empty lines\n and/or intervening text.\n * If a block contains a marker, the whole block is then assumed to be\n pseudocode. It is then not being doctested.\n\n The rationale is that typically, the text looks like this:\n\n blah\n <BLANKLINE>\n >>> from numpy import some_module # pseudocode!\n >>> func = some_module.some_function\n >>> func(42) # still pseudocode\n 146\n <BLANKLINE>\n blah\n <BLANKLINE>\n >>> 2 + 3 # real code, doctest it\n 5\n\n \"\"\"\n results = []\n\n if ns is None:\n ns = dict(DEFAULT_NAMESPACE)\n\n _, short_name = os.path.split(fname)\n if short_name in DOCTEST_SKIPLIST:\n return results\n\n full_name = fname\n with open(fname, encoding='utf-8') as f:\n text = f.read()\n\n PSEUDOCODE = set(['some_function', 'some_module', 'import example',\n 'ctypes.CDLL', # likely need compiling, skip it\n 'integrate.nquad(func,' # ctypes integrate tutotial\n ])\n\n # split the text into \"blocks\" and try to detect and omit pseudocode blocks.\n parser = doctest.DocTestParser()\n good_parts = []\n for part in text.split('\\n\\n'):\n tests = parser.get_doctest(part, ns, fname, fname, 0)\n if any(word in ex.source for word in PSEUDOCODE\n for ex in tests.examples):\n # omit it\n pass\n else:\n # `part` looks like a good code, let's doctest it\n good_parts += [part]\n\n # Reassemble the good bits and doctest them:\n good_text = '\\n\\n'.join(good_parts)\n tests = parser.get_doctest(good_text, ns, fname, fname, 0)\n success, output = _run_doctests([tests], full_name, verbose,\n doctest_warnings)\n\n if dots:\n output_dot('.' if success else 'F')\n\n results.append((full_name, success, output))\n\n if HAVE_MATPLOTLIB:\n import matplotlib.pyplot as plt\n plt.close('all')\n\n return results\n\n\ndef init_matplotlib():\n global HAVE_MATPLOTLIB\n\n try:\n import matplotlib\n matplotlib.use('Agg')\n HAVE_MATPLOTLIB = True\n except ImportError:\n HAVE_MATPLOTLIB = False\n\n\ndef main(argv):\n parser = ArgumentParser(usage=__doc__.lstrip())\n parser.add_argument(\"module_names\", metavar=\"SUBMODULES\", default=[],\n nargs='*', help=\"Submodules to check (default: all public)\")\n parser.add_argument(\"--doctests\", action=\"store_true\", help=\"Run also doctests\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0)\n parser.add_argument(\"--doctest-warnings\", action=\"store_true\",\n help=\"Enforce warning checking for doctests\")\n parser.add_argument(\"--skip-tutorial\", action=\"store_true\",\n help=\"Skip running doctests in the tutorial.\")\n args = parser.parse_args(argv)\n\n modules = []\n names_dict = {}\n\n if args.module_names:\n args.skip_tutorial = True\n else:\n args.module_names = list(PUBLIC_SUBMODULES)\n\n os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'\n\n module_names = list(args.module_names)\n for name in list(module_names):\n if name in OTHER_MODULE_DOCS:\n name = OTHER_MODULE_DOCS[name]\n if name not in module_names:\n module_names.append(name)\n\n for submodule_name in module_names:\n module_name = BASE_MODULE + '.' 
+ submodule_name\n __import__(module_name)\n module = sys.modules[module_name]\n\n if submodule_name not in OTHER_MODULE_DOCS:\n find_names(module, names_dict)\n\n if submodule_name in args.module_names:\n modules.append(module)\n\n dots = True\n success = True\n results = []\n\n print(\"Running checks for %d modules:\" % (len(modules),))\n\n if args.doctests or not args.skip_tutorial:\n init_matplotlib()\n\n for module in modules:\n if dots:\n if module is not modules[0]:\n sys.stderr.write(' ')\n sys.stderr.write(module.__name__ + ' ')\n sys.stderr.flush()\n\n all_dict, deprecated, others = get_all_dict(module)\n names = names_dict.get(module.__name__, set())\n\n mod_results = []\n mod_results += check_items(all_dict, names, deprecated, others, module.__name__)\n mod_results += check_rest(module, set(names).difference(deprecated),\n dots=dots)\n if args.doctests:\n mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,\n doctest_warnings=args.doctest_warnings)\n\n for v in mod_results:\n assert isinstance(v, tuple), v\n\n results.append((module, mod_results))\n\n if dots:\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n if not args.skip_tutorial:\n base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')\n tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')\n print('\\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))\n for filename in sorted(glob.glob(tut_path)):\n if dots:\n sys.stderr.write('\\n')\n sys.stderr.write(os.path.split(filename)[1] + ' ')\n sys.stderr.flush()\n\n tut_results = check_doctests_testfile(filename, (args.verbose >= 2),\n dots=dots, doctest_warnings=args.doctest_warnings)\n\n def scratch():\n pass # stub out a \"module\", see below\n scratch.__name__ = filename\n results.append((scratch, tut_results))\n\n if dots:\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n # Report results\n all_success = True\n\n for module, mod_results in results:\n success = all(x[1] for x in mod_results)\n all_success = all_success and success\n\n if success and args.verbose == 0:\n continue\n\n print(\"\")\n print(\"=\" * len(module.__name__))\n print(module.__name__)\n print(\"=\" * len(module.__name__))\n print(\"\")\n\n for name, success, output in mod_results:\n if name is None:\n if not success or args.verbose >= 1:\n print(output.strip())\n print(\"\")\n elif not success or (args.verbose >= 2 and output.strip()):\n print(name)\n print(\"-\"*len(name))\n print(\"\")\n print(output.strip())\n print(\"\")\n\n if all_success:\n print(\"\\nOK: refguide and doctests checks passed!\")\n sys.exit(0)\n else:\n print(\"\\nERROR: refguide or doctests have errors\")\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main(argv=sys.argv[1:])\n"
] | [
[
"numpy.take",
"numpy.testing.assert_array_equal",
"numpy.random.random",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.pad"
],
[
"numpy.allclose",
"numpy.random.seed",
"numpy.errstate",
"numpy.printoptions",
"matplotlib.pyplot.close",
"matplotlib.use"
]
] |
willshiao/brgan | [
"99d1627176a59811bf9032ef1f99d6e7261095fb"
] | [
"src/dsloader/kronecker.py"
] | [
"import networkx as nx\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom dsloader.util import kron_graph, random_binary, make_fractional\n\n\nclass KroneckerDataset (Dataset):\n\n def __init__(self, kron_iter=4, seed_size=4, fixed_seed=None, num_graphs=1, perms_per_graph=256, progress_bar=False):\n self.kron_iter = kron_iter\n self.seed_size = seed_size\n\n\n self.num_nodes = seed_size ** (kron_iter + 1)\n self.seeds = []\n self.matrices = []\n\n num_iter = range(num_graphs)\n if progress_bar:\n from tqdm import tqdm\n num_iter = tqdm(num_iter)\n\n for i in num_iter:\n seed = random_binary(seed_size, use_sparsity=False)\n self.seeds.append(seed)\n if fixed_seed is not None:\n k_g = kron_graph(fixed_seed, n=kron_iter).astype(np.float)\n else:\n k_g = kron_graph(seed, n=kron_iter).astype(np.float)\n for j in range(perms_per_graph):\n self.matrices.append(make_fractional(k_g, inplace=False))\n \n \n def __len__(self):\n return len(self.matrices)\n\n def __getitem__(self, idx):\n return torch.tensor(self.matrices[idx])\n"
] | [
[
"torch.tensor"
]
] |
koriavinash1/pgm | [
"89e11b61f7141a75d8991ff4ea229ef66d7a4a0c"
] | [
"examples/assignment3/MH.py"
] | [
"import sys\nimport numpy as np\nsys.path.append('../..')\nfrom pgm.inference.MetropolisHastings import MH\nfrom matplotlib import pyplot as plt\n\ndef Gamma(theta, k = 1):\n def G(k):\n if k <= 0: return 1\n elif k == 0.5: return np.pi **0.5\n return k*G(k-1)\n def distribution(x):\n x = np.abs(x)\n return (x**(k-1))*np.exp(-x/theta)/((theta**k) * G(k)) \n return distribution\n\n\ndef proposalDistribution(sigma=0.1):\n \"\"\"\n Describes example proposal distribution\n considers gaussion distribution with fixed sigma\n as the mean keeps changing it's made an inner function argument\n \"\"\"\n def QDistribution(param = 0):\n return lambda x: (1/(((2*np.pi)**0.5) * sigma))*np.exp(-((x-param)**2)/ (sigma**2))\n\n return QDistribution, lambda x: np.random.normal(x, sigma)\n\n\n# ==========================================\nfunction = Gamma(theta=5.5, k=1)\nsigma = [0.1, 1.0, 2.0]\nburnin = [2, 5, 10, 100, 200]\n\n\"\"\"\nfor sig in sigma:\n for _burnin in burnin: \n proposalDist, proposalSamp = proposalDistribution(sig)\n\n mh = MH(function, _burnin, proposalDist, proposalSamp)\n nMontecarlo = 1000\n\n for _ in range(nMontecarlo):\n next(mh.sampler())\n\n sampledvalues = np.array(mh.x_seq)\n print(\"sig, burin, mean, bacc, cacc: \", sig, _burnin, np.mean(sampledvalues), np.mean(mh.burninAcc), np.mean(mh.collectionAcc))\n\n\n\"\"\"\nx = np.linspace(-20, 20, 500)\nfx = function(x)\n\nproposalDist, proposalSamp = proposalDistribution(sigma = 2.0)\nmh = MH(function, 100, proposalDist, proposalSamp)\nfor _ in range(1000):\n next(mh.sampler())\n\nsampledvalues = np.array(mh.x_seq)\nplt.plot(x, fx, 'b--', linewidth=2.0)\n\nhist = np.histogram(sampledvalues, bins=50)\nx = hist[1][1:]\nhist = hist[0]\nprint(hist.shape, x.shape)\nhist = hist*np.max(fx)/np.max(hist)\nplt.bar(x, hist, color = 'g', width=1.8, alpha=0.7)\n# plt.hist(sampledvalues, 50, density=True, stacked=True, facecolor='g', alpha=0.7, linewidth=0)\nplt.legend(['target pdf', 'sampled histogram'])\nplt.show()\n\nplt.plot(sampledvalues, linewidth=2.0)\nplt.ylim(-20.0, 20.0)\nplt.show()\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.histogram",
"numpy.abs",
"numpy.random.normal",
"numpy.exp",
"matplotlib.pyplot.show",
"numpy.max",
"matplotlib.pyplot.ylim",
"numpy.array",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.bar"
]
] |
Freakawho/sagemaker-tensorflow-training-toolkit-master | [
"f37c7d85600beb5461788db8c471b66c25beff8f"
] | [
"src/sagemaker_tensorflow_container/training.py"
] | [
"# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License'). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the 'license' file accompanying this file. This file is\n# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom __future__ import absolute_import\n\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport subprocess\nimport time\n\nfrom sagemaker_training import entry_point, environment, mapping, runner\nimport tensorflow as tf\n\nfrom sagemaker_tensorflow_container import s3_utils\n\nlogger = logging.getLogger(__name__)\n\nSAGEMAKER_PARAMETER_SERVER_ENABLED = \"sagemaker_parameter_server_enabled\"\nMODEL_DIR = \"/opt/ml/model\"\n\n\ndef _is_host_master(hosts, current_host):\n return current_host == hosts[0]\n\n\ndef _build_tf_config(hosts, current_host, ps_task=False):\n \"\"\"Builds a dictionary containing cluster information based on number of hosts and number of\n parameter servers.\n\n Args:\n hosts (list[str]): List of host names in the cluster\n current_host (str): Current host name\n ps_task (bool): Set to True if this config is built for a parameter server process\n (default: False)\n\n Returns:\n dict[str: dict]: A dictionary describing the cluster setup for distributed training.\n For more information regarding TF_CONFIG:\n https://cloud.google.com/ml-engine/docs/tensorflow/distributed-training-details\n \"\"\"\n # Assign the first host as the master. Rest of the hosts if any will be worker hosts.\n # The first ps_num hosts will also have a parameter task assign to them.\n masters = hosts[:1]\n workers = hosts[1:]\n ps = hosts if len(hosts) > 1 else None\n\n def host_addresses(hosts, port=2222):\n return [\"{}:{}\".format(host, port) for host in hosts]\n\n tf_config = {\"cluster\": {\"master\": host_addresses(masters)}, \"environment\": \"cloud\"}\n\n if ps:\n tf_config[\"cluster\"][\"ps\"] = host_addresses(ps, port=\"2223\")\n\n if workers:\n tf_config[\"cluster\"][\"worker\"] = host_addresses(workers)\n\n if ps_task:\n if ps is None:\n raise ValueError(\n \"Cannot have a ps task if there are no parameter servers in the cluster\"\n )\n task_type = \"ps\"\n task_index = ps.index(current_host)\n elif _is_host_master(hosts, current_host):\n task_type = \"master\"\n task_index = 0\n else:\n task_type = \"worker\"\n task_index = workers.index(current_host)\n\n tf_config[\"task\"] = {\"index\": task_index, \"type\": task_type}\n return tf_config\n\n\ndef _run_ps(env, cluster):\n logger.info(\"Running distributed training job with parameter servers\")\n\n cluster_spec = tf.train.ClusterSpec(cluster)\n task_index = env.hosts.index(env.current_host)\n # Force parameter server to run on cpu. 
Running multiple TensorFlow processes on the same\n # GPU is not safe:\n # https://stackoverflow.com/questions/46145100/is-it-unsafe-to-run-multiple-tensorflow-processes-on-the-same-gpu\n no_gpu_config = tf.ConfigProto(device_count={\"GPU\": 0})\n\n server = tf.train.Server(\n cluster_spec, job_name=\"ps\", task_index=task_index, config=no_gpu_config\n )\n\n multiprocessing.Process(target=lambda: server.join()).start()\n\n\ndef _run_worker(env, cmd_args, tf_config):\n env_vars = env.to_env_vars()\n env_vars[\"TF_CONFIG\"] = json.dumps(tf_config)\n\n entry_point.run(\n uri=env.module_dir,\n user_entry_point=env.user_entry_point,\n args=cmd_args,\n env_vars=env_vars,\n capture_error=True,\n )\n\n\ndef _wait_until_master_is_down(master):\n while True:\n try:\n subprocess.check_call(\n [\"curl\", \"{}:2222\".format(master)], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n logger.info(\"master {} is still up, waiting for it to exit\".format(master))\n time.sleep(10)\n except subprocess.CalledProcessError:\n logger.info(\"master {} is down, stopping parameter server\".format(master))\n return\n\n\ndef train(env, cmd_args):\n \"\"\"Get training job environment from env and run the training job.\n\n Args:\n env (sagemaker_training.env.TrainingEnv): Instance of TrainingEnv class\n \"\"\"\n parameter_server_enabled = env.additional_framework_parameters.get(\n SAGEMAKER_PARAMETER_SERVER_ENABLED, False\n )\n if len(env.hosts) > 1 and parameter_server_enabled:\n\n tf_config = _build_tf_config(hosts=env.hosts, current_host=env.current_host)\n\n logger.info(\"Running distributed training job with parameter servers\")\n logger.info(\"Launching parameter server process\")\n _run_ps(env, tf_config[\"cluster\"])\n logger.info(\"Launching worker process\")\n _run_worker(env, cmd_args, tf_config)\n\n if not _is_host_master(env.hosts, env.current_host):\n _wait_until_master_is_down(env.hosts[0])\n\n else:\n\n mpi_enabled = env.additional_framework_parameters.get(\"sagemaker_mpi_enabled\")\n\n if mpi_enabled:\n runner_type = runner.MPIRunnerType\n else:\n runner_type = runner.ProcessRunnerType\n\n entry_point.run(\n uri=env.module_dir,\n user_entry_point=env.user_entry_point,\n args=cmd_args,\n env_vars=env.to_env_vars(),\n capture_error=True,\n runner_type=runner_type,\n )\n\n\ndef _log_model_missing_warning(model_dir):\n pb_file_exists = False\n file_exists = False\n for dirpath, dirnames, filenames in os.walk(model_dir):\n if filenames:\n file_exists = True\n for f in filenames:\n if \"saved_model.pb\" in f or \"saved_model.pbtxt\" in f:\n pb_file_exists = True\n path, direct_parent_dir = os.path.split(dirpath)\n if not str.isdigit(direct_parent_dir):\n logger.warn(\n \"Your model will NOT be servable with SageMaker TensorFlow Serving containers. \"\n 'The SavedModel bundle is under directory \"{}\", not a numeric name.'.format(\n direct_parent_dir\n )\n )\n\n if not file_exists:\n logger.warn(\n \"No model artifact is saved under path {}.\"\n \" Your training job will not save any model files to S3.\\n\"\n \"For details of how to construct your training script see:\\n\"\n \"https://sagemaker.readthedocs.io/en/stable/using_tf.html#adapting-your-local-tensorflow-script\".format(\n model_dir\n )\n )\n elif not pb_file_exists:\n logger.warn(\n \"Your model will NOT be servable with SageMaker TensorFlow Serving container. 
\"\n \"The model artifact was not saved in the TensorFlow SavedModel directory structure:\\n\"\n \"https://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory\"\n )\n\n\ndef _model_dir_with_training_job(model_dir, job_name):\n if model_dir and model_dir.startswith(\"/opt/ml\"):\n return model_dir\n else:\n return \"{}/{}/model\".format(model_dir, job_name)\n\n\ndef main():\n \"\"\"Training entry point\n \"\"\"\n hyperparameters = environment.read_hyperparameters()\n env = environment.Environment(hyperparameters=hyperparameters)\n\n user_hyperparameters = env.hyperparameters\n\n # If the training job is part of the multiple training jobs for tuning, we need to append the training job name to\n # model_dir in case they read from/write to the same object\n if \"_tuning_objective_metric\" in hyperparameters:\n model_dir = _model_dir_with_training_job(hyperparameters.get(\"model_dir\"), env.job_name)\n logger.info(\"Appending the training job name to model_dir: {}\".format(model_dir))\n user_hyperparameters[\"model_dir\"] = model_dir\n\n s3_utils.configure(user_hyperparameters.get(\"model_dir\"), os.environ.get(\"SAGEMAKER_REGION\"))\n train(env, mapping.to_cmd_args(user_hyperparameters))\n _log_model_missing_warning(MODEL_DIR)\n"
] | [
[
"tensorflow.train.Server",
"tensorflow.ConfigProto",
"tensorflow.train.ClusterSpec"
]
] |
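The `_build_tf_config` helper in the file above assembles the `TF_CONFIG` cluster description from the SageMaker host list. A minimal standalone sketch of that dictionary construction is shown below; the host names are hypothetical placeholders and the parameter-server task branch is omitted, so this is illustrative rather than the toolkit's actual API.

```python
import json

def build_tf_config(hosts, current_host, worker_port=2222, ps_port=2223):
    """Simplified sketch of the TF_CONFIG dict built by _build_tf_config above."""
    masters, workers = hosts[:1], hosts[1:]
    addresses = lambda hs, port: ["{}:{}".format(h, port) for h in hs]
    config = {"cluster": {"master": addresses(masters, worker_port)}, "environment": "cloud"}
    if workers:
        # Multi-host job: remaining hosts become workers, every host also runs a parameter server.
        config["cluster"]["worker"] = addresses(workers, worker_port)
        config["cluster"]["ps"] = addresses(hosts, ps_port)
    if current_host == hosts[0]:
        config["task"] = {"type": "master", "index": 0}
    else:
        config["task"] = {"type": "worker", "index": workers.index(current_host)}
    return config

# Hypothetical two-host cluster, viewed from the second (worker) host.
print(json.dumps(build_tf_config(["algo-1", "algo-2"], "algo-2"), indent=2))
```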
TanayGahlot/mne-python | [
"857aa97c201451b82931c5eba50642975afc423d"
] | [
"examples/decoding/plot_decoding_csp_eeg.py"
] | [
"\"\"\"\n===========================================================================\nMotor imagery decoding from EEG data using the Common Spatial Pattern (CSP)\n===========================================================================\n\nDecoding of motor imagery applied to EEG data decomposed using CSP.\nHere the classifier is applied to features extracted on CSP filtered signals.\n\nSee http://en.wikipedia.org/wiki/Common_spatial_pattern and [1]\n\nThe EEGBCI dataset is documented in [2]\nThe data set is available at PhysioNet [3]\n\n[1] Zoltan J. Koles. The quantitative extraction and topographic mapping\n of the abnormal components in the clinical EEG. Electroencephalography\n and Clinical Neurophysiology, 79(6):440--447, December 1991.\n\n[2] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,\n Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer Interface\n (BCI) System. IEEE TBME 51(6):1034-1043\n\n[3] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,\n Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000) PhysioBank,\n PhysioToolkit, and PhysioNet: Components of a New Research Resource for\n Complex Physiologic Signals. Circulation 101(23):e215-e220\n\"\"\"\n# Authors: Martin Billinger <[email protected]>\n#\n# License: BSD (3-clause)\n\nprint(__doc__)\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mne import Epochs, pick_types\nfrom mne.io import concatenate_raws\nfrom mne.io.edf import read_raw_edf\nfrom mne.datasets import eegbci\nfrom mne.event import find_events\nfrom mne.decoding import CSP\nfrom mne.layouts import read_layout\n\n###############################################################################\n## Set parameters and read data\n\n# avoid classification of evoked responses by using epochs that start 1s after\n# cue onset.\ntmin, tmax = -1., 4.\nevent_id = dict(hands=2, feet=3)\nsubject = 1\nruns = [6, 10, 14] # motor imagery: hands vs feet\n\nraw_fnames = eegbci.load_data(subject, runs)\nraw_files = [read_raw_edf(f, tal_channel=-1, preload=True) for f in raw_fnames]\nraw = concatenate_raws(raw_files)\n\n# strip channel names\nraw.info['ch_names'] = [chn.strip('.') for chn in raw.info['ch_names']]\n\n# Apply band-pass filter\nraw.filter(7., 30., method='iir')\n\nevents = find_events(raw, shortest_event=0, stim_channel='STI 014')\n\npicks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,\n exclude='bads')\n\n# Read epochs (train will be done only between 1 and 2s)\n# Testing will be done with a running classifier\nepochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,\n baseline=None, preload=True, add_eeg_ref=False)\nepochs_train = epochs.crop(tmin=1., tmax=2., copy=True)\nlabels = epochs.events[:, -1] - 2\n\n###############################################################################\n# Classification with linear discrimant analysis\n\nfrom sklearn.lda import LDA\nfrom sklearn.cross_validation import ShuffleSplit\n\n# Assemble a classifier\nsvc = LDA()\ncsp = CSP(n_components=4, reg=None, log=True)\n\n# Define a monte-carlo cross-validation generator (reduce variance):\ncv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)\nscores = []\nepochs_data = epochs.get_data()\nepochs_data_train = epochs_train.get_data()\n\n# Use scikit-learn Pipeline with cross_val_score function\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.cross_validation import cross_val_score\nclf = Pipeline([('CSP', csp), ('SVC', svc)])\nscores = cross_val_score(clf, epochs_data_train, 
labels, cv=cv, n_jobs=1)\n\n# Printing the results\nclass_balance = np.mean(labels == labels[0])\nclass_balance = max(class_balance, 1. - class_balance)\nprint(\"Classification accuracy: %f / Chance level: %f\" % (np.mean(scores),\n class_balance))\n\n# plot CSP patterns estimated on full data for visualization\ncsp.fit_transform(epochs_data, labels)\n\nevoked = epochs.average()\nevoked.data = csp.patterns_.T\nevoked.times = np.arange(evoked.data.shape[0])\n\nlayout = read_layout('EEG1005')\nevoked.plot_topomap(times=[0, 1, 2, 61, 62, 63], ch_type='eeg', layout=layout,\n scale_time=1, time_format='%i', scale=1,\n unit='Patterns (AU)', size=1.5)\n\n###############################################################################\n# Look at performance over time\n\nsfreq = raw.info['sfreq']\nw_length = int(sfreq * 0.5) # running classifier: window length\nw_step = int(sfreq * 0.1) # running classifier: window step size\nw_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)\n\nscores_windows = []\n\nfor train_idx, test_idx in cv:\n y_train, y_test = labels[train_idx], labels[test_idx]\n\n X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)\n X_test = csp.transform(epochs_data_train[test_idx])\n\n # fit classifier\n svc.fit(X_train, y_train)\n\n # running classifier: test classifier on sliding window\n score_this_window = []\n for n in w_start:\n X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])\n score_this_window.append(svc.score(X_test, y_test))\n scores_windows.append(score_this_window)\n\n# Plot scores over time\nw_times = (w_start + w_length / 2.) / sfreq + epochs.tmin\n\nplt.figure()\nplt.plot(w_times, np.mean(scores_windows, 0), label='Score')\nplt.axvline(0, linestyle='--', color='k', label='Onset')\nplt.axhline(0.5, linestyle='-', color='k', label='Chance')\nplt.xlabel('time (s)')\nplt.ylabel('classification accuracy')\nplt.title('Classification score over time')\nplt.legend(loc='lower right')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axhline",
"sklearn.lda.LDA",
"numpy.arange",
"matplotlib.pyplot.title",
"sklearn.cross_validation.cross_val_score",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"sklearn.pipeline.Pipeline",
"matplotlib.pyplot.xlabel",
"numpy.mean"
]
] |
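The decoding example above imports `sklearn.lda.LDA` and `sklearn.cross_validation`, both of which were removed in later scikit-learn releases; the current equivalents are `sklearn.discriminant_analysis.LinearDiscriminantAnalysis` and `sklearn.model_selection`. A dependency-light sketch of the same `Pipeline` plus `cross_val_score` pattern follows, with random features standing in for the CSP output and a `StandardScaler` in place of the MNE `CSP` step, so it is illustrative only.

```python
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(42)
X = rng.randn(80, 4)            # stand-in for 4 CSP components per epoch
y = rng.randint(0, 2, size=80)  # stand-in for hands-vs-feet labels

clf = Pipeline([('scale', StandardScaler()), ('LDA', LinearDiscriminantAnalysis())])
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=42)
scores = cross_val_score(clf, X, y, cv=cv, n_jobs=1)
print("Classification accuracy: %f" % scores.mean())
```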
kafkasl/contextualLSTM | [
"a4421d592c3960c79842b0f23de162e61fcab3dd"
] | [
"src/lstm/lstm_wp.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Example / benchmark for building a PTB LSTM model.\n\nTrains the model described in:\n(Zaremba, et. al.) Recurrent Neural Network Regularization\nhttp://arxiv.org/abs/1409.2329\n\nThere are 3 supported model configurations:\n===========================================\n| config | epochs | train | valid | test\n===========================================\n| small | 13 | 37.99 | 121.39 | 115.91\n| medium | 39 | 48.45 | 86.16 | 82.07\n| large | 55 | 37.87 | 82.62 | 78.29\nThe exact results may vary depending on the random initialization.\n\nThe hyperparameters used in the model:\n- init_scale - the initial scale of the weights\n- learning_rate - the initial value of the learning rate\n- max_grad_norm - the maximum permissible norm of the gradient\n- num_layers - the number of LSTM layers\n- num_steps - the number of unrolled steps of LSTM\n- hidden_size - the number of LSTM units\n- max_epoch - the number of epochs trained with the initial learning rate\n- max_max_epoch - the total number of epochs for training\n- keep_prob - the probability of keeping weights in the dropout layer\n- lr_decay - the decay of the learning rate for each epoch after \"max_epoch\"\n- batch_size - the batch size\n\nThe data required for this example is in the data/ dir of the\nPTB dataset from Tomas Mikolov's webpage:\n\n$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz\n$ tar xvf simple-examples.tgz\n\nTo run:\n\n$ python ptb_word_lm.py --data_path=simple-examples/data/\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nsys.path.insert(0, \"../src/\")\n\nimport inspect\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport reader_wp as reader\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string(\n \"model\", \"small\",\n \"A type of model. Possible options are: small, medium, large.\")\n\nflags.DEFINE_string(\n \"tasks\", \"all\",\n \"Tasks to be performed. Possible options are: all, train, test, valid\")\n\nflags.DEFINE_string(\n \"word_to_id_path\", \"../models/eos/word2id_1000.pklz\",\n \"A type of model. 
Possible options are: small, medium, large.\")\n\nflags.DEFINE_string(\"data_path\", None,\n \"Where the training/test data is stored.\")\nflags.DEFINE_string(\"save_path\", None,\n \"Model output directory.\")\nflags.DEFINE_bool(\"use_fp16\", False,\n \"Train using 16-bit floats instead of 32bit floats\")\n\nFLAGS = flags.FLAGS\n\n\ndef data_type():\n return tf.float16 if FLAGS.use_fp16 else tf.float32\n\n\ndef get_vocab_size():\n word_to_id = VectorManager.read_vector(FLAGS.word_to_id_path)\n size = len(word_to_id)\n print(\"Vocabulary size: %s\" % size)\n return size\n\nclass WPInput(object):\n \"\"\"The input data.\"\"\"\n\n def __init__(self, config, data, name=None):\n self.batch_size = batch_size = config.batch_size\n self.num_steps = num_steps = config.num_steps\n self.epoch_size = ((len(data) // batch_size) - 1) // num_steps\n self.input_data, self.targets = reader.wiki_producer(\n data, batch_size, num_steps, name=name)\n\n\nclass WPModel(object):\n \"\"\"Word Prediction model.\"\"\"\n\n def __init__(self, is_training, config, input_):\n self._input = input_\n\n batch_size = input_.batch_size\n num_steps = input_.num_steps\n size = config.hidden_size\n vocab_size = config.vocab_size\n\n # Slightly better results can be obtained with forget gate biases\n # initialized to 1 but the hyperparameters of the model would need to be\n # different than reported in the paper.\n def lstm_cell():\n # With the latest TensorFlow source code (as of Mar 27, 2017),\n # the BasicLSTMCell will need a reuse parameter which is unfortunately not\n # defined in TensorFlow 1.0. To maintain backwards compatibility, we add\n # an argument check here:\n # if 'reuse' in inspect.getargspec(\n # tf.contrib.rnn.BasicLSTMCell.__init__).args:\n # return tf.contrib.rnn.BasicLSTMCell(\n # size, forget_bias=0.0, state_is_tuple=True,\n # reuse=tf.get_variable_scope().reuse)\n # else:\n return tf.contrib.rnn.BasicLSTMCell(\n size, forget_bias=0.0, state_is_tuple=True)\n\n attn_cell = lstm_cell\n if is_training and config.keep_prob < 1:\n def attn_cell():\n return tf.contrib.rnn.DropoutWrapper(\n lstm_cell(), output_keep_prob=config.keep_prob)\n\n cell = tf.contrib.rnn.MultiRNNCell(\n [attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)\n\n # data_type() returns float32 or float16\n self._initial_state = cell.zero_state(batch_size, data_type())\n\n with tf.device(\"/cpu:0\"):\n # TODO: replace TF input with my embeddings\n # TODO: implement PTB reader or something similar\n embedding = tf.get_variable(\n \"embedding\", [vocab_size, size], dtype=data_type())\n inputs = tf.nn.embedding_lookup(embedding, input_.input_data)\n\n\n if is_training and config.keep_prob < 1:\n # Dropout allows to use the net for train and testing\n # See: https://stackoverflow.com/questions/34597316/why-input-is-scaled-in-tf-nn-dropout-in-tensorflow\n # and: http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n\n # Simplified version of models/tutorials/rnn/rnn.py's rnn().\n # This builds an unrolled LSTM for tutorial purposes only.\n # In general, use the rnn() or state_saving_rnn() from rnn.py.\n #\n # The alternative version of the code below is:\n #\n inputs = tf.unstack(inputs, num=num_steps, axis=1)\n outputs, state = tf.contrib.rnn.static_rnn(\n cell, inputs, initial_state=self._initial_state)\n # TODO: passing the sequence_length argument will enable to input variable-length tensors\n\n # outputs = []\n # state = self._initial_state\n # with 
tf.variable_scope(\"RNN\"):\n # for time_step in range(num_steps):\n # if time_step > 0:\n # tf.get_variable_scope().reuse_variables()\n # (cell_output, state) = cell(inputs[:, time_step, :], state) # Call (inputs, state)\n # outputs.append(cell_output)\n\n # TODO: check why outputs are stacked and resized\n output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])\n softmax_w = tf.get_variable(\n \"softmax_w\", [size, vocab_size], dtype=data_type())\n softmax_b = tf.get_variable(\"softmax_b\", [vocab_size], dtype=data_type())\n logits = tf.matmul(output, softmax_w) + softmax_b\n loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n [logits],\n [tf.reshape(input_.targets, [-1])],\n [tf.ones([batch_size * num_steps], dtype=data_type())])\n self._cost = cost = tf.reduce_sum(loss) / batch_size\n self._final_state = state\n\n if not is_training:\n return\n\n self._lr = tf.Variable(0.0, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),\n config.max_grad_norm)\n optimizer = tf.train.GradientDescentOptimizer(self._lr)\n self._train_op = optimizer.apply_gradients(\n zip(grads, tvars),\n global_step=tf.contrib.framework.get_or_create_global_step())\n\n self._new_lr = tf.placeholder(\n tf.float32, shape=[], name=\"new_learning_rate\")\n self._lr_update = tf.assign(self._lr, self._new_lr)\n\n def assign_lr(self, session, lr_value):\n session.run(self._lr_update, feed_dict={self._new_lr: lr_value})\n\n @property\n def input(self):\n return self._input\n\n @property\n def initial_state(self):\n return self._initial_state\n\n @property\n def cost(self):\n return self._cost\n\n @property\n def final_state(self):\n return self._final_state\n\n @property\n def lr(self):\n return self._lr\n\n @property\n def train_op(self):\n return self._train_op\n\n\nclass SmallConfig(object):\n \"\"\"Small config.\"\"\"\n init_scale = 0.1\n learning_rate = 1.0\n max_grad_norm = 5\n num_layers = 2\n num_steps = 20\n hidden_size = 200\n max_epoch = 4\n max_max_epoch = 13\n keep_prob = 1.0\n lr_decay = 0.5\n batch_size = 20\n vocab_size = 27942\n\n\nclass MediumConfig(object):\n \"\"\"Medium config.\"\"\"\n init_scale = 0.05\n learning_rate = 1.0\n max_grad_norm = 5\n num_layers = 2\n num_steps = 35\n hidden_size = 650\n max_epoch = 6\n max_max_epoch = 39\n keep_prob = 0.5\n lr_decay = 0.8\n batch_size = 20\n vocab_size = 10000\n\n\nclass LargeConfig(object):\n \"\"\"Large config.\"\"\"\n init_scale = 0.04\n learning_rate = 1.0\n max_grad_norm = 10\n num_layers = 2\n num_steps = 35\n hidden_size = 1024\n max_epoch = 14\n max_max_epoch = 55\n keep_prob = 0.35\n lr_decay = 1 / 1.15\n batch_size = 20\n vocab_size = 10000\n\n\nclass TestConfig(object):\n \"\"\"Tiny config, for testing.\"\"\"\n init_scale = 0.1\n learning_rate = 1.0\n max_grad_norm = 1\n num_layers = 1\n num_steps = 2\n hidden_size = 2\n max_epoch = 1\n max_max_epoch = 1\n keep_prob = 1.0\n lr_decay = 0.5\n batch_size = 20\n vocab_size = 10000\n\n\ndef run_epoch(session, model, eval_op=None, verbose=False):\n \"\"\"Runs the model on the given data.\"\"\"\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = session.run(model.initial_state)\n\n fetches = {\n \"cost\": model.cost,\n \"final_state\": model.final_state,\n }\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n\n for step in range(model.input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = 
session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n iters += model.input.num_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),\n iters * model.input.batch_size / (time.time() - start_time)))\n\n return np.exp(costs / iters)\n\n\ndef get_config():\n if FLAGS.model == \"small\":\n return SmallConfig()\n elif FLAGS.model == \"medium\":\n return MediumConfig()\n elif FLAGS.model == \"large\":\n return LargeConfig()\n elif FLAGS.model == \"test\":\n return TestConfig()\n else:\n raise ValueError(\"Invalid model: %s\", FLAGS.model)\n\n\ndef main(_):\n if not FLAGS.data_path:\n raise ValueError(\"Must set --data_path to wiki data directory\")\n\n raw_data = reader.wiki_raw_data(FLAGS.data_path, FLAGS.word_to_id_path)\n train_data, valid_data, test_data = raw_data\n\n #vocab_size = get_vocab_size()\n vocab_size = 126930\n\n config = get_config()\n config.vocab_size = vocab_size\n\n eval_config = get_config()\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n eval_config.vocab_size = vocab_size\n\n with tf.Graph().as_default():\n # Args: [minval, maxval]\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n\n with tf.name_scope(\"Train\"):\n train_input = WPInput(config=config, data=train_data, name=\"TrainInput\")\n with tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n m = WPModel(is_training=True, config=config, input_=train_input)\n tf.summary.scalar(\"Training Loss\", m.cost)\n tf.summary.scalar(\"Learning Rate\", m.lr)\n\n with tf.name_scope(\"Valid\"):\n valid_input = WPInput(config=config, data=valid_data, name=\"ValidInput\")\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mvalid = WPModel(is_training=False, config=config, input_=valid_input)\n tf.summary.scalar(\"Validation Loss\", mvalid.cost)\n\n with tf.name_scope(\"Test\"):\n test_input = WPInput(config=eval_config, data=test_data, name=\"TestInput\")\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mtest = WPModel(is_training=False, config=eval_config,\n input_=test_input)\n\n sv = tf.train.Supervisor(logdir=FLAGS.save_path)\n with sv.managed_session() as session:\n for i in range(config.max_max_epoch):\n lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)\n m.assign_lr(session, config.learning_rate * lr_decay)\n\n print(\"Epoch: %d Learning rate: %.3f\" % (i + 1, session.run(m.lr)))\n train_perplexity = run_epoch(session, m, eval_op=m.train_op,\n verbose=True)\n print(\"Epoch: %d Train Perplexity: %.3f\" % (i + 1, train_perplexity))\n valid_perplexity = run_epoch(session, mvalid)\n print(\"Epoch: %d Valid Perplexity: %.3f\" % (i + 1, valid_perplexity))\n\n test_perplexity = run_epoch(session, mtest)\n print(\"Test Perplexity: %.3f\" % test_perplexity)\n\n if FLAGS.save_path:\n print(\"Saving model to %s.\" % FLAGS.save_path)\n sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.reshape",
"tensorflow.unstack",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.contrib.rnn.static_rnn",
"tensorflow.name_scope",
"tensorflow.Variable",
"tensorflow.reduce_sum",
"tensorflow.nn.dropout",
"tensorflow.device",
"tensorflow.Graph",
"tensorflow.random_uniform_initializer",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.stack",
"tensorflow.app.run",
"tensorflow.assign",
"tensorflow.gradients",
"tensorflow.train.Supervisor",
"tensorflow.nn.embedding_lookup",
"tensorflow.placeholder",
"tensorflow.contrib.framework.get_or_create_global_step",
"numpy.exp",
"tensorflow.trainable_variables",
"tensorflow.train.GradientDescentOptimizer"
]
] |
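One detail worth isolating from the training loop above is the epoch-wise learning-rate annealing, `lr_decay ** max(i + 1 - max_epoch, 0.0)`. The short sketch below replays that schedule with the `SmallConfig` values, so the decay pattern can be inspected without running TensorFlow.

```python
# Values from SmallConfig above: learning_rate=1.0, lr_decay=0.5, max_epoch=4, max_max_epoch=13.
learning_rate, lr_decay, max_epoch, max_max_epoch = 1.0, 0.5, 4, 13

for i in range(max_max_epoch):
    # Full rate for the first max_epoch epochs, then halve the rate every epoch after that.
    decay = lr_decay ** max(i + 1 - max_epoch, 0.0)
    print("Epoch: %d Learning rate: %.3f" % (i + 1, learning_rate * decay))
```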
fazildgr8/virtual_pen_MNIST | [
"69055980ee0f0005766e62e3a1ca4e2a0259157c"
] | [
"pensetup.py"
] | [
"import cv2\r\nimport numpy as np\r\nimport time\r\n\r\n\r\n# A required callback method that goes into the trackbar function.\r\ndef nothing(x):\r\n pass\r\n\r\n\r\n# Initializing the webcam feed.\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3, 1280)\r\ncap.set(4, 720)\r\n\r\n# Create a window named trackbars.\r\ncv2.namedWindow(\"Trackbars\")\r\n\r\n# Now create 6 trackbars that will control the lower and upper range of\r\n# H,S and V channels. The Arguments are like this: Name of trackbar,\r\n# window name, range,callback function. For Hue the range is 0-179 and\r\n# for S,V its 0-255.\r\ncv2.createTrackbar(\"L - H\", \"Trackbars\", 0, 179, nothing)\r\ncv2.createTrackbar(\"L - S\", \"Trackbars\", 0, 255, nothing)\r\ncv2.createTrackbar(\"L - V\", \"Trackbars\", 0, 255, nothing)\r\ncv2.createTrackbar(\"U - H\", \"Trackbars\", 179, 179, nothing)\r\ncv2.createTrackbar(\"U - S\", \"Trackbars\", 255, 255, nothing)\r\ncv2.createTrackbar(\"U - V\", \"Trackbars\", 255, 255, nothing)\r\n\r\nwhile True:\r\n\r\n # Start reading the webcam feed frame by frame.\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n # Flip the frame horizontally (Not required)\r\n frame = cv2.flip(frame, 1)\r\n\r\n # Convert the BGR image to HSV image.\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n\r\n # Get the new values of the trackbar in real time as the user changes\r\n # them\r\n l_h = cv2.getTrackbarPos(\"L - H\", \"Trackbars\")\r\n l_s = cv2.getTrackbarPos(\"L - S\", \"Trackbars\")\r\n l_v = cv2.getTrackbarPos(\"L - V\", \"Trackbars\")\r\n u_h = cv2.getTrackbarPos(\"U - H\", \"Trackbars\")\r\n u_s = cv2.getTrackbarPos(\"U - S\", \"Trackbars\")\r\n u_v = cv2.getTrackbarPos(\"U - V\", \"Trackbars\")\r\n\r\n # Set the lower and upper HSV range according to the value selected\r\n # by the trackbar\r\n lower_range = np.array([l_h, l_s, l_v])\r\n upper_range = np.array([u_h, u_s, u_v])\r\n\r\n # Filter the image and get the binary mask, where white represents\r\n # your target color\r\n mask = cv2.inRange(hsv, lower_range, upper_range)\r\n\r\n # You can also visualize the real part of the target color (Optional)\r\n res = cv2.bitwise_and(frame, frame, mask=mask)\r\n\r\n # Converting the binary mask to 3 channel image, this is just so\r\n # we can stack it with the others\r\n mask_3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)\r\n\r\n # stack the mask, orginal frame and the filtered result\r\n stacked = np.hstack((mask_3, frame, res))\r\n\r\n # Show this stacked frame at 40% of the size.\r\n cv2.imshow('Trackbars', cv2.resize(stacked, None, fx=0.4, fy=0.4))\r\n\r\n # If the user presses ESC then exit the program\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\n\r\n # If the user presses `s` then print this array.\r\n if key == ord('s'):\r\n\r\n thearray = [[l_h, l_s, l_v], [u_h, u_s, u_v]]\r\n print(thearray)\r\n\r\n # Also save this array as penval.npy\r\n np.save('penval', thearray)\r\n break\r\n\r\n# Release the camera & destroy the windows.\r\ncap.release()\r\ncv2.destroyAllWindows()"
] | [
[
"numpy.array",
"numpy.save",
"numpy.hstack"
]
] |
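The calibration script above saves an HSV range in the layout `[[l_h, l_s, l_v], [u_h, u_s, u_v]]` and builds the pen mask with `cv2.inRange`. The sketch below applies the same masking step to a synthetic frame so it runs without a webcam; the HSV bounds are hypothetical values of the kind the trackbars would produce.

```python
import cv2
import numpy as np

# Hypothetical thresholds in the penval.npy layout: [[l_h, l_s, l_v], [u_h, u_s, u_v]].
lower = np.array([90, 60, 60])
upper = np.array([130, 255, 255])

# Synthetic BGR frame: a blue patch stands in for the pen tip.
frame = np.zeros((120, 120, 3), dtype=np.uint8)
frame[40:80, 40:80] = (255, 0, 0)               # pure blue in BGR

hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)           # white where the colour falls inside the range
res = cv2.bitwise_and(frame, frame, mask=mask)  # keep only the target colour
print(int(mask.sum() // 255), "pixels matched")
```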
gitter-badger/agent | [
"3f53eaa7ebdee3ab423c7b58785d584fe1a6ae11"
] | [
"neodroidagent/utilities/exploration/sampling/random_process/ornstein_uhlenbeck.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom .annealed_guassian import AnnealedGaussianProcess\n\n__author__ = \"Christian Heider Nielsen\"\n\n# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab\nimport numpy\n\n__all__ = [\"OrnsteinUhlenbeckProcess\"]\n\n\nclass OrnsteinUhlenbeckProcess(AnnealedGaussianProcess):\n def __init__(\n self,\n *,\n theta: float = 0.15,\n mean: float = 0.0,\n sigma: float = 1.0,\n dt: float = 1e-2,\n x_0=None,\n sigma_min: float = None,\n n_steps_annealing: int = 1000,\n **kwargs\n ):\n super().__init__(\n mean=mean,\n sigma=sigma,\n sigma_min=sigma_min,\n n_steps_annealing=n_steps_annealing,\n **kwargs\n )\n self.theta = theta\n self.mean = mean\n self.dt = dt\n self.x_0 = x_0\n self.reset()\n\n def sample(self, size):\n x = (\n self.x_prev\n + self.theta * (self.mean - self.x_prev) * self.dt\n + self.current_sigma * numpy.sqrt(self.dt) * numpy.random.normal(size=size)\n )\n self.x_prev = x\n self.n_steps += 1\n return x\n\n def reset(self):\n super().reset()\n self.x_prev = self.x_0 if self.x_0 is not None else numpy.zeros_like(self.x_0)\n\n\nif __name__ == \"__main__\":\n\n random_process = OrnsteinUhlenbeckProcess(theta=0.5)\n\n for i in range(1000):\n print(random_process.sample((2, 1)))\n"
] | [
[
"numpy.random.normal",
"numpy.zeros_like",
"numpy.sqrt"
]
] |
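The `sample()` method above is the discretised Ornstein-Uhlenbeck update, x_next = x + theta * (mean - x) * dt + sigma * sqrt(dt) * N(0, 1). A plain-NumPy sketch of that recurrence with the constructor's default parameters is given below, without the sigma-annealing handled by the parent class.

```python
import numpy as np

theta, mean, sigma, dt = 0.15, 0.0, 1.0, 1e-2  # defaults from the constructor above
np.random.seed(0)

x = np.zeros(2)  # previous state (x_prev), shape matches the sampled size
for _ in range(5):
    x = x + theta * (mean - x) * dt + sigma * np.sqrt(dt) * np.random.normal(size=x.shape)
    print(x)
```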
diegulio/Breed_Recognition-to-Buscomiperro | [
"040ee45b9b5c355c3ec2c7413cd89a623024ad4e"
] | [
"label_traincatset.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"label_TrainCatSet.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1vDyBZ7Ql-8qQ3l7EWJB9TfnwGy66qGGn\n\"\"\"\n\nimport pandas as pd\nimport os\nimport numpy as np\n\n# Enlisto los nombres de las imagenes\nimagenes = os.listdir('/content/drive/MyDrive/Colab Data/Proyecto buscomiperro/gatos')\nimagenes[:5]\n\ndef extract_ext(id): # Para que el resultado sea como el de razas le quito la extensión\n return os.path.splitext(id)[0]\n\nlabels = list(map(extract_ext, imagenes))\n\ndf = pd.DataFrame()\ndf['id'] = labels\ndf['breed'] = 'gato'\ndf.to_csv('cat_labels.csv')\n\n"
] | [
[
"pandas.DataFrame"
]
] |
lebrice/RoBO | [
"0cb58a1622d3a540f7714b239f0cedf048b6fd9f",
"0cb58a1622d3a540f7714b239f0cedf048b6fd9f"
] | [
"robo/fmin/bayesian_optimization.py",
"robo/maximizers/differential_evolution.py"
] | [
"import logging\nimport george\nimport numpy as np\nimport inspect\n\nfrom pybnn import BaseModel\nfrom pybnn.dngo import DNGO\n\nfrom robo.priors.default_priors import DefaultPrior\nfrom robo.models.base_model import BaseModel as BaseModel_\nfrom robo.models.wrapper_bohamiann import WrapperBohamiann\nfrom robo.models.gaussian_process import GaussianProcess\nfrom robo.models.gaussian_process_mcmc import GaussianProcessMCMC\nfrom robo.models.random_forest import RandomForest\nfrom robo.maximizers.base_maximizer import BaseMaximizer\nfrom robo.maximizers.scipy_optimizer import SciPyOptimizer\nfrom robo.maximizers.random_sampling import RandomSampling\nfrom robo.maximizers.differential_evolution import DifferentialEvolution\nfrom robo.solver.bayesian_optimization import BayesianOptimization\nfrom robo.acquisition_functions.base_acquisition import BaseAcquisitionFunction\nfrom robo.acquisition_functions.ei import EI\nfrom robo.acquisition_functions.pi import PI\nfrom robo.acquisition_functions.log_ei import LogEI\nfrom robo.acquisition_functions.lcb import LCB\nfrom robo.acquisition_functions.marginalization import MarginalizationGPMCMC\nfrom robo.initial_design import init_latin_hypercube_sampling\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef bayesian_optimization(objective_function, lower, upper, num_iterations=30, X_init=None, Y_init=None,\n maximizer=\"random\", acquisition_func=\"log_ei\", model_type=\"gp_mcmc\",\n n_init=3, rng=None, output_path=None):\n \"\"\"\n General interface for Bayesian optimization for global black box\n optimization problems.\n\n Parameters\n ----------\n objective_function: function\n The objective function that is minimized. This function gets a numpy\n array (D,) as input and returns the function value (scalar)\n lower: np.ndarray (D,)\n The lower bound of the search space\n upper: np.ndarray (D,)\n The upper bound of the search space\n num_iterations: int\n The number of iterations (initial design + BO)\n X_init: np.ndarray(N,D)\n Initial points to warmstart BO\n Y_init: np.ndarray(N,1)\n Function values of the already initial points\n maximizer: {\"random\", \"scipy\", \"differential_evolution\"}\n The optimizer for the acquisition function.\n acquisition_func: {\"ei\", \"log_ei\", \"lcb\", \"pi\"}\n The acquisition function\n model_type: {\"gp\", \"gp_mcmc\", \"rf\", \"bohamiann\", \"dngo\"}\n The model for the objective function.\n n_init: int\n Number of points for the initial design. 
Make sure that it\n is <= num_iterations.\n output_path: string\n Specifies the path where the intermediate output after each iteration will be saved.\n If None no output will be saved to disk.\n rng: numpy.random.RandomState\n Random number generator\n\n Returns\n -------\n dict with all results\n \"\"\"\n assert upper.shape[0] == lower.shape[0], \"Dimension miss match\"\n assert np.all(lower < upper), \"Lower bound >= upper bound\"\n assert n_init <= num_iterations, \"Number of initial design point has to be <= than the number of iterations\"\n\n if rng is None:\n rng = np.random.RandomState(np.random.randint(0, 10000))\n\n cov_amp = 2\n n_dims = lower.shape[0]\n\n initial_ls = np.ones([n_dims])\n exp_kernel = george.kernels.Matern52Kernel(initial_ls,\n ndim=n_dims)\n kernel = cov_amp * exp_kernel\n\n prior = DefaultPrior(len(kernel) + 1)\n\n n_hypers = 3 * len(kernel)\n if n_hypers % 2 == 1:\n n_hypers += 1\n\n if model_type == \"gp\":\n model = GaussianProcess(kernel, prior=prior, rng=rng,\n normalize_output=False, normalize_input=True,\n lower=lower, upper=upper)\n elif model_type == \"gp_mcmc\":\n model = GaussianProcessMCMC(kernel, prior=prior,\n n_hypers=n_hypers,\n chain_length=200,\n burnin_steps=100,\n normalize_input=True,\n normalize_output=False,\n rng=rng, lower=lower, upper=upper)\n\n elif model_type == \"rf\":\n model = RandomForest(rng=rng)\n\n elif model_type == \"bohamiann\":\n model = WrapperBohamiann()\n\n elif model_type == \"dngo\":\n model = DNGO()\n\n elif isinstance(model_type, (BaseModel, BaseModel_)):\n model = model_type\n\n elif callable(model_type):\n model = model_type()\n\n else:\n raise ValueError(\"'{}' is not a valid model\".format(model_type))\n\n if acquisition_func == \"ei\":\n a = EI(model)\n elif acquisition_func == \"log_ei\":\n a = LogEI(model)\n elif acquisition_func == \"pi\":\n a = PI(model)\n elif acquisition_func == \"lcb\":\n a = LCB(model)\n elif isinstance(acquisition_func, BaseAcquisitionFunction):\n a = acquisition_func\n elif callable(acquisition_func):\n a = acquisition_func(model)\n else:\n raise ValueError(\"'{}' is not a valid acquisition function\"\n .format(acquisition_func))\n\n if model_type == \"gp_mcmc\":\n acquisition_func = MarginalizationGPMCMC(a)\n else:\n acquisition_func = a\n\n if maximizer == \"random\":\n max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)\n elif maximizer == \"scipy\":\n max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)\n elif maximizer == \"differential_evolution\":\n max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)\n elif isinstance(maximizer, BaseMaximizer):\n max_func = maximizer\n elif callable(maximizer):\n max_func = maximizer(acquisition_func, lower, upper, rng=rng)\n else:\n raise ValueError(\"'{}' is not a valid function to maximize the \"\n \"acquisition function\".format(maximizer))\n\n bo = BayesianOptimization(objective_function, lower, upper,\n acquisition_func, model, max_func,\n initial_points=n_init, rng=rng,\n initial_design=init_latin_hypercube_sampling,\n output_path=output_path)\n\n x_best, f_min = bo.run(num_iterations, X=X_init, y=Y_init)\n\n results = dict()\n results[\"x_opt\"] = x_best\n results[\"f_opt\"] = f_min\n results[\"incumbents\"] = [inc for inc in bo.incumbents]\n results[\"incumbent_values\"] = [val for val in bo.incumbents_values]\n results[\"runtime\"] = bo.runtime\n results[\"overhead\"] = bo.time_overhead\n results[\"X\"] = [x.tolist() for x in bo.X]\n results[\"y\"] = [y for y in bo.y]\n return 
results\n",
"import sys\nimport numpy as np\nimport scipy as sp\n\nfrom robo.maximizers.base_maximizer import BaseMaximizer\n\n\nclass DifferentialEvolution(BaseMaximizer):\n\n def __init__(self, objective_function, lower, upper, n_iters=20, rng=None):\n \"\"\"\n\n Parameters\n ----------\n objective_function: acquisition function\n The acquisition function which will be maximized\n lower: np.ndarray (D)\n Lower bounds of the input space\n upper: np.ndarray (D)\n Upper bounds of the input space\n n_iters: int\n Number of iterations\n \"\"\"\n self.n_iters = n_iters\n super(DifferentialEvolution, self).__init__(objective_function, lower, upper, rng)\n\n def _acquisition_fkt_wrapper(self, acq_f):\n def _l(x):\n a = -acq_f(np.array([np.clip(x, self.lower, self.upper)]))\n if np.any(np.isinf(a)):\n return sys.float_info.max\n return a\n\n return _l\n\n def maximize(self):\n \"\"\"\n Maximizes the given acquisition function.\n\n Returns\n -------\n np.ndarray(N,D)\n Point with highest acquisition value.\n \"\"\"\n\n bounds = list(zip(self.lower, self.upper))\n\n res = sp.optimize.differential_evolution(self._acquisition_fkt_wrapper(self.objective_func),\n bounds, maxiter=self.n_iters)\n\n return np.clip(res[\"x\"], self.lower, self.upper)\n"
] | [
[
"numpy.ones",
"numpy.all",
"numpy.random.randint"
],
[
"numpy.isinf",
"numpy.clip"
]
] |
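The `DifferentialEvolution` maximizer above negates the acquisition function (SciPy minimises) and clips candidates to the box bounds. The sketch below exercises the same wrapper pattern on a toy quadratic acquisition; the objective and bounds are made up, and only the SciPy call and the clipping mirror the class shown here.

```python
import numpy as np
from scipy.optimize import differential_evolution

lower = np.array([-2.0, -2.0])
upper = np.array([2.0, 2.0])

def acquisition(x):
    # Toy acquisition with its maximum at (0.5, 0.5).
    return -np.sum((x - 0.5) ** 2)

def wrapped(x):
    # Negate for minimisation and clip the candidate into the box first, as in the class above.
    return -acquisition(np.clip(x, lower, upper))

res = differential_evolution(wrapped, list(zip(lower, upper)), maxiter=20)
print(np.clip(res.x, lower, upper))
```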
andersonmanhaes/ml_mestrado | [
"d737d80e07d9392895e4455e49a33b8700080cf1"
] | [
"T1/code/visualizar_reta.py"
] | [
"import os\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\ndef plot(filepath, theta):\n path = os.getcwd() + filepath\n dataset = pd.read_csv(path, header=None)\n X = dataset.iloc[:, 0:-1].values\n y = dataset.iloc[:, -1:].values\n\n t = np.arange(0, 25, 1)\n plt.scatter(X, y, color='red', marker='x', label='Training Data')\n plt.plot(t, theta[0] + (theta[1]*t), color='blue', label='Linear Regression')\n plt.axis([4, 25, -5, 25])\n plt.title('Populacao da cidade x Lucro da filial')\n plt.xlabel('Populacao da cidade (10k)')\n plt.ylabel('Lucro (10k)')\n plt.legend()\n plt.show()\n\n filename = 'target/plot1.2.png'\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n\n plt.savefig(filename)\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axis",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter"
]
] |
eyov7/CV_LTH_Pre-training-LLNL | [
"bb18ba2093328aeb4e5ab3929f2749264ef3c981"
] | [
"main_imp_visda.py"
] | [
"import argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\nimport copy \n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nfrom pruning_utils import *\nfrom visda2017 import VisDA17\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch Visda Training')\n################################ required settings ################################\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('--epochs', default=20, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('-b', '--batch-size', default=128, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.001, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\n\nparser.add_argument('--prune_type', default=None, type=str, help='prune type [lt, pt_trans]')\nparser.add_argument('--pre_weight', default=None, type=str)\nparser.add_argument('--dataset', default='visda17', type=str)\nparser.add_argument('--save_dir', default='results/', type=str)\nparser.add_argument('--percent', default=0.2, type=float, help='pruning rate for each iteration')\nparser.add_argument('--states', default=19, type=int, help='number of iterative pruning states')\nparser.add_argument('--start_state', default=0, type=int, help='number of iterative pruning states')\n\n################################ other settings ################################\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=5e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=50, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. 
')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\n\n\nbest_acc1 = 0\nbest_epoch = 0\n\ndef main():\n args = parser.parse_args()\n\n os.makedirs(args.save_dir, exist_ok=True)\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n main_worker(args.gpu, args)\n\n\ndef main_worker(gpu, args):\n global best_acc1, best_epoch\n args.gpu = gpu\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n # create model\n print(\"=> using model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=False)\n if_pruned = False\n\n assert args.dataset == 'visda17'\n\n ch = model.fc.in_features\n model.fc = nn.Linear(ch,12)\n\n if args.prune_type=='lt':\n print('using Lottery Tickets setting ')\n initalization = copy.deepcopy(model.state_dict())\n torch.save({'state_dict': initalization}, os.path.join(args.save_dir, 'random_init.pt'))\n\n elif args.prune_type=='pt_trans':\n print('using Pretrain Tickets setting')\n ticket_init_weight = torch.load(args.pre_weight)\n if 'state_dict' in ticket_init_weight.keys():\n ticket_init_weight = ticket_init_weight['state_dict']\n\n all_keys = list(ticket_init_weight.keys())\n for key in all_keys:\n if 'fc.' in key:\n del ticket_init_weight[key] \n\n print('layer number', len(ticket_init_weight.keys()))\n for key in ticket_init_weight.keys():\n assert key in model.state_dict().keys()\n model.load_state_dict(ticket_init_weight, strict=False)\n initalization = copy.deepcopy(model.state_dict())\n\n else:\n raise ValueError(\"Unknown Pruning Type\")\n\n print('Mode: Dataparallel')\n model = torch.nn.DataParallel(model).cuda()\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n args.start_state = checkpoint['state']\n best_acc1 = checkpoint['best_acc1']\n if_pruned = checkpoint['if_pruned']\n initalization = checkpoint['init_weight']\n\n if if_pruned:\n prune_model_custom(model.module, checkpoint['mask'], False)\n\n model.module.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n cudnn.benchmark = True\n\n # Data loading code\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_trans = transforms.Compose([\n transforms.RandomResizedCrop(size=224, scale=(0.75, 1.33)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n\n val_trans = transforms.Compose([\n 
transforms.Resize((224, 224)),\n transforms.ToTensor(),\n normalize,\n ])\n\n train_dataset = VisDA17(txt_file=os.path.join(args.data, \"train/image_list.txt\"), \n root_dir=os.path.join(args.data, \"train\"), transform=train_trans)\n val_dataset = VisDA17(txt_file=os.path.join(args.data, \"validation/image_list.txt\"), \n root_dir=os.path.join(args.data, \"validation\"), transform=val_trans)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion, args)\n return\n\n for prun_iter in range(args.start_state, args.states):\n\n check_sparsity(model.module, False)\n for epoch in range(args.start_epoch, args.epochs):\n\n print(optimizer.state_dict()['param_groups'][0]['lr'])\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args)\n\n # evaluate on validation set\n acc1 = validate(val_loader, model, criterion, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if is_best:\n best_epoch = epoch+1\n\n if if_pruned:\n mask_dict = extract_mask(model.state_dict())\n else:\n mask_dict = None\n\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state': prun_iter,\n 'arch': args.arch,\n 'state_dict': model.module.state_dict(),\n 'mask': mask_dict,\n 'best_acc1': best_acc1,\n 'optimizer' : optimizer.state_dict(),\n 'if_pruned': if_pruned,\n 'init_weight':initalization\n }, is_best, checkpoint=args.save_dir, best_name=str(prun_iter)+'model_best.pth.tar')\n\n check_sparsity(model.module, False)\n print('**best TA = ', best_acc1, 'best epoch = ', best_epoch)\n\n # start pruning \n print('start pruning model')\n pruning_model(model.module, args.percent, False)\n if_pruned = True\n \n current_mask = extract_mask(model.state_dict())\n remove_prune(model.module, False)\n\n model.module.load_state_dict(initalization)\n best_acc1 = 0 \n best_epoch = 0\n prune_model_custom(model.module, current_mask, False)\n validate(val_loader, model, criterion, args)\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n wp_steps = len(train_loader)\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n adjust_learning_rate(optimizer, epoch, args, i+1, steps_for_one_epoch=wp_steps)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient 
and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i)\n\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\ndef save_checkpoint(state, is_best, checkpoint, filename='checkpoint.pth.tar', best_name='model_best.pth.tar'):\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, best_name))\n\ndef adjust_learning_rate(optimizer, epoch, args, iterations, steps_for_one_epoch):\n\n max_lr = args.lr\n\n if epoch < 10:\n lr = max_lr\n else:\n lr = max_lr*0.1\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.load",
"torch.manual_seed",
"torch.save",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
]
] |
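The `accuracy()` helper at the end of the script above is a standard top-k accuracy computation. The self-contained sketch below repeats its steps on random logits, using `reshape` instead of `view` on the sliced tensor so it stays safe on non-contiguous memory; the 12-class head matches the VisDA-17 setup in the file, while the tensors themselves are random.

```python
import torch

def topk_accuracy(output, target, topk=(1, 5)):
    """Top-k accuracy, following the accuracy() helper above."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)  # (batch, maxk) predicted class indices
        correct = pred.t().eq(target.view(1, -1).expand_as(pred.t()))
        return [correct[:k].reshape(-1).float().sum(0) * (100.0 / batch_size) for k in topk]

logits = torch.randn(8, 12)              # 12 output classes, as in the VisDA-17 head above
labels = torch.randint(0, 12, (8,))
print(topk_accuracy(logits, labels))
```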
Lim-Guowei/RUL | [
"e23e97a373df73abc2fde14ce070dcb5230a79c2"
] | [
"eda.py"
] | [
"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dataloader import dataloader\nimport seaborn as sns\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\npd.set_option('display.float_format', '{:.6f}'.format)\n\ndef countNullPercent(dataframe):\n \"\"\" Print percentage of null values for each column in dataframe sorted in descending order\n \"\"\"\n nullCollect = {}\n for column in dataframe:\n rowCount = len(dataframe[column])\n nullCount = dataframe[column].isnull().sum()\n percentNull = round((nullCount/rowCount)*100, 2)\n nullCollect.update({column: percentNull})\n\n for key, value in sorted(nullCollect.items(), key=lambda item: item[1], reverse=True): # Sort dictionary based on value in descending order\n print(\"{}: {}\".format(key, value))\n return \n\ndef countUniqueVal(dataframe, column):\n \"\"\" Print unique values for each columns\n \"\"\"\n for count, name in enumerate(column):\n print(\"#{} - {}\".format(count, name))\n print(dataframe[name].value_counts())\n print(\"\\n\")\n return\n\ndef plot_by_unit(dataframe, unit):\n \"\"\" Generate visualization for each fleet unit\n Unit number can be obtained by inspecting \"unit\" column in dataframe\n Generate plot for each variable (x-axis) vs rul (y-axis)\n \"\"\"\n df_unit = dataframe[dataframe[\"unit\"] == unit]\n print(df_unit)\n\n ### Correlation plot\n plt.subplots(figsize=(20,15))\n color = plt.get_cmap('inferno') # default color\n color.set_bad('lightblue')\n corr_plot = sns.heatmap(data=df_unit.corr(), annot=False, cmap=color)\n plt.title(\"Correlation matrix for unit {}\".format(unit), fontdict={'fontsize': 16})\n plt.savefig(\"corr_plot_unit_{}.png\".format(unit))\n return\n\ndef rank_feature_importance(dataframe):\n feat_labels = dataframe.columns.values\n\n Y = dataframe[\"RUL\"]\n X = dataframe.drop([\"RUL\"], axis=1)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=42, shuffle=True, test_size=0.2)\n \n # Create a random forest classifier\n clf = RandomForestClassifier(n_estimators=100, random_state=0, n_jobs=-1)\n\n # Train the classifier\n clf.fit(X_train, Y_train)\n\n # Plot random forest feature importance\n importances = clf.feature_importances_\n indices = np.argsort(importances)\n\n plt.title('Feature Importances', fontdict={'fontsize': 16})\n plt.barh(range(len(indices)), importances[indices], color='b', align='center')\n plt.yticks(range(len(indices)), [feat_labels[i] for i in indices])\n plt.xlabel('Relative Importance')\n plt.savefig(\"feature_importance.png\")\n return\n\ndef add_lag_features(dataframe):\n\n dataframe[\"RUL_lag1\"] = dataframe[\"RUL\"].shift(1)\n dataframe[\"RUL_lag3\"] = dataframe[\"RUL\"].shift(3)\n dataframe[\"RUL_lag5\"] = dataframe[\"RUL\"].shift(5)\n dataframe = dataframe.iloc[5::] # Discard NaN rows\n \n fig = dataframe.plot(y=[\"RUL\", \"RUL_lag1\", \"RUL_lag1\", \"RUL_lag3\", \"RUL_lag5\"], \n kind=\"line\", \n title=\"Lag on RUL variable\", \n xlabel=\"index\", \n use_index=True,\n linewidth=1.0,\n alpha=0.7,\n xlim=(0, dataframe.index.max()),\n figsize=(20, 15)\n ).get_figure()\n \n fig.savefig(\"lag_on_RUL.png\")\n return\n\ndef eda(filename):\n df_dev, df_test = dataloader(filename)\n column_name = df_dev.columns.tolist()\n\n ### Check for null or zeroes\n countNullPercent(df_dev) # No null values in dataframe\n countNullPercent(df_test) # No null values in dataframe\n df_dev.describe().to_csv(\"df_dev_description.csv\")\n 
df_test.describe().to_csv(\"df_test_description.csv\")\n\n # Remove columns containing all zeroes\n # Remove \"cycle\" as \"RUL\" is sufficient as target variable\n df_dev = df_dev.drop(columns=[\"fan_eff_mod\", \"fan_flow_mod\", \"LPC_eff_mod\", \"LPC_flow_mod\", \"HPC_eff_mod\", \"HPC_flow_mod\", \"HPT_flow_mod\", \"LPT_eff_mod\", \"LPT_flow_mod\", \"cycle\"])\n df_test = df_test.drop(columns=[\"fan_eff_mod\", \"fan_flow_mod\", \"LPC_eff_mod\", \"LPC_flow_mod\", \"HPC_eff_mod\", \"HPC_flow_mod\", \"HPT_flow_mod\", \"LPT_eff_mod\", \"LPT_flow_mod\", \"cycle\"])\n\n ### Identify categorical features as \"unit\", \"Fc\", \"hs\"\n countUniqueVal(df_dev, [\"unit\", \"Fc\", \"hs\"])\n\n ### Generate correlation matrix plot for each unit in fleet \n plot_by_unit(df_dev, 1.0)\n plot_by_unit(df_dev, 2.0)\n plot_by_unit(df_dev, 3.0)\n plot_by_unit(df_dev, 4.0)\n plot_by_unit(df_dev, 5.0)\n plot_by_unit(df_dev, 6.0)\n\n # Rank feature importance using random forest classifier\n rank_feature_importance(df_dev)\n\n add_lag_features(df_dev)\n\n return\n \nif __name__ == \"__main__\":\n eda(\"N-CMAPSS_DS01-005.h5\")"
] | [
[
"matplotlib.pyplot.savefig",
"numpy.argsort",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"pandas.set_option",
"matplotlib.pyplot.title",
"matplotlib.pyplot.get_cmap",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.train_test_split"
]
] |
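`rank_feature_importance()` above fits a `RandomForestClassifier` and sorts `feature_importances_` to rank columns. The sketch below isolates that pattern on synthetic data (the feature matrix and target are invented) and prints the ranking instead of plotting it.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(0)
X = rng.randn(200, 5)             # synthetic features
y = (X[:, 2] > 0).astype(int)     # feature 2 is the informative one

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, shuffle=True, test_size=0.2)

clf = RandomForestClassifier(n_estimators=100, random_state=0, n_jobs=-1)
clf.fit(X_train, y_train)

ranking = np.argsort(clf.feature_importances_)[::-1]  # most important feature first
print("features ranked by importance:", ranking)
```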
fredyshox/AppVideoFramer | [
"0e43f2828d2e3737451a0cf1ec81e6840796ac30"
] | [
"Tools/fastlane-templates.py"
] | [
"#!/usr/bin/env python3\n# \n# Retrieve templates from fastlane/frameit\n#\n\nimport sys\nimport os \nfrom os import path\nfrom shutil import copyfile\nfrom tempfile import gettempdir\nimport re\nimport json\nimport cv2\nimport numpy as np\nfrom common import sanitize_color, sanitize_device_name, sanitize_device_key, apply_default_color\n\n# URL to frameit-frames repository\nFRAMEIT_URL = \"https://github.com/fastlane/frameit-frames/archive/gh-pages.zip\"\n\ndef main():\n if len(sys.argv) < 3:\n print(f\"Usage: {sys.argv[0]} resource_dir contents_file\")\n exit(1)\n\n resource_dir = sys.argv[1]\n contents_path = sys.argv[2]\n zip_path = path.join(resource_dir, \"gh-pages.zip\")\n repo_dir = path.join(resource_dir, \"frameit-frames-gh-pages\")\n\n print(\"Downloading frameit frames...\")\n status_code = os.system(f\"wget -q --show-progress -O \\\"{zip_path}\\\" \\\"{FRAMEIT_URL}\\\" && unzip -d \\\"{resource_dir}\\\" \\\"{zip_path}\\\"\")\n print(f\"Status code: {status_code}\")\n\n # path to latest frames\n frameit_dir = path.join(repo_dir, \"latest\")\n with open(contents_path, \"r\") as cf:\n contents = json.load(cf)\n\n for frame_path in os.listdir(frameit_dir):\n frame_path = path.join(frameit_dir, frame_path)\n filename = path.basename(frame_path)\n if not path.isfile(frame_path) or not filename_valid(filename):\n continue\n \n device_name = sanitize_device_name(filename)\n device_key = sanitize_device_key(device_name)\n device_color = sanitize_color(filename)\n print(f\"Found template: {frame_path}\")\n print(f\"Template {device_name} - {device_color}\")\n \n image = cv2.imread(frame_path, cv2.IMREAD_UNCHANGED) # read preserving alpha\n frame_height, frame_width = image.shape[:2]\n ox, oy, width, height = measure_screen_bounds(image)\n print(f\"==> +{ox}+{oy}, {width}x{height}\")\n\n if device_key in contents:\n device_info = contents[device_key]\n else:\n device_info = { \n \"images\": {},\n \"left\": ox,\n \"top\": oy,\n \"right\": ox + width,\n \"bottom\": oy + height,\n \"res_height\": frame_height,\n \"res_width\": frame_width\n }\n device_info[\"images\"][device_color] = filename\n \n contents[device_key] = device_info\n copyfile(frame_path, path.join(resource_dir, filename))\n\n # default colors - first model color which is available in DEFAULT_COLOR array\n for key in contents.keys():\n apply_default_color(contents, key)\n\n with open(contents_path, \"w\") as cf:\n json.dump(contents, cf, sort_keys=True, indent=4)\n\n print(\"Cleaning up...\")\n os.system(f\"rm {zip_path} && rm -r {repo_dir}\")\n\ndef measure_screen_bounds(image):\n alpha = image[:, :, 3]\n alpha = cv2.threshold(alpha, 252, 255, cv2.THRESH_BINARY_INV)[1] # 99% threshold\n # connected component analysis\n n, labels, stats, centroids = cv2.connectedComponentsWithStats(alpha, connectivity=8)\n # compare centroids to image center\n img_center = np.array([alpha.shape[0] // 2, alpha.shape[1] // 2])\n # component which contains image center should be screen\n screen_label = labels[img_center[0], img_center[1]]\n x, y, width, height = stats[screen_label][:4]\n return int(x), int(y), int(width), int(height)\n\ndef filename_valid(filename):\n pattern = \"^Apple iP.*\\.png$\"\n return re.search(pattern, filename) is not None\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array"
]
] |
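A hedged, standalone sketch (not taken from the repository above) of the alpha-threshold plus connected-components idea that `measure_screen_bounds` uses to locate the transparent screen cut-out; the synthetic frame and its dimensions are invented for illustration.

```python
import cv2
import numpy as np

# Synthetic 300x400 opaque RGBA "device frame" with a transparent 200x120 screen.
frame = np.full((300, 400, 4), 255, dtype=np.uint8)
frame[90:210, 100:300, 3] = 0                      # alpha = 0 inside the cut-out

alpha = np.ascontiguousarray(frame[:, :, 3])       # contiguous copy for OpenCV
mask = cv2.threshold(alpha, 252, 255, cv2.THRESH_BINARY_INV)[1]   # alpha <= 252 -> 255
n, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)

# As in the script above, the component under the image centre is taken as the screen.
cy, cx = mask.shape[0] // 2, mask.shape[1] // 2
x, y, w, h = stats[labels[cy, cx]][:4]
print(x, y, w, h)                                  # -> 100 90 200 120
```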
data-weirdo/studio | [
"48852c4f097f773ce3d408b59f79fda2e2d60470"
] | [
"function/python/brightics/function/transform/sql/functions.py"
] | [
"\"\"\"\n Copyright 2019 Samsung SDS\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \n http://www.apache.org/licenses/LICENSE-2.0\n \n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nimport dateutil.parser\nimport numpy as np\nfrom .serializer import _serialize\nfrom .serializer import _deserialize\nimport re\n\n\"\"\" \nconstants \n\"\"\"\n\n\ndef e():\n return np.math.e\n\n\ndef pi():\n return np.math.pi\n\n\"\"\"\nlambda functions\n\"\"\"\nlog = lambda _: np.math.log(_) if _ is not None else np.math.nan # ?\nln = lambda _: np.math.log(_)\nlog10 = lambda _: np.math.log10(_)\nlog2 = lambda _: np.math.log2(_)\nexp = lambda _: np.math.exp(_)\nexp2 = lambda _: np.math.pow(2, _)\nsqrt = lambda _: np.math.sqrt(_)\nceil = lambda _: np.math.ceil(_)\nfloor = lambda _: np.math.floor(_)\nsign = lambda _: int(np.sign(_))\n \nfactorial = lambda _: np.math.factorial(_)\n\npow = lambda a, b: np.math.pow(a, b)\n\nljust = lambda item, length, lpad_str: str(item).ljust(length, lpad_str) # ?\nrjust = lambda item, length, rpad_str: str(item).rjust(length, rpad_str) # ?\n\nis_null = lambda _: 1 if _ is None else 0\n\n\"\"\"\nregular expression related functions\n\"\"\"\n\nregexp = lambda exp, str_: False if re.search(exp, str_) is None else True\nregexp_replace = lambda initial_str, pattern, replacement: re.sub(pattern, replacement, initial_str)\n\n\ndef regexp_extract(subject, pattern, *index): # todo index??\n\n def _is_empty(tup):\n return not tup\n \n if _is_empty(index):\n return re.search(pattern, subject).group(1)\n else:\n return re.search(pattern, subject).group(index[0])\n \n\"\"\"\ndatetime related functions\n\"\"\"\n# todo weekofmonth, datediff, timediff\n\n\ndef datediff(end_isotime, start_isotime):\n end_datetime = dateutil.parser.parse(end_isotime)\n start_datetime = dateutil.parser.parse(start_isotime)\n diff_datetime = end_datetime - start_datetime\n return diff_datetime.days\n\n\ndef strftime_a(isotime): # ?\n return dateutil.parser.parse(isotime).strftime('%a')\n\n\ndef strftime_aa(isotime): # ?\n return dateutil.parser.parse(isotime).strftime('%A')\n\n\ndef strftime_aak(isotime): # ?\n w_dict = {'Monday':'월요일',\n 'Tuesday':'화요일',\n 'Wednesday':'수요일',\n 'Thursday':'목요일',\n 'Friday':'금요일',\n 'Saturday':'토요일',\n 'Sunday':'일요일',\n }\n return w_dict[dateutil.parser.parse(isotime).strftime('%A')]\n\n\ndef strftime_ak(isotime): # ?\n w_dict = {'Monday':'월',\n 'Tuesday':'화',\n 'Wednesday':'수',\n 'Thursday':'목',\n 'Friday':'금',\n 'Saturday':'토',\n 'Sunday':'일',\n }\n return w_dict[dateutil.parser.parse(isotime).strftime('%A')]\n\n\"\"\" \narray related functions \n\"\"\"\n\n\ndef array(*args):\n return _serialize(np.array(list(args)))\n\n\ndef get_array_element(serialized_list, index):\n return _deserialize(serialized_list)[index]\n\n\ndef concat_ws(sep, serialized_list):\n arr = _deserialize(serialized_list)\n return sep.join([str(item) for item in arr])\n\n\ndef split(str_, *sep):\n nargs = len(sep)\n if nargs == 0:\n return _serialize(str_.split())\n else: # todo elif nargs == 1:\n return _serialize(str_.split(sep[0]))\n\n \ndef size(serialized_list):\n arr = 
_deserialize(serialized_list)\n return len(arr)\n"
] | [
[
"numpy.sign",
"numpy.math.log2",
"numpy.math.log10",
"numpy.math.sqrt",
"numpy.math.log",
"numpy.math.ceil",
"numpy.math.pow",
"numpy.math.exp",
"numpy.math.factorial",
"numpy.math.floor"
]
] |
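The helper functions above are thin wrappers over `re` and `dateutil`; below is a small hedged check of what two of them compute, using only those underlying libraries rather than importing the brightics package itself.

```python
import re
import dateutil.parser

# regexp_extract(subject, pattern) falls back to capture group 1,
# and uses group(index) when an index argument is supplied:
assert re.search(r"(\d+)-(\d+)", "id 2019-42").group(1) == "2019"
assert re.search(r"(\d+)-(\d+)", "id 2019-42").group(2) == "42"

# datediff(end_isotime, start_isotime) returns the whole-day difference:
end = dateutil.parser.parse("2019-03-10")
start = dateutil.parser.parse("2019-03-01")
assert (end - start).days == 9
```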
sander102907/autoencoder_program_synthesis | [
"752954f9ef268908553189a1c3323bad15b39f04"
] | [
"autoencoder_program_synthesis/model_utils/modules.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass AddGate(nn.Module):\n \"\"\"\n Add gate similar to LSTM add gate: :math: `y = σ(W_mul * inp + b_mul) * tanh(W_add * inp + b_add)`\n\n Outputs information that can be added to some state\n where the network learns: if and how much of the input should be added\n \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n\n self.W_mul = nn.Linear(dim, dim, bias=True)\n self.W_add = nn.Linear(dim, dim, bias=True)\n\n self.sigmoid = nn.Sigmoid()\n\n\n def forward(self, inp):\n out_mul = self.sigmoid(self.W_mul(inp))\n out_add = torch.tanh(self.W_add(inp))\n\n return out_mul * out_add\n\n\nclass PredictiveHidden(nn.Module):\n \"\"\"\n Computes a combined predictive hidden state from two hidden states: :math:`y = tanh(W1 * x1 + W2 * x2)`\n \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n\n # Learnable parameter weights1 -> for calculating: W1 * inp1\n self.W1 = nn.Linear(dim, dim, bias=True)\n\n # Learnable parameter weights2 -> for calculating: W2 * inp2\n self.W2 = nn.Linear(dim, dim, bias=True)\n\n\n def forward(self, inp1, inp2):\n # predictive hidden state: tanh(W1 * inp1 + W2 * inp2)\n h_pred = torch.tanh(self.W1(inp1) + self.W2(inp2))\n\n return h_pred\n\n\nclass TreeTopologyPred(nn.Module):\n \"\"\"\n Computes logits for depth, width and res predictions with linear transformations: dim -> 1\n \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n\n # For topology prediction, we predict whether there are children\n self.depth_pred = nn.Linear(dim, 1)\n\n # For topology prediction, we predict whether there are successor siblings\n self.width_pred = nn.Linear(dim, 1)\n\n # For predicting whether a token is a reserved keyword of c++ or not\n self.res_pred = nn.Linear(dim, 1)\n\n def forward(self, inp):\n depth_pred = self.depth_pred(inp)\n width_pred = self.width_pred(inp)\n res_pred = self.res_pred(inp)\n\n return depth_pred, width_pred, res_pred\n\n\nclass LstmAttention(nn.Module):\n \"\"\"\n ATTENTION-BASED LSTM FOR PSYCHOLOGICAL STRESS DETECTION FROM SPOKEN\n LANGUAGE USING DISTANT SUPERVISION\n\n https://arxiv.org/abs/1805.12307\n \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n\n self.attention_weights = nn.Linear(dim, dim)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, inp):\n u = torch.tanh(self.attention_weights(inp))\n\n a = self.softmax(u)\n\n v = torch.sum(a * inp, dim=-1)\n\n return u * inp\n\n\nclass MultiLayerLSTMCell(nn.Module):\n \"\"\"\n A long short-term memory (LSTM) cell with support for multiple layers.\n\n input_size: The number of expected features in the input\n hidden_size: The number of features in the hidden state\n num_layers: Number of recurrent layers.\n E.g., setting num_layers=2 would mean stacking two LSTM cells together\n to form a stacked LSTM cell, with the second LSTM cell taking in outputs of\n the first LSTM cell and computing the final results. 
Default: 1\n \"\"\"\n\n def __init__(self, input_size, hidden_size, num_layers = 1, recurrent_dropout=0):\n super().__init__()\n\n self.num_layers = num_layers\n self.rnns = nn.ModuleList([])\n self.dropout = nn.Dropout(recurrent_dropout)\n\n # Initialize RNNs with num layers\n for i in range(num_layers):\n if i == 0:\n self.rnns.append(nn.LSTMCell(input_size, hidden_size))\n else:\n self.rnns.append(nn.LSTMCell(hidden_size, hidden_size))\n\n\n def forward(self, input, hidden_states):\n new_hidden_states = []\n\n for i in range(self.num_layers):\n if i == 0:\n h, c = self.rnns[i](input, hidden_states[i])\n else:\n h, c = self.rnns[i](h, hidden_states[i])\n\n # apply recurrent dropout on the outputs of each LSTM cell hidden except the last layer\n if i < self.num_layers - 1:\n h = self.dropout(h)\n\n\n new_hidden_states.append((h, c))\n\n return new_hidden_states\n\n\n\nclass Highway(nn.Module):\n \"\"\"\n Code from:\n https://github.com/kefirski/pytorch_RVAE/blob/19103d1298d7d77423c6e7d76dcc190400d7256e/selfModules/highway.py#L5\n\n Highway networks use learned gating mechanisms to regulate information flow, inspired by Long Short-Term Memory (LSTM) recurrent neural networks.\n The gating mechanisms allow neural networks to have paths for information to follow across different layers (\"information highways\")\n\n http://papers.nips.cc/paper/5850-training-very-deep-networks \n \"\"\"\n \n def __init__(self, size, num_layers, f):\n\n super(Highway, self).__init__()\n\n self.num_layers = num_layers\n\n self.nonlinear = [nn.Linear(size, size) for _ in range(num_layers)]\n for i, module in enumerate(self.nonlinear):\n self._add_to_parameters(module.parameters(), 'nonlinear_module_{}'.format(i))\n\n self.linear = [nn.Linear(size, size) for _ in range(num_layers)]\n for i, module in enumerate(self.linear):\n self._add_to_parameters(module.parameters(), 'linear_module_{}'.format(i))\n\n self.gate = [nn.Linear(size, size) for _ in range(num_layers)]\n for i, module in enumerate(self.gate):\n self._add_to_parameters(module.parameters(), 'gate_module_{}'.format(i))\n\n self.f = f\n\n def forward(self, x):\n \"\"\"\n :param x: tensor with shape of [batch_size, size]\n :return: tensor with shape of [batch_size, size]\n applies σ(x) ⨀ (f(G(x))) + (1 - σ(x)) ⨀ (Q(x)) transformation | G and Q is affine transformation,\n f is non-linear transformation, σ(x) is affine transformation with sigmoid non-linearition\n and ⨀ is element-wise multiplication\n \"\"\"\n\n for layer in range(self.num_layers):\n gate = F.sigmoid(self.gate[layer](x))\n\n nonlinear = self.f(self.nonlinear[layer](x))\n linear = self.linear[layer](x)\n\n x = gate * nonlinear + (1 - gate) * linear\n\n return x\n\n def _add_to_parameters(self, parameters, name):\n for i, parameter in enumerate(parameters):\n self.register_parameter(name='{}-{}'.format(name, i), param=parameter)\n\n \n\n \n"
] | [
[
"torch.sum",
"torch.nn.Linear",
"torch.nn.Softmax",
"torch.nn.LSTMCell",
"torch.nn.ModuleList",
"torch.nn.Sigmoid",
"torch.nn.Dropout"
]
] |
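A minimal shape-check sketch for three of the modules above. It assumes the package is importable under the recorded file path (`autoencoder_program_synthesis.model_utils.modules`), which is an assumption about the install layout; the dimensions and batch size are arbitrary.

```python
import torch
from autoencoder_program_synthesis.model_utils.modules import (
    AddGate, PredictiveHidden, MultiLayerLSTMCell,
)

dim, batch = 16, 4
h_parent, h_sibling = torch.randn(batch, dim), torch.randn(batch, dim)

h_pred = PredictiveHidden(dim)(h_parent, h_sibling)     # tanh(W1*x1 + W2*x2)
delta = AddGate(dim)(h_pred)                            # gated update, same shape
assert h_pred.shape == delta.shape == (batch, dim)

# Two stacked LSTM cells behind the MultiLayerLSTMCell interface:
cell = MultiLayerLSTMCell(input_size=dim, hidden_size=dim, num_layers=2)
states = [(torch.zeros(batch, dim), torch.zeros(batch, dim)) for _ in range(2)]
states = cell(h_pred, states)                           # list of (h, c) per layer
assert len(states) == 2
```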
yyHaker/EKMRC-is-your-need | [
"483e2d9d822907ef36a39333933fd939dac1cea0"
] | [
"EKMRC/src/test_gnn.py"
] | [
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : test_gnn.py\n@Author : yyhaker \n@Contact : [email protected]\n@Time : 2020/04/22 15:19:24\n'''\n\n# here put the import lib\nimport torch\nfrom torch_geometric.data import Data\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv\n\nedge_index = torch.tensor([[0, 2],\n [2, 0],\n [3, 2],\n [2, 3]], dtype=torch.long)\nx = torch.tensor([[-1], [0], [1]], dtype=torch.float)\n\n\ndata = Data(x=x, edge_index=edge_index.t().contiguous())\ndevice = torch.device('cuda')\ndata = data.to(device)\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = GCNConv(1, 16)\n self.conv2 = GCNConv(16, 2)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n\n return F.log_softmax(x, dim=1)\n\n\nmodel = Net().to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n\nmodel.train()\nfor epoch in range(200):\n # optimizer.zero_grad()\n out = model(data)"
] | [
[
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout",
"torch.tensor",
"torch.nn.functional.relu",
"torch.device"
]
] |
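The script above stops after the forward pass, and its `x` defines only three nodes while `edge_index` references node index 3. Below is a hedged sketch (not from the repository) of how such a loop is typically completed: a consistent four-node toy graph, hypothetical labels `y`, NLL loss on the log-softmax output, and the optimizer step, reusing the `Net` class defined in the file and staying on CPU for brevity.

```python
import torch
import torch.nn.functional as F
from torch_geometric.data import Data

x = torch.tensor([[-1.0], [0.0], [1.0], [2.0]])            # four nodes this time
edge_index = torch.tensor([[0, 2], [2, 0], [3, 2], [2, 3]], dtype=torch.long)
y = torch.tensor([0, 1, 0, 1], dtype=torch.long)           # hypothetical node labels
data = Data(x=x, edge_index=edge_index.t().contiguous(), y=y)

model = Net()                                              # Net as defined above
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

model.train()
for epoch in range(200):
    optimizer.zero_grad()
    out = model(data)                                      # log-probabilities, [4, 2]
    loss = F.nll_loss(out, data.y)
    loss.backward()
    optimizer.step()
```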
toptaldev92/tensorflow | [
"1fd1f65d1b0896149e44a1f105267c27994010d9"
] | [
"tensorflow/examples/learn/text_classification_cnn.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example of Estimator for CNN-based text classification with DBpedia data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport numpy as np\nimport pandas\nfrom sklearn import metrics\nimport tensorflow as tf\n\nfrom tensorflow.contrib import learn\n\nFLAGS = None\n\nMAX_DOCUMENT_LENGTH = 100\nEMBEDDING_SIZE = 20\nN_FILTERS = 10\nWINDOW_SIZE = 20\nFILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]\nFILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]\nPOOLING_WINDOW = 4\nPOOLING_STRIDE = 2\nn_words = 0\n\n\ndef cnn_model(x, y):\n \"\"\"2 layer Convolutional network to predict from sequence of words\n to a class.\"\"\"\n # Convert indexes of words into embeddings.\n # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then\n # maps word indexes of the sequence into [batch_size, sequence_length,\n # EMBEDDING_SIZE].\n y = tf.one_hot(y, 15, 1, 0)\n word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,\n embedding_size=EMBEDDING_SIZE, name='words')\n word_vectors = tf.expand_dims(word_vectors, 3)\n with tf.variable_scope('CNN_Layer1'):\n # Apply Convolution filtering on input sequence.\n conv1 = tf.contrib.layers.convolution2d(word_vectors, N_FILTERS,\n FILTER_SHAPE1, padding='VALID')\n # Add a RELU for non linearity.\n conv1 = tf.nn.relu(conv1)\n # Max pooling across output of Convolution+Relu.\n pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],\n strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')\n # Transpose matrix so that n_filters from convolution becomes width.\n pool1 = tf.transpose(pool1, [0, 1, 3, 2])\n with tf.variable_scope('CNN_Layer2'):\n # Second level of convolution filtering.\n conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,\n FILTER_SHAPE2, padding='VALID')\n # Max across each filter to get useful features for classification.\n pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])\n\n # Apply regular WX + B and classification.\n prediction, loss = learn.models.logistic_regression(pool2, y)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss, tf.contrib.framework.get_global_step(),\n optimizer='Adam', learning_rate=0.01)\n\n return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op\n\n\ndef main(unused_argv):\n global n_words\n # Prepare training and testing data\n dbpedia = learn.datasets.load_dataset(\n 'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)\n x_train = pandas.DataFrame(dbpedia.train.data)[1]\n y_train = pandas.Series(dbpedia.train.target)\n x_test = pandas.DataFrame(dbpedia.test.data)[1]\n y_test = pandas.Series(dbpedia.test.target)\n\n # Process vocabulary\n vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)\n x_train = np.array(list(vocab_processor.fit_transform(x_train)))\n x_test = np.array(list(vocab_processor.transform(x_test)))\n 
n_words = len(vocab_processor.vocabulary_)\n print('Total words: %d' % n_words)\n\n # Build model\n classifier = learn.Estimator(model_fn=cnn_model)\n\n # Train and predict\n classifier.fit(x_train, y_train, steps=100)\n y_predicted = [\n p['class'] for p in classifier.predict(x_test, as_iterable=True)]\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy: {0:f}'.format(score))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--test_with_fake_data',\n default=False,\n help='Test the example code with fake data.',\n action='store_true'\n )\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"pandas.Series",
"tensorflow.reduce_max",
"tensorflow.variable_scope",
"tensorflow.one_hot",
"tensorflow.contrib.learn.preprocessing.VocabularyProcessor",
"tensorflow.contrib.learn.models.logistic_regression",
"tensorflow.contrib.learn.datasets.load_dataset",
"tensorflow.nn.max_pool",
"tensorflow.contrib.learn.ops.categorical_variable",
"tensorflow.transpose",
"tensorflow.app.run",
"tensorflow.expand_dims",
"sklearn.metrics.accuracy_score",
"tensorflow.contrib.layers.convolution2d",
"pandas.DataFrame",
"tensorflow.contrib.framework.get_global_step",
"tensorflow.contrib.learn.Estimator",
"tensorflow.argmax",
"tensorflow.nn.relu"
]
] |
filipesouzacit/RL-with-MCTS | [
"cca1a8a79e5973a30b423c45a090e2473975c189"
] | [
"trainer.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thus Jan 07 15:54:13 2021\n@author: Filipe Souza\n\nBased on Josh Varty (https://github.com/JoshVarty/AlphaZeroSimple)\n\"\"\"\nimport numpy as np\nfrom random import shuffle\nimport keras\n\nfrom gym_go import gogame\nfrom monte_carlo_tree_search import MCTS\n\nclass Trainer:\n\n def __init__(self, game, model, args):\n self.game = game\n self.model = model\n self.args = args\n self.mcts = MCTS(self.game, self.model, self.args)\n\n def exceute_episode(self):\n\n train_examples = []\n current_player = 1\n state = gogame.init_state(self.args['boardSize'])\n\n while True:\n #print(\"while True\")\n canonical_board = gogame.canonical_form(state)\n\n self.mcts = MCTS(self.game, self.model, self.args)\n root = self.mcts.run(self.model, canonical_board, to_play=1)\n\n action_probs = [0 for _ in range((self.args['boardSize']* self.args['boardSize'])+1)]\n for k, v in root.children.items():\n action_probs[k] = v.visit_count\n\n action_probs = action_probs / np.sum(action_probs)\n train_examples.append((canonical_board, current_player, action_probs))\n\n action = root.select_action(temperature=1)\n state = gogame.next_state(state, action, canonical=False)\n current_player = - current_player\n reward = gogame.winning(state)*current_player if gogame.game_ended(state) else None \n\n if reward is not None:\n ret = []\n for hist_state, hist_current_player, hist_action_probs in train_examples:\n # [Board, currentPlayer, actionProbabilities, Reward]\n tfBoard = np.array([hist_state[0],hist_state[1],hist_state[3]]).transpose().tolist()\n #ret.append(np.array([tfBoard,tfBoard, hist_action_probs, reward * ((-1) ** (hist_current_player != current_player))]))\n ret.append((tfBoard,hist_action_probs, reward * ((-1) ** (hist_current_player != current_player))))\n return ret\n\n def learn(self):\n for i in range(1, self.args['numIters'] + 1):\n\n print(\"numIters: {}/{}\".format(i, self.args['numIters']))\n\n train_examples = []\n\n for eps in range(self.args['numEps']):\n print(\"numEps: {}/{}\".format(eps, self.args['numEps']))\n iteration_train_examples = self.exceute_episode()\n train_examples.extend(iteration_train_examples)\n\n shuffle(train_examples)\n self.train(train_examples)\n\n def train(self, trainD):\n \n # Define the checkpoint\n checkpoint = keras.callbacks.ModelCheckpoint(self.args['checkpointPath'], monitor=\"val_loss\",\n mode=\"min\", save_best_only=True, verbose=0)\n\n # train the network\n print(\"Training network...\")\n \n x = [i[0] for i in trainD]\n x = np.array(x)\n \n y1 = [i[1] for i in trainD]\n y2 = [i[2] for i in trainD]\n y1 = np.array(y1)\n y2 = np.array(y2)\n \n history = self.model.model.fit(x,y={\"action_output\": y1, \"Value_output\": y2}, \n validation_split=0.2,\n batch_size=self.args['batchSize'], epochs=self.args['epochs'], \n verbose=1, callbacks=[checkpoint])\n \n # print accurary of the best epoch\n self.model.model.load_weights(self.args['checkpointPath'])\n \n"
] | [
[
"numpy.array",
"numpy.sum"
]
] |
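A small hedged illustration (the counts are invented) of the policy-target construction used in `exceute_episode`: MCTS visit counts are normalised into a probability vector, and an action is then drawn from it, assuming `select_action(temperature=1)` samples proportionally to visit counts, which is the usual convention.

```python
import numpy as np

visit_counts = np.array([0, 12, 3, 0, 5], dtype=np.float64)   # hypothetical counts
action_probs = visit_counts / np.sum(visit_counts)            # normalised policy target
assert np.isclose(action_probs.sum(), 1.0)

rng = np.random.default_rng(0)
action = rng.choice(len(action_probs), p=action_probs)        # temperature-1 sampling
print(action_probs, action)
```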
Lemon-Nation/PyLMDI | [
"54d15ec44b84bd84b960003b1fd6690057240565"
] | [
"Numercial_Examples/Examples_China.py"
] | [
"\n\n\n\n# =============================================================================\n# Step1: Input\n# =============================================================================\nimport numpy as np\nfrom PyLMDI import PyLMDI\n\nif __name__=='__main__':\n \n #--- Step1: Input\n Ct = 794.6119504871361 # Carbon emission from China's commercial buildings in 2018\n C0 = 761.984276581356 # Carbon emission from China's commercial buildings in 2017\n \n Pt = 1395.38 # Population size in 2018\n P0 = 1390.08 # in 2017 \n gt = 64.52073987 \n g0 = 59.04367375\n st = 0.521570193\n s0 = 0.51892765\n it = 0.002743568\n i0 = 0.002876626\n et = 3.053397862\n e0 = 3.004500526\n kt = 2.02\n k0 = 2.07\n \n \n Ct,C0 = [Ct],[C0]\n \n Xt = np.array([Pt,gt,st,it,et,kt]).reshape([-1,1])\n X0 = np.array([P0,g0,s0,i0,e0,k0]).reshape([-1,1])\n \n #--- Step2-4: LMDI decomposition analysis\n \n LMDI = PyLMDI(Ct,C0,Xt,X0)\n ans = LMDI.Add()\n \n \n # --- Step 5: Output\n \n print(\"The change of carbon emission of China's commercial buildings from 2017 to 2018 is: \",ans[0])\n \n print(\"The various driving forces contribute as follows:\")\n \n print(\"P: \",ans[1])\n print(\"g: \",ans[2])\n print(\"s: \",ans[3])\n print(\"i: \",ans[4])\n print(\"e: \",ans[5])\n print(\"K: \",ans[6])"
] | [
[
"numpy.array"
]
] |
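A hedged re-derivation of the additive result without the PyLMDI package, using the standard LMDI-I logarithmic-mean weight L(a, b) = (a - b) / (ln a - ln b); whether PyLMDI applies exactly this weighting is an assumption, and the inputs are the rounded figures listed above.

```python
import numpy as np

Xt = np.array([1395.38, 64.52073987, 0.521570193, 0.002743568, 3.053397862, 2.02])
X0 = np.array([1390.08, 59.04367375, 0.51892765, 0.002876626, 3.004500526, 2.07])
Ct, C0 = 794.6119504871361, 761.984276581356

L = (Ct - C0) / (np.log(Ct) - np.log(C0))      # logarithmic mean of Ct and C0
effects = L * np.log(Xt / X0)                  # additive effects of P, g, s, i, e, k

print(effects)
print(effects.sum(), Ct - C0)                  # equal up to rounding of the inputs
```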
HuangHaoyu1997/NRI | [
"e0cd1ef5e168db19cd904eabfd369a65238b5d07"
] | [
"utils.py"
] | [
"import numpy as np\nimport torch\nfrom torch.utils.data.dataset import TensorDataset\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\ndef my_softmax(input, axis=1):\n trans_input = input.transpose(axis, 0).contiguous()\n soft_max_1d = F.softmax(trans_input)\n return soft_max_1d.transpose(axis, 0)\n\n\ndef binary_concrete(logits, tau=1, hard=False, eps=1e-10):\n y_soft = binary_concrete_sample(logits, tau=tau, eps=eps)\n if hard:\n y_hard = (y_soft > 0.5).float()\n y = Variable(y_hard.data - y_soft.data) + y_soft\n else:\n y = y_soft\n return y\n\n\ndef binary_concrete_sample(logits, tau=1, eps=1e-10):\n logistic_noise = sample_logistic(logits.size(), eps=eps)\n if logits.is_cuda:\n logistic_noise = logistic_noise.cuda()\n y = logits + Variable(logistic_noise)\n return F.sigmoid(y / tau)\n\n\ndef sample_logistic(shape, eps=1e-10):\n uniform = torch.rand(shape).float()\n return torch.log(uniform + eps) - torch.log(1 - uniform + eps)\n\n\ndef sample_gumbel(shape, eps=1e-10):\n \"\"\"\n NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3\n\n Sample from Gumbel(0, 1)\n\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,\n (MIT license)\n \"\"\"\n U = torch.rand(shape).float()\n return - torch.log(eps - torch.log(U + eps))\n\n\ndef gumbel_softmax_sample(logits, tau=1, eps=1e-10):\n \"\"\"\n NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3\n\n Draw a sample from the Gumbel-Softmax distribution\n\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb\n (MIT license)\n \"\"\"\n gumbel_noise = sample_gumbel(logits.size(), eps=eps)\n if logits.is_cuda:\n gumbel_noise = gumbel_noise.cuda()\n y = logits + Variable(gumbel_noise)\n return my_softmax(y / tau, axis=-1)\n\n\ndef gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):\n \"\"\"\n NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3\n\n Sample from the Gumbel-Softmax distribution and optionally discretize.\n Args:\n logits: [batch_size, n_class] unnormalized log-probs\n tau: non-negative scalar temperature\n hard: if True, take argmax, but differentiate w.r.t. 
soft sample y\n Returns:\n [batch_size, n_class] sample from the Gumbel-Softmax distribution.\n If hard=True, then the returned sample will be one-hot, otherwise it will\n be a probability distribution that sums to 1 across classes\n\n Constraints:\n - this implementation only works on batch_size x num_features tensor for now\n\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,\n (MIT license)\n \"\"\"\n y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)\n if hard:\n shape = logits.size()\n _, k = y_soft.data.max(-1)\n # this bit is based on\n # https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5\n y_hard = torch.zeros(*shape)\n if y_soft.is_cuda:\n y_hard = y_hard.cuda()\n y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)\n # this cool bit of code achieves two things:\n # - makes the output value exactly one-hot (since we add then\n # subtract y_soft value)\n # - makes the gradient equal to y_soft gradient (since we strip\n # all other gradients)\n y = Variable(y_hard - y_soft.data) + y_soft\n else:\n y = y_soft\n return y\n\n\ndef binary_accuracy(output, labels):\n preds = output > 0.5\n correct = preds.type_as(labels).eq(labels).double()\n correct = correct.sum()\n return correct / len(labels)\n\n\ndef load_data(batch_size=1, suffix=''):\n loc_train = np.load('data/loc_train' + suffix + '.npy')\n vel_train = np.load('data/vel_train' + suffix + '.npy')\n edges_train = np.load('data/edges_train' + suffix + '.npy')\n\n loc_valid = np.load('data/loc_valid' + suffix + '.npy')\n vel_valid = np.load('data/vel_valid' + suffix + '.npy')\n edges_valid = np.load('data/edges_valid' + suffix + '.npy')\n\n loc_test = np.load('data/loc_test' + suffix + '.npy')\n vel_test = np.load('data/vel_test' + suffix + '.npy')\n edges_test = np.load('data/edges_test' + suffix + '.npy')\n\n # [num_samples, num_timesteps, num_dims, num_atoms]\n num_atoms = loc_train.shape[3] # 质点的数量\n\n loc_max = loc_train.max()\n loc_min = loc_train.min()\n vel_max = vel_train.max()\n vel_min = vel_train.min()\n\n # Normalize to [-1, 1]\n loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1\n vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1\n\n loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1\n vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1\n\n loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1\n vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1\n\n # Reshape to: [num_sims, num_atoms, num_timesteps, num_dims], e.g. 
[50000, 5, 49, 2]\n loc_train = np.transpose(loc_train, [0, 3, 1, 2])\n vel_train = np.transpose(vel_train, [0, 3, 1, 2])\n feat_train = np.concatenate([loc_train, vel_train], axis=3) # [50000, 5, 49, 4]\n edges_train = np.reshape(edges_train, [-1, num_atoms ** 2]) # [50000, 25]\n edges_train = np.array((edges_train + 1) / 2, dtype=np.int64) # float -> long \n\n loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])\n vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])\n feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)\n edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])\n edges_valid = np.array((edges_valid + 1) / 2, dtype=np.int64)\n\n loc_test = np.transpose(loc_test, [0, 3, 1, 2])\n vel_test = np.transpose(vel_test, [0, 3, 1, 2])\n feat_test = np.concatenate([loc_test, vel_test], axis=3)\n edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])\n edges_test = np.array((edges_test + 1) / 2, dtype=np.int64)\n\n feat_train = torch.FloatTensor(feat_train) # feature就是location和velocity向量concat\n edges_train = torch.LongTensor(edges_train)\n feat_valid = torch.FloatTensor(feat_valid)\n edges_valid = torch.LongTensor(edges_valid)\n feat_test = torch.FloatTensor(feat_test)\n edges_test = torch.LongTensor(edges_test)\n\n # Exclude self edges\n off_diag_idx = np.ravel_multi_index(\n np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)), # 对角线0元素,其余为1,np.where输出非零元素坐标\n [num_atoms, num_atoms]) # 把对角线元素的index去掉,返回剩下的index\n edges_train = edges_train[:, off_diag_idx] # 将edge邻接矩阵中所有的对角线元素都去掉\n edges_valid = edges_valid[:, off_diag_idx]\n edges_test = edges_test[:, off_diag_idx]\n\n train_data = TensorDataset(feat_train, edges_train)\n valid_data = TensorDataset(feat_valid, edges_valid)\n test_data = TensorDataset(feat_test, edges_test)\n\n train_data_loader = DataLoader(train_data, batch_size=batch_size)\n valid_data_loader = DataLoader(valid_data, batch_size=batch_size)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min\n\n\ndef load_kuramoto_data(batch_size=1, suffix=''):\n feat_train = np.load('data/feat_train' + suffix + '.npy')\n edges_train = np.load('data/edges_train' + suffix + '.npy')\n feat_valid = np.load('data/feat_valid' + suffix + '.npy')\n edges_valid = np.load('data/edges_valid' + suffix + '.npy')\n feat_test = np.load('data/feat_test' + suffix + '.npy')\n edges_test = np.load('data/edges_test' + suffix + '.npy')\n\n # [num_sims, num_atoms, num_timesteps, num_dims]\n num_atoms = feat_train.shape[1]\n\n # Normalize each feature dim. 
individually\n feat_max = feat_train.max(0).max(0).max(0)\n feat_min = feat_train.min(0).min(0).min(0)\n\n feat_max = np.expand_dims(np.expand_dims(np.expand_dims(feat_max, 0), 0), 0)\n feat_min = np.expand_dims(np.expand_dims(np.expand_dims(feat_min, 0), 0), 0)\n\n # Normalize to [-1, 1]\n feat_train = (feat_train - feat_min) * 2 / (feat_max - feat_min) - 1\n feat_valid = (feat_valid - feat_min) * 2 / (feat_max - feat_min) - 1\n feat_test = (feat_test - feat_min) * 2 / (feat_max - feat_min) - 1\n\n # Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]\n edges_train = np.reshape(edges_train, [-1, num_atoms ** 2])\n edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])\n edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])\n\n feat_train = torch.FloatTensor(feat_train)\n edges_train = torch.LongTensor(edges_train)\n feat_valid = torch.FloatTensor(feat_valid)\n edges_valid = torch.LongTensor(edges_valid)\n feat_test = torch.FloatTensor(feat_test)\n edges_test = torch.LongTensor(edges_test)\n\n # Exclude self edges\n off_diag_idx = np.ravel_multi_index(\n np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),\n [num_atoms, num_atoms])\n edges_train = edges_train[:, off_diag_idx]\n edges_valid = edges_valid[:, off_diag_idx]\n edges_test = edges_test[:, off_diag_idx]\n\n train_data = TensorDataset(feat_train, edges_train)\n valid_data = TensorDataset(feat_valid, edges_valid)\n test_data = TensorDataset(feat_test, edges_test)\n\n train_data_loader = DataLoader(train_data, batch_size=batch_size)\n valid_data_loader = DataLoader(valid_data, batch_size=batch_size)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n return train_data_loader, valid_data_loader, test_data_loader\n\n\ndef load_kuramoto_data_old(batch_size=1, suffix=''):\n feat_train = np.load('data/old_kuramoto/feat_train' + suffix + '.npy')\n edges_train = np.load('data/old_kuramoto/edges_train' + suffix + '.npy')\n feat_valid = np.load('data/old_kuramoto/feat_valid' + suffix + '.npy')\n edges_valid = np.load('data/old_kuramoto/edges_valid' + suffix + '.npy')\n feat_test = np.load('data/old_kuramoto/feat_test' + suffix + '.npy')\n edges_test = np.load('data/old_kuramoto/edges_test' + suffix + '.npy')\n\n # [num_sims, num_atoms, num_timesteps, num_dims]\n num_atoms = feat_train.shape[1]\n\n # Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]\n edges_train = np.reshape(edges_train, [-1, num_atoms ** 2])\n edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])\n edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])\n\n feat_train = torch.FloatTensor(feat_train)\n edges_train = torch.LongTensor(edges_train)\n feat_valid = torch.FloatTensor(feat_valid)\n edges_valid = torch.LongTensor(edges_valid)\n feat_test = torch.FloatTensor(feat_test)\n edges_test = torch.LongTensor(edges_test)\n\n # Exclude self edges\n off_diag_idx = np.ravel_multi_index(\n np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),\n [num_atoms, num_atoms])\n edges_train = edges_train[:, off_diag_idx]\n edges_valid = edges_valid[:, off_diag_idx]\n edges_test = edges_test[:, off_diag_idx]\n\n train_data = TensorDataset(feat_train, edges_train)\n valid_data = TensorDataset(feat_valid, edges_valid)\n test_data = TensorDataset(feat_test, edges_test)\n\n train_data_loader = DataLoader(train_data, batch_size=batch_size)\n valid_data_loader = DataLoader(valid_data, batch_size=batch_size)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n return train_data_loader, valid_data_loader, 
test_data_loader\n\n\ndef load_motion_data(batch_size=1, suffix=''):\n feat_train = np.load('data/motion_train' + suffix + '.npy')\n feat_valid = np.load('data/motion_valid' + suffix + '.npy')\n feat_test = np.load('data/motion_test' + suffix + '.npy')\n adj = np.load('data/motion_adj' + suffix + '.npy')\n\n # NOTE: Already normalized\n\n # [num_samples, num_nodes, num_timesteps, num_dims]\n num_nodes = feat_train.shape[1]\n\n edges_train = np.repeat(np.expand_dims(adj.flatten(), 0),\n feat_train.shape[0], axis=0)\n edges_valid = np.repeat(np.expand_dims(adj.flatten(), 0),\n feat_valid.shape[0], axis=0)\n edges_test = np.repeat(np.expand_dims(adj.flatten(), 0),\n feat_test.shape[0], axis=0)\n\n feat_train = torch.FloatTensor(feat_train)\n edges_train = torch.LongTensor(np.array(edges_train, dtype=np.int64))\n feat_valid = torch.FloatTensor(feat_valid)\n edges_valid = torch.LongTensor(np.array(edges_valid, dtype=np.int64))\n feat_test = torch.FloatTensor(feat_test)\n edges_test = torch.LongTensor(np.array(edges_test, dtype=np.int64))\n\n # Exclude self edges\n off_diag_idx = np.ravel_multi_index(\n np.where(np.ones((num_nodes, num_nodes)) - np.eye(num_nodes)),\n [num_nodes, num_nodes])\n edges_train = edges_train[:, off_diag_idx]\n edges_valid = edges_valid[:, off_diag_idx]\n edges_test = edges_test[:, off_diag_idx]\n\n train_data = TensorDataset(feat_train, edges_train)\n valid_data = TensorDataset(feat_valid, edges_valid)\n test_data = TensorDataset(feat_test, edges_test)\n\n train_data_loader = DataLoader(train_data, batch_size=batch_size)\n valid_data_loader = DataLoader(valid_data, batch_size=batch_size)\n test_data_loader = DataLoader(test_data, batch_size=batch_size)\n\n return train_data_loader, valid_data_loader, test_data_loader\n\n\ndef to_2d_idx(idx, num_cols):\n idx = np.array(idx, dtype=np.int64)\n y_idx = np.array(np.floor(idx / float(num_cols)), dtype=np.int64)\n x_idx = idx % num_cols\n return x_idx, y_idx\n\n\ndef encode_onehot(labels):\n classes = set(labels) # {0, 1, 2, 3, 4}\n classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)} \n # {0: array([1., 0., 0., 0., 0.]), 1: array([0., 1., 0., 0., 0.]), 2: array([0., 0., 1., 0., 0.]), 3: array([0., 0., 0., 1., 0.]), 4: array([0., 0., 0., 0., 1.])}\n # print('class:',classes_dict)\n labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)\n return labels_onehot\n\n\ndef get_triu_indices(num_nodes):\n \"\"\"Linear triu (upper triangular) indices.\"\"\"\n ones = torch.ones(num_nodes, num_nodes)\n eye = torch.eye(num_nodes, num_nodes)\n triu_indices = (ones.triu() - eye).nonzero().t()\n triu_indices = triu_indices[0] * num_nodes + triu_indices[1]\n return triu_indices\n\n\ndef get_tril_indices(num_nodes):\n \"\"\"Linear tril (lower triangular) indices.\"\"\"\n ones = torch.ones(num_nodes, num_nodes)\n eye = torch.eye(num_nodes, num_nodes)\n tril_indices = (ones.tril() - eye).nonzero().t()\n tril_indices = tril_indices[0] * num_nodes + tril_indices[1]\n return tril_indices\n\n\ndef get_offdiag_indices(num_nodes):\n \"\"\"Linear off-diagonal indices.\"\"\"\n ones = torch.ones(num_nodes, num_nodes)\n eye = torch.eye(num_nodes, num_nodes)\n offdiag_indices = (ones - eye).nonzero().t()\n offdiag_indices = offdiag_indices[0] * num_nodes + offdiag_indices[1]\n return offdiag_indices\n\n\ndef get_triu_offdiag_indices(num_nodes):\n \"\"\"Linear triu (upper) indices w.r.t. 
vector of off-diagonal elements.\"\"\"\n triu_idx = torch.zeros(num_nodes * num_nodes)\n triu_idx[get_triu_indices(num_nodes)] = 1.\n triu_idx = triu_idx[get_offdiag_indices(num_nodes)]\n return triu_idx.nonzero()\n\n\ndef get_tril_offdiag_indices(num_nodes):\n \"\"\"Linear tril (lower) indices w.r.t. vector of off-diagonal elements.\"\"\"\n tril_idx = torch.zeros(num_nodes * num_nodes)\n tril_idx[get_tril_indices(num_nodes)] = 1.\n tril_idx = tril_idx[get_offdiag_indices(num_nodes)]\n return tril_idx.nonzero()\n\n\ndef get_minimum_distance(data):\n data = data[:, :, :, :2].transpose(1, 2)\n data_norm = (data ** 2).sum(-1, keepdim=True)\n dist = data_norm + \\\n data_norm.transpose(2, 3) - \\\n 2 * torch.matmul(data, data.transpose(2, 3))\n min_dist, _ = dist.min(1)\n return min_dist.view(min_dist.size(0), -1)\n\n\ndef get_buckets(dist, num_buckets):\n dist = dist.cpu().data.numpy()\n\n min_dist = np.min(dist)\n max_dist = np.max(dist)\n bucket_size = (max_dist - min_dist) / num_buckets\n thresholds = bucket_size * np.arange(num_buckets)\n\n bucket_idx = []\n for i in range(num_buckets):\n if i < num_buckets - 1:\n idx = np.where(np.all(np.vstack((dist > thresholds[i],\n dist <= thresholds[i + 1])), 0))[0]\n else:\n idx = np.where(dist > thresholds[i])[0]\n bucket_idx.append(idx)\n\n return bucket_idx, thresholds\n\n\ndef get_correct_per_bucket(bucket_idx, pred, target):\n pred = pred.cpu().numpy()[:, 0]\n target = target.cpu().data.numpy()\n\n correct_per_bucket = []\n for i in range(len(bucket_idx)):\n preds_bucket = pred[bucket_idx[i]]\n target_bucket = target[bucket_idx[i]]\n correct_bucket = np.sum(preds_bucket == target_bucket)\n correct_per_bucket.append(correct_bucket)\n\n return correct_per_bucket\n\n\ndef get_correct_per_bucket_(bucket_idx, pred, target):\n pred = pred.cpu().numpy()\n target = target.cpu().data.numpy()\n\n correct_per_bucket = []\n for i in range(len(bucket_idx)):\n preds_bucket = pred[bucket_idx[i]]\n target_bucket = target[bucket_idx[i]]\n correct_bucket = np.sum(preds_bucket == target_bucket)\n correct_per_bucket.append(correct_bucket)\n\n return correct_per_bucket\n\n\ndef kl_categorical(preds, log_prior, num_atoms, eps=1e-16):\n kl_div = preds * (torch.log(preds + eps) - log_prior)\n return kl_div.sum() / (num_atoms * preds.size(0))\n\n\ndef kl_categorical_uniform(preds, num_atoms, num_edge_types, add_const=False,\n eps=1e-16):\n kl_div = preds * torch.log(preds + eps)\n if add_const:\n const = np.log(num_edge_types)\n kl_div += const\n return kl_div.sum() / (num_atoms * preds.size(0))\n\n\ndef nll_gaussian(preds, target, variance, add_const=False):\n neg_log_p = ((preds - target) ** 2 / (2 * variance))\n if add_const:\n const = 0.5 * np.log(2 * np.pi * variance)\n neg_log_p += const\n return neg_log_p.sum() / (target.size(0) * target.size(1))\n\n\ndef edge_accuracy(preds, target):\n _, preds = preds.max(-1)\n correct = preds.float().data.eq(\n target.float().data.view_as(preds)).cpu().sum()\n return np.float(correct) / (target.size(0) * target.size(1))\n\nif __name__==\"__main__\":\n triu_indices = get_triu_offdiag_indices(5)\n print(triu_indices)\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.sum",
"numpy.ones",
"torch.nn.functional.softmax",
"torch.rand",
"torch.utils.data.dataset.TensorDataset",
"numpy.float",
"torch.log",
"numpy.log",
"torch.eye",
"numpy.vstack",
"torch.nn.functional.sigmoid",
"numpy.transpose",
"numpy.reshape",
"torch.autograd.Variable",
"numpy.expand_dims",
"numpy.where",
"numpy.load",
"torch.ones",
"numpy.eye",
"numpy.arange",
"numpy.max",
"numpy.min",
"numpy.array",
"torch.FloatTensor",
"torch.zeros",
"numpy.concatenate",
"torch.LongTensor"
]
] |
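A quick hedged sanity check of two of the smaller helpers defined above; it assumes `utils.py` is importable from the repository root, which is an assumption about how the file is run.

```python
from utils import encode_onehot, get_offdiag_indices

# encode_onehot maps integer labels to one-hot rows:
print(encode_onehot([0, 2, 1]))        # three 3-dimensional one-hot rows

# get_offdiag_indices lists the flattened indices of the non-diagonal entries
# of an N x N relation matrix (N = 3 gives 6 of the 9 entries):
idx = get_offdiag_indices(3)
print(idx)                             # tensor([1, 2, 3, 5, 6, 7])
assert len(idx) == 3 * 3 - 3
```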
craymichael/tensorflow | [
"b5de565c9c57fa7ca02d42bcfe6f470ecf117ba5"
] | [
"tensorflow/python/compat/compat.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\n\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# This value changes every day with an automatic CL. It can be modified in code\n# via `forward_compatibility_horizon()` or with the environment variable\n# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 6, 17)\n_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = \"TF_FORWARD_COMPATIBILITY_DELTA_DAYS\"\n_FORWARD_COMPATIBILITY_DATE_NUMBER = None\n\n\ndef _date_to_date_number(year, month, day):\n return (year << 9) | (month << 5) | day\n\n\ndef _update_forward_compatibility_date_number(date_to_override=None):\n \"\"\"Update the base date to compare in forward_compatible function.\"\"\"\n\n global _FORWARD_COMPATIBILITY_DATE_NUMBER\n\n if date_to_override:\n date = date_to_override\n else:\n date = _FORWARD_COMPATIBILITY_HORIZON\n delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)\n if delta_days:\n date += datetime.timedelta(days=int(delta_days))\n\n if date < _FORWARD_COMPATIBILITY_HORIZON:\n logging.warning(\"Trying to set the forward compatibility date to the past\"\n \" date %s. This will be ignored by TensorFlow.\" % (date))\n return\n _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(\n date.year, date.month, date.day)\n\n\n_update_forward_compatibility_date_number()\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. 
The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibility, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(\n year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Yields:\n Nothing.\n \"\"\"\n try:\n _update_forward_compatibility_date_number(datetime.date(year, month, day))\n yield\n finally:\n _update_forward_compatibility_date_number()\n"
] | [
[
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.util.tf_export.tf_export"
]
] |
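The forward-compatibility check above reduces to comparing packed integers; here is a self-contained check (the private helper is re-stated rather than imported) that the `(year << 9) | (month << 5) | day` packing preserves calendar order, since a day fits in 5 bits and a month in 4.

```python
def date_number(year, month, day):
    # Same packing as _date_to_date_number above: day in bits 0-4, month in bits 5-8.
    return (year << 9) | (month << 5) | day

assert date_number(2021, 6, 17) > date_number(2021, 6, 16)    # day order preserved
assert date_number(2021, 7, 1) > date_number(2021, 6, 30)     # month outweighs day
assert date_number(2022, 1, 1) > date_number(2021, 12, 31)    # year outweighs month
print(date_number(2021, 6, 17))                               # 1034961
```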
atranitell/TensorGate | [
"855ae0c69a706c179c26ba4a75a8067a514285fe"
] | [
"utils/device.py"
] | [
"# Copyright 2017 The KaiJIN Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"performance tools\"\"\"\n\nfrom tensorflow.python.client import device_lib\n\n\ndef showing_avaliable_device():\n \"\"\"Showing the available device.\"\"\"\n for x in device_lib.list_local_devices():\n print(x)\n"
] | [
[
"tensorflow.python.client.device_lib.list_local_devices"
]
] |
aburousan/manim | [
"c11b649e9aed34976844e6a131fb12e2a30c7bc8"
] | [
"manim/mobject/opengl_geometry.py"
] | [
"import numpy as np\n\nfrom .. import logger\nfrom ..constants import *\nfrom ..mobject.mobject import Mobject\nfrom ..mobject.types.opengl_vectorized_mobject import (\n OpenGLDashedVMobject,\n OpenGLVGroup,\n OpenGLVMobject,\n)\nfrom ..utils.color import *\nfrom ..utils.deprecation import deprecated_params\nfrom ..utils.iterables import adjacent_n_tuples, adjacent_pairs\nfrom ..utils.simple_functions import clip, fdiv\nfrom ..utils.space_ops import (\n angle_between_vectors,\n angle_of_vector,\n compass_directions,\n find_intersection,\n normalize,\n rotate_vector,\n rotation_matrix_transpose,\n)\n\nDEFAULT_DOT_RADIUS = 0.08\nDEFAULT_SMALL_DOT_RADIUS = 0.04\nDEFAULT_DASH_LENGTH = 0.05\nDEFAULT_ARROW_TIP_LENGTH = 0.35\nDEFAULT_ARROW_TIP_WIDTH = 0.35\n\n\nclass OpenGLTipableVMobject(OpenGLVMobject):\n \"\"\"\n Meant for shared functionality between Arc and Line.\n Functionality can be classified broadly into these groups:\n\n * Adding, Creating, Modifying tips\n - add_tip calls create_tip, before pushing the new tip\n into the TipableVMobject's list of submobjects\n - stylistic and positional configuration\n\n * Checking for tips\n - Boolean checks for whether the TipableVMobject has a tip\n and a starting tip\n\n * Getters\n - Straightforward accessors, returning information pertaining\n to the TipableVMobject instance's tip(s), its length etc\n \"\"\"\n\n # Adding, Creating, Modifying tips\n\n def __init__(\n self,\n tip_length=DEFAULT_ARROW_TIP_LENGTH,\n normal_vector=OUT,\n tip_config={},\n **kwargs\n ):\n self.tip_length = tip_length\n self.normal_vector = normal_vector\n self.tip_config = tip_config\n OpenGLVMobject.__init__(self, **kwargs)\n\n def add_tip(self, at_start=False, **kwargs):\n \"\"\"\n Adds a tip to the TipableVMobject instance, recognising\n that the endpoints might need to be switched if it's\n a 'starting tip' or not.\n \"\"\"\n tip = self.create_tip(at_start, **kwargs)\n self.reset_endpoints_based_on_tip(tip, at_start)\n self.asign_tip_attr(tip, at_start)\n self.add(tip)\n return self\n\n def create_tip(self, at_start=False, **kwargs):\n \"\"\"\n Stylises the tip, positions it spacially, and returns\n the newly instantiated tip to the caller.\n \"\"\"\n tip = self.get_unpositioned_tip(**kwargs)\n self.position_tip(tip, at_start)\n return tip\n\n def get_unpositioned_tip(self, **kwargs):\n \"\"\"\n Returns a tip that has been stylistically configured,\n but has not yet been given a position in space.\n \"\"\"\n config = {}\n config.update(self.tip_config)\n config.update(kwargs)\n return OpenGLArrowTip(**config)\n\n def position_tip(self, tip, at_start=False):\n # Last two control points, defining both\n # the end, and the tangency direction\n if at_start:\n anchor = self.get_start()\n handle = self.get_first_handle()\n else:\n handle = self.get_last_handle()\n anchor = self.get_end()\n tip.rotate(angle_of_vector(handle - anchor) - PI - tip.get_angle())\n tip.shift(anchor - tip.get_tip_point())\n return tip\n\n def reset_endpoints_based_on_tip(self, tip, at_start):\n if self.get_length() == 0:\n # Zero length, put_start_and_end_on wouldn't\n # work\n return self\n\n if at_start:\n start = tip.get_base()\n end = self.get_end()\n else:\n start = self.get_start()\n end = tip.get_base()\n self.put_start_and_end_on(start, end)\n return self\n\n def asign_tip_attr(self, tip, at_start):\n if at_start:\n self.start_tip = tip\n else:\n self.tip = tip\n return self\n\n # Checking for tips\n def has_tip(self):\n return hasattr(self, \"tip\") and self.tip in self\n\n def 
has_start_tip(self):\n return hasattr(self, \"start_tip\") and self.start_tip in self\n\n # Getters\n def pop_tips(self):\n start, end = self.get_start_and_end()\n result = OpenGLVGroup()\n if self.has_tip():\n result.add(self.tip)\n self.remove(self.tip)\n if self.has_start_tip():\n result.add(self.start_tip)\n self.remove(self.start_tip)\n self.put_start_and_end_on(start, end)\n return result\n\n def get_tips(self):\n \"\"\"\n Returns a VGroup (collection of VMobjects) containing\n the TipableVMObject instance's tips.\n \"\"\"\n result = OpenGLVGroup()\n if hasattr(self, \"tip\"):\n result.add(self.tip)\n if hasattr(self, \"start_tip\"):\n result.add(self.start_tip)\n return result\n\n def get_tip(self):\n \"\"\"Returns the TipableVMobject instance's (first) tip,\n otherwise throws an exception.\"\"\"\n tips = self.get_tips()\n if len(tips) == 0:\n raise Exception(\"tip not found\")\n else:\n return tips[0]\n\n def get_default_tip_length(self):\n return self.tip_length\n\n def get_first_handle(self):\n return self.points[1]\n\n def get_last_handle(self):\n return self.points[-2]\n\n def get_end(self):\n if self.has_tip():\n return self.tip.get_start()\n else:\n return OpenGLVMobject.get_end(self)\n\n def get_start(self):\n if self.has_start_tip():\n return self.start_tip.get_start()\n else:\n return OpenGLVMobject.get_start(self)\n\n def get_length(self):\n start, end = self.get_start_and_end()\n return np.linalg.norm(start - end)\n\n\nclass OpenGLArc(OpenGLTipableVMobject):\n def __init__(\n self,\n start_angle=0,\n angle=TAU / 4,\n radius=1.0,\n n_components=8,\n arc_center=ORIGIN,\n **kwargs\n ):\n self.start_angle = start_angle\n self.angle = angle\n self.radius = radius\n self.n_components = n_components\n self.arc_center = arc_center\n super().__init__(self, **kwargs)\n self.orientation = -1\n\n def init_points(self):\n self.set_points(\n OpenGLArc.create_quadratic_bezier_points(\n angle=self.angle,\n start_angle=self.start_angle,\n n_components=self.n_components,\n )\n )\n # To maintain proper orientation for fill shaders.\n self.scale(self.radius, about_point=ORIGIN)\n self.shift(self.arc_center)\n\n @staticmethod\n def create_quadratic_bezier_points(angle, start_angle=0, n_components=8):\n samples = np.array(\n [\n [np.cos(a), np.sin(a), 0]\n for a in np.linspace(\n start_angle,\n start_angle + angle,\n 2 * n_components + 1,\n )\n ]\n )\n theta = angle / n_components\n samples[1::2] /= np.cos(theta / 2)\n\n points = np.zeros((3 * n_components, 3))\n points[0::3] = samples[0:-1:2]\n points[1::3] = samples[1::2]\n points[2::3] = samples[2::2]\n return points\n\n def get_arc_center(self):\n \"\"\"\n Looks at the normals to the first two\n anchors, and finds their intersection points\n \"\"\"\n # First two anchors and handles\n a1, h, a2 = self.points[:3]\n # Tangent vectors\n t1 = h - a1\n t2 = h - a2\n # Normals\n n1 = rotate_vector(t1, TAU / 4)\n n2 = rotate_vector(t2, TAU / 4)\n return find_intersection(a1, n1, a2, n2)\n\n def get_start_angle(self):\n angle = angle_of_vector(self.get_start() - self.get_arc_center())\n return angle % TAU\n\n def get_stop_angle(self):\n angle = angle_of_vector(self.get_end() - self.get_arc_center())\n return angle % TAU\n\n def move_arc_center_to(self, point):\n self.shift(point - self.get_arc_center())\n return self\n\n\nclass OpenGLArcBetweenPoints(OpenGLArc):\n def __init__(self, start, end, angle=TAU / 4, **kwargs):\n super().__init__(angle=angle, **kwargs)\n if angle == 0:\n self.set_points_as_corners([LEFT, RIGHT])\n 
self.put_start_and_end_on(start, end)\n\n\nclass OpenGLCurvedArrow(OpenGLArcBetweenPoints):\n def __init__(self, start_point, end_point, **kwargs):\n OpenGLArcBetweenPoints.__init__(self, start_point, end_point, **kwargs)\n self.add_tip()\n\n\nclass OpenGLCurvedDoubleArrow(OpenGLCurvedArrow):\n def __init__(self, start_point, end_point, **kwargs):\n OpenGLCurvedArrow.__init__(self, start_point, end_point, **kwargs)\n self.add_tip(at_start=True)\n\n\nclass OpenGLCircle(OpenGLArc):\n def __init__(self, color=RED, **kwargs):\n OpenGLArc.__init__(self, 0, TAU, color=color, **kwargs)\n\n def surround(self, mobject, dim_to_match=0, stretch=False, buff=MED_SMALL_BUFF):\n # Ignores dim_to_match and stretch; result will always be a circle\n # TODO: Perhaps create an ellipse class to handle singele-dimension stretching\n\n self.replace(mobject, dim_to_match, stretch)\n self.stretch((self.get_width() + 2 * buff) / self.get_width(), 0)\n self.stretch((self.get_height() + 2 * buff) / self.get_height(), 1)\n\n def point_at_angle(self, angle):\n start_angle = self.get_start_angle()\n return self.point_from_proportion((angle - start_angle) / TAU)\n\n\nclass OpenGLDot(OpenGLCircle):\n def __init__(\n self,\n point=ORIGIN,\n radius=DEFAULT_DOT_RADIUS,\n stroke_width=0,\n fill_opacity=1.0,\n color=WHITE,\n **kwargs\n ):\n super().__init__(\n arc_center=point,\n radius=radius,\n stroke_width=stroke_width,\n fill_opacity=fill_opacity,\n color=color,\n **kwargs\n )\n\n\nclass OpenGLEllipse(OpenGLCircle):\n def __init__(self, width=2, height=1, **kwargs):\n super().__init__(**kwargs)\n self.set_width(width, stretch=True)\n self.set_height(height, stretch=True)\n\n\nclass OpenGLAnnularSector(OpenGLArc):\n def __init__(\n self,\n inner_radius=1,\n outer_radius=2,\n angle=TAU / 4,\n start_angle=0,\n fill_opacity=1,\n stroke_width=0,\n color=WHITE,\n **kwargs\n ):\n self.inner_radius = inner_radius\n self.outer_radius = outer_radius\n OpenGLArc.__init__(\n self,\n start_angle=start_angle,\n angle=angle,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n color=color,\n **kwargs\n )\n\n def init_points(self):\n inner_arc, outer_arc = (\n OpenGLArc(\n start_angle=self.start_angle,\n angle=self.angle,\n radius=radius,\n arc_center=self.arc_center,\n )\n for radius in (self.inner_radius, self.outer_radius)\n )\n outer_arc.reverse_points()\n self.append_points(inner_arc.points)\n self.add_line_to(outer_arc.points[0])\n self.append_points(outer_arc.points)\n self.add_line_to(inner_arc.points[0])\n\n\nclass OpenGLSector(OpenGLAnnularSector):\n def __init__(self, outer_radius=1, inner_radius=0, **kwargs):\n OpenGLAnnularSector.__init__(\n self, inner_radius=inner_radius, outer_radius=outer_radius, **kwargs\n )\n\n\nclass OpenGLAnnulus(OpenGLCircle):\n def __init__(\n self,\n inner_radius=1,\n outer_radius=2,\n fill_opacity=1,\n stroke_width=0,\n color=WHITE,\n mark_paths_closed=False,\n **kwargs\n ):\n self.mark_paths_closed = mark_paths_closed # is this even used?\n self.inner_radius = inner_radius\n self.outer_radius = outer_radius\n OpenGLCircle.__init__(\n self,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n color=color,\n **kwargs\n )\n\n def init_points(self):\n self.radius = self.outer_radius\n outer_circle = OpenGLCircle(radius=self.outer_radius)\n inner_circle = OpenGLCircle(radius=self.inner_radius)\n inner_circle.reverse_points()\n self.append_points(outer_circle.points)\n self.append_points(inner_circle.points)\n self.shift(self.arc_center)\n\n\nclass OpenGLLine(OpenGLTipableVMobject):\n def 
__init__(self, start=LEFT, end=RIGHT, buff=0, path_arc=0, **kwargs):\n self.dim = 3\n self.buff = buff\n self.path_arc = path_arc\n self.set_start_and_end_attrs(start, end)\n super().__init__(**kwargs)\n\n def init_points(self):\n self.set_points_by_ends(self.start, self.end, self.buff, self.path_arc)\n\n def set_points_by_ends(self, start, end, buff=0, path_arc=0):\n if path_arc:\n self.set_points(OpenGLArc.create_quadratic_bezier_points(path_arc))\n self.put_start_and_end_on(start, end)\n else:\n self.set_points_as_corners([start, end])\n self.account_for_buff(self.buff)\n\n def set_path_arc(self, new_value):\n self.path_arc = new_value\n self.init_points()\n\n def account_for_buff(self, buff):\n if buff == 0:\n return\n #\n if self.path_arc == 0:\n length = self.get_length()\n else:\n length = self.get_arc_length()\n #\n if length < 2 * buff:\n return\n buff_prop = buff / length\n self.pointwise_become_partial(self, buff_prop, 1 - buff_prop)\n return self\n\n def set_start_and_end_attrs(self, start, end):\n # If either start or end are Mobjects, this\n # gives their centers\n rough_start = self.pointify(start)\n rough_end = self.pointify(end)\n vect = normalize(rough_end - rough_start)\n # Now that we know the direction between them,\n # we can find the appropriate boundary point from\n # start and end, if they're mobjects\n self.start = self.pointify(start, vect) + self.buff * vect\n self.end = self.pointify(end, -vect) - self.buff * vect\n\n def pointify(self, mob_or_point, direction=None):\n \"\"\"\n Take an argument passed into Line (or subclass) and turn\n it into a 3d point.\n \"\"\"\n if isinstance(mob_or_point, Mobject):\n mob = mob_or_point\n if direction is None:\n return mob.get_center()\n else:\n return mob.get_continuous_bounding_box_point(direction)\n else:\n point = mob_or_point\n result = np.zeros(self.dim)\n result[: len(point)] = point\n return result\n\n def put_start_and_end_on(self, start, end):\n curr_start, curr_end = self.get_start_and_end()\n if (curr_start == curr_end).all():\n self.set_points_by_ends(start, end, self.path_arc)\n return super().put_start_and_end_on(start, end)\n\n def get_vector(self):\n return self.get_end() - self.get_start()\n\n def get_unit_vector(self):\n return normalize(self.get_vector())\n\n def get_angle(self):\n return angle_of_vector(self.get_vector())\n\n def get_projection(self, point):\n \"\"\"\n Return projection of a point onto the line\n \"\"\"\n unit_vect = self.get_unit_vector()\n start = self.get_start()\n return start + np.dot(point - start, unit_vect) * unit_vect\n\n def get_slope(self):\n return np.tan(self.get_angle())\n\n def set_angle(self, angle, about_point=None):\n if about_point is None:\n about_point = self.get_start()\n self.rotate(\n angle - self.get_angle(),\n about_point=about_point,\n )\n return self\n\n def set_length(self, length):\n self.scale(length / self.get_length())\n\n\nclass OpenGLDashedLine(OpenGLLine):\n @deprecated_params(\n params=\"positive_space_ratio dash_spacing\",\n since=\"v0.9.0\",\n message=\"Use dashed_ratio instead of positive_space_ratio.\",\n )\n def __init__(\n self, *args, dash_length=DEFAULT_DASH_LENGTH, dashed_ratio=0.5, **kwargs\n ):\n # Simplify with removal of deprecation warning\n self.dash_spacing = kwargs.pop(\"dash_spacing\", None) # Unused param\n self.dashed_ratio = kwargs.pop(\"positive_space_ratio\", None) or dashed_ratio\n self.dash_length = dash_length\n super().__init__(*args, **kwargs)\n dashed_ratio = self.dashed_ratio\n num_dashes = 
self.calculate_num_dashes(dashed_ratio)\n dashes = OpenGLDashedVMobject(\n self, num_dashes=num_dashes, dashed_ratio=dashed_ratio\n )\n self.clear_points()\n self.add(*dashes)\n\n def calculate_num_dashes(self, dashed_ratio):\n return max(\n 2, int(np.ceil((self.get_length() / self.dash_length) * dashed_ratio))\n )\n\n def get_start(self):\n if len(self.submobjects) > 0:\n return self.submobjects[0].get_start()\n else:\n return OpenGLLine.get_start(self)\n\n def get_end(self):\n if len(self.submobjects) > 0:\n return self.submobjects[-1].get_end()\n else:\n return OpenGLLine.get_end(self)\n\n def get_first_handle(self):\n return self.submobjects[0].points[1]\n\n def get_last_handle(self):\n return self.submobjects[-1].points[-2]\n\n\nclass OpenGLTangentLine(OpenGLLine):\n def __init__(self, vmob, alpha, length=1, d_alpha=1e-6, **kwargs):\n self.length = length\n self.d_alpha = d_alpha\n da = self.d_alpha\n a1 = clip(alpha - da, 0, 1)\n a2 = clip(alpha + da, 0, 1)\n super().__init__(vmob.pfp(a1), vmob.pfp(a2), **kwargs)\n self.scale(self.length / self.get_length())\n\n\nclass OpenGLElbow(OpenGLVMobject):\n def __init__(self, width=0.2, angle=0, **kwargs):\n self.angle = angle\n super().__init__(self, **kwargs)\n self.set_points_as_corners([UP, UP + RIGHT, RIGHT])\n self.set_width(width, about_point=ORIGIN)\n self.rotate(self.angle, about_point=ORIGIN)\n\n\nclass OpenGLArrow(OpenGLLine):\n def __init__(\n self,\n start=LEFT,\n end=RIGHT,\n path_arc=0,\n fill_color=GREY_A,\n fill_opacity=1,\n stroke_width=0,\n buff=MED_SMALL_BUFF,\n thickness=0.05,\n tip_width_ratio=5,\n tip_angle=PI / 3,\n max_tip_length_to_length_ratio=0.5,\n max_width_to_length_ratio=0.1,\n **kwargs\n ):\n self.thickness = thickness\n self.tip_width_ratio = tip_width_ratio\n self.tip_angle = tip_angle\n self.max_tip_length_to_length_ratio = max_tip_length_to_length_ratio\n self.max_width_to_length_ratio = max_width_to_length_ratio\n super().__init__(\n start=start,\n end=end,\n buff=buff,\n path_arc=path_arc,\n fill_color=fill_color,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n **kwargs\n )\n\n def set_points_by_ends(self, start, end, buff=0, path_arc=0):\n # Find the right tip length and thickness\n vect = end - start\n length = max(np.linalg.norm(vect), 1e-8)\n thickness = self.thickness\n w_ratio = fdiv(self.max_width_to_length_ratio, fdiv(thickness, length))\n if w_ratio < 1:\n thickness *= w_ratio\n\n tip_width = self.tip_width_ratio * thickness\n tip_length = tip_width / (2 * np.tan(self.tip_angle / 2))\n t_ratio = fdiv(self.max_tip_length_to_length_ratio, fdiv(tip_length, length))\n if t_ratio < 1:\n tip_length *= t_ratio\n tip_width *= t_ratio\n\n # Find points for the stem\n if path_arc == 0:\n points1 = (length - tip_length) * np.array([RIGHT, 0.5 * RIGHT, ORIGIN])\n points1 += thickness * UP / 2\n points2 = points1[::-1] + thickness * DOWN\n else:\n # Solve for radius so that the tip-to-tail length matches |end - start|\n a = 2 * (1 - np.cos(path_arc))\n b = -2 * tip_length * np.sin(path_arc)\n c = tip_length ** 2 - length ** 2\n R = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)\n\n # Find arc points\n points1 = OpenGLArc.create_quadratic_bezier_points(path_arc)\n points2 = np.array(points1[::-1])\n points1 *= R + thickness / 2\n points2 *= R - thickness / 2\n if path_arc < 0:\n tip_length *= -1\n rot_T = rotation_matrix_transpose(PI / 2 - path_arc, OUT)\n for points in points1, points2:\n points[:] = np.dot(points, rot_T)\n points += R * DOWN\n\n self.set_points(points1)\n # Tip\n 
self.add_line_to(tip_width * UP / 2)\n self.add_line_to(tip_length * LEFT)\n self.tip_index = len(self.points) - 1\n self.add_line_to(tip_width * DOWN / 2)\n self.add_line_to(points2[0])\n # Close it out\n self.append_points(points2)\n self.add_line_to(points1[0])\n\n if length > 0:\n # Final correction\n super().scale(length / self.get_length())\n\n self.rotate(angle_of_vector(vect) - self.get_angle())\n self.rotate(\n PI / 2 - np.arccos(normalize(vect)[2]),\n axis=rotate_vector(self.get_unit_vector(), -PI / 2),\n )\n self.shift(start - self.get_start())\n self.refresh_triangulation()\n\n def reset_points_around_ends(self):\n self.set_points_by_ends(\n self.get_start(), self.get_end(), path_arc=self.path_arc\n )\n return self\n\n def get_start(self):\n nppc = self.n_points_per_curve\n points = self.points\n return (points[0] + points[-nppc]) / 2\n\n def get_end(self):\n return self.points[self.tip_index]\n\n def put_start_and_end_on(self, start, end):\n self.set_points_by_ends(start, end, buff=0, path_arc=self.path_arc)\n return self\n\n def scale(self, *args, **kwargs):\n super().scale(*args, **kwargs)\n self.reset_points_around_ends()\n return self\n\n def set_thickness(self, thickness):\n self.thickness = thickness\n self.reset_points_around_ends()\n return self\n\n def set_path_arc(self, path_arc):\n self.path_arc = path_arc\n self.reset_points_around_ends()\n return self\n\n\nclass OpenGLVector(OpenGLArrow):\n def __init__(self, direction=RIGHT, buff=0, **kwargs):\n self.buff = buff\n if len(direction) == 2:\n direction = np.hstack([direction, 0])\n super().__init__(ORIGIN, direction, buff=buff, **kwargs)\n\n\nclass OpenGLDoubleArrow(OpenGLArrow):\n def __init__(self, *args, **kwargs):\n OpenGLArrow.__init__(self, *args, **kwargs)\n self.add_tip(at_start=True)\n\n\nclass OpenGLCubicBezier(OpenGLVMobject):\n def __init__(self, a0, h0, h1, a1, **kwargs):\n OpenGLVMobject.__init__(self, **kwargs)\n self.add_cubic_bezier_curve(a0, h0, h1, a1)\n\n\nclass OpenGLPolygon(OpenGLVMobject):\n def __init__(self, *vertices, **kwargs):\n self.vertices = vertices\n super().__init__(**kwargs)\n\n def init_points(self):\n verts = self.vertices\n self.set_points_as_corners([*verts, verts[0]])\n\n def get_vertices(self):\n return self.get_start_anchors()\n\n def round_corners(self, radius=0.5):\n vertices = self.get_vertices()\n arcs = []\n for v1, v2, v3 in adjacent_n_tuples(vertices, 3):\n vect1 = v2 - v1\n vect2 = v3 - v2\n unit_vect1 = normalize(vect1)\n unit_vect2 = normalize(vect2)\n angle = angle_between_vectors(vect1, vect2)\n # Negative radius gives concave curves\n angle *= np.sign(radius)\n # Distance between vertex and start of the arc\n cut_off_length = radius * np.tan(angle / 2)\n # Determines counterclockwise vs. 
clockwise\n sign = np.sign(np.cross(vect1, vect2)[2])\n arc = OpenGLArcBetweenPoints(\n v2 - unit_vect1 * cut_off_length,\n v2 + unit_vect2 * cut_off_length,\n angle=sign * angle,\n n_components=2,\n )\n arcs.append(arc)\n\n self.clear_points()\n # To ensure that we loop through starting with last\n arcs = [arcs[-1], *arcs[:-1]]\n for arc1, arc2 in adjacent_pairs(arcs):\n self.append_points(arc1.points)\n line = OpenGLLine(arc1.get_end(), arc2.get_start())\n # Make sure anchors are evenly distributed\n len_ratio = line.get_length() / arc1.get_arc_length()\n line.insert_n_curves(int(arc1.get_num_curves() * len_ratio))\n self.append_points(line.points)\n return self\n\n\nclass OpenGLRegularPolygon(OpenGLPolygon):\n def __init__(self, n=6, start_angle=None, **kwargs):\n self.start_angle = start_angle\n if self.start_angle is None:\n if n % 2 == 0:\n self.start_angle = 0\n else:\n self.start_angle = 90 * DEGREES\n start_vect = rotate_vector(RIGHT, self.start_angle)\n vertices = compass_directions(n, start_vect)\n super().__init__(*vertices, **kwargs)\n\n\nclass OpenGLTriangle(OpenGLRegularPolygon):\n def __init__(self, **kwargs):\n super().__init__(n=3, **kwargs)\n\n\nclass OpenGLArrowTip(OpenGLTriangle):\n def __init__(\n self,\n fill_opacity=1,\n fill_color=WHITE,\n stroke_width=0,\n width=DEFAULT_ARROW_TIP_WIDTH,\n length=DEFAULT_ARROW_TIP_LENGTH,\n angle=0,\n **kwargs\n ):\n OpenGLTriangle.__init__(\n self,\n start_angle=0,\n fill_opacity=fill_opacity,\n fill_color=fill_color,\n stroke_width=stroke_width,\n **kwargs\n )\n self.set_width(width, stretch=True)\n self.set_height(length, stretch=True)\n\n def get_base(self):\n return self.point_from_proportion(0.5)\n\n def get_tip_point(self):\n return self.points[0]\n\n def get_vector(self):\n return self.get_tip_point() - self.get_base()\n\n def get_angle(self):\n return angle_of_vector(self.get_vector())\n\n def get_length(self):\n return np.linalg.norm(self.get_vector())\n\n\nclass OpenGLRectangle(OpenGLPolygon):\n def __init__(self, color=WHITE, width=4.0, height=2.0, **kwargs):\n OpenGLPolygon.__init__(self, UR, UL, DL, DR, color=color, **kwargs)\n\n self.set_width(width, stretch=True)\n self.set_height(height, stretch=True)\n\n\nclass OpenGLSquare(OpenGLRectangle):\n def __init__(self, side_length=2.0, **kwargs):\n self.side_length = side_length\n\n super().__init__(height=side_length, width=side_length, **kwargs)\n\n\nclass OpenGLRoundedRectangle(OpenGLRectangle):\n def __init__(self, corner_radius=0.5, **kwargs):\n self.corner_radius = corner_radius\n OpenGLRectangle.__init__(self, **kwargs)\n self.round_corners(self.corner_radius)\n"
] | [
[
"numpy.sqrt",
"numpy.sign",
"numpy.zeros",
"numpy.cross",
"numpy.cos",
"numpy.hstack",
"numpy.tan",
"numpy.linspace",
"numpy.array",
"numpy.sin",
"numpy.dot",
"numpy.linalg.norm"
]
] |
ddarmon/transCSSR | [
"8ed057eee70d2d50d14bc719c7850ac46a00e4d4"
] | [
"demo-and-experimental-scripts/demo_predict_presynch_eT.py"
] | [
"import numpy\nimport scipy.stats\nimport itertools\nimport copy\nimport string\nimport os\n\nfrom collections import Counter, defaultdict\nfrom filter_data_methods import *\nfrom igraph import *\n\nfrom transCSSR import *\n\ndata_prefix = ''\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#\n# The various test transducers. Xt is the input\n# and Yt is the output.\n#\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n# Xt_name = 'coinflip'\n# Yt_name = 'coinflip-excite_w_refrac'\n\nXt_name = 'barnettX'\nYt_name = 'barnettY'\n\n# Xt_name = ''\n# Yt_name = 'even'\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#\n# Load in the data for each process.\n#\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nstringY = open('data/{}{}.dat'.format(data_prefix, Yt_name)).readline().strip()\n\nif Xt_name == '':\n\tstringX = '0'*len(stringY)\nelse:\n\tstringX = open('data/{}{}.dat'.format(data_prefix, Xt_name)).readline().strip()\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#\n# Set the parameters and associated quantities:\n# \taxs, ays -- the input / output alphabets\n# \talpha -- the significance level associated with\n# \t CSSR's hypothesis tests.\n# \tL -- The maximum history length to look\n# back when inferring predictive\n# distributions.\n#\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\nif Xt_name == '':\n\taxs = ['0']\n\tays = ['0', '1']\nelse:\n\taxs = ['0', '1']\n\tays = ['0', '1']\n\ne_symbols = list(itertools.product(axs, ays)) # All of the possible pairs of emission\n # symbols for (x, y)\n\nalpha = 0.001\n\nverbose = False\n\n# L is the maximum amount we want to ever look back.\n\nL_max = 3\n\nTx = len(stringX); Ty = len(stringY)\n\nassert Tx == Ty, 'The two time series must have the same length.'\n\nT = Tx\n\nword_lookup_marg, word_lookup_fut = estimate_predictive_distributions(stringX, stringY, L_max)\n\nepsilon, invepsilon, morph_by_state = run_transCSSR(word_lookup_marg, word_lookup_fut, L_max, axs, ays, e_symbols, Xt_name, Yt_name, alpha = alpha)\n\nind_go_to = 20\n\npossible_states_from_predict_presynch_eT = numpy.zeros((ind_go_to-1, len(invepsilon)), dtype = numpy.int32)\n\nfor cur_ind in range(1, ind_go_to):\n\tcurX = stringX[:cur_ind]\n\tcurY = stringY[:cur_ind-1]\n\n\tpreds, possible_states = predict_presynch_eT(curX, curY, machine_fname = 'transCSSR_results/+{}.dot'.format(Xt_name), transducer_fname = 'transCSSR_results/{}+{}.dot'.format(Xt_name, Yt_name), axs = axs, ays = ays, inf_alg = 'transCSSR')\n\n\tpossible_states_from_predict_presynch_eT[cur_ind - 1] = possible_states\n\n\tprint((cur_ind, curX, curY + '*', preds.tolist(), possible_states))\n\nprint('')\n\npreds_all, possible_states_all = filter_and_pred_probs(stringX, stringY, machine_fname = 'transCSSR_results/+{}.dot'.format(Xt_name), transducer_fname = 'transCSSR_results/{}+{}.dot'.format(Xt_name, Yt_name), axs = axs, ays = ays, inf_alg = 'transCSSR')\n\nfor cur_ind in range(1, ind_go_to):\n\tcurX = stringX[:cur_ind]\n\tcurY = stringY[:cur_ind-1]\n\n\tprint((cur_ind, curX, curY + '*', preds_all[cur_ind-1, :].tolist(), possible_states_all[cur_ind-1, :].tolist()))\n\nfiltered_states, filtered_probs, stringY_pred = filter_and_predict(stringX, stringY, epsilon, invepsilon, morph_by_state, axs, ays, e_symbols, L_max, memoryless = False)\n\nprint_go_to = 40\n\nprint((\"\\n\\nFirst {} predictions.\".format(print_go_to)))\nfor ind in range(print_go_to):\n\tprint((filtered_probs[ind], preds_all[ind, 1]))\n\nprint((\"\\n\\nLast {} predictions.\".format(print_go_to)))\nfor ind in 
range(preds_all.shape[0] - print_go_to, preds_all.shape[0]):\n\tprint((filtered_probs[ind], preds_all[ind, 1]))\n\nimport matplotlib.pyplot as plt\n\nplt.figure()\nplt.plot(filtered_probs[:, 1], label = 'Using filter_and_predict')\nplt.plot(preds_all[:, 1], label = 'Using filter_and_pred_probs')\nplt.xlim([0, 1000])\nplt.legend()\nplt.show()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
]
] |
ds-praveenkumar/m5-accuracy-prediction | [
"20255adc95c3e0fe6c6acec9fd16ac88c6e95908"
] | [
"src/models/build_model.py"
] | [
"# github link: https://github.com/ds-praveenkumar/kaggle\n# Author: ds-praveenkumar\n# file: forcasting/build_model.py/\n# Created by ds-praveenkumar at 13-06-2020 02 09\n# feature:\n\nimport os\nimport psutil\nfrom fbprophet import Prophet\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom src.utility.timeit import timeit\n\nROOT_DIR = Path(__file__).parent.parent.parent\nprint('ROOT_DIR:', ROOT_DIR)\n\n@timeit\ndef us_public_holidays():\n ny = pd.DataFrame({'holiday': \"New Year's Day\", 'ds': pd.to_datetime(['2016-01-01', '2017-01-01'])})\n mlk = pd.DataFrame(\n {'holiday': 'Birthday of Martin Luther King, Jr.', 'ds': pd.to_datetime(['2016-01-18', '2017-01-16'])})\n wash = pd.DataFrame({'holiday': \"Washington's Birthday\", 'ds': pd.to_datetime(['2016-02-15', '2017-02-20'])})\n mem = pd.DataFrame({'holiday': 'Memorial Day', 'ds': pd.to_datetime(['2016-05-30', '2017-05-29'])})\n ind = pd.DataFrame(\n {'holiday': 'Independence Day', 'ds': pd.to_datetime(['2015-07-04', '2016-07-04', '2017-07-04'])})\n lab = pd.DataFrame({'holiday': 'Labor Day', 'ds': pd.to_datetime(['2015-09-07', '2016-09-05', '2017-09-04'])})\n col = pd.DataFrame({'holiday': 'Columbus Day', 'ds': pd.to_datetime(['2015-10-12', '2016-10-10', '2017-10-09'])})\n vet = pd.DataFrame({'holiday': \"Veteran's Day\", 'ds': pd.to_datetime(['2015-11-11', '2016-11-11', '2017-11-11'])})\n thanks = pd.DataFrame({'holiday': 'Thanksgiving Day', 'ds': pd.to_datetime(['2015-11-26', '2016-11-24'])})\n christ = pd.DataFrame({'holiday': 'Christmas', 'ds': pd.to_datetime(['2015-12-25', '2016-12-25'])})\n inaug = pd.DataFrame({'holiday': 'Inauguration Day', 'ds': pd.to_datetime(['2017-01-20'])})\n us_public_holidays = pd.concat([ny, mlk, wash, mem, ind, lab, col, vet, thanks, christ, inaug])\n return us_public_holidays\n\n\ndef is_nfl_season(ds):\n date = pd.to_datetime(ds)\n return (date.month > 8 or date.month < 2)\n\n\ndef nfl_sunday(ds):\n date = pd.to_datetime(ds)\n if date.weekday() == 6 and (date.month > 8 or date.month < 2):\n return 1\n else:\n return 0\n\n@timeit\ndef build_model():\n df = pd.read_csv('H:\\\\forcasting\\\\data\\\\training\\\\10655.csv')\n df['y'] = np.log1p(df.y.astype(float) + 1)\n print(df)\n model = Prophet(\n interval_width=0.95,\n changepoint_prior_scale=0.15,\n daily_seasonality=True,\n holidays=us_public_holidays(),\n\n yearly_seasonality=True,\n weekly_seasonality=True,\n seasonality_mode='multiplicative'\n )\n model.add_seasonality(\n name='weekly', period=7, fourier_order=3, prior_scale=0.1)\n\n\n df['nfl_sunday'] = df['ds'].apply(nfl_sunday)\n\n print(df)\n model.add_regressor('nfl_sunday')\n model.add_country_holidays(country_name='US')\n #save model\n filename = 'prophet_1.0.pkl'\n root = os.path.join(ROOT_DIR,'models')\n print(ROOT_DIR)\n path = os.path.join(root,filename)\n\n # with open(path, \"wb\") as f:\n # pickle.dump(model, f)\n print(f\"model saved at: {path}\")\n\n model.fit(df)\n future = model.make_future_dataframe(periods=28)\n future['nfl_sunday'] = future['ds'].apply(nfl_sunday)\n forecast = model.predict(future)\n print(forecast[-28:])\n\n\n\nif __name__ == '__main__':\n process = psutil.Process(os.getpid())\n build_model()\n print('Memory Usage(MB):',process.memory_info()[0] / float(2 ** 20))"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.concat"
]
] |
Spiilgriim/nnexpy | [
"f8e419598ef94bebb532eb32ccaeeb48a3edfb5e"
] | [
"nnexpy/network_generator.py"
] | [
"class NetworkGenerator(object):\n def build_model(self, *args, **kwargs):\n import tensorflow as tf\n depth = kwargs.get('depth', 1)\n input_shape = kwargs.get('input_shape', (2,))\n width = kwargs.get('width', 8)\n activation = kwargs.get('activation', 'relu')\n\n model = tf.keras.Sequential()\n\n model.add(tf.keras.layers.Dense(8, input_dim=input_shape[0], activation=activation,\n kernel_initializer='he_uniform'))\n for _ in range(depth):\n model.add(tf.keras.layers.Dense(8, activation=activation))\n model.add(tf.keras.layers.Dense(1, activation='sigmoid'))\n return model\n\n def train_and_save(self, *args, **kwargs):\n import tensorflow as tf\n tf.compat.v1.disable_eager_execution()\n\n model = kwargs.get('model', None)\n epoch_number = kwargs.get('epoch_number', 100)\n data = kwargs.get('data', None)\n label = kwargs.get('label', None)\n save_path = kwargs.get('save_path', './model.h5')\n callbacks = kwargs.get('callbacks', None)\n batch_size = kwargs.get('batch_size', 10)\n loss = kwargs.get('loss', 'sparse_categorical_crossentropy')\n\n model.summary()\n model.compile(optimizer=\"adam\",\n loss=loss, metrics=['accuracy'])\n model.fit(data, label, validation_split=0.2, batch_size=batch_size,\n epochs=epoch_number, shuffle=True, verbose=2, callbacks=callbacks)\n model.save(save_path)\n import gc\n del model\n gc.collect()\n tf.keras.backend.clear_session()\n tf.compat.v1.reset_default_graph()\n\n def full_net_combined(self, depth, input_shape, mypath, epoch_number, data, label):\n import tensorflow as tf\n tf.compat.v1.disable_eager_execution()\n model = self.build_model(\n depth=depth, input_shape=input_shape, width=8, activation='relu')\n csv = tf.keras.callbacks.CSVLogger(\n mypath + str(depth) + 'layer.csv', separator=',', append=False)\n self.train_and_save(model=model, epoch_number=epoch_number, data=data, label=label, save_path=mypath +\n str(depth) + 'layer.h5', batch_size=64, loss=\"binary_crossentropy\", callbacks=[csv])\n import gc\n del model\n gc.collect()\n tf.keras.backend.clear_session()\n tf.compat.v1.reset_default_graph()\n"
] | [
[
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.reset_default_graph",
"tensorflow.keras.Sequential",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.layers.Dense"
]
] |
MartinCooke/jocular | [
"635816d4ef6aa6ea75187137e25386dad2d551e9"
] | [
"jocular/stretch.py"
] | [
"''' Various stretch functions. Easy to add more. Room for refinement,\n methinks.\n'''\n\nimport numpy as np\n\ndef stretch(x, method='linear', param=None, NR=0, background=None):\n\n # if no noise reduction just use stretch alone\n if (NR <= 0) or (background is None):\n return stretch_main(x, method=method, param=param)\n\n else:\n # get stretched data and lightly suppress low end\n y = stretch_main(x, method=method, param=param)\n hyper_param = 1 - .1 * (NR / 100)\n return y * stretch_main(x, method='hyper', param=hyper_param)\n\ndef stretch_main(x, method='linear', param=None):\n\n if method == 'linear':\n return x\n\n if method == 'hyper':\n d = .02\n c = d * (1 + d - param)\n return (1 + c) * (x / (x + c))\n\n if method == 'log':\n c = param * 200\n return np.log(c*x + 1) / np.log(c + 1)\n\n if method == 'asinh':\n # c = param * 250\n c = param * 2000\n return np.arcsinh(c*x) / np.arcsinh(c + .0000001)\n\n if method == 'gamma':\n # with noise reduction, linear from x=0-a, with slope s\n y = x.copy()\n # g = .5 - .5 * param\n # g = .75 - .75 * param\n g = 1 - param\n a0 = .01\n s = g / (a0 * (g - 1) + a0 ** (1 - g))\n d = (1 / (a0 ** g * (g - 1) + 1)) - 1\n y[x < a0] = x[x < a0] * s\n y[x >= a0] = (1 + d) * (x[x >= a0] ** g) - d\n return y\n \n else:\n return x\n\n"
] | [
[
"numpy.log",
"numpy.arcsinh"
]
] |
juvilius/mathchem-package | [
"ca56cb03e6ccdb47b3dfc382ca36b0a00d3e28b9"
] | [
"mathchem/mathchem.py"
] | [
"import numpy as np\n\n\nclass Mol():\n r\"\"\"\n Molecule.\n \"\"\"\n __g6_string = ''\n # Adjacency matrix\n __A = []\n # Incidence matrix\n __B = []\n # Laplacian matrix\n __L = []\n # Normalized laplacian matrix\n __NL = []\n # Signless laplacian matrix\n __Q = []\n # Distance matrix\n __D = []\n # Resistance Distance matrix\n __RD = []\n\n __Order = 0\n __Edges = []\n\n __Sage_graph = None\n __NX_graph = None\n\n __Degrees = []\n\n __Spectrum = []\n __Laplacian_spectrum = []\n __Distance_spectrum = []\n __Norm_laplacian_spectrum = []\n __Signless_laplacian_spectrum = []\n __RD_spectrum = []\n\n __Is_connected = None\n # Switch it to False when we know that the graph is connected. Useful for big calculations\n __Check_connectedness = True\n\n def _reset_(self):\n \"\"\" Reset all attributes \"\"\"\n self.__g6_string = ''\n # Adjacency matrix\n self.__A = []\n # Incidence matrix\n self.__B = []\n # Laplacian matrix\n self.__L = []\n # Normalized laplacian matrix\n self.__NL = []\n # Signless laplacian matrix\n self.__Q = []\n # Distance matrix\n self.__D = []\n # Resistance Distance matrix\n self.__RD = []\n\n self.__Order = 0\n self.__Edges = []\n\n self.__Sage_graph = None\n self.__NX_graph = None\n\n self.__Degrees = []\n\n self.__Spectrum = []\n self.__Laplacian_spectrum = []\n self.__Distance_spectrum = []\n self.__Norm_laplacian_spectrum = []\n self.__Signless_laplacian_spectrum = []\n self.__RD_spectrum = []\n\n self.__Is_connected = None\n\n # allow to set structure from somewhere\n # used in utilites\n\n def _set_A(self, A):\n self.__A = A\n\n def _set_Edges(self, edges):\n self.__Edges = edges\n\n def _set_Order(self, order):\n self.__Order = order\n\n # native method to initialize Mol class is to provide g6 string\n def __init__(self, string=None, check_connectedness=True):\n \"\"\" Molecular graph class \"\"\"\n self.__Check_connectedness = check_connectedness\n if string != None:\n if string[0] == '>':\n if string.startswith('>>graph6<<'):\n string = string[10:]\n elif string.startswith('>>sparse6<<'):\n string = string[11:]\n\n if string[0] == ':':\n self.read_s6(string)\n else:\n self.read_g6(string)\n\n def __repr__(self):\n if self.__A != None:\n return 'Molecular graph on ' + str(\n self.__Order) + ' vertices and ' + str(self.size()) + ' edges'\n return 'Empty Molecular graph'\n\n def __len__(self):\n if self.__A != None: return len(self.__A)\n else: return 0\n\n def set_check_connectedness(self, c):\n \"\"\" Switch on/off of checking connectedness for the graph. 
Might be useful in batch calculations to economy time.\n args: c (True/False)\n \"\"\"\n self.check_connectedness = c\n\n def g6_string(self):\n \"\"\" Return a graph6 string representation of the graph\n \n Alias: graph6_string \"\"\"\n return self.__g6_string\n\n # alias like in Sage:\n graph6_string = g6_string\n\n def order(self):\n \"\"\" Return number of vertices \"\"\"\n return self.__Order\n\n # alias for order\n n = order\n\n def edges(self):\n \"\"\" Return list of edges \"\"\"\n return self.__Edges\n\n def size(self):\n \"\"\" Return number of edges\"\"\"\n return len(self.__Edges)\n\n # alias for size\n m = size\n\n def vertices(self):\n \"\"\" Return list of vertices \"\"\"\n return range(self.__Order)\n\n def sage_graph(self):\n \"\"\" Return Sage Graph object \"\"\"\n if self.__Sage_graph is None: self._init_sage_graph_()\n return self.__Sage_graph\n\n def NX_graph(self):\n \"\"\" Return NetworkX graph object \"\"\"\n if self.__NX_graph is None:\n import networkx as nx\n self.__NX_graph = nx.Graph(self.__Edges)\n return self.__NX_graph\n\n nx_graph = NX_graph\n\n def _init_sage_graph_(self):\n \"\"\" Initialize SAGE graph from Adjacency matrix\"\"\"\n from sage.graphs.graph import Graph\n self.__Sage_graph = Graph(self.__Edges)\n\n def read_g6(self, s):\n \"\"\" Initialize graph from graph6 string \"\"\"\n\n def graph_bit(pos, off):\n return ((ord(s[off + 1 + pos / 6]) - 63) & (2**(5 - pos % 6))) != 0\n\n if s.startswith('>>graph6<<'):\n s = s[10:]\n # reset all the attributes before changing the structure\n self._reset_()\n\n n = ord(s[0]) - 63\n off = 0\n if n == 63:\n if ord(s[1]) - 63 != 63:\n n = ((ord(s[1]) - 63) << 12) + (\n (ord(s[2]) - 63) << 6) + ord(s[3]) - 63\n\n off = 3\n else:\n n = ((ord(s[2]) - 63) << 30) + ((ord(s[3]) - 63) << 24) + (\n (ord(s[4]) - 63) << 18) + ((ord(s[5]) - 63) << 12) + (\n (ord(s[6]) - 63) << 6) + ord(s[7]) - 63\n\n off = 7\n\n self.__Order = n\n\n self.__A = [[0 for col in range(n)] for row in range(n)]\n\n i = 0\n j = 1\n\n self.__Edges = []\n for x in range(n * (n - 1) / 2):\n if graph_bit(x, off):\n self.__A[i][j] = 1\n self.__A[j][i] = 1\n self.__Edges.append((i, j))\n if j - i == 1:\n i = 0\n j += 1\n else:\n i += 1\n\n self.__g6_string = s\n\n read_graph6 = read_g6\n\n def read_s6(self, s):\n \"\"\" Initialize graph from sparse6 string \"\"\"\n\n def graph_bit(pos, off):\n return ((ord(s[off + 1 + pos / 6]) - 63) & (2**(5 - pos % 6))) != 0\n\n if s.startswith('>>sparse6<<'):\n s = s[11:]\n if not s[0] == ':':\n print('This is not a sparse6 format!')\n return False\n\n # reset all the attributes before changing the structure\n self._reset_()\n\n s = s[1:]\n n = ord(s[0]) - 63\n off = 0\n if n == 63:\n if ord(s[1]) - 63 != 63:\n n = ((ord(s[1]) - 63) << 12) + (\n (ord(s[2]) - 63) << 6) + ord(s[3]) - 63\n\n off = 3\n else:\n n = ((ord(s[2]) - 63) << 30) + ((ord(s[3]) - 63) << 24) + (\n (ord(s[4]) - 63) << 18) + ((ord(s[5]) - 63) << 12) + (\n (ord(s[6]) - 63) << 6) + ord(s[7]) - 63\n\n off = 7\n\n self.__Order = n\n\n k = 1\n while 1 << k < n:\n k += 1\n\n data = s[off + 1:]\n\n #print n,k\n #print data\n\n def parseData():\n \"\"\"Return stream of pairs b[i], x[i] for sparse6 format.\"\"\"\n chunks = iter(data)\n d = None # partial data word\n dLen = 0 # how many unparsed bits are left in d\n\n while 1:\n if dLen < 1:\n d = ord(next(chunks)) - 63\n dLen = 6\n dLen -= 1\n b = (d >> dLen) & 1 # grab top remaining bit\n\n x = d & ((1 << dLen) - 1) # partially built up value of x\n xLen = dLen # how many bits included so far in x\n while 
xLen < k: # now grab full chunks until we have enough\n d = ord(next(chunks)) - 63\n dLen = 6\n x = (x << 6) + d\n xLen += 6\n x = (x >> (xLen - k)) # shift back the extra bits\n dLen = xLen - k\n yield b, x\n\n self.__A = [[0 for col in range(n)] for row in range(n)]\n\n self.__Edges = []\n\n v = 0\n\n for b, x in parseData():\n if b: v += 1\n if x >= n:\n break # padding with ones can cause overlarge number here\n elif x > v:\n v = x\n else:\n self.__A[x][v] = 1\n self.__A[v][x] = 1\n self.__Edges.append((x, v))\n\n self.__g6_string = ''\n\n read_sparse6 = read_s6\n\n def read_matrix(self, matrix):\n \"\"\"Initialize graph from adjacency matrix including numpy.matrix\"\"\"\n if type(matrix) == np.matrix:\n matrix = matrix.astype(int).tolist()\n self._reset_()\n self.__Order = len(matrix)\n self.__A = matrix\n\n for i in range(self.__Order):\n for j in range(i):\n if matrix[i][j] == 1:\n self.__Edges.append((i, j))\n\n def read_edgelist(self, edges):\n \"\"\"Initialize graph from list of edges.\n Example:\n m = mathchem.Mol()\n m.read_edgelist( [(4,3),(3,1),(1,4))] )\"\"\"\n # first relabel nodes\n nodes = []\n for e in edges:\n if not e[0] in nodes: nodes.append(e[0])\n if not e[1] in nodes: nodes.append(e[1])\n self._reset_()\n self.__Order = len(nodes)\n d = dict(zip(nodes, range(len(nodes))))\n self.__Edges = [(d[e[0]], d[e[1]]) for e in edges]\n\n self.__A = [[0 for col in range(self.__Order)]\n for row in range(self.__Order)]\n for i, j in self.__Edges:\n self.__A[i][j] = 1\n self.__A[j][i] = 1\n\n def write_dot_file(self, filename):\n\n f_out = open(filename, 'w')\n f_out.writelines('graph Mol {\\n')\n for (i, j) in self.edges():\n f_out.writelines(' ' + str(i) + ' -- ' + str(j) + ';\\n')\n f_out.writelines('}')\n f_out.close()\n\n #\n #\n # matrices\n #\n #\n\n def adjacency_matrix(self):\n \"\"\" Return Adjacency matrix\n \n Alias : A\n \"\"\"\n return self.__A\n\n A = adjacency_matrix\n\n def incidence_matrix(self):\n \"\"\" Return Incidence matrix \n \n Alias: B\n \"\"\"\n if self.__B == []:\n\n def func(u, v):\n col = [0] * self.__Order\n col[u] = 1\n col[v] = 1\n return col\n\n # apply func to each edge\n b = map(lambda e: func(e), self.edges())\n # transpose the result\n self.__B = map(list, zip(*b))\n return self.__B\n\n B = incidence_matrix\n\n def laplacian_matrix(self):\n \"\"\" Return Laplacian matrix\n \n L = D-A\n where D - matrix whose diagonal elements are the degrees of the corresponding vertices\n A - adjacency matrix\n \n Alias : L\n \"\"\"\n if self.__L == []:\n self.__L = np.diag(self.degrees()) - np.matrix(self.__A)\n return self.__L\n\n L = laplacian_matrix\n\n def signless_laplacian_matrix(self):\n \"\"\" Return Signless Laplacian matrix\n \n Q = D+A\n Alias : Q\n \"\"\"\n if self.__Q == []:\n\n self.__Q = np.diag(self.degrees()) + np.matrix(self.__A)\n return self.__Q\n\n Q = signless_laplacian_matrix\n\n def normalized_laplacian_matrix(self):\n \"\"\" Return Normalized Laplacian matrix\n \n NL = deg^(-1/2) * L * deg(1/2)\n Alias : NL\n \"\"\"\n ## TODO: check if we have zeros in degrees()\n if self.__NL == []:\n d1 = np.diag(np.power(self.degrees(), -.5))\n d2 = np.diag(np.power(self.degrees(), .5))\n self.__NL = d1 * self.laplacian_matrix() * d2\n return self.__NL\n\n NL = normalized_laplacian_matrix\n\n def distance_matrix(self):\n \"\"\" Return Distance matrix\n \n Alias : D\n \"\"\"\n if self.__Order == 0: return []\n\n if self.__D == []:\n # use here float only for using np.inf - infinity\n A = np.matrix(self.__A, dtype=float)\n n, m = A.shape\n I = 
np.identity(n)\n A[A == 0] = np.inf # set zero entries to inf\n A[I == 1] = 0 # except diagonal which should be zero\n for i in range(n):\n r = A[i, :]\n A = np.minimum(A, r + r.T)\n self.__D = np.matrix(A, dtype=int)\n\n return self.__D\n\n D = distance_matrix\n\n def reciprocal_distance_matrix(self):\n \"\"\" Return Reciprocal Distance matrix \"\"\"\n\n rd = np.matrix(self.distance_matrix(), dtype=float)\n # probably there exists more python way to apply a function to each element of matrix\n for i in range(self.__Order):\n for j in range(self.__Order):\n if not rd[i, j] == 0: rd[i, j] = 1 / rd[i, j]\n\n return rd\n\n def resistance_distance_matrix(self):\n \"\"\" Return Resistance Distance matrix \"\"\"\n\n if not self.is_connected() or self.__Order == 0:\n return False\n\n if self.__RD == []:\n #from numpy import linalg as la\n n = self.__Order\n s = n * self.laplacian_matrix() + 1\n sn = n * np.linalg.inv(s)\n RD = np.ndarray((n, n))\n for i in range(n):\n for j in range(n):\n RD[i, j] = np.float64(\n np.longdouble(sn[i, i]) + np.longdouble(sn[j, j]) -\n 2 * np.longdouble(sn[i, j]))\n self.__RD = RD\n\n return self.__RD\n\n def seidel_matrix(self):\n \"\"\" Return Seidel matrix \n S = J - I - 2A\n\n Alias: S\n \"\"\"\n n = self.__Order\n return np.ones((n, n)) - np.identity(n) - 2 * np.matrix(self.__A)\n\n S = seidel_matrix\n\n #\n #\n # Graph invariants\n #\n #\n\n def diameter(self):\n \"\"\" Return diameter of the graph\n \n Diameter is the maximum value of distance matrix\n \"\"\"\n if self.__Order == 0: return 0\n return self.distance_matrix().max()\n\n def degrees(self):\n \"\"\" Return degree of the vertex\n \n Alias : deg\n \"\"\"\n if self.__Degrees == []:\n self.__Degrees = map(lambda r: sum(r), self.__A)\n ## calcuate degrees for all vertices\n return self.__Degrees\n\n deg = degrees\n\n def eccentricity(self):\n \"\"\" Eccentricity of the graph for all its vertices\"\"\"\n if self.__Order == 0: return None\n\n return self.distance_matrix().max(axis=0).tolist()[0]\n\n def distances_from_vertex(self, v):\n \"\"\" Return list of all distances from a given vertex to all others\"\"\"\n # used to test graph where it is connected or not\n seen = {}\n level = 0\n nextlevel = [v]\n while nextlevel:\n thislevel = nextlevel\n nextlevel = []\n for v in thislevel:\n if v not in seen:\n seen[v] = level\n nb = [\n i\n for (i, j) in zip(range(len(self.__A[v])), self.__A[v])\n if j != 0\n ]\n nextlevel.extend(nb)\n #if (cutoff is not None and cutoff <= level): break\n level = level + 1\n return seen\n\n def is_connected(self):\n \"\"\" Return True/False depends on the graph is connected or not \"\"\"\n if self.__Order == 0: return False\n\n if not self.__Check_connectedness: return True\n\n if self.__Is_connected is None:\n # we take vertex 0 and check whether we can reach all other vertices\n self.__Is_connected = len(\n self.distances_from_vertex(0)) == self.order()\n return self.__Is_connected\n\n #\n #\n # Graph spectra\n #\n #\n\n def spectrum(self, matrix=\"adjacency\"):\n r\"\"\" Spectrum of the graph\n \n args:\n matrix (str or matrix)\n 'adjacency' or 'A' : default\n 'laplacian' or 'L'\n 'distance' or 'D'\n 'signless_laplacian' or 'Q'\n 'normalized_laplacian' or 'NL'\n 'resistance_distance' or 'RD'\n 'reciprocal_distance'\n\n arbitrary matrix\n \n \"\"\"\n\n from numpy import linalg as la\n\n if type(matrix) is str:\n\n if self.__Order == 0: return []\n\n if matrix == \"adjacency\" or matrix == \"A\":\n if self.__Spectrum == []:\n s = la.eigvalsh(self.__A).tolist()\n 
s.sort(reverse=True)\n self.__Spectrum = s\n return self.__Spectrum\n\n elif matrix == \"laplacian\" or matrix == \"L\":\n if self.__Laplacian_spectrum == []:\n s = la.eigvalsh(self.laplacian_matrix()).tolist()\n s.sort(reverse=True)\n self.__Laplacian_spectrum = map(\n lambda x: x if x > 0 else 0, s)\n return self.__Laplacian_spectrum\n\n elif matrix == \"distance\" or matrix == \"D\":\n if self.__Distance_spectrum == []:\n s = la.eigvalsh(self.distance_matrix()).tolist()\n s.sort(reverse=True)\n self.__Distance_spectrum = s\n return self.__Distance_spectrum\n\n elif matrix == \"signless_laplacian\" or matrix == \"Q\":\n if self.__Signless_laplacian_spectrum == []:\n ## TODO: check if we have zeros in degrees()\n s = la.eigvalsh(self.signless_laplacian_matrix()).tolist()\n s.sort(reverse=True)\n self.__Signless_laplacian_spectrum = map(\n lambda x: x if x > 0 else 0, s)\n return self.__Signless_laplacian_spectrum\n\n elif matrix == \"normalized_laplacian\" or matrix == \"NL\":\n if self.__Norm_laplacian_spectrum == []:\n ## TODO: check if we have zeros in degrees()\n s = la.eigvalsh(\n self.normalized_laplacian_matrix()).tolist()\n s.sort(reverse=True)\n self.__Norm_laplacian_spectrum = s\n return self.__Norm_laplacian_spectrum\n\n elif matrix == \"resistance_distance\" or matrix == \"RD\":\n if self.__RD_spectrum == []:\n s = la.eigvalsh(self.resistance_distance_matrix()).tolist()\n s.sort(reverse=True)\n self.__RD_spectrum = s\n return self.__RD_spectrum\n # NO CACHE\n elif matrix == \"reciprocal_distance\":\n s = la.eigvalsh(self.reciprocal_distance_matrix()).tolist()\n s.sort(reverse=True)\n return s\n else:\n return False\n\n # if the parameter is an arbitrary matrix\n # DEPRECATED:\n # use mathchem.spectrum(matrix) for arbitrary matrices\n #\n else:\n s = la.eigvalsh(matrix).tolist()\n s.sort(reverse=True)\n return s\n\n # for arbitrary matrices use:\n # mathchem.spectral_moment(matrix)\n def spectral_moment(self, k, matrix=\"adjacency\"):\n \"\"\" Return k-th spectral moment\n \n parameters: matrix - see spectrum help\n \"\"\"\n return np.sum(np.power(self.spectrum(matrix), k))\n\n # for arbitrary matrices use:\n # mathchem.spectral_radius(matrix)\n def spectral_radius(self, matrix=\"adjacency\"):\n s = self.spectrum(matrix)\n return max(abs(s[0]), abs(s[len(s) - 1]))\n\n # for arbitrary matrices use:\n # mathchem.energy(matrix)\n def energy(self, matrix=\"adjacency\"):\n \"\"\" Return energy of the graph \n \n parameters: matrix - see spectrum help\n \"\"\"\n if self.__Order == 0: return False\n s = self.spectrum(matrix)\n a = np.sum(s, dtype=np.longdouble) / len(s)\n return np.float64(\n np.sum(map(lambda x: abs(x - a), s), dtype=np.longdouble))\n\n def incidence_energy(self):\n \"\"\" Return incidence energy (IE)\n \n Incidence energy is the sum of singular values of incidence matrix\n \"\"\"\n if self.__Order == 0: return False\n from numpy.linalg import svd\n return np.float64(\n np.sum(svd(self.incidence_matrix(), compute_uv=False),\n dtype=np.longdouble))\n\n #\n #\n # Chemical indices\n #\n #\n\n def zagreb_m1_index(self):\n \"\"\" Zagreb M1 Index \"\"\"\n return sum(map(lambda d: d**2, self.degrees()))\n\n def zagreb_m2_index(self):\n \"\"\" Zagreb M2 Index \n \n The molecular graph must contain at least one edge, otherwise the function Return False\n Zagreb M2 Index is a special case of Connectivity Index with power = 1\"\"\"\n return sum(\n map(lambda e1, e2: self.degrees()[e1] * self.degrees()[e2],\n self.edges()))\n\n def zagreb_m1_coindex(self):\n \"\"\" Zagreb M1 
Coindex \"\"\"\n return 2 * self.size() * (self.__Order - 1) - self.zagreb_m1_index()\n\n def zagreb_m2_coindex(self):\n \"\"\" Zagreb M2 Coindex \"\"\"\n return 2 * (self.size()**\n 2) - self.zagreb_m2_index() - self.zagreb_m1_index() * .5\n\n def connectivity_index(self, power):\n \"\"\" Connectivity index (R)\"\"\"\n E = self.edges() # E - all edges\n if len(E) == 0: return 0\n return np.float64(\n np.sum(map(\n lambda e1, e2:\n (self.degrees()[e1] * self.degrees()[e2])**power, E),\n dtype=np.longdouble))\n\n def augmented_zagreb_index(self):\n \"\"\" Augmented Zagreb Index\"\"\"\n E = self.edges() # E - all edges\n d = self.degrees()\n if len(E) < 2: return 0\n return np.float64(\n np.sum(map(\n lambda e1, e2: (np.longdouble(d[e1] * d[e2]) /\n (d[e1] + d[e2] - 2))**3, E),\n dtype=np.longdouble))\n\n def sum_connectivity_index(self):\n \"\"\" Sum-Connectivity index\"\"\"\n E = self.edges() # E - all edges\n if len(E) == 0: return 0\n return np.float64(\n np.sum(map(\n lambda e1, e2:\n (self.degrees()[e1] + self.degrees()[e2])**(-0.5), E),\n dtype=np.longdouble))\n\n def geometric_arithmetic_index(self):\n \"\"\" Geometric-Arithmetic index\"\"\"\n E = self.edges() # E - all edges\n if len(E) == 0: return 0\n return np.float64(\n np.sum(map(\n lambda e1, e2: 2.0 * np.sqrt(self.degrees()[e1] * self.degrees(\n )[e2]) / (self.degrees()[e1] + self.degrees()[e2]), E),\n dtype=np.longdouble))\n\n def eccentric_connectivity_index(self):\n \"\"\" Eccentric Connectivity Index \n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n return sum(map(lambda a, b: a * b, self.degrees(),\n self.eccentricity()))\n\n def randic_index(self):\n \"\"\" Randic Index \n \n The molecular graph must contain at least one edge, otherwise the function Return False\n Randic Index is a special case of Connectivity Index with power = -1/2\"\"\"\n return self.connectivity_index(-0.5)\n\n def atom_bond_connectivity_index(self):\n \"\"\" Atom-Bond Connectivity Index (ABC) \"\"\"\n s = np.longdouble(0) # summator\n for u, v in self.edges():\n d1 = np.float64(self.degrees()[u])\n d2 = np.float64(self.degrees()[v])\n s += np.longdouble(((d1 + d2 - 2) / (d1 * d2))**.5)\n return np.float64(s)\n\n def estrada_index(self, matrix=\"adjacency\"):\n \"\"\" Estrada Index (EE) \n \n args:\n matrix -- see spectrum for help, default value is 'adjacency'\n \n There is an alias 'distance_estrada_index' for distance matrix\n \"\"\"\n return np.float64(\n np.sum(map(lambda x: np.exp(x.real), self.spectrum(matrix)),\n dtype=np.longdouble))\n\n def distance_estrada_index(self):\n \"\"\" Distance Estrada Index (DEE) \n \n Special case of Estrada index with distance matrix\n \"\"\"\n return self.estrada_index('distance')\n\n def degree_distance(self):\n \"\"\" Degree Distance (DD)\n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n dd = np.matrix(self.degrees()) * self.distance_matrix().sum(axis=1)\n return dd[0, 0]\n\n def reverse_degree_distance(self):\n \"\"\" Reverse Distance Degree (rDD)\n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n return 2 * (self.order() - 1) * len(\n self.edges()) * self.diameter() - self.degree_distance()\n\n def molecular_topological_index(self):\n \"\"\" (Schultz) Molecular Topological Index (MTI)\n \n The molecuar graph must be connected, otherwise the function Return 
False\"\"\"\n if not self.is_connected():\n return False\n # (A+D)*d\n\n A = np.matrix(self.__A)\n d = np.matrix(self.degrees())\n return np.float64(\n ((A + self.distance_matrix()) * d.T).sum(dtype=np.longdouble))\n\n def eccentric_distance_sum(self):\n \"\"\" Distance Sum\n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n return (self.eccentricity() * self.distance_matrix().sum(axis=1))[0, 0]\n\n # strange - it is slow ((\n def balaban_j_index(self):\n \"\"\" Balaban J index \n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n ds = self.distance_matrix().sum(axis=1)\n m = len(self.edges())\n k = (m / (m - self.__Order + 2.0))\n return np.float64(\n k *\n np.sum(map(lambda u, v: 1 / np.sqrt(\n (ds[u][0, 0] * ds[v][0, 0])), self.edges()),\n dtype=np.longdouble))\n\n def sum_balaban_index(self):\n \"\"\" Sum Balaban index \n \n The molecuar graph must be connected, otherwise the function Return False\"\"\"\n if not self.is_connected():\n return False\n ds = self.distance_matrix().sum(axis=1)\n m = len(self.edges())\n k = (m / (m - self.__Order + 2.0))\n return np.float64(\n k *\n np.sum(map(lambda u, v: 1 / np.sqrt(\n (ds[u][0, 0] + ds[v][0, 0])), self.edges()),\n dtype=np.longdouble))\n\n def kirchhoff_index(self):\n \"\"\" Kirchhoff Index (Kf)\n \n Kf = 1/2 * sum_i sum_j RD[i,j]\n Based on resistance distance matrix RD\n \n Alias: resistance\n \n The molecuar graph must be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n return np.float64(\n self.resistance_distance_matrix().sum(dtype=np.longdouble) / 2)\n\n resistance = kirchhoff_index\n\n def wiener_index(self):\n \"\"\" Wiener Index (W)\n \n W = 1/2 * sum_i sum_j D[i,j]\n where D is distance matrix\n The molecuar graph must be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n return self.distance_matrix().sum(dtype=np.float64) / 2\n\n def terminal_wiener_index(self):\n \"\"\" Calculate Terminal Wiener Index (TW)\n \n TW = Sum of all distances between pendent vertices (with degree = 1)\n \"\"\"\n if not self.is_connected(): return False\n s = 0\n for u in range(self.order()):\n if self.degrees()[u] != 1: continue\n for v in range(u + 1, self.order()):\n if self.degrees()[v] == 1:\n s = s + self.distance_matrix()[u, v]\n return s\n\n def reverse_wiener_index(self):\n \"\"\" Reverse Wiener Index (RW)\n \n RW = 1/2 * sum_i!=j ( d - D[i,j] )\n where D is distance matrix and d is diameter\n \n The molecuar graph must be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n # here we use formula: RW = 1/2 * n * (n-1) * d - W\n return self.diameter() * (\n self.__Order * (self.__Order - 1)) / 2 - self.wiener_index()\n\n def hyper_wiener_index(self):\n \"\"\" Hyper-Wiener Index (WW)\n \n WW = 1/2 * ( sum_ij d(i,j)^2 + sum_i_j d(i,j) )\n where D is distance matrix\n\n The molecuar graph must be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n return (\n np.power(self.distance_matrix(), 2).sum() +\n self.distance_matrix().sum()) / 4 # since we have symmetric matrix\n\n def harary_index(self):\n \"\"\" Harary Index (H)\n \n H = 1/2 sum_i sum_j Rd[i,j]\n where Rd is reciprocal distance matrix \n Rd[i,j] = 1 / D[i,j] for D[i,j] != 0\n Rd[i,j] = 0 otherwise\n\n The molecuar graph must 
be connected, otherwise the function Return False\n \"\"\"\n if not self.is_connected():\n return False\n return np.float64(\n self.reciprocal_distance_matrix().sum(dtype=np.longdouble)) / 2\n\n def LEL(self):\n \"\"\" Return Laplacian-like energy (LEL) \"\"\"\n return np.float64(\n np.sum(map(lambda x: np.sqrt(x), self.spectrum('laplacian')),\n dtype=np.longdouble))\n\n def multiplicative_sum_zagreb_index(self):\n \"\"\" Log( Multiplicative Sum Zagreb index )\"\"\"\n d = self.degrees()\n return np.float64(\n np.sum(map(lambda u, v: np.log(np.float64(d[u] + d[v])),\n self.edges()),\n dtype=np.longdouble))\n\n def multiplicative_p2_zagreb_index(self):\n \"\"\"Calculates Log( Multiplicative P2 Zagreb index )\"\"\"\n d = self.degrees()\n return np.float64(\n np.sum(map(lambda u, v: np.log(np.float64(d[u] * d[v])),\n self.edges()),\n dtype=np.longdouble))\n\n def multiplicative_p1_zagreb_index(self):\n \"\"\"Calculates Log( Multiplicative P1 Zagreb index )\"\"\"\n d = self.degrees()\n return np.float64(\n np.sum(map(lambda v: np.log(np.float64(d[v]**2)), self.vertices()),\n dtype=np.longdouble))\n\n def szeged_index(self):\n \"\"\"Calculates Szeged index\"\"\"\n if not self.is_connected():\n return False\n s = 0\n D = self.distance_matrix()\n for u, v in self.edges():\n diff = D[u, :] - D[v, :]\n s += (diff > 0).sum() * (diff < 0).sum()\n return float(s)\n\n def revised_szeged_index(self):\n \"\"\"Calculates Revised Szeged index\"\"\"\n if not self.is_connected():\n return False\n s = 0.0\n D = self.distance_matrix()\n for u, v in self.edges():\n diff = D[u, :] - D[v, :]\n o = (diff == 0).sum()\n s += ((diff > 0).sum() + .5 * o) * ((diff < 0).sum() + .5 * o)\n return s\n\n def homo_lumo_index(self):\n \"\"\"Calculates HOMO-LUMO index\"\"\"\n if not self.is_connected():\n return False\n n = self.order()\n if n % 2 == 0:\n h = int(n / 2 -\n 1) # because array indices start from 0 instead of 1\n l = int(h + 1)\n return max([abs(self.spectrum()[h]), abs(self.spectrum()[l])])\n # else:\n h = int((n - 1) / 2)\n return abs(self.spectrum()[h])\n\n HL_index = homo_lumo_index\n\n # Adriatic indices\n\n # DEPRECATED\n # use mathchem.all_adriatic()\n\n def all_adriatic(self):\n \"\"\" Generate all possible parameters sets for adriatic indices\"\"\"\n r = []\n for p in [0, 1]:\n for i in [1, 2, 3]:\n for j in range(1, 9):\n if i == 3:\n for a in [0.5, 2]:\n r.append((p, i, j, a))\n elif i == 2 and j in range(1, 6):\n for a in [-1, -0.5, 0.5, 1, 2]:\n r.append((p, i, j, a))\n elif i == 2 or i == 1:\n for a in [0.5, 1, 2]:\n r.append((p, i, j, a))\n return r\n\n def adriatic_name(self, p, i, j, a):\n \"\"\" Return the name for given parameters of Adriatic indices\"\"\"\n #(j)\n name1 = {1:'Randic type ',\\\n 2:'sum ',\\\n 3:'inverse sum ', \\\n 4:'misbalance ', \\\n 5:'inverse misbalance ', \\\n 6:'min-max ', \\\n 7:'max-min ', \\\n 8:'symmetric division '}\n # (i,a)\n name2 = {(1, 0.5):'lor',\\\n (1,1):'lo', \\\n (1,2):'los', \\\n (2,-1):'in', \\\n (2, -0.5):'ir', \\\n (2, 0.5):'ro', \\\n (2,1):'', \\\n (2,2):'s', \\\n (3, 0.5):'ha', \\\n (3,2):'two'}\n #(p)\n name3 = {0: 'deg', 1: 'di'}\n\n return (name1[j] + name2[(i, a)] + name3[p])\n\n def _adriatic_entry_(self, du, dv, i, j, a):\n \"\"\" Return an individual edge contribution for Adriatic indices and matrices\"\"\"\n # phi(x,a)\n phi = {\n 1: lambda x, a: np.log(x)**a,\n 2: lambda x, a: x**a,\n 3: lambda x, a: a**x\n }\n # gamma (x,y)\n gamma = {\\\n 1: lambda x,y: x*y,\\\n 2: lambda x,y: x+y,\\\n 3: lambda x,y: 0 if x+y==0 else 1.0/(x+y),\\\n 4: 
lambda x,y: abs(x-y),\\\n 5: lambda x,y: 0 if x==y else 1.0/abs(x-y),\\\n 6: lambda x,y: 0 if max(x,y)==0 else min(x,y)/max(x,y),\\\n 7: lambda x,y: 0 if min(x,y)==0 else max(x,y)/min(x,y),\\\n 8: lambda x,y: 0 if x==0 or y==0 else x/y+y/x}\n\n return gamma[j](phi[i](du, a), phi[i](dv, a))\n\n def adriatic_matrix(self, p, i, j, a):\n \"\"\" Return the Adriatic matrix with given parameters\"\"\"\n\n if p == 0: d = self.degrees()\n else: d = self.distance_matrix().sum(axis=0).tolist()[0]\n\n AM = [[0] * self.order() for k in range(self.order())]\n\n for u, v in self.edges():\n AM[u][v] = AM[v][u] = self._adriatic_entry_(\n np.float64(d[u]), np.float64(d[v]), i, j, a)\n\n return AM\n\n def adriatic_index(self, p, i, j, a):\n \"\"\" Return the Adriatic index with given parameters\"\"\"\n\n if p == 0: d = self.degrees()\n else: d = self.distance_matrix().sum(axis=0).tolist()[0]\n\n func = lambda u, v: self._adriatic_entry_(np.float64(d[u]),\n np.float64(d[v]), i, j, a)\n return np.float64(np.sum(map(func, self.edges()), dtype=np.longdouble))\n\n # Adriatic indices by names\n\n def randic_type_lordeg_index(self):\n \"\"\" Adriatic index: Randic type lordeg index\"\"\"\n return self.adriatic_index(0, 1, 1, 0.5)\n\n def randic_type_lodeg_index(self):\n \"\"\" Adriatic index: Randic type lodeg index\"\"\"\n return self.adriatic_index(0, 1, 1, 1)\n\n def randic_type_losdeg_index(self):\n \"\"\" Adriatic index: Randic type losdeg index\"\"\"\n return self.adriatic_index(0, 1, 1, 2)\n\n def sum_lordeg_index(self):\n \"\"\" Adriatic index: sum lordeg index\"\"\"\n return self.adriatic_index(0, 1, 2, 0.5)\n\n def sum_lodeg_index(self):\n \"\"\" Adriatic index: sum lodeg index\"\"\"\n return self.adriatic_index(0, 1, 2, 1)\n\n def sum_losdeg_index(self):\n \"\"\" Adriatic index: sum losdeg index\"\"\"\n return self.adriatic_index(0, 1, 2, 2)\n\n def inverse_sum_lordeg_index(self):\n \"\"\" Adriatic index: inverse sum lordeg index\"\"\"\n return self.adriatic_index(0, 1, 3, 0.5)\n\n def inverse_sum_lodeg_index(self):\n \"\"\" Adriatic index: inverse sum lodeg index\"\"\"\n return self.adriatic_index(0, 1, 3, 1)\n\n def inverse_sum_losdeg_index(self):\n \"\"\" Adriatic index: inverse sum losdeg index\"\"\"\n return self.adriatic_index(0, 1, 3, 2)\n\n def misbalance_lordeg_index(self):\n \"\"\" Adriatic index: misbalance lordeg index\"\"\"\n return self.adriatic_index(0, 1, 4, 0.5)\n\n def misbalance_lodeg_index(self):\n \"\"\" Adriatic index: misbalance lodeg index\"\"\"\n return self.adriatic_index(0, 1, 4, 1)\n\n def misbalance_losdeg_index(self):\n \"\"\" Adriatic index: misbalance losdeg index\"\"\"\n return self.adriatic_index(0, 1, 4, 2)\n\n def inverse_misbalance_lordeg_index(self):\n \"\"\" Adriatic index: inverse misbalance lordeg index\"\"\"\n return self.adriatic_index(0, 1, 5, 0.5)\n\n def inverse_misbalance_lodeg_index(self):\n \"\"\" Adriatic index: inverse misbalance lodeg index\"\"\"\n return self.adriatic_index(0, 1, 5, 1)\n\n def inverse_misbalance_losdeg_index(self):\n \"\"\" Adriatic index: inverse misbalance losdeg index\"\"\"\n return self.adriatic_index(0, 1, 5, 2)\n\n def min_max_lordeg_index(self):\n \"\"\" Adriatic index: min-max lordeg index\"\"\"\n return self.adriatic_index(0, 1, 6, 0.5)\n\n def min_max_lodeg_index(self):\n \"\"\" Adriatic index: min-max lodeg index\"\"\"\n return self.adriatic_index(0, 1, 6, 1)\n\n def min_max_losdeg_index(self):\n \"\"\" Adriatic index: min-max losdeg index\"\"\"\n return self.adriatic_index(0, 1, 6, 2)\n\n def max_min_lordeg_index(self):\n 
\"\"\" Adriatic index: max-min lordeg index\"\"\"\n return self.adriatic_index(0, 1, 7, 0.5)\n\n def max_min_lodeg_index(self):\n \"\"\" Adriatic index: max-min lodeg index\"\"\"\n return self.adriatic_index(0, 1, 7, 1)\n\n def max_min_losdeg_index(self):\n \"\"\" Adriatic index: max-min losdeg index\"\"\"\n return self.adriatic_index(0, 1, 7, 2)\n\n def symmetric_division_lordeg_index(self):\n \"\"\" Adriatic index: symmetric division lordeg index\"\"\"\n return self.adriatic_index(0, 1, 8, 0.5)\n\n def symmetric_division_lodeg_index(self):\n \"\"\" Adriatic index: symmetric division lodeg index\"\"\"\n return self.adriatic_index(0, 1, 8, 1)\n\n def symmetric_division_losdeg_index(self):\n \"\"\" Adriatic index: symmetric division losdeg index\"\"\"\n return self.adriatic_index(0, 1, 8, 2)\n\n def randic_type_indeg_index(self):\n \"\"\" Adriatic index: Randic type indeg index\"\"\"\n return self.adriatic_index(0, 2, 1, -1)\n\n def randic_type_irdeg_index(self):\n \"\"\" Adriatic index: Randic type irdeg index\"\"\"\n return self.adriatic_index(0, 2, 1, -0.5)\n\n def randic_type_rodeg_index(self):\n \"\"\" Adriatic index: Randic type rodeg index\"\"\"\n return self.adriatic_index(0, 2, 1, 0.5)\n\n def randic_type_deg_index(self):\n \"\"\" Adriatic index: Randic type deg index\"\"\"\n return self.adriatic_index(0, 2, 1, 1)\n\n def randic_type_sdeg_index(self):\n \"\"\" Adriatic index: Randic type sdeg index\"\"\"\n return self.adriatic_index(0, 2, 1, 2)\n\n def sum_indeg_index(self):\n \"\"\" Adriatic index: sum indeg index\"\"\"\n return self.adriatic_index(0, 2, 2, -1)\n\n def sum_irdeg_index(self):\n \"\"\" Adriatic index: sum irdeg index\"\"\"\n return self.adriatic_index(0, 2, 2, -0.5)\n\n def sum_rodeg_index(self):\n \"\"\" Adriatic index: sum rodeg index\"\"\"\n return self.adriatic_index(0, 2, 2, 0.5)\n\n def sum_deg_index(self):\n \"\"\" Adriatic index: sum deg index\"\"\"\n return self.adriatic_index(0, 2, 2, 1)\n\n def sum_sdeg_index(self):\n \"\"\" Adriatic index: sum sdeg index\"\"\"\n return self.adriatic_index(0, 2, 2, 2)\n\n def inverse_sum_indeg_index(self):\n \"\"\" Adriatic index: inverse sum indeg index\"\"\"\n return self.adriatic_index(0, 2, 3, -1)\n\n def inverse_sum_irdeg_index(self):\n \"\"\" Adriatic index: inverse sum irdeg index\"\"\"\n return self.adriatic_index(0, 2, 3, -0.5)\n\n def inverse_sum_rodeg_index(self):\n \"\"\" Adriatic index: inverse sum rodeg index\"\"\"\n return self.adriatic_index(0, 2, 3, 0.5)\n\n def inverse_sum_deg_index(self):\n \"\"\" Adriatic index: inverse sum deg index\"\"\"\n return self.adriatic_index(0, 2, 3, 1)\n\n def inverse_sum_sdeg_index(self):\n \"\"\" Adriatic index: inverse sum sdeg index\"\"\"\n return self.adriatic_index(0, 2, 3, 2)\n\n def misbalance_indeg_index(self):\n \"\"\" Adriatic index: misbalance indeg index\"\"\"\n return self.adriatic_index(0, 2, 4, -1)\n\n def misbalance_irdeg_index(self):\n \"\"\" Adriatic index: misbalance irdeg index\"\"\"\n return self.adriatic_index(0, 2, 4, -0.5)\n\n def misbalance_rodeg_index(self):\n \"\"\" Adriatic index: misbalance rodeg index\"\"\"\n return self.adriatic_index(0, 2, 4, 0.5)\n\n def misbalance_deg_index(self):\n \"\"\" Adriatic index: misbalance deg index\"\"\"\n return self.adriatic_index(0, 2, 4, 1)\n\n def misbalance_sdeg_index(self):\n \"\"\" Adriatic index: misbalance sdeg index\"\"\"\n return self.adriatic_index(0, 2, 4, 2)\n\n def inverse_misbalance_indeg_index(self):\n \"\"\" Adriatic index: inverse misbalance indeg index\"\"\"\n return self.adriatic_index(0, 2, 
5, -1)\n\n def inverse_misbalance_irdeg_index(self):\n \"\"\" Adriatic index: inverse misbalance irdeg index\"\"\"\n return self.adriatic_index(0, 2, 5, -0.5)\n\n def inverse_misbalance_rodeg_index(self):\n \"\"\" Adriatic index: inverse misbalance rodeg index\"\"\"\n return self.adriatic_index(0, 2, 5, 0.5)\n\n def inverse_misbalance_deg_index(self):\n \"\"\" Adriatic index: inverse misbalance deg index\"\"\"\n return self.adriatic_index(0, 2, 5, 1)\n\n def inverse_misbalance_sdeg_index(self):\n \"\"\" Adriatic index: inverse misbalance sdeg index\"\"\"\n return self.adriatic_index(0, 2, 5, 2)\n\n def min_max_rodeg_index(self):\n \"\"\" Adriatic index: min-max rodeg index\"\"\"\n return self.adriatic_index(0, 2, 6, 0.5)\n\n def min_max_deg_index(self):\n \"\"\" Adriatic index: min-max deg index\"\"\"\n return self.adriatic_index(0, 2, 6, 1)\n\n def min_max_sdeg_index(self):\n \"\"\" Adriatic index: min-max sdeg index\"\"\"\n return self.adriatic_index(0, 2, 6, 2)\n\n def max_min_rodeg_index(self):\n \"\"\" Adriatic index: max-min rodeg index\"\"\"\n return self.adriatic_index(0, 2, 7, 0.5)\n\n def max_min_deg_index(self):\n \"\"\" Adriatic index: max-min deg index\"\"\"\n return self.adriatic_index(0, 2, 7, 1)\n\n def max_min_sdeg_index(self):\n \"\"\" Adriatic index: max-min sdeg index\"\"\"\n return self.adriatic_index(0, 2, 7, 2)\n\n def symmetric_division_rodeg_index(self):\n \"\"\" Adriatic index: symmetric division rodeg index\"\"\"\n return self.adriatic_index(0, 2, 8, 0.5)\n\n def symmetric_division_deg_index(self):\n \"\"\" Adriatic index: symmetric division deg index\"\"\"\n return self.adriatic_index(0, 2, 8, 1)\n\n def symmetric_division_sdeg_index(self):\n \"\"\" Adriatic index: symmetric division sdeg index\"\"\"\n return self.adriatic_index(0, 2, 8, 2)\n\n def randic_type_hadeg_index(self):\n \"\"\" Adriatic index: Randic type hadeg index\"\"\"\n return self.adriatic_index(0, 3, 1, 0.5)\n\n def randic_type_twodeg_index(self):\n \"\"\" Adriatic index: Randic type twodeg index\"\"\"\n return self.adriatic_index(0, 3, 1, 2)\n\n def sum_hadeg_index(self):\n \"\"\" Adriatic index: sum hadeg index\"\"\"\n return self.adriatic_index(0, 3, 2, 0.5)\n\n def sum_twodeg_index(self):\n \"\"\" Adriatic index: sum twodeg index\"\"\"\n return self.adriatic_index(0, 3, 2, 2)\n\n def inverse_sum_hadeg_index(self):\n \"\"\" Adriatic index: inverse sum hadeg index\"\"\"\n return self.adriatic_index(0, 3, 3, 0.5)\n\n def inverse_sum_twodeg_index(self):\n \"\"\" Adriatic index: inverse sum twodeg index\"\"\"\n return self.adriatic_index(0, 3, 3, 2)\n\n def misbalance_hadeg_index(self):\n \"\"\" Adriatic index: misbalance hadeg index\"\"\"\n return self.adriatic_index(0, 3, 4, 0.5)\n\n def misbalance_twodeg_index(self):\n \"\"\" Adriatic index: misbalance twodeg index\"\"\"\n return self.adriatic_index(0, 3, 4, 2)\n\n def inverse_misbalance_hadeg_index(self):\n \"\"\" Adriatic index: inverse misbalance hadeg index\"\"\"\n return self.adriatic_index(0, 3, 5, 0.5)\n\n def inverse_misbalance_twodeg_index(self):\n \"\"\" Adriatic index: inverse misbalance twodeg index\"\"\"\n return self.adriatic_index(0, 3, 5, 2)\n\n def min_max_hadeg_index(self):\n \"\"\" Adriatic index: min-max hadeg index\"\"\"\n return self.adriatic_index(0, 3, 6, 0.5)\n\n def min_max_twodeg_index(self):\n \"\"\" Adriatic index: min-max twodeg index\"\"\"\n return self.adriatic_index(0, 3, 6, 2)\n\n def max_min_hadeg_index(self):\n \"\"\" Adriatic index: max-min hadeg index\"\"\"\n return self.adriatic_index(0, 3, 7, 0.5)\n\n 
def max_min_twodeg_index(self):\n \"\"\" Adriatic index: max-min twodeg index\"\"\"\n return self.adriatic_index(0, 3, 7, 2)\n\n def symmetric_division_hadeg_index(self):\n \"\"\" Adriatic index: symmetric division hadeg index\"\"\"\n return self.adriatic_index(0, 3, 8, 0.5)\n\n def symmetric_division_twodeg_index(self):\n \"\"\" Adriatic index: symmetric division twodeg index\"\"\"\n return self.adriatic_index(0, 3, 8, 2)\n\n def randic_type_lordi_index(self):\n \"\"\" Adriatic index: Randic type lordi index\"\"\"\n return self.adriatic_index(1, 1, 1, 0.5)\n\n def randic_type_lodi_index(self):\n \"\"\" Adriatic index: Randic type lodi index\"\"\"\n return self.adriatic_index(1, 1, 1, 1)\n\n def randic_type_losdi_index(self):\n \"\"\" Adriatic index: Randic type losdi index\"\"\"\n return self.adriatic_index(1, 1, 1, 2)\n\n def sum_lordi_index(self):\n \"\"\" Adriatic index: sum lordi index\"\"\"\n return self.adriatic_index(1, 1, 2, 0.5)\n\n def sum_lodi_index(self):\n \"\"\" Adriatic index: sum lodi index\"\"\"\n return self.adriatic_index(1, 1, 2, 1)\n\n def sum_losdi_index(self):\n \"\"\" Adriatic index: sum losdi index\"\"\"\n return self.adriatic_index(1, 1, 2, 2)\n\n def inverse_sum_lordi_index(self):\n \"\"\" Adriatic index: inverse sum lordi index\"\"\"\n return self.adriatic_index(1, 1, 3, 0.5)\n\n def inverse_sum_lodi_index(self):\n \"\"\" Adriatic index: inverse sum lodi index\"\"\"\n return self.adriatic_index(1, 1, 3, 1)\n\n def inverse_sum_losdi_index(self):\n \"\"\" Adriatic index: inverse sum losdi index\"\"\"\n return self.adriatic_index(1, 1, 3, 2)\n\n def misbalance_lordi_index(self):\n \"\"\" Adriatic index: misbalance lordi index\"\"\"\n return self.adriatic_index(1, 1, 4, 0.5)\n\n def misbalance_lodi_index(self):\n \"\"\" Adriatic index: misbalance lodi index\"\"\"\n return self.adriatic_index(1, 1, 4, 1)\n\n def misbalance_losdi_index(self):\n \"\"\" Adriatic index: misbalance losdi index\"\"\"\n return self.adriatic_index(1, 1, 4, 2)\n\n def inverse_misbalance_lordi_index(self):\n \"\"\" Adriatic index: inverse misbalance lordi index\"\"\"\n return self.adriatic_index(1, 1, 5, 0.5)\n\n def inverse_misbalance_lodi_index(self):\n \"\"\" Adriatic index: inverse misbalance lodi index\"\"\"\n return self.adriatic_index(1, 1, 5, 1)\n\n def inverse_misbalance_losdi_index(self):\n \"\"\" Adriatic index: inverse misbalance losdi index\"\"\"\n return self.adriatic_index(1, 1, 5, 2)\n\n def min_max_lordi_index(self):\n \"\"\" Adriatic index: min-max lordi index\"\"\"\n return self.adriatic_index(1, 1, 6, 0.5)\n\n def min_max_lodi_index(self):\n \"\"\" Adriatic index: min-max lodi index\"\"\"\n return self.adriatic_index(1, 1, 6, 1)\n\n def min_max_losdi_index(self):\n \"\"\" Adriatic index: min-max losdi index\"\"\"\n return self.adriatic_index(1, 1, 6, 2)\n\n def max_min_lordi_index(self):\n \"\"\" Adriatic index: max-min lordi index\"\"\"\n return self.adriatic_index(1, 1, 7, 0.5)\n\n def max_min_lodi_index(self):\n \"\"\" Adriatic index: max-min lodi index\"\"\"\n return self.adriatic_index(1, 1, 7, 1)\n\n def max_min_losdi_index(self):\n \"\"\" Adriatic index: max-min losdi index\"\"\"\n return self.adriatic_index(1, 1, 7, 2)\n\n def symmetric_division_lordi_index(self):\n \"\"\" Adriatic index: symmetric division lordi index\"\"\"\n return self.adriatic_index(1, 1, 8, 0.5)\n\n def symmetric_division_lodi_index(self):\n \"\"\" Adriatic index: symmetric division lodi index\"\"\"\n return self.adriatic_index(1, 1, 8, 1)\n\n def symmetric_division_losdi_index(self):\n \"\"\" 
Adriatic index: symmetric division losdi index\"\"\"\n return self.adriatic_index(1, 1, 8, 2)\n\n def randic_type_indi_index(self):\n \"\"\" Adriatic index: Randic type indi index\"\"\"\n return self.adriatic_index(1, 2, 1, -1)\n\n def randic_type_irdi_index(self):\n \"\"\" Adriatic index: Randic type irdi index\"\"\"\n return self.adriatic_index(1, 2, 1, -0.5)\n\n def randic_type_rodi_index(self):\n \"\"\" Adriatic index: Randic type rodi index\"\"\"\n return self.adriatic_index(1, 2, 1, 0.5)\n\n def randic_type_di_index(self):\n \"\"\" Adriatic index: Randic type di index\"\"\"\n return self.adriatic_index(1, 2, 1, 1)\n\n def randic_type_sdi_index(self):\n \"\"\" Adriatic index: Randic type sdi index\"\"\"\n return self.adriatic_index(1, 2, 1, 2)\n\n def sum_indi_index(self):\n \"\"\" Adriatic index: sum indi index\"\"\"\n return self.adriatic_index(1, 2, 2, -1)\n\n def sum_irdi_index(self):\n \"\"\" Adriatic index: sum irdi index\"\"\"\n return self.adriatic_index(1, 2, 2, -0.5)\n\n def sum_rodi_index(self):\n \"\"\" Adriatic index: sum rodi index\"\"\"\n return self.adriatic_index(1, 2, 2, 0.5)\n\n def sum_di_index(self):\n \"\"\" Adriatic index: sum di index\"\"\"\n return self.adriatic_index(1, 2, 2, 1)\n\n def sum_sdi_index(self):\n \"\"\" Adriatic index: sum sdi index\"\"\"\n return self.adriatic_index(1, 2, 2, 2)\n\n def inverse_sum_indi_index(self):\n \"\"\" Adriatic index: inverse sum indi index\"\"\"\n return self.adriatic_index(1, 2, 3, -1)\n\n def inverse_sum_irdi_index(self):\n \"\"\" Adriatic index: inverse sum irdi index\"\"\"\n return self.adriatic_index(1, 2, 3, -0.5)\n\n def inverse_sum_rodi_index(self):\n \"\"\" Adriatic index: inverse sum rodi index\"\"\"\n return self.adriatic_index(1, 2, 3, 0.5)\n\n def inverse_sum_di_index(self):\n \"\"\" Adriatic index: inverse sum di index\"\"\"\n return self.adriatic_index(1, 2, 3, 1)\n\n def inverse_sum_sdi_index(self):\n \"\"\" Adriatic index: inverse sum sdi index\"\"\"\n return self.adriatic_index(1, 2, 3, 2)\n\n def misbalance_indi_index(self):\n \"\"\" Adriatic index: misbalance indi index\"\"\"\n return self.adriatic_index(1, 2, 4, -1)\n\n def misbalance_irdi_index(self):\n \"\"\" Adriatic index: misbalance irdi index\"\"\"\n return self.adriatic_index(1, 2, 4, -0.5)\n\n def misbalance_rodi_index(self):\n \"\"\" Adriatic index: misbalance rodi index\"\"\"\n return self.adriatic_index(1, 2, 4, 0.5)\n\n def misbalance_di_index(self):\n \"\"\" Adriatic index: misbalance di index\"\"\"\n return self.adriatic_index(1, 2, 4, 1)\n\n def misbalance_sdi_index(self):\n \"\"\" Adriatic index: misbalance sdi index\"\"\"\n return self.adriatic_index(1, 2, 4, 2)\n\n def inverse_misbalance_indi_index(self):\n \"\"\" Adriatic index: inverse misbalance indi index\"\"\"\n return self.adriatic_index(1, 2, 5, -1)\n\n def inverse_misbalance_irdi_index(self):\n \"\"\" Adriatic index: inverse misbalance irdi index\"\"\"\n return self.adriatic_index(1, 2, 5, -0.5)\n\n def inverse_misbalance_rodi_index(self):\n \"\"\" Adriatic index: inverse misbalance rodi index\"\"\"\n return self.adriatic_index(1, 2, 5, 0.5)\n\n def inverse_misbalance_di_index(self):\n \"\"\" Adriatic index: inverse misbalance di index\"\"\"\n return self.adriatic_index(1, 2, 5, 1)\n\n def inverse_misbalance_sdi_index(self):\n \"\"\" Adriatic index: inverse misbalance sdi index\"\"\"\n return self.adriatic_index(1, 2, 5, 2)\n\n def min_max_rodi_index(self):\n \"\"\" Adriatic index: min-max rodi index\"\"\"\n return self.adriatic_index(1, 2, 6, 0.5)\n\n def 
min_max_di_index(self):\n \"\"\" Adriatic index: min-max di index\"\"\"\n return self.adriatic_index(1, 2, 6, 1)\n\n def min_max_sdi_index(self):\n \"\"\" Adriatic index: min-max sdi index\"\"\"\n return self.adriatic_index(1, 2, 6, 2)\n\n def max_min_rodi_index(self):\n \"\"\" Adriatic index: max-min rodi index\"\"\"\n return self.adriatic_index(1, 2, 7, 0.5)\n\n def max_min_di_index(self):\n \"\"\" Adriatic index: max-min di index\"\"\"\n return self.adriatic_index(1, 2, 7, 1)\n\n def max_min_sdi_index(self):\n \"\"\" Adriatic index: max-min sdi index\"\"\"\n return self.adriatic_index(1, 2, 7, 2)\n\n def symmetric_division_rodi_index(self):\n \"\"\" Adriatic index: symmetric division rodi index\"\"\"\n return self.adriatic_index(1, 2, 8, 0.5)\n\n def symmetric_division_di_index(self):\n \"\"\" Adriatic index: symmetric division di index\"\"\"\n return self.adriatic_index(1, 2, 8, 1)\n\n def symmetric_division_sdi_index(self):\n \"\"\" Adriatic index: symmetric division sdi index\"\"\"\n return self.adriatic_index(1, 2, 8, 2)\n\n def randic_type_hadi_index(self):\n \"\"\" Adriatic index: Randic type hadi index\"\"\"\n return self.adriatic_index(1, 3, 1, 0.5)\n\n def randic_type_twodi_index(self):\n \"\"\" Adriatic index: Randic type twodi index\"\"\"\n return self.adriatic_index(1, 3, 1, 2)\n\n def sum_hadi_index(self):\n \"\"\" Adriatic index: sum hadi index\"\"\"\n return self.adriatic_index(1, 3, 2, 0.5)\n\n def sum_twodi_index(self):\n \"\"\" Adriatic index: sum twodi index\"\"\"\n return self.adriatic_index(1, 3, 2, 2)\n\n def inverse_sum_hadi_index(self):\n \"\"\" Adriatic index: inverse sum hadi index\"\"\"\n return self.adriatic_index(1, 3, 3, 0.5)\n\n def inverse_sum_twodi_index(self):\n \"\"\" Adriatic index: inverse sum twodi index\"\"\"\n return self.adriatic_index(1, 3, 3, 2)\n\n def misbalance_hadi_index(self):\n \"\"\" Adriatic index: misbalance hadi index\"\"\"\n return self.adriatic_index(1, 3, 4, 0.5)\n\n def misbalance_twodi_index(self):\n \"\"\" Adriatic index: misbalance twodi index\"\"\"\n return self.adriatic_index(1, 3, 4, 2)\n\n def inverse_misbalance_hadi_index(self):\n \"\"\" Adriatic index: inverse misbalance hadi index\"\"\"\n return self.adriatic_index(1, 3, 5, 0.5)\n\n def inverse_misbalance_twodi_index(self):\n \"\"\" Adriatic index: inverse misbalance twodi index\"\"\"\n return self.adriatic_index(1, 3, 5, 2)\n\n def min_max_hadi_index(self):\n \"\"\" Adriatic index: min-max hadi index\"\"\"\n return self.adriatic_index(1, 3, 6, 0.5)\n\n def min_max_twodi_index(self):\n \"\"\" Adriatic index: min-max twodi index\"\"\"\n return self.adriatic_index(1, 3, 6, 2)\n\n def max_min_hadi_index(self):\n \"\"\" Adriatic index: max-min hadi index\"\"\"\n return self.adriatic_index(1, 3, 7, 0.5)\n\n def max_min_twodi_index(self):\n \"\"\" Adriatic index: max-min twodi index\"\"\"\n return self.adriatic_index(1, 3, 7, 2)\n\n def symmetric_division_hadi_index(self):\n \"\"\" Adriatic index: symmetric division hadi index\"\"\"\n return self.adriatic_index(1, 3, 8, 0.5)\n\n def symmetric_division_twodi_index(self):\n \"\"\" Adriatic index: symmetric division twodi index\"\"\"\n return self.adriatic_index(1, 3, 8, 2)\n"
] | [
[
"numpy.longdouble",
"numpy.sum",
"numpy.ones",
"numpy.linalg.inv",
"numpy.matrix",
"numpy.linalg.eigvalsh",
"numpy.exp",
"numpy.ndarray",
"numpy.log",
"numpy.sqrt",
"numpy.identity",
"numpy.float64",
"numpy.minimum"
]
] |
ElmerCSC/ElmerIceCourses | [
"6ff1011f3a1311d84699a30da9f8fc56cb984a08"
] | [
"TeteRousse/Step1/Makegeo.py"
] | [
"# -*- coding: utf-8 -*-\n# Create a geo (gmsh input file) file from a contour file\n# the contour file contains the (x,y) coordinates of the ordered\n# points defining the contour of the domain\n#\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Test these options\n # edge size of the elements\nel_size = 18.0 \n\n # Spline or line \nspline = True\n\nContour = np.loadtxt('./../Data/Contour_TR_glacier.dat')\nx = Contour[:,0]\ny = Contour[:,1]\n\nif x[0]==x[-1] and y[0]==y[-1]:\n print('Same first and last points in contour file')\n Npt = len(x)-1\nelse:\n Npt = len(x) \n\n# Open the output file\ngeo = open('teterousse0.geo', 'w')\ngeo.write('// This a a geo file created using the python script Makegeo.py // \\n')\ngeo.write('Mesh.Algorithm=5; \\n')\ngeo.write('// To controle the element size, one can directly modify the lc value in the geo file // \\n')\ngeo.write('lc = {0} ; \\n'.format(el_size))\n\n# write the points coordinates (x,y,0,lc)\nnp=0\nfor j in range(0,Npt):\n np=np+1\n geo.write('Point({0}) = '.format(np)+r'{'+' {0}, {1}, 0.0, lc'.format(x[j],y[j])+r'}'+'; \\n')\n\n# if spline\nif spline: \n geo.write('Spline(1) = {')\n for j in range(0,Npt):\n geo.write('{0},'.format(j+1))\n geo.write('1}; \\n')\n \n geo.write('Line Loop(2) = {1}; \\n')\n geo.write('Plane Surface(3) = {2}; \\n')\n geo.write('Physical Line(4) = {1}; \\n')\n geo.write('Physical Surface(5) = {3}; \\n')\n \n \n# else it is lines, as a spline might not work in all case\nelse:\n nl=0\n for j in range(0,Npt-1):\n nl=nl+1\n geo.write('Line({0}) = '.format(nl)+r'{'+'{0},{1}'.format(j+1,j+2)+r'}'+'; \\n')\n geo.write('Line({0}) = '.format(nl+1)+r'{'+'{0},{1}'.format(j+2,1)+r'}'+'; \\n')\n \n geo.write('Compound Line({0}) = '.format(nl+2)+r'{')\n for j in range(0,Npt-1):\n geo.write('{0}, '.format(j+1))\n geo.write('{0}'.format(j+2)+'}; \\n')\n \n geo.write('Line Loop({0}) = '.format(nl+3)+r'{'+'{0}'.format(nl+2)+r'};'+' \\n')\n geo.write('Plane Surface({0}) = '.format(nl+4)+r'{'+'{0}'.format(nl+3)+r'};'+' \\n')\n geo.write('Physical Line({0}) = '.format(nl+5)+r'{'+'{0}'.format(nl+2)+r'};'+' \\n')\n geo.write('Physical Surface({0}) = '.format(nl+6)+r'{'+'{0}'.format(nl+4)+r'};'+' \\n')\n\ngeo.close()\n"
] | [
[
"numpy.loadtxt"
]
] |
qingyuanxingsi/incubator-mxnet | [
"fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf"
] | [
"tests/python/unittest/test_optimizer.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport numpy as np\nimport mxnet as mx\nimport mxnet.lr_scheduler as lr_scheduler\nfrom mxnet import gluon\nimport unittest\nfrom nose.tools import raises\nimport math\nfrom mxnet.test_utils import *\nfrom common import setup_module, with_seed\n\n@with_seed()\ndef test_learning_rate():\n o1 = mx.optimizer.Optimizer(learning_rate=0.01)\n o1.set_learning_rate(0.2)\n assert o1.learning_rate == 0.2\n\n lr_s = lr_scheduler.FactorScheduler(step=1)\n o2 = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)\n assert o2.learning_rate == 0.3\n o2.lr_scheduler.base_lr = 0.4\n assert o2.learning_rate == 0.4\n\n\n@raises(UserWarning)\n@with_seed()\ndef test_learning_rate_expect_user_warning():\n lr_s = lr_scheduler.FactorScheduler(step=1)\n o = mx.optimizer.Optimizer(lr_scheduler=lr_s, learning_rate=0.3)\n o.set_learning_rate(0.5)\n\n\n@with_seed()\ndef test_lr_wd_mult():\n data = mx.sym.Variable('data')\n bias = mx.sym.Variable('fc1_bias', lr_mult=1.0)\n fc1 = mx.sym.FullyConnected(data=data, bias=bias, name='fc1', num_hidden=10, lr_mult=0)\n fc2 = mx.sym.FullyConnected(data=fc1, name='fc2', num_hidden=10, wd_mult=0.5)\n\n mod = mx.mod.Module(symbol=fc2, label_names=None, context=default_context())\n mod.bind(data_shapes=[('data', (5,10))])\n mod.init_params(initializer=mx.init.Uniform(1.0))\n mod.init_optimizer(optimizer_params={'learning_rate': 1.0})\n args1, _ = mod.get_params()\n args1 = {k: v.asnumpy() for k, v in args1.items()}\n mod.forward(mx.io.DataBatch(data=[mx.random.uniform(low=-1.0, high=1.0, shape=(5,10))], label=None), is_train=True)\n mod.backward(mod.get_outputs())\n mod.update()\n args2, _ = mod.get_params()\n args2 = {k: v.asnumpy() for k, v in args2.items()}\n\n assert mod._optimizer.lr_mult == {'fc1_bias': 1.0, 'fc1_weight': 0.0}\n assert mod._optimizer.wd_mult == {'fc2_bias': 0.5, 'fc2_weight': 0.5, 'fc1_bias': 0.0}\n assert mx.test_utils.almost_equal(args1['fc1_weight'], args2['fc1_weight'], 1e-10)\n assert not mx.test_utils.almost_equal(args1['fc1_bias'], args2['fc1_bias'], 1e-1)\n assert not mx.test_utils.almost_equal(args1['fc2_weight'], args2['fc2_weight'], 1e-1)\n\ndef compare_ndarray_tuple(t1, t2, rtol=None, atol=None):\n if t1 is not None and t2 is not None:\n if isinstance(t1, tuple):\n for s1, s2 in zip(t1, t2):\n compare_ndarray_tuple(s1, s2, rtol, atol)\n else:\n assert_almost_equal(t1.asnumpy(), t2.asnumpy(), rtol=rtol, atol=atol)\n\n\ndef compare_optimizer(opt1, opt2, shape, dtype, w_stype='default', g_stype='default',\n rtol=1e-4, atol=1e-5):\n if w_stype == 'default':\n w2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)\n w1 = w2.copyto(default_context())\n elif w_stype == 'row_sparse' or w_stype == 'csr':\n w2 = rand_ndarray(shape, w_stype, 
density=1, dtype=dtype)\n w1 = w2.copyto(default_context()).tostype('default')\n else:\n raise Exception(\"type not supported yet\")\n if g_stype == 'default':\n g2 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)\n g1 = g2.copyto(default_context())\n elif g_stype == 'row_sparse' or g_stype == 'csr':\n g2 = rand_ndarray(shape, g_stype, dtype=dtype)\n g1 = g2.copyto(default_context()).tostype('default')\n else:\n raise Exception(\"type not supported yet\")\n\n state1 = opt1.create_state_multi_precision(0, w1)\n state2 = opt2.create_state_multi_precision(0, w2)\n compare_ndarray_tuple(state1, state2)\n\n opt1.update_multi_precision(0, w1, g1, state1)\n opt2.update_multi_precision(0, w2, g2, state2)\n compare_ndarray_tuple(state1, state2, rtol=rtol, atol=atol)\n assert_almost_equal(w1.asnumpy(), w2.asnumpy(), rtol=rtol, atol=atol)\n\n# SGD\n\nclass PySGD(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of sgd\"\"\"\n def __init__(self, learning_rate=0.01, momentum=0.0, multi_precision=False, **kwargs):\n super(PySGD, self).__init__(learning_rate=learning_rate, **kwargs)\n self.momentum = momentum\n self.multi_precision = multi_precision\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: momentum\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n momentum = None\n weight_master_copy = None\n do_multi_precision = self.multi_precision and weight.dtype == np.float16\n if do_multi_precision:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)\n weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)\n return (momentum, weight_master_copy)\n else:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)\n return momentum\n\n def create_state_multi_precision(self, index, weight):\n return self.create_state(index, weight)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n use_multi_precision = isinstance(state, list) or isinstance(state, tuple)\n\n if not use_multi_precision:\n if self.momentum == 0.0:\n if self.clip_gradient is not None:\n weight[:] = ((1 - lr*wd)*weight -\n lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n else:\n weight[:] = (1 - lr*wd)*weight - lr*self.rescale_grad*grad\n else:\n mom = state\n if self.clip_gradient is not None:\n mom[:] = (self.momentum*mom - lr*wd*weight -\n lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n weight += mom\n else:\n mom[:] = self.momentum*mom - lr*wd*weight - lr*self.rescale_grad*grad\n weight += mom\n else:\n grad32 = array(grad, ctx=grad.context, dtype=np.float32)\n mom = state[0]\n weight32 = state[1]\n if self.momentum == 0.0:\n if self.clip_gradient is not None:\n weight32[:] = ((1 - lr*wd)*weight32 -\n lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n else:\n weight32[:] = (1 - lr*wd)*weight32 - lr*self.rescale_grad*grad32\n else:\n if self.clip_gradient is not None:\n mom[:] = (self.momentum*mom - lr*wd*weight32 -\n lr*mx.nd.clip(grad32*self.rescale_grad, 
-self.clip_gradient, self.clip_gradient))\n weight32 += mom\n else:\n mom[:] = self.momentum*mom - lr*wd*weight32 - lr*self.rescale_grad*grad32\n weight32 += mom\n tmp = weight32.astype(weight.dtype)\n tmp.copyto(weight)\n\n def update_multi_precision(self, index, weight, grad, state):\n self.update(index, weight, grad, state)\n\[email protected](\"Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/9000\")\n@with_seed()\ndef test_sgd():\n opt1 = PySGD\n opt2 = mx.optimizer.SGD\n shape = (3, 4, 5)\n mom_options = [{}, {'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32, np.float64]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n # test operator fallback on cpu\n if (default_context() == mx.cpu()):\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n g_stype='row_sparse')\n if dtype != np.float16:\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape[:2],\n dtype, w_stype='csr', g_stype='csr')\n # test optimizer with a big shape\n big_shape = (54686454, 1)\n kwarg = {'momentum': 0.9, 'wd': 0.05}\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), big_shape, np.float32)\n\nclass PySparseSGD(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of sgd\"\"\"\n def __init__(self, learning_rate=0.01, momentum=0.0, **kwargs):\n super(PySparseSGD, self).__init__(learning_rate=learning_rate, **kwargs)\n self.momentum = momentum\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: momentum\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n if self.momentum == 0.0:\n return None\n else:\n return mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n num_rows = weight.shape[0]\n if self.momentum == 0.0:\n # Update on a per row basis, skip all-zero rows\n for row in range(num_rows):\n grad_row = grad[row].asnumpy()\n all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))\n if all_zeros:\n continue\n if self.clip_gradient is not None:\n weight[row] = ((1 - lr*wd)*weight[row] -\n lr*mx.nd.clip(grad[row]*self.rescale_grad,\n -self.clip_gradient, self.clip_gradient))\n else:\n weight[row] = (1 - lr*wd)*weight[row] - lr*self.rescale_grad*grad[row]\n else:\n mom = state\n for row in range(num_rows):\n grad_row = grad[row].asnumpy()\n all_zeros = mx.test_utils.almost_equal(grad_row, 
np.zeros_like(grad_row))\n if all_zeros:\n continue\n if self.clip_gradient is not None:\n mom[row] = (self.momentum*mom[row] - lr*wd*weight[row] -\n lr*mx.nd.clip(grad[row]*self.rescale_grad, -self.clip_gradient, self.clip_gradient))\n weight[row] += mom[row]\n else:\n mom[row] = self.momentum*mom[row] - lr*wd*weight[row] - lr*self.rescale_grad*grad[row]\n weight[row] += mom[row]\n\n@with_seed()\ndef test_sparse_sgd():\n opt1 = PySparseSGD\n opt2 = mx.optimizer.SGD\n shape = (3, 4, 5)\n mom_options = [{}, {'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float32]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n w_stype='row_sparse', g_stype='row_sparse')\n\n\n@with_seed(0)\ndef test_std_sparse_sgd():\n opt1 = PySGD\n opt2 = mx.optimizer.SGD\n shape = (3, 4, 5)\n mom_options = [{'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n for dtype in [np.float32]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n compare_optimizer(opt1(**kwarg), opt2(lazy_update=False, **kwarg), shape, dtype,\n w_stype='row_sparse', g_stype='row_sparse')\n\n\nclass PyNAG(PySGD):\n def __init__(self, **kwargs):\n super(PyNAG, self).__init__(**kwargs)\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: momentum\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n momentum = None\n weight_master_copy = None\n do_multi_precision = self.multi_precision and weight.dtype == np.float16\n if do_multi_precision:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)\n weight_master_copy = array(weight, ctx=weight.context, dtype=np.float32)\n return (weight_master_copy, momentum)\n else:\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)\n return momentum\n\n def create_state_multi_precision(self, index, weight):\n return self.create_state(index, weight)\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n use_multi_precision = isinstance(state, list) or isinstance(state, tuple)\n if not use_multi_precision:\n grad = grad * self.rescale_grad\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n if self.momentum == 
0.0:\n weight[:] += -lr * (grad + wd * weight)\n else:\n mom = state\n mom[:] *= self.momentum\n grad += wd * weight\n mom[:] += grad\n grad[:] += self.momentum * mom\n weight[:] += -lr * grad \n else:\n grad32 = array(grad, ctx=grad.context, dtype=np.float32)\n grad32 = grad32 * self.rescale_grad\n if self.clip_gradient is not None:\n grad32 = mx.nd.clip(grad32, -self.clip_gradient, self.clip_gradient)\n mom = state[1]\n weight32 = state[0]\n if self.momentum == 0.0:\n weight32[:] += -lr * (grad32 + wd * weight32)\n else:\n mom[:] *= self.momentum\n grad32 += wd * weight32\n mom[:] += grad32\n grad32[:] += self.momentum * mom\n weight32[:] += -lr * grad32\n tmp = weight32.astype(weight.dtype)\n tmp.copyto(weight)\n\n@with_seed(0)\ndef test_nag():\n opt1 = PyNAG\n opt2 = mx.optimizer.NAG\n shape = (3, 4, 5)\n mom_options = [{}, {'momentum': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32, np.float64]:\n for mom_option in mom_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(mom_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n\n\n\n# FTML\n\nclass PyFTML(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of FTML\"\"\"\n def __init__(self, beta1=0.6, beta2=0.999, epsilon=1e-8, **kwargs):\n super(PyFTML, self).__init__(**kwargs)\n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n\n def create_state(self, index, weight):\n return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # d_0\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # v_0\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # z_0\n\n def update(self, index, weight, grad, state):\n assert(isinstance(weight, mx.nd. 
NDArray))\n assert(isinstance(grad, mx.nd.NDArray))\n self._update_count(index)\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n t = self._index_update_count[index]\n\n grad = grad * self.rescale_grad + wd * weight\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n # get previous states\n prev_d, prev_v, prev_z = state\n # compute states\n v_t = self.beta2 * prev_v + (1 - self.beta2) * mx.nd.square(grad)\n d_t = (1 - pow(self.beta1, t)) / lr * (mx.nd.sqrt(v_t / (1 - pow(self.beta2, t))) + self.epsilon)\n sigma_t = d_t - self.beta1 * prev_d\n z_t = self.beta1 * prev_z + (1 - self.beta1) * grad - sigma_t * weight\n # update weight\n weight[:] = - z_t / d_t\n # update states\n prev_d[:] = d_t\n prev_v[:] = v_t\n prev_z[:] = z_t\n\n@with_seed(0)\ndef test_ftml():\n opt1 = PyFTML\n opt2 = mx.optimizer.FTML\n shape = (3, 4, 5)\n beta1_options = [{}, {'beta1': 0.5}, {'beta1': 0.7}]\n beta2_options = [{}, {'beta2': 0.8}, {'beta2': 0.9}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n for dtype in [np.float32]:\n for beta1_option in beta1_options:\n for beta2_option in beta2_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n kwarg = {}\n kwarg.update(beta1_option)\n kwarg.update(beta2_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n\n\n# ADAM\n\nclass PyAdam(mx.optimizer.Optimizer):\n \"\"\"python reference implemenation of adam\"\"\"\n def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,\n decay_factor=(1 - 1e-8), sparse_update=False, **kwargs):\n super(PyAdam, self).__init__(learning_rate=learning_rate, **kwargs)\n self.beta1 = beta1\n self.beta2 = beta2\n self.epsilon = epsilon\n self.decay_factor = decay_factor\n self.sparse_update = sparse_update\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state: mean, variance\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n\n \"\"\"\n return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # mean\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n self._update_count(index)\n\n t = self._index_update_count[index]\n mean, variance = state\n\n wd = self._get_wd(index)\n num_rows = weight.shape[0]\n coef1 = 1. - self.beta1**t\n coef2 = 1. - self.beta2**t\n lr *= math.sqrt(coef2)/coef1\n for row in range(num_rows):\n # check row slices of all zeros\n all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))\n # skip zeros during sparse update\n if all_zeros and self.sparse_update:\n continue\n grad[row] = grad[row] * self.rescale_grad + wd * weight[row]\n # clip gradients\n if self.clip_gradient is not None:\n mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])\n # update mean\n mean[row] *= self.beta1\n mean[row] += grad[row] * (1. 
- self.beta1)\n # update variance\n variance[row] *= self.beta2\n variance[row] += (1 - self.beta2) * mx.nd.square(grad[row], out=grad[row])\n # update weight\n weight[row] -= lr*mean[row]/(mx.nd.sqrt(variance[row]) + self.epsilon)\n\n\n@with_seed()\ndef test_adam():\n opt1 = PyAdam\n opt2 = mx.optimizer.Adam\n shape = (3, 4, 5)\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32, np.float64]:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n # atol 2e-5 needed to pass with seed 1248389097\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n rtol=1e-4, atol=2e-5)\n # atol 2e-5 needed to pass with seed 781809840\n compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,\n dtype, w_stype='row_sparse', g_stype='row_sparse',\n rtol=1e-4, atol=2e-5)\n compare_optimizer(opt1(**kwarg), opt2(lazy_update=False, **kwarg), shape,\n dtype, w_stype='row_sparse', g_stype='row_sparse',\n rtol=1e-4, atol=2e-5)\n\n# Signum\nclass PySignum(mx.optimizer.Optimizer):\n \"\"\"The python reference of Signum optimizer.\n\n The optimizer updates the weight by:\n\n rescaled_grad = rescale_grad * clip(grad, clip_gradient) + wd * weight\n state = momentum * state + (1-momentum)*rescaled_grad\n weight = (1 - lr * wd_lh) * weight - lr * sign(state)\n\n See the original paper at: https://jeremybernste.in/projects/amazon/signum.pdf\n\n For details of the update algorithm see\n :class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`.\n\n This optimizer accepts the following parameters in addition to those accepted\n by :class:`.Optimizer`.\n\n Parameters\n ----------\n momentum : float, optional\n The momentum value.\n wd_lh : float, optitional\n The amount of decoupled weight decay regularization.\n \"\"\"\n def __init__(self, learning_rate=0.01, momentum=0.9, wd_lh = 0.0, **kwargs):\n super(PySignum, self).__init__(learning_rate = learning_rate, **kwargs)\n self.momentum = momentum\n self.wd_lh = wd_lh\n\n def create_state(self, index, weight):\n momentum = None\n if self.momentum != 0.0:\n momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)\n return momentum\n\n def update(self, index, weight, grad, state):\n self._update_count(index)\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n\n if state is not None:\n mom = state\n if self.clip_gradient is not None:\n mom[:] = (self.momentum*mom - (1-self.momentum)*(wd*weight +\n mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient)))\n else:\n mom[:] = self.momentum*mom - (1-self.momentum)*wd*weight - (1-self.momentum)*self.rescale_grad*grad\n weight[:] = (1 - lr*self.wd_lh)*weight + lr*mx.nd.sign(mom)\n else:\n weight[:] = (1 - lr*(wd+self.wd_lh))*weight - lr*mx.nd.sign(grad)\n\n@with_seed(0)\ndef test_signum():\n opt1 = PySignum\n opt2 = mx.optimizer.Signum\n shape = (3, 4, 5)\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, 
{'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n wd_lh_options = [{}, {'wd_lh': 0.015}, {'wd_lh': 0.0}]\n mom_options = [{}, {'momentum': 0.9}]\n lr_options = [{'learning_rate': 0.05},{'learning_rate': 0.01}]\n for dtype in [np.float32, np.float64]:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in wd_lh_options:\n for lr_option in lr_options:\n for mom_option in mom_options:\n kwarg = {}\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n kwarg.update(lr_option)\n kwarg.update(mom_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n\n\n# RMSProp\nclass PyRMSProp(mx.optimizer.Optimizer):\n \"\"\"RMSProp optimizer of Tieleman & Hinton, 2012,\n\n For centered=False, the code follows the version in\n http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by\n Tieleman & Hinton, 2012\n\n For centered=True, the code follows the version in\n http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.\n\n Parameters\n ----------\n learning_rate : float, optional\n Step size.\n Default value is set to 0.001.\n gamma1: float, optional\n decay factor of moving average for gradient, gradient^2.\n Default value is set to 0.9.\n gamma2: float, optional\n \"momentum\" factor.\n Default value if set to 0.9.\n Only used if centered=True\n epsilon : float, optional\n Default value is set to 1e-8.\n centered : boolean, optional\n Use Graves or Tielemans & Hintons version of RMSProp\n wd : float, optional\n L2 regularization coefficient add to all the weights\n rescale_grad : float, optional\n rescaling factor of gradient.\n clip_gradient : float, optional\n clip gradient in range [-clip_gradient, clip_gradient]\n clip_weights : float, optional\n clip weights in range [-clip_weights, clip_weights]\n\n \"\"\"\n def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,\n epsilon=1e-8, centered=False, clip_weights=None, **kwargs):\n super(PyRMSProp, self).__init__(learning_rate=learning_rate, **kwargs)\n self.centered = centered\n self.gamma1 = gamma1\n self.gamma2 = gamma2\n self.epsilon = epsilon\n self.clip_weights = clip_weights\n\n def create_state(self, index, weight):\n \"\"\"Create additional optimizer state.\n\n For centered=False: n\n For centered=True: n, g, delta\n\n Parameters\n ----------\n weight : NDArray\n The weight data\n \"\"\"\n if self.centered:\n return (mx.nd.zeros(weight.shape, weight.context), # n\n mx.nd.zeros(weight.shape, weight.context), # g\n mx.nd.zeros(weight.shape, weight.context)) # delta\n else:\n return (mx.nd.zeros(weight.shape, weight.context), ) # n\n\n def update(self, index, weight, grad, state):\n \"\"\"Update the parameters.\n\n Parameters\n ----------\n index : int\n An unique integer key used to index the parameters\n\n weight : NDArray\n weight ndarray\n\n grad : NDArray\n grad ndarray\n\n state : NDArray or other objects returned by init_state\n The auxiliary state used in optimization.\n \"\"\"\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n self._update_count(index)\n grad = grad * self.rescale_grad + wd * weight\n\n if not self.centered:\n (n, ) = state\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n\n weight[:] -= lr * grad/(mx.nd.sqrt(n + self.epsilon))\n\n else:\n n, g, delta = state\n if self.clip_gradient is not None:\n grad 
= mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n\n g[:] = (1 - self.gamma1) * grad + self.gamma1 * g\n delta[:] = (self.gamma2) * delta - lr * grad/(mx.nd.sqrt(n - g*g + self.epsilon))\n weight[:] += delta\n\n if self.clip_weights:\n mx.ndarray.clip(weight, -self.clip_weights, self.clip_weights, out=weight)\n\[email protected](\"Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8230\")\n@with_seed(0)\ndef test_rms():\n opt1 = PyRMSProp\n opt2 = mx.optimizer.RMSProp\n shape = (3, 4, 5)\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n cw_options = [{}, {'clip_weights': 0.01}]\n center_options = [{}, {'centered': False}, {'centered': True}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]\n mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]\n for dtype in [np.float16, np.float32]:\n for cw_option in cw_options:\n for cg_option in cg_options:\n for center_option in center_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n for mp_option in mp_options:\n kwarg = {}\n kwarg.update(cw_option)\n kwarg.update(cg_option)\n kwarg.update(center_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n kwarg.update(mp_option)\n if (dtype == np.float16 and\n ('multi_precision' not in kwarg or\n not kwarg['multi_precision'])):\n continue\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n if (default_context() == mx.cpu()):\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype, g_stype='row_sparse')\n\nclass PyFtrl(mx.optimizer.Optimizer):\n \"\"\"The Ftrl optimizer.\n\n Referenced from *Ad Click Prediction: a View from the Trenches*, available at\n http://dl.acm.org/citation.cfm?id=2488200.\n\n Parameters\n ----------\n lamda1 : float, optional\n L1 regularization coefficient.\n learning_rate : float, optional\n The initial learning rate.\n beta : float, optional\n Per-coordinate learning rate correlation parameter.\n eta :\n .. 
math::\n \\\\eta_{t,i} = \\\\frac{learningrate}{\\\\beta+\\\\sqrt{\\\\sum_{s=1}^tg_{s,i}^t}}\n \"\"\"\n\n def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, sparse_update=False, **kwargs):\n super(PyFtrl, self).__init__(**kwargs)\n self.lamda1 = lamda1\n self.beta = beta\n self.lr = learning_rate\n self.sparse_update = sparse_update\n\n def create_state(self, index, weight):\n return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # dn\n mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # n\n\n def update(self, index, weight, grad, state):\n self._update_count(index)\n wd = self._get_wd(index)\n lr = self._get_lr(index)\n num_rows = weight.shape[0]\n\n dn, n = state\n for row in range(num_rows):\n all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))\n if all_zeros and self.sparse_update:\n continue\n grad[row] = grad[row] * self.rescale_grad\n if self.clip_gradient is not None:\n mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])\n\n #update dn, n\n dn[row] += grad[row] - (mx.nd.sqrt(n[row] + grad[row] * grad[row]) - mx.nd.sqrt(n[row])) * weight[row] / lr\n n[row] += grad[row] * grad[row]\n\n # update weight\n weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \\\n ((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1)\n\n@with_seed()\ndef test_ftrl():\n opt1 = PyFtrl\n opt2 = mx.optimizer.Ftrl\n shape = (3, 4, 5)\n kwargs = [{},\n {'clip_gradient': 0.5},\n {'clip_gradient': 0.4, 'rescale_grad': 0.14},\n {'rescale_grad': 0.8},\n {'clip_gradient': 0.5, 'wd': 0.07},\n {'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03},\n {'rescale_grad': 0.8, 'wd': 0.05},\n {'rescale_grad': 0.8, 'wd': 0.05, 'lamda1': 0.01},\n {'clip_gradient': 0.5, 'wd': 0.07, 'lamda1': 1.0}]\n for kwarg in kwargs:\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, np.float32)\n compare_optimizer(opt1(sparse_update=True, **kwarg), opt2(**kwarg), shape,\n np.float32, w_stype='row_sparse', g_stype='row_sparse')\n\n@with_seed(1234)\ndef test_nadam():\n\n def get_net(num_hidden, flatten=True):\n data = mx.symbol.Variable('data')\n fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128, flatten=flatten)\n act1 = mx.symbol.Activation(fc1, name='relu1', act_type=\"relu\")\n fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64, flatten=flatten)\n act2 = mx.symbol.Activation(fc2, name='relu2', act_type=\"relu\")\n fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=num_hidden, flatten=flatten)\n return fc3\n\n N = 20\n data = mx.random.uniform(-1, 1, shape=(N, 10))\n label = mx.random.uniform(-1, 1, shape=(N, 1))\n data_iter = mx.io.NDArrayIter(data, label, batch_size=5, label_name='label', shuffle=True)\n output = get_net(1)\n l = mx.symbol.Variable('label')\n Loss = gluon.loss.L1Loss()\n loss = Loss(output, l)\n loss = mx.sym.make_loss(loss)\n mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))\n mod.fit(data_iter, num_epoch=60, optimizer_params={'learning_rate': 0.0005, 'wd': 0.0005},\n initializer=mx.init.Xavier(magnitude=2), eval_metric=mx.metric.Loss(),\n optimizer='nadam')\n assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.1\n\n# AdaGrad\nclass PyAdaGrad(mx.optimizer.Optimizer):\n \"\"\"The python reference of AdaGrad optimizer.\n\n This class implements the AdaGrad optimizer described in *Adaptive Subgradient\n Methods for Online Learning and Stochastic Optimization*, and available at\n 
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.\n\n Updates are applied by::\n\n rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)\n history = history + square(rescaled_grad)\n w = w - learning_rate * rescaled_grad / sqrt(history + epsilon)\n\n This optimizer accepts the following parameters in addition to those accepted\n by :class:`.Optimizer`.\n\n Parameters\n ----------\n eps: float, optional\n Small value to avoid division by 0.\n\n \"\"\"\n def __init__(self, eps=1e-7, **kwargs):\n super(PyAdaGrad, self).__init__(**kwargs)\n self.float_stable_eps = eps\n\n def create_state(self, index, weight):\n return mx.nd.zeros(weight.shape, weight.context, stype=weight.stype)\n\n def update(self, index, weight, grad, state):\n self._update_count(index)\n lr = self._get_lr(index)\n wd = self._get_wd(index)\n\n history = state\n grad = grad * self.rescale_grad\n if self.clip_gradient is not None:\n grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)\n history[:] += mx.nd.square(grad)\n div = grad / mx.nd.sqrt(history + self.float_stable_eps)\n weight[:] += (div + weight * wd) * -lr\n\ndef test_adagrad():\n mx.random.seed(0)\n opt1 = PyAdaGrad\n opt2 = mx.optimizer.AdaGrad\n shape = (3, 4, 5)\n eps_options = [{}, {'eps': 1e-8}]\n cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]\n rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]\n wd_options = [{}, {'wd': 0.0}]\n for dtype in [np.float32]:\n for eps_option in eps_options:\n for cg_option in cg_options:\n for rg_option in rg_options:\n for wd_option in wd_options:\n kwarg = {}\n kwarg.update(eps_option)\n kwarg.update(cg_option)\n kwarg.update(rg_option)\n kwarg.update(wd_option)\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)\n if wd_option.get('wd', 0.0) == 0.0:\n compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype,\n w_stype='row_sparse', g_stype='row_sparse')\n\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n"
] | [
[
"numpy.zeros_like"
]
] |
kavach-feature/Advanced_lane_finding | [
"12e4e330e338734fdb35655c7581b98ba1eb490b"
] | [
"line.py"
] | [
"import numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport pickle\r\n\r\nclass Line():\r\n\tdef __init__(self,n):\r\n\t\tself.n=n\r\n\t\tself.detected =False\r\n\r\n\t\t#Polynomial coefficients of the lines\r\n\r\n\t\tself.A=[]\r\n\t\tself.B=[]\r\n\t\tself.C=[]\r\n\r\n\t\t#Running average of coefficients\r\n\r\n\t\tself.A_avg=0.\r\n\t\tself.B_avg=0.\r\n\t\tself.C_avg=0.\r\n\r\n\tdef obtain_fit(self):\r\n\t\treturn (self.A_avg,self.B_avg,self.C_avg)\t\r\n\r\n\r\n\tdef update_fit(self,fit_coeffs):\r\n\r\n\t\t\"\"\"Obtain the fit coefficients from the latest frame and apply over each of 2nd polynomial coefficients\r\n\t\tfor the purpose of smoothing\r\n\t\t\"\"\"\r\n\r\n\t\tfull_Q= len(self.A) >= self.n\r\n\r\n\r\n\t\t#Append line fit coefficients\r\n\r\n\t\tself.A.append(fit_coeffs[0])\r\n\t\tself.B.append(fit_coeffs[1])\r\n\t\tself.C.append(fit_coeffs[2])\r\n\r\n\t\tif full_Q:\r\n\t\t\t_=self.A.pop(0)\r\n\t\t\t_=self.B.pop(0)\r\n\t\t\t_=self.C.pop(0)\r\n\r\n\r\n\t\t# Compute the average of the polynomial coefficients \r\n\r\n\t\tself.A_avg = np.mean(self.A)\r\n\t\tself.B_avg = np.mean(self.B)\r\n\t\tself.C_avg = np.mean(self.C)\r\n\r\n\r\n\t\treturn (self.A_avg,self.B_avg,self.C_avg)\r\n\r\n"
] | [
[
"numpy.mean"
]
] |
vermouth1992/torchlib | [
"63b2bedb40f670b2d9fbfc0daeab4a8d44623095"
] | [
"torchlib/deep_rl/algorithm/ppo/utils.py"
] | [
"\"\"\"\nCommon utilities to implement policy gradient algorithms\n\"\"\"\n\nfrom collections import namedtuple, deque\n\nimport numpy as np\nfrom scipy import signal\nfrom torchlib.dataset.utils import create_data_loader\nfrom torchlib.deep_rl.utils.replay.replay import ReplayBuffer\nfrom torchlib.deep_rl.utils.replay.sampler import Sampler\nfrom torchlib.utils.math import unnormalize, normalize\n\nTrajectory = namedtuple('Trajectory', ('state', 'action', 'reward_to_go', 'advantage', 'old_log_prob'))\n\n\nclass PPOReplayBuffer(ReplayBuffer):\n def __init__(self, gamma, lam, policy, alpha=0.9):\n \"\"\"\n\n Args:\n gamma: discount factor\n lam: generalized advantage estimation\n policy: PPO policy\n alpha: value moving average ratio\n \"\"\"\n super(PPOReplayBuffer, self).__init__(None, None, None, None, None)\n self.gamma = gamma\n self.lam = lam\n self.alpha = alpha\n self.policy = policy\n\n def _initialize(self):\n self.memory = deque()\n self.running_value_mean = 0.\n self.running_value_std = 0.\n\n def clear(self):\n self._size = 0\n self.memory.clear()\n\n def _finish_trajectory(self, states, actions, rewards, last_value):\n \"\"\"Compute path accessory information including (reward_to_go, old_log_prob, advantage)\n\n Returns:\n\n \"\"\"\n predicted_state_values = self.policy.predict_state_value_batch(states)\n predicted_state_values = unnormalize(predicted_state_values, self.running_value_mean, self.running_value_std)\n\n rewards_last_state = np.append(rewards, last_value)\n predicted_state_values = np.append(predicted_state_values, last_value)\n\n # Used for fit value function\n reward_to_go = discount(rewards_last_state, self.gamma).astype(np.float32)[:-1]\n\n temporal_difference = rewards + predicted_state_values[1:] * self.gamma - predicted_state_values[:-1]\n # calculate reward-to-go\n gae = discount(temporal_difference, self.gamma * self.lam).astype(np.float32)\n\n old_log_prob = self.policy.predict_log_prob_batch(states, actions)\n\n return reward_to_go, gae, old_log_prob\n\n def add_trajectory(self, states, actions, rewards, last_value):\n \"\"\"If last_state is not None, this trajectory is truncated.\n\n Args:\n states: (T, ob_dim)\n actions: (T, ac_dim)\n rewards: (T,)\n last_state: (ob_dim)\n\n Returns:\n\n \"\"\"\n reward_to_go, gae, old_log_prob = self._finish_trajectory(states, actions, rewards, last_value)\n self.memory.append(Trajectory(\n state=states,\n action=actions,\n reward_to_go=reward_to_go,\n advantage=gae,\n old_log_prob=old_log_prob\n ))\n\n self._size += actions.shape[0]\n\n def random_iterator(self, batch_size):\n \"\"\"Create an iterator of all the dataset and update value mean and std\n\n\n Args:\n batch_size:\n\n Returns:\n\n \"\"\"\n states = np.concatenate([trajectory.state for trajectory in self.memory], axis=0)\n actions = np.concatenate([trajectory.action for trajectory in self.memory], axis=0)\n reward_to_go = np.concatenate([trajectory.reward_to_go for trajectory in self.memory], axis=0)\n gaes = np.concatenate([trajectory.advantage for trajectory in self.memory], axis=0)\n old_log_prob = np.concatenate([trajectory.old_log_prob for trajectory in self.memory], axis=0)\n\n value_mean, value_std = np.mean(reward_to_go), np.std(reward_to_go)\n reward_to_go = normalize(reward_to_go, value_mean, value_std)\n\n self.running_value_mean = self.running_value_mean * self.alpha + value_mean * (1 - self.alpha)\n self.running_value_std = self.running_value_std * self.alpha + value_std * (1 - self.alpha)\n\n gaes = normalize(gaes, np.mean(gaes), 
np.std(gaes))\n\n batch_size = min(batch_size, states.shape[0])\n\n data_loader = create_data_loader((states, actions, reward_to_go, gaes, old_log_prob),\n batch_size=batch_size, shuffle=True, drop_last=True)\n\n return data_loader\n\n\nclass PPOSampler(Sampler):\n def __init__(self, min_steps_per_batch, logger=None):\n super(PPOSampler, self).__init__()\n self.min_steps_per_batch = min_steps_per_batch\n self.logger = logger\n\n def sample_trajectories(self, policy=None):\n obs_lst = []\n action_lst = []\n reward_lst = []\n done_lst = []\n\n policy = self.policy if policy is None else policy\n obs = self.env.reset()\n for _ in range(self.min_steps_per_batch // obs.shape[0]):\n action = policy.predict_batch(obs)\n obs_lst.append(obs)\n action_lst.append(action)\n\n obs, rewards, dones, infos = self.env.step(action)\n\n reward_lst.append(rewards)\n done_lst.append(dones)\n\n # compute last state value for the last trajectory in each environment\n last_state_lst = obs\n last_value_lst = self.policy.predict_state_value_batch(last_state_lst)\n last_value_lst = unnormalize(last_value_lst, self.pool.running_value_mean, self.pool.running_value_std)\n\n obs_lst = np.stack(obs_lst, axis=1)\n action_lst = np.stack(action_lst, axis=1)\n reward_lst = np.stack(reward_lst, axis=1)\n done_lst = np.stack(done_lst, axis=1)\n\n # separate trajectories and add to pool\n for i in range(self.env.num_envs):\n done_index = np.where(done_lst[i])[0] + 1\n if done_lst[i][-1] == True:\n done_index = done_index[:-1] # ignore the last one\n last_value = 0.\n else:\n last_value = last_value_lst[i]\n\n sub_obs_lst = np.split(obs_lst[i], done_index)\n sub_action_lst = np.split(action_lst[i], done_index)\n sub_reward_lst = np.split(reward_lst[i], done_index)\n sub_last_value_lst = [0.] * (len(sub_obs_lst) - 1) + [last_value]\n\n for j in range(len(sub_obs_lst)):\n self.pool.add_trajectory(states=sub_obs_lst[j],\n actions=sub_action_lst[j],\n rewards=sub_reward_lst[j],\n last_value=sub_last_value_lst[j])\n if self.logger:\n self.logger.store(EpReward=np.sum(sub_reward_lst[j]) + sub_last_value_lst[j])\n self.logger.store(EpLength=sub_obs_lst[j].shape[0])\n\n\ndef discount(x, gamma):\n return signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]\n"
] | [
[
"numpy.sum",
"numpy.append",
"scipy.signal.lfilter",
"numpy.where",
"numpy.stack",
"numpy.std",
"numpy.concatenate",
"numpy.mean",
"numpy.split"
]
] |
jianoaix/ray | [
"1701b923bc83905f8961c06a6a173e3eba46a936",
"1701b923bc83905f8961c06a6a173e3eba46a936"
] | [
"python/ray/_private/utils.py",
"python/ray/tune/tests/test_integration_wandb.py"
] | [
"import binascii\nimport errno\nimport functools\nimport hashlib\nimport importlib\nimport logging\nimport multiprocessing\nimport os\nimport signal\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\nfrom typing import Optional, Sequence, Tuple, Any, Union, Dict\nimport uuid\nimport grpc\nimport warnings\n\ntry:\n from grpc import aio as aiogrpc\nexcept ImportError:\n from grpc.experimental import aio as aiogrpc\n\nimport inspect\nfrom inspect import signature\nfrom pathlib import Path\nimport numpy as np\n\nimport ray\nfrom ray.core.generated.gcs_pb2 import ErrorTableData\nimport ray.ray_constants as ray_constants\nfrom ray._private.tls_utils import load_certs_from_env\n\n# Import psutil after ray so the packaged version is used.\nimport psutil\n\npwd = None\nif sys.platform != \"win32\":\n import pwd\n\nlogger = logging.getLogger(__name__)\n\n# Linux can bind child processes' lifetimes to that of their parents via prctl.\n# prctl support is detected dynamically once, and assumed thereafter.\nlinux_prctl = None\n\n# Windows can bind processes' lifetimes to that of kernel-level \"job objects\".\n# We keep a global job object to tie its lifetime to that of our own process.\nwin32_job = None\nwin32_AssignProcessToJobObject = None\n\n\ndef get_user_temp_dir():\n if \"RAY_TMPDIR\" in os.environ:\n return os.environ[\"RAY_TMPDIR\"]\n elif sys.platform.startswith(\"linux\") and \"TMPDIR\" in os.environ:\n return os.environ[\"TMPDIR\"]\n elif sys.platform.startswith(\"darwin\") or sys.platform.startswith(\"linux\"):\n # Ideally we wouldn't need this fallback, but keep it for now for\n # for compatibility\n tempdir = os.path.join(os.sep, \"tmp\")\n else:\n tempdir = tempfile.gettempdir()\n return tempdir\n\n\ndef get_ray_temp_dir():\n return os.path.join(get_user_temp_dir(), \"ray\")\n\n\ndef _random_string():\n id_hash = hashlib.shake_128()\n id_hash.update(uuid.uuid4().bytes)\n id_bytes = id_hash.digest(ray_constants.ID_SIZE)\n assert len(id_bytes) == ray_constants.ID_SIZE\n return id_bytes\n\n\ndef format_error_message(exception_message: str, task_exception: bool = False):\n \"\"\"Improve the formatting of an exception thrown by a remote function.\n\n This method takes a traceback from an exception and makes it nicer by\n removing a few uninformative lines and adding some space to indent the\n remaining lines nicely.\n\n Args:\n exception_message: A message generated by traceback.format_exc().\n\n Returns:\n A string of the formatted exception message.\n \"\"\"\n lines = exception_message.split(\"\\n\")\n if task_exception:\n # For errors that occur inside of tasks, remove lines 1 and 2 which are\n # always the same, they just contain information about the worker code.\n lines = lines[0:1] + lines[3:]\n pass\n return \"\\n\".join(lines)\n\n\ndef push_error_to_driver(\n worker, error_type: str, message: str, job_id: Optional[str] = None\n):\n \"\"\"Push an error message to the driver to be printed in the background.\n\n Args:\n worker: The worker to use.\n error_type: The type of the error.\n message: The message that will be printed in the background\n on the driver.\n job_id: The ID of the driver to push the error message to. 
If this\n is None, then the message will be pushed to all drivers.\n \"\"\"\n if job_id is None:\n job_id = ray.JobID.nil()\n assert isinstance(job_id, ray.JobID)\n worker.core_worker.push_error(job_id, error_type, message, time.time())\n\n\ndef construct_error_message(job_id, error_type, message, timestamp):\n \"\"\"Construct an ErrorTableData object.\n\n Args:\n job_id: The ID of the job that the error should go to. If this is\n nil, then the error will go to all drivers.\n error_type: The type of the error.\n message: The error message.\n timestamp: The time of the error.\n\n Returns:\n The ErrorTableData object.\n \"\"\"\n data = ErrorTableData()\n data.job_id = job_id.binary()\n data.type = error_type\n data.error_message = message\n data.timestamp = timestamp\n return data\n\n\ndef publish_error_to_driver(\n error_type: str,\n message: str,\n gcs_publisher,\n job_id=None,\n):\n \"\"\"Push an error message to the driver to be printed in the background.\n\n Normally the push_error_to_driver function should be used. However, in some\n instances, the raylet client is not available, e.g., because the\n error happens in Python before the driver or worker has connected to the\n backend processes.\n\n Args:\n error_type: The type of the error.\n message: The message that will be printed in the background\n on the driver.\n gcs_publisher: The GCS publisher to use.\n job_id: The ID of the driver to push the error message to. If this\n is None, then the message will be pushed to all drivers.\n \"\"\"\n if job_id is None:\n job_id = ray.JobID.nil()\n assert isinstance(job_id, ray.JobID)\n error_data = construct_error_message(job_id, error_type, message, time.time())\n try:\n gcs_publisher.publish_error(job_id.hex().encode(), error_data)\n except Exception:\n logger.exception(f\"Failed to publish error {error_data}\")\n\n\ndef random_string():\n \"\"\"Generate a random string to use as an ID.\n\n Note that users may seed numpy, which could cause this function to generate\n duplicate IDs. Therefore, we need to seed numpy ourselves, but we can't\n interfere with the state of the user's random number generator, so we\n extract the state of the random number generator and reset it after we are\n done.\n\n TODO(rkn): If we want to later guarantee that these are generated in a\n deterministic manner, then we will need to make some changes here.\n\n Returns:\n A random byte string of length ray_constants.ID_SIZE.\n \"\"\"\n # Get the state of the numpy random number generator.\n numpy_state = np.random.get_state()\n # Try to use true randomness.\n np.random.seed(None)\n # Generate the random ID.\n random_id = np.random.bytes(ray_constants.ID_SIZE)\n # Reset the state of the numpy random number generator.\n np.random.set_state(numpy_state)\n return random_id\n\n\ndef decode(byte_str: str, allow_none: bool = False, encode_type: str = \"utf-8\"):\n \"\"\"Make this unicode in Python 3, otherwise leave it as bytes.\n\n Args:\n byte_str: The byte string to decode.\n allow_none: If true, then we will allow byte_str to be None in which\n case we will return an empty string. 
TODO(rkn): Remove this flag.\n This is only here to simplify upgrading to flatbuffers 1.10.0.\n\n Returns:\n A byte string in Python 2 and a unicode string in Python 3.\n \"\"\"\n if byte_str is None and allow_none:\n return \"\"\n\n if not isinstance(byte_str, bytes):\n raise ValueError(f\"The argument {byte_str} must be a bytes object.\")\n if sys.version_info >= (3, 0):\n return byte_str.decode(encode_type)\n else:\n return byte_str\n\n\ndef ensure_str(s, encoding=\"utf-8\", errors=\"strict\"):\n \"\"\"Coerce *s* to `str`.\n\n - `str` -> `str`\n - `bytes` -> decoded to `str`\n \"\"\"\n if isinstance(s, str):\n return s\n else:\n assert isinstance(s, bytes)\n return s.decode(encoding, errors)\n\n\ndef binary_to_object_ref(binary_object_ref):\n return ray.ObjectRef(binary_object_ref)\n\n\ndef binary_to_task_id(binary_task_id):\n return ray.TaskID(binary_task_id)\n\n\ndef binary_to_hex(identifier):\n hex_identifier = binascii.hexlify(identifier)\n if sys.version_info >= (3, 0):\n hex_identifier = hex_identifier.decode()\n return hex_identifier\n\n\ndef hex_to_binary(hex_identifier):\n return binascii.unhexlify(hex_identifier)\n\n\n# TODO(qwang): Remove these hepler functions\n# once we separate `WorkerID` from `UniqueID`.\ndef compute_job_id_from_driver(driver_id):\n assert isinstance(driver_id, ray.WorkerID)\n return ray.JobID(driver_id.binary()[0 : ray.JobID.size()])\n\n\ndef compute_driver_id_from_job(job_id):\n assert isinstance(job_id, ray.JobID)\n rest_length = ray_constants.ID_SIZE - job_id.size()\n driver_id_str = job_id.binary() + (rest_length * b\"\\xff\")\n return ray.WorkerID(driver_id_str)\n\n\ndef get_cuda_visible_devices():\n \"\"\"Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.\n\n Returns:\n devices (List[str]): If CUDA_VISIBLE_DEVICES is set, returns a\n list of strings representing the IDs of the visible GPUs.\n If it is not set or is set to NoDevFiles, returns empty list.\n \"\"\"\n gpu_ids_str = os.environ.get(\"CUDA_VISIBLE_DEVICES\", None)\n\n if gpu_ids_str is None:\n return None\n\n if gpu_ids_str == \"\":\n return []\n\n if gpu_ids_str == \"NoDevFiles\":\n return []\n\n # GPU identifiers are given as strings representing integers or UUIDs.\n return list(gpu_ids_str.split(\",\"))\n\n\nlast_set_gpu_ids = None\n\n\ndef set_cuda_visible_devices(gpu_ids):\n \"\"\"Set the CUDA_VISIBLE_DEVICES environment variable.\n\n Args:\n gpu_ids (List[str]): List of strings representing GPU IDs.\n \"\"\"\n\n if os.environ.get(ray_constants.NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR):\n return\n\n global last_set_gpu_ids\n if last_set_gpu_ids == gpu_ids:\n return # optimization: already set\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join([str(i) for i in gpu_ids])\n last_set_gpu_ids = gpu_ids\n\n\ndef resources_from_ray_options(options_dict: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Determine a task's resource requirements.\n\n Args:\n options_dict: The dictionary that contains resources requirements.\n\n Returns:\n A dictionary of the resource requirements for the task.\n \"\"\"\n resources = (options_dict.get(\"resources\") or {}).copy()\n\n if \"CPU\" in resources or \"GPU\" in resources:\n raise ValueError(\n \"The resources dictionary must not contain the key 'CPU' or 'GPU'\"\n )\n elif \"memory\" in resources or \"object_store_memory\" in resources:\n raise ValueError(\n \"The resources dictionary must not \"\n \"contain the key 'memory' or 'object_store_memory'\"\n )\n\n num_cpus = options_dict.get(\"num_cpus\")\n num_gpus = 
options_dict.get(\"num_gpus\")\n memory = options_dict.get(\"memory\")\n object_store_memory = options_dict.get(\"object_store_memory\")\n accelerator_type = options_dict.get(\"accelerator_type\")\n\n if num_cpus is not None:\n resources[\"CPU\"] = num_cpus\n if num_gpus is not None:\n resources[\"GPU\"] = num_gpus\n if memory is not None:\n resources[\"memory\"] = ray_constants.to_memory_units(memory, round_up=True)\n if object_store_memory is not None:\n resources[\"object_store_memory\"] = ray_constants.to_memory_units(\n object_store_memory, round_up=True\n )\n if accelerator_type is not None:\n resources[\n f\"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}{accelerator_type}\"\n ] = 0.001\n\n return resources\n\n\nclass Unbuffered(object):\n \"\"\"There's no \"built-in\" solution to programatically disabling buffering of\n text files. Ray expects stdout/err to be text files, so creating an\n unbuffered binary file is unacceptable.\n\n See\n https://mail.python.org/pipermail/tutor/2003-November/026645.html.\n https://docs.python.org/3/library/functions.html#open\n\n \"\"\"\n\n def __init__(self, stream):\n self.stream = stream\n\n def write(self, data):\n self.stream.write(data)\n self.stream.flush()\n\n def writelines(self, datas):\n self.stream.writelines(datas)\n self.stream.flush()\n\n def __getattr__(self, attr):\n return getattr(self.stream, attr)\n\n\ndef open_log(path, unbuffered=False, **kwargs):\n \"\"\"\n Opens the log file at `path`, with the provided kwargs being given to\n `open`.\n \"\"\"\n # Disable buffering, see test_advanced_3.py::test_logging_to_driver\n kwargs.setdefault(\"buffering\", 1)\n kwargs.setdefault(\"mode\", \"a\")\n kwargs.setdefault(\"encoding\", \"utf-8\")\n stream = open(path, **kwargs)\n if unbuffered:\n return Unbuffered(stream)\n else:\n return stream\n\n\ndef get_system_memory(\n # For cgroups v1:\n memory_limit_filename=\"/sys/fs/cgroup/memory/memory.limit_in_bytes\",\n # For cgroups v2:\n memory_limit_filename_v2=\"/sys/fs/cgroup/memory.max\",\n):\n \"\"\"Return the total amount of system memory in bytes.\n\n Returns:\n The total amount of system memory in bytes.\n \"\"\"\n # Try to accurately figure out the memory limit if we are in a docker\n # container. Note that this file is not specific to Docker and its value is\n # often much larger than the actual amount of memory.\n docker_limit = None\n if os.path.exists(memory_limit_filename):\n with open(memory_limit_filename, \"r\") as f:\n docker_limit = int(f.read())\n elif os.path.exists(memory_limit_filename_v2):\n with open(memory_limit_filename_v2, \"r\") as f:\n max_file = f.read()\n if max_file.isnumeric():\n docker_limit = int(max_file)\n else:\n # max_file is \"max\", i.e. 
is unset.\n docker_limit = None\n\n # Use psutil if it is available.\n psutil_memory_in_bytes = psutil.virtual_memory().total\n\n if docker_limit is not None:\n # We take the min because the cgroup limit is very large if we aren't\n # in Docker.\n return min(docker_limit, psutil_memory_in_bytes)\n\n return psutil_memory_in_bytes\n\n\ndef _get_docker_cpus(\n cpu_quota_file_name=\"/sys/fs/cgroup/cpu/cpu.cfs_quota_us\",\n cpu_period_file_name=\"/sys/fs/cgroup/cpu/cpu.cfs_period_us\",\n cpuset_file_name=\"/sys/fs/cgroup/cpuset/cpuset.cpus\",\n cpu_max_file_name=\"/sys/fs/cgroup/cpu.max\",\n) -> Optional[float]:\n # TODO (Alex): Don't implement this logic oursleves.\n # Docker has 2 underyling ways of implementing CPU limits:\n # https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler\n # 1. --cpuset-cpus 2. --cpus or --cpu-quota/--cpu-period (--cpu-shares is a\n # soft limit so we don't worry about it). For Ray's purposes, if we use\n # docker, the number of vCPUs on a machine is whichever is set (ties broken\n # by smaller value).\n\n cpu_quota = None\n # See: https://bugs.openjdk.java.net/browse/JDK-8146115\n if os.path.exists(cpu_quota_file_name) and os.path.exists(cpu_period_file_name):\n try:\n with open(cpu_quota_file_name, \"r\") as quota_file, open(\n cpu_period_file_name, \"r\"\n ) as period_file:\n cpu_quota = float(quota_file.read()) / float(period_file.read())\n except Exception:\n logger.exception(\"Unexpected error calculating docker cpu quota.\")\n # Look at cpu.max for cgroups v2\n elif os.path.exists(cpu_max_file_name):\n try:\n max_file = open(cpu_max_file_name).read()\n quota_str, period_str = max_file.split()\n if quota_str.isnumeric() and period_str.isnumeric():\n cpu_quota = float(quota_str) / float(period_str)\n else:\n # quota_str is \"max\" meaning the cpu quota is unset\n cpu_quota = None\n except Exception:\n logger.exception(\"Unexpected error calculating docker cpu quota.\")\n if (cpu_quota is not None) and (cpu_quota < 0):\n cpu_quota = None\n elif cpu_quota == 0:\n # Round up in case the cpu limit is less than 1.\n cpu_quota = 1\n\n cpuset_num = None\n if os.path.exists(cpuset_file_name):\n try:\n with open(cpuset_file_name) as cpuset_file:\n ranges_as_string = cpuset_file.read()\n ranges = ranges_as_string.split(\",\")\n cpu_ids = []\n for num_or_range in ranges:\n if \"-\" in num_or_range:\n start, end = num_or_range.split(\"-\")\n cpu_ids.extend(list(range(int(start), int(end) + 1)))\n else:\n cpu_ids.append(int(num_or_range))\n cpuset_num = len(cpu_ids)\n except Exception:\n logger.exception(\"Unexpected error calculating docker cpuset ids.\")\n # Possible to-do: Parse cgroups v2's cpuset.cpus.effective for the number\n # of accessible CPUs.\n\n if cpu_quota and cpuset_num:\n return min(cpu_quota, cpuset_num)\n return cpu_quota or cpuset_num\n\n\ndef get_num_cpus() -> int:\n cpu_count = multiprocessing.cpu_count()\n if os.environ.get(\"RAY_USE_MULTIPROCESSING_CPU_COUNT\"):\n logger.info(\n \"Detected RAY_USE_MULTIPROCESSING_CPU_COUNT=1: Using \"\n \"multiprocessing.cpu_count() to detect the number of CPUs. \"\n \"This may be inconsistent when used inside docker. 
\"\n \"To correctly detect CPUs, unset the env var: \"\n \"`RAY_USE_MULTIPROCESSING_CPU_COUNT`.\"\n )\n return cpu_count\n try:\n # Not easy to get cpu count in docker, see:\n # https://bugs.python.org/issue36054\n docker_count = _get_docker_cpus()\n if docker_count is not None and docker_count != cpu_count:\n # Don't log this warning if we're on K8s or if the warning is\n # explicitly disabled.\n if (\n \"RAY_DISABLE_DOCKER_CPU_WARNING\" not in os.environ\n and \"KUBERNETES_SERVICE_HOST\" not in os.environ\n ):\n logger.warning(\n \"Detecting docker specified CPUs. In \"\n \"previous versions of Ray, CPU detection in containers \"\n \"was incorrect. Please ensure that Ray has enough CPUs \"\n \"allocated. As a temporary workaround to revert to the \"\n \"prior behavior, set \"\n \"`RAY_USE_MULTIPROCESSING_CPU_COUNT=1` as an env var \"\n \"before starting Ray. Set the env var: \"\n \"`RAY_DISABLE_DOCKER_CPU_WARNING=1` to mute this warning.\"\n )\n # TODO (Alex): We should probably add support for fractional cpus.\n if int(docker_count) != float(docker_count):\n logger.warning(\n f\"Ray currently does not support initializing Ray\"\n f\"with fractional cpus. Your num_cpus will be \"\n f\"truncated from {docker_count} to \"\n f\"{int(docker_count)}.\"\n )\n docker_count = int(docker_count)\n cpu_count = docker_count\n\n except Exception:\n # `nproc` and cgroup are linux-only. If docker only works on linux\n # (will run in a linux VM on other platforms), so this is fine.\n pass\n\n return cpu_count\n\n\ndef get_used_memory():\n \"\"\"Return the currently used system memory in bytes\n\n Returns:\n The total amount of used memory\n \"\"\"\n # Try to accurately figure out the memory usage if we are in a docker\n # container.\n docker_usage = None\n # For cgroups v1:\n memory_usage_filename = \"/sys/fs/cgroup/memory/memory.usage_in_bytes\"\n # For cgroups v2:\n memory_usage_filename_v2 = \"/sys/fs/cgroup/memory.current\"\n if os.path.exists(memory_usage_filename):\n with open(memory_usage_filename, \"r\") as f:\n docker_usage = int(f.read())\n elif os.path.exists(memory_usage_filename_v2):\n with open(memory_usage_filename_v2, \"r\") as f:\n docker_usage = int(f.read())\n\n # Use psutil if it is available.\n psutil_memory_in_bytes = psutil.virtual_memory().used\n\n if docker_usage is not None:\n # We take the min because the cgroup limit is very large if we aren't\n # in Docker.\n return min(docker_usage, psutil_memory_in_bytes)\n\n return psutil_memory_in_bytes\n\n\ndef estimate_available_memory():\n \"\"\"Return the currently available amount of system memory in bytes.\n\n Returns:\n The total amount of available memory in bytes. 
Based on the used\n and total memory.\n\n \"\"\"\n return get_system_memory() - get_used_memory()\n\n\ndef get_shared_memory_bytes():\n \"\"\"Get the size of the shared memory file system.\n\n Returns:\n The size of the shared memory file system in bytes.\n \"\"\"\n # Make sure this is only called on Linux.\n assert sys.platform == \"linux\" or sys.platform == \"linux2\"\n\n shm_fd = os.open(\"/dev/shm\", os.O_RDONLY)\n try:\n shm_fs_stats = os.fstatvfs(shm_fd)\n # The value shm_fs_stats.f_bsize is the block size and the\n # value shm_fs_stats.f_bavail is the number of available\n # blocks.\n shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail\n finally:\n os.close(shm_fd)\n\n return shm_avail\n\n\ndef check_oversized_function(\n pickled: bytes, name: str, obj_type: str, worker: \"ray.Worker\"\n) -> None:\n \"\"\"Send a warning message if the pickled function is too large.\n\n Args:\n pickled: the pickled function.\n name: name of the pickled object.\n obj_type: type of the pickled object, can be 'function',\n 'remote function', or 'actor'.\n worker: the worker used to send warning message. message will be logged\n locally if None.\n \"\"\"\n length = len(pickled)\n if length <= ray_constants.FUNCTION_SIZE_WARN_THRESHOLD:\n return\n elif length < ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD:\n warning_message = (\n \"The {} {} is very large ({} MiB). \"\n \"Check that its definition is not implicitly capturing a large \"\n \"array or other object in scope. Tip: use ray.put() to put large \"\n \"objects in the Ray object store.\"\n ).format(obj_type, name, length // (1024 * 1024))\n if worker:\n push_error_to_driver(\n worker,\n ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,\n \"Warning: \" + warning_message,\n job_id=worker.current_job_id,\n )\n else:\n error = (\n \"The {} {} is too large ({} MiB > FUNCTION_SIZE_ERROR_THRESHOLD={}\"\n \" MiB). Check that its definition is not implicitly capturing a \"\n \"large array or other object in scope. 
Tip: use ray.put() to \"\n \"put large objects in the Ray object store.\"\n ).format(\n obj_type,\n name,\n length // (1024 * 1024),\n ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD // (1024 * 1024),\n )\n raise ValueError(error)\n\n\ndef is_main_thread():\n return threading.current_thread().getName() == \"MainThread\"\n\n\ndef detect_fate_sharing_support_win32():\n global win32_job, win32_AssignProcessToJobObject\n if win32_job is None and sys.platform == \"win32\":\n import ctypes\n\n try:\n from ctypes.wintypes import BOOL, DWORD, HANDLE, LPVOID, LPCWSTR\n\n kernel32 = ctypes.WinDLL(\"kernel32\")\n kernel32.CreateJobObjectW.argtypes = (LPVOID, LPCWSTR)\n kernel32.CreateJobObjectW.restype = HANDLE\n sijo_argtypes = (HANDLE, ctypes.c_int, LPVOID, DWORD)\n kernel32.SetInformationJobObject.argtypes = sijo_argtypes\n kernel32.SetInformationJobObject.restype = BOOL\n kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)\n kernel32.AssignProcessToJobObject.restype = BOOL\n kernel32.IsDebuggerPresent.argtypes = ()\n kernel32.IsDebuggerPresent.restype = BOOL\n except (AttributeError, TypeError, ImportError):\n kernel32 = None\n job = kernel32.CreateJobObjectW(None, None) if kernel32 else None\n job = subprocess.Handle(job) if job else job\n if job:\n from ctypes.wintypes import DWORD, LARGE_INTEGER, ULARGE_INTEGER\n\n class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):\n _fields_ = [\n (\"PerProcessUserTimeLimit\", LARGE_INTEGER),\n (\"PerJobUserTimeLimit\", LARGE_INTEGER),\n (\"LimitFlags\", DWORD),\n (\"MinimumWorkingSetSize\", ctypes.c_size_t),\n (\"MaximumWorkingSetSize\", ctypes.c_size_t),\n (\"ActiveProcessLimit\", DWORD),\n (\"Affinity\", ctypes.c_size_t),\n (\"PriorityClass\", DWORD),\n (\"SchedulingClass\", DWORD),\n ]\n\n class IO_COUNTERS(ctypes.Structure):\n _fields_ = [\n (\"ReadOperationCount\", ULARGE_INTEGER),\n (\"WriteOperationCount\", ULARGE_INTEGER),\n (\"OtherOperationCount\", ULARGE_INTEGER),\n (\"ReadTransferCount\", ULARGE_INTEGER),\n (\"WriteTransferCount\", ULARGE_INTEGER),\n (\"OtherTransferCount\", ULARGE_INTEGER),\n ]\n\n class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):\n _fields_ = [\n (\"BasicLimitInformation\", JOBOBJECT_BASIC_LIMIT_INFORMATION),\n (\"IoInfo\", IO_COUNTERS),\n (\"ProcessMemoryLimit\", ctypes.c_size_t),\n (\"JobMemoryLimit\", ctypes.c_size_t),\n (\"PeakProcessMemoryUsed\", ctypes.c_size_t),\n (\"PeakJobMemoryUsed\", ctypes.c_size_t),\n ]\n\n debug = kernel32.IsDebuggerPresent()\n\n # Defined in <WinNT.h>; also available here:\n # https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-setinformationjobobject\n JobObjectExtendedLimitInformation = 9\n JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800\n JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400\n JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000\n buf = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()\n buf.BasicLimitInformation.LimitFlags = (\n (0 if debug else JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)\n | JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION\n | JOB_OBJECT_LIMIT_BREAKAWAY_OK\n )\n infoclass = JobObjectExtendedLimitInformation\n if not kernel32.SetInformationJobObject(\n job, infoclass, ctypes.byref(buf), ctypes.sizeof(buf)\n ):\n job = None\n win32_AssignProcessToJobObject = (\n kernel32.AssignProcessToJobObject if kernel32 is not None else False\n )\n win32_job = job if job else False\n return bool(win32_job)\n\n\ndef detect_fate_sharing_support_linux():\n global linux_prctl\n if linux_prctl is None and sys.platform.startswith(\"linux\"):\n try:\n from ctypes 
import c_int, c_ulong, CDLL\n\n prctl = CDLL(None).prctl\n prctl.restype = c_int\n prctl.argtypes = [c_int, c_ulong, c_ulong, c_ulong, c_ulong]\n except (AttributeError, TypeError):\n prctl = None\n linux_prctl = prctl if prctl else False\n return bool(linux_prctl)\n\n\ndef detect_fate_sharing_support():\n result = None\n if sys.platform == \"win32\":\n result = detect_fate_sharing_support_win32()\n elif sys.platform.startswith(\"linux\"):\n result = detect_fate_sharing_support_linux()\n return result\n\n\ndef set_kill_on_parent_death_linux():\n \"\"\"Ensures this process dies if its parent dies (fate-sharing).\n\n Linux-only. Must be called in preexec_fn (i.e. by the child).\n \"\"\"\n if detect_fate_sharing_support_linux():\n import signal\n\n PR_SET_PDEATHSIG = 1\n if linux_prctl(PR_SET_PDEATHSIG, signal.SIGKILL, 0, 0, 0) != 0:\n import ctypes\n\n raise OSError(ctypes.get_errno(), \"prctl(PR_SET_PDEATHSIG) failed\")\n else:\n assert False, \"PR_SET_PDEATHSIG used despite being unavailable\"\n\n\ndef set_kill_child_on_death_win32(child_proc):\n \"\"\"Ensures the child process dies if this process dies (fate-sharing).\n\n Windows-only. Must be called by the parent, after spawning the child.\n\n Args:\n child_proc: The subprocess.Popen or subprocess.Handle object.\n \"\"\"\n\n if isinstance(child_proc, subprocess.Popen):\n child_proc = child_proc._handle\n assert isinstance(child_proc, subprocess.Handle)\n\n if detect_fate_sharing_support_win32():\n if not win32_AssignProcessToJobObject(win32_job, int(child_proc)):\n import ctypes\n\n raise OSError(ctypes.get_last_error(), \"AssignProcessToJobObject() failed\")\n else:\n assert False, \"AssignProcessToJobObject used despite being unavailable\"\n\n\ndef set_sigterm_handler(sigterm_handler):\n \"\"\"Registers a handler for SIGTERM in a platform-compatible manner.\"\"\"\n if sys.platform == \"win32\":\n # Note that these signal handlers only work for console applications.\n # TODO(mehrdadn): implement graceful process termination mechanism\n # SIGINT is Ctrl+C, SIGBREAK is Ctrl+Break.\n signal.signal(signal.SIGBREAK, sigterm_handler)\n else:\n signal.signal(signal.SIGTERM, sigterm_handler)\n\n\ndef try_make_directory_shared(directory_path):\n try:\n os.chmod(directory_path, 0o0777)\n except OSError as e:\n # Silently suppress the PermissionError that is thrown by the chmod.\n # This is done because the user attempting to change the permissions\n # on a directory may not own it. The chmod is attempted whether the\n # directory is new or not to avoid race conditions.\n # ray-project/ray/#3591\n if e.errno in [errno.EACCES, errno.EPERM]:\n pass\n else:\n raise\n\n\ndef try_to_create_directory(directory_path):\n \"\"\"Attempt to create a directory that is globally readable/writable.\n\n Args:\n directory_path: The path of the directory to create.\n \"\"\"\n directory_path = os.path.expanduser(directory_path)\n os.makedirs(directory_path, exist_ok=True)\n # Change the log directory permissions so others can use it. This is\n # important when multiple people are using the same machine.\n try_make_directory_shared(directory_path)\n\n\ndef try_to_symlink(symlink_path, target_path):\n \"\"\"Attempt to create a symlink.\n\n If the symlink path exists and isn't a symlink, the symlink will not be\n created. 
If a symlink exists in the path, it will be attempted to be\n removed and replaced.\n\n Args:\n symlink_path: The path at which to create the symlink.\n target_path: The path the symlink should point to.\n \"\"\"\n symlink_path = os.path.expanduser(symlink_path)\n target_path = os.path.expanduser(target_path)\n\n if os.path.exists(symlink_path):\n if os.path.islink(symlink_path):\n # Try to remove existing symlink.\n try:\n os.remove(symlink_path)\n except OSError:\n return\n else:\n # There's an existing non-symlink file, don't overwrite it.\n return\n\n try:\n os.symlink(target_path, symlink_path)\n except OSError:\n return\n\n\ndef get_user():\n if pwd is None:\n return \"\"\n try:\n return pwd.getpwuid(os.getuid()).pw_name\n except Exception:\n return \"\"\n\n\ndef get_function_args(callable):\n all_parameters = frozenset(signature(callable).parameters)\n return list(all_parameters)\n\n\ndef get_conda_bin_executable(executable_name):\n \"\"\"\n Return path to the specified executable, assumed to be discoverable within\n the 'bin' subdirectory of a conda installation. Adapted from\n https://github.com/mlflow/mlflow.\n \"\"\"\n\n # Use CONDA_EXE as per https://github.com/conda/conda/issues/7126\n if \"CONDA_EXE\" in os.environ:\n conda_bin_dir = os.path.dirname(os.environ[\"CONDA_EXE\"])\n return os.path.join(conda_bin_dir, executable_name)\n return executable_name\n\n\ndef get_conda_env_dir(env_name):\n \"\"\"Find and validate the conda directory for a given conda environment.\n\n For example, given the environment name `tf1`, this function checks\n the existence of the corresponding conda directory, e.g.\n `/Users/scaly/anaconda3/envs/tf1`, and returns it.\n \"\"\"\n conda_prefix = os.environ.get(\"CONDA_PREFIX\")\n if conda_prefix is None:\n # The caller is neither in a conda env or in (base) env. This is rare\n # because by default, new terminals start in (base), but we can still\n # support this case.\n conda_exe = os.environ.get(\"CONDA_EXE\")\n if conda_exe is None:\n raise ValueError(\n \"Cannot find environment variables set by conda. \"\n \"Please verify conda is installed.\"\n )\n # Example: CONDA_EXE=$HOME/anaconda3/bin/python\n # Strip out /bin/python by going up two parent directories.\n conda_prefix = str(Path(conda_exe).parent.parent)\n\n # There are two cases:\n # 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and\n # CONDA_PREFIX=$HOME/anaconda3\n # 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and\n # CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name\n if os.environ.get(\"CONDA_DEFAULT_ENV\") == \"base\":\n # Caller's curent environment is (base).\n # Not recommended by conda, but we can still support it.\n if env_name == \"base\":\n # Desired environment is (base), located at e.g. $HOME/anaconda3\n env_dir = conda_prefix\n else:\n # Desired environment is user-created, e.g.\n # $HOME/anaconda3/envs/$env_name\n env_dir = os.path.join(conda_prefix, \"envs\", env_name)\n else:\n # Now `conda_prefix` should be something like\n # $HOME/anaconda3/envs/$current_env_name\n # We want to replace the last component with the desired env name.\n conda_envs_dir = os.path.split(conda_prefix)[0]\n env_dir = os.path.join(conda_envs_dir, env_name)\n if not os.path.isdir(env_dir):\n raise ValueError(\n \"conda env \"\n + env_name\n + \" not found in conda envs directory. 
Run `conda env list` to \"\n + \"verify the name is correct.\"\n )\n return env_dir\n\n\ndef get_call_location(back: int = 1):\n \"\"\"\n Get the location (filename and line number) of a function caller, `back`\n frames up the stack.\n\n Args:\n back: The number of frames to go up the stack, not including this\n function.\n \"\"\"\n stack = inspect.stack()\n try:\n frame = stack[back + 1]\n return f\"{frame.filename}:{frame.lineno}\"\n except IndexError:\n return \"UNKNOWN\"\n\n\n# Used to only print a deprecation warning once for a given function if we\n# don't wish to spam the caller.\n_PRINTED_WARNING = set()\n\n\n# The following is inspired by\n# https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/python/util/deprecation.py#L274-L329\ndef deprecated(\n instructions: Optional[str] = None,\n removal_release: Optional[str] = None,\n removal_date: Optional[str] = None,\n warn_once: bool = True,\n):\n \"\"\"\n Creates a decorator for marking functions as deprecated. The decorator\n will log a deprecation warning on the first (or all, see `warn_once` arg)\n invocations, and will otherwise leave the wrapped function unchanged.\n\n Args:\n instructions: Instructions for the caller to update their code.\n removal_release: The release in which this deprecated function\n will be removed. Only one of removal_release and removal_date\n should be specified. If neither is specfieid, we'll warning that\n the function will be removed \"in a future release\".\n removal_date: The date on which this deprecated function will be\n removed. Only one of removal_release and removal_date should be\n specified. If neither is specfieid, we'll warning that\n the function will be removed \"in a future release\".\n warn_once: If true, the deprecation warning will only be logged\n on the first invocation. Otherwise, the deprecation warning will\n be logged on every invocation. Defaults to True.\n\n Returns:\n A decorator to be used for wrapping deprecated functions.\n \"\"\"\n if removal_release is not None and removal_date is not None:\n raise ValueError(\n \"Only one of removal_release and removal_date should be specified.\"\n )\n\n def deprecated_wrapper(func):\n @functools.wraps(func)\n def new_func(*args, **kwargs):\n global _PRINTED_WARNING\n if func not in _PRINTED_WARNING:\n if warn_once:\n _PRINTED_WARNING.add(func)\n msg = (\n \"From {}: {} (from {}) is deprecated and will \".format(\n get_call_location(), func.__name__, func.__module__\n )\n + \"be removed \"\n + (\n f\"in version {removal_release}.\"\n if removal_release is not None\n else f\"after {removal_date}\"\n if removal_date is not None\n else \"in a future version\"\n )\n + (f\" {instructions}\" if instructions is not None else \"\")\n )\n warnings.warn(msg)\n return func(*args, **kwargs)\n\n return new_func\n\n return deprecated_wrapper\n\n\ndef import_attr(full_path: str):\n \"\"\"Given a full import path to a module attr, return the imported attr.\n\n For example, the following are equivalent:\n MyClass = import_attr(\"module.submodule:MyClass\")\n MyClass = import_attr(\"module.submodule.MyClass\")\n from module.submodule import MyClass\n\n Returns:\n Imported attr\n \"\"\"\n if full_path is None:\n raise TypeError(\"import path cannot be None\")\n\n if \":\" in full_path:\n if full_path.count(\":\") > 1:\n raise ValueError(\n f'Got invalid import path \"{full_path}\". 
An '\n \"import path may have at most one colon.\"\n )\n module_name, attr_name = full_path.split(\":\")\n else:\n last_period_idx = full_path.rfind(\".\")\n module_name = full_path[:last_period_idx]\n attr_name = full_path[last_period_idx + 1 :]\n\n module = importlib.import_module(module_name)\n return getattr(module, attr_name)\n\n\ndef get_wheel_filename(\n sys_platform: str = sys.platform,\n ray_version: str = ray.__version__,\n py_version: str = f\"{sys.version_info.major}{sys.version_info.minor}\",\n) -> str:\n \"\"\"Returns the filename used for the nightly Ray wheel.\n\n Args:\n sys_platform: The platform as returned by sys.platform. Examples:\n \"darwin\", \"linux\", \"win32\"\n ray_version: The Ray version as returned by ray.__version__ or\n `ray --version`. Examples: \"3.0.0.dev0\"\n py_version (str):\n The major and minor Python versions concatenated. Examples: \"36\",\n \"37\", \"38\", \"39\"\n Returns:\n The wheel file name. Examples:\n ray-3.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl\n \"\"\"\n assert py_version in [\"36\", \"37\", \"38\", \"39\"], py_version\n\n os_strings = {\n \"darwin\": \"macosx_10_15_x86_64\"\n if py_version in [\"38\", \"39\"]\n else \"macosx_10_15_intel\",\n \"linux\": \"manylinux2014_x86_64\",\n \"win32\": \"win_amd64\",\n }\n\n assert sys_platform in os_strings, sys_platform\n\n wheel_filename = (\n f\"ray-{ray_version}-cp{py_version}-\"\n f\"cp{py_version}{'m' if py_version in ['36', '37'] else ''}\"\n f\"-{os_strings[sys_platform]}.whl\"\n )\n\n return wheel_filename\n\n\ndef get_master_wheel_url(\n ray_commit: str = ray.__commit__,\n sys_platform: str = sys.platform,\n ray_version: str = ray.__version__,\n py_version: str = f\"{sys.version_info.major}{sys.version_info.minor}\",\n) -> str:\n \"\"\"Return the URL for the wheel from a specific commit.\"\"\"\n filename = get_wheel_filename(\n sys_platform=sys_platform, ray_version=ray_version, py_version=py_version\n )\n return (\n f\"https://s3-us-west-2.amazonaws.com/ray-wheels/master/\"\n f\"{ray_commit}/{filename}\"\n )\n\n\ndef get_release_wheel_url(\n ray_commit: str = ray.__commit__,\n sys_platform: str = sys.platform,\n ray_version: str = ray.__version__,\n py_version: str = f\"{sys.version_info.major}{sys.version_info.minor}\",\n) -> str:\n \"\"\"Return the URL for the wheel for a specific release.\"\"\"\n filename = get_wheel_filename(\n sys_platform=sys_platform, ray_version=ray_version, py_version=py_version\n )\n return (\n f\"https://ray-wheels.s3-us-west-2.amazonaws.com/releases/\"\n f\"{ray_version}/{ray_commit}/{filename}\"\n )\n # e.g. https://ray-wheels.s3-us-west-2.amazonaws.com/releases/1.4.0rc1/e7c7\n # f6371a69eb727fa469e4cd6f4fbefd143b4c/ray-1.4.0rc1-cp36-cp36m-manylinux201\n # 4_x86_64.whl\n\n\ndef validate_namespace(namespace: str):\n if not isinstance(namespace, str):\n raise TypeError(\"namespace must be None or a string.\")\n elif namespace == \"\":\n raise ValueError(\n '\"\" is not a valid namespace. 
' \"Pass None to not specify a namespace.\"\n )\n\n\ndef init_grpc_channel(\n address: str,\n options: Optional[Sequence[Tuple[str, Any]]] = None,\n asynchronous: bool = False,\n):\n grpc_module = aiogrpc if asynchronous else grpc\n if os.environ.get(\"RAY_USE_TLS\", \"0\").lower() in (\"1\", \"true\"):\n server_cert_chain, private_key, ca_cert = load_certs_from_env()\n credentials = grpc.ssl_channel_credentials(\n certificate_chain=server_cert_chain,\n private_key=private_key,\n root_certificates=ca_cert,\n )\n channel = grpc_module.secure_channel(address, credentials, options=options)\n else:\n channel = grpc_module.insecure_channel(address, options=options)\n\n return channel\n\n\ndef check_dashboard_dependencies_installed() -> bool:\n \"\"\"Returns True if Ray Dashboard dependencies are installed.\n\n Checks to see if we should start the dashboard agent or not based on the\n Ray installation version the user has installed (ray vs. ray[default]).\n Unfortunately there doesn't seem to be a cleaner way to detect this other\n than just blindly importing the relevant packages.\n\n \"\"\"\n try:\n import ray.dashboard.optional_deps # noqa: F401\n\n return True\n except ImportError:\n return False\n\n\ndef internal_kv_list_with_retry(gcs_client, prefix, namespace, num_retries=20):\n result = None\n if isinstance(prefix, str):\n prefix = prefix.encode()\n if isinstance(namespace, str):\n namespace = namespace.encode()\n for _ in range(num_retries):\n try:\n result = gcs_client.internal_kv_keys(prefix, namespace)\n except Exception as e:\n if isinstance(e, grpc.RpcError) and e.code() in (\n grpc.StatusCode.UNAVAILABLE,\n grpc.StatusCode.UNKNOWN,\n ):\n logger.warning(\n f\"Unable to connect to GCS at {gcs_client.address}. \"\n \"Check that (1) Ray GCS with matching version started \"\n \"successfully at the specified address, and (2) there is \"\n \"no firewall setting preventing access.\"\n )\n else:\n logger.exception(\"Internal KV List failed\")\n result = None\n\n if result is not None:\n break\n else:\n logger.debug(f\"Fetched {prefix}=None from KV. Retrying.\")\n time.sleep(2)\n if result is None:\n raise RuntimeError(\n f\"Could not list '{prefix}' from GCS. Did GCS start successfully?\"\n )\n return result\n\n\ndef internal_kv_get_with_retry(gcs_client, key, namespace, num_retries=20):\n result = None\n if isinstance(key, str):\n key = key.encode()\n for _ in range(num_retries):\n try:\n result = gcs_client.internal_kv_get(key, namespace)\n except Exception as e:\n if isinstance(e, grpc.RpcError) and e.code() in (\n grpc.StatusCode.UNAVAILABLE,\n grpc.StatusCode.UNKNOWN,\n ):\n logger.warning(\n f\"Unable to connect to GCS at {gcs_client.address}. \"\n \"Check that (1) Ray GCS with matching version started \"\n \"successfully at the specified address, and (2) there is \"\n \"no firewall setting preventing access.\"\n )\n else:\n logger.exception(\"Internal KV Get failed\")\n result = None\n\n if result is not None:\n break\n else:\n logger.debug(f\"Fetched {key}=None from KV. Retrying.\")\n time.sleep(2)\n if not result:\n raise RuntimeError(\n f\"Could not read '{key.decode()}' from GCS. 
Did GCS start successfully?\"\n )\n return result\n\n\ndef internal_kv_put_with_retry(gcs_client, key, value, namespace, num_retries=20):\n if isinstance(key, str):\n key = key.encode()\n if isinstance(value, str):\n value = value.encode()\n if isinstance(namespace, str):\n namespace = namespace.encode()\n error = None\n for _ in range(num_retries):\n try:\n return gcs_client.internal_kv_put(\n key, value, overwrite=True, namespace=namespace\n )\n except grpc.RpcError as e:\n if e.code() in (\n grpc.StatusCode.UNAVAILABLE,\n grpc.StatusCode.UNKNOWN,\n ):\n logger.warning(\n f\"Unable to connect to GCS at {gcs_client.address}. \"\n \"Check that (1) Ray GCS with matching version started \"\n \"successfully at the specified address, and (2) there is \"\n \"no firewall setting preventing access.\"\n )\n else:\n logger.exception(\"Internal KV Put failed\")\n time.sleep(2)\n error = e\n # Reraise the last grpc.RpcError.\n raise error\n\n\ndef compute_version_info():\n \"\"\"Compute the versions of Python, and Ray.\n\n Returns:\n A tuple containing the version information.\n \"\"\"\n ray_version = ray.__version__\n python_version = \".\".join(map(str, sys.version_info[:3]))\n return ray_version, python_version\n\n\ndef get_directory_size_bytes(path: Union[str, Path] = \".\") -> int:\n \"\"\"Get the total size of a directory in bytes, including subdirectories.\"\"\"\n total_size_bytes = 0\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n # skip if it is a symbolic link or a .pyc file\n if not os.path.islink(fp) and not f.endswith(\".pyc\"):\n total_size_bytes += os.path.getsize(fp)\n\n return total_size_bytes\n\n\ndef check_version_info(cluster_metadata):\n \"\"\"Check if the Python and Ray versions stored in GCS matches this process.\n Args:\n cluster_metadata: Ray cluster metadata from GCS.\n\n Raises:\n Exception: An exception is raised if there is a version mismatch.\n \"\"\"\n cluster_version_info = (\n cluster_metadata[\"ray_version\"],\n cluster_metadata[\"python_version\"],\n )\n version_info = compute_version_info()\n if version_info != cluster_version_info:\n node_ip_address = ray._private.services.get_node_ip_address()\n error_message = (\n \"Version mismatch: The cluster was started with:\\n\"\n \" Ray: \" + cluster_version_info[0] + \"\\n\"\n \" Python: \" + cluster_version_info[1] + \"\\n\"\n \"This process on node \" + node_ip_address + \" was started with:\" + \"\\n\"\n \" Ray: \" + version_info[0] + \"\\n\"\n \" Python: \" + version_info[1] + \"\\n\"\n )\n raise RuntimeError(error_message)\n",
"import os\nimport tempfile\nfrom collections import namedtuple\nfrom multiprocessing import Queue\nimport unittest\n\nimport numpy as np\n\nfrom ray.tune import Trainable\nfrom ray.tune.function_runner import wrap_function\nfrom ray.tune.integration.wandb import (\n WandbLoggerCallback,\n _WandbLoggingProcess,\n WANDB_ENV_VAR,\n WandbTrainableMixin,\n wandb_mixin,\n _QueueItem,\n)\nfrom ray.tune.result import TRIAL_INFO\nfrom ray.tune.trial import _TrialInfo\nfrom ray.tune.utils.placement_groups import PlacementGroupFactory\n\n\nclass Trial(\n namedtuple(\n \"MockTrial\",\n [\n \"config\",\n \"trial_id\",\n \"trial_name\",\n \"trainable_name\",\n \"placement_group_factory\",\n \"logdir\",\n ],\n )\n):\n def __hash__(self):\n return hash(self.trial_id)\n\n def __str__(self):\n return self.trial_name\n\n\nclass _MockWandbLoggingProcess(_WandbLoggingProcess):\n def __init__(self, logdir, queue, exclude, to_config, *args, **kwargs):\n super(_MockWandbLoggingProcess, self).__init__(\n logdir, queue, exclude, to_config, *args, **kwargs\n )\n\n self.logs = Queue()\n self.config_updates = Queue()\n\n def run(self):\n while True:\n result_type, result_content = self.queue.get()\n if result_type == _QueueItem.END:\n break\n log, config_update = self._handle_result(result_content)\n self.config_updates.put(config_update)\n self.logs.put(log)\n\n\nclass WandbTestExperimentLogger(WandbLoggerCallback):\n _logger_process_cls = _MockWandbLoggingProcess\n\n @property\n def trial_processes(self):\n return self._trial_processes\n\n\nclass _MockWandbAPI(object):\n def init(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n return self\n\n\nclass _MockWandbTrainableMixin(WandbTrainableMixin):\n _wandb = _MockWandbAPI()\n\n\nclass WandbTestTrainable(_MockWandbTrainableMixin, Trainable):\n pass\n\n\nclass WandbIntegrationTest(unittest.TestCase):\n def setUp(self):\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n def tearDown(self):\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n def testWandbLoggerConfig(self):\n trial_config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n trial_config,\n 0,\n \"trial_0\",\n \"trainable\",\n PlacementGroupFactory([{\"CPU\": 1}]),\n \"/tmp\",\n )\n\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n # No API key\n with self.assertRaises(ValueError):\n logger = WandbTestExperimentLogger(project=\"test_project\")\n logger.setup()\n\n # API Key in config\n logger = WandbTestExperimentLogger(project=\"test_project\", api_key=\"1234\")\n logger.setup()\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"1234\")\n\n del logger\n del os.environ[WANDB_ENV_VAR]\n\n # API Key file\n with tempfile.NamedTemporaryFile(\"wt\") as fp:\n fp.write(\"5678\")\n fp.flush()\n\n logger = WandbTestExperimentLogger(\n project=\"test_project\", api_key_file=fp.name\n )\n logger.setup()\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"5678\")\n\n del logger\n del os.environ[WANDB_ENV_VAR]\n\n # API Key in env\n os.environ[WANDB_ENV_VAR] = \"9012\"\n logger = WandbTestExperimentLogger(project=\"test_project\")\n logger.setup()\n del logger\n\n # From now on, the API key is in the env variable.\n\n logger = WandbTestExperimentLogger(project=\"test_project\")\n logger.log_trial_start(trial)\n\n self.assertEqual(\n logger.trial_processes[trial].kwargs[\"project\"], \"test_project\"\n )\n self.assertEqual(logger.trial_processes[trial].kwargs[\"id\"], trial.trial_id)\n 
self.assertEqual(logger.trial_processes[trial].kwargs[\"name\"], trial.trial_name)\n self.assertEqual(\n logger.trial_processes[trial].kwargs[\"group\"], trial.trainable_name\n )\n self.assertIn(\"config\", logger.trial_processes[trial]._exclude)\n\n del logger\n\n # log config.\n logger = WandbTestExperimentLogger(project=\"test_project\", log_config=True)\n logger.log_trial_start(trial)\n self.assertNotIn(\"config\", logger.trial_processes[trial]._exclude)\n self.assertNotIn(\"metric\", logger.trial_processes[trial]._exclude)\n\n del logger\n\n # Exclude metric.\n logger = WandbTestExperimentLogger(project=\"test_project\", excludes=[\"metric\"])\n logger.log_trial_start(trial)\n self.assertIn(\"config\", logger.trial_processes[trial]._exclude)\n self.assertIn(\"metric\", logger.trial_processes[trial]._exclude)\n\n del logger\n\n def testWandbLoggerReporting(self):\n trial_config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n trial_config,\n 0,\n \"trial_0\",\n \"trainable\",\n PlacementGroupFactory([{\"CPU\": 1}]),\n \"/tmp\",\n )\n\n logger = WandbTestExperimentLogger(\n project=\"test_project\", api_key=\"1234\", excludes=[\"metric2\"]\n )\n logger.on_trial_start(0, [], trial)\n\n r1 = {\n \"metric1\": 0.8,\n \"metric2\": 1.4,\n \"metric3\": np.asarray(32.0),\n \"metric4\": np.float32(32.0),\n \"const\": \"text\",\n \"config\": trial_config,\n }\n\n logger.on_trial_result(0, [], trial, r1)\n\n logged = logger.trial_processes[trial].logs.get(timeout=10)\n self.assertIn(\"metric1\", logged)\n self.assertNotIn(\"metric2\", logged)\n self.assertIn(\"metric3\", logged)\n self.assertIn(\"metric4\", logged)\n self.assertNotIn(\"const\", logged)\n self.assertNotIn(\"config\", logged)\n\n del logger\n\n def testWandbMixinConfig(self):\n config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n config,\n 0,\n \"trial_0\",\n \"trainable\",\n PlacementGroupFactory([{\"CPU\": 1}]),\n \"/tmp\",\n )\n trial_info = _TrialInfo(trial)\n\n config[TRIAL_INFO] = trial_info\n\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n # Needs at least a project\n with self.assertRaises(ValueError):\n trainable = WandbTestTrainable(config)\n\n # No API key\n config[\"wandb\"] = {\"project\": \"test_project\"}\n with self.assertRaises(ValueError):\n trainable = WandbTestTrainable(config)\n\n # API Key in config\n config[\"wandb\"] = {\"project\": \"test_project\", \"api_key\": \"1234\"}\n trainable = WandbTestTrainable(config)\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"1234\")\n\n del os.environ[WANDB_ENV_VAR]\n\n # API Key file\n with tempfile.NamedTemporaryFile(\"wt\") as fp:\n fp.write(\"5678\")\n fp.flush()\n\n config[\"wandb\"] = {\"project\": \"test_project\", \"api_key_file\": fp.name}\n\n trainable = WandbTestTrainable(config)\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"5678\")\n\n del os.environ[WANDB_ENV_VAR]\n\n # API Key in env\n os.environ[WANDB_ENV_VAR] = \"9012\"\n config[\"wandb\"] = {\"project\": \"test_project\"}\n trainable = WandbTestTrainable(config)\n\n # From now on, the API key is in the env variable.\n\n # Default configuration\n config[\"wandb\"] = {\"project\": \"test_project\"}\n config[TRIAL_INFO] = trial_info\n\n trainable = WandbTestTrainable(config)\n self.assertEqual(trainable.wandb.kwargs[\"project\"], \"test_project\")\n self.assertEqual(trainable.wandb.kwargs[\"id\"], trial.trial_id)\n self.assertEqual(trainable.wandb.kwargs[\"name\"], trial.trial_name)\n self.assertEqual(trainable.wandb.kwargs[\"group\"], \"WandbTestTrainable\")\n\n def 
testWandbDecoratorConfig(self):\n config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n config,\n 0,\n \"trial_0\",\n \"trainable\",\n PlacementGroupFactory([{\"CPU\": 1}]),\n \"/tmp\",\n )\n trial_info = _TrialInfo(trial)\n\n @wandb_mixin\n def train_fn(config):\n return 1\n\n train_fn.__mixins__ = (_MockWandbTrainableMixin,)\n\n config[TRIAL_INFO] = trial_info\n\n if WANDB_ENV_VAR in os.environ:\n del os.environ[WANDB_ENV_VAR]\n\n # Needs at least a project\n with self.assertRaises(ValueError):\n wrapped = wrap_function(train_fn)(config)\n\n # No API key\n config[\"wandb\"] = {\"project\": \"test_project\"}\n with self.assertRaises(ValueError):\n wrapped = wrap_function(train_fn)(config)\n\n # API Key in config\n config[\"wandb\"] = {\"project\": \"test_project\", \"api_key\": \"1234\"}\n wrapped = wrap_function(train_fn)(config)\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"1234\")\n\n del os.environ[WANDB_ENV_VAR]\n\n # API Key file\n with tempfile.NamedTemporaryFile(\"wt\") as fp:\n fp.write(\"5678\")\n fp.flush()\n\n config[\"wandb\"] = {\"project\": \"test_project\", \"api_key_file\": fp.name}\n\n wrapped = wrap_function(train_fn)(config)\n self.assertEqual(os.environ[WANDB_ENV_VAR], \"5678\")\n\n del os.environ[WANDB_ENV_VAR]\n\n # API Key in env\n os.environ[WANDB_ENV_VAR] = \"9012\"\n config[\"wandb\"] = {\"project\": \"test_project\"}\n wrapped = wrap_function(train_fn)(config)\n\n # From now on, the API key is in the env variable.\n\n # Default configuration\n config[\"wandb\"] = {\"project\": \"test_project\"}\n config[TRIAL_INFO] = trial_info\n\n wrapped = wrap_function(train_fn)(config)\n self.assertEqual(wrapped.wandb.kwargs[\"project\"], \"test_project\")\n self.assertEqual(wrapped.wandb.kwargs[\"id\"], trial.trial_id)\n self.assertEqual(wrapped.wandb.kwargs[\"name\"], trial.trial_name)\n\n def testWandbMixinRLlib(self):\n \"\"\"Test compatibility with RLlib configuration dicts\"\"\"\n # Local import to avoid tune dependency on rllib\n try:\n from ray.rllib.algorithms.ppo import PPO\n except ImportError:\n self.skipTest(\"ray[rllib] not available\")\n return\n\n class WandbPPOTrainer(_MockWandbTrainableMixin, PPO):\n pass\n\n config = {\n \"env\": \"CartPole-v0\",\n \"wandb\": {\n \"project\": \"test_project\",\n \"api_key\": \"1234\",\n },\n }\n\n # Test that trainer object can be initialized\n WandbPPOTrainer(config)\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n\n sys.exit(pytest.main([\"-v\", __file__]))\n"
] | [
[
"numpy.random.bytes",
"numpy.random.set_state",
"numpy.random.seed",
"numpy.random.get_state"
],
[
"numpy.float32",
"numpy.asarray"
]
] |
gitter-badger/mlmodels | [
"f70f1da7434e8855eed50adc67b49cc169f2ea24"
] | [
"mlmodels/model_tf/misc/tf_nlp/speech-to-text/1.tacotron/train.py"
] | [
"# coding: utf-8\n\n# In[1]:\n\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nfrom model import Model\nfrom setting import batch_size, get_cached, idx2char, n_mels, reduction_factor, text2idx\n\n# In[2]:\n\n\npaths, lengths, texts = [], [], []\ntext_files = [f for f in os.listdir(\"spectrogram\") if f.endswith(\".npy\")]\nfor fpath in text_files:\n with open(\"../data/\" + fpath.replace(\"npy\", \"txt\")) as fopen:\n text, converted = text2idx(fopen.read())\n texts.append(converted)\n lengths.append(len(text))\n paths.append(fpath.replace(\".npy\", \"\"))\n\n\n# In[3]:\n\n\ndef dynamic_batching(paths):\n spectrograms, max_x = [], 0\n for path in paths:\n spectrograms.append(np.load(\"spectrogram/\" + path + \".npy\"))\n if spectrograms[-1].shape[0] > max_x:\n max_x = spectrograms[-1].shape[0]\n return spectrograms, max_x\n\n\n# In[4]:\n\n\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\nmodel = Model()\nsess.run(tf.global_variables_initializer())\n\n\n# In[5]:\n\n\nfor e in range(30):\n pbar = tqdm(range(0, len(text_files), batch_size), desc=\"minibatch loop\")\n total_cost, total_acc = 0, 0\n for k in pbar:\n index = min(k + batch_size, len(text_files))\n files, max_x = dynamic_batching(paths[k:index])\n max_y = max(lengths[k:index])\n batch_x = np.zeros((len(files), max_x, n_mels * reduction_factor))\n batch_y = np.zeros((len(files), max_y))\n for n in range(len(files)):\n batch_x[n] = np.pad(files[n], ((max_x - files[n].shape[0], 0), (0, 0)), mode=\"constant\")\n batch_y[n] = np.pad(texts[k + n], ((0, max_y - len(texts[k + n]))), mode=\"constant\")\n _, acc, cost = sess.run(\n [model.optimizer, model.accuracy, model.cost],\n feed_dict={model.X: batch_x, model.Y: batch_y, model.Y_seq_len: lengths[k:index]},\n )\n total_cost += cost\n total_acc += acc\n pbar.set_postfix(cost=cost, accuracy=acc)\n total_cost /= len(text_files) / batch_size\n total_acc /= len(text_files) / batch_size\n\n print(\"epoch %d, avg loss %f, avg acc %f\" % (e + 1, total_cost, total_acc))\n\n\nempty_y = np.zeros((1, len(batch_y[0])))\npredicted = \"\".join(\n [\n idx2char[c]\n for c in sess.run(model.preds, feed_dict={model.X: batch_x[:1], model.Y: empty_y})[0]\n if idx2char[c] not in [\"S\", \"E\"]\n ]\n)\nground_truth = \"\".join([idx2char[c] for c in batch_y[0] if idx2char[c] not in [\"S\", \"E\"]])\nprint(\"predicted: %s, ground truth: %s\" % (predicted, ground_truth))\n"
] | [
[
"numpy.load",
"tensorflow.global_variables_initializer",
"tensorflow.InteractiveSession",
"numpy.pad",
"tensorflow.reset_default_graph"
]
] |
uhh-lt/semeval2019-hhmm | [
"b746b0fb8ab3b957d399276cb354e950f0ef30ed"
] | [
"utils.py"
] | [
"import pandas as pd\nfrom pathlib import Path\n\n\ndef df_to_csv(df, path):\n df.to_csv(path, sep='\\t', index=False, encoding='utf-8')\n\n\ndef csv_to_df(path):\n df = pd.read_csv(path, sep='\\t', dtype=str, encoding='utf-8')\n return df\n\n\ndef max_arguments(task):\n fp = open(task, 'r')\n lines_args = fp.readlines()\n maxT = 0\n for line in lines_args:\n tokens = len(line.split(' '))\n if tokens > maxT:\n maxT = tokens\n return maxT - 3 # context_id, verb pos, verb-frame\n\n\ndef max_frameArguments(dataset):\n dir = \"./semeval_data\"\n task21_auto = dir + \"/dev/auto/task-2.1.auto.txt\"\n task21_dev = dir + \"/dev/task-2.1.txt\"\n task21_test =dir+\"/test/task-2.1.txt\"\n\n\n\n if dataset == 'dev':\n task21 = task21_dev\n elif dataset == 'auto':\n task21 = task21_auto\n elif dataset == 'test':\n task21 = task21_test\n\n\n return max_arguments(task21)\n# ------------------------------------------------------------- df input from txt\nimport ud2csv\n\ndir = \"./semeval_data\"\nud_gold = dir+\"/dep-stx/pos-gold-dep-auto.conll.txt\"\n# -----------------------------------\ndef task_to_df(task, dataset):\n\n if Path('./input/train_task{}_{}.csv'.format(task, dataset)).exists():\n return csv_to_df('./input/train_task{}_{}.csv'.format(task, dataset))\n else:\n if task==1:\n return ud2csv.task1_to_df(dir+'/{}/task-1.txt'.format(dataset), ud_gold)\n if task ==22:\n return ud2csv.task22_to_df(dir + '/{}/task-2.2.txt'.format(dataset), ud_gold)\n\n\ndef task1_to_df_gd(dataset):\n if Path('./input/train_task{}_{}.csv'.format(1, dataset)).exists():\n return csv_to_df('./input/gd_task{}_{}.csv'.format(1, dataset))\n\n else:\n return ud2csv.task1_to_df_gd(dir+'/{}/task-1.txt'.format(dataset), ud_gold)\n\n\ndef task22_baselines(dataset, gr='in'):\n\n if Path('./input/all_grammaticalLabels_{}.csv'.format(dataset)).exists():\n df_task22 = csv_to_df('./input/all_grammaticalLabels_{}.csv'.format(dataset))\n else:\n df_task22 = ud2csv.task22_to_df_withFrameArgsDependencies(dir+'/{}/task-2.2.txt'.format(dataset), ud_gold)\n return ud2csv.getGrammaticalBaseline(df_task22, gr)\n"
] | [
[
"pandas.read_csv"
]
] |
qinliuliuqin/active-mri-acquisition | [
"b561f838667f4bc7753b1f89dfbdd545d0f00ada"
] | [
"activemri/experimental/cvpr19_models/data/masking_utils.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\n\nimport numpy as np\nimport torch\n\n\ndef get_mask_func(mask_type, which_dataset, rnl_params=None):\n # Whether the number of lines is random or not\n random_num_lines = mask_type[-4:] == \"_rnl\"\n if \"symmetric_basic\" in mask_type:\n logging.info(\n f\"Mask is symmetric uniform choice with random_num_lines={random_num_lines}.\"\n )\n return SymmetricUniformChoiceMaskFunc(\n [0.125],\n [4],\n which_dataset,\n random_num_lines=random_num_lines,\n rnl_params=rnl_params,\n )\n if \"basic\" in mask_type:\n # First two parameters are ignored if `random_num_lines` is True\n logging.info(\n f\"Mask is fixed acceleration mask with random_num_lines={random_num_lines}.\"\n )\n return BasicMaskFunc(\n [0.125],\n [4],\n which_dataset,\n random_num_lines=random_num_lines,\n rnl_params=rnl_params,\n )\n if \"low_to_high\" in mask_type:\n logging.info(\n f\"Mask is symmetric low to high with random_num_lines={random_num_lines}.\"\n )\n return SymmetricLowToHighMaskFunc(\n [0.125],\n [4],\n which_dataset,\n random_num_lines=random_num_lines,\n rnl_params=rnl_params,\n )\n if \"symmetric_grid\" in mask_type:\n logging.info(\"Mask is symmetric grid.\")\n return SymmetricUniformGridMaskFunc(\n [], [], which_dataset, random_num_lines=True, rnl_params=rnl_params\n )\n if \"grid\" in mask_type:\n logging.info(\"Mask is grid (not symmetric).\")\n return UniformGridMaskFunc(\n [], [], which_dataset, random_num_lines=True, rnl_params=rnl_params\n )\n raise ValueError(f\"Invalid mask type: {mask_type}.\")\n\n\nclass MaskFunc:\n def __init__(\n self,\n center_fractions,\n accelerations,\n which_dataset,\n random_num_lines=False,\n rnl_params=None,\n ):\n if len(center_fractions) != len(accelerations):\n raise ValueError(\n \"Number of center fractions should match number of accelerations\"\n )\n\n self.center_fractions = center_fractions\n self.accelerations = accelerations\n self.random_num_lines = random_num_lines\n\n if rnl_params is None:\n # The lines below give approx. 
4x acceleration on average.\n self.min_lowf_lines = 10 if which_dataset != \"KNEE_RAW\" else 30\n self.max_lowf_lines = 12 if which_dataset != \"KNEE_RAW\" else 32\n self.highf_beta_alpha = 1\n self.highf_beta_beta = 5\n else:\n params = [int(x) for x in rnl_params.split(\",\")]\n assert len(params) == 4\n self.min_lowf_lines = params[0]\n self.max_lowf_lines = params[1]\n self.highf_beta_alpha = params[2]\n self.highf_beta_beta = params[3]\n\n self.rng = np.random.RandomState()\n\n def __call__(self, shape, seed=None):\n if len(shape) < 3:\n raise ValueError(\"Shape should have 3 or more dimensions\")\n\n self.rng.seed(seed)\n num_cols = shape[-2]\n\n # Determine number of low and high frequency lines to scan\n if self.random_num_lines:\n # These are guaranteed to be an even number (useful for symmetric masks)\n num_low_freqs = self.rng.choice(\n range(self.min_lowf_lines, self.max_lowf_lines, 2)\n )\n num_high_freqs = (\n int(\n self.rng.beta(self.highf_beta_alpha, self.highf_beta_beta)\n * (num_cols - num_low_freqs)\n // 2\n )\n * 2\n )\n else:\n choice = self.rng.randint(0, len(self.accelerations))\n center_fraction = self.center_fractions[choice]\n acceleration = self.accelerations[choice]\n\n num_low_freqs = int(round(num_cols * center_fraction))\n num_high_freqs = int(num_cols // acceleration - num_low_freqs)\n\n # Create the mask\n mask = self.create_lf_focused_mask(num_cols, num_high_freqs, num_low_freqs)\n\n # Reshape the mask\n mask_shape = [1 for _ in shape]\n mask_shape[-1] = num_cols\n mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))\n return mask\n\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n p = num_high_freqs / (num_cols - num_low_freqs)\n mask = self.rng.uniform(size=num_cols) < p\n pad = (num_cols - num_low_freqs + 1) // 2\n mask[pad : pad + num_low_freqs] = True\n return mask\n\n\nclass BasicMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n hf_cols = self.rng.choice(\n np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False\n )\n hf_cols[hf_cols >= (num_cols - num_low_freqs + 1) // 2] += num_low_freqs\n mask[hf_cols] = True\n pad = (num_cols - num_low_freqs + 1) // 2\n mask[pad : pad + num_low_freqs] = True\n mask = np.fft.ifftshift(mask, axes=0)\n return mask\n\n\nclass SymmetricUniformChoiceMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n num_cols //= 2\n num_low_freqs //= 2\n num_high_freqs //= 2\n hf_cols = self.rng.choice(\n np.arange(num_cols - num_low_freqs), num_high_freqs, replace=False\n )\n mask[hf_cols] = True\n pad = num_cols - num_low_freqs\n mask[pad:num_cols] = True\n mask[: -(num_cols + 1) : -1] = mask[:num_cols]\n mask = np.fft.ifftshift(mask, axes=0)\n return mask\n\n\nclass UniformGridMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n acceleration = self.rng.choice([4, 8, 16])\n hf_cols = np.arange(acceleration, num_cols, acceleration)\n mask[hf_cols] = True\n mask[: num_low_freqs // 2] = mask[-(num_low_freqs // 2) :] = True\n return mask\n\n\nclass SymmetricLowToHighMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n num_cols //= 2\n num_low_freqs //= 2\n num_high_freqs //= 2\n num_low_freqs += num_high_freqs\n pad = num_cols - num_low_freqs\n mask[pad:num_cols] = True\n 
mask[: -(num_cols + 1) : -1] = mask[:num_cols]\n mask = np.fft.ifftshift(mask, axes=0)\n return mask\n\n\nclass SymmetricUniformGridMaskFunc(MaskFunc):\n def create_lf_focused_mask(self, num_cols, num_high_freqs, num_low_freqs):\n mask = np.zeros([num_cols])\n acceleration = self.rng.choice([4, 8, 16])\n num_cols //= 2\n num_low_freqs //= 2\n hf_cols = np.arange(acceleration, num_cols, acceleration)\n mask[hf_cols] = True\n mask[:num_low_freqs] = True\n mask[: -(num_cols + 1) : -1] = mask[:num_cols]\n return mask\n"
] | [
[
"numpy.random.RandomState",
"numpy.arange",
"numpy.zeros",
"numpy.fft.ifftshift"
]
] |
izhorvath/MetGAN | [
"aca85fb3306d2515a65c8d525cd78e1147ba7e1b"
] | [
"models/networks.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.optim import lr_scheduler\nfrom math import floor, log2\nfrom functools import partial\nfrom linear_attention_transformer import ImageLinearAttention\n\n###\n\nfrom random import random\n\n\nimport numpy as np\nimport torch.nn.functional as F\n\n\n###\n\nfrom models.networks_SPADE.base_network import BaseNetwork\nfrom models.networks_SPADE.architecture import ResnetBlock as ResnetBlock\nfrom models.networks_SPADE.architecture import SPADEResnetBlock as SPADEResnetBlock\n\n\n###############################################################################\n# Helper Functions\n###############################################################################\n\n\nclass Identity(nn.Module):\n def forward(self, x):\n return x\n\n\ndef get_norm_layer(norm_type='instance'):\n \"\"\"Return a normalization layer\n\n Parameters:\n norm_type (str) -- the name of the normalization layer: batch | instance | none\n\n For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).\n For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.\n \"\"\"\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n elif norm_type == 'none':\n def norm_layer(x): return Identity()\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\n\ndef get_scheduler(optimizer, opt):\n \"\"\"Return a learning rate scheduler\n\n Parameters:\n optimizer -- the optimizer of the network\n opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions. 
\n opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine\n\n For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs\n and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.\n For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.\n See https://pytorch.org/docs/stable/optim.html for more details.\n \"\"\"\n if opt.lr_policy == 'linear':\n def lambda_rule(epoch):\n lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)\n return lr_l\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n elif opt.lr_policy == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)\n elif opt.lr_policy == 'plateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\n elif opt.lr_policy == 'cosine':\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)\n else:\n return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)\n return scheduler\n\ndef define_SPADE(opt,gpu_ids):\n if('spade8' in opt.netG):\n net = SPADE8Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)\n elif('spade6' in opt.netG):\n net = SPADE6Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)\n else:\n net = SPADEGenerator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)\n if len(gpu_ids) > 0:\n assert(torch.cuda.is_available())\n net.to(gpu_ids[0])\n #net = torch.nn.DataParallel(net, gpu_ids) \n net.init_weights()\n return net\n\ndef init_weights(net, init_type='normal', init_gain=0.02):\n \"\"\"Initialize network weights.\n\n Parameters:\n net (network) -- network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n\n We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\n work better for some applications. Feel free to try yourself.\n \"\"\"\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n init.normal_(m.weight.data, 1.0, init_gain)\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network with %s' % init_type)\n net.apply(init_func) # apply the initialization function <init_func>\n\n\ndef init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):\n \"\"\"Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. 
initialize the network weights\n Parameters:\n net (network) -- the network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Return an initialized network.\n \"\"\"\n if len(gpu_ids) > 0:\n assert(torch.cuda.is_available())\n net.to(gpu_ids[0])\n #net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs\n init_weights(net, init_type, init_gain=init_gain)\n return net\n\n\ndef define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):\n \"\"\"Create a generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128\n norm (str) -- the name of normalization layers used in the network: batch | instance | none\n use_dropout (bool) -- if use dropout layers.\n init_type (str) -- the name of our initialization method.\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Returns a generator\n\n Our current implementation provides two types of generators:\n U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)\n The original U-Net paper: https://arxiv.org/abs/1505.04597\n\n Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)\n Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.\n We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).\n\n\n The generator has been initialized by <init_net>. 
It uses RELU for non-linearity.\n \"\"\"\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n if netG == 'resnet_9blocks':\n net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)\n elif netG == 'resnet_9blocksup':\n net = ResnetGeneratorUp(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)\n elif netG == 'resnet_6blocks':\n net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)\n elif netG == 'unet_128':\n net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_256':\n net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_768':\n net = UNet768(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_768_sigm':\n net = UNet768Sigm(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_spade':\n net = UNet768PIXSPADE(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n elif netG == 'unet_spade8sm':\n net = UNet768PIXSPADE8SM(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)\n else:\n raise NotImplementedError('Generator model name [%s] is not recognized' % netG)\n return init_net(net, init_type, init_gain, gpu_ids)\n\n\ndef define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):\n \"\"\"Create a discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the first conv layer\n netD (str) -- the architecture's name: basic | n_layers | pixel\n n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'\n norm (str) -- the type of normalization layers used in the network.\n init_type (str) -- the name of the initialization method.\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2\n\n Returns a discriminator\n\n Our current implementation provides three types of discriminators:\n [basic]: 'PatchGAN' classifier described in the original pix2pix paper.\n It can classify whether 70×70 overlapping patches are real or fake.\n Such a patch-level discriminator architecture has fewer parameters\n than a full-image discriminator and can work on arbitrarily-sized images\n in a fully convolutional fashion.\n\n [n_layers]: With this mode, you can specify the number of conv layers in the discriminator\n with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)\n\n [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.\n It encourages greater color diversity but has no effect on spatial statistics.\n\n The discriminator has been initialized by <init_net>. 
It uses Leakly RELU for non-linearity.\n \"\"\"\n net = None\n norm_layer = get_norm_layer(norm_type=norm)\n\n if netD == 'basic': # default PatchGAN classifier\n net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)\n elif netD == 'n_layers': # more options\n net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)\n elif netD == 'pixel': # classify if each pixel is real or fake\n net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)\n elif netD == 'conditional': #conditional patchGAN\n net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)\n elif netD == 'unet':\n net = UnetDiscriminator()\n else:\n raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)\n return init_net(net, init_type, init_gain, gpu_ids)\n\n\n##############################################################################\n# Classes\n##############################################################################\nclass GANLoss(nn.Module):\n \"\"\"Define different GAN objectives.\n\n The GANLoss class abstracts away the need to create the target label tensor\n that has the same size as the input.\n \"\"\"\n\n def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):\n \"\"\" Initialize the GANLoss class.\n\n Parameters:\n gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.gan_mode = gan_mode\n if gan_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif gan_mode == 'vanilla':\n self.loss = nn.BCEWithLogitsLoss()\n elif gan_mode in ['wgangp']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % gan_mode)\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n A label tensor filled with ground truth label, and with the size of the input\n \"\"\"\n\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(prediction)\n\n def __call__(self, prediction, target_is_real):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction output from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n the calculated loss.\n \"\"\"\n if self.gan_mode in ['lsgan', 'vanilla']:\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, target_tensor)\n elif self.gan_mode == 'wgangp':\n if target_is_real:\n loss = -prediction.mean()\n else:\n loss = prediction.mean()\n return loss\n \nclass UnetGANLoss(nn.Module):\n \"\"\"Define different GAN objectives.\n\n The GANLoss class abstracts away the need to create the target label tensor\n that has the same size as the input.\n \"\"\"\n\n def __init__(self, 
gan_mode, target_real_label=1.0, target_fake_label=0.0):\n \"\"\" Initialize the GANLoss class.\n\n Parameters:\n gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(UnetGANLoss, self).__init__()\n self.register_buffer('real_label_1', torch.tensor(target_real_label))\n self.register_buffer('real_label_2', torch.tensor(np.ones((1,256,256))))\n self.register_buffer('fake_label_1', torch.tensor(target_fake_label))\n self.register_buffer('fake_label_2', torch.tensor(np.zeros((1,256,256))))\n\n self.loss_1 = nn.BCEWithLogitsLoss()\n self.loss_2 = nn.BCEWithLogitsLoss()\n\n def get_target_tensor(self, prediction_1, prediction_2, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n A label tensor filled with ground truth label, and with the size of the input\n \"\"\"\n\n if target_is_real:\n target_tensor_1 = self.real_label_1\n target_tensor_2 = self.real_label_2\n else:\n target_tensor_1 = self.fake_label_1\n target_tensor_2 = self.fake_label_2\n return target_tensor_1.expand_as(prediction_1), target_tensor_2.expand_as(prediction_2)\n\n def __call__(self, prediction_1, prediction_2, target_is_real):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels.\n\n Parameters:\n prediction (tensor) - - tpyically the prediction output from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n\n Returns:\n the calculated loss.\n \"\"\"\n\n target_tensor_1, target_tensor_2 = self.get_target_tensor(prediction_1, prediction_2, target_is_real)\n loss_1 = self.loss_1(prediction_1, target_tensor_1)\n loss_2 = self.loss_2(prediction_2, target_tensor_2)\n\n \n loss = loss_1.mean()+loss_2.mean()\n return loss\n\n\ndef cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):\n \"\"\"Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028\n\n Arguments:\n netD (network) -- discriminator network\n real_data (tensor array) -- real images\n fake_data (tensor array) -- generated images from the generator\n device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n type (str) -- if we mix real and fake data or not [real | fake | mixed].\n constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2\n lambda_gp (float) -- weight for this loss\n\n Returns the gradient penalty loss\n \"\"\"\n if lambda_gp > 0.0:\n if type == 'real': # either use real images, fake images, or a linear interpolation of two.\n interpolatesv = real_data\n elif type == 'fake':\n interpolatesv = fake_data\n elif type == 'mixed':\n alpha = torch.rand(real_data.shape[0], 1, device=device)\n alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)\n interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)\n else:\n raise NotImplementedError('{} not implemented'.format(type))\n interpolatesv.requires_grad_(True)\n 
disc_interpolates = netD(interpolatesv)\n gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,\n grad_outputs=torch.ones(disc_interpolates.size()).to(device),\n create_graph=True, retain_graph=True, only_inputs=True)\n gradients = gradients[0].view(real_data.size(0), -1) # flat the data\n gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps\n return gradient_penalty, gradients\n else:\n return 0.0, None\n\n\nclass ResnetGenerator(nn.Module):\n \"\"\"Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.\n\n We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)\n \"\"\"\n\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):\n \"\"\"Construct a Resnet-based generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers\n n_blocks (int) -- the number of ResNet blocks\n padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero\n \"\"\"\n assert(n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n for i in range(n_blocks): # add ResNet blocks\n\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n \"\"\"Standard forward\"\"\"\n return self.model(input)\n \nclass ResnetGeneratorUp(nn.Module):\n \"\"\"Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.\n\n We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)\n \"\"\"\n\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):\n \"\"\"Construct a Resnet-based generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers\n n_blocks (int) -- the number of ResNet blocks\n padding_type 
(str) -- the name of padding layer in conv layers: reflect | replicate | zero\n \"\"\"\n assert(n_blocks >= 0)\n super(ResnetGeneratorUp, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n for i in range(n_blocks): # add ResNet blocks\n\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.Upsample(scale_factor = 2, mode='nearest'),\n nn.ReflectionPad2d(1),\n nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0),]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n \"\"\"Standard forward\"\"\"\n return self.model(input)\n\n\nclass ResnetBlock(nn.Module):\n \"\"\"Define a Resnet block\"\"\"\n\n def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n \"\"\"Initialize the Resnet block\n\n A resnet block is a conv block with skip connections\n We construct a conv block with build_conv_block function,\n and implement skip connections in <forward> function.\n Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf\n \"\"\"\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\n\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n \"\"\"Construct a convolutional block.\n\n Parameters:\n dim (int) -- the number of channels in the conv layer.\n padding_type (str) -- the name of padding layer: reflect | replicate | zero\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers.\n use_bias (bool) -- if the conv layer uses bias or not\n\n Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))\n \"\"\"\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n \"\"\"Forward function (with skip connections)\"\"\"\n out = x + self.conv_block(x) # add skip connections\n return out\n\n\nclass 
UnetGenerator(nn.Module):\n \"\"\"Create a Unet-based generator\"\"\"\n\n def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n \"\"\"Construct a Unet generator\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7,\n image of size 128x128 will become of size 1x1 # at the bottleneck\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n\n We construct the U-Net from the innermost layer to the outermost layer.\n It is a recursive process.\n \"\"\"\n super(UnetGenerator, self).__init__()\n # construct unet structure\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer\n for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)\n # gradually reduce the number of filters from ngf * 8 to ngf\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer\n\n def forward(self, input):\n \"\"\"Standard forward\"\"\"\n return self.model(input)\n\n\nclass UnetSkipConnectionBlock(nn.Module):\n \"\"\"Defines the Unet submodule with skip connection.\n X -------------------identity----------------------\n |-- downsampling -- |submodule| -- upsampling --|\n \"\"\"\n\n def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n \"\"\"Construct a Unet submodule with skip connections.\n\n Parameters:\n outer_nc (int) -- the number of filters in the outer conv layer\n inner_nc (int) -- the number of filters in the inner conv layer\n input_nc (int) -- the number of channels in input images/features\n submodule (UnetSkipConnectionBlock) -- previously defined submodules\n outermost (bool) -- if this module is the outermost module\n innermost (bool) -- if this module is the innermost module\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers.\n \"\"\"\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n 
padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n if self.outermost:\n return self.model(x)\n else: # add skip connections\n return torch.cat([x, self.model(x)], 1)\n \n#%%% Unet from DeepMact\n \n \nclass ConvBnRelu2d(torch.nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, output_padding=1, dilation=1, stride=1, groups=1, is_bn=True, is_relu=True, is_decoder=False):\n super(ConvBnRelu2d, self).__init__()\n if is_decoder:\n self.transpConv = torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, output_padding=output_padding, stride=stride, dilation=dilation, groups=groups, bias=False)\n self.conv = None\n else:\n self.transpConv = None\n self.conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, groups=groups, bias=False)\n self.bn = torch.nn.BatchNorm2d(out_channels, eps=1e-4)\n self.relu = torch.nn.ReLU(inplace=True)\n if is_bn is False: self.bn = None\n if is_relu is False: self.relu = None\n\n def forward(self, x):\n if self.conv is None:\n x = self.transpConv(x)\n elif self.transpConv is None:\n x = self.conv(x)\n \n if self.bn is not None:\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n return x\n\n \nclass StackEncoder(torch.nn.Module):\n def __init__(self, x_channels, y_channels, kernel_size=3, stride=1):\n super(StackEncoder, self).__init__()\n padding = (kernel_size - 1) // 2\n self.encode = torch.nn.Sequential(\n ConvBnRelu2d(x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n )\n\n def forward(self, x):\n y = self.encode(x)\n y_small = torch.nn.functional.max_pool2d(y, kernel_size=2, stride=2)\n return y, y_small\n\n\nclass StackDecoder(torch.nn.Module):\n def __init__(self, x_big_channels, x_channels, y_channels, kernel_size=3, stride=1):\n super(StackDecoder, self).__init__()\n padding = (kernel_size - 1) // 2\n\n self.decode = torch.nn.Sequential(\n ConvBnRelu2d(x_big_channels + x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),\n )\n\n def forward(self, x_big, x):\n N, C, H, W = x_big.size()\n y = torch.nn.functional.upsample(x, size=(H, W), mode='bilinear', align_corners=True)\n y = torch.cat([y, x_big], 1)\n y = self.decode(y)\n return y\n# 768\nclass UNet768(torch.nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UNet768, self).__init__()\n # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n # C, H, W = in_shape\n # assert(C==3)\n self.output_nc = output_nc\n\n # 1024\n self.down1 = 
StackEncoder(input_nc, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out\n self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out\n self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out\n self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out\n self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out\n self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out\n\n self.center = torch.nn.Sequential(\n ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out\n )\n\n # x_big_channels, x_channels, y_channels\n self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out\n self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out\n self.up4 = StackDecoder(256, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out\n self.up3 = StackDecoder(128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out\n self.up2 = StackDecoder(64, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out\n self.up1 = StackDecoder(24, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out\n self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out\n self.final_out = torch.nn.Tanh()\n\n def _crop_concat(self, upsampled, bypass):\n \"\"\"\n Crop y to the (h, w) of x and concat them.\n Used for the expansive path.\n Returns:\n The concatenated tensor\n \"\"\"\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)\n\n def forward(self, x):\n out = x # ;print('x ',x.size())\n #\n down1, out = self.down1(out) ##;\n #print('down1',down1.shape) #256\n down2, out = self.down2(out) # ;\n #print('down2',down2.shape) #128\n down3, out = self.down3(out) # ;\n #print('down3',down3.shape) #64\n down4, out = self.down4(out) # ;\n #print('down4',down4.shape) #32\n down5, out = self.down5(out) # ;\n #print('down5',down5.shape) #16\n down6, out = self.down6(out) # ;\n #print('down6',down6.shape) #8\n pass # ;\n #print('out ',out.shape)\n\n out = self.center(out)\n #print('0',out.shape)\n out = self.up6(down6, out)\n #print('1',out.shape)\n out = self.up5(down5, out)\n #print('2',out.shape)\n out = self.up4(down4, out)\n #print('3',out.shape)\n out = self.up3(down3, out)\n #print('4',out.shape)\n out = self.up2(down2, out)\n #print('5',out.shape)\n out = self.up1(down1, out)\n # 1024\n #print('6',out.shape)\n out = self.final_out(self.classify(out))\n out = torch.reshape(out,(-1, self.output_nc, x.shape[2],x.shape[3]))#, dim=1)\n return out\n \n#%%Unet_spade_768_300\n \n \n \n#%%sigm\n\n\nclass UNet768Sigm(torch.nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UNet768Sigm, self).__init__()\n # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n # C, H, W = in_shape\n # assert(C==3)\n self.output_nc = output_nc\n\n # 1024\n self.down1 = 
StackEncoder(input_nc, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out\n self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out\n self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out\n self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out\n self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out\n self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out\n\n self.center = torch.nn.Sequential(\n ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out\n )\n\n # x_big_channels, x_channels, y_channels\n self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out\n self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out\n self.up4 = StackDecoder(256, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out\n self.up3 = StackDecoder(128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out\n self.up2 = StackDecoder(64, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out\n self.up1 = StackDecoder(24, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out\n self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out\n self.final_out = torch.nn.Sigmoid()\n\n def _crop_concat(self, upsampled, bypass):\n \"\"\"\n Crop y to the (h, w) of x and concat them.\n Used for the expansive path.\n Returns:\n The concatenated tensor\n \"\"\"\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)\n\n def forward(self, x):\n out = x # ;print('x ',x.size())\n #\n down1, out = self.down1(out) ##;print('down1',down1.size()) #256\n down2, out = self.down2(out) # ;print('down2',down2.size()) #128\n down3, out = self.down3(out) # ;print('down3',down3.size()) #64\n down4, out = self.down4(out) # ;print('down4',down4.size()) #32\n down5, out = self.down5(out) # ;print('down5',down5.size()) #16\n down6, out = self.down6(out) # ;print('down6',down6.size()) #8\n pass # ;print('out ',out.size())\n\n out = self.center(out)\n out = self.up6(down6, out)\n out = self.up5(down5, out)\n out = self.up4(down4, out)\n out = self.up3(down3, out)\n out = self.up2(down2, out)\n out = self.up1(down1, out)\n # 1024\n\n out = self.final_out(self.classify(out))\n out = torch.reshape(out,(1, self.output_nc, 256,256))#, dim=1)\n return out\n\n\n\n\n\nclass NLayerDiscriminator(nn.Module):\n \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n \"\"\"Construct a PatchGAN discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n n_layers (int) -- the number of conv layers in the discriminator\n norm_layer -- normalization layer\n \"\"\"\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == 
nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.model(input)\n\n\nclass PixelDiscriminator(nn.Module):\n \"\"\"Defines a 1x1 PatchGAN discriminator (pixelGAN)\"\"\"\n\n def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):\n \"\"\"Construct a 1x1 PatchGAN discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n \"\"\"\n super(PixelDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.net = [\n nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),\n norm_layer(ndf * 2),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]\n\n self.net = nn.Sequential(*self.net)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.net(input)\n\n\n#%% Unet as Disdef random_hflip(tensor, prob):\n \n\ndef DiffAugment(x, types=[]):\n for p in types:\n for f in AUGMENT_FNS[p]:\n x = f(x)\n return x.contiguous(memory_format = torch.contiguous_format)\n\ndef rand_brightness(x):\n x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)\n return x\n\ndef rand_saturation(x):\n x_mean = x.mean(dim=1, keepdim=True)\n x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean\n return x\n\ndef rand_contrast(x):\n x_mean = x.mean(dim=[1, 2, 3], keepdim=True)\n x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean\n return x\n\ndef rand_translation(x, ratio=0.125):\n shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)\n translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)\n translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)\n grid_batch, grid_x, grid_y = torch.meshgrid(\n torch.arange(x.size(0), dtype=torch.long, device=x.device),\n torch.arange(x.size(2), dtype=torch.long, device=x.device),\n torch.arange(x.size(3), dtype=torch.long, device=x.device),\n )\n grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)\n grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)\n x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])\n x = x_pad.permute(0, 
2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2).contiguous(memory_format = torch.contiguous_format)\n return x\n\ndef rand_cutout(x, ratio=0.5):\n cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)\n offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)\n offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)\n grid_batch, grid_x, grid_y = torch.meshgrid(\n torch.arange(x.size(0), dtype=torch.long, device=x.device),\n torch.arange(cutout_size[0], dtype=torch.long, device=x.device),\n torch.arange(cutout_size[1], dtype=torch.long, device=x.device),\n )\n grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)\n grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)\n mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)\n mask[grid_batch, grid_x, grid_y] = 0\n x = x * mask.unsqueeze(1)\n return x\n\nAUGMENT_FNS = {\n 'color': [rand_brightness, rand_saturation, rand_contrast],\n 'translation': [rand_translation],\n 'cutout': [rand_cutout],\n}\n \ndef random_float(lo, hi):\n return lo + (hi - lo) * random()\n\ndef random_crop_and_resize(tensor, scale):\n b, c, h, _ = tensor.shape\n new_width = int(h * scale)\n delta = h - new_width\n h_delta = int(random() * delta)\n w_delta = int(random() * delta)\n cropped = tensor[:, :, h_delta:(h_delta + new_width), w_delta:(w_delta + new_width)].clone()\n return F.interpolate(cropped, size=(h, h), mode='bilinear')\n\ndef random_hflip(tensor, prob):\n if prob > random():\n return tensor\n return torch.flip(tensor, dims=(3,))\n\nclass AugWrapper(nn.Module):\n def __init__(self, D, image_size, types):\n super().__init__()\n self.D = D\n self.types = types\n\n def forward(self, images, prob = 0., detach = False):\n if random() < prob:\n images = random_hflip(images, prob=0.5)\n images = DiffAugment(images, types=self.types)\n\n if detach:\n images.detach_()\n\n return self.D(images), images\n \n \ndef leaky_relu(p=0.2):\n return nn.LeakyReLU(p)\n\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n def forward(self, x):\n return self.fn(x) + x\n\nclass Flatten(nn.Module):\n def __init__(self, index):\n super().__init__()\n self.index = index\n def forward(self, x):\n return x.flatten(self.index)\n\nclass Rezero(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n self.g = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return self.fn(x) * self.g \n \ndef double_conv(chan_in, chan_out):\n return nn.Sequential(\n nn.Conv2d(chan_in, chan_out, 3, padding=1),\n leaky_relu(),\n nn.Conv2d(chan_out, chan_out, 3, padding=1),\n leaky_relu()\n )\n \nclass DownBlock(nn.Module):\n def __init__(self, input_channels, filters, downsample=True):\n super().__init__()\n self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))\n\n self.net = double_conv(input_channels, filters)\n self.down = nn.Conv2d(filters, filters, 3, padding = 1, stride = 2) if downsample else None\n\n def forward(self, x):\n res = self.conv_res(x)\n x = self.net(x)\n unet_res = x\n\n if self.down is not None:\n x = self.down(x)\n\n x = x + res\n return x, unet_res\n \n\n# one layer of self-attention and feedforward, for images\n\nattn_and_ff = lambda chan: nn.Sequential(*[\n Residual(Rezero(ImageLinearAttention(chan, norm_queries = True))),\n 
Residual(Rezero(nn.Sequential(nn.Conv2d(chan, chan * 2, 1), leaky_relu(), nn.Conv2d(chan * 2, chan, 1))))\n])\n \nclass UpBlock(nn.Module):\n def __init__(self, input_channels, filters):\n super().__init__()\n self.conv_res = nn.ConvTranspose2d(input_channels // 2, filters, 1, stride = 2)\n self.net = double_conv(input_channels, filters)\n self.up = nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False)\n self.input_channels = input_channels\n self.filters = filters\n\n def forward(self, x, res):\n *_, h, w = x.shape\n conv_res = self.conv_res(x, output_size = (h * 2, w * 2))\n x = self.up(x)\n x = torch.cat((x, res), dim=1)\n x = self.net(x)\n x = x + conv_res\n return x\n \nclass UnetDiscriminator(nn.Module):\n def __init__(self, image_size=256, network_capacity = 16, transparent = False, fmap_max = 256):\n super().__init__()\n num_layers = int(log2(image_size) - 3)\n num_init_filters = 2# if not transparent else 4\n\n blocks = []\n filters = [num_init_filters] + [(network_capacity) * (2 ** i) for i in range(num_layers + 1)]\n\n set_fmap_max = partial(min, fmap_max)\n filters = list(map(set_fmap_max, filters))\n filters[-1] = filters[-2]\n\n chan_in_out = list(zip(filters[:-1], filters[1:]))\n chan_in_out = list(map(list, chan_in_out))\n\n print('Channels',chan_in_out)\n down_blocks = []\n attn_blocks = []\n\n for ind, (in_chan, out_chan) in enumerate(chan_in_out):\n num_layer = ind + 1\n is_not_last = ind != (len(chan_in_out) - 1)\n\n block = DownBlock(in_chan, out_chan, downsample = is_not_last)\n down_blocks.append(block)\n\n attn_fn = attn_and_ff(out_chan)\n attn_blocks.append(attn_fn)\n\n self.down_blocks = nn.ModuleList(down_blocks)\n self.attn_blocks = nn.ModuleList(attn_blocks)\n\n last_chan = filters[-1]\n\n self.to_logit = nn.Sequential(\n leaky_relu(),\n nn.AvgPool2d(image_size // (2 ** num_layers)),\n Flatten(1),\n nn.Linear(last_chan, 1)\n )\n\n self.conv = double_conv(last_chan, last_chan)\n\n dec_chan_in_out = chan_in_out[:-1][::-1]\n self.up_blocks = nn.ModuleList(list(map(lambda c: UpBlock(c[1] * 2, c[0]), dec_chan_in_out)))\n self.conv_out = nn.Conv2d(2, 1, 1)\n\n def forward(self, x):\n \n #print('Input shape:', x.shape)\n b, *_ = x.shape\n\n residuals = []\n i=0\n for (down_block, attn_block) in zip(self.down_blocks, self.attn_blocks):\n #print('Step', i, x.shape)\n i=i+1\n x, unet_res = down_block(x)\n residuals.append(unet_res)\n\n if attn_block is not None:\n x = attn_block(x)\n\n x = self.conv(x) + x\n enc_out = self.to_logit(x)\n\n for (up_block, res) in zip(self.up_blocks, residuals[:-1][::-1]):\n #print('in up blocK', x.shape)\n x = up_block(x, res)\n\n dec_out = self.conv_out(x)\n return enc_out.squeeze(), dec_out\n\n\n#%% SPADE RESNET\n \n \nclass SPADEGenerator(BaseNetwork):\n\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):\n super(SPADEGenerator, self).__init__()\n self.opt = opt\n self.opt.num_upsampling_layers = 'normal'\n self.opt.norm_G = 'spectralspadesyncbatch3x3'\n self.opt.ngf = 64\n self.opt.semantic_nc = 2\n self.opt.use_vae = False\n self.opt.crop_size = 256\n self.opt.normG = 'spectralinstance'\n self.opt.aspect_ratio = 1.0\n nf = self.opt.ngf\n opt = self.opt\n\n self.sw, self.sh = self.compute_latent_vector_size(opt)\n\n\n self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)\n\n self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, 
opt)\n\n self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)\n self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)\n self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)\n self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)\n\n final_nc = nf\n\n if opt.num_upsampling_layers == 'most':\n self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)\n final_nc = nf // 2\n\n self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)\n\n self.up = nn.Upsample(scale_factor=2)\n\n def compute_latent_vector_size(self, opt):\n if opt.num_upsampling_layers == 'normal':\n num_up_layers = 5\n elif opt.num_upsampling_layers == 'more':\n num_up_layers = 6\n elif opt.num_upsampling_layers == 'most':\n num_up_layers = 7\n else:\n raise ValueError('opt.num_upsampling_layers [%s] not recognized' %\n opt.num_upsampling_layers)\n\n sw = self.opt.crop_size // (2**num_up_layers)\n sh = round(sw / opt.aspect_ratio)\n\n return sw, sh\n\n def forward(self, input, z=None):\n seg = input\n\n if self.opt.use_vae:\n # we sample z from unit normal and reshape the tensor\n if z is None:\n z = torch.randn(input.size(0), self.opt.z_dim,\n dtype=torch.float32, device=input.get_device())\n x = self.fc(z)\n x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)\n else:\n # we downsample segmap and run convolution\n x = F.interpolate(seg, size=(self.sh, self.sw))\n x = self.fc(x)\n\n #print('0,', x.shape)\n x = self.head_0(x, seg)\n #print('1,', x.shape)\n x = self.up(x)\n #print('2', x.shape)\n x = self.G_middle_0(x, seg)\n #print('3,', x.shape)\n if self.opt.num_upsampling_layers == 'more' or \\\n self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n #print('4,', x.shape)\n #x = self.G_middle_1(x, seg)\n output_5 = x\n #print('5,', x.shape)\n x = self.up(x)\n output_6 = x\n #print('6,', x.shape)\n x = self.up_0(x, seg)\n #print('7,', x.shape)\n x = self.up(x)\n #print('8,', x.shape)\n x = self.up_1(x, seg)\n output_9 = x\n #print('9,', x.shape)\n x = self.up(x)\n #print('10,', x.shape)\n x = self.up_2(x, seg)\n #print('11,', x.shape)\n output_11 = x\n x = self.up(x)\n # print('12,', x.shape)\n x = self.up_3(x, seg)\n #print('13,', x.shape)\n\n if self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n x = self.up_4(x, seg)\n #print('14,', x.shape)\n x = self.conv_img(F.leaky_relu(x, 2e-1))\n # print('15,', x.shape)\n output_15 = x\n #x = F.tanh(x)\n #print('16,', x.shape)\n\n return output_5,output_6,output_9,output_11,output_15\n \n#%% spade8\n \nclass SPADE8Generator(BaseNetwork):\n\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):\n super(SPADE8Generator, self).__init__()\n self.opt = opt\n self.opt.num_upsampling_layers = 'normal'\n self.opt.norm_G = 'spectralspadesyncbatch3x3'\n self.opt.ngf = 8\n self.opt.semantic_nc = 2\n self.opt.use_vae = False\n self.opt.crop_size = 256\n self.opt.normG = 'spectralinstance'\n self.opt.aspect_ratio = 1.0\n nf = self.opt.ngf\n opt = self.opt\n\n self.sw, self.sh = self.compute_latent_vector_size(opt)\n\n\n self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)\n\n self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)\n self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)\n self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)\n self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)\n\n final_nc = nf\n\n if opt.num_upsampling_layers == 'most':\n self.up_4 = 
SPADEResnetBlock(1 * nf, nf // 2, opt)\n final_nc = nf // 2\n\n self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)\n\n self.up = nn.Upsample(scale_factor=2)\n\n def compute_latent_vector_size(self, opt):\n if opt.num_upsampling_layers == 'normal':\n num_up_layers = 5\n elif opt.num_upsampling_layers == 'more':\n num_up_layers = 6\n elif opt.num_upsampling_layers == 'most':\n num_up_layers = 7\n else:\n raise ValueError('opt.num_upsampling_layers [%s] not recognized' %\n opt.num_upsampling_layers)\n\n sw = self.opt.crop_size // (2**num_up_layers)\n sh = round(sw / opt.aspect_ratio)\n\n return sw, sh\n\n def forward(self, input, z=None):\n seg = input\n\n if self.opt.use_vae:\n # we sample z from unit normal and reshape the tensor\n if z is None:\n z = torch.randn(input.size(0), self.opt.z_dim,\n dtype=torch.float32, device=input.get_device())\n x = self.fc(z)\n x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)\n else:\n # we downsample segmap and run convolution\n x = F.interpolate(seg, size=(self.sh, self.sw))\n x = self.fc(x)\n\n #print('0,', x.shape)\n x = self.head_0(x, seg)\n #print('1,', x.shape)\n x = self.up(x)\n #print('2', x.shape)\n x = self.G_middle_0(x, seg)\n #print('3,', x.shape)\n if self.opt.num_upsampling_layers == 'more' or \\\n self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n #print('4,', x.shape)\n x = self.G_middle_1(x, seg)\n output_5 = x\n #print('5,', x.shape)\n x = self.up(x)\n output_6 = x\n #print('6,', x.shape)\n x = self.up_0(x, seg)\n #print('7,', x.shape)\n x = self.up(x)\n #print('8,', x.shape)\n x = self.up_1(x, seg)\n output_9 = x\n #print('9,', x.shape)\n x = self.up(x)\n #print('10,', x.shape)\n x = self.up_2(x, seg)\n #print('11,', x.shape)\n output_11 = x\n '''this can be removed'''\n x = self.up(x)\n #print('12,', x.shape)\n x = self.up_3(x, seg)\n #print('13,', x.shape)\n\n if self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n x = self.up_4(x, seg)\n #print('14,', x.shape)\n x = self.conv_img(F.leaky_relu(x, 2e-1))\n #print('15,', x.shape)\n output_15 = x\n #x = F.tanh(x)\n #print('16,', x.shape)\n '''til here'''\n return output_5,output_6,output_9,output_11,output_15\n \n#%%\nclass SPADE6Generator(BaseNetwork):\n\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):\n super(SPADE6Generator, self).__init__()\n self.opt = opt\n self.opt.num_upsampling_layers = 'normal'\n self.opt.norm_G = 'spectralspadesyncbatch3x3'\n self.opt.ngf = 6\n self.opt.semantic_nc = 2\n self.opt.use_vae = False\n self.opt.crop_size = 300\n self.opt.normG = 'spectralinstance'\n self.opt.aspect_ratio = 1.0\n nf = self.opt.ngf\n opt = self.opt\n\n self.sw, self.sh = self.compute_latent_vector_size(opt)\n\n\n self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)\n\n self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)\n\n self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)\n self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)\n self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)\n self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)\n\n final_nc = nf\n\n if opt.num_upsampling_layers == 'most':\n self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)\n final_nc = nf // 2\n\n self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)\n\n self.up = nn.Upsample(scale_factor=2)\n\n def compute_latent_vector_size(self, opt):\n if opt.num_upsampling_layers == 'normal':\n num_up_layers = 
5\n elif opt.num_upsampling_layers == 'more':\n num_up_layers = 6\n elif opt.num_upsampling_layers == 'most':\n num_up_layers = 7\n else:\n raise ValueError('opt.num_upsampling_layers [%s] not recognized' %\n opt.num_upsampling_layers)\n\n sw = 10#self.opt.crop_size // (2**num_up_layers)\n sh = round(sw / opt.aspect_ratio)\n\n return sw, sh\n\n def forward(self, input, z=None):\n seg = input\n\n if self.opt.use_vae:\n # we sample z from unit normal and reshape the tensor\n if z is None:\n z = torch.randn(input.size(0), self.opt.z_dim,\n dtype=torch.float32, device=input.get_device())\n x = self.fc(z)\n x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)\n else:\n # we downsample segmap and run convolution\n x = F.interpolate(seg, size=(self.sh, self.sw))\n x = self.fc(x)\n\n print('0,', x.shape)\n x = self.head_0(x, seg)\n print('1,', x.shape)\n x = self.up(x)\n print('2', x.shape)\n x = self.G_middle_0(x, seg)\n print('3,', x.shape)\n if self.opt.num_upsampling_layers == 'more' or \\\n self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n print('4,', x.shape)\n x = self.G_middle_1(x, seg)\n output_5 = x\n print('5,', x.shape)\n x = self.up(x)\n output_6 = x\n print('6,', x.shape)\n x = self.up_0(x, seg)\n print('7,', x.shape)\n x = self.up(x)\n print('8,', x.shape)\n x = self.up_1(x, seg)\n output_9 = x\n print('9,', x.shape)\n x = self.up(x)\n print('10,', x.shape)\n x = self.up_2(x, seg)\n print('11,', x.shape)\n output_11 = x\n x = self.up(x)\n print('12,', x.shape)\n x = self.up_3(x, seg)\n print('13,', x.shape)\n\n if self.opt.num_upsampling_layers == 'most':\n x = self.up(x)\n x = self.up_4(x, seg)\n print('14,', x.shape)\n x = self.conv_img(F.leaky_relu(x, 2e-1))\n print('15,', x.shape)\n output_15 = x\n #x = F.tanh(x)\n print('16,', x.shape)\n\n return output_5,output_6,output_9,output_11,output_15\n\n#%% For the PIX2SPADE\n \nclass UNet768PIXSPADE(torch.nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UNet768PIXSPADE, self).__init__()\n # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n # C, H, W = in_shape\n # assert(C==3)\n print('UNET 768 SPADE')\n self.output_nc = output_nc\n\n # 1024\n self.down1 = StackEncoder(1, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out\n self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out\n self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out\n self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out\n self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out\n self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out\n\n self.center = torch.nn.Sequential(\n ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out\n )\n\n # x_big_channels, x_channels, y_channels\n self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out\n self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out\n self.up4 = StackDecoder(256+1024, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out\n self.up3 = StackDecoder(128+1024, 128, 64, kernel_size=3) # 
Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out\n self.up2 = StackDecoder(64+256, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out\n self.up1 = StackDecoder(24+128, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out\n self.classify = torch.nn.Conv2d(24+3, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out\n self.final_out = torch.nn.Tanh()\n\n def _crop_concat(self, upsampled, bypass):\n \"\"\"\n Crop y to the (h, w) of x and concat them.\n Used for the expansive path.\n Returns:\n The concatenated tensor\n \"\"\"\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)\n\n def forward(self,x, input_to_net):\n #print(input_to_net.shape)\n output_5,output_6,output_9,output_11,output_15 = input_to_net\n \n #print(x.shape)\n \n out = x # ;print('x ',x.size())\n #\n down1, out = self.down1(out) ##;\n #print('down1',down1.shape) #256\n down2, out = self.down2(out) # ;\n #print('down2',down2.shape) #128\n down3, out = self.down3(out) # ;\n #print('down3',down3.shape) #64\n down4, out = self.down4(out) # ;\n #print('down4',down4.shape) #32\n down5, out = self.down5(out) # ;\n #print('down5',down5.shape) #16\n down6, out = self.down6(out) # ;\n #print('down6',down6.shape) #8\n pass # ;\n #print('out ',out.shape)\n\n out = self.center(out)\n #print('0',out.shape)\n out = self.up6(down6, out)\n #print('1',out.shape)\n out = self.up5(down5, out)\n out = torch.cat((out,output_5 ),1 )\n #print('2',out.shape)\n out = self.up4(down4, out)\n out = torch.cat((out,output_6 ),1 )\n #print('3',out.shape)\n out = self.up3(down3, out)\n out = torch.cat((out,output_9 ),1 )\n #print('4',out.shape)\n out = self.up2(down2, out)\n out = torch.cat((out,output_11 ),1 )\n #print('5',out.shape)\n out = self.up1(down1, out)\n # 1024\n out = torch.cat((out,output_15 ),1 )\n #print('6',out.shape)\n out = self.final_out(self.classify(out))\n out = torch.reshape(out,(-1, self.output_nc, 256,256))#, dim=1)\n return out\n\n#%%Unet for spade8\n \nclass UNet768PIXSPADE8SM(torch.nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UNet768PIXSPADE8SM, self).__init__()\n # def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n # C, H, W = in_shape\n # assert(C==3)\n print('UNET 768 SPADE')\n self.output_nc = output_nc\n\n # 1024\n self.down1 = StackEncoder(1, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out\n self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out\n self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out\n self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out\n self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out\n self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out\n\n self.center = torch.nn.Sequential(\n ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out\n )\n\n # x_big_channels, x_channels, y_channels\n self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 
10 out\n self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out\n self.up4 = StackDecoder(256+128, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out\n self.up3 = StackDecoder(128+128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out\n self.up2 = StackDecoder(64+32, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out\n self.up1 = StackDecoder(24+16, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out\n self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out\n self.final_out = torch.nn.Tanh()\n\n def _crop_concat(self, upsampled, bypass):\n \"\"\"\n Crop y to the (h, w) of x and concat them.\n Used for the expansive path.\n Returns:\n The concatenated tensor\n \"\"\"\n c = (bypass.size()[2] - upsampled.size()[2]) // 2\n bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))\n\n return torch.cat((upsampled, bypass), 1)\n\n def forward(self,x, input_to_net):\n #print(input_to_net.shape)\n output_5,output_6,output_9,output_11,output_15 = input_to_net\n \n #print(x.shape)\n \n out = x # ;print('x ',x.size())\n #\n down1, out = self.down1(out) ##;\n #print('down1',down1.shape) #256\n down2, out = self.down2(out) # ;\n #print('down2',down2.shape) #128\n down3, out = self.down3(out) # ;\n #print('down3',down3.shape) #64\n down4, out = self.down4(out) # ;\n #print('down4',down4.shape) #32\n down5, out = self.down5(out) # ;\n #print('down5',down5.shape) #16\n down6, out = self.down6(out) # ;\n #print('down6',down6.shape) #8\n pass # ;\n #print('out ',out.shape)\n\n out = self.center(out)\n #print('0',out.shape)\n out = self.up6(down6, out)\n #print('1',out.shape)\n out = self.up5(down5, out)\n out = torch.cat((out,output_5 ),1 )\n #print('2',out.shape)\n out = self.up4(down4, out)\n out = torch.cat((out,output_6 ),1 )\n #print('3',out.shape)\n out = self.up3(down3, out)\n out = torch.cat((out,output_9 ),1 )\n #print('4',out.shape)\n out = self.up2(down2, out)\n out = torch.cat((out,output_11 ),1 )\n #print('5',out.shape)\n out = self.up1(down1, out)\n # 1024\n #out = torch.cat((out,output_15 ),1 )\n #print('6',out.shape)\n out = self.final_out(self.classify(out))\n out = torch.reshape(out,(-1, self.output_nc, 256,256))#, dim=1)\n return out\n\n \n\n \n\n"
] | [
[
"torch.optim.lr_scheduler.CosineAnnealingLR",
"numpy.ones",
"torch.rand",
"torch.nn.Upsample",
"torch.cuda.is_available",
"torch.nn.Conv2d",
"torch.nn.ModuleList",
"torch.nn.ReflectionPad2d",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.Sigmoid",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.ConvTranspose2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.flip",
"torch.nn.init.xavier_normal_",
"torch.nn.functional.pad",
"torch.nn.functional.upsample",
"torch.nn.init.normal_",
"torch.arange",
"torch.nn.AvgPool2d",
"torch.nn.functional.leaky_relu",
"numpy.zeros",
"torch.nn.functional.max_pool2d",
"torch.optim.lr_scheduler.LambdaLR",
"torch.tensor",
"torch.reshape",
"torch.nn.ReplicationPad2d",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.nn.init.constant_",
"torch.nn.Tanh",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.Sequential",
"torch.zeros",
"torch.nn.ReLU",
"torch.nn.init.orthogonal_",
"torch.nn.functional.interpolate",
"torch.nn.LeakyReLU"
]
] |
xxia-kathy/models | [
"157faae1af5d89c53a5699b601dc68fee274ef09"
] | [
"official/core/base_trainer_test.py"
] | [
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow_models.core.trainers.trainer.\"\"\"\n# pylint: disable=g-direct-tensorflow-import\n\nimport os\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import strategy_combinations\nfrom official.core import base_trainer as trainer_lib\nfrom official.core import train_lib\nfrom official.modeling.hyperparams import config_definitions as cfg\nfrom official.utils.testing import mock_task\n\n\ndef all_strategy_combinations():\n return combinations.combine(\n distribution=[\n strategy_combinations.default_strategy,\n strategy_combinations.tpu_strategy,\n strategy_combinations.one_device_strategy_gpu,\n ],\n mode='eager',\n )\n\n\nclass TrainerTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super().setUp()\n self._config = cfg.ExperimentConfig(\n trainer=cfg.TrainerConfig(\n optimizer_config=cfg.OptimizationConfig({\n 'optimizer': {\n 'type': 'sgd'\n },\n 'learning_rate': {\n 'type': 'constant'\n }\n })))\n\n def create_test_trainer(self, config, model_dir=None):\n task = mock_task.MockTask(config.task, logging_dir=model_dir)\n ckpt_exporter = train_lib.maybe_create_best_ckpt_exporter(config, model_dir)\n trainer = trainer_lib.Trainer(\n config,\n task,\n model=task.build_model(),\n optimizer=trainer_lib.create_optimizer(config.trainer, config.runtime),\n checkpoint_exporter=ckpt_exporter)\n return trainer\n\n @combinations.generate(all_strategy_combinations())\n def test_trainer_train(self, distribution):\n with distribution.scope():\n trainer = self.create_test_trainer(self._config)\n logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))\n self.assertIn('training_loss', logs)\n self.assertIn('learning_rate', logs)\n\n @combinations.generate(all_strategy_combinations())\n def test_trainer_validate(self, distribution):\n with distribution.scope():\n trainer = self.create_test_trainer(self._config)\n logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))\n self.assertIn('validation_loss', logs)\n self.assertEqual(logs['acc'], 5. 
* distribution.num_replicas_in_sync)\n\n @combinations.generate(\n combinations.combine(\n mixed_precision_dtype=['float32', 'bfloat16', 'float16'],\n loss_scale=[None, 'dynamic', 128, 256],\n ))\n def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):\n config = cfg.ExperimentConfig(\n runtime=cfg.RuntimeConfig(\n mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),\n trainer=cfg.TrainerConfig(\n optimizer_config=cfg.OptimizationConfig({\n 'optimizer': {\n 'type': 'sgd'\n },\n 'learning_rate': {\n 'type': 'constant'\n }\n })))\n trainer = self.create_test_trainer(config)\n if mixed_precision_dtype != 'float16':\n self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)\n elif mixed_precision_dtype == 'float16' and loss_scale is None:\n self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)\n else:\n self.assertIsInstance(\n trainer.optimizer,\n tf.keras.mixed_precision.experimental.LossScaleOptimizer)\n\n metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))\n self.assertIn('training_loss', metrics)\n\n @combinations.generate(all_strategy_combinations())\n def test_export_best_ckpt(self, distribution):\n config = cfg.ExperimentConfig(\n trainer=cfg.TrainerConfig(\n best_checkpoint_export_subdir='best_ckpt',\n best_checkpoint_eval_metric='acc',\n optimizer_config=cfg.OptimizationConfig({\n 'optimizer': {\n 'type': 'sgd'\n },\n 'learning_rate': {\n 'type': 'constant'\n }\n })))\n model_dir = self.get_temp_dir()\n trainer = self.create_test_trainer(config, model_dir=model_dir)\n trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))\n trainer.evaluate(tf.convert_to_tensor(1, dtype=tf.int32))\n self.assertTrue(\n tf.io.gfile.exists(os.path.join(model_dir, 'best_ckpt', 'info.json')))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.python.distribute.combinations.combine",
"tensorflow.convert_to_tensor",
"tensorflow.test.main"
]
] |
prkriley/neurips2019_intrus | [
"3e36930246347e6b80a583d2ab378054ea3b9f7a"
] | [
"lib/models.py"
] | [
"\"\"\"\nTransformer encoder / decoder layer chain\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nimport lib.layers\nfrom . import layers, ops\nfrom .data import linelen\n\n\nclass Transformer:\n\n def __init__(\n self, name, inp_voc, out_voc,\n logits_bias=False, share_emb=False, dst_rand_offset=False,\n rescale_emb=True, inp_emb_bias=False, emb_inp_device='', emb_out_device='',\n **kwargs\n ):\n \"\"\"\n Transformer-based model that predicts logp(insert(i, token) | x, y)\n :type inp_voc: lib.voc.Voc\n :type out_voc: lib.voc.Voc\n :param logits_bias: if True, final logits layer has bias term.\n :param share_emb: if True, input and output embeddings will use the same matrix.\n Useful for in case of shared vocabularies or when there is a\n :param dst_rand_offset: if True, adds a random offset to output embeddings, same for all positions\n :param kwargs: other hyperparameters - see TransformerChain and TransformerEmbedding\n \"\"\"\n self.name = name\n self.inp_voc, self.out_voc = inp_voc, out_voc\n self.dst_rand_offset = dst_rand_offset\n self.hp = kwargs\n\n emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512))\n max_voc_size = max(len(inp_voc), len(out_voc))\n\n with tf.variable_scope(self.name) as self.scope:\n # Embeddings\n self.emb_inp = layers.TransformerEmbedding(\n 'emb_inp', max_voc_size if share_emb else len(inp_voc), emb_size,\n bias=inp_emb_bias, rescale=rescale_emb, device=emb_inp_device)\n\n self.emb_out = layers.TransformerEmbedding(\n 'emb_out', max_voc_size if share_emb else len(out_voc), emb_size,\n matrix=self.emb_inp.emb.mat if share_emb else None,\n rescale=rescale_emb, device=emb_out_device)\n\n # Model body\n self.encoder = layers.TransformerChain('enc', **kwargs)\n self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'], **kwargs)\n\n # logits: token insertions plus one extra logit to predict position where to insert\n self.logits = layers.Dense(\n 'logits', kwargs['hid_size'], len(out_voc) + 1,\n matrix=tf.transpose(self.emb_out.emb.mat) if kwargs.get('dwwt', False) else None,\n bias=None if logits_bias else 0\n )\n\n def _get_batch_sample(self):\n \"\"\" A minimal example of model input data \"\"\"\n return [(\"i saw a cat\", \"i write the code\")]\n\n def make_encoder_batch_ph(self):\n return {\n 'inp': tf.placeholder('int32', [None, None]),\n 'inp_len': tf.placeholder('int32', [None])\n }\n\n def make_feed_dict(self, batch, **kwargs):\n \"\"\" Take input data strings, return a dict { key: np.array(value) } \"\"\"\n inp_lines, out_lines = zip(*batch)\n inp_len = [linelen(line) for line in inp_lines]\n out_len = [linelen(line) for line in out_lines]\n return {\n 'inp': self.inp_voc.to_matrix(inp_lines),\n 'inp_len': np.array(inp_len, 'int32'),\n 'out': self.out_voc.to_matrix(out_lines),\n 'out_len': np.array(out_len, 'int32')\n }\n\n def encode(self, batch, is_train):\n \"\"\" Take placeholders for data batch, return encoder state \"\"\"\n with tf.name_scope(self.name), ops.dropout_scope(is_train):\n inp = batch['inp'] # [batch_size * ninp]\n inp_len = batch.get('inp_len', ops.infer_length(inp, self.inp_voc.eos)) # [batch]\n attn_mask = ops.make_attn_mask(inp, inp_len) # [batch_size, 1, 1, ninp]\n out, _ = self.encoder(self.emb_inp(inp), self_attn_mask=attn_mask)\n # ^-- [batch_size, ninp, hid_size]\n return dict(out=out, attn_mask=attn_mask)\n\n def compute_action_logprobs(self, batch, is_train, enc=None, temperature=None):\n \"\"\"\n Compute log-probabilities for all possible actions (aka agent policy)\n :param batch: a dict with\n - 
token matrix 'out'[batch_size, output_length]\n - optional length vector out_len[batch_size]\n :param is_train: whether or not to use training behavior (e.g. dropout)\n :returns: {'insert':logp(insert(i, c) | x, y), 'finish':logp(terminate| x, y)}\n \"\"\"\n enc = self.encode(batch, is_train) if enc is None else enc\n with tf.name_scope(self.name), ops.dropout_scope(is_train):\n out = batch['out'] # partial translation, shape: [batch_size * nout]\n out_len = batch.get('out_len', ops.infer_length(out, self.out_voc.eos)) # [batch]\n\n # embedding. Note: at this point, a special \"zero\" vector is added\n # to the first position hence length is increased by 1\n\n out_padded = tf.concat([tf.zeros_like(out[:, :1]), out], axis=1) # [batch_size, nout+1]\n dec_emb = self.emb_out(out_padded, offset='random' if self.dst_rand_offset else 0)\n # ^-- shape: [batch_size, nout + 1]\n\n # run decoder\n attn_mask = ops.make_attn_mask(out_padded, out_len + 1) # [batch_size, 1, 1, nout + 1]\n dec_out, _ = self.decoder(dec_emb, self_attn_mask=attn_mask,\n enc_out=enc['out'], enc_attn_mask=enc['attn_mask'])\n # ^-- [batch_size, nout + 1, hid_size]\n\n logits = self.logits(dec_out) # [batch_size, nout + 1, voc_size + 1]\n if temperature is not None:\n logits /= temperature\n\n # compute log-probabilities for actions\n\n # position log-probabilities, logP(insert(pos, *) | ...)\n # used to predict position of next insert and termination condition (EOS)\n position_logits = logits[:, :, -1] # [batch_size, nout + 1]\n\n position_mask = tf.cast(attn_mask, tf.bool)[:, 0, 0, :] # [batch_size, nout + 1]\n position_logits = tf.where(position_mask, position_logits,\n tf.fill(tf.shape(position_logits), -1e9))\n position_logp = tf.nn.log_softmax(position_logits, axis=-1) # [batch_size, n_out]\n\n # two actions: insert - at any non-EOS position - or finish - defined as inserting at EOS\n finish_logp = tf.gather_nd(position_logp,\n tf.stack([tf.range(tf.shape(out_len)[0]), out_len], axis=1))\n # ^-- [batch_size]\n\n insert_position_logp = tf.where(position_mask[:, 1:], position_logp[:, :-1],\n tf.fill(tf.shape(position_logp[:, :-1]), -1e9))\n # ^-- [batch_size, nout]\n\n # insertion log-probabilities:\n # logP(insert(pos, tok) | ...) = logP(insert(pos, *) | ...) 
+ logP(insert(pos, tok) | insert(pos, *), ...)\n\n token_logits = logits[:, :-1, :len(self.out_voc)] # [batch_size, n_out, voc_size]\n token_logp_given_position = tf.nn.log_softmax(token_logits, axis=-1)\n # note: we do not need mask on token_logp_given_position cuz mask is already applied to insert_position_logp\n\n insert_logp = insert_position_logp[:, :, None] + token_logp_given_position\n\n return {\n # group 1 (exps sum to 1)\n 'insert': insert_logp, # [batch_size, nout, voc_size]\n 'finish': finish_logp, # [batch_size]\n }\n\n\nclass ImgToSeqTransformer(Transformer):\n def __init__(\n self, name, out_voc, inp_w, inp_h, inp_channels=3, make_encoder=lib.layers.ImageEncoder,\n logits_bias=False, share_emb=False, dst_rand_offset=False,\n rescale_emb=True, emb_out_device='',\n **kwargs\n ):\n \"\"\"\n Transformer-based model that predicts logp(insert(i, token) | x, y)\n :type out_voc: lib.voc.Voc\n :param logits_bias: if True, final logits layer has bias term.\n :param dst_rand_offset: if True, adds a random offset to output embeddings, same for all positions\n :param kwargs: other hyperparameters - see TransformerChain and TransformerEmbedding\n \"\"\"\n self.name = name\n self.inp_voc, self.out_voc = out_voc, out_voc # inp voc is a stub, the same as out_voc\n self.dst_rand_offset = dst_rand_offset\n self.hp = kwargs\n self.w = inp_w\n self.h = inp_h\n self.inp_channels = inp_channels\n\n emb_size = kwargs.get('emb_size', kwargs.get('hid_size', 512))\n max_voc_size = len(out_voc)\n\n with tf.variable_scope(self.name) as self.scope:\n # Embeddings\n\n self.emb_out = layers.TransformerEmbedding(\n 'emb_out', max_voc_size if share_emb else len(out_voc), emb_size,\n matrix=self.emb_inp.emb.mat if share_emb else None,\n rescale=rescale_emb, device=emb_out_device)\n\n # Model body\n self.encoder = make_encoder('enc', inp_h=inp_w, inp_w=inp_h, inp_channels=inp_channels, **kwargs)\n\n enc_out_shape = self.encode(self.make_encoder_batch_ph(), True)['out'].shape\n assert enc_out_shape.ndims == 3 and enc_out_shape[-1].value is not None, \\\n \"encoder output shape must be a 3d tensor with fixed num units, \" \\\n \"got shape {}\".format(enc_out_shape)\n\n self.decoder = layers.TransformerChain('dec', attn_inputs=['enc'],\n attn_input_sizes={'enc': enc_out_shape[-1].value},\n **kwargs)\n\n # logits: token insertions plus one extra logit to predict position where to insert\n self.logits = layers.Dense(\n 'logits', kwargs['hid_size'], len(out_voc) + 1,\n bias=None if logits_bias else 0\n )\n\n\n def _get_batch_sample(self):\n \"\"\" A minimal example of model input data \"\"\"\n return [(np.zeros((self.h, self.w, self.inp_channels)), 'A cat sat')]\n\n def make_feed_dict(self, batch, **kwargs):\n \"\"\" Take input data strings, return a dict { key: np.array(value) } \"\"\"\n inp_imgs, out_lines = zip(*batch)\n\n out_len = [linelen(line) for line in out_lines]\n return {\n 'inp': np.array(inp_imgs, 'float32'),\n 'out': self.out_voc.to_matrix(out_lines),\n 'out_len': np.array(out_len, 'int32')\n }\n\n def make_encoder_batch_ph(self):\n return {\n 'inp': tf.placeholder('float32', [None, self.h, self.w, self.inp_channels]),\n }\n\n def encode(self, batch, is_train):\n \"\"\" Take placeholders for data batch, return encoder state \"\"\"\n with tf.name_scope(self.name), ops.dropout_scope(is_train):\n inp = batch['inp'] # [batch_size * ninp]\n\n out = self.encoder(inp)\n assert out.shape[-1] is not None\n out_shape = tf.shape(out)\n\n out = tf.reshape(out, [out_shape[0], -1, out.shape[-1]])\n\n attn_mask = 
tf.ones((out_shape[0], 1, 1, out_shape[1] * out_shape[2])) # [batch_size, 1, 1, ninp]\n\n return dict(out=out, attn_mask=attn_mask)\n"
] | [
[
"tensorflow.placeholder",
"tensorflow.shape",
"tensorflow.reshape",
"numpy.zeros",
"tensorflow.ones",
"tensorflow.variable_scope",
"tensorflow.zeros_like",
"tensorflow.nn.log_softmax",
"tensorflow.cast",
"tensorflow.name_scope",
"numpy.array",
"tensorflow.transpose"
]
] |
Prtfw/PySyft | [
"35012f5bf55628bb19761d5f40d03181fbbb1766"
] | [
"test/torch/pointers/test_pointer_tensor.py"
] | [
"import torch\nimport torch as th\nimport syft\n\nfrom syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor\nfrom syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor\nfrom syft.generic.pointers.pointer_tensor import PointerTensor\nimport pytest\n\n\ndef test_init(workers):\n alice, me = workers[\"alice\"], workers[\"me\"]\n pointer = PointerTensor(id=1000, location=alice, owner=me)\n pointer.__str__()\n\n\ndef test_create_pointer():\n x = torch.Tensor([1, 2])\n x.create_pointer()\n\n\ndef test_send_default_garbage_collector_true(workers):\n \"\"\"\n Remote tensor should be garbage collected by default on\n deletion of the Pointer tensor pointing to remote tensor\n \"\"\"\n alice = workers[\"alice\"]\n\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice)\n assert x_ptr.child.garbage_collect_data\n\n\ndef test_send_garbage_collect_data_false(workers):\n \"\"\"\n Remote tensor should be not garbage collected on\n deletion of the Pointer tensor pointing to remote tensor\n \"\"\"\n alice = workers[\"alice\"]\n\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice)\n x_ptr.garbage_collection = False\n assert x_ptr.child.garbage_collect_data == False\n\n\ndef test_send_gc_false(workers):\n \"\"\"\n Remote tensor should be not garbage collected on\n deletion of the Pointer tensor pointing to remote tensor\n \"\"\"\n alice = workers[\"alice\"]\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice)\n x_ptr.gc = False\n assert x_ptr.child.garbage_collect_data == False\n assert x_ptr.gc == False, \"property GC is not in sync\"\n assert x_ptr.garbage_collection == False, \"property garbage_collection is not in sync\"\n\n\ndef test_send_gc_true(workers):\n \"\"\"\n Remote tensor by default is garbage collected on\n deletion of Pointer Tensor\n \"\"\"\n alice = workers[\"alice\"]\n\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice)\n\n assert x_ptr.gc == True\n\n\ndef test_send_disable_gc(workers):\n \"\"\"Pointer tensor should be not garbage collected.\"\"\"\n alice = workers[\"alice\"]\n\n x = torch.Tensor([-1, 2])\n x_ptr = x.send(alice).disable_gc\n assert x_ptr.child.garbage_collect_data == False\n assert x_ptr.gc == False, \"property GC is not in sync\"\n assert x_ptr.garbage_collection == False, \"property garbage_collection is not in sync\"\n\n\ndef test_send_get(workers):\n \"\"\"Test several send get usages\"\"\"\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n # simple send\n x = torch.Tensor([1, 2])\n x_ptr = x.send(bob)\n x_back = x_ptr.get()\n assert (x == x_back).all()\n\n # send with variable overwriting\n x = torch.Tensor([1, 2])\n x = x.send(bob)\n x_back = x.get()\n assert (torch.Tensor([1, 2]) == x_back).all()\n\n # double send\n x = torch.Tensor([1, 2])\n x_ptr = x.send(bob)\n x_ptr_ptr = x_ptr.send(alice)\n x_ptr_back = x_ptr_ptr.get()\n x_back_back = x_ptr_back.get()\n assert (x == x_back_back).all()\n\n # double send with variable overwriting\n x = torch.Tensor([1, 2])\n x = x.send(bob)\n x = x.send(alice)\n x = x.get()\n x_back = x.get()\n assert (torch.Tensor([1, 2]) == x_back).all()\n\n # chained double send\n x = torch.Tensor([1, 2])\n x = x.send(bob).send(alice)\n x_back = x.get().get()\n assert (torch.Tensor([1, 2]) == x_back).all()\n\n\ndef test_inplace_send_get(workers):\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1.0, -1.0, 3.0, 4.0])\n tensor_ptr = tensor.send_(bob)\n\n assert tensor_ptr.id == tensor.id\n assert id(tensor_ptr) == id(tensor)\n\n tensor_back = tensor_ptr.get_()\n\n assert 
tensor_back.id == tensor_ptr.id\n assert tensor_back.id == tensor.id\n assert id(tensor_back) == id(tensor)\n assert id(tensor_back) == id(tensor)\n\n assert (tensor_back == tensor).all()\n\n\ndef test_repeated_send(workers):\n \"\"\"Tests that repeated calls to .send(bob) works gracefully.\n Previously garbage collection deleted the remote object\n when .send() was called twice. This test ensures the fix still\n works.\"\"\"\n\n bob = workers[\"bob\"]\n\n # create tensor\n x = torch.Tensor([1, 2])\n\n # send tensor to bob\n x_ptr = x.send(bob)\n\n # send tensor again\n x_ptr = x.send(bob)\n\n # ensure bob has tensor\n assert x.id in bob._objects\n\n\ndef test_remote_autograd(workers):\n \"\"\"Tests the ability to backpropagate gradients on a remote\n worker.\"\"\"\n\n bob = workers[\"bob\"]\n\n # TEST: simple remote grad calculation\n\n # create a tensor\n x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)\n\n # send tensor to bob\n x = x.send(bob)\n\n # do some calculation\n y = (x + x).sum()\n\n # backpropagate on remote machine\n y.backward()\n\n # check that remote gradient is correct\n x_grad = bob._objects[x.id_at_location].grad\n x_grad_target = torch.ones(4).float() + 1\n assert (x_grad == x_grad_target).all()\n\n # TEST: Ensure remote grad calculation gets properly serded\n\n # create tensor\n x = torch.tensor([1, 2, 3, 4.0], requires_grad=True).send(bob)\n\n # compute function\n y = x.sum()\n\n # backpropagate\n y.backward()\n\n # get the gradient created from backpropagation manually\n x_grad = bob._objects[x.id_at_location].grad\n\n # get the entire x tensor (should bring the grad too)\n x = x.get()\n\n # make sure that the grads match\n assert (x.grad == x_grad).all()\n\n\ndef test_gradient_send_recv(workers):\n \"\"\"Tests that gradients are properly sent and received along\n with their tensors.\"\"\"\n\n bob = workers[\"bob\"]\n\n # create a tensor\n x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)\n\n # create gradient on tensor\n x.sum().backward(th.tensor(1.0))\n\n # save gradient\n orig_grad = x.grad\n\n # send and get back\n t = x.send(bob).get()\n\n # check that gradient was properly serde\n assert (t.grad == orig_grad).all()\n\n\ndef test_method_on_attribute(workers):\n\n bob = workers[\"bob\"]\n\n # create remote object with children\n x = torch.Tensor([1, 2, 3])\n x = syft.LoggingTensor().on(x).send(bob)\n\n # call method on data tensor directly\n x.child.point_to_attr = \"child.child\"\n y = x.add(x)\n assert isinstance(y.get(), torch.Tensor)\n\n # call method on loggingtensor directly\n x.child.point_to_attr = \"child\"\n y = x.add(x)\n y = y.get()\n assert isinstance(y.child, syft.LoggingTensor)\n\n # # call method on zeroth attribute\n # x.child.point_to_attr = \"\"\n # y = x.add(x)\n # y = y.get()\n #\n # assert isinstance(y, torch.Tensor)\n # assert isinstance(y.child, syft.LoggingTensor)\n # assert isinstance(y.child.child, torch.Tensor)\n\n # call .get() on pinter to attribute (should error)\n x.child.point_to_attr = \"child\"\n try:\n x.get()\n except syft.exceptions.CannotRequestObjectAttribute as e:\n assert True\n\n\ndef test_grad_pointer(workers):\n \"\"\"Tests the automatic creation of a .grad pointer when\n calling .send() on a tensor with requires_grad==True\"\"\"\n\n bob = workers[\"bob\"]\n\n x = torch.tensor([1, 2, 3.0], requires_grad=True).send(bob)\n y = (x + x).sum()\n y.backward()\n\n assert (bob._objects[x.id_at_location].grad == torch.tensor([2, 2, 2.0])).all()\n\n\ndef test_move(workers):\n alice, bob, james, me = workers[\"alice\"], 
workers[\"bob\"], workers[\"james\"], workers[\"me\"]\n\n x = torch.tensor([1, 2, 3, 4, 5]).send(bob)\n\n assert x.id_at_location in bob._objects\n assert x.id_at_location not in alice._objects\n\n x.move(alice)\n\n assert x.id_at_location in bob._objects\n assert x.id_at_location in alice._objects\n\n x = torch.tensor([1.0, 2, 3, 4, 5], requires_grad=True).send(bob)\n\n assert x.id_at_location in bob._objects\n assert x.id_at_location not in alice._objects\n\n x.move(alice)\n\n assert x.id_at_location in bob._objects\n assert x.id_at_location in alice._objects\n\n alice.clear_objects()\n bob.clear_objects()\n x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)\n x.move(alice)\n\n assert len(alice._objects) == 1\n\n # Test .move on remote objects\n\n james.clear_objects()\n x = th.tensor([1.0]).send(james)\n remote_x = james._objects[x.id_at_location]\n remote_ptr = remote_x.send(bob)\n assert remote_ptr.id in james._objects.keys()\n remote_ptr2 = remote_ptr.move(alice)\n assert remote_ptr2.id in james._objects.keys()\n\n # Test .move back to myself\n\n alice.clear_objects()\n bob.clear_objects()\n x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)\n y = x.move(alice)\n z = y.move(me)\n assert (z == x).all()\n\n\ndef test_combine_pointers(workers):\n \"\"\"\n Ensure that the sy.combine_pointers works as expected\n \"\"\"\n\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n x = th.tensor([1, 2, 3, 4, 5]).send(bob)\n y = th.tensor([1, 2, 3, 4, 5]).send(alice)\n\n a = x.combine(y)\n b = a + a\n\n c = b.get(sum_results=True)\n assert (c == th.tensor([4, 8, 12, 16, 20])).all()\n\n b = a + a\n c = b.get(sum_results=False)\n assert len(c) == 2\n assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all\n\n\ndef test_remote_to_cpu_device(workers):\n \"\"\"Ensure remote .to cpu works\"\"\"\n device = torch.device(\"cpu\")\n bob = workers[\"bob\"]\n\n x = th.tensor([1, 2, 3, 4, 5]).send(bob)\n x.to(device)\n\n\ndef test_get_remote_shape(workers):\n \"\"\"Test pointer.shape functionality\"\"\"\n bob = workers[\"bob\"]\n # tensor directly sent: shape stored at sending\n x = th.tensor([1, 2, 3, 4, 5]).send(bob)\n assert x.shape == torch.Size([5])\n # result of an operation: need to make a call to the remote worker\n y = x + x\n assert y.shape == torch.Size([5])\n\n\ndef test_remote_function_with_multi_ouput(workers):\n \"\"\"\n Functions like .split return several tensors, registration and response\n must be made carefully in this case\n \"\"\"\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n r_ptr = torch.split(ptr, 2)\n assert (r_ptr[0].get() == torch.tensor([1, 2.0])).all()\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n max_value, argmax_idx = torch.max(ptr, 0)\n\n assert max_value.get().item() == 4.0\n assert argmax_idx.get().item() == 3\n\n\ndef test_raising_error_when_item_func_called(workers):\n pointer = PointerTensor(id=1000, location=workers[\"alice\"], owner=workers[\"me\"])\n with pytest.raises(RuntimeError):\n pointer.item()\n\n\ndef test_fix_prec_on_pointer_tensor(workers):\n \"\"\"\n Ensure .fix_precision() works as expected.\n Also check that fix_precision() is not inplace.\n \"\"\"\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n\n ptr_fp = ptr.fix_precision()\n\n remote_tensor = bob._objects[ptr.id_at_location]\n remote_fp_tensor = bob._objects[ptr_fp.id_at_location]\n\n # check that fix_precision is not inplace\n assert (remote_tensor == tensor).all()\n\n assert isinstance(ptr.child, 
PointerTensor)\n assert isinstance(remote_fp_tensor.child, FixedPrecisionTensor)\n\n\ndef test_fix_prec_on_pointer_of_pointer(workers):\n \"\"\"\n Ensure .fix_precision() works along a chain of pointers.\n \"\"\"\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n ptr = ptr.send(alice)\n\n ptr = ptr.fix_precision()\n\n alice_tensor = alice._objects[ptr.id_at_location]\n remote_tensor = bob._objects[alice_tensor.id_at_location]\n\n assert isinstance(ptr.child, PointerTensor)\n assert isinstance(remote_tensor.child, FixedPrecisionTensor)\n\n\ndef test_float_prec_on_pointer_tensor(workers):\n \"\"\"\n Ensure .float_precision() works as expected.\n \"\"\"\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n ptr = ptr.fix_precision()\n\n ptr = ptr.float_precision()\n remote_tensor = bob._objects[ptr.id_at_location]\n\n assert isinstance(ptr.child, PointerTensor)\n assert isinstance(remote_tensor, torch.Tensor)\n\n\ndef test_float_prec_on_pointer_of_pointer(workers):\n \"\"\"\n Ensure .float_precision() works along a chain of pointers.\n \"\"\"\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n ptr = ptr.send(alice)\n ptr = ptr.fix_precision()\n\n ptr = ptr.float_precision()\n\n alice_tensor = alice._objects[ptr.id_at_location]\n remote_tensor = bob._objects[alice_tensor.id_at_location]\n\n assert isinstance(ptr.child, PointerTensor)\n assert isinstance(remote_tensor, torch.Tensor)\n\n\ndef test_share_get(workers):\n \"\"\"\n Ensure .share() works as expected.\n \"\"\"\n bob = workers[\"bob\"]\n\n tensor = torch.tensor([1, 2, 3])\n ptr = tensor.send(bob)\n\n ptr = ptr.share()\n remote_tensor = bob._objects[ptr.id_at_location]\n\n assert isinstance(ptr.child, PointerTensor)\n assert isinstance(remote_tensor.child, AdditiveSharingTensor)\n\n\ndef test_registration_of_action_on_pointer_of_pointer(workers):\n \"\"\"\n Ensure actions along a chain of pointers are registered as expected.\n \"\"\"\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n tensor = torch.tensor([1, 2, 3, 4.0])\n ptr = tensor.send(bob)\n ptr = ptr.send(alice)\n ptr_action = ptr + ptr\n\n assert len(alice._objects) == 2\n assert len(bob._objects) == 2\n\n\ndef test_setting_back_grad_to_origin_after_send(workers):\n \"\"\"\n Calling .backward() on a tensor sent using `.send(..., requires_grad=True)`\n should update the origin tensor gradient\n \"\"\"\n me = workers[\"me\"]\n alice = workers[\"alice\"]\n\n with me.registration_enabled():\n x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)\n y = x + x\n me.register_obj(y) # registration on the local worker is sometimes buggy\n\n y_ptr = y.send(alice, requires_grad=True)\n z_ptr = y_ptr * 2\n\n z = z_ptr.sum()\n z.backward()\n\n assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()\n\n\ndef test_setting_back_grad_to_origin_after_move(workers):\n \"\"\"\n Calling .backward() on a tensor moved using `.move(..., requires_grad=True)`\n should update the origin tensor gradient\n \"\"\"\n me = workers[\"me\"]\n bob = workers[\"bob\"]\n alice = workers[\"alice\"]\n\n with me.registration_enabled():\n x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)\n y = x + x\n me.register_obj(y) # registration on the local worker is sometimes buggy\n\n y_ptr = y.send(alice, requires_grad=True)\n z_ptr = y_ptr * 2\n\n z_ptr2 = z_ptr.move(bob, requires_grad=True)\n z = z_ptr2.sum()\n z.backward()\n\n 
assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()\n"
] | [
[
"torch.ones",
"torch.Size",
"torch.split",
"torch.tensor",
"torch.max",
"torch.device",
"torch.Tensor"
]
] |
MissPenguin/Paddle | [
"266fcbe0aed3e566c167ea8de5114f62c428c013"
] | [
"python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport unittest\nimport abc\nimport os\nimport enum\nimport logging\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.initializer import NumpyArrayInitializer\nimport paddle.fluid.core as core\nfrom paddle import compat as cpt\nimport paddle.inference as paddle_infer\nfrom typing import Optional, List, Callable, Dict, Any, Set\nfrom program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\n\nclass SkipReasons(enum.Enum):\n # Paddle not support, but trt support, we need to add the feature.\n TRT_NOT_IMPLEMENTED = 0\n # TRT not support.\n TRT_NOT_SUPPORT = 1\n\n\nclass AutoScanTest(unittest.TestCase):\n def __init__(self, methodName='runTest'):\n np.random.seed(1024)\n paddle.enable_static()\n super(AutoScanTest, self).__init__(methodName)\n self.skip_cases = []\n\n @abc.abstractmethod\n def sample_program_configs(self) -> List[ProgramConfig]:\n '''\n Generate all config with the combination of different Input tensor shape and\n different Attr values.\n '''\n raise NotImplementedError\n\n @abc.abstractmethod\n def sample_predictor_configs(self) -> List[paddle_infer.Config]:\n raise NotImplementedError\n\n @abc.abstractmethod\n def add_skip_case(\n self,\n teller: [Callable[[ProgramConfig, paddle_infer.Config], bool]],\n reason: SkipReasons,\n note: str):\n self.skip_cases.append((teller, reason, note))\n\n @abc.abstractmethod\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n raise NotImplementedError\n\n def run_test_config(self, model, params, prog_config, pred_config,\n feed_data) -> Dict[str, np.ndarray]:\n '''\n Test a single case.\n '''\n pred_config.set_model_buffer(model, len(model), params, len(params))\n predictor = paddle_infer.create_predictor(pred_config)\n\n for name, _ in prog_config.inputs.items():\n input_tensor = predictor.get_input_handle(name)\n input_tensor.copy_from_cpu(feed_data[name]['data'])\n if feed_data[name]['lod'] is not None:\n input_tensor.set_lod(feed_data[name]['lod'])\n predictor.run()\n result = {}\n for out_name, o_name in zip(prog_config.outputs,\n predictor.get_output_names()):\n result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu()\n return result\n\n def assert_tensors_near(self,\n threshold: float,\n tensors: List[Dict[str, np.array]]):\n assert len(tensors) > 1\n first = tensors[0]\n for group in tensors[1:]:\n for key, arr in group.items():\n self.assertTrue(\n np.allclose(\n first[key], arr, atol=threshold),\n \"Output has diff between GPU and TensorRT. \")\n\n @abc.abstractmethod\n def run_test(self, quant=False):\n raise NotImplementedError\n"
] | [
[
"numpy.allclose",
"numpy.random.seed"
]
] |
funkchaser/compas | [
"b58de8771484aa0c6068d43df78b1679503215de"
] | [
"src/compas_plotters/artists/pointartist.py"
] | [
"from typing import Tuple\nfrom typing import List\nfrom typing import Any\n\nfrom matplotlib.patches import Circle\nfrom matplotlib.transforms import ScaledTranslation\nfrom compas.geometry import Point\n\nfrom compas.artists import PrimitiveArtist\nfrom .artist import PlotterArtist\n\nColor = Tuple[float, float, float]\n\n\nclass PointArtist(PlotterArtist, PrimitiveArtist):\n \"\"\"Artist for COMPAS points.\"\"\"\n\n def __init__(self,\n point: Point,\n size: int = 5,\n facecolor: Color = (1.0, 1.0, 1.0),\n edgecolor: Color = (0, 0, 0),\n zorder: int = 9000,\n **kwargs: Any):\n\n super().__init__(primitive=point, **kwargs)\n\n self._mpl_circle = None\n self._size = None\n self.size = size\n self.facecolor = facecolor\n self.edgecolor = edgecolor\n self.zorder = zorder\n\n @property\n def point(self):\n return self.primitive\n\n @point.setter\n def point(self, point):\n self.primitive = point\n\n @property\n def _T(self):\n F = self.plotter.figure.dpi_scale_trans\n S = ScaledTranslation(self.point[0], self.point[1], self.plotter.axes.transData)\n T = F + S\n return T\n\n @property\n def size(self) -> float:\n return self._size / self.plotter.dpi\n\n @size.setter\n def size(self, size: int):\n self._size = size\n\n @property\n def data(self) -> List[List[float]]:\n return [self.point[:2]]\n\n def draw(self) -> None:\n circle = Circle(\n [0, 0],\n radius=self.size,\n facecolor=self.facecolor,\n edgecolor=self.edgecolor,\n transform=self._T,\n zorder=self.zorder\n )\n self._mpl_circle = self.plotter.axes.add_artist(circle)\n self.update_data()\n\n def redraw(self) -> None:\n self._mpl_circle.set_radius(self.size)\n self._mpl_circle.set_edgecolor(self.edgecolor)\n self._mpl_circle.set_facecolor(self.facecolor)\n self._mpl_circle.set_transform(self._T)\n self.update_data()\n"
] | [
[
"matplotlib.transforms.ScaledTranslation",
"matplotlib.patches.Circle"
]
] |
adozier/pymatgen | [
"f1cc4d8db24ec11063be2fd84b4ea911f006eeb7"
] | [
"pymatgen/core/units.py"
] | [
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\n\n\"\"\"\nThis module implements a FloatWithUnit, which is a subclass of float. It\nalso defines supported units for some commonly used units for energy, length,\ntemperature, time and charge. FloatWithUnit also support conversion to one\nanother, and additions and subtractions perform automatic conversion if\nunits are detected. An ArrayWithUnit is also implemented, which is a subclass\nof numpy's ndarray with similar unit features.\n\"\"\"\n\nfrom six.moves import filter, zip\n\n__author__ = \"Shyue Ping Ong, Matteo Giantomassi\"\n__copyright__ = \"Copyright 2011, The Materials Project\"\n__version__ = \"1.0\"\n__maintainer__ = \"Shyue Ping Ong, Matteo Giantomassi\"\n__status__ = \"Production\"\n__date__ = \"Aug 30, 2013\"\n\nimport numpy as np\nimport six\n\nimport collections\nfrom numbers import Number\nimport numbers\nfrom functools import partial\n\nimport re\n\nimport scipy.constants as const\n\n\"\"\"\nSome conversion factors\n\"\"\"\nHa_to_eV = 1/const.physical_constants[\"electron volt-hartree relationship\"][0]\neV_to_Ha = 1 / Ha_to_eV\nRy_to_eV = Ha_to_eV / 2\namu_to_kg = const.physical_constants[\"atomic mass unit-kilogram relationship\"][0]\nmile_to_meters = const.mile\nbohr_to_angstrom = const.physical_constants[\"Bohr radius\"][0] * 1e10\nbohr_to_ang = bohr_to_angstrom\n\n\"\"\"\nDefinitions of supported units. Values below are essentially scaling and\nconversion factors. What matters is the relative values, not the absolute.\nThe SI units must have factor 1.\n\"\"\"\nBASE_UNITS = {\n \"length\": {\n \"m\": 1,\n \"km\": 1000,\n \"mile\": mile_to_meters,\n \"ang\": 1e-10,\n \"cm\": 1e-2,\n \"pm\": 1e-12,\n \"bohr\": bohr_to_angstrom * 1e-10,\n },\n \"mass\": {\n \"kg\": 1,\n \"g\": 1e-3,\n \"amu\": amu_to_kg,\n },\n \"time\": {\n \"s\": 1,\n \"min\": 60,\n \"h\": 3600,\n },\n \"current\": {\n \"A\": 1\n },\n \"temperature\": {\n \"K\": 1,\n },\n \"amount\": {\n \"mol\": 1,\n \"atom\": 1 / const.N_A\n },\n \"intensity\": {\n \"cd\": 1\n },\n \"memory\": {\n \"byte\": 1,\n \"Kb\": 1024,\n \"Mb\": 1024**2,\n \"Gb\": 1024**3,\n \"Tb\": 1024**4,\n },\n}\n\n# Accept kb, mb, gb ... 
as well.\nBASE_UNITS[\"memory\"].update({k.lower(): v\n for k, v in BASE_UNITS[\"memory\"].items()})\n\n\n# This current list are supported derived units defined in terms of powers of\n# SI base units and constants.\nDERIVED_UNITS = {\n \"energy\": {\n \"eV\": {\"kg\": 1, \"m\": 2, \"s\": -2, const.e: 1},\n \"meV\": {\"kg\": 1, \"m\": 2, \"s\": -2, const.e * 1e-3: 1},\n \"Ha\": {\"kg\": 1, \"m\": 2, \"s\": -2, const.e * Ha_to_eV: 1},\n \"Ry\": {\"kg\": 1, \"m\": 2, \"s\": -2, const.e * Ry_to_eV: 1},\n \"J\": {\"kg\": 1, \"m\": 2, \"s\": -2},\n \"kJ\": {\"kg\": 1, \"m\": 2, \"s\": -2, 1000: 1}\n },\n \"charge\": {\n \"C\": {\"A\": 1, \"s\": 1},\n \"e\": {\"A\": 1, \"s\": 1, const.e: 1},\n },\n \"force\": {\n \"N\": {\"kg\": 1, \"m\": 1, \"s\": -2},\n \"KN\": {\"kg\": 1, \"m\": 1, \"s\": -2, 1000: 1},\n \"MN\": {\"kg\": 1, \"m\": 1, \"s\": -2, 1e6: 1},\n \"GN\": {\"kg\": 1, \"m\": 1, \"s\": -2, 1e9: 1},\n },\n \"pressure\": {\n \"Pa\": {\"kg\": 1, \"m\": -1, \"s\": -2},\n \"KPa\": {\"kg\": 1, \"m\": -1, \"s\": -2, 1000: 1},\n \"MPa\": {\"kg\": 1, \"m\": -1, \"s\": -2, 1e6: 1},\n \"GPa\": {\"kg\": 1, \"m\": -1, \"s\": -2, 1e9: 1}\n },\n \"power\": {\n \"W\": {\"m\": 2, \"kg\": 1, \"s\": -3},\n \"KW\": {\"m\": 2, \"kg\": 1, \"s\": -3, 1000: 1},\n \"MW\": {\"m\": 2, \"kg\": 1, \"s\": -3, 1e6: 1},\n \"GW\": {\"m\": 2, \"kg\": 1, \"s\": -3, 1e9: 1}\n },\n \"emf\": {\n \"V\": {\"m\": 2, \"kg\": 1, \"s\": -3, \"A\": -1}\n },\n \"capacitance\": {\n \"F\": {\"m\": -2, \"kg\": -1, \"s\": 4, \"A\": 2}\n },\n \"resistance\": {\n \"ohm\": {\"m\": 2, \"kg\": 1, \"s\": -3, \"A\": -2}\n },\n \"conductance\": {\n \"S\": {\"m\": -2, \"kg\": -1, \"s\": 3, \"A\": 2}\n },\n \"magnetic_flux\": {\n \"Wb\": {\"m\": 2, \"kg\": 1, \"s\": -2, \"A\": -1}\n }\n}\n\n\nALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items()))\nSUPPORTED_UNIT_NAMES = tuple([i for d in ALL_UNITS.values() for i in d.keys()])\n\n# Mapping unit name --> unit type (unit names must be unique).\n_UNAME2UTYPE = {}\nfor utype, d in ALL_UNITS.items():\n assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())\n _UNAME2UTYPE.update({uname: utype for uname in d})\ndel utype, d\n\n\ndef _get_si_unit(unit):\n unit_type = _UNAME2UTYPE[unit]\n si_unit = filter(lambda k: BASE_UNITS[unit_type][k] == 1,\n BASE_UNITS[unit_type].keys())\n return list(si_unit)[0], BASE_UNITS[unit_type][unit]\n\n\nclass UnitError(BaseException):\n \"\"\"\n Exception class for unit errors.\n \"\"\"\n\n\ndef check_mappings(u):\n for v in DERIVED_UNITS.values():\n for k2, v2 in v.items():\n if all([v2.get(ku, 0) == vu for ku, vu in u.items()]) and \\\n all([u.get(kv2, 0) == vv2 for kv2, vv2 in v2.items()]):\n return {k2: 1}\n return u\n\n\nclass Unit(collections.Mapping):\n \"\"\"\n Represents a unit, e.g., \"m\" for meters, etc. Supports compound units.\n Only integer powers are supported for units.\n \"\"\"\n Error = UnitError\n\n def __init__(self, unit_def):\n \"\"\"\n Constructs a unit.\n\n Args:\n unit_def: A definition for the unit. Either a mapping of unit to\n powers, e.g., {\"m\": 2, \"s\": -1} represents \"m^2 s^-1\",\n or simply as a string \"kg m^2 s^-1\". 
Note that the supported\n format uses \"^\" as the power operator and all units must be\n space-separated.\n \"\"\"\n\n if isinstance(unit_def, six.string_types):\n unit = collections.defaultdict(int)\n for m in re.finditer(\"([A-Za-z]+)\\s*\\^*\\s*([\\-0-9]*)\", unit_def):\n p = m.group(2)\n p = 1 if not p else int(p)\n k = m.group(1)\n unit[k] += p\n else:\n unit = {k: v for k, v in dict(unit_def).items() if v != 0}\n self._unit = check_mappings(unit)\n\n def __mul__(self, other):\n new_units = collections.defaultdict(int)\n for k, v in self.items():\n new_units[k] += v\n for k, v in other.items():\n new_units[k] += v\n return Unit(new_units)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __div__(self, other):\n new_units = collections.defaultdict(int)\n for k, v in self.items():\n new_units[k] += v\n for k, v in other.items():\n new_units[k] -= v\n return Unit(new_units)\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __pow__(self, i):\n return Unit({k: v * i for k, v in self.items()})\n\n def __iter__(self):\n return self._unit.__iter__()\n\n def __getitem__(self, i):\n return self._unit[i]\n\n def __len__(self):\n return len(self._unit)\n\n def __repr__(self):\n sorted_keys = sorted(self._unit.keys(),\n key=lambda k: (-self._unit[k], k))\n return \" \".join([\"{}^{}\".format(k, self._unit[k])\n if self._unit[k] != 1 else k\n for k in sorted_keys if self._unit[k] != 0])\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def as_base_units(self):\n \"\"\"\n Converts all units to base SI units, including derived units.\n\n Returns:\n (base_units_dict, scaling factor). base_units_dict will not\n contain any constants, which are gathered in the scaling factor.\n \"\"\"\n b = collections.defaultdict(int)\n factor = 1\n for k, v in self.items():\n derived = False\n for d in DERIVED_UNITS.values():\n if k in d:\n for k2, v2 in d[k].items():\n if isinstance(k2, Number):\n factor *= k2 ** (v2 * v)\n else:\n b[k2] += v2 * v\n derived = True\n break\n if not derived:\n si, f = _get_si_unit(k)\n b[si] += v\n factor *= f ** v\n return {k: v for k, v in b.items() if v != 0}, factor\n\n def get_conversion_factor(self, new_unit):\n \"\"\"\n Returns a conversion factor between this unit and a new unit.\n Compound units are supported, but must have the same powers in each\n unit type.\n\n Args:\n new_unit: The new unit.\n \"\"\"\n uo_base, ofactor = self.as_base_units\n un_base, nfactor = Unit(new_unit).as_base_units\n units_new = sorted(un_base.items(),\n key=lambda d: _UNAME2UTYPE[d[0]])\n units_old = sorted(uo_base.items(),\n key=lambda d: _UNAME2UTYPE[d[0]])\n factor = ofactor / nfactor\n for uo, un in zip(units_old, units_new):\n if uo[1] != un[1]:\n raise UnitError(\"Units %s and %s are not compatible!\" % (uo, un))\n c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]\n factor *= (c[uo[0]] / c[un[0]]) ** uo[1]\n return factor\n\n\nclass FloatWithUnit(float):\n \"\"\"\n Subclasses float to attach a unit type. Typically, you should use the\n pre-defined unit type subclasses such as Energy, Length, etc. instead of\n using FloatWithUnit directly.\n\n Supports conversion, addition and subtraction of the same unit type. E.g.,\n 1 m + 20 cm will be automatically converted to 1.2 m (units follow the\n leftmost quantity). 
Note that FloatWithUnit does not override the eq\n method for float, i.e., units are not checked when testing for equality.\n The reason is to allow this class to be used transparently wherever floats\n are expected.\n\n >>> e = Energy(1.1, \"Ha\")\n >>> a = Energy(1.1, \"Ha\")\n >>> b = Energy(3, \"eV\")\n >>> c = a + b\n >>> print(c)\n 1.2102479761938871 Ha\n >>> c.to(\"eV\")\n 32.932522246000005 eV\n \"\"\"\n Error = UnitError\n\n @classmethod\n def from_string(cls, s):\n \"\"\"\n Initialize a FloatWithUnit from a string. Example Memory.from_string(\"1. Mb\")\n \"\"\"\n # Extract num and unit string. \n s = s.strip()\n for i, char in enumerate(s):\n if char.isalpha() or char.isspace():\n break\n else:\n raise Exception(\"Unit is missing in string %s\" % s)\n num, unit = float(s[:i]), s[i:]\n\n # Find unit type (set it to None if it cannot be detected)\n for unit_type, d in BASE_UNITS.items():\n if unit in d:\n break\n else:\n unit_type = None\n\n return cls(num, unit, unit_type=unit_type)\n\n def __new__(cls, val, unit, unit_type=None):\n new = float.__new__(cls, val)\n new._unit = Unit(unit)\n new._unit_type = unit_type\n return new\n\n def __init__(self, val, unit, unit_type=None):\n \"\"\"\n Initializes a float with unit.\n\n Args:\n val (float): Value\n unit (Unit): A unit. E.g., \"C\".\n unit_type (str): A type of unit. E.g., \"charge\"\n \"\"\"\n if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:\n raise UnitError(\n \"{} is not a supported unit for {}\".format(unit, unit_type))\n self._unit = Unit(unit)\n self._unit_type = unit_type\n\n def __repr__(self):\n return super(FloatWithUnit, self).__repr__()\n\n def __str__(self):\n s = super(FloatWithUnit, self).__str__()\n return \"{} {}\".format(s, self._unit)\n\n def __add__(self, other):\n if not hasattr(other, \"unit_type\"):\n return super(FloatWithUnit, self).__add__(other)\n if other.unit_type != self._unit_type:\n raise UnitError(\"Adding different types of units is not allowed\")\n val = other\n if other.unit != self._unit:\n val = other.to(self._unit)\n return FloatWithUnit(float(self) + val, unit_type=self._unit_type,\n unit=self._unit)\n\n def __sub__(self, other):\n if not hasattr(other, \"unit_type\"):\n return super(FloatWithUnit, self).__sub__(other)\n if other.unit_type != self._unit_type:\n raise UnitError(\"Subtracting different units is not allowed\")\n val = other\n if other.unit != self._unit:\n val = other.to(self._unit)\n return FloatWithUnit(float(self) - val, unit_type=self._unit_type,\n unit=self._unit)\n\n def __mul__(self, other):\n if not isinstance(other, FloatWithUnit):\n return FloatWithUnit(float(self) * other,\n unit_type=self._unit_type,\n unit=self._unit)\n return FloatWithUnit(float(self) * other, unit_type=None,\n unit=self._unit * other._unit)\n\n def __rmul__(self, other):\n if not isinstance(other, FloatWithUnit):\n return FloatWithUnit(float(self) * other,\n unit_type=self._unit_type,\n unit=self._unit)\n return FloatWithUnit(float(self) * other, unit_type=None,\n unit=self._unit * other._unit)\n\n def __pow__(self, i):\n return FloatWithUnit(float(self) ** i, unit_type=None,\n unit=self._unit ** i)\n\n def __div__(self, other):\n val = super(FloatWithUnit, self).__div__(other)\n if not isinstance(other, FloatWithUnit):\n return FloatWithUnit(val, unit_type=self._unit_type,\n unit=self._unit)\n return FloatWithUnit(val, unit_type=None,\n unit=self._unit / other._unit)\n\n def __truediv__(self, other):\n val = super(FloatWithUnit, self).__truediv__(other)\n if not 
isinstance(other, FloatWithUnit):\n return FloatWithUnit(val, unit_type=self._unit_type,\n unit=self._unit)\n return FloatWithUnit(val, unit_type=None,\n unit=self._unit / other._unit)\n\n def __neg__(self):\n return FloatWithUnit(super(FloatWithUnit, self).__neg__(),\n unit_type=self._unit_type,\n unit=self._unit)\n\n def __getnewargs__(self):\n \"\"\"Function used by pickle to recreate object.\"\"\"\n #print(self.__dict__)\n # FIXME\n # There's a problem with _unit_type if we try to unpickle objects from file.\n # since self._unit_type might not be defined. I think this is due to\n # the use of decorators (property and unitized). In particular I have problems with \"amu\"\n # likely due to weight in core.composition\n if hasattr(self, \"_unit_type\"):\n args = float(self), self._unit, self._unit_type\n else:\n args = float(self), self._unit, None\n\n return args\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"val\"] = float(self)\n #print(\"in getstate %s\" % state)\n return state\n\n def __setstate__(self, state):\n #print(\"in setstate %s\" % state)\n self._unit = state[\"_unit\"]\n\n @property\n def unit_type(self):\n return self._unit_type\n\n @property\n def unit(self):\n return self._unit\n\n def to(self, new_unit):\n \"\"\"\n Conversion to a new_unit. Right now, only supports 1 to 1 mapping of\n units of each type.\n\n Args:\n new_unit: New unit type.\n\n Returns:\n A FloatWithUnit object in the new units.\n\n Example usage:\n >>> e = Energy(1.1, \"eV\")\n >>> e = Energy(1.1, \"Ha\")\n >>> e.to(\"eV\")\n 29.932522246 eV\n \"\"\"\n return FloatWithUnit(\n self * self.unit.get_conversion_factor(new_unit),\n unit_type=self._unit_type,\n unit=new_unit)\n\n @property\n def as_base_units(self):\n \"\"\"\n Returns this FloatWithUnit in base SI units, including derived units.\n\n Returns:\n A FloatWithUnit object in base SI units\n \"\"\"\n return self.to(self.unit.as_base_units[0])\n\n\n @property\n def supported_units(self):\n \"\"\"\n Supported units for specific unit type.\n \"\"\"\n return tuple(ALL_UNITS[self._unit_type].keys())\n\n\nclass ArrayWithUnit(np.ndarray):\n \"\"\"\n Subclasses `numpy.ndarray` to attach a unit type. Typically, you should\n use the pre-defined unit type subclasses such as EnergyArray,\n LengthArray, etc. instead of using ArrayWithFloatWithUnit directly.\n\n Supports conversion, addition and subtraction of the same unit type. 
E.g.,\n 1 m + 20 cm will be automatically converted to 1.2 m (units follow the\n leftmost quantity).\n\n >>> a = EnergyArray([1, 2], \"Ha\")\n >>> b = EnergyArray([1, 2], \"eV\")\n >>> c = a + b\n >>> print(c)\n [ 1.03674933 2.07349865] Ha\n >>> c.to(\"eV\")\n array([ 28.21138386, 56.42276772]) eV\n \"\"\"\n Error = UnitError\n\n def __new__(cls, input_array, unit, unit_type=None):\n # Input array is an already formed ndarray instance\n # We first cast to be our class type\n obj = np.asarray(input_array).view(cls)\n # add the new attributes to the created instance\n obj._unit = Unit(unit)\n obj._unit_type = unit_type\n return obj\n\n def __array_finalize__(self, obj):\n \"\"\"\n See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for\n comments.\n \"\"\"\n if obj is None:\n return\n self._unit = getattr(obj, \"_unit\", None)\n self._unit_type = getattr(obj, \"_unit_type\", None)\n\n #TODO abstract base class property?\n @property\n def unit_type(self):\n return self._unit_type\n\n #TODO abstract base class property?\n @property\n def unit(self):\n return self._unit\n\n def __reduce__(self):\n #print(\"in reduce\")\n reduce = list(super(ArrayWithUnit, self).__reduce__())\n #print(\"unit\",self._unit)\n #print(reduce[2])\n reduce[2] = {\"np_state\": reduce[2], \"_unit\": self._unit}\n return tuple(reduce)\n\n def __setstate__(self, state):\n #print(\"in setstate %s\" % str(state))\n super(ArrayWithUnit, self).__setstate__(state[\"np_state\"])\n self._unit = state[\"_unit\"]\n\n def __repr__(self):\n return \"{} {}\".format(np.array(self).__repr__(), self.unit)\n\n def __str__(self):\n return \"{} {}\".format(np.array(self).__str__(), self.unit)\n\n def __add__(self, other):\n if hasattr(other, \"unit_type\"):\n if other.unit_type != self.unit_type:\n raise UnitError(\"Adding different types of units is\"\n \" not allowed\")\n\n if other.unit != self.unit:\n other = other.to(self.unit)\n\n return self.__class__(np.array(self) + np.array(other),\n unit_type=self.unit_type, unit=self.unit)\n\n def __sub__(self, other):\n if hasattr(other, \"unit_type\"):\n if other.unit_type != self.unit_type:\n raise UnitError(\"Subtracting different units is not allowed\")\n\n if other.unit != self.unit:\n other = other.to(self.unit)\n\n return self.__class__(np.array(self) - np.array(other),\n unit_type=self.unit_type, unit=self.unit)\n\n def __mul__(self, other):\n # FIXME\n # Here we have the most important difference between FloatWithUnit and\n # ArrayWithFloatWithUnit:\n # If other does not have units, I return an object with the same units\n # as self.\n # if other *has* units, I return an object *without* units since\n # taking into account all the possible derived quantities would be\n # too difficult.\n # Moreover Energy(1.0) * Time(1.0, \"s\") returns 1.0 Ha that is a\n # bit misleading.\n # Same protocol for __div__\n if not hasattr(other, \"unit_type\"):\n return self.__class__(np.array(self).__mul__(np.array(other)),\n unit_type=self._unit_type, unit=self._unit)\n else:\n # Cannot use super since it returns an instance of self.__class__\n # while here we want a bare numpy array.\n return self.__class__(\n np.array(self).__mul__(np.array(other)),\n unit=self.unit * other.unit)\n\n def __rmul__(self, other):\n if not hasattr(other, \"unit_type\"):\n return self.__class__(np.array(self).__rmul__(np.array(other)),\n unit_type=self._unit_type, unit=self._unit)\n else:\n return self.__class__(\n np.array(self).__rmul__(np.array(other)),\n unit=self.unit * other.unit)\n\n def __div__(self, 
other):\n if not hasattr(other, \"unit_type\"):\n return self.__class__(np.array(self).__div__(np.array(other)),\n unit_type=self._unit_type, unit=self._unit)\n else:\n return self.__class__(\n np.array(self).__div__(np.array(other)),\n unit=self.unit/other.unit)\n\n def __truediv__(self, other):\n if not hasattr(other, \"unit_type\"):\n return self.__class__(np.array(self).__truediv__(np.array(other)),\n unit_type=self._unit_type, unit=self._unit)\n else:\n return self.__class__(\n np.array(self).__truediv__(np.array(other)),\n unit=self.unit / other.unit)\n\n def __neg__(self):\n return self.__class__(np.array(self).__neg__(),\n unit_type=self.unit_type, unit=self.unit)\n\n def to(self, new_unit):\n \"\"\"\n Conversion to a new_unit.\n\n Args:\n new_unit:\n New unit type.\n\n Returns:\n A ArrayWithFloatWithUnit object in the new units.\n\n Example usage:\n >>> e = EnergyArray([1, 1.1], \"Ha\")\n >>> e.to(\"eV\")\n array([ 27.21138386, 29.93252225]) eV\n \"\"\"\n return self.__class__(\n np.array(self) * self.unit.get_conversion_factor(new_unit),\n unit_type=self.unit_type, unit=new_unit)\n\n @property\n def as_base_units(self):\n \"\"\"\n Returns this ArrayWithUnit in base SI units, including derived units.\n\n Returns:\n An ArrayWithUnit object in base SI units\n \"\"\"\n return self.to(self.unit.as_base_units[0])\n\n #TODO abstract base class property?\n @property\n def supported_units(self):\n \"\"\"\n Supported units for specific unit type.\n \"\"\"\n return ALL_UNITS[self.unit_type]\n\n #TODO abstract base class method?\n def conversions(self):\n \"\"\"\n Returns a string showing the available conversions.\n Useful tool in interactive mode.\n \"\"\"\n return \"\\n\".join(str(self.to(unit)) for unit in self.supported_units)\n\n\ndef _my_partial(func, *args, **kwargs):\n \"\"\"\n Partial returns a partial object and therefore we cannot inherit class\n methods defined in FloatWithUnit. This function calls partial and patches\n the new class before returning.\n \"\"\"\n newobj = partial(func, *args, **kwargs)\n # monkey patch\n newobj.from_string = FloatWithUnit.from_string\n return newobj\n\n\nEnergy = partial(FloatWithUnit, unit_type=\"energy\")\n\"\"\"\nA float with an energy unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.\n\"\"\"\nEnergyArray = partial(ArrayWithUnit, unit_type=\"energy\")\n\nLength = partial(FloatWithUnit, unit_type=\"length\")\n\"\"\"\nA float with a length unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is\n raised.\n\"\"\"\nLengthArray = partial(ArrayWithUnit, unit_type=\"length\")\n\nMass = partial(FloatWithUnit, unit_type=\"mass\")\n\"\"\"\nA float with a mass unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is\n raised.\n\"\"\"\nMassArray = partial(ArrayWithUnit, unit_type=\"mass\")\n\nTemp = partial(FloatWithUnit, unit_type=\"temperature\")\n\"\"\"\nA float with a temperature unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., K. Only K (kelvin) is supported.\n\"\"\"\nTempArray = partial(ArrayWithUnit, unit_type=\"temperature\")\n\nTime = partial(FloatWithUnit, unit_type=\"time\")\n\"\"\"\nA float with a time unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., s, min, h. 
Must be valid unit or UnitError is\n raised.\n\"\"\"\nTimeArray = partial(ArrayWithUnit, unit_type=\"time\")\n\nCharge = partial(FloatWithUnit, unit_type=\"charge\")\n\"\"\"\nA float with a charge unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError\n is raised.\n\"\"\"\nChargeArray = partial(ArrayWithUnit, unit_type=\"charge\")\n\n\nMemory = _my_partial(FloatWithUnit, unit_type=\"memory\")\n\"\"\"\nA float with a memory unit.\n\nArgs:\n val (float): Value\n unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError\n is raised.\n\"\"\"\n\n\ndef obj_with_unit(obj, unit):\n \"\"\"\n Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of\n objects with units if obj is a dict, else an instance of\n `ArrayWithFloatWithUnit`.\n\n Args:\n unit: Specific units (eV, Ha, m, ang, etc.).\n \"\"\"\n unit_type = _UNAME2UTYPE[unit]\n\n if isinstance(obj, numbers.Number):\n return FloatWithUnit(obj, unit=unit, unit_type=unit_type)\n elif isinstance(obj, collections.Mapping):\n return {k: obj_with_unit(v, unit) for k,v in obj.items()}\n else:\n return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)\n\n\ndef unitized(unit):\n \"\"\"\n Useful decorator to assign units to the output of a function. You can also\n use it to standardize the output units of a function that already returns\n a FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences\n are assigned the same unit. It works with Python sequences only. The creation\n of numpy arrays loses all unit information. For mapping types, the values\n are assigned units.\n\n Args:\n unit: Specific unit (eV, Ha, m, ang, etc.).\n\n Example usage::\n\n @unitized(unit=\"kg\")\n def get_mass():\n return 123.45\n\n \"\"\"\n def wrap(f):\n def wrapped_f(*args, **kwargs):\n val = f(*args, **kwargs)\n unit_type = _UNAME2UTYPE[unit]\n\n if isinstance(val, FloatWithUnit) or isinstance(val, ArrayWithUnit):\n return val.to(unit)\n\n elif isinstance(val, collections.Sequence):\n # TODO: why don't we return a ArrayWithUnit?\n # This complicated way is to ensure the sequence type is\n # preserved (list or tuple).\n return val.__class__([FloatWithUnit(i, unit_type=unit_type,\n unit=unit) for i in val])\n elif isinstance(val, collections.Mapping):\n for k, v in val.items():\n val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)\n elif isinstance(val, numbers.Number):\n return FloatWithUnit(val, unit_type=unit_type, unit=unit)\n elif val is None:\n pass\n else:\n raise TypeError(\"Don't know how to assign units to %s\" % str(val))\n return val\n return wrapped_f\n return wrap\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n"
] | [
[
"numpy.array",
"numpy.asarray"
]
] |
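The row above is a units module that attaches unit metadata to `float` and `numpy.ndarray` subclasses; the only NumPy calls it makes are `numpy.array` and `numpy.asarray`, hence the two entries in its `apis` column. Below is a minimal sketch of the same `__new__` / `__array_finalize__` subclassing pattern, assuming only NumPy; the class and attribute names are illustrative and not taken from the module.

```python
import numpy as np

class TaggedArray(np.ndarray):
    """Minimal ndarray subclass carrying a 'unit' attribute.

    Illustrative sketch of the pattern used by ArrayWithUnit in the row above;
    the real class additionally validates units and overrides arithmetic.
    """

    def __new__(cls, input_array, unit=None):
        # Cast the input to our subclass; np.asarray avoids a copy when possible.
        obj = np.asarray(input_array).view(cls)
        obj.unit = unit
        return obj

    def __array_finalize__(self, obj):
        # Called for views and slices as well, so the attribute must be propagated here.
        if obj is None:
            return
        self.unit = getattr(obj, "unit", None)

a = TaggedArray([1.0, 2.0, 3.0], unit="eV")
print(a[:2].unit)  # slices keep the tag: prints "eV"
```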
rossbar/scipy-lecture-notes | [
"7f74e6925721c43bd81bf0bee34b4805ac4a3b57",
"7f74e6925721c43bd81bf0bee34b4805ac4a3b57"
] | [
"advanced/image_processing/examples/plot_numpy_array.py",
"intro/numpy/examples/plot_elephant.py"
] | [
"\"\"\"\nImage manipulation and numpy arrays\n====================================\n\nThis example shows how to do image manipulation using common numpy arrays\ntricks.\n\n\"\"\"\n\nimport numpy as np\nimport scipy\nimport scipy.misc\nimport matplotlib.pyplot as plt\n\nface = scipy.misc.face(gray=True)\nface[10:13, 20:23]\nface[100:120] = 255\n\nlx, ly = face.shape\nX, Y = np.ogrid[0:lx, 0:ly]\nmask = (X - lx/2)**2 + (Y - ly/2)**2 > lx*ly/4\nface[mask] = 0\nface[range(400), range(400)] = 255\n\nplt.figure(figsize=(3, 3))\nplt.axes([0, 0, 1, 1])\nplt.imshow(face, cmap=plt.cm.gray)\nplt.axis('off')\n\nplt.show()\n",
"\"\"\"\nReading and writing an elephant\n===============================\n\nRead and write images\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#################################\n# original figure\n#################################\n\nplt.figure()\nimg = plt.imread('../data/elephant.png')\nplt.imshow(img)\n\n#################################\n# red channel displayed in grey\n#################################\n\nplt.figure()\nimg_red = img[:, :, 0]\nplt.imshow(img_red, cmap=plt.cm.gray)\n\n#################################\n# lower resolution\n#################################\n\nplt.figure()\nimg_tiny = img[::6, ::6]\nplt.imshow(img_tiny, interpolation='nearest') \n\nplt.show()\n"
] | [
[
"scipy.misc.face",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.imread",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] |
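The two scipy-lecture-notes examples above manipulate images purely through array indexing: `plot_numpy_array.py` builds a circular mask with `np.ogrid` and uses it for fancy indexing, while `plot_elephant.py` slices channels and strides for downsampling. Below is a self-contained sketch of the masking trick, using a synthetic array so it runs without `scipy.misc.face`.

```python
import numpy as np
import matplotlib.pyplot as plt

# Synthetic grayscale "image" so the example runs without scipy installed.
img = np.random.default_rng(0).integers(0, 255, size=(256, 256)).astype(float)

lx, ly = img.shape
X, Y = np.ogrid[0:lx, 0:ly]
# Boolean mask selecting everything outside a centred disc, as in the row above.
mask = (X - lx / 2) ** 2 + (Y - ly / 2) ** 2 > lx * ly / 4
img[mask] = 0

plt.figure(figsize=(3, 3))
plt.imshow(img, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
```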
Jacob-Barhak/panel | [
"04cad38ea703e4e69fb76f063a27f4ffe40688e8"
] | [
"panel/reactive.py"
] | [
"\"\"\"\nDeclares Syncable and Reactive classes which provides baseclasses\nfor Panel components which sync their state with one or more bokeh\nmodels rendered on the frontend.\n\"\"\"\n\nimport difflib\nimport sys\nimport threading\n\nfrom collections import namedtuple\nfrom functools import partial\n\nimport numpy as np\nimport param\n\nfrom bokeh.models import LayoutDOM\nfrom tornado import gen\n\nfrom .config import config\nfrom .io.callbacks import PeriodicCallback\nfrom .io.model import hold\nfrom .io.notebook import push, push_on_root\nfrom .io.server import unlocked\nfrom .io.state import state\nfrom .util import edit_readonly, updating\nfrom .viewable import Renderable, Viewable\n\nLinkWatcher = namedtuple(\"Watcher\",\"inst cls fn mode onlychanged parameter_names what queued target links transformed bidirectional_watcher\")\n\n\nclass Syncable(Renderable):\n \"\"\"\n Syncable is an extension of the Renderable object which can not\n only render to a bokeh model but also sync the parameters on the\n object with the properties on the model.\n\n In order to bi-directionally link parameters with bokeh model\n instances the _link_params and _link_props methods define\n callbacks triggered when either the parameter or bokeh property\n values change. Since there may not be a 1-to-1 mapping between\n parameter and the model property the _process_property_change and\n _process_param_change may be overridden to apply any necessary\n transformations.\n \"\"\"\n\n # Timeout if a notebook comm message is swallowed\n _timeout = 20000\n\n # Timeout before the first event is processed\n _debounce = 50\n\n # Any parameters that require manual updates handling for the models\n # e.g. parameters which affect some sub-model\n _manual_params = []\n\n # Mapping from parameter name to bokeh model property name\n _rename = {}\n\n # Allows defining a mapping from model property name to a JS code\n # snippet that transforms the object before serialization\n _js_transforms = {}\n\n # Transforms from input value to bokeh property value\n _source_transforms = {}\n _target_transforms = {}\n\n __abstract = True\n\n def __init__(self, **params):\n super().__init__(**params)\n\n # Useful when updating model properties which trigger potentially\n # recursive events\n self._updating = False\n\n # A dictionary of current property change events\n self._events = {}\n\n # Any watchers associated with links between two objects\n self._links = []\n self._link_params()\n\n # A dictionary of bokeh property changes being processed\n self._changing = {}\n\n # Sets up watchers to process manual updates to models\n if self._manual_params:\n self.param.watch(self._update_manual, self._manual_params)\n\n #----------------------------------------------------------------\n # Model API\n #----------------------------------------------------------------\n\n def _process_property_change(self, msg):\n \"\"\"\n Transform bokeh model property changes into parameter updates.\n Should be overridden to provide appropriate mapping between\n parameter value and bokeh model change. By default uses the\n _rename class level attribute to map between parameter and\n property names.\n \"\"\"\n inverted = {v: k for k, v in self._rename.items()}\n return {inverted.get(k, k): v for k, v in msg.items()}\n\n def _process_param_change(self, msg):\n \"\"\"\n Transform parameter changes into bokeh model property updates.\n Should be overridden to provide appropriate mapping between\n parameter value and bokeh model change. 
By default uses the\n _rename class level attribute to map between parameter and\n property names.\n \"\"\"\n properties = {self._rename.get(k, k): v for k, v in msg.items()\n if self._rename.get(k, False) is not None}\n if 'width' in properties and self.sizing_mode is None:\n properties['min_width'] = properties['width']\n if 'height' in properties and self.sizing_mode is None:\n properties['min_height'] = properties['height']\n return properties\n\n @property\n def _linkable_params(self):\n \"\"\"\n Parameters that can be linked in JavaScript via source\n transforms.\n \"\"\"\n return [p for p in self._synced_params if self._rename.get(p, False) is not None\n and self._source_transforms.get(p, False) is not None] + ['loading']\n\n @property\n def _synced_params(self):\n \"\"\"\n Parameters which are synced with properties using transforms\n applied in the _process_param_change method.\n \"\"\"\n ignored = ['default_layout', 'loading']\n return [p for p in self.param if p not in self._manual_params+ignored]\n\n def _init_params(self):\n return {k: v for k, v in self.param.get_param_values()\n if k in self._synced_params and v is not None}\n\n def _link_params(self):\n params = self._synced_params\n if params:\n watcher = self.param.watch(self._param_change, params)\n self._callbacks.append(watcher)\n\n def _link_props(self, model, properties, doc, root, comm=None):\n ref = root.ref['id']\n if config.embed:\n return\n\n for p in properties:\n if isinstance(p, tuple):\n _, p = p\n if comm:\n model.on_change(p, partial(self._comm_change, doc, ref, comm))\n else:\n model.on_change(p, partial(self._server_change, doc, ref))\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n \"\"\"\n Method for handling any manual update events, i.e. 
events triggered\n by changes in the manual params.\n \"\"\"\n\n def _update_manual(self, *events):\n for ref, (model, parent) in self._models.items():\n if ref not in state._views or ref in state._fake_roots:\n continue\n viewable, root, doc, comm = state._views[ref]\n if comm or state._unblocked(doc):\n with unlocked():\n self._manual_update(events, model, doc, root, parent, comm)\n if comm and 'embedded' not in root.tags:\n push(doc, comm)\n else:\n cb = partial(self._manual_update, events, model, doc, root, parent, comm)\n if doc.session_context:\n doc.add_next_tick_callback(cb)\n else:\n cb()\n\n def _update_model(self, events, msg, root, model, doc, comm):\n self._changing[root.ref['id']] = [\n attr for attr, value in msg.items()\n if not model.lookup(attr).property.matches(getattr(model, attr), value)\n ]\n try:\n model.update(**msg)\n finally:\n del self._changing[root.ref['id']]\n\n def _cleanup(self, root):\n super()._cleanup(root)\n ref = root.ref['id']\n self._models.pop(ref, None)\n comm, client_comm = self._comms.pop(ref, (None, None))\n if comm:\n try:\n comm.close()\n except Exception:\n pass\n if client_comm:\n try:\n client_comm.close()\n except Exception:\n pass\n\n def _param_change(self, *events):\n msgs = []\n for event in events:\n msg = self._process_param_change({event.name: event.new})\n if msg:\n msgs.append(msg)\n\n events = {event.name: event for event in events}\n msg = {k: v for msg in msgs for k, v in msg.items()}\n if not msg:\n return\n\n for ref, (model, parent) in self._models.items():\n if ref not in state._views or ref in state._fake_roots:\n continue\n viewable, root, doc, comm = state._views[ref]\n if comm or not doc.session_context or state._unblocked(doc):\n with unlocked():\n self._update_model(events, msg, root, model, doc, comm)\n if comm and 'embedded' not in root.tags:\n push(doc, comm)\n else:\n cb = partial(self._update_model, events, msg, root, model, doc, comm)\n doc.add_next_tick_callback(cb)\n\n def _process_events(self, events):\n with edit_readonly(state):\n state.busy = True\n try:\n with edit_readonly(self):\n self.param.set_param(**self._process_property_change(events))\n finally:\n with edit_readonly(state):\n state.busy = False\n\n @gen.coroutine\n def _change_coroutine(self, doc=None):\n self._change_event(doc)\n\n def _change_event(self, doc=None):\n try:\n state.curdoc = doc\n thread = threading.current_thread()\n thread_id = thread.ident if thread else None\n state._thread_id = thread_id\n events = self._events\n self._events = {}\n self._process_events(events)\n finally:\n state.curdoc = None\n state._thread_id = None\n\n def _comm_change(self, doc, ref, comm, attr, old, new):\n if attr in self._changing.get(ref, []):\n self._changing[ref].remove(attr)\n return\n\n with hold(doc, comm=comm):\n self._process_events({attr: new})\n\n def _server_change(self, doc, ref, attr, old, new):\n if attr in self._changing.get(ref, []):\n self._changing[ref].remove(attr)\n return\n\n state._locks.clear()\n processing = bool(self._events)\n self._events.update({attr: new})\n if not processing:\n if doc.session_context:\n doc.add_timeout_callback(partial(self._change_coroutine, doc), self._debounce)\n else:\n self._change_event(doc)\n\n\nclass Reactive(Syncable, Viewable):\n \"\"\"\n Reactive is a Viewable object that also supports syncing between\n the objects parameters and the underlying bokeh model either via\n the defined pyviz_comms.Comm type or using bokeh server.\n\n In addition it defines various methods which make it easy to link\n 
the parameters to other objects.\n \"\"\"\n\n #----------------------------------------------------------------\n # Public API\n #----------------------------------------------------------------\n\n def add_periodic_callback(self, callback, period=500, count=None,\n timeout=None, start=True):\n \"\"\"\n Schedules a periodic callback to be run at an interval set by\n the period. Returns a PeriodicCallback object with the option\n to stop and start the callback.\n\n Arguments\n ---------\n callback: callable\n Callable function to be executed at periodic interval.\n period: int\n Interval in milliseconds at which callback will be executed.\n count: int\n Maximum number of times callback will be invoked.\n timeout: int\n Timeout in seconds when the callback should be stopped.\n start: boolean (default=True)\n Whether to start callback immediately.\n\n Returns\n -------\n Return a PeriodicCallback object with start and stop methods.\n \"\"\"\n self.param.warning(\n \"Calling add_periodic_callback on a Panel component is \"\n \"deprecated and will be removed in the next minor release. \"\n \"Use the pn.state.add_periodic_callback API instead.\"\n )\n cb = PeriodicCallback(callback=callback, period=period,\n count=count, timeout=timeout)\n if start:\n cb.start()\n return cb\n\n def link(self, target, callbacks=None, bidirectional=False, **links):\n \"\"\"\n Links the parameters on this object to attributes on another\n object in Python. Supports two modes, either specify a mapping\n between the source and target object parameters as keywords or\n provide a dictionary of callbacks which maps from the source\n parameter to a callback which is triggered when the parameter\n changes.\n\n Arguments\n ---------\n target: object\n The target object of the link.\n callbacks: dict\n Maps from a parameter in the source object to a callback.\n bidirectional: boolean\n Whether to link source and target bi-directionally\n **links: dict\n Maps between parameters on this object to the parameters\n on the supplied object.\n \"\"\"\n if links and callbacks:\n raise ValueError('Either supply a set of parameters to '\n 'link as keywords or a set of callbacks, '\n 'not both.')\n elif not links and not callbacks:\n raise ValueError('Declare parameters to link or a set of '\n 'callbacks, neither was defined.')\n elif callbacks and bidirectional:\n raise ValueError('Bidirectional linking not supported for '\n 'explicit callbacks. 
You must define '\n 'separate callbacks for each direction.')\n\n _updating = []\n def link(*events):\n for event in events:\n if event.name in _updating: continue\n _updating.append(event.name)\n try:\n if callbacks:\n callbacks[event.name](target, event)\n else:\n setattr(target, links[event.name], event.new)\n finally:\n _updating.pop(_updating.index(event.name))\n params = list(callbacks) if callbacks else list(links)\n cb = self.param.watch(link, params)\n\n bidirectional_watcher = None\n if bidirectional:\n _reverse_updating = []\n reverse_links = {v: k for k, v in links.items()}\n def reverse_link(*events):\n for event in events:\n if event.name in _reverse_updating: continue\n _reverse_updating.append(event.name)\n try:\n setattr(self, reverse_links[event.name], event.new)\n finally:\n _reverse_updating.remove(event.name)\n bidirectional_watcher = target.param.watch(reverse_link, list(reverse_links))\n\n link = LinkWatcher(*tuple(cb)+(target, links, callbacks is not None, bidirectional_watcher))\n self._links.append(link)\n return cb\n\n def controls(self, parameters=[], jslink=True):\n \"\"\"\n Creates a set of widgets which allow manipulating the parameters\n on this instance. By default all parameters which support\n linking are exposed, but an explicit list of parameters can\n be provided.\n\n Arguments\n ---------\n parameters: list(str)\n An explicit list of parameters to return controls for.\n jslink: bool\n Whether to use jslinks instead of Python based links.\n This does not allow using all types of parameters.\n\n Returns\n -------\n A layout of the controls\n \"\"\"\n from .param import Param\n from .layout import Tabs, WidgetBox\n from .widgets import LiteralInput\n\n if parameters:\n linkable = parameters\n elif jslink:\n linkable = self._linkable_params + ['loading']\n else:\n linkable = list(self.param)\n\n params = [p for p in linkable if p not in Viewable.param]\n controls = Param(self.param, parameters=params, default_layout=WidgetBox,\n name='Controls')\n layout_params = [p for p in linkable if p in Viewable.param]\n if 'name' not in layout_params and self._rename.get('name', False) is not None and not parameters:\n layout_params.insert(0, 'name')\n style = Param(self.param, parameters=layout_params, default_layout=WidgetBox,\n name='Layout')\n if jslink:\n for p in params:\n widget = controls._widgets[p]\n widget.jslink(self, value=p, bidirectional=True)\n if isinstance(widget, LiteralInput):\n widget.serializer = 'json'\n for p in layout_params:\n widget = style._widgets[p]\n widget.jslink(self, value=p, bidirectional=p != 'loading')\n if isinstance(widget, LiteralInput):\n widget.serializer = 'json'\n\n if params and layout_params:\n return Tabs(controls.layout[0], style.layout[0])\n elif params:\n return controls.layout[0]\n return style.layout[0]\n\n def jscallback(self, args={}, **callbacks):\n \"\"\"\n Allows defining a JS callback to be triggered when a property\n changes on the source object. 
The keyword arguments define the\n properties that trigger a callback and the JS code that gets\n executed.\n\n Arguments\n ----------\n args: dict\n A mapping of objects to make available to the JS callback\n **callbacks: dict\n A mapping between properties on the source model and the code\n to execute when that property changes\n\n Returns\n -------\n callback: Callback\n The Callback which can be used to disable the callback.\n \"\"\"\n\n from .links import Callback\n for k, v in list(callbacks.items()):\n callbacks[k] = self._rename.get(v, v)\n return Callback(self, code=callbacks, args=args)\n\n def jslink(self, target, code=None, args=None, bidirectional=False, **links):\n \"\"\"\n Links properties on the source object to those on the target\n object in JS code. Supports two modes, either specify a\n mapping between the source and target model properties as\n keywords or provide a dictionary of JS code snippets which\n maps from the source parameter to a JS code snippet which is\n executed when the property changes.\n\n Arguments\n ----------\n target: HoloViews object or bokeh Model or panel Viewable\n The target to link the value to.\n code: dict\n Custom code which will be executed when the widget value\n changes.\n bidirectional: boolean\n Whether to link source and target bi-directionally\n **links: dict\n A mapping between properties on the source model and the\n target model property to link it to.\n\n Returns\n -------\n link: GenericLink\n The GenericLink which can be used unlink the widget and\n the target model.\n \"\"\"\n if links and code:\n raise ValueError('Either supply a set of properties to '\n 'link as keywords or a set of JS code '\n 'callbacks, not both.')\n elif not links and not code:\n raise ValueError('Declare parameters to link or a set of '\n 'callbacks, neither was defined.')\n if args is None:\n args = {}\n\n mapping = code or links\n for k in mapping:\n if k.startswith('event:'):\n continue\n elif hasattr(self, 'object') and isinstance(self.object, LayoutDOM):\n current = self.object\n for attr in k.split('.'):\n if not hasattr(current, attr):\n raise ValueError(f\"Could not resolve {k} on \"\n f\"{self.object} model. 
Ensure \"\n \"you jslink an attribute that \"\n \"exists on the bokeh model.\")\n current = getattr(current, attr)\n elif (k not in self.param and k not in list(self._rename.values())):\n matches = difflib.get_close_matches(k, list(self.param))\n if matches:\n matches = ' Similar parameters include: %r' % matches\n else:\n matches = ''\n raise ValueError(\"Could not jslink %r parameter (or property) \"\n \"on %s object because it was not found.%s\"\n % (k, type(self).__name__, matches))\n elif (self._source_transforms.get(k, False) is None or\n self._rename.get(k, False) is None):\n raise ValueError(\"Cannot jslink %r parameter on %s object, \"\n \"the parameter requires a live Python kernel \"\n \"to have an effect.\" % (k, type(self).__name__))\n\n if isinstance(target, Syncable) and code is None:\n for k, p in mapping.items():\n if k.startswith('event:'):\n continue\n elif p not in target.param and p not in list(target._rename.values()):\n matches = difflib.get_close_matches(p, list(target.param))\n if matches:\n matches = ' Similar parameters include: %r' % matches\n else:\n matches = ''\n raise ValueError(\"Could not jslink %r parameter (or property) \"\n \"on %s object because it was not found.%s\"\n % (p, type(self).__name__, matches))\n elif (target._source_transforms.get(p, False) is None or\n target._rename.get(p, False) is None):\n raise ValueError(\"Cannot jslink %r parameter on %s object \"\n \"to %r parameter on %s object. It requires \"\n \"a live Python kernel to have an effect.\"\n % (k, type(self).__name__, p, type(target).__name__))\n\n from .links import Link\n return Link(self, target, properties=links, code=code, args=args,\n bidirectional=bidirectional)\n\n\n\nclass SyncableData(Reactive):\n \"\"\"\n A baseclass for components which sync one or more data parameters\n with the frontend via a ColumnDataSource.\n \"\"\"\n\n selection = param.List(default=[], doc=\"\"\"\n The currently selected rows in the data.\"\"\")\n\n # Parameters which when changed require an update of the data \n _data_params = []\n\n _rename = {'selection': None}\n\n __abstract = True\n\n def __init__(self, **params):\n super().__init__(**params)\n self._data = None\n self._processed = None\n self.param.watch(self._validate, self._data_params)\n if self._data_params:\n self.param.watch(self._update_cds, self._data_params)\n self.param.watch(self._update_selected, 'selection')\n self._validate(None)\n self._update_cds()\n\n def _validate(self, event):\n \"\"\"\n Allows implementing validation for the data parameters.\n \"\"\"\n\n def _get_data(self):\n \"\"\"\n Implemented by subclasses converting data parameter(s) into\n a ColumnDataSource compatible data dictionary.\n\n Returns\n -------\n processed: object\n Raw data after pre-processing (e.g. 
after filtering)\n data: dict\n Dictionary of columns used to instantiate and update the\n ColumnDataSource\n \"\"\"\n\n def _update_column(self, column, array):\n \"\"\"\n Implemented by subclasses converting changes in columns to\n changes in the data parameter.\n\n Parameters\n ----------\n column: str\n The name of the column to update.\n array: numpy.ndarray\n The array data to update the column with.\n \"\"\"\n data = getattr(self, self._data_params[0])\n data[column] = array\n\n def _update_data(self, data):\n self.param.set_param(**{self._data_params[0]: data})\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n for event in events:\n if event.type == 'triggered' and self._updating:\n continue\n elif hasattr(self, '_update_' + event.name):\n getattr(self, '_update_' + event.name)(model)\n\n def _update_cds(self, *events):\n if self._updating:\n return\n self._processed, self._data = self._get_data()\n for ref, (m, _) in self._models.items():\n m.source.data = self._data\n push_on_root(ref)\n\n def _update_selected(self, *events, indices=None):\n if self._updating:\n return\n indices = self.selection if indices is None else indices\n for ref, (m, _) in self._models.items():\n m.source.selected.indices = indices\n push_on_root(ref)\n\n @updating\n def _stream(self, stream, rollover=None):\n for ref, (m, _) in self._models.items():\n m.source.stream(stream, rollover)\n push_on_root(ref)\n\n @updating\n def _patch(self, patch):\n for ref, (m, _) in self._models.items():\n m.source.patch(patch)\n push_on_root(ref)\n\n def stream(self, stream_value, rollover=None, reset_index=True):\n \"\"\"\n Streams (appends) the `stream_value` provided to the existing\n value in an efficient manner.\n\n Arguments\n ---------\n stream_value: (Union[pd.DataFrame, pd.Series, Dict])\n The new value(s) to append to the existing value.\n rollover: int\n A maximum column size, above which data from the start of\n the column begins to be discarded. If None, then columns\n will continue to grow unbounded.\n reset_index (bool, default=True):\n If True and the stream_value is a DataFrame, then its index\n is reset. 
Helps to keep the index unique and named `index`.\n\n Raises\n ------\n ValueError: Raised if the stream_value is not a supported type.\n\n Examples\n --------\n\n Stream a Series to a DataFrame\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> stream_value = pd.Series({\"x\": 4, \"y\": \"d\"})\n >>> obj.stream(stream_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 2, 4], 'y': ['a', 'b', 'd']}\n\n Stream a Dataframe to a Dataframe\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> stream_value = pd.DataFrame({\"x\": [3, 4], \"y\": [\"c\", \"d\"]})\n >>> obj.stream(stream_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 2, 3, 4], 'y': ['a', 'b', 'c', 'd']}\n\n Stream a Dictionary row to a DataFrame\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> tabulator = DataComponent(value)\n >>> stream_value = {\"x\": 4, \"y\": \"d\"}\n >>> obj.stream(stream_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 2, 4], 'y': ['a', 'b', 'd']}\n\n Stream a Dictionary of Columns to a Dataframe\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> stream_value = {\"x\": [3, 4], \"y\": [\"c\", \"d\"]}\n >>> obj.stream(stream_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 2, 3, 4], 'y': ['a', 'b', 'c', 'd']}\n \"\"\"\n if 'pandas' in sys.modules:\n import pandas as pd\n else:\n pd = None\n if pd and isinstance(stream_value, pd.DataFrame):\n if isinstance(self._processed, dict):\n self.stream(stream_value.to_dict(), rollover)\n return\n if reset_index:\n value_index_start = self._processed.index.max() + 1\n stream_value = stream_value.reset_index(drop=True)\n stream_value.index += value_index_start\n combined = pd.concat([self._processed, stream_value])\n if rollover is not None:\n combined = combined.iloc[-rollover:]\n with param.discard_events(self):\n self._update_data(combined)\n try:\n self._updating = True\n self.param.trigger(self._data_params[0])\n finally:\n self._updating = False\n try:\n self._updating = True\n self._stream(stream_value, rollover)\n finally:\n self._updating = False\n elif pd and isinstance(stream_value, pd.Series):\n if isinstance(self._processed, dict):\n self.stream({k: [v] for k, v in stream_value.to_dict().items()}, rollover)\n return\n value_index_start = self._processed.index.max() + 1\n self._processed.loc[value_index_start] = stream_value\n with param.discard_events(self):\n self._update_data(self._processed)\n self._updating = True\n try:\n self._stream(self._processed.iloc[-1:], rollover)\n finally:\n self._updating = False\n elif isinstance(stream_value, dict):\n if isinstance(self._processed, dict):\n if not all(col in stream_value for col in self._data):\n raise ValueError(\"Stream update must append to all columns.\")\n for col, array in stream_value.items():\n combined = np.concatenate([self._data[col], array])\n if rollover is not None:\n combined = combined[-rollover:]\n self._update_column(col, combined)\n self._updating = True\n try:\n self._stream(stream_value, rollover)\n finally:\n self._updating = False\n else:\n try:\n stream_value = pd.DataFrame(stream_value)\n except ValueError:\n stream_value = pd.Series(stream_value)\n self.stream(stream_value)\n else:\n raise ValueError(\"The stream value provided is not a DataFrame, Series or Dict!\")\n\n def patch(self, patch_value):\n \"\"\"\n Efficiently patches (updates) the existing value with the `patch_value`.\n\n 
Arguments\n ---------\n patch_value: (Union[pd.DataFrame, pd.Series, Dict])\n The value(s) to patch the existing value with.\n\n Raises\n ------\n ValueError: Raised if the patch_value is not a supported type.\n\n Examples\n --------\n\n Patch a DataFrame with a Dictionary row.\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> patch_value = {\"x\": [(0, 3)]}\n >>> obj.patch(patch_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [3, 2], 'y': ['a', 'b']}\n\n Patch a Dataframe with a Dictionary of Columns.\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> patch_value = {\"x\": [(slice(2), (3,4))], \"y\": [(1,'d')]}\n >>> obj.patch(patch_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [3, 4], 'y': ['a', 'd']}\n\n Patch a DataFrame with a Series. Please note the index is used in the update.\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> patch_value = pd.Series({\"index\": 1, \"x\": 4, \"y\": \"d\"})\n >>> obj.patch(patch_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [1, 4], 'y': ['a', 'd']}\n\n Patch a Dataframe with a Dataframe. Please note the index is used in the update.\n >>> value = pd.DataFrame({\"x\": [1, 2], \"y\": [\"a\", \"b\"]})\n >>> obj = DataComponent(value)\n >>> patch_value = pd.DataFrame({\"x\": [3, 4], \"y\": [\"c\", \"d\"]})\n >>> obj.patch(patch_value)\n >>> obj.value.to_dict(\"list\")\n {'x': [3, 4], 'y': ['c', 'd']}\n \"\"\"\n if self._processed is None or isinstance(patch_value, dict):\n self._patch(patch_value)\n return\n\n if 'pandas' in sys.modules:\n import pandas as pd\n else:\n pd = None\n data = getattr(self, self._data_params[0])\n if pd and isinstance(patch_value, pd.DataFrame):\n patch_value_dict = {}\n for column in patch_value.columns:\n patch_value_dict[column] = []\n for index in patch_value.index:\n patch_value_dict[column].append((index, patch_value.loc[index, column]))\n self.patch(patch_value_dict)\n elif pd and isinstance(patch_value, pd.Series):\n if \"index\" in patch_value: # Series orient is row\n patch_value_dict = {\n k: [(patch_value[\"index\"], v)] for k, v in patch_value.items()\n }\n patch_value_dict.pop(\"index\")\n else: # Series orient is column\n patch_value_dict = {\n patch_value.name: [(index, value) for index, value in patch_value.items()]\n }\n self.patch(patch_value_dict)\n elif isinstance(patch_value, dict):\n for k, v in patch_value.items():\n for index, patch in v:\n if pd and isinstance(self._processed, pd.DataFrame):\n data.loc[index, k] = patch\n else:\n data[k][index] = patch\n self._updating = True\n try:\n self._patch(patch_value)\n finally:\n self._updating = False\n else:\n raise ValueError(\n f\"Patching with a patch_value of type {type(patch_value).__name__} \"\n \"is not supported. 
Please provide a DataFrame, Series or Dict.\"\n )\n\n\nclass ReactiveData(SyncableData):\n \"\"\"\n An extension of SyncableData which bi-directionally syncs a data\n parameter between frontend and backend using a ColumnDataSource.\n \"\"\"\n\n def _update_selection(self, indices):\n self.selection = indices\n\n def _process_events(self, events):\n if 'data' in events:\n data = events.pop('data')\n if self._updating:\n data = {}\n _, old_data = self._get_data()\n updated = False\n for k, v in data.items():\n if k in self.indexes:\n continue\n k = self._renamed_cols.get(k, k)\n if isinstance(v, dict):\n v = [v for _, v in sorted(v.items(), key=lambda it: int(it[0]))]\n try:\n isequal = (old_data[k] == np.asarray(v)).all()\n except Exception:\n isequal = False\n if not isequal:\n self._update_column(k, v)\n updated = True\n if updated:\n self._updating = True\n try:\n self.param.trigger('value')\n finally:\n self._updating = False\n if 'indices' in events:\n self._updating = True\n try:\n self._update_selection(events.pop('indices'))\n finally:\n self._updating = False\n super(ReactiveData, self)._process_events(events)\n"
] | [
[
"pandas.Series",
"pandas.DataFrame",
"numpy.asarray",
"pandas.concat",
"numpy.concatenate"
]
] |
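In the panel row above, `SyncableData.stream` appends incoming rows to the tracked DataFrame with `pandas.concat` and then trims the result to `rollover` rows via `iloc[-rollover:]`. The standalone helper below shows just that append-and-trim step; the function name and signature are hypothetical, not Panel's API, and index handling is simplified relative to the original.

```python
import pandas as pd

def stream(existing: pd.DataFrame, new_rows: pd.DataFrame, rollover=None) -> pd.DataFrame:
    """Append new_rows to existing, keeping at most `rollover` rows.

    Illustrative only: mirrors the pd.concat + iloc[-rollover:] pattern used by
    SyncableData.stream above (which also resets and offsets the index).
    """
    combined = pd.concat([existing, new_rows], ignore_index=True)
    if rollover is not None:
        combined = combined.iloc[-rollover:]
    return combined

df = pd.DataFrame({"x": [1, 2], "y": ["a", "b"]})
df = stream(df, pd.DataFrame({"x": [3, 4], "y": ["c", "d"]}), rollover=3)
print(df.to_dict("list"))  # {'x': [2, 3, 4], 'y': ['b', 'c', 'd']}
```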
trhallam/digirock | [
"05b1199d741a384345a4930605be97369c9ec270"
] | [
"docs/examples/batzle_wang_1992.py"
] | [
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.13.6\n# kernelspec:\n# display_name: Python 3 (ipykernel)\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# __Recreate the work by Batzle and Wang 1992 to check `digirock.fluids.bw92` functionality.__\n#\n# Tony Hallam 2022\n\n# %% [markdown]\n# This notebook contains working code to test the functionality of `bw98.py` in `fluids` module of `digirock`, ensuring that the functions honor the work by B&W 1992.\n#\n# _Batzle, M., and Wang, Z. [1992]. Seismic properties of pore fluids. Geophysics, 57(11), 1396–1408._\n# [Available from the SEG](https://library.seg.org/doi/10.1190/1.1443207).\n\n# %%\nimport numpy as np\nfrom digirock.fluids import bw92\n\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\nrc(\"font\", size=14)\nfigsize = (15, 5)\n\n# %%\n# Input parameters has defined by B&W 1992 for plotting purporses\n\ntemp_ar = np.arange(10, 350, 5) # degC\npres_ar = np.arange(1, 100, 0.1) # Mpa\nsal_ar = np.arange(0, 0.3, 0.01)\npres = np.array([0.1, 10, 25, 50]) # Mpa\ntemps = np.array([10, 100, 200, 350]) # degC\ngsg = [0.6, 1.2] # gas Gravity\nor0 = [1.0, 0.88, 0.78] # oil density re 15.6degC\n\n# %% [markdown]\n# ## GAS\n#\n# Hydrocarbon density as a function of temperature and pressure using `bw92.gas_oga_density`, BW92 Eq 10a.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n\nfor G in gsg:\n for p in pres:\n ax[0].plot(temp_ar, bw92.gas_oga_density(temp_ar, p, G), label=f'G={G}, P={p}')\n \n for t in temps:\n ax[1].plot(pres_ar, bw92.gas_oga_density(t, pres_ar, G), label=f'G={G}, T={t}')\n \nax[0].set_xlim(0, 350)\nax[0].set_ylim(0, 0.6)\nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Density (g/cc)')\nax[0].legend()\n_ = ax[0].set_title('B&W 1992, Figure 2')\n\nax[1].set_xlim(0, 50)\nax[1].set_ylim(0, 0.6)\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_ylabel('Density (g/cc)')\n_ = ax[1].legend()\n\n\n# %% [markdown]\n# Gas adibatic bulk modulus using `bw92.gas_adiabatic_bulkmod`.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\n\nfor G in gsg:\n for p in pres:\n ax[0].plot(temp_ar, bw92.gas_adiabatic_bulkmod(temp_ar, p, G)*1000, label=f'G={G}, P={p}')\n \n for t in temps:\n ax[1].plot(pres_ar, bw92.gas_adiabatic_bulkmod(t, pres_ar, G)*1000, label=f'G={G}, T={t}')\n\nax[0].set_xlim(0, 350)\nax[0].set_ylim(0, 650)\nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Bulk Modulus (MPa)')\nax[0].legend()\nax[0].set_title('B&W 1992 - Figure 3')\n\nax[1].set_xlim(0, 50)\nax[1].set_xlabel('Pressure (MPa)')\n_ = ax[1].legend()\n\n# %% [markdown]\n# Gas viscosity using `bw92.gas_adiabatic_viscosity` using equations 12 and 13.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\n\n\nfor G in gsg:\n for p in pres:\n ax[0].plot(temp_ar, bw92.gas_adiabatic_viscosity(temp_ar, p, G), label=f'G={G}, P={p}')\n \n for t in temps:\n ax[1].plot(pres_ar, bw92.gas_adiabatic_viscosity(t, pres_ar, G), label=f'G={G}, T={t}')\n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Viscosity (centipoise)')\nax[0].set_xlim(0, 350)\nax[0].set_ylim(0, 0.09)\nax[0].set_title('B&W 1992 - Figure 4')\n\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_xlim(0, 50)\n_ = ax[1].legend()\n\n# %% [markdown]\n# ## OIL\n#\n# Dead oil density using `bw92.oil_density`, BW92 eq19.\n\n# %%\nfig, ax = 
plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\n\n\nfor p in pres:\n for r0 in or0:\n ax[0].plot(temp_ar, bw92.oil_density(r0, p, temp_ar), label=f'r0={r0}, P={p}')\n \n for t in temps:\n ax[1].plot(pres_ar, bw92.oil_density(r0, pres_ar, t), label=f'r0={r0}, T={t}')\n\nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Oil Density (g/cc)')\nax[0].set_xlim(0, 350)\nax[0].set_ylim(0.55, 1.05)\nax[0].set_title('B&W 1992 - Figure 5')\nax[0].legend()\n\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_xlim(0, 50)\n_ = ax[1].legend(loc=[1.1, 0])\n\n\n# %% [markdown]\n# Oil acoustic velocity using `bw92.oil_velocity`, BW92 eq 20a.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.5,5))\n\napi_ar = np.arange(0,70) # oil api\nrho0_ar = 141/ (api_ar + 131.5)\n\nax.plot(api_ar, bw92.oil_velocity(rho0_ar, 15.6, 1E-4, 0.6, 50))\nax.set_xlim(0, 70)\nax.set_ylim(1100, 1800)\n\nax.set_xlabel('Oil API')\nax.set_ylabel('Oil Velocity (m/s)')\nax.set_title('B&W 1992 - Figure 6')\n\n\n# %% [markdown]\n# Oil bulk modulus using `bw92.bulkmod`.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharey=True)\nax[0].set_xlim(0, 350)\nax[0].set_ylim(0, 30)\n\nfor r0 in or0:\n for p in pres:\n oil_rho = bw92.oil_density(r0, p, temp_ar)\n oil_vp = bw92.oil_velocity(r0, p, temp_ar, 0.6, 50)\n ax[0].plot(temp_ar, bw92.bulkmod(oil_rho*10, oil_vp),label=f\"{r0} {p}MPa\")\n \n for t in temps:\n oil_rho = bw92.oil_density(r0, pres_ar, t)\n oil_vp = bw92.oil_velocity(r0, pres_ar, t, 0.6, 50)\n ax[1].plot(pres_ar, bw92.bulkmod(oil_rho*10, oil_vp),label=f\"{r0} {t}degC\")\n \n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Oil Bulk Modlus (MPa)')\nax[0].set_title('B&W 1992 - Figure 7')\nax[0].legend()#cols=2)\n\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_xlim(0, 50)\n_ = ax[1].legend()\n\n# %% [markdown]\n# ## WATER\n#\n# Set up some parameters for plotting water.\n\n# %%\npresv = [50, 100, 110] # pressure MPa for velocity plots\npresrho = [9.81, 49, 98.1] # pressure MPa for density plots\npresk = [0.1, 50, 100] # pressure MPa for modulus plots\nsal = np.array([20000, 150000, 240000])/1000000 # ppm to weigh fraction\nsalk = np.array([0, 150000, 300000])/1000000 # ppm to weigh fraction\n\n\n\n\n# %% [markdown]\n# Pure water sonic velocity using `bw92.wat_velocity_pure` and pure water density using `bw92.wat_density_pure`. The parameters Batzle and Wang use from Wilson for pure water velocity were only calibrated to 100degC and 100MPa. So the behaviour above that is a bit odd, even though the plot in the 1992 paper looks good.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharex=True)\n\npresv = [50, 100, 130] # pressure MPa\n\ntvp_mesh, pvvt_mesh = np.meshgrid(temp_ar, presv)\nwvp_mesh = bw92.wat_velocity_pure(tvp_mesh, pvvt_mesh)\nwdp_mesh = bw92.wat_density_pure(tvp_mesh, pvvt_mesh)\n\nfor i, p in enumerate(presv):\n ax[0].plot(temp_ar, wvp_mesh[i, :], label=f\"{p}MPa\")\n ax[1].plot(temp_ar, wdp_mesh[i, :], label=f\"{p}MPa\")\n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Velocity (m/s)')\nax[0].set_title('B&W 1992 - Figure 12')\nax[0].legend()#cols=2)\nax[0].set_xlim(0, 350)\nax[0].set_ylim(500, 2000)\n\nax[1].set_xlabel('Temp (C)')\nax[1].set_ylabel('Density (g/cc)')\n_ = ax[1].legend()\n\n# %% [markdown]\n# Brine sonic velocity using `bw92.wat_velocity_brine` and `bw92.wat_density_brine`. 
Again, odd behaviour due to the influence of the pure water function on the brine velocity.\n\n# %%\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize, sharex=True)\n\npresv = [50, 100, 130] # pressure MPa\n\ndb1, db2, db3 = np.meshgrid(temp_ar, presrho, sal)\nwdb_mesh = bw92.wat_density_brine(db1, db2, db3)\nvb1, vb2, vb3 = np.meshgrid(temp_ar, presv, sal)\nwvb_mesh = bw92.wat_velocity_brine(vb1, vb2, vb3)\n\nfor i, p in enumerate(presv):\n ax[0].plot(temp_ar, wvb_mesh[i, :], label=f\"{p}MPa\")\n ax[1].plot(temp_ar, wdb_mesh[i, :], label=f\"{p}MPa\")\n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Velocity (m/s)')\nax[0].set_title('B&W 1992 - Figure 13')\nax[0].legend()#cols=2)\nax[0].set_xlim(0, 350)\nax[0].set_ylim(1000, 2500)\n\nax[1].set_xlabel('Temp (C)')\nax[1].set_ylabel('Density (g/cc)')\n_ = ax[1].legend()\n\n# %% [markdown]\n# Brine bulk modulus using `bw92.wat_bulkmod`. This relies on calculating the velocity and density first.\n\n# %% tags=[]\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n\nkb1, kb2, kb3 = np.meshgrid(temp_ar, presk, salk)\nkr = bw92.wat_density_brine(kb1, kb2, kb3)\nkv = bw92.wat_velocity_brine(kb1, kb2, kb3)\nwkb_mesh = bw92.wat_bulkmod(kr, kv)\n\nfor i, p in enumerate(presv):\n ax[0].plot(temp_ar, wkb_mesh[i, :], label=f\"{p}MPa\")\n \nkb1, kb2, kb3 = np.meshgrid(pres_ar, temps, salk)\nkr = bw92.wat_density_brine(kb2, kb1, kb3)\nkv = bw92.wat_velocity_brine(kb2, kb1, kb3)\nwkb_mesh = bw92.wat_bulkmod(kr, kv) \n\nfor i, t in enumerate(temps):\n ax[1].plot(pres_ar, wkb_mesh[i, :], label=f\"{t}degC\")\n \nax[0].set_xlabel('Temp (C)')\nax[0].set_ylabel('Bulk Modulus (GPa)')\nax[0].set_ylim(0.5, 5.5)\nax[0].set_title('B&W 1992 - Figure 14')\nax[0].legend()#cols=2)\n\nax[1].set_xlabel('Pressure (MPa)')\nax[1].set_ylabel('Bulk Modulus (GPa)')\n_ = ax[1].legend()\n\n# %% [markdown]\n# ## Other Methods\n#\n# For a full list of the BW92 equations available with `digirock` see the [`digirock.fluids.bw92` api](../api/fluid_methods.html#batzle-and-wang-92).\n"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.rc",
"numpy.arange",
"numpy.array",
"numpy.meshgrid"
]
] |
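The digirock notebook above evaluates Batzle-Wang fluid properties over temperature/pressure grids built with `numpy.meshgrid` and plots the curves with `matplotlib.pyplot.subplots`. Below is a minimal sketch of that grid-then-plot pattern; the property function is a stand-in so the example runs without digirock installed (in the notebook this role is played by the `bw92.*` functions).

```python
import numpy as np
import matplotlib.pyplot as plt

temp_ar = np.arange(10, 350, 5)          # degC, as in the notebook
pres = np.array([0.1, 10, 25, 50])       # MPa

def placeholder_property(temp, pres):
    # Stand-in for a bw92 property function; purely illustrative.
    return pres / (temp + 273.15)

# meshgrid broadcasts the 1-D axes into matching 2-D grids: one row per pressure.
T, P = np.meshgrid(temp_ar, pres)
vals = placeholder_property(T, P)

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.5, 5))
for i, p in enumerate(pres):
    ax.plot(temp_ar, vals[i, :], label=f"P={p} MPa")
ax.set_xlabel("Temp (C)")
ax.set_ylabel("placeholder property")
ax.legend()
plt.show()
```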
iamgroot42/opacus | [
"51708309e71c030aa2bf15d6dccc7bcbbe9ed570"
] | [
"examples/char-lstm-classification.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nfrom collections import Counter\nfrom pathlib import Path\nfrom statistics import mean\n\nimport torch\nimport torch.nn as nn\nfrom opacus import PrivacyEngine\nfrom opacus.layers import DPGRU, DPLSTM, DPRNN\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset\nfrom tqdm import tqdm\n\n\nparser = argparse.ArgumentParser(\n description=\"PyTorch Name language classification DP Training\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n)\nparser.add_argument(\n \"--data-root\",\n required=True,\n type=str,\n help=\"Path to training set of names (ie. ~/data/names/)\",\n)\nparser.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\",\n help=\"GPU ID for this process\",\n)\nparser.add_argument(\n \"-b\",\n \"--batch-size\",\n default=800,\n type=int,\n metavar=\"N\",\n help=\"mini-batch size\",\n)\nparser.add_argument(\n \"--mode\",\n default=\"lstm\",\n choices=[\"lstm\", \"gru\", \"rnn\"],\n help=\"recursive network type\",\n)\nparser.add_argument(\n \"--embedding-size\", default=64, type=int, help=\"Character embedding dimension\"\n)\nparser.add_argument(\n \"--hidden-size\", default=128, type=int, help=\"hidden state dimensions\"\n)\nparser.add_argument(\"--n-layers\", default=1, type=int, help=\"How many layers to use\")\nparser.add_argument(\n \"--test-every\",\n default=0,\n type=int,\n help=\"Run evaluation on the test every these many epochs\",\n)\nparser.add_argument(\n \"--bidirectional\",\n action=\"store_true\",\n default=False,\n help=\"If turned on, makes the RNN bidirectional\",\n)\nparser.add_argument(\n \"--learning-rate\",\n default=2.0,\n type=float,\n metavar=\"LR\",\n help=\"initial learning rate\",\n)\nparser.add_argument(\"--epochs\", type=int, default=10, help=\"Number of training epochs\")\nparser.add_argument(\n \"--train-split\",\n type=float,\n default=0.8,\n help=\"Fraction of data to utilize for training (rest for evaluation)\",\n)\nparser.add_argument(\n \"--sigma\",\n type=float,\n default=1.0,\n metavar=\"S\",\n help=\"Noise multiplier\",\n)\nparser.add_argument(\n \"-c\",\n \"--max-per-sample-grad-norm\",\n type=float,\n default=1.5,\n metavar=\"C\",\n help=\"Clip per-sample gradients to this norm\",\n)\nparser.add_argument(\n \"--disable-dp\",\n action=\"store_true\",\n default=False,\n help=\"Disable privacy training and just train with vanilla SGD\",\n)\nparser.add_argument(\n \"--secure-rng\",\n action=\"store_true\",\n default=False,\n help=\"Enable Secure RNG to have trustworthy privacy guarantees. 
Comes at a performance cost\",\n)\nparser.add_argument(\n \"--delta\",\n type=float,\n default=8e-5,\n metavar=\"D\",\n help=\"Target delta\",\n)\nparser.add_argument(\n \"--print-every\",\n type=int,\n default=5,\n help=\"Print the evaluation accuracy every these many iterations\",\n)\n\n\nclass CharByteEncoder(nn.Module):\n \"\"\"\n This encoder takes a UTF-8 string and encodes its bytes into a Tensor. It can also\n perform the opposite operation to check a result.\n\n Examples:\n\n >>> encoder = CharByteEncoder()\n >>> t = encoder('Ślusàrski') # returns tensor([256, 197, 154, 108, 117, 115, 195, 160, 114, 115, 107, 105, 257])\n >>> encoder.decode(t) # returns \"<s>Ślusàrski</s>\"\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.start_token = \"<s>\"\n self.end_token = \"</s>\"\n self.pad_token = \"<pad>\"\n\n self.start_idx = 256\n self.end_idx = 257\n self.pad_idx = 258\n\n def forward(self, s: str, pad_to=0) -> torch.LongTensor:\n \"\"\"\n Encodes a string. It will append a start token <s> (id=self.start_idx) and an end token </s>\n (id=self.end_idx).\n\n Args:\n s: The string to encode.\n pad_to: If not zero, pad by appending self.pad_idx until string is of length `pad_to`.\n Defaults to 0.\n\n Returns:\n The encoded LongTensor of indices.\n \"\"\"\n encoded = s.encode()\n n_pad = pad_to - len(encoded) if pad_to > len(encoded) else 0\n return torch.LongTensor(\n [self.start_idx]\n + [c for c in encoded] # noqa\n + [self.end_idx]\n + [self.pad_idx for _ in range(n_pad)]\n )\n\n def decode(self, char_ids_tensor: torch.LongTensor) -> str:\n \"\"\"\n The inverse of `forward`. Keeps the start, end and pad indices.\n \"\"\"\n char_ids = char_ids_tensor.cpu().detach().tolist()\n\n out = []\n buf = []\n for c in char_ids:\n if c < 256:\n buf.append(c)\n else:\n if buf:\n out.append(bytes(buf).decode())\n buf = []\n if c == self.start_idx:\n out.append(self.start_token)\n elif c == self.end_idx:\n out.append(self.end_token)\n elif c == self.pad_idx:\n out.append(self.pad_token)\n\n if buf: # in case some are left\n out.append(bytes(buf).decode())\n return \"\".join(out)\n\n def __len__(self):\n \"\"\"\n The length of our encoder space. 
This is fixed to 256 (one byte) + 3 special chars\n (start, end, pad).\n\n Returns:\n 259\n \"\"\"\n return 259\n\n\nclass NamesDataset(Dataset):\n def __init__(self, root):\n self.root = Path(root)\n\n self.labels = list({langfile.stem for langfile in self.root.iterdir()})\n self.labels_dict = {label: i for i, label in enumerate(self.labels)}\n self.encoder = CharByteEncoder()\n self.samples = self.construct_samples()\n\n def __getitem__(self, i):\n return self.samples[i]\n\n def __len__(self):\n return len(self.samples)\n\n def construct_samples(self):\n samples = []\n for langfile in self.root.iterdir():\n label_name = langfile.stem\n label_id = self.labels_dict[label_name]\n with open(langfile, \"r\") as fin:\n for row in fin:\n samples.append(\n (self.encoder(row.strip()), torch.tensor(label_id).long())\n )\n return samples\n\n def label_count(self):\n cnt = Counter()\n for _x, y in self.samples:\n label = self.labels[int(y)]\n cnt[label] += 1\n return cnt\n\n\nVOCAB_SIZE = 256 + 3 # 256 alternatives in one byte, plus 3 special characters.\n\n\nclass CharNNClassifier(nn.Module):\n def __init__(\n self,\n rnn_type,\n embedding_size,\n hidden_size,\n output_size,\n num_layers=1,\n bidirectional=False,\n vocab_size=VOCAB_SIZE,\n ):\n super().__init__()\n\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.vocab_size = vocab_size\n\n self.embedding = nn.Embedding(vocab_size, embedding_size)\n self.rnn = rnn_type(\n embedding_size,\n hidden_size,\n num_layers=num_layers,\n bidirectional=bidirectional,\n batch_first=True,\n )\n self.out_layer = nn.Linear(hidden_size, output_size)\n\n def forward(self, x, hidden=None):\n x = self.embedding(x) # -> [B, T, D]\n x, _ = self.rnn(x, hidden) # -> [B, T, H]\n x = x[:, -1, :] # -> [B, H]\n x = self.out_layer(x) # -> [B, C]\n return x\n\n\ndef padded_collate(batch, padding_idx=0):\n x = pad_sequence(\n [elem[0] for elem in batch], batch_first=True, padding_value=padding_idx\n )\n y = torch.stack([elem[1] for elem in batch]).long()\n\n return x, y\n\n\ndef train(\n model,\n criterion,\n optimizer,\n train_loader,\n epoch,\n privacy_engine,\n target_delta,\n device=\"cuda:0\",\n):\n model.train()\n\n accs = []\n losses = []\n for x, y in tqdm(train_loader):\n x = x.to(device)\n y = y.to(device)\n\n logits = model(x)\n loss = criterion(logits, y)\n loss.backward()\n\n optimizer.step()\n optimizer.zero_grad()\n\n preds = logits.argmax(-1)\n n_correct = float(preds.eq(y).sum())\n batch_accuracy = n_correct / len(y)\n\n accs.append(batch_accuracy)\n losses.append(float(loss))\n\n printstr = (\n f\"\\t Epoch {epoch}. 
Accuracy: {mean(accs):.6f} | Loss: {mean(losses):.6f}\"\n )\n try:\n epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(\n delta=target_delta\n )\n printstr += f\" | (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}\"\n except AttributeError:\n pass\n print(printstr)\n return\n\n\ndef test(model, test_loader, privacy_engine, target_delta, device=\"cuda:0\"):\n model.eval()\n\n accs = []\n with torch.no_grad():\n for x, y in tqdm(test_loader):\n x = x.to(device)\n y = y.to(device)\n\n preds = model(x).argmax(-1)\n n_correct = float(preds.eq(y).sum())\n batch_accuracy = n_correct / len(y)\n\n accs.append(batch_accuracy)\n mean_acc = mean(accs)\n printstr = \"\\n----------------------------\\n\" f\"Test Accuracy: {mean_acc:.6f}\"\n if privacy_engine:\n epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(\n delta=target_delta\n )\n printstr += f\" (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}\"\n print(printstr + \"\\n----------------------------\\n\")\n return mean_acc\n\n\ndef main():\n args = parser.parse_args()\n device = torch.device(args.device)\n ds = NamesDataset(args.data_root)\n train_len = int(args.train_split * len(ds))\n test_len = len(ds) - train_len\n\n print(f\"{train_len} samples for training, {test_len} for testing\")\n\n if args.secure_rng:\n try:\n import torchcsprng as prng\n except ImportError as e:\n msg = (\n \"To use secure RNG, you must install the torchcsprng package! \"\n \"Check out the instructions here: https://github.com/pytorch/csprng#installation\"\n )\n raise ImportError(msg) from e\n\n generator = prng.create_random_device_generator(\"/dev/urandom\")\n\n else:\n generator = None\n\n train_ds, test_ds = torch.utils.data.random_split(\n ds, [train_len, test_len], generator=generator\n )\n\n if args.mode == \"rnn\":\n rnn_type = DPRNN\n elif args.mode == \"gru\":\n rnn_type = DPGRU\n elif args.mode == \"lstm\":\n rnn_type = DPLSTM\n else:\n raise ValueError(f\"Invalid network type: {args.mode}\")\n\n model = CharNNClassifier(\n rnn_type,\n args.embedding_size,\n args.hidden_size,\n len(ds.labels),\n args.n_layers,\n args.bidirectional,\n )\n model = model.to(device)\n\n train_ds, test_ds = torch.utils.data.random_split(\n ds, [train_len, test_len], generator=generator\n )\n\n train_loader = DataLoader(\n train_ds,\n batch_size=args.batch_size,\n num_workers=1,\n pin_memory=True,\n collate_fn=padded_collate,\n )\n\n test_loader = DataLoader(\n test_ds,\n batch_size=2 * args.batch_size,\n shuffle=False,\n num_workers=1,\n pin_memory=True,\n collate_fn=padded_collate,\n )\n\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)\n\n if not args.disable_dp:\n privacy_engine = PrivacyEngine(secure_mode=args.secure_rng)\n model, optimizer, train_loader = privacy_engine.make_private(\n module=model,\n optimizer=optimizer,\n data_loader=train_loader,\n noise_multiplier=args.sigma,\n max_grad_norm=args.max_per_sample_grad_norm,\n )\n else:\n privacy_engine = None\n\n print(f\"Train stats ({args.mode}): \\n\")\n for epoch in tqdm(range(args.epochs)):\n train(\n model,\n criterion,\n optimizer,\n train_loader,\n epoch,\n privacy_engine,\n args.delta,\n device=device,\n )\n if args.test_every:\n if epoch % args.test_every == 0:\n test(model, test_loader, privacy_engine, args.delta, device=device)\n\n mean_acc = test(model, test_loader, privacy_engine, args.delta, device=device)\n torch.save(mean_acc, f\"run_results_chr_{args.mode}_classification.pt\")\n\n\nif 
__name__ == \"__main__\":\n main()\n"
] | [
[
"torch.nn.utils.rnn.pad_sequence",
"torch.utils.data.DataLoader",
"torch.stack",
"torch.nn.Linear",
"torch.save",
"torch.no_grad",
"torch.nn.Embedding",
"torch.nn.CrossEntropyLoss",
"torch.tensor",
"torch.utils.data.random_split",
"torch.device"
]
] |