Columns: markdown, code, output, license, path, repo_name
pick some samples to test
model.eval() with torch.no_grad(): text = 'premise: I am supposed to take food to a party tomorrow. initial: I had bought all the ingredients for it last week. counterfactual: I need to buy all the ingredients for it after work today. original_ending: I spent all day yesterday cooking the food. Unfortunately, I burnt the food. I won\'t be able to get new ingredients in time for tomorrow\'s party.' input_ids = tokenizer(text, return_tensors="pt").input_ids.to(device) outputs = model.generate(input_ids, max_length=config.TARGET_LEN, num_beams=2) print(tokenizer.decode(outputs[0], skip_special_tokens=True))
edited_ending: I spent all day yesterday cooking the food. Unfortunately, I burnt the food. I won't be able to get new ingredients in time for tomorrow's party.
MIT
huggingface_t5_6_3.ipynb
skywalker00001/Conterfactual-Reasoning-Project
8. Evaluation 7.1 BLEU score
# predictions: y', actuals: y
from torchtext.data.metrics import bleu_score

pre_corpus = [i.split(" ") for i in predictions]
act_corpus = [i.split(" ") for i in actuals]
print(act_corpus)
print(pre_corpus)
# note: torchtext's bleu_score expects a *list of reference translations* per candidate
# (e.g. [[ref_tokens], ...]); passing act_corpus directly may deflate the score.
#bs = bleu_score([pre_corpus[0]], [act_corpus[0]], max_n=1, weights=[1])
#bs = bleu_score([pre_corpus[0]], [act_corpus[0]], max_n=2, weights=[0.5, 0.5])
bs_1 = bleu_score(pre_corpus, act_corpus, max_n=1, weights=[1])
#bs_2 = bleu_score(pre_corpus, act_corpus, max_n=2, weights=[0.1, 0.9])
#print(f'bs_1: {bs_1:.5f}, bs_2: {bs_2:.5f}')
print(f'bleus_1: {bs_1:.5f}')
bleus_1: 0.02605
MIT
huggingface_t5_6_3.ipynb
skywalker00001/Conterfactual-Reasoning-Project
7.2 ROUGE
!pip install rouge from rouge import Rouge def compute_rouge(predictions, targets): predictions = [" ".join(prediction).lower() for prediction in predictions] predictions = [prediction if prediction else "EMPTY" for prediction in predictions] targets = [" ".join(target).lower() for target in targets] targets = [target if target else "EMPTY" for target in targets] rouge = Rouge() scores = rouge.get_scores(hyps=predictions, refs=targets, avg=True) return scores['rouge-1']['f'] rouge_1 = compute_rouge(predictions, actuals) print(f'rouge_1: {rouge_1:.5f}')
rouge_1: 0.96353
MIT
huggingface_t5_6_3.ipynb
skywalker00001/Conterfactual-Reasoning-Project
7.3 T5 loss (cross entropy), discussed before
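The commented-out block in the next cell encodes the whole corpus at once and runs out of memory (it is marked OOM there). A minimal batched sketch of the same loss computation, reusing the cell's own names (`predictions`, `actuals`, `config`, `model`, `tokenizer`, `device`) and an assumed batch size, could look like this:

```python
import torch

batch_size = 8  # assumption: small enough to fit in GPU memory
total_loss, n = 0.0, 0
with torch.no_grad():
    for i in range(0, len(predictions), batch_size):
        src = tokenizer(predictions[i:i + batch_size], max_length=config.SOURCE_LEN,
                        padding='max_length', truncation=True, return_tensors='pt').to(device)
        ending = tokenizer(actuals[i:i + batch_size], max_length=config.TARGET_LEN,
                           padding='max_length', truncation=True, return_tensors='pt').input_ids.to(device)
        ending[ending == tokenizer.pad_token_id] = -100  # ignore padding in the cross-entropy loss
        loss = model(input_ids=src.input_ids, attention_mask=src.attention_mask, labels=ending).loss
        total_loss += loss.item() * len(predictions[i:i + batch_size])
        n += len(predictions[i:i + batch_size])
print(total_loss / n)  # average per-example cross-entropy, analogous to final_loss / len(...)
```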
print(final_loss / len(part_large_cleaned_df))

# source = tokenizer.encode_plus(predictions, max_length= config.SOURCE_LEN, padding='max_length', return_tensors='pt')
# target = tokenizer.encode_plus(actuals, max_length= config.TARGET_LEN, padding='max_length', return_tensors='pt')
# source_ids = source['input_ids'].squeeze()
# target_ids = target['input_ids'].squeeze()

# encode the sources -- kept commented out because encoding the whole corpus at once runs OOM
'''
source_encoding = tokenizer(
    predictions,
    max_length= config.SOURCE_LEN,
    padding='max_length',
    return_tensors='pt'
)
original, attention_mask = source_encoding.input_ids, source_encoding.attention_mask

# encode the targets
target_encoding = tokenizer(
    actuals,
    max_length= config.TARGET_LEN,
    padding='max_length',
    return_tensors='pt'
)
ending = target_encoding.input_ids
ending[ending == tokenizer.pad_token_id] = -100
original, attention_mask, ending = original.to(device), attention_mask.to(device), ending.to(device)

# forward pass
loss = model(input_ids=original, attention_mask=attention_mask, labels=ending).loss
'''

# compare two sentences line by line with difflib.Differ
from difflib import Differ

a = "I paid the cashier and patiently waited for my drink."
b = "I paid the cashier and patiently waited at the counter for my drink."
d = Differ()
print(a)
print(a.splitlines())
diff = d.compare(a.splitlines(), b.splitlines())
print('\n'.join(list(diff)))

# find the matching character blocks between the two sentences
import difflib

a = "I paid the cashier and patiently waited for my drink."
b = "I paid the cashier and patiently waited at the counter for my drink."
s = difflib.SequenceMatcher(None, a, b)
for block in s.get_matching_blocks():
    print(block)

import difflib

a = "I paid the cashier and patiently waited acoorinding to for my drink."
b = "I paid the cashier and patiently waited at the counter for my drink."
s = difflib.SequenceMatcher(None, a, b)
matches = []
for block in s.get_matching_blocks():
    #matches.append([block[0], block[1], block[2]])
    matches.append([i for i in block])
    #matches.append(block)
print(matches)
# explanation: matches[i][0] is the index into a, matches[i][1] is the index into b,
# and matches[i][2] is the length of the matched span.

changes = []
for i in range(len(matches) - 1):
    print(matches[i])
    # a replacement occurs when unmatched text sits between two matched blocks in both a and b
    if (matches[i][0] + matches[i][2] < matches[i + 1][0]) and (matches[i][1] + matches[i][2] < matches[i + 1][1]):  # replacing
        # record the replaced span of a (the original f-string was left unfinished; this fills it with the span and its text)
        start, end = matches[i][0] + matches[i][2], matches[i + 1][0]
        changes.append(f"{start}-{end}: {a[start:end]}")

print(a)
print(len(a))
a1 = tokenizer(a)
print(a1)
{'input_ids': [27, 1866, 8, 1723, 972, 11, 1868, 120, 3, 13106, 3, 9, 509, 32, 13119, 53, 12, 21, 82, 3281, 5, 1], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
MIT
huggingface_t5_6_3.ipynb
skywalker00001/Conterfactual-Reasoning-Project
Global Alignment: The Needleman-Wunsch Algorithm

The objective of this notebook is to help you familiarize yourself with the Needleman-Wunsch algorithm for pairwise alignment of sequences.
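For reference, this is the recurrence that the code below implements when filling the score matrix $s$:

$$
s_{i,j} = \max
\begin{cases}
s_{i-1,\,j-1} + m(i,j) & \text{(diagonal: match/mismatch)}\\
s_{i-1,\,j} + \text{indel} & \text{(up: gap in seq2)}\\
s_{i,\,j-1} + \text{indel} & \text{(left: gap in seq1)}
\end{cases}
$$

where $m(i,j)$ is the match score when $\mathrm{seq1}[i-1] = \mathrm{seq2}[j-1]$ and the mismatch score otherwise, with base cases $s_{i,0} = i \cdot \text{indel}$ and $s_{0,j} = j \cdot \text{indel}$. The traceback matrix simply records which of the three branches produced the maximum.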
import numpy as np # to print colored arrows you will need the termcolor module # if you don't have it, traceback arrows will be printed # without color color = True try : from termcolor import colored except : color = False # the three directions you can go in the traceback: DIAG = 0 UP = 1 LEFT = 2 # UTF-8 representations of arrow symbols # arrows[DIAG] is a diagonal arrow # arrows[UP] is an up arrow # arrows[LEFT] is a left arrows = [u"\u2196", u"\u2191", u"\u2190"] def needleman_wunsch_matrix(seq1, seq2, match=1, mismatch=-1, indel=-1): """ Fill the DP matrix according to the Needleman-Wunsch algorithm for two sequences seq1 and seq2. match: the match score mismatch: the mismatch score indel: the indel score Returns the matrix of scores and the matrix of pointers """ n = len(seq1) m = len(seq2) s = np.zeros( (n+1, m+1) ) # DP matrix ptr = np.zeros( (n+1, m+1), dtype=int ) # matrix of pointers ##### INITIALIZE SCORING MATRIX (base case) ##### for i in range(1, n+1) : s[i,0] = indel * i for j in range(1, m+1): s[0,j] = indel * j ########## INITIALIZE TRACEBACK MATRIX ########## # Tag first row by LEFT, indicating initial '-'s ptr[0,1:] = LEFT # Tag first column by UP, indicating initial '-'s ptr[1:,0] = UP ##################################################### for i in range(1,n+1): for j in range(1,m+1): # match if seq1[i-1] == seq2[j-1]: s[i,j] = s[i-1,j-1] + match ptr[i,j] = DIAG # mismatch else : s[i,j] = s[i-1,j-1] + mismatch ptr[i,j] = DIAG # indel penalty if s[i-1,j] + indel > s[i,j] : s[i,j] = s[i-1,j] + indel ptr[i,j] = UP # indel penalty if s[i, j-1] + indel > s[i,j]: s[i,j] = s[i, j-1] + indel ptr[i,j] = LEFT return s, ptr def needleman_wunsch_trace(seq1, seq2, s, ptr) : #### TRACE BEST PATH TO GET ALIGNMENT #### align1 = "" align2 = "" n, m = (len(seq1), len(seq2)) i = n j = m curr = ptr[i, j] while (i > 0 or j > 0): ptr[i,j] += 3 if curr == DIAG : align1 = seq1[i-1] + align1 align2 = seq2[j-1] + align2 i -= 1 j -= 1 elif curr == LEFT: align1 = '-' + align1 align2 = seq2[j-1] + align2 j -= 1 elif curr == UP: align1 = seq1[i-1] + align1 align2 = '-' + align2 i -= 1 curr = ptr[i,j] return align1, align2 def show_ptr_matrix(ptr, seq1, seq2) : print('\n'+'~`'*25) print("Traceback") global color print(" " + " ".join(seq2)) for i in range(len(ptr)) : if (i > 0) : print (seq1[i-1] + " ",end="") if (i == 0) : print(" ",end="") for j in range(len(ptr[i])) : if color and ptr[i,j] >= 3 : print(" " + colored(arrows[ptr[i,j]-3], 'green' ), end="") else : if ptr[i,j] >=3 : ptr[i,j] -=3 print(" " + arrows[ptr[i,j]],end="") print() def show_dp_matrix(s, seq1, seq2) : print('\n'+'~`'*25) print("DP matrix") print(" " + " ".join(seq2)) for i in range(len(s)) : if (i > 0) : print(seq1[i-1] + " ",end="") if (i == 0) : print(" ",end="") for j in range(len(s[i])) : print(" " + "% 2.1f" % s[i,j],end="") print() def needleman_wunsch(seq1, seq2, match=1, mismatch=-1, indel=-1, verbose=True) : """ computes an optimal global alignment of two sequences using the Needleman-Wunsch algorithm returns the alignment and its score """ s,ptr = needleman_wunsch_matrix(seq1, seq2, match, mismatch, indel) alignment = needleman_wunsch_trace(seq1, seq2, s, ptr) if verbose : show_dp_matrix(s, seq1, seq2) show_ptr_matrix(ptr, seq1, seq2) print('\n'+'~`'*25) print("Alignment Score: %f\n" % (s[len(seq1),len(seq2)])) print("Alignment:") print(alignment[0]) print(alignment[1]) return alignment, s[len(seq1), len(seq2)] from random import randint def random_DNA_sequence(length): """ Returns a random DNA of the given length. 
""" nucleotides = ['A','T','G','C'] seq = [ nucleotides[randint(0,3)] for i in range(length) ] return ''.join(seq) seq1 = random_DNA_sequence(10) seq2 = random_DNA_sequence(10) needleman_wunsch(seq1, seq2, 1, -1, -1) needleman_wunsch(seq1, seq2, 1, -1, -0.1)
~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~` DP matrix A T G T C G C T T A 0.0 -0.1 -0.2 -0.3 -0.4 -0.5 -0.6 -0.7 -0.8 -0.9 -1.0 A -0.1 1.0 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1 T -0.2 0.9 2.0 1.9 1.8 1.7 1.6 1.5 1.4 1.3 1.2 A -0.3 0.8 1.9 1.8 1.7 1.6 1.5 1.4 1.3 1.2 2.3 C -0.4 0.7 1.8 1.7 1.6 2.7 2.6 2.5 2.4 2.3 2.2 A -0.5 0.6 1.7 1.6 1.5 2.6 2.5 2.4 2.3 2.2 3.3 C -0.6 0.5 1.6 1.5 1.4 2.5 2.4 3.5 3.4 3.3 3.2 T -0.7 0.4 1.5 1.4 2.5 2.4 2.3 3.4 4.5 4.4 4.3 C -0.8 0.3 1.4 1.3 2.4 3.5 3.4 3.3 4.4 4.3 4.2 C -0.9 0.2 1.3 1.2 2.3 3.4 3.3 4.4 4.3 4.2 4.1 G -1.0 0.1 1.2 2.3 2.2 3.3 4.4 4.3 4.2 4.1 4.0 ~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~` Traceback A T G T C G C T T A β†– ← ← ← ← ← ← ← ← ← ← A ↑ β†– ← ← ← ← ← ← ← ← ← T ↑ ↑ β†– ← β†– ← ← ← β†– β†– ← A ↑ β†– ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ β†– C ↑ ↑ ↑ ↑ ↑ β†– ← β†– ← ← ↑ A ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ ↑ β†– C ↑ ↑ ↑ ↑ ↑ β†– ↑ β†– ← ← ↑ T ↑ ↑ β†– ← β†– ↑ ↑ ↑ β†– β†– ← C ↑ ↑ ↑ ↑ ↑ β†– ← β†– ↑ ↑ ↑ C ↑ ↑ ↑ ↑ ↑ β†– ↑ β†– ↑ ↑ ↑ G ↑ ↑ ↑ β†– ← ↑ β†– ↑ ↑ ↑ ↑ ~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~`~` Alignment Score: 4.000000 Alignment: A--TAC-AC-T-CCG ATGT-CG-CTTA---
MIT
notebooks/04_global_alignment.ipynb
asabenhur/CS425
Summary
import numpy as np from scipy.linalg import sqrtm import matplotlib.pyplot as plt N = 1000
_____no_output_____
MIT
HW5/notebook/HW5.ipynb
okuchap/SML
Facet Wrapping

Facets divide a plot into subplots based on the values of one or more discrete variables.
import pandas as pd from lets_plot import * LetsPlot.setup_html() df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv') p = ggplot(df, aes('cty', 'hwy')) + geom_point() p p + facet_wrap(facets='fl', ncol=3)
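The cell above wraps on a single variable. Since facets can be built from more than one discrete variable, here is a small self-contained sketch of a two-variable layout; it reuses the same mpg.csv, and treating 'drv' (drive train) as the second facetting column is an assumption about that file:

```python
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()

mpg = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv')
base = ggplot(mpg, aes('cty', 'hwy')) + geom_point()

base + facet_grid(x='fl', y='drv')               # grid: columns by fuel type, rows by drive train
base + facet_wrap(facets=['fl', 'drv'], ncol=4)  # or wrap the panels over both variables at once
```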
_____no_output_____
MIT
docs/_downloads/29369f7678f70a010207df843f9d0358/plot__facet_wrapping.ipynb
IKupriyanov-HORIS/lets-plot-docs
Now add the column headings according to the description provided with the dataset
data.columns = ["sepal length", "sepal width", "petal length", "petal width", "Class"] data.head()
_____no_output_____
MIT
Naive_Bayes/Naive_bayes_classifier.ipynb
Ajith013/Machine_learning
Make sure that all the datatypes are correct and consistent
data.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 149 entries, 0 to 148 Data columns (total 5 columns): sepal length 149 non-null float64 sepal width 149 non-null float64 petal length 149 non-null float64 petal width 149 non-null float64 Class 149 non-null object dtypes: float64(4), object(1) memory usage: 5.9+ KB
MIT
Naive_Bayes/Naive_bayes_classifier.ipynb
Ajith013/Machine_learning
Dividing the dataset into X and Y (Attributes and Classes)
X = data.drop(['Class'], axis = 1) Y = data['Class'] X.head() Y.head()
_____no_output_____
MIT
Naive_Bayes/Naive_bayes_classifier.ipynb
Ajith013/Machine_learning
Now split the data into training and test data
# Imports added for completeness; the original notebook may load these in an earlier cell.
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

X_train, X_test, y_train, y_test = train_test_split(X, Y, random_state = 0, test_size = 0.30)
classifier = GaussianNB()
classifier.fit(X_train, y_train)
_____no_output_____
MIT
Naive_Bayes/Naive_bayes_classifier.ipynb
Ajith013/Machine_learning
__The class prior gives the probability of each class. It can be set manually before building the model; if it is not set, it is handled by the function. In the case above the priors were not set, so they are adjusted according to the data.__ __The priors adjusted according to the data are as follows:__
classifier.class_prior_
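For comparison, a minimal sketch (not part of the original notebook) of fixing the priors by hand instead of letting GaussianNB estimate them from the data; the equal values are an arbitrary choice for the three iris classes:

```python
from sklearn.naive_bayes import GaussianNB

# Set the class priors manually instead of estimating them from the training data.
manual_classifier = GaussianNB(priors=[1/3, 1/3, 1/3])
manual_classifier.fit(X_train, y_train)
print(manual_classifier.class_prior_)  # echoes the priors that were passed in
```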
_____no_output_____
MIT
Naive_Bayes/Naive_bayes_classifier.ipynb
Ajith013/Machine_learning
__Var_smoothing is the portion of the largest variance of all features that is added to the variances for calculation stability. In this case the parameter has been left at its default.__
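For intuition, a small sketch of what that means in practice (this mirrors scikit-learn's documented behaviour; `epsilon_` is the attribute the fitted estimator exposes):

```python
import numpy as np

var_smoothing = 1e-9  # GaussianNB's default value for the parameter
# the stabilising term is this fraction of the largest per-feature variance of the training data ...
epsilon = var_smoothing * np.var(np.asarray(X_train), axis=0).max()
print(epsilon)
# ... and it is the amount the fitted classifier added to every feature variance
print(classifier.epsilon_)
```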
# Imports added for completeness; the original notebook may load these in an earlier cell.
from sklearn.metrics import confusion_matrix, accuracy_score

classifier.get_params()
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print("Confusion matrix: ", cm)
print("Accuracy of the model: ", accuracy_score(y_test, y_pred))
Accuracy of the model: 0.8888888888888888
MIT
Naive_Bayes/Naive_bayes_classifier.ipynb
Ajith013/Machine_learning
Using Cache (available since v21.06.00)

Need for Cache

In many deep learning use cases, small image patches need to be extracted from the large image and fed into the neural network. If the patch size doesn't align with the underlying tile layout of the TIFF image (e.g., an AI model such as ResNet may accept a particular image size [e.g., 224x224] that is smaller than the underlying tile size [256x256]), redundant image loadings for a tile are needed (see the following two figures).

![image](https://user-images.githubusercontent.com/1928522/118344267-333a4f00-b4e2-11eb-898c-8980c8725d32.png)
![image](https://user-images.githubusercontent.com/1928522/118344294-5238e100-b4e2-11eb-8f3a-4772ef055658.png)

This results in lower performance for unaligned cases, as shown in our [GTC 2021 presentation](https://www.nvidia.com/en-us/gtc/catalog/?search=cuCIM).

![image](https://user-images.githubusercontent.com/1928522/118344737-c07ea300-b4e4-11eb-9c95-15c2e5022274.png)

The proper use of the cache improves the loading performance greatly, especially for **inference** use cases and when [accessing tiles sequentially (left to right, top to bottom) from one TIFF file](https://nbviewer.jupyter.org/github/rapidsai/cucim/blob/branch-21.06/notebooks/File-access_Experiments_on_TIFF.ipynb#1.-Accessing-tiles-sequentially-(left-to-right,-top-to-bottom)-from-one-TIFF-file). On the other hand, if the application [accesses partial tiles randomly from multiple TIFF files](https://nbviewer.jupyter.org/github/rapidsai/cucim/blob/branch-21.06/notebooks/File-access_Experiments_on_TIFF.ipynb#3.-Accessing-partial-tiles-randomly-from-multiple-TIFF-files) (this usually happens for **training** use cases), using a cache could be meaningless.

Enabling cache

Currently, cuCIM supports the following three strategies:
- `nocache`
- `per_process`
- `shared_memory` (interprocess)

**1) `nocache`**

No cache. By default, this cache strategy is used. With this strategy, the behavior is the same as before `v20.06.00`.

**2) `per_process`**

The cache memory is shared among threads.

**3) `shared_memory`**

The cache memory is shared among processes.

Getting cache setting

`CuImage.cache()` returns an object that can control the current cache. The object has the following properties:
- `type`: The type (strategy) name
- `memory_size`: The number of bytes used in the cache memory
- `memory_capacity`: The maximum number of bytes that can be allocated (used) in the cache memory
- `free_memory`: The number of bytes available in the cache memory
- `size`: The number of cache items used
- `capacity`: The maximum number of cache items that can be created
- `hit_count`: The cache hit count
- `miss_count`: The cache miss count
- `config`: A configuration dictionary that was used for configuring the cache
from cucim import CuImage cache = CuImage.cache() print(f' type: {cache.type}({int(cache.type)})') print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}') print(f'free_memory: {cache.free_memory}') print(f' size: {cache.size}/{cache.capacity}') print(f' hit_count: {cache.hit_count}') print(f' miss_count: {cache.miss_count}') print(f' config: {cache.config}')
type: CacheType.NoCache(0) memory_size: 0/0 free_memory: 0 size: 0/0 hit_count: 0 miss_count: 0 config: {'type': 'nocache', 'memory_capacity': 1024, 'capacity': 5461, 'mutex_pool_capacity': 11117, 'list_padding': 10000, 'extra_shared_memory_size': 100, 'record_stat': False}
Apache-2.0
notebooks/Using_Cache.ipynb
madsbk/cucim
Changing Cache Setting

Cache configuration can be changed by adding parameters to the `cache()` method. The following parameters are available:
- `type`: The type (strategy) name. Defaults to `'no_cache'`.
- `memory_capacity`: The maximum number of mebibytes (`MiB`, 2^20 bytes) that can be allocated (used) in the cache memory. Defaults to `1024`.
- `capacity`: The maximum number of cache items that can be created. Defaults to `5461` (= (1024 x 2^20) / (256x256x3)).
- `mutex_pool_capacity`: The mutex pool size. Defaults to `11117`.
- `list_padding`: The number of additional items used for the internal circular queue. Defaults to `10000`.
- `extra_shared_memory_size`: The size of the additional memory allocation (in MiB) for the shared-memory allocator in the `shared_memory` strategy. Defaults to `100`.
- `record_stat`: Whether the cache statistics should be recorded. Defaults to `False`.

In most cases, only `type` (required) and `memory_capacity` are used.
from cucim import CuImage cache = CuImage.cache('per_process', memory_capacity=2048) print(f' type: {cache.type}({int(cache.type)})') print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}') print(f'free_memory: {cache.free_memory}') print(f' size: {cache.size}/{cache.capacity}') print(f' hit_count: {cache.hit_count}') print(f' miss_count: {cache.miss_count}') print(f' config: {cache.config}')
type: CacheType.PerProcess(1) memory_size: 0/2147483648 free_memory: 2147483648 size: 0/10922 hit_count: 0 miss_count: 0 config: {'type': 'per_process', 'memory_capacity': 2048, 'capacity': 10922, 'mutex_pool_capacity': 11117, 'list_padding': 10000, 'extra_shared_memory_size': 100, 'record_stat': False}
Apache-2.0
notebooks/Using_Cache.ipynb
madsbk/cucim
Choosing Proper Cache Memory Size

It is important to select an appropriate cache memory size (capacity). A small cache memory size results in low cache hit rates. Conversely, if the cache memory size is too large, memory is wasted.

For example, if the default tile size is 256x256 and the patch size to load is 224x224, the cache memory needs to be large enough to contain at least two rows of tiles in the image, to avoid deleting required cache entries while loading patches sequentially (left to right, top to bottom) from one TIFF file.

![image](https://user-images.githubusercontent.com/1928522/120760720-4cbf2d00-c4c9-11eb-875b-b070203fd8e6.png)

cuCIM provides a utility method (`cucim.clara.cache.preferred_memory_capacity()`) to calculate a preferred cache memory size for the given image (image size and tile size) and the patch size. Internal logic is available at
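As a rough sanity check on those numbers, the sketch below reproduces the capacities suggested for the example image used in the next cells (image width 19920 px, 256x256 tiles, 3 bytes per pixel). The "one extra row of tiles" rule is an assumption about the heuristic, not cuCIM's exact internal logic:

```python
import math

def rough_capacity_mib(image_width, tile=(256, 256), patch=(256, 256), bytes_per_pixel=3):
    # keep roughly one more row of tiles than a patch spans, so sequential
    # left-to-right, top-to-bottom patch reads never evict tiles that are still needed
    tiles_per_row = math.ceil(image_width / tile[0])
    rows_needed = math.ceil(patch[1] / tile[1]) + 1
    total_bytes = rows_needed * tiles_per_row * tile[0] * tile[1] * bytes_per_pixel
    return math.ceil(total_bytes / 2**20)

print(rough_capacity_mib(19920, patch=(256, 256)))    # ~30 MiB, matching preferred_memory_capacity below
print(rough_capacity_mib(19920, patch=(1024, 1024)))  # ~74 MiB
```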
from cucim import CuImage from cucim.clara.cache import preferred_memory_capacity img = CuImage('input/image.tif') image_size = img.size('XY') # same with `img.resolutions["level_dimensions"][0]` tile_size = img.resolutions['level_tile_sizes'][0] # default: (256, 256) patch_size = (1024, 1024) # default: (256, 256) bytes_per_pixel = 3 # default: 3 print(f'image size: {image_size}') print(f'tile size: {tile_size}') # Below three statements are the same. memory_capacity = preferred_memory_capacity(img, patch_size=patch_size) memory_capacity2 = preferred_memory_capacity(None, image_size, tile_size, patch_size, bytes_per_pixel) memory_capacity3 = preferred_memory_capacity(None, image_size, patch_size=patch_size) print(f'memory_capacity : {memory_capacity} MiB') print(f'memory_capacity2: {memory_capacity2} MiB') print(f'memory_capacity3: {memory_capacity3} MiB') cache = CuImage.cache('per_process', memory_capacity=memory_capacity) # You can also manually set capacity` (e.g., `capacity=500`) print('= Cache Info =') print(f' type: {cache.type}({int(cache.type)})') print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}') print(f' size: {cache.size}/{cache.capacity}')
image size: [19920, 26420] tile size: (256, 256) memory_capacity : 74 MiB memory_capacity2: 74 MiB memory_capacity3: 74 MiB = Cache Info = type: CacheType.PerProcess(1) memory_size: 0/77594624 size: 0/394
Apache-2.0
notebooks/Using_Cache.ipynb
madsbk/cucim
Reserve More Cache Memory

If more cache memory capacity is needed at runtime, you can use the `reserve()` method.
from cucim import CuImage from cucim.clara.cache import preferred_memory_capacity img = CuImage('input/image.tif') memory_capacity = preferred_memory_capacity(img, patch_size=(256, 256)) new_memory_capacity = preferred_memory_capacity(img, patch_size=(512, 512)) print(f'memory_capacity : {memory_capacity} MiB') print(f'new_memory_capacity: {new_memory_capacity} MiB') print() cache = CuImage.cache('per_process', memory_capacity=memory_capacity) print('= Cache Info =') print(f' type: {cache.type}({int(cache.type)})') print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}') print(f' size: {cache.size}/{cache.capacity}') print() cache.reserve(new_memory_capacity) print('= Cache Info (update memory capacity) =') print(f' type: {cache.type}({int(cache.type)})') print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}') print(f' size: {cache.size}/{cache.capacity}') print() cache.reserve(memory_capacity, capacity=500) print('= Cache Info (update memory capacity & capacity) =') print(f' type: {cache.type}({int(cache.type)})') print(f'memory_size: {cache.memory_size}/{cache.memory_capacity} # smaller `memory_capacity` value does not change this') print(f' size: {cache.size}/{cache.capacity}') print() cache = CuImage.cache('no_cache') print('= Cache Info (no cache) =') print(f' type: {cache.type}({int(cache.type)})') print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}') print(f' size: {cache.size}/{cache.capacity}')
memory_capacity : 30 MiB new_memory_capacity: 44 MiB = Cache Info = type: CacheType.PerProcess(1) memory_size: 0/31457280 size: 0/160 = Cache Info (update memory capacity) = type: CacheType.PerProcess(1) memory_size: 0/46137344 size: 0/234 = Cache Info (update memory capacity & capacity) = type: CacheType.PerProcess(1) memory_size: 0/46137344 # smaller `memory_capacity` value does not change this size: 0/500 = Cache Info (no cache) = type: CacheType.NoCache(0) memory_size: 0/0 size: 0/0
Apache-2.0
notebooks/Using_Cache.ipynb
madsbk/cucim
Profiling Cache Hit/Miss

If you add the argument `record_stat=True` to the `CuImage.cache()` method, cache statistics are recorded. The cache hit/miss counts are accessible through the `hit_count`/`miss_count` properties of the cache object. You can get/set/unset the recording through the `record()` method.
from cucim import CuImage from cucim.clara.cache import preferred_memory_capacity img = CuImage('input/image.tif') memory_capacity = preferred_memory_capacity(img, patch_size=(256, 256)) cache = CuImage.cache('per_process', memory_capacity=memory_capacity, record_stat=True) img.read_region((0,0), (100,100)) print(f'cache hit: {cache.hit_count}, cache miss: {cache.miss_count}') region = img.read_region((0,0), (100,100)) print(f'cache hit: {cache.hit_count}, cache miss: {cache.miss_count}') region = img.read_region((0,0), (100,100)) print(f'cache hit: {cache.hit_count}, cache miss: {cache.miss_count}') print(f'Is recorded: {cache.record()}') cache.record(False) print(f'Is recorded: {cache.record()}') region = img.read_region((0,0), (100,100)) print(f'cache hit: {cache.hit_count}, cache miss: {cache.miss_count}') print() print(f' type: {cache.type}({int(cache.type)})') print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}') print(f'free_memory: {cache.free_memory}') print(f' size: {cache.size}/{cache.capacity}') print() cache = CuImage.cache('no_cache') print(f' type: {cache.type}({int(cache.type)})') print(f'memory_size: {cache.memory_size}/{cache.memory_capacity}') print(f'free_memory: {cache.free_memory}') print(f' size: {cache.size}/{cache.capacity}')
cache hit: 0, cache miss: 1 cache hit: 1, cache miss: 1 cache hit: 2, cache miss: 1 Is recorded: True Is recorded: False cache hit: 0, cache miss: 0 type: CacheType.PerProcess(1) memory_size: 196608/31457280 free_memory: 31260672 size: 1/160 type: CacheType.NoCache(0) memory_size: 0/0 free_memory: 0 size: 0/0
Apache-2.0
notebooks/Using_Cache.ipynb
madsbk/cucim
Considerations in a Multi-threading/processing Environment

`per_process` strategy

Cache memory

If used in a multi-threading environment where each thread reads a different part of the image sequentially, please consider increasing the cache memory size beyond the size suggested by `cucim.clara.cache.preferred_memory_capacity()` to avoid dropping necessary cache items.

If used in a multi-processing environment, the total cache memory allocated can be `(number of processes) x (cache memory capacity)`. Please be careful not to oversize the memory allocated by the cache.

Cache Statistics

If used in a multi-processing environment (e.g., using `concurrent.futures.ProcessPoolExecutor()`), the cache hit count (`hit_count`) and miss count (`miss_count`) wouldn't be recorded in the main process's cache object.

`shared_memory` strategy

In general, the `shared_memory` strategy has more overhead than the `per_process` strategy. However, it is recommended if you want to use a fixed amount of cache memory regardless of the number of processes. Note that this strategy pre-allocates the cache memory in shared memory and allocates more memory (as specified in the `extra_shared_memory_size` parameter) than the requested cache memory size (capacity), so that the memory allocator can handle memory segments.

Cache memory

Since the cache memory is shared by multiple threads/processes, you will need to set enough cache memory to avoid dropping necessary cache items.

Setting Default Cache Configuration

The configuration for cuCIM can be specified in a `.cucim.json` file, and the user can set default cache settings there. cuCIM looks for the `.cucim.json` file in the following order:
1. The current folder
2. `$HOME/.cucim.json`

The configuration for the cache can be specified like below.

```jsonc
{
    // This is actually a JSONC file, so comments are available.
    "cache": {
        "type": "nocache",
        "memory_capacity": 1024,
        "capacity": 5461,
        "mutex_pool_capacity": 11117,
        "list_padding": 10000,
        "extra_shared_memory_size": 100,
        "record_stat": false
    }
}
```

You can write the current cache configuration into the file like below:
import json from cucim import CuImage cache = CuImage.cache() config_data = {'cache': cache.config} json_text = json.dumps(config_data, indent=4) print(json_text) # Save into the configuration file. with open('.cucim.json', 'w') as fp: fp.write(json_text)
{ "cache": { "type": "nocache", "memory_capacity": 1024, "capacity": 5461, "mutex_pool_capacity": 11117, "list_padding": 10000, "extra_shared_memory_size": 100, "record_stat": false } }
Apache-2.0
notebooks/Using_Cache.ipynb
madsbk/cucim
!mkdir epic3752
from IPython.display import HTML HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide(); } else { $('div.input').show(); } code_show = !code_show } $( document ).ready(code_toggle); </script> <form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''') %%html <style> body { font-family: "Times New Roman"; font-size: 12pt; } </style> """ Created on Tue Nov 2 20:31:38 2018 @author: Maximilian N. GΓΌnther MIT Kavli Institute for Astrophysics and Space Research, Massachusetts Institute of Technology, 77 Massachusetts Avenue, Cambridge, MA 02109, USA Email: [email protected] Web: www.mnguenther.com """ from __future__ import print_function, division, absolute_import #::: plotting settings import seaborn as sns sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True) sns.set_style({"xtick.direction": "in","ytick.direction": "in"}) sns.set_context(rc={'lines.markeredgewidth': 1}) #::: modules import numpy as np import os, sys import csv import ipywidgets as widgets from IPython.display import display, HTML, Markdown, clear_output, Javascript display(HTML("<style>.container { width:80% !important; }</style>")) from multiprocessing import cpu_count if sys.version_info[0] == 3: # for Python3 from tkinter import Tk, filedialog elif sys.version_info[0] == 2: # for Python2 from Tkinter import Tk import tkFileDialog as filedialog import warnings def custom_formatwarning(msg, *args, **kwargs): return str(msg) + '\n' warnings.formatwarning = custom_formatwarning #::: allesfitter modules #::: somehow jupyter notebooks don't allow relative imports, so it needs a little hack... module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) import allesfitter from allesfitter.utils.latex_printer import round_txt_separately from allesfitter.priors.transform_priors import get_cosi_from_i, get_Rsuma_from_a_over_Rstar, get_Rsuma_from_Rstar_over_a from allesfitter.priors.estimate_noise import estimate_noise %load_ext autoreload %autoreload 2
_____no_output_____
MIT
allesfitter/epic3752_ini.ipynb
jpdeleon/kesprint2
![allesfitter](logo_circ.png)
#::: globals global INPUT global VBOXES global BUTTONS global DROPDOWNS INPUT = {} VBOXES = {} BUTTONS = {} DROPDOWNS = {} layout = {'width': '180px'} layout_wide = {'width': '360px'} layout_textbox = {'width': '120px'} layout_checkbox = {} #:::: clean up csv file def clean_up_csv(fname, N_last_rows=0): with open(fname, "r") as f: params_csv = list(csv.reader(f)) with open(fname, "w") as f: writer = csv.writer(f) for i in range(len(params_csv)-N_last_rows): row = params_csv[i] writer.writerow(row) #:::: append a row into csv file def fwrite_params_line(text): with open(INPUT['fname_params'], 'a') as f: f.write(text+'\n') #:::: write params into csv file def fwrite_params(key, label, unit, physical_bounds, return_str=False): if INPUT[key+'_bounds_type'].value == 'uniform': bounds = 'uniform ' \ + str( np.max( [physical_bounds[0], float(INPUT[key+'_median'].value)-float(INPUT[key+'_lerr'].value)] ) ) + ' ' \ + str( np.min( [physical_bounds[1], float(INPUT[key+'_median'].value)+float(INPUT[key+'_uerr'].value)] ) ) elif INPUT[key+'_bounds_type'].value == 'uniform * 5': bounds = 'uniform ' \ + str( np.max( [physical_bounds[0], float(INPUT[key+'_median'].value)-5*float(INPUT[key+'_lerr'].value)] ) ) + ' ' \ + str( np.min( [physical_bounds[1], float(INPUT[key+'_median'].value)+5*float(INPUT[key+'_uerr'].value)] ) ) elif INPUT[key+'_bounds_type'].value == 'trunc_normal': bounds = 'trunc_normal ' \ + str(physical_bounds[0]) + ' ' \ + str(physical_bounds[1]) + ' ' \ + str(INPUT[key+'_median'].value) + ' ' \ + str(np.max( [ float(INPUT[key+'_lerr'].value), float(INPUT[key+'_uerr'].value) ] )) elif INPUT[key+'_bounds_type'].value == 'trunc_normal * 5': bounds = 'trunc_normal ' \ + str(physical_bounds[0]) + ' ' \ + str(physical_bounds[1]) + ' ' \ + str(INPUT[key+'_median'].value) + ' ' \ + str(5*np.max( [ float(INPUT[key+'_lerr'].value), float(INPUT[key+'_uerr'].value) ] )) string = key + ',' + str(INPUT[key+'_median'].value) + ',' + str(int(INPUT[key+'_fit'].value)) + ',' + bounds + ',' + label + ',' + unit if not return_str: fwrite_params_line(string) else: return string #unique def unique(array): uniq, index = np.unique(array, return_index=True) return uniq[index.argsort()]
_____no_output_____
MIT
allesfitter/epic3752_ini.ipynb
jpdeleon/kesprint2
1. working directory

Select the working directory for this fit, for example `/Users/me/TESS-1b/`. Then you can run a fit using `allesfitter.ns_fit('/Users/me/TESS-1b/')`.
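For orientation, a minimal sketch of the overall workflow once the directory is set up and the settings/params files have been created by the cells below. Only `ns_fit` is mentioned in this notebook; the other entry points are assumptions based on allesfitter's standard API:

```python
import allesfitter

datadir = '/Users/me/TESS-1b/'           # example path from the text above
allesfitter.show_initial_guess(datadir)  # assumed helper: plot the initial guess over the data
allesfitter.ns_fit(datadir)              # run the Nested Sampling fit
allesfitter.ns_output(datadir)           # assumed helper: create the result tables and plots
```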
BUTTONS['datadir'] = widgets.Button(description='Select directory', button_style='') text_af_directory = widgets.Text(value='', placeholder='for example: /Users/me/TESS-1b/', disable=True) hbox = widgets.HBox([BUTTONS['datadir'], text_af_directory]) display(hbox) def select_datadir(change): root = Tk() root.withdraw() root.call('wm', 'attributes', '.', '-topmost', True) INPUT['datadir'] = filedialog.askdirectory() %gui tk if INPUT['datadir'] != '': text_af_directory.value = INPUT['datadir'] BUTTONS['datadir'].style.button_color = 'lightgreen' INPUT['show_step_2a'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) BUTTONS['datadir'].on_click(select_datadir)
_____no_output_____
MIT
allesfitter/epic3752_ini.ipynb
jpdeleon/kesprint2
2. settings
if 'show_step_2a' in INPUT and INPUT['show_step_2a'] == True: display(Markdown('### General settings')) DROPDOWNS['planet_or_EB'] = widgets.Dropdown(options=['Planets', 'EBs']) display( widgets.HBox([widgets.Label(value='Fitting planets or EBs?', layout=layout), DROPDOWNS['planet_or_EB']]) ) display(Markdown('Give the companion letters and instruments, space-separated. Leave empty if not applicable.')) hbox_list = [] text_companions_phot = widgets.Text(value='', placeholder='for example: b') hbox_list.append( widgets.HBox([widgets.Label(value='Companions in photometry', layout=layout), text_companions_phot]) ) text_companions_rv = widgets.Text(value='', placeholder='for example: b c') hbox_list.append( widgets.HBox([widgets.Label(value='Companions in RV', layout=layout), text_companions_rv]) ) text_inst_phot = widgets.Text(value='', placeholder='for example: TESS NGTS') hbox_list.append( widgets.HBox([widgets.Label(value='Instruments for photometry', layout=layout), text_inst_phot]) ) text_inst_rv = widgets.Text(value='', placeholder='for example: HARPS Coralie') hbox_list.append( widgets.HBox([widgets.Label(value='Instruments for RV', layout=layout), text_inst_rv]) ) display(widgets.VBox(hbox_list)) def confirm(change): #::: set stuff if len(text_inst_phot.value): INPUT['inst_phot'] = str(text_inst_phot.value).split(' ') else: INPUT['inst_phot'] = [] if len(text_inst_rv.value): INPUT['inst_rv'] = str(text_inst_rv.value).split(' ') else: INPUT['inst_rv'] = [] if len(text_companions_phot.value): INPUT['companions_phot'] = str(text_companions_phot.value).split(' ') else: INPUT['companions_phot'] = [] if len(text_companions_rv.value): INPUT['companions_rv'] = str(text_companions_rv.value).split(' ') else: INPUT['companions_rv'] = [] INPUT['companions_all'] = list(np.unique(INPUT['companions_phot']+INPUT['companions_rv'])) #sorted by b, c, d... 
INPUT['inst_all'] = list(unique(INPUT['inst_phot']+INPUT['inst_rv'])) #sorted like user input button_2a.style.button_color = 'lightgreen' INPUT['show_step_2b'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) button_2a = widgets.Button(description='Confirm', button_style='') display(button_2a) button_2a.on_click(confirm) if 'show_step_2b' in INPUT and INPUT['show_step_2b'] == True: display(Markdown('### Advanced settings')) vbox_list = [] #::: Fitting & performance hbox_list = [] max_cores = cpu_count() DROPDOWNS['multiprocessing'] = widgets.Dropdown(options=['No'] + ['on '+str(i)+' of my '+str(max_cores)+' cores' for i in range(2,max_cores)] + ['always on all - 1 cores on any system']) hbox_list.append(widgets.HBox([widgets.Label(value='Multiprocessing', layout=layout), DROPDOWNS['multiprocessing']])) DROPDOWNS['fit_type'] = widgets.Dropdown(options=['Transit (fast)', 'Transit and occultation (fast)', 'Full lightcurve (slow)']) hbox_list.append(widgets.HBox([widgets.Label(value='Fit type', layout=layout), DROPDOWNS['fit_type']])) DROPDOWNS['shift_epoch'] = widgets.Dropdown(options=['Yes', 'No']) hbox_list.append(widgets.HBox([widgets.Label(value='Automatically shift epoch?', layout=layout), DROPDOWNS['shift_epoch']])) DROPDOWNS['mcmc_settings'] = widgets.Dropdown(options=['Default']) hbox_list.append(widgets.HBox([widgets.Label(value='MCMC settings', layout=layout), DROPDOWNS['mcmc_settings']])) DROPDOWNS['ns_settings'] = widgets.Dropdown(options=['Default']) hbox_list.append(widgets.HBox([widgets.Label(value='Nested Sampling settings', layout=layout), DROPDOWNS['ns_settings']])) vbox_list.append( widgets.VBox(hbox_list) ) #::: Limb darkening hbox_list = [] for inst in INPUT['inst_phot']: DROPDOWNS['host_ld_law_'+inst] = widgets.Dropdown(options=['None','Linear','Quadratic','Sing'], value='Quadratic') hbox_list.append( widgets.HBox([widgets.Label(value='Host limb darkening '+inst, layout=layout), DROPDOWNS['host_ld_law_'+inst]]) ) if DROPDOWNS['planet_or_EB'].value == 'EBs': for companion in INPUT['companions_all']: DROPDOWNS[companion+'_ld_law_'+inst] = widgets.Dropdown(options=['None','Linear','Quadratic','Sing']) hbox_list.append( widgets.HBox([widgets.Label(value=companion+' limb darkening '+inst, layout=layout), DROPDOWNS[companion+'_ld_law_'+inst]]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: Baseline settings hbox_list = [] for inst in INPUT['inst_phot']: DROPDOWNS['baseline_flux_'+inst] = widgets.Dropdown(options=['sample_offset', 'sample_linear', 'sample_GP_Matern32', 'sample_GP_SHO', 'sample_GP_real', 'sample_GP_complex', 'hybrid_offset', 'hybrid_poly_1', 'hybrid_poly_2', 'hybrid_poly_3', 'hybrid_poly_4', 'hybrid_spline'], value='hybrid_offset') hbox_list.append( widgets.HBox([widgets.Label(value='Baseline flux '+inst, layout=layout), DROPDOWNS['baseline_flux_'+inst]]) ) for inst in INPUT['inst_rv']: DROPDOWNS['baseline_rv_'+inst] = widgets.Dropdown(options=['sample_offset', 'sample_linear', 'sample_GP_Matern32', 'sample_GP_SHO', 'sample_GP_real', 'sample_GP_complex', 'hybrid_offset', 'hybrid_poly_1', 'hybrid_poly_2', 'hybrid_poly_3', 'hybrid_poly_4', 'hybrid_spline'], value='hybrid_offset') hbox_list.append( widgets.HBox([widgets.Label(value='Baseline RV '+inst, layout=layout), DROPDOWNS['baseline_rv_'+inst]]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: Error settings hbox_list = [] for inst in INPUT['inst_phot']: DROPDOWNS['error_flux_'+inst] = widgets.Dropdown(options=['sample', 
'hybrid'], value='sample') hbox_list.append( widgets.HBox([widgets.Label(value='Error flux '+inst, layout=layout), DROPDOWNS['error_flux_'+inst]]) ) for inst in INPUT['inst_rv']: DROPDOWNS['error_rv_'+inst] = widgets.Dropdown(options=['sample', 'hybrid'], value='sample') hbox_list.append( widgets.HBox([widgets.Label(value='Error RV '+inst, layout=layout), DROPDOWNS['error_rv_'+inst]]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: Exposure time interpolation hbox_list = [] for inst in INPUT['inst_all']: DROPDOWNS['t_exp_'+inst] = widgets.Text( placeholder='None' ) hbox_list.append( widgets.HBox([widgets.Label(value='Exposure time '+inst, layout=layout), DROPDOWNS['t_exp_'+inst], widgets.Label(value='days', layout=layout)]) ) for inst in INPUT['inst_all']: DROPDOWNS['t_exp_n_int_'+inst] = widgets.Text( placeholder='None' ) hbox_list.append( widgets.HBox([widgets.Label(value='Interpolation points '+inst, layout=layout), DROPDOWNS['t_exp_n_int_'+inst], widgets.Label(value='(integer)', layout=layout)]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: Number of spots hbox_list = [] for inst in INPUT['inst_all']: DROPDOWNS['host_N_spots_'+inst] = widgets.Text( placeholder='None' ) hbox_list.append( widgets.HBox([widgets.Label(value='host: Nr. of spots '+inst, layout=layout), DROPDOWNS['host_N_spots_'+inst], widgets.Label(value='(integer)', layout=layout)]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: Number of flares hbox_list = [] DROPDOWNS['N_flares'] = widgets.Text( placeholder='None' ) hbox_list.append( widgets.HBox([widgets.Label(value='Nr. of flares', layout=layout), DROPDOWNS['N_flares'], widgets.Label(value='(integer)', layout=layout)]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: Fit TTVs? hbox_list = [] DROPDOWNS['fit_ttvs'] = widgets.Dropdown(options=["yes","no"], value="no") hbox_list.append( widgets.HBox([widgets.Label(value='Fit TTVs?', layout=layout), DROPDOWNS['fit_ttvs']]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: Stellar grid (e.g. use "sparse" to speed up intense spot computations) hbox_list = [] for inst in INPUT['inst_all']: DROPDOWNS['host_grid_'+inst] = widgets.Dropdown(options=["very_sparse", "sparse", "default", "fine", "very_fine"], value="default") hbox_list.append( widgets.HBox([widgets.Label(value='Host grid '+inst, layout=layout), DROPDOWNS['host_grid_'+inst]]) ) if DROPDOWNS['planet_or_EB'].value == 'EBs': for companion in INPUT['companions_all']: DROPDOWNS[companion+'_grid_'+inst] = widgets.Dropdown(options=["very_sparse", "sparse", "default", "fine", "very_fine"], value="default") hbox_list.append( widgets.HBox([widgets.Label(value=companion+' grid '+inst, layout=layout), DROPDOWNS[companion+'_grid_'+inst]]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: Stellar shape (e.g. 
use "roche" for ellipsoidal variablity) hbox_list = [] for inst in INPUT['inst_all']: DROPDOWNS['host_shape_'+inst] = widgets.Dropdown(options=["roche", "roche_v", "sphere", "poly1p5", "poly3p0", "love"], value="sphere") hbox_list.append( widgets.HBox([widgets.Label(value='Host shape '+inst, layout=layout), DROPDOWNS['host_shape_'+inst]]) ) if DROPDOWNS['planet_or_EB'].value == 'EBs': for companion in INPUT['companions_all']: DROPDOWNS[companion+'_shape_'+inst] = widgets.Dropdown(options=["roche", "roche_v", "sphere", "poly1p5", "poly3p0", "love"], value="sphere") hbox_list.append( widgets.HBox([widgets.Label(value=companion+' shape '+inst, layout=layout), DROPDOWNS[companion+'_shape_'+inst]]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: Flux weighted RVs ("Yes" for Rossiter-McLaughlin effect) hbox_list = [] for inst in INPUT['inst_rv']: for companion in INPUT['companions_rv']: DROPDOWNS[companion+'_flux_weighted_'+inst] = widgets.Dropdown(options=['No', 'Yes']) hbox_list.append( widgets.HBox([widgets.Label(value=companion+' flux weighted RV '+inst, layout=layout), DROPDOWNS[companion+'_flux_weighted_'+inst]]) ) vbox_list.append( widgets.VBox(hbox_list) ) #::: accordion accordion = widgets.Accordion(children=vbox_list) accordion.set_title(0, 'Fitting & performance') accordion.set_title(1, 'Limb darkening laws') accordion.set_title(2, 'Baseline sampling') accordion.set_title(3, 'Error sampling') accordion.set_title(4, 'Exposure time interpolation') accordion.set_title(5, 'Number of spots') accordion.set_title(6, 'Number of flares') accordion.set_title(7, 'TTVs') accordion.set_title(8, 'Stellar grid (e.g. use "very_sparse" to speed up computations)') accordion.set_title(9, 'Stellar shape (e.g. use "roche" for ellipsoidal variablity)') accordion.set_title(10, 'Flux weighted RVs (e.g. use "true" for Rossiter-McLaughlin effect)') display(accordion) #::: confirm button button_2b = widgets.Button(description='Confirm', button_style='') display(button_2b) def confirm(change): button_2b.style.button_color = 'lightgreen' INPUT['show_step_2c'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) button_2b.on_click(confirm) if 'show_step_2c' in INPUT and INPUT['show_step_2c'] == True: BUTTONS['2c'] = widgets.Button(description='Create settings.csv', button_style='') checkbox_2c = widgets.Checkbox(description='Overwrite old settings.csv (if existing)', value=False) display(widgets.HBox([BUTTONS['2c'], checkbox_2c])) def create_settings_file(change): clear_output() display(widgets.HBox([BUTTONS['2c'], checkbox_2c])) go_ahead = True if 'datadir' not in INPUT: warnings.warn('No allesfitter woking directory selected yet. Please go back to step 1) and fill in all fields.') go_ahead = False if os.path.exists(os.path.join(INPUT['datadir'],'settings.csv')) and (checkbox_2c.value==False): warnings.warn('The selected working directory '+os.path.join(INPUT['datadir'],'settings.csv')+' already exists. 
To proceed, give permission to overwrite it.') go_ahead = False if go_ahead: fname_settings = os.path.join(INPUT['datadir'], 'settings.csv') with open(fname_settings, 'w+') as f: f.write('#name,value\n') def fwrite_settings(text): with open(fname_settings, 'a') as f: f.write(text+'\n') fwrite_settings('###############################################################################,') fwrite_settings('# General settings,') fwrite_settings('###############################################################################,') fwrite_settings('companions_phot,'+text_companions_phot.value) fwrite_settings('companions_rv,'+text_companions_rv.value) fwrite_settings('inst_phot,'+text_inst_phot.value) fwrite_settings('inst_rv,'+text_inst_rv.value) fwrite_settings('###############################################################################,') fwrite_settings('# Fit performance settings,') fwrite_settings('###############################################################################,') if DROPDOWNS['multiprocessing'].value=='No': fwrite_settings('multiprocess,False') elif DROPDOWNS['multiprocessing'].value=='always on all - 1 cores on any system': fwrite_settings('multiprocess,True') fwrite_settings('multiprocess_cores,all') else: fwrite_settings('multiprocess,True') fwrite_settings('multiprocess_cores,'+DROPDOWNS['multiprocessing'].value.split(' ')[1]) if DROPDOWNS['fit_type'].value=='Transit (fast)': fwrite_settings('fast_fit,True') fwrite_settings('fast_fit_width,0.3333333333333333') fwrite_settings('secondary_eclipse,False') fwrite_settings('phase_curve,False') elif DROPDOWNS['fit_type'].value=='Transit and occultation (fast)': fwrite_settings('fast_fit,True') fwrite_settings('fast_fit_width,0.3333333333333333') fwrite_settings('secondary_eclipse,True') fwrite_settings('phase_curve,False') elif DROPDOWNS['fit_type'].value=='Full lightcurve (slow)': fwrite_settings('fast_fit,False') fwrite_settings('fast_fit_width,') fwrite_settings('secondary_eclipse,True') fwrite_settings('phase_curve,True') fwrite_settings('phase_curve_style,GP') if DROPDOWNS['shift_epoch'].value=='Yes': fwrite_settings('shift_epoch,True') for companion in INPUT['companions_all']: fwrite_settings('inst_for_'+companion+'_epoch,all') fwrite_settings('###############################################################################,') fwrite_settings('# MCMC settings,') fwrite_settings('###############################################################################,') if DROPDOWNS['mcmc_settings'].value=='Default': fwrite_settings('mcmc_nwalkers,100') fwrite_settings('mcmc_total_steps,2000') fwrite_settings('mcmc_burn_steps,1000') fwrite_settings('mcmc_thin_by,1') fwrite_settings('###############################################################################,') fwrite_settings('# Nested Sampling settings,') fwrite_settings('###############################################################################,') if DROPDOWNS['ns_settings'].value=='Default': fwrite_settings('ns_modus,dynamic') fwrite_settings('ns_nlive,500') fwrite_settings('ns_bound,single') fwrite_settings('ns_sample,rwalk') fwrite_settings('ns_tol,0.01') fwrite_settings('###############################################################################,') fwrite_settings("# Limb darkening law per object and instrument,") fwrite_settings("# if 'lin' one corresponding parameter called 'ldc_q1_inst' has to be given in params.csv,") fwrite_settings("# if 'quad' two corresponding parameter called 'ldc_q1_inst' and 'ldc_q2_inst' have to be given in params.csv,") 
fwrite_settings("# if 'sing' three corresponding parameter called 'ldc_q1_inst'; 'ldc_q2_inst' and 'ldc_q3_inst' have to be given in params.csv,") fwrite_settings('###############################################################################,') def translate_ld(x): if x=='None': return '' elif x=='Linear': return 'lin' elif x=='Quadratic': return 'quad' elif x=='Sing': return 'sing' for inst in INPUT['inst_phot']: fwrite_settings('host_ld_law_'+inst+','+translate_ld(DROPDOWNS['host_ld_law_'+inst].value)) if DROPDOWNS['planet_or_EB'].value == 'EBs': for companion in INPUT['companions_all']: fwrite_settings(companion+'_ld_law_'+inst+','+translate_ld(DROPDOWNS[companion+'_ld_law_'+inst].value)) fwrite_settings('###############################################################################,') fwrite_settings("# Baseline settings per instrument,") fwrite_settings("# baseline params per instrument: sample_offset / sample_linear / sample_GP / hybrid_offset / hybrid_poly_1 / hybrid_poly_2 / hybrid_poly_3 / hybrid_pol_4 / hybrid_spline / hybrid_GP,") fwrite_settings("# if 'sample_offset' one corresponding parameter called 'baseline_offset_key_inst' has to be given in params.csv,") fwrite_settings("# if 'sample_linear' two corresponding parameters called 'baseline_a_key_inst' and 'baseline_b_key_inst' have to be given in params.csv,") fwrite_settings("# if 'sample_GP' two corresponding parameters called 'baseline_gp1_key_inst' and 'baseline_gp2_key_inst' have to be given in params.csv,") fwrite_settings('###############################################################################,') for inst in INPUT['inst_phot']: fwrite_settings('baseline_flux_'+inst+','+DROPDOWNS['baseline_flux_'+inst].value) for inst in INPUT['inst_rv']: fwrite_settings('baseline_rv_'+inst+','+DROPDOWNS['baseline_rv_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings("# Error settings per instrument,") fwrite_settings("# errors (overall scaling) per instrument: sample / hybrid,") fwrite_settings("# if 'sample' one corresponding parameter called 'ln_err_key_inst' (photometry) or 'ln_jitter_key_inst' (RV) has to be given in params.csv,") fwrite_settings('###############################################################################,') for inst in INPUT['inst_phot']: fwrite_settings('error_flux_'+inst+','+DROPDOWNS['error_flux_'+inst].value) for inst in INPUT['inst_rv']: fwrite_settings('error_rv_'+inst+','+DROPDOWNS['error_rv_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Exposure times for interpolation,') fwrite_settings('# needs to be in the same units as the time series,') fwrite_settings('# if not given the observing times will not be interpolated leading to biased results,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('t_exp_'+inst+','+DROPDOWNS['t_exp_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Number of points for exposure interpolation,') fwrite_settings('# Sample as fine as possible; generally at least with a 2 min sampling for photometry,') fwrite_settings('# n_int=5 was found to be a good number of interpolation points for any short photometric cadence t_exp;,') fwrite_settings('# increase to at least n_int=10 for 30 min phot. 
cadence,') fwrite_settings('# the impact on RV is not as drastic and generally n_int=5 is fine enough,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('t_exp_n_int_'+inst+','+DROPDOWNS['t_exp_n_int_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Number of spots per object and instrument,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('host_N_spots_'+inst+','+DROPDOWNS['host_N_spots_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Number of flares (in total),') fwrite_settings('###############################################################################,') fwrite_settings('N_flares'+','+DROPDOWNS['N_flares'].value) fwrite_settings('###############################################################################,') fwrite_settings('# TTVs,') fwrite_settings('###############################################################################,') if DROPDOWNS['fit_ttvs'].value == 'no': fwrite_settings('fit_ttvs'+',False') elif DROPDOWNS['fit_ttvs'].value == 'yes': fwrite_settings('fit_ttvs'+',True') fwrite_settings('###############################################################################,') fwrite_settings('# Stellar grid per object and instrument,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('host_grid_'+inst+','+DROPDOWNS['host_grid_'+inst].value) if DROPDOWNS['planet_or_EB'].value == 'EBs': for companion in INPUT['companions_all']: fwrite_settings(companion+'_grid_'+inst+','+DROPDOWNS[companion+'_grid_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Stellar shape per object and instrument,') fwrite_settings('###############################################################################,') for inst in INPUT['inst_all']: fwrite_settings('host_shape_'+inst+','+DROPDOWNS['host_shape_'+inst].value) if DROPDOWNS['planet_or_EB'].value == 'EBs': for companion in INPUT['companions_all']: fwrite_settings(companion+'_shape_'+inst+','+DROPDOWNS[companion+'_shape_'+inst].value) fwrite_settings('###############################################################################,') fwrite_settings('# Flux weighted RVs per object and instrument,') fwrite_settings('# ("Yes" for Rossiter-McLaughlin effect),') fwrite_settings('###############################################################################,') for inst in INPUT['inst_rv']: for companion in INPUT['companions_rv']: fwrite_settings(companion+'_flux_weighted_'+inst+','+DROPDOWNS[companion+'_flux_weighted_'+inst].value) BUTTONS['2c'].style.button_color = 'lightgreen' print('Done.') INPUT['show_step_3'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) BUTTONS['2c'].on_click(create_settings_file)
_____no_output_____
MIT
allesfitter/epic3752_ini.ipynb
jpdeleon/kesprint2
3. parameters
import chronos as cr all_campaigns = cr.get_all_campaigns(epic) camps = "c".join([str(c).zfill(2) for c in all_campaigns]) camps import pandas as pd from glob import glob fp = f"{loc}/everest_w_limbdark_prior2_new_ini/EPIC{epic}_c{camps}" csvs = glob(f"{fp}/*mcmc-results.csv") assert len(csvs)>0 ds = {} for i,csv in enumerate(sorted(csvs)): d = pd.read_csv(csv, squeeze=True, index_col=0) ds[i] = d df = pd.concat(ds) df d.columns for i,d in df.iterrows(): print(i) for c in ['$a/R_{\star}$', '$R_p/R_{\star}$', '$T_{0}$', '$P$']: print(f"{c}: {d.loc[c]:.4f}, {d.loc[c+'_lo']:.5f}, {d.loc[c+'_hi']:.5f}") print() if 'show_step_3' in INPUT and INPUT['show_step_3'] == True: #::: placeholder placeholder = widgets.Label(value='', visible=False, layout=layout) #::: helper function def add_row(key, label, hbox_list, median=0, lerr=0, uerr=0, transform='trunc_normal * 5', fit_value=False): INPUT[key+'_median'] = widgets.FloatText(value=median, placeholder='NaN', layout=layout_textbox) INPUT[key+'_lerr'] = widgets.FloatText(value=lerr, placeholder='NaN', layout=layout_textbox) INPUT[key+'_uerr'] = widgets.FloatText(value=uerr, placeholder='NaN', layout=layout_textbox) INPUT[key+'_bounds_type'] = widgets.Dropdown(options=['uniform', 'uniform * 5', 'trunc_normal', 'trunc_normal * 5'], value=transform, layout=layout) INPUT[key+'_fit'] = widgets.Checkbox(value=fit_value, description='fit?', layout=layout_checkbox) buf = placeholder if key in [ companion+'_rsuma' for companion in INPUT['companions_all'] ]: INPUT[key+'_input_type'] = widgets.Dropdown(options=['(R_comp + R_host) / a', 'R_host / a', 'a / R_host'], layout=layout) buf = INPUT[key+'_input_type'] elif key in [ companion+'_cosi' for companion in INPUT['companions_all'] ]: INPUT[key+'_input_type'] = widgets.Dropdown(options=['cos(i)', 'i (degree)', 'i (rad)'], layout=layout) buf = INPUT[key+'_input_type'] hbox_list.append( widgets.HBox([widgets.Label(value=label, layout=layout), INPUT[key+'_median'], widgets.Label(value="-"), INPUT[key+'_lerr'], widgets.Label(value="+"), INPUT[key+'_uerr'], buf, INPUT[key+'_bounds_type'], INPUT[key+'_fit']]) ) #::: start display(Markdown('### Initial guess and error bars')) display(Markdown('These values will be converted into either uniform or truncated normal priors (with physical boundaries). 
The errors can be blown up by a factor of 5.')) display(Markdown('#### Astrophysical params per companion')) vbox_list = [] for companion in INPUT['companions_all']: # display(Markdown('##### Companion '+companion)) hbox_list = [] add_row(companion+'_rsuma', 'Radii & semi-major axis:', hbox_list) add_row(companion+'_rr', '$R_'+companion+' / R_\star$:', hbox_list) add_row(companion+'_cosi', 'Inclination:', hbox_list) add_row(companion+'_epoch', 'Epoch (d):', hbox_list) add_row(companion+'_period', 'Period (d):', hbox_list) if companion in INPUT['companions_rv']: add_row(companion+'_K', 'K (km/s):', hbox_list) add_row(companion+'_f_c', '$\sqrt{e} \cos{\omega}$:', hbox_list) add_row(companion+'_f_s', '$\sqrt{e} \sin{\omega}$:', hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) tab = widgets.Tab(children=vbox_list) for i, comp in enumerate(INPUT['companions_all']): tab.set_title(i, 'Companion '+comp) display(tab) # else: # print('Complete previous steps first.') from configobj import ConfigObj epic = 211413752 loc = '/home/jp/github/research/project/kesprint/data/transit_fit' co = ConfigObj(f'{loc}/{epic}.ini') co star = {i:list(map(float, co['star'][i]) )for i in co['star']} star from limbdark import claret u = claret('Kp', *star['teff'], *star['logg'], *star['feh'], # transform=True ) u from limbdark.limbdark import u_to_q u_to_q(u[0], u[2]), u_to_q(u[1], u[3]) if 'show_step_3' in INPUT and INPUT['show_step_3'] == True: display(Markdown('### Advanced params')) vbox_list = [] #::: Dilution per instrument hbox_list = [] for inst in INPUT['inst_phot']: add_row('dil_'+inst, 'Dilution '+inst, hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: Limb darkening per object and instrument hbox_list = [] for inst in INPUT['inst_phot']: if DROPDOWNS['host_ld_law_'+inst].value=='None': pass elif DROPDOWNS['host_ld_law_'+inst].value=='Linear': add_row('host_ldc_q1_'+inst, 'host LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) elif DROPDOWNS['host_ld_law_'+inst].value=='Quadratic': add_row('host_ldc_q1_'+inst, 'host LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row('host_ldc_q2_'+inst, 'host LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) elif DROPDOWNS['host_ld_law_'+inst].value=='Sing': add_row('host_ldc_q1_'+inst, 'host LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row('host_ldc_q2_'+inst, 'host LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row('host_ldc_q3_'+inst, 'host LD q3 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) if DROPDOWNS['planet_or_EB']=='EBs': for companion in INPUT['companions_phot']: if DROPDOWNS[companion+'_ld_law_'+inst].value=='None': pass elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Linear': add_row(companion+'_ldc_q1_'+inst, companion+' LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Quadratic': add_row(companion+'_ldc_q1_'+inst, companion+' LD q1 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row(companion+'_ldc_q2_'+inst, companion+' LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Sing': add_row(companion+'_ldc_q1_'+inst, companion+' LD q1 
'+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row(companion+'_ldc_q2_'+inst, companion+' LD q2 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) add_row(companion+'_ldc_q3_'+inst, companion+' LD q3 '+inst, hbox_list, median=0.5, lerr=0.5, uerr=0.5, transform='uniform', fit_value=True) vbox_list.append( widgets.VBox(hbox_list) ) #::: Surface brightness ratio per system and instrument hbox_list = [] for inst in INPUT['inst_all']: for companion in INPUT['companions_all']: add_row(companion+'_sbratio_'+inst, companion+' sbratio '+inst, hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: Geometric albedo per object and instrument hbox_list = [] for inst in INPUT['inst_all']: add_row('host_geom_albedo_'+inst, 'host geom. alb. '+inst, hbox_list) for companion in INPUT['companions_all']: add_row(companion+'_geom_albedo_'+inst, companion+' geom. alb. '+inst, hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: Gravity darkening per object and instrument hbox_list = [] for inst in INPUT['inst_all']: add_row('host_gdc_'+inst, 'host grav. dark. '+inst, hbox_list) if DROPDOWNS['planet_or_EB']=='EBs': for companion in INPUT['companions_all']: add_row(companion+'_gdc_'+inst, companion+' grav. dark. '+inst, hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: Stellar spots per object and instrument hbox_list = [] for inst in INPUT['inst_all']: if len(DROPDOWNS['host_N_spots_'+inst].value): N_spots = int(DROPDOWNS['host_N_spots_'+inst].value) for i in range(1,N_spots+1): add_row('host_spot_'+str(i)+'_lat_'+inst, 'host spot '+str(i)+' lat. '+inst+' (deg)', hbox_list) add_row('host_spot_'+str(i)+'_long_'+inst, 'host spot '+str(i)+' long. '+inst+' (deg)', hbox_list) add_row('host_spot_'+str(i)+'_size_'+inst, 'host spot '+str(i)+' size '+inst+' (deg)', hbox_list) add_row('host_spot_'+str(i)+'_brightness_'+inst,'host spot '+str(i)+' brightness '+inst, hbox_list) # To keep the GUI simplistic, spots on companions are only available by manually editing the params.csv and settings.csv files # if DROPDOWNS['planet_or_EB'].value == 'EBs': # for companion in INPUT['companions_all']: # if len(DROPDOWNS[companion+'_N_spots_'+inst].value): # N_spots = int(DROPDOWNS[companion+'_N_spots_'+inst].value) # for i in range(1,N_spots+1): # add_row(companion+'_spot_'+str(i)+'_lat_'+inst, companion+' spot '+str(i)+' lat. '+inst+' (deg)', hbox_list) # add_row(companion+'_spot_'+str(i)+'_long_'+inst, companion+' spot '+str(i)+' long. 
'+inst+' (deg)', hbox_list) # add_row(companion+'_spot_'+str(i)+'_size_'+inst, companion+' spot '+str(i)+' size '+inst+' (deg)', hbox_list) # add_row(companion+'_spot_'+str(i)+'_brightness_'+inst, companion+' spot '+str(i)+' brightness '+inst, hbox_list) if len(hbox_list)==0: pass #hbox_list.append(widgets.Label(value='N_spots was set to "None" for all objects and instruments.')) vbox_list.append( widgets.VBox(hbox_list) ) #::: Flares hbox_list = [] if len(DROPDOWNS['N_flares'].value): N_flares = int(DROPDOWNS['N_flares'].value) for i in range(1,N_flares+1): add_row('flare_tpeak_'+str(i), 'Flare tpeak '+str(i), hbox_list) add_row('flare_fwhm_'+str(i), 'Flare fwhm '+str(i), hbox_list) add_row('flare_ampl_'+str(i), 'Flare ampl '+str(i), hbox_list) vbox_list.append( widgets.VBox(hbox_list) ) #::: TTV per transit hbox_list = [] if (DROPDOWNS['fit_ttvs'].value)=='yes': for companion in INPUT['companions_all']: add_row(companion+'_ttv_per_transit', 'TTV per transit', hbox_list, median=0, lerr=0.00347222, uerr=0.00347222, transform='uniform', fit_value=True) vbox_list.append( widgets.VBox(hbox_list) ) #::: Errors per instrument hbox_list = [] for inst in INPUT['inst_phot']: if DROPDOWNS['error_flux_'+inst].value == 'sample': add_row('ln_err_flux_'+inst, 'ln err flux '+inst, hbox_list, median=-7, lerr=8, uerr=7, transform='uniform', fit_value=True) else: pass #hbox_list.append(widgets.Label(value='Not applicable, error sampling was set to "hybrid".')) for inst in INPUT['inst_rv']: if DROPDOWNS['error_rv_'+inst].value == 'sample': add_row('ln_jitter_rv_'+inst, 'ln jitter rv '+inst, hbox_list, median=-3, lerr=12, uerr=3, transform='uniform', fit_value=True) else: pass #hbox_list.append(widgets.Label(value='Not applicable, error sampling was set to "hybrid".')) vbox_list.append( widgets.VBox(hbox_list) ) #::: Baselines per instrument hbox_list = [] for inst in INPUT['inst_all']: if inst in INPUT['inst_phot']: key = 'flux' elif inst in INPUT['inst_rv']: key = 'rv' if DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_Matern32': add_row('baseline_gp_matern32_lnsigma_'+key+'_'+inst, 'baseline gp Matern32 lnsigma '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_matern32_lnrho_'+key+'_'+inst, 'baseline gp Matern32 lnrho '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_SHO': add_row('baseline_gp_sho_lnS0_'+key+'_'+inst, 'baseline gp SHO lnS0 '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_sho_lnQ_'+key+'_'+inst, 'baseline gp SHO lnQ '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_sho_lnomega0_'+key+'_'+inst, 'baseline gp SHO lnomega0 '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_real': add_row('baseline_gp_real_lna_'+key+'_'+inst, 'baseline gp real lna '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_real_lnc_'+key+'_'+inst, 'baseline gp real lnc '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_complex': add_row('baseline_gp_complex_lna_'+key+'_'+inst, 'baseline gp complex lna '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) 
add_row('baseline_gp_complex_lnc_'+key+'_'+inst, 'baseline gp complex lnc '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_complex_lnb_'+key+'_'+inst, 'baseline gp complex lnb '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) add_row('baseline_gp_complex_lnd_'+key+'_'+inst, 'baseline gp complex lnd '+inst, hbox_list, median=0, lerr=15, uerr=15, transform='uniform', fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_offset': add_row('baseline_offset_'+key+'_'+inst, 'baseline offset '+inst, hbox_list, median=0, lerr=0, uerr=0, transform='uniform', fit_value=True) elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_linear': add_row('baseline_offset_'+key+'_'+inst, 'baseline offset '+inst, hbox_list, median=0, lerr=0, uerr=0, transform='uniform', fit_value=True) add_row('baseline_slope_'+key+'_'+inst, 'baseline slope '+inst, hbox_list, median=0, lerr=0, uerr=0, transform='uniform', fit_value=True) vbox_list.append( widgets.VBox(hbox_list) ) #::: accordion accordion = widgets.Accordion(children=vbox_list) accordion.set_title(0, 'Dilution') accordion.set_title(1, 'Limb darkening') accordion.set_title(2, 'Surface brightness ratio') accordion.set_title(3, 'Geometric albedo') accordion.set_title(4, 'Gravity darkening') accordion.set_title(5, 'Stellar spots') accordion.set_title(6, 'Flares') accordion.set_title(7, 'TTVs') accordion.set_title(8, 'Errors & jitter') accordion.set_title(9, 'Baselines') display(accordion) if 'show_step_3' in INPUT and INPUT['show_step_3'] == True: nan_fields = False button_create_params_file = widgets.Button(description='Create params.csv', button_style='') checkbox_overwrite_params_file = widgets.Checkbox(description='Overwrite old params.csv (if existing)', value=False) hbox_params_file = widgets.HBox([button_create_params_file, checkbox_overwrite_params_file]) display(hbox_params_file) def create_params_file(change): clear_output() display(hbox_params_file) print('Calculating... this might take a few seconds. Please be patient, you will get notified once everything is completed.') go_ahead = True if 'datadir' not in INPUT: warnings.warn('No allesfitter woking directory selected yet. Please go back to step 1) and fill in all fields.') go_ahead = False if os.path.exists(os.path.join(INPUT['datadir'],'params.csv')) and (checkbox_overwrite_params_file.value==False): warnings.warn('The selected working directory '+os.path.join(INPUT['datadir'],'params.csv')+' already exists. 
To proceed, give permission to overwrite it.') go_ahead = False if go_ahead: INPUT['fname_params'] = os.path.join(INPUT['datadir'], 'params.csv') with open(INPUT['fname_params'], 'w+') as f: f.write('#name,value,fit,bounds,label,unit\n') def get_median_and_error_strings(text_median, text_lerr, text_uerr): if (text_median.value == ''): median = 'NaN' nan_fields = True else: median = text_median.value if (text_lerr.value == '') or (text_uerr.value == ''): err = 'NaN' nan_fields = True else: err = str( 5.* np.max( [float(text_lerr.value), float(text_uerr.value)] ) ) median, err, _ = round_txt_separately( float(median), float(err), float(err) ) return median, err #:::: astrophysical parameters per system for companion in INPUT['companions_all']: fwrite_params_line('#companion '+companion+' astrophysical params,,,,,') #::: rr fwrite_params(companion+'_rr', '$R_'+companion+' / R_\star$', '', [0,1]) #::: rsuma if INPUT[companion+'_rsuma_input_type'].value=='(R_comp + R_host) / a': pass elif INPUT[companion+'_rsuma_input_type'].value=='R_host / a': Rstar_over_a = [ float(INPUT[companion+'_rsuma_median'].value), float(INPUT[companion+'_rsuma_lerr'].value), float(INPUT[companion+'_rsuma_uerr'].value) ] Rp_over_Rstar = [ float(INPUT[companion+'_rr_median'].value), float(INPUT[companion+'_rr_lerr'].value), float(INPUT[companion+'_rr_uerr'].value) ] INPUT[companion+'_rsuma_median'].value, INPUT[companion+'_rsuma_lerr'].value, INPUT[companion+'_rsuma_uerr'].value \ = get_Rsuma_from_Rstar_over_a(Rstar_over_a, Rp_over_Rstar) INPUT[companion+'_rsuma_input_type'].value = '(R_comp + R_host) / a' elif INPUT[companion+'_rsuma_input_type'].value=='a / R_host': a_over_Rstar = [ float(INPUT[companion+'_rsuma_median'].value), float(INPUT[companion+'_rsuma_lerr'].value), float(INPUT[companion+'_rsuma_uerr'].value) ] Rp_over_Rstar = [ float(INPUT[companion+'_rr_median'].value), float(INPUT[companion+'_rr_lerr'].value), float(INPUT[companion+'_rr_uerr'].value) ] INPUT[companion+'_rsuma_median'].value, INPUT[companion+'_rsuma_lerr'].value, INPUT[companion+'_rsuma_uerr'].value \ = get_Rsuma_from_a_over_Rstar(a_over_Rstar, Rp_over_Rstar) INPUT[companion+'_rsuma_input_type'].value = '(R_comp + R_host) / a' else: raise ValueError('Oops, something went wrong.') fwrite_params(companion+'_rsuma', '$(R_\star + R_'+companion+') / a_'+companion+'$', '', [0,1]) #::: cosi if INPUT[companion+'_cosi_input_type'].value=='cos(i)': pass elif INPUT[companion+'_cosi_input_type'].value=='i (degree)': incl = [ float(INPUT[companion+'_cosi_median'].value), float(INPUT[companion+'_cosi_lerr'].value), float(INPUT[companion+'_cosi_uerr'].value) ] INPUT[companion+'_cosi_median'].value, INPUT[companion+'_cosi_lerr'].value, INPUT[companion+'_cosi_uerr'].value \ = get_cosi_from_i(incl) INPUT[companion+'_cosi_input_type'].value = 'cos(i)' elif INPUT[companion+'_cosi_input_type'].value=='i (rad)': incl = [ float(INPUT[companion+'_cosi_median'].value)/180.*np.pi, float(INPUT[companion+'_cosi_lerr'].value)/180.*np.pi, float(INPUT[companion+'_cosi_uerr'].value)/180.*np.pi ] INPUT[companion+'_cosi_median'].value, INPUT[companion+'_cosi_lerr'].value, INPUT[companion+'_cosi_uerr'].value \ = get_cosi_from_i(incl) INPUT[companion+'_cosi_input_type'].value = 'cos(i)' fwrite_params(companion+'_cosi', '$\cos{i_'+companion+'}$', '', [0,1]) #::: epoch fwrite_params(companion+'_epoch', '$T_{0;'+companion+'}$', '$\mathrm{BJD}$', [-1e12,1e12]) #::: period fwrite_params(companion+'_period', '$P_'+companion+'$', '$\mathrm{d}$', [-1e12,1e12]) #::: RV semi-amplitude 
if companion in INPUT['companions_rv']: fwrite_params(companion+'_K', '$K_'+companion+'$', '$\mathrm{km/s}$', [-1e12,1e12]) #::: eccentricity f_c fwrite_params(companion+'_f_c', '$\sqrt{e_'+companion+'} \cos{\omega_'+companion+'}$', '', [-1,1]) #::: eccentricity f_s fwrite_params(companion+'_f_s', '$\sqrt{e_'+companion+'} \sin{\omega_'+companion+'}$', '', [-1,1]) #::: dilution per instrument if len(INPUT['inst_phot']): fwrite_params_line('#dilution per instrument,,,,,') for inst in INPUT['inst_phot']: fwrite_params('dil_'+inst, '$D_\mathrm{0; '+inst+'}$', '', [0,1]) #fwrite_params('dil_'+inst+',0,0,trunc_normal 0 1 0 0,$D_\mathrm{0; '+inst+'}$,') #::: limb darkening coefficients per instrument if len(INPUT['inst_phot']): fwrite_params_line('#limb darkening coefficients per instrument,,,,,') for inst in INPUT['inst_phot']: #::: host if DROPDOWNS['host_ld_law_'+inst].value=='None': pass elif DROPDOWNS['host_ld_law_'+inst].value=='Linear': fwrite_params('host_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) elif DROPDOWNS['host_ld_law_'+inst].value=='Quadratic': fwrite_params('host_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params('host_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1]) elif DROPDOWNS['host_ld_law_'+inst].value=='Sing': fwrite_params('host_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params('host_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params('host_ldc_q3_'+inst, '$q_{3; \mathrm{'+inst+'}}$', '', [0,1]) #::: companion (if EB) if DROPDOWNS['planet_or_EB']=='EBs': if DROPDOWNS[companion+'_ld_law_'+inst].value=='None': pass elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Linear': fwrite_params(companion+'_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Quadratic': fwrite_params(companion+'_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params(companion+'_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1]) elif DROPDOWNS[companion+'_ld_law_'+inst].value=='Sing': fwrite_params(companion+'_ldc_q1_'+inst, '$q_{1; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params(companion+'_ldc_q2_'+inst, '$q_{2; \mathrm{'+inst+'}}$', '', [0,1]) fwrite_params(companion+'_ldc_q3_'+inst, '$q_{3; \mathrm{'+inst+'}}$', '', [0,1]) #::: brightness ratio per system and instrument if len(INPUT['inst_all']): fwrite_params_line('#surface brightness per instrument and companion,,,,,') for companion in INPUT['companions_all']: for inst in INPUT['inst_all']: fwrite_params(companion+'_sbratio_'+inst, '$J_{'+companion+'; \mathrm{'+inst+'}}$', '', [0,1]) #::: geometric albedo per system and instrument if len(INPUT['inst_all']): fwrite_params_line('#albedo per instrument and companion,,,,,') for inst in INPUT['inst_all']: fwrite_params('host_geom_albedo_'+inst, '$A_{\mathrm{geom}; host; \mathrm{'+inst+'}}$', '', [0,1]) for companion in INPUT['companions_all']: for inst in INPUT['inst_all']: fwrite_params(companion+'_geom_albedo_'+inst, '$A_{\mathrm{geom}; '+companion+'; \mathrm{'+inst+'}}$', '', [0,1]) #::: gravity darkening per object and instrument if len(INPUT['inst_all']): fwrite_params_line('#gravity darkening per instrument and companion,,,,,') for inst in INPUT['inst_all']: #::: host fwrite_params('host_gdc_'+inst, '$Grav. dark._{'+companion+'; \mathrm{'+inst+'}}$', '', [0,1]) #::: companion (if EB) if DROPDOWNS['planet_or_EB']=='EBs': for companion in INPUT['companions_all']: fwrite_params(companion+'_sbratio_'+inst, '$Grav. 
dark._{'+companion+'; \mathrm{'+inst+'}}$', '', [0,1]) #::: spots per object and instrument if len(INPUT['inst_all']): fwrite_params_line('#spots per instrument and companion,,,,,') for inst in INPUT['inst_all']: if len(DROPDOWNS['host_N_spots_'+inst].value): N_spots = int(DROPDOWNS['host_N_spots_'+inst].value) for i in range(1,N_spots+1): #::: host fwrite_params('host_spot_'+str(i)+'_long_'+inst, '$\mathrm{host: spot '+str(i)+' long. '+inst+'}$', '\mathrm{deg}', [0,360]) fwrite_params('host_spot_'+str(i)+'_lat_'+inst, '$\mathrm{host: spot '+str(i)+' lat. '+inst+'}$', '\mathrm{deg}', [-90,90]) fwrite_params('host_spot_'+str(i)+'_size_'+inst, '$\mathrm{host: spot '+str(i)+' size '+inst+'}$', '\mathrm{deg}', [0,30]) fwrite_params('host_spot_'+str(i)+'_brightness_'+inst, '$\mathrm{host: spot '+str(i)+' brightness '+inst+'}$', '', [0,1]) #::: companion (if EB) if DROPDOWNS['planet_or_EB']=='EBs': for companion in INPUT['companions_all']: if len(DROPDOWNS[companion+'_N_spots_'+inst].value): N_spots = int(DROPDOWNS[companion+'_N_spots_'+inst].value) fwrite_params(companion+'_spot_'+str(i)+'_long_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' long. '+inst+'}$', '\mathrm{deg}', [0,360]) fwrite_params(companion+'_spot_'+str(i)+'_lat_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' lat. '+inst+'}$', '\mathrm{deg}', [-90,90]) fwrite_params(companion+'_spot_'+str(i)+'_size_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' size '+inst+'}$', '\mathrm{deg}', [0,30]) fwrite_params(companion+'_spot_'+str(i)+'_brightness_'+inst, '$\mathrm{'+companion+': spot '+str(i)+' brightness '+inst+'}$', '', [0,1]) #::: flares if len(DROPDOWNS['N_flares'].value): fwrite_params_line('#flares,,,,,') N_flares = int(DROPDOWNS['N_flares'].value) for i in range(1,N_flares+1): fwrite_params('flare_tpeak_'+str(i), '$t_\mathrm{peak; flare '+str(i)+'}$', '$\mathrm{BJD}$', [-1e12,1e12]) fwrite_params('flare_ampl_'+str(i), '$A_\mathrm{flare '+str(i)+'}$', '$\mathrm{rel. flux.}$', [-1e12,1e12]) fwrite_params('flare_fwhm_'+str(i), '$FWHM_\mathrm{flare '+str(i)+'}$', '$\mathrm{BJD}$', [-1e12,1e12]) #::: TTV per instrument if (DROPDOWNS['fit_ttvs'].value=='yes'): fwrite_params_line('#TTV per transit,,,,,') warnings.warn('TTV priors in params.csv will not be set until you also complete step 4 (adding the data files).') # for inst in INPUT['inst_phot']: # fwrite_params('ttv_'+inst, '$\mathrm{TTV_'+inst+'}$', '$\mathrm{d}$', [-1e12,1e12]) #::: errors and baselines - keep track of rows INPUT['N_last_rows'] = 0 #::: errors per instrument if any( [ 'sample' in DROPDOWNS['error_flux_'+inst].value for inst in INPUT['inst_phot'] ] ) \ or any( [ 'sample' in DROPDOWNS['error_rv_'+inst].value for inst in INPUT['inst_rv'] ] ): fwrite_params_line('#errors per instrument,') INPUT['N_last_rows'] += 1 for inst in INPUT['inst_phot']: if 'hybrid' not in DROPDOWNS['error_flux_'+inst].value: fwrite_params('ln_err_flux_'+inst, '$\ln{\sigma_\mathrm{'+inst+'}}$', '$\ln{ \mathrm{rel. 
flux.} }$', [-15,0]) INPUT['N_last_rows'] += 1 for inst in INPUT['inst_rv']: if 'hybrid' not in DROPDOWNS['error_rv_'+inst].value: fwrite_params('ln_jitter_rv_'+inst, '$\ln{\sigma_\mathrm{jitter; '+inst+'}}$', '$\ln{ \mathrm{km/s} }$', [-15,0]) INPUT['N_last_rows'] += 1 #::: baseline if any( [ 'sample' in DROPDOWNS['baseline_flux_'+inst].value for inst in INPUT['inst_phot'] ] ) \ or any( [ 'sample' in DROPDOWNS['baseline_rv_'+inst].value for inst in INPUT['inst_rv'] ] ): fwrite_params_line('#baseline per instrument,') INPUT['N_last_rows'] += 1 for inst in INPUT['inst_all']: if inst in INPUT['inst_phot']: key = 'flux' elif inst in INPUT['inst_rv']: key = 'rv' if DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_Matern32': fwrite_params('baseline_gp_matern32_lnsigma_'+key+'_'+inst, '$\mathrm{gp: \ln{\sigma} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_matern32_lnrho_'+key+'_'+inst, '$\mathrm{gp: \ln{\\rho} ('+inst+')}$', '', [-15,15]) INPUT['N_last_rows'] += 2 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_SHO': fwrite_params('baseline_gp_sho_lnS0_'+key+'_'+inst, '$\mathrm{gp: \ln{S_0} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_sho_lnQ_'+key+'_'+inst, '$\mathrm{gp: \ln{Q} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_sho_lnomega0_'+key+'_'+inst, '$\mathrm{gp: \ln{\omega_0} ('+inst+')}$', '', [-15,15]) INPUT['N_last_rows'] += 3 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_real': fwrite_params('baseline_gp_real_lna_'+key+'_'+inst, '$\mathrm{gp: \ln{a} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_real_lnc_'+key+'_'+inst, '$\mathrm{gp: \ln{c} ('+inst+')}$', '', [-15,15]) INPUT['N_last_rows'] += 2 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_GP_complex': fwrite_params('baseline_gp_real_lna_'+key+'_'+inst, '$\mathrm{gp: \ln{a} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_real_lnc_'+key+'_'+inst, '$\mathrm{gp: \ln{c} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_real_lnb_'+key+'_'+inst, '$\mathrm{gp: \ln{b} ('+inst+')}$', '', [-15,15]) fwrite_params('baseline_gp_real_lnd_'+key+'_'+inst, '$\mathrm{gp: \ln{d} ('+inst+')}$', '', [-15,15]) INPUT['N_last_rows'] += 4 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_offset': fwrite_params('baseline_offset_flux_'+inst, 'offset ('+inst+')', '', [-1e12,1e12]) INPUT['N_last_rows'] += 1 elif DROPDOWNS['baseline_'+key+'_'+inst].value == 'sample_linear': fwrite_params('baseline_a_flux_'+inst, 'lin. a ('+inst+')', '', [-1e12,1e12]) fwrite_params('baseline_b_flux_'+inst, 'lin. b ('+inst+')', '', [-1e12,1e12]) INPUT['N_last_rows'] += 2 #::: continue button_create_params_file.style.button_color = 'lightgreen' print('Done.') INPUT['show_step_4'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) if nan_fields: warnings.warn('You left some fields empty. These will be set NaN in params.csv. 
Make sure to fix this manually later.') button_create_params_file.on_click(create_params_file) epic pipeline = 'everest' save = True output_cols = 'time flux flux_err'.split() for n,campaign in enumerate(all_campaigns): if pipeline=='everest': e = cr.Everest(epicid=epic, campaign=campaign, verbose=False) lc = e.get_everest_lc() else: e = cr.K2sff(epicid=epic, campaign=campaign, verbose=False) lc = e.get_k2sff_lc() lc = lc.remove_outliers() # if save: # camp = str(campaign).zfill(2) # fp_csv = f'../data/lc/candidate_lcs/{pipeline}_lcs/EPIC{epic}_{pipeline}_c{camp}_raw.csv' # raw = pd.DataFrame(np.c_[lc.time, lc.flux, lc.flux_err], # columns=output_cols # ) # raw.to_csv(fp_csv, columns=output_cols, # sep=' ', # header=False, # index=False) # print(f'Saved: {fp_csv}') if n==0: lcs = lc.copy() else: lcs = lcs.append(lc) lcs.campaign = all_campaigns if save: output_cols = 'time flux flux_err'.split() # fp_csv = f'../data/lc/candidate_lcs/{pipeline}_lcs/EPIC{e.epicid}_{pipeline}_c{camps}_raw.csv' fp_csv = f'./epic{str(epic)[-4:]}/k2.csv' raw = pd.DataFrame(np.c_[lcs.time, lcs.flux, lcs.flux_err], columns=output_cols ) raw.to_csv(fp_csv, columns=output_cols, sep=',', header=False, index=False) print(f'Saved: {fp_csv}')
Saved: ./epic3752/k2.csv
MIT
allesfitter/epic3752_ini.ipynb
jpdeleon/kesprint2
4. data files
Please put all data files into the selected directory, and click the button to confirm.
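Before confirming, it can help to sanity-check that each instrument's file is present and readable. Below is a minimal sketch (not part of the original notebook); it assumes the working directory and instrument naming used earlier in this notebook (`./epic3752` and a single photometric instrument `k2`, stored as `<instrument>.csv` with headerless time, flux, flux_err columns). Adjust these placeholder names to your own setup.
```python
import os
import pandas as pd

datadir = './epic3752'   # assumed working directory (placeholder)
instruments = ['k2']     # assumed instrument names (placeholder)

for inst in instruments:
    fp = os.path.join(datadir, inst + '.csv')
    if not os.path.exists(fp):
        print(f'Missing data file: {fp}')
        continue
    # files were written without a header; columns are time, flux, flux_err
    d = pd.read_csv(fp, header=None, names=['time', 'flux', 'flux_err'])
    print(f'{fp}: {len(d)} rows, time span {d.time.min():.3f} to {d.time.max():.3f}')
```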
if 'show_step_4' in INPUT and INPUT['show_step_4']==True: BUTTONS['confirm_data_files'] = widgets.Button(description='Confirm', button_style='') display(BUTTONS['confirm_data_files']) def check_data_files(change): clear_output() display(BUTTONS['confirm_data_files']) all_data_exists = True for inst in INPUT['inst_all']: if not os.path.exists( os.path.join(INPUT['datadir'], inst+'.csv') ): warnings.warn('Data file '+os.path.join(INPUT['datadir'], inst+'.csv')+' does not exist. Please include the data file into the directory and then repeat this step.') all_data_exists = False if all_data_exists: BUTTONS['confirm_data_files'].style.button_color = 'lightgreen' INPUT['show_step_5'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) BUTTONS['confirm_data_files'].on_click(check_data_files) # else: # print('Complete previous steps first.') ############################################################################ #::: time to include those TTV lines into the folder! ############################################################################ if 'show_step_5' in INPUT and INPUT['show_step_5']==True and DROPDOWNS['fit_ttvs'].value=='yes': from allesfitter import config config.init(INPUT['datadir']) new_lines = '' for companion in INPUT['companions_all']: N_observed_transits = len(config.BASEMENT.data[companion+'_tmid_observed_transits']) for i in range(N_observed_transits): string = fwrite_params(companion+'_ttv_per_transit', 'TTV$_\mathrm{'+str(i+1)+'}}$', '$\mathrm{d}$', [-15,15], return_str=True) + '\n' string = string.replace('per_transit', 'transit_'+str(i+1)) new_lines += string with open(INPUT['fname_params'], "r") as f: contents = f.readlines() for i, line in enumerate(contents): line = line.rstrip() # remove '\n' at end of line if line == '#TTV per transit,,,,,': index = i+1 contents.insert(index, new_lines) with open(INPUT['fname_params'], "w") as f: contents = "".join(contents) f.write(contents) print('TTVs per transit were added to params.csv.') print('params.csv and settings.csv are now ready to use.')
_____no_output_____
MIT
allesfitter/epic3752_ini.ipynb
jpdeleon/kesprint2
5. check
if 'show_step_5' in INPUT and INPUT['show_step_5']==True: from allesfitter.general_output import show_initial_guess import matplotlib.pyplot as plt fig_list = show_initial_guess(INPUT['datadir'], do_logprint=False, return_figs=True) for fig in fig_list: plt.show(fig) if 'show_step_5' in INPUT and INPUT['show_step_5']==True: BUTTONS['confirm_plots'] = widgets.Button(description='Looks good', button_style='') display(BUTTONS['confirm_plots']) def check_plots(change): clear_output() display(BUTTONS['confirm_plots']) BUTTONS['confirm_plots'].style.button_color = 'lightgreen' INPUT['show_step_6'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) BUTTONS['confirm_plots'].on_click(check_plots) # else: # print('Complete previous steps first.')
_____no_output_____
MIT
allesfitter/epic3752_ini.ipynb
jpdeleon/kesprint2
6. tighter priors on errors and baselines
This will take a couple of minutes. Make sure your initial guess above is very good. This step subtracts the model from the data and evaluates the remaining noise patterns to estimate errors, jitter, and GP baselines.
if 'show_step_6' in INPUT and INPUT['show_step_6']==True: def estimate_tighter_priors(change): print('\nEstimating errors and baselines... this will take a couple of minutes. Please be patient, you will get notified once everything is completed.\n') #::: run MCMC fit to estimate errors and baselines estimate_noise(INPUT['datadir']) #::: delete the rows containing the default (zero) errors and baselines from the params.csv file clean_up_csv( os.path.join( INPUT['datadir'], 'params.csv' ), N_last_rows=INPUT['N_last_rows'] ) #::: write new rows into params.csv #::: errors fwrite_params_line('#errors per instrument,') for i, inst in enumerate(INPUT['inst_phot']): #::: read in the summary file summaryfile = os.path.join( INPUT['datadir'], 'priors', 'summary_phot.csv' ) priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None) priors = {} for key in priors2.dtype.names: priors[key] = np.atleast_1d(priors2[key]) median = priors['ln_yerr_median'][i] err = 5.*np.max([ float(priors['ln_yerr_ll'][i]), float(priors['ln_yerr_ul'][i]) ]) median, err, _ = round_txt_separately(median,err,err) fwrite_params_line('ln_err_flux_'+inst+','+median+',1,trunc_normal -15 0 '+median+' '+err+',$\ln{\sigma_\mathrm{'+inst+'}}$,') for i, inst in enumerate(INPUT['inst_rv']): #::: read in the summary file summaryfile = os.path.join( INPUT['datadir'], 'priors', 'summary_rv.csv' ) priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None) priors = {} for key in priors2.dtype.names: priors[key] = np.atleast_1d(priors2[key]) median = priors['ln_yerr_median'][i] err = 5.*np.max([ float(priors['ln_yerr_ll'][i]), float(priors['ln_yerr_ul'][i]) ]) median, err, _ = round_txt_separately(median,err,err) fwrite_params('ln_jitter_rv_'+inst+','+median+',1,trunc_normal -15 0 '+median+' '+err+',$\ln{\sigma_\mathrm{jitter; '+inst+'}}$,') #::: write new rows into params.csv #::: baselines fwrite_params_line('#baseline per instrument,') for i, inst in enumerate(INPUT['inst_phot']): #::: read in the summary file summaryfile = os.path.join( INPUT['datadir'], 'priors', 'summary_phot.csv' ) priors2 = np.genfromtxt(summaryfile, names=True, delimiter=',', dtype=None) priors = {} for key in priors2.dtype.names: priors[key] = np.atleast_1d(priors2[key]) median = priors['gp_ln_sigma_median'][i] err = 5.*np.max([ float(priors['gp_ln_sigma_ll'][i]), float(priors['gp_ln_sigma_ul'][i]) ]) median, err, _ = round_txt_separately(median,err,err) fwrite_params_line('baseline_gp1_flux_'+inst+','+median+',1,trunc_normal -15 15 '+median+' '+err+',$\mathrm{gp: \ln{\sigma} ('+inst+')}$,') median = priors['gp_ln_rho_median'][i] err = 5.*np.max([ float(priors['gp_ln_rho_ll'][i]), float(priors['gp_ln_rho_ul'][i]) ]) median, err, _ = round_txt_separately(median,err,err) fwrite_params_line('baseline_gp2_flux_'+inst+','+median+',1,trunc_normal -15 15 '+median+' '+err+',$\mathrm{gp: \ln{\\rho} ('+inst+')}$,') #::: confirm BUTTONS['estimate_tighter_priors'].style.button_color = 'lightgreen' print('Done.') INPUT['show_step_7'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) def skip(change): BUTTONS['skip'].style.button_color = 'lightgreen' print('Skipped.') INPUT['show_step_7'] = True display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1, IPython.notebook.ncells())')) # else: # print('Complete previous steps first.') if 'show_step_6' in INPUT and INPUT['show_step_6']==True: BUTTONS['estimate_tighter_priors'] = 
widgets.Button(value=False, description='Estimate tighter priors') BUTTONS['skip'] = widgets.Button(value=False, description='Skip') display( widgets.HBox([BUTTONS['estimate_tighter_priors'],BUTTONS['skip']])) BUTTONS['estimate_tighter_priors'].on_click(estimate_tighter_priors) BUTTONS['skip'].on_click(skip)
_____no_output_____
MIT
allesfitter/epic3752_ini.ipynb
jpdeleon/kesprint2
7. run the fit
if 'show_step_7' in INPUT and INPUT['show_step_7']==True: try: from importlib import reload except: pass try: from imp import reload except: pass import allesfitter reload(allesfitter) button_run_ns_fit = widgets.Button(description='Run NS fit', button_style='') button_run_mcmc_fit = widgets.Button(description='Run MCMC fit', button_style='') hbox = widgets.HBox([button_run_ns_fit, button_run_mcmc_fit]) display(hbox) def run_ns_fit(change): button_run_ns_fit.style.button_color = 'lightgreen' allesfitter.ns_fit(INPUT['datadir']) allesfitter.ns_output(INPUT['datadir']) def run_mcmc_fit(change): button_run_mcmc_fit.style.button_color = 'lightgreen' allesfitter.mcmc_fit(INPUT['datadir']) allesfitter.mcmc_output(INPUT['datadir']) button_run_ns_fit.on_click(run_ns_fit) button_run_mcmc_fit.on_click(run_mcmc_fit) # else: # print('Complete previous steps first.')
_____no_output_____
MIT
allesfitter/epic3752_ini.ipynb
jpdeleon/kesprint2
Deploying a trained model to Cloud Machine Learning Engine
A Kubeflow Pipeline component to deploy a trained model from a Cloud Storage path to a Cloud Machine Learning Engine service.
Intended use
Use the component to deploy a trained model to the Cloud Machine Learning Engine service. The deployed model can serve online or batch predictions in a KFP pipeline.
Runtime arguments:
Name | Description | Type | Optional | Default
:--- | :---------- | :--- | :------- | :------
model_uri | The Cloud Storage URI that contains a model file. Commonly used TF model search paths (export/exporter) will be used. | GCSPath | No |
project_id | The ID of the parent project of the serving model. | GCPProjectID | No |
model_id | The user-specified name of the model. If it is not provided, the operation uses a random name. | String | Yes | ` `
version_id | The user-specified name of the version. If it is not provided, the operation uses a random name. | String | Yes | ` `
runtime_version | The [Cloud ML Engine runtime version](https://cloud.google.com/ml-engine/docs/tensorflow/runtime-version-list) to use for this deployment. If it is not set, the Cloud ML Engine uses the default stable version, 1.0. | String | Yes | ` `
python_version | The version of Python used in the prediction. If it is not set, the default version is `2.7`. Python `3.5` is available when the runtime_version is set to `1.4` and above. Python `2.7` works with all supported runtime versions. | String | Yes | ` `
version | The JSON payload of the new [Version](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions). | Dict | Yes | ` `
replace_existing_version | A Boolean flag that indicates whether to replace the existing version in case of conflict. | Bool | Yes | False
set_default | A Boolean flag that indicates whether to set the new version as the default version of the model. | Bool | Yes | False
wait_interval | A time interval to wait for in case the operation has a long run time. | Integer | Yes | 30
Output:
Name | Description | Type
:--- | :---------- | :---
model_uri | The Cloud Storage URI of the trained model. | GCSPath
model_name | The name of the serving model. | String
version_name | The name of the deployed version of the model. | String
Cautions & requirements
To use the component, you must:
* Set up the cloud environment by following the [guide](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).
* Run the component under a secret of the [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/gcp-service-accounts) in a Kubeflow cluster. For example:
```python
mlengine_deploy_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
```
* Grant the Kubeflow user service account read access to the Cloud Storage buckets that contain the trained model.
Detailed Description
The component does the following:
* Search for the trained model from the user-provided Cloud Storage path.
* Create a new model if the user-provided model doesn't exist.
* Delete the existing model version if `replace_existing_version` is enabled.
* Create a new model version from the trained model.
* Set the new version as the default version of the model if `set_default` is enabled.
Here are the steps to use the component in a pipeline:
1. Install KFP SDK
%%capture --no-stderr KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz' !pip3 install $KFP_PACKAGE --upgrade
_____no_output_____
Apache-2.0
components/gcp/ml_engine/deploy/sample.ipynb
JohnPaton/pipelines
2. Load the component using KFP SDK
import kfp.components as comp mlengine_deploy_op = comp.load_component_from_url( 'https://raw.githubusercontent.com/kubeflow/pipelines/d2f5cc92a46012b9927209e2aaccab70961582dc/components/gcp/ml_engine/deploy/component.yaml') help(mlengine_deploy_op)
_____no_output_____
Apache-2.0
components/gcp/ml_engine/deploy/sample.ipynb
JohnPaton/pipelines
For more information about the component, please check out:
* [Component python code](https://github.com/kubeflow/pipelines/blob/master/component_sdk/python/kfp_component/google/ml_engine/_deploy.py)
* [Component docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
* [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/ml_engine/deploy/sample.ipynb)
* [Cloud Machine Learning Engine Model REST API](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models)
* [Cloud Machine Learning Engine Version REST API](https://cloud.google.com/ml-engine/reference/rest/v1/projects.versions)
Sample
Note: The following sample code works in an IPython notebook or directly in Python code.
In this sample, we deploy a pre-built trained model from `gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/` to the Cloud Machine Learning Engine service. The deployed model is named `kfp_sample_model`. A new version is created every time the sample is run, and the latest version is set as the default version of the deployed model.
Set sample parameters
# Required Parameters PROJECT_ID = '<Please put your project ID here>' # Optional Parameters EXPERIMENT_NAME = 'CLOUDML - Deploy' TRAINED_MODEL_PATH = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/'
_____no_output_____
Apache-2.0
components/gcp/ml_engine/deploy/sample.ipynb
JohnPaton/pipelines
Example pipeline that uses the component
import kfp.dsl as dsl import kfp.gcp as gcp import json @dsl.pipeline( name='CloudML deploy pipeline', description='CloudML deploy pipeline' ) def pipeline( model_uri = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/', project_id = PROJECT_ID, model_id = 'kfp_sample_model', version_id = '', runtime_version = '1.10', python_version = '', version = '', replace_existing_version = 'False', set_default = 'True', wait_interval = '30'): task = mlengine_deploy_op( model_uri=model_uri, project_id=project_id, model_id=model_id, version_id=version_id, runtime_version=runtime_version, python_version=python_version, version=version, replace_existing_version=replace_existing_version, set_default=set_default, wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
_____no_output_____
Apache-2.0
components/gcp/ml_engine/deploy/sample.ipynb
JohnPaton/pipelines
Compile the pipeline
pipeline_func = pipeline pipeline_filename = pipeline_func.__name__ + '.zip' import kfp.compiler as compiler compiler.Compiler().compile(pipeline_func, pipeline_filename)
_____no_output_____
Apache-2.0
components/gcp/ml_engine/deploy/sample.ipynb
JohnPaton/pipelines
Submit the pipeline for execution
#Specify pipeline argument values arguments = {} #Get or create an experiment and submit a pipeline run import kfp client = kfp.Client() experiment = client.create_experiment(EXPERIMENT_NAME) #Submit a pipeline run run_name = pipeline_func.__name__ + ' run' run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
_____no_output_____
Apache-2.0
components/gcp/ml_engine/deploy/sample.ipynb
JohnPaton/pipelines
Asking salient questions
Now that we can generate the concept map and calculate the cognitive load per sentence, let's display text blurbs in order of increasing cognitive load as we traverse the created learning path. Based on the blurbs, we will ask the student multiple-choice questions, and the answers will be passed through a basic inference engine. The questions are designed to fill general schemas for the different entity types in the STEM domain, with relationships defined by the roles these entities play. A memory map will be simulated for each student, and review and follow-up questions will be asked based on that student's memory map. Further work will target more intelligent question asking and prompting of critical and creative thinking through relationship-extraction and entity-detection models, as well as directing and/or exploring based on learning goals defined by Bloom's taxonomy.
import itertools from itertools import chain import nltk #stop_words = set(stopwords.words('english')) #filename = 'A Mind For Numbers_ How to Excel at Math and Science (Even If You Flunked Algebra)' filename = 'physics_iitjee_vol1' concepts = {} import pickle # Loading extracted concepts from file (see concept_extraction.ipynb) #concepts = {'sents':sents,'rawtxt':rawtxt,'sent_to_npflat':sent_to_npflat,'sent_to_tags':sent_to_tags,'sent_to_ltags':sent_to_ltags,'np_to_sent':np_to_sent,'Conceptdata':Conceptdata} with open('../processed_data/'+filename +'concepts.pickle', 'rb') as f: concepts = pickle.load(f) # Loading idf dictionary (see Build_IDF_dictionary.ipynb) with open('../processed_data/'+'idf_dict.pickle','rb') as f1: idf_dict =pickle.load(f1) with open('../processed_data/'+filename+'conceptimp.pickle','rb') as f2: dfConceptimp = pickle.load(f2) sents = concepts['sents'] rawtxt = concepts['rawtxt'] sent_to_npflat = concepts['sent_to_npflat'] sent_to_tags= concepts['sent_to_tags'] sent_to_ltags = concepts['sent_to_ltags'] np_to_sent = concepts['np_to_sent'] Conceptdata = concepts['Conceptdata'] import math def get_idf(wrd,totaldocs=10788): wrd = wrd.lower() return idf_dict.get(wrd,math.log(totaldocs)) import pandas as pd with open('../processed_data/'+filename+'conceptmap.pickle','rb') as f2: df = pickle.load(f2) def calc_cl_per_sentence(sent_to_npflatsent_to_npflat,known_concepts = [], maxidf=9.1): sent_to_clt = [] for i in range(len(sent_to_npflat)): npinsent = sent_to_npflat[i] npinsent = [np for np in npinsent if np not in known_concepts] clt= 0 for np in npinsent: tokens = np.split(' ') idf = 0 for t in tokens: idf = idf + get_idf(t) idf = idf*len(tokens)*1.67 if (idf>=maxidf): clt = clt + 1 sent_to_clt.append(clt) return sent_to_clt def plot_clt(sent_to_clt): from matplotlib import pyplot as plt plt.xlabel('document sentence #') plt.ylabel('Load added to working memory by sentence') plt.title('Cognitive Load for '+filename) plt.plot(list(range(1,len(sent_to_npflat)+1)),sent_to_clt,drawstyle='steps') plt.savefig('cltfig1.png') return None sent_to_clt = calc_cl_per_sentence(sent_to_npflat) plot_clt(sent_to_clt) def print_sents_by_target_cl(target,cl): return [sents[s] for s in np_to_sent[target] if sent_to_clt[s]==cl] print_sents_by_target_cl('life',1) sent_to_clt = calc_cl_per_sentence(sent_to_npflat) print('Mininum cognitive load sentence: ',sents[sent_to_clt.index(min(sent_to_clt))]) print('Maximum cognitive load sentence: ',sents[sent_to_clt.index(max(sent_to_clt))]) plot_clt(sent_to_clt)
_____no_output_____
MIT
asking_questions_inferencing/graph_opening.ipynb
rts1988/IntelligentTutoringSystem_Experiments
Functions to get blurbs for two concepts
import pandas as pd def calc_clt_blurb_order(tuplist): tup_to_clt = {} for tup in tuplist: blurb_clt = 0 for i in range(tup[0],tup[1]+1): blurb_clt = blurb_clt + sent_to_clt[i] tup_to_clt[tup] = blurb_clt tup_to_clt = pd.Series(tup_to_clt) tup_to_clt.sort_values(ascending=True) return list(tup_to_clt.sort_values(ascending=True).index) def get_sentence_indices(np1,np2,max_distance=3): sents1 = np_to_sent[np1] sents2 = np_to_sent[np2] ind1 = 0 ind2 = 0 tuplist = [] lensents1 = len(sents1) #print(lensents1) lensents2 = len(sents2) #print(lensents2) while(ind1<lensents1 and ind2 <lensents2): #print(ind1,ind2) if (sents1[ind1]<sents2[ind2]): #print('sent1 less than sent2') if sents2[ind2]-sents1[ind1]<=max_distance: tuplist.append((sents1[ind1],sents2[ind2])) ind1 = ind1+1 ind2 = ind2 + 1 else: #ind1 = bs.bisect_left(sents1,sents2[ind2]) ind1 = ind1 + 1 elif (sents1[ind1]>sents2[ind2]): #print('sent2 less than sent1') if sents1[ind1]-sents2[ind2] <= max_distance: tuplist.append((sents2[ind2],sents1[ind1])) ind1 = ind1 + 1 ind2 = ind2 + 1 else: #ind2 = bs.bisect_left(sents2,sents1[ind1]) ind2 = ind2 + 1 else: tuplist.append((sents1[ind1],sents2[ind2])) ind1 = ind1+1 ind2 = ind2+1 return tuplist def get_blurbs(np1,np2,max_distance=3): blurblist = [] tuplist = calc_clt_blurb_order(get_sentence_indices(np1,np2,max_distance)) print(tuplist) for t in tuplist: blurb = [] print(t) blurb = ' '.join(sents[t[0]:t[1]+1]).replace('\n', ' ').replace('\r', '') print(blurb) blurblist.append(blurb) return tuplist, blurblist tuplist, blurblist = get_blurbs('speed','velocity',0) import networkx as nx import numpy as num bond_threshold=num.quantile(df['Bondstrength'],0.0) dfdir = df[(df['Direction']>=1) & (df['Bondstrength']>=bond_threshold)] print(len(dfdir)) print(bond_threshold) import networkx as nx import matplotlib.pyplot as plt G = nx.from_pandas_edgelist(dfdir,'Concept1','Concept2', create_using=nx.DiGraph()) paths=dict(nx.all_pairs_shortest_path(G,cutoff=None)) def get_nodes_allpairs(concept_list): nodelist = [paths.get(cl1, {}).get(cl2,None) for cl1 in concept_list for cl2 in concept_list if paths.get(cl1, {}).get(cl2,None) is not None] nodelist = list(chain.from_iterable(nodelist)) return list(set(nodelist)) start_concepts_number = 5 # Start drawing graph with top n concepts start_concept_list = list(dfConceptimp.sort_values(by=['TFIDFA','Amap','SdevA','AfirstOc'],ascending=[False,False,False,True]).head(start_concepts_number)['Concept1']) print('Including the following concepts :',start_concept_list) # now get the paths from all pairs in the concept list and the corresponding nodes nodelist = get_nodes_allpairs(start_concept_list) print('The nodes that connect these concepts: ', nodelist) start_concept_edges = dfdir[dfdir['Concept1'].isin(nodelist) & dfdir['Concept2'].isin(nodelist)] start_concept_from = set(start_concept_edges['Concept1']) print(start_concept_from) start_concept_to = set(start_concept_edges['Concept2']) print(start_concept_to) roots = start_concept_from - start_concept_to print('Roots: ',roots) leaves = start_concept_to - start_concept_from print('Leaves:',leaves) print('All the longest paths in this subgraph:') for r in roots: for l in leaves: if nx.has_path(G,r,l): print(paths[r][l]) len(start_concept_edges) plt.figure(figsize=(20,10)) nx.draw_circular(G.subgraph(list(start_concept_from | start_concept_to)),with_labels=True, font_size=18,node_size=1200) def in_and_out_nodes(G,concept): return list(G.predecessors(concept)) + list(G.successors(concept)) + [concept] def 
draw_graph(G,nodes_list): plt.figure(figsize=(20,10)) nx.draw_circular(G.subgraph(nodes_list),with_labels=True, font_size=18,node_size=1200) draw_graph(G, in_and_out_nodes(G,'particle')) get_blurbs('speed','classical mechanic',10) plot_clt(calc_cl_per_sentence(sent_to_npflat)) known_concepts = ['geometry','orbitals','electron','atom','element','parameter','program','nucleus'] plot_clt(calc_cl_per_sentence(sent_to_npflat,known_concepts)) print_sents_by_target_cl('life',2) df[(df['Concept1']=='surface') & (df['Direction']>=1)].sort_values(by=['Bondstrength','dAB_dBA_diff'],ascending=[False, False]).head(41) print_sents_by_target_cl('monte carlo method',5) print_sents_by_target_cl('functional',2) sents[1275:1279] tuplist,blurblist = get_blurbs('life','woman',3) print([sents[i] for i in np_to_sent['pes']]) fromset = set(dfdir['Concept1']) toset = set(dfdir['Concept2']) rootset = fromset-toset leavesset = toset-fromset df[df['Concept2'].isin(list(leavesset))].sort_values(by='FB',ascending=False).head(30)
_____no_output_____
MIT
asking_questions_inferencing/graph_opening.ipynb
rts1988/IntelligentTutoringSystem_Experiments
Deep learning - hw1 - 0756708 孫茂勛
import numpy as np import random import pandas as pd from sklearn.model_selection import train_test_split from keras.utils import to_categorical import matplotlib.pyplot as plt import copy
Using TensorFlow backend.
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
1. Data processing
df = pd.read_csv('./titanic.csv') df.head() training_set = df[:800] testing_set = df[800:] X_train = training_set[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].values X_test = testing_set[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].values X_train = X_train.reshape(X_train.shape[0], -1, 1) X_test = X_test.reshape(X_test.shape[0], -1, 1) y_train = training_set['Survived'].values y_test = testing_set['Survived'].values y_train = to_categorical(y_train).reshape(y_train.shape[0], -1, 1) y_test = to_categorical(y_test).reshape(y_test.shape[0], -1, 1) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) # combine data & labels training_data = list(zip(X_train, y_train)) testing_data = list(zip(X_test, y_test))
(800, 6, 1) (800, 2, 1) (91, 6, 1) (91, 2, 1)
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
2. Model Architecture
def sigmoid(z): return 1.0 / (1.0 + np.exp(-z)) def sigmoid_derivate(z): return sigmoid(z) * (1-sigmoid(z)) def cross_entropy(output, ground_truth): return np.sum( np.nan_to_num( -ground_truth*np.log(output) - (1-ground_truth)*np.log(1-output) ) ) def cross_entropy_derivative(output, ground_truth): return output - ground_truth class NN(): def __init__(self, neurons): self.num_layers = len(neurons) self.neurons = neurons # create weights & bias self.weights = [ np.zeros((j, i)) for i, j in zip(neurons[:-1], neurons[1:]) ] self.biases = [ np.zeros((i, 1)) for i in neurons[1:] ] # info self.training_loss = [] self.training_error_rate = [] self.testing_error_rate = [] def SGD(self, training_data, testing_data, epochs, batch_size, lr): num = len(training_data) self.training_loss = [] self.training_error_rate = [] self.testing_error_rate = [] evaluation_cost, evaluation_accuracy = [], [] training_cost, training_accuracy = [], [] for epoch in range(epochs): random.shuffle(training_data) mini_batch = [ training_data[i : i + batch_size] for i in range(0, num, batch_size) ] # split data into mini_batch for single_data in mini_batch: self.update_mini_batch(single_data, lr) if (epoch % 50 == 0): # record info self.training_loss.append(self.calc_loss(training_data)) self.training_error_rate.append(self.count_error(training_data) / len(training_data)) self.testing_error_rate.append(self.count_error(testing_data) / len(testing_data)) print('===================================') print("【Epoch %s】" % epoch) print(' training loss: %f' % self.calc_loss(training_data)) print(' training error rate: %d / %d(%f)' % (self.count_error(training_data), len(training_data), self.count_error(training_data) / len(training_data))) print(' testing error rate: %d / %d(%f)' % (self.count_error(testing_data), len(testing_data), self.count_error(testing_data) / len(testing_data))) def update_mini_batch(self, single_data, lr): sum_gradient_w = [ np.zeros(w.shape) for w in self.weights ] sum_gradient_b = [ np.zeros(b.shape) for b in self.biases ] # cumulate gradient of each single data for x, y in single_data: gradient_w, gradient_b = self.backward(x, y) sum_gradient_w = [ sw + w for sw, w in zip(sum_gradient_w, gradient_w)] sum_gradient_b = [ sb + b for sb, b in zip(sum_gradient_b, gradient_b)] # update weights & biases with (mean of sum of gradient * learning rate) self.weights = [ w - lr/len(single_data) * sw for w, sw in zip(self.weights, sum_gradient_w) ] self.biases = [ b - lr/len(single_data) * sb for b, sb in zip(self.biases, sum_gradient_b) ] def forward(self, x): for w, b in zip(self.weights, self.biases): x = np.dot(w, x) + b x = sigmoid(x) return x def backward(self, x, y): # store gradient of w, b gradient_w = [ np.zeros(w.shape) for w in self.weights ] gradient_b = [ np.zeros(b.shape) for b in self.biases ] # forward activation = x zs = [] # store vectors which is input of activation function activations = [x] # store vectors which is output of activation function for w, b in zip(self.weights, self.biases): z = np.dot(w, activation) + b zs.append(z) activation = sigmoid(z) activations.append(activation) # backward # we calc last layer separately, because loss function is diff with activation funcion delta = cross_entropy_derivative(activations[-1], y) gradient_b[-1] = delta * 1 gradient_w[-1] = np.dot(delta, activations[-2].T) for layer in range(2, self.num_layers): z = zs[-layer] delta = np.dot(self.weights[-layer + 1].T, delta) * sigmoid_derivate(z) gradient_w[-layer] = np.dot(delta, activations[-layer - 1].T) 
gradient_b[-layer] = delta return gradient_w, gradient_b def calc_loss(self, data): # calc cross entropy loss loss = 0 for x, y in data: output = self.forward(x) loss += cross_entropy(output, y)/ len(data) return loss def count_error(self, data): # count error number compare_list = [ (np.argmax(self.forward(x)), np.argmax(y)) for x, y in data ] error_count = sum( int(y1 != y2) for y1, y2 in compare_list) return error_count
_____no_output_____
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
3. Training p1
module1 = NN([6, 32, 32, 64, 2]) module1.SGD(training_data, testing_data, 3000, 100, 0.3) new_x_axis = np.arange(0,3000, 50) fig, ax = plt.subplots(1, 1) ax.plot(new_x_axis, module1.training_loss) ax.set_title('training loss') ax.set_xlabel('Epochs') ax.set_ylabel('Average cross entropy') fig, ax = plt.subplots(1, 2) fig.set_size_inches(12, 4) ax[0].plot(new_x_axis, module1.training_error_rate) ax[0].set_title('training error rate') ax[0].set_xlabel('Epochs') ax[0].set_ylabel('Error rate') ax[1].plot(new_x_axis, module1.testing_error_rate) ax[1].set_title('testing error rate') ax[1].set_xlabel('Epochs') ax[1].set_ylabel('Error rate')
_____no_output_____
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
p2
module2 = NN([6, 3, 3, 2]) module2.SGD(training_data, testing_data, 3000, 100, 0.03) fig, ax = plt.subplots(1, 1) ax.plot(new_x_axis, module2.training_loss) ax.set_title('training loss') ax.set_xlabel('Epochs') ax.set_ylabel('Average cross entropy') fig, ax = plt.subplots(1, 2) fig.set_size_inches(12, 4) ax[0].plot(new_x_axis, module2.training_error_rate) ax[0].set_title('training error rate') ax[0].set_xlabel('Epochs') ax[0].set_ylabel('Error rate') ax[1].plot(new_x_axis, module2.testing_error_rate) ax[1].set_title('testing error rate') ax[1].set_xlabel('Epochs') ax[1].set_ylabel('Error rate')
_____no_output_____
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
p4
df.head() module2.weights[0]
_____no_output_____
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
p3.
df_new = df.copy() df_new.head() from sklearn.preprocessing import StandardScaler fare_scaler = StandardScaler() df_new['Fare'] = pd.DataFrame(fare_scaler.fit_transform(df_new['Fare'].values.reshape(-1,1))) df_new.head() training_set = df_new[:800] testing_set = df_new[800:] X_train = training_set[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].values X_test = testing_set[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].values X_train = X_train.reshape(X_train.shape[0], -1, 1) X_test = X_test.reshape(X_test.shape[0], -1, 1) y_train = training_set['Survived'].values y_test = testing_set['Survived'].values y_train = to_categorical(y_train).reshape(y_train.shape[0], -1, 1) y_test = to_categorical(y_test).reshape(y_test.shape[0], -1, 1) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) # combine data & labels training_data = list(zip(X_train, y_train)) testing_data = list(zip(X_test, y_test)) module3 = NN([6, 3, 3, 2]) module3.SGD(training_data, testing_data, 3000, 100, 0.1) new_x_axis = np.arange(0, 3000, 50) fig, ax = plt.subplots(1, 1) ax.plot(new_x_axis, module3.training_loss) ax.set_title('training loss') ax.set_xlabel('Epochs') ax.set_ylabel('Average cross entropy') fig, ax = plt.subplots(1, 2) fig.set_size_inches(12, 4) ax[0].plot(new_x_axis, module3.training_error_rate) ax[0].set_title('training error rate') ax[0].set_xlabel('Epochs') ax[0].set_ylabel('Error rate') ax[1].plot(new_x_axis, module3.testing_error_rate) ax[1].set_title('testing error rate') ax[1].set_xlabel('Epochs') ax[1].set_ylabel('Error rate')
_____no_output_____
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
p3-2
df_new_1 = df.copy() fare_scaler = StandardScaler() age_scaler = StandardScaler() df_new_1['Fare'] = pd.DataFrame(fare_scaler.fit_transform(df_new_1['Fare'].values.reshape(-1,1))) df_new_1['Age'] = pd.DataFrame(age_scaler.fit_transform(df_new_1['Age'].values.reshape(-1,1))) training_set = df_new_1[:800] testing_set = df_new_1[800:] X_train = training_set[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].values X_test = testing_set[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].values X_train = X_train.reshape(X_train.shape[0], -1, 1) X_test = X_test.reshape(X_test.shape[0], -1, 1) y_train = training_set['Survived'].values y_test = testing_set['Survived'].values y_train = to_categorical(y_train).reshape(y_train.shape[0], -1, 1) y_test = to_categorical(y_test).reshape(y_test.shape[0], -1, 1) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) # combine data & labels training_data = list(zip(X_train, y_train)) testing_data = list(zip(X_test, y_test)) module4 = NN([6, 3, 3, 2]) module4.SGD(training_data, testing_data, 3000, 100, 0.1) fig, ax = plt.subplots(1, 1) ax.plot(new_x_axis, module4.training_loss) ax.set_title('training loss') ax.set_xlabel('Epochs') ax.set_ylabel('Average cross entropy') fig, ax = plt.subplots(1, 2) fig.set_size_inches(12, 4) ax[0].plot(new_x_axis, module4.training_error_rate) ax[0].set_title('training error rate') ax[0].set_xlabel('Epochs') ax[0].set_ylabel('Error rate') ax[1].plot(new_x_axis, module4.testing_error_rate) ax[1].set_title('testing error rate') ax[1].set_xlabel('Epochs') ax[1].set_ylabel('Error rate')
_____no_output_____
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
p5
df_new_2 = pd.get_dummies(df_new_1, columns=['Pclass']) df_new_2.head() training_set = df_new_2[:800] testing_set = df_new_2[800:] X_train = training_set[['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].values X_test = testing_set[['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].values X_train = X_train.reshape(X_train.shape[0], -1, 1) X_test = X_test.reshape(X_test.shape[0], -1, 1) y_train = training_set['Survived'].values y_test = testing_set['Survived'].values y_train = to_categorical(y_train).reshape(y_train.shape[0], -1, 1) y_test = to_categorical(y_test).reshape(y_test.shape[0], -1, 1) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) # combine data & labels training_data = list(zip(X_train, y_train)) testing_data = list(zip(X_test, y_test)) module5 = NN([8, 3, 3, 2]) module5.SGD(training_data, testing_data, 3000, 100, 0.1) new_x_axis = np.arange(0, 3000, 50) fig, ax = plt.subplots(1, 1) ax.plot(new_x_axis, module5.training_loss) ax.set_title('training loss') ax.set_xlabel('Epochs') ax.set_ylabel('Average cross entropy') fig, ax = plt.subplots(1, 2) fig.set_size_inches(12, 4) ax[0].plot(new_x_axis, module5.training_error_rate) ax[0].set_title('training error rate') ax[0].set_xlabel('Epochs') ax[0].set_ylabel('Error rate') ax[1].plot(new_x_axis, module5.testing_error_rate) ax[1].set_title('testing error rate') ax[1].set_xlabel('Epochs') ax[1].set_ylabel('Error rate')
_____no_output_____
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
p6.
# X_train = training_set[['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare']].values
X_train[1]

people_John = np.array([[0, 0, 1, 1, age_scaler.transform([[23]]), 2, 2, fare_scaler.transform([[0.87]])]]).reshape(-1, 1)
print(people_John)
prediction_john = module5.forward(people_John)
print("John's probability of death vs probability of survival:", prediction_john[0], prediction_john[1])

people_Angela = np.array([[1, 0, 0, 0, age_scaler.transform([[18]]), 1, 2, fare_scaler.transform([[20]])]]).reshape(-1, 1)
print(people_Angela)
prediction_Angela = module5.forward(people_Angela)
print("Angela's probability of death vs probability of survival:", prediction_Angela[0], prediction_Angela[1])
Angela's probability of death vs probability of survival: [0.03808093] [0.96191907]
MIT
Assignment1/hw1.ipynb
john850512/Deep_Learning
> This is one of the 100 recipes of the [IPython Cookbook](http://ipython-books.github.io/), the definitive guide to high-performance scientific computing and data science in Python. Links: * http://mrob.com/pub/comp/xmorphia/F260/F260-k550.html * http://mrob.com/pub/comp/xmorphia/ 12.4. Simulating a Partial Differential Equation: reaction-diffusion systems and Turing patterns 1. Let's import the packages.
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
_____no_output_____
Apache-2.0
BiologicalPatternFormation/WorkingReactionDiffusion.ipynb
topatomer/IntroToBiophysics
2. We will simulate the following system of partial differential equations on the domain $E=[-1,1]^2$: \begin{align*}\frac{\partial u}{\partial t} &= a \Delta u + u - u^3 - v + k\\\tau\frac{\partial v}{\partial t} &= b \Delta v + u - v\\\end{align*} The variable $u$ represents the concentration of a substance favoring skin pigmentation, whereas $v$ represents another substance that reacts with the first and impedes pigmentation.At initialization time, we assume that $u$ and $v$ contain independent random numbers on every grid point. Besides, we take **Neumann boundary conditions**: we require the spatial derivatives of the variables with respect to the normal vectors to be null on the boundaries of the domain $E$.Let's define the four parameters of the model.
#a = 2.8e-4
#b = 5e-3
a = 4e-4
b = 2e-4
F = 0.0180
k = 0.0510
#F = 0.0260
#k = 0.0550
_____no_output_____
Apache-2.0
BiologicalPatternFormation/WorkingReactionDiffusion.ipynb
topatomer/IntroToBiophysics
3. We discretize time and space. The following condition ensures that the discretization scheme we use here is stable:$$dt \leq \frac{dx^2}{2}$$
size = 200        # size of the 2D grid
dx = 2./size      # space step
T = 10.0          # total time
dt = .9 * dx**2/2 # time step
n = int(T/dt)
_____no_output_____
Apache-2.0
BiologicalPatternFormation/WorkingReactionDiffusion.ipynb
topatomer/IntroToBiophysics
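As a quick sanity check (an added snippet, not part of the original recipe), one can verify that the chosen time step actually satisfies the stability bound above, using the variables just defined:

```python
# Check the stability condition dt <= dx**2 / 2 for the chosen discretisation
print('dx =', dx)
print('stability bound dx**2/2 =', dx**2 / 2)
print('chosen dt =', dt, '-> stable:', dt <= dx**2 / 2)
print('number of time steps n =', n)
```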
4. We initialize the variables $u$ and $v$. The matrices $U$ and $V$ contain the values of these variables on the vertices of the 2D grid. These variables are initialized with a uniform noise between $0$ and $1$.
U = np.random.rand(size, size)
V = np.random.rand(size, size)
_____no_output_____
Apache-2.0
BiologicalPatternFormation/WorkingReactionDiffusion.ipynb
topatomer/IntroToBiophysics
5. Now, we define a function that computes the discrete Laplace operator of a 2D variable on the grid, using a five-point stencil finite difference method. This operator is defined by: $$\Delta u(x,y) \simeq \frac{u(x+h,y)+u(x-h,y)+u(x,y+h)+u(x,y-h)-4u(x,y)}{h^2},$$ where $h = dx$ is the spatial step. We can compute the values of this operator on the grid using vectorized matrix operations. Because of side effects on the edges of the matrix, we need to remove the borders of the grid in the computation.
def laplacian(Z):
    Ztop = Z[0:-2,1:-1]
    Zleft = Z[1:-1,0:-2]
    Zbottom = Z[2:,1:-1]
    Zright = Z[1:-1,2:]
    Zcenter = Z[1:-1,1:-1]
    return (Ztop + Zleft + Zbottom + Zright - 4 * Zcenter) / dx**2
_____no_output_____
Apache-2.0
BiologicalPatternFormation/WorkingReactionDiffusion.ipynb
topatomer/IntroToBiophysics
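A small sanity check (an addition, not from the original recipe) is to apply the stencil to a function whose Laplacian is known. The five-point stencil is exact for quadratics, so for $u(x,y) = x^2 + y^2$ it should return 4 away from the edges; this assumes `size` and `dx` from the earlier cell:

```python
# Build a test grid with spacing exactly dx and check the stencil against the exact Laplacian (4)
coords = np.arange(size) * dx
xg, yg = np.meshgrid(coords, coords)
u_test = xg**2 + yg**2
print(laplacian(u_test)[size//2, size//2])   # expected: 4.0 (up to floating point error)
```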
6. Now, we simulate the system of equations using the finite difference method. At each time step, we compute the right-hand sides of the two equations on the grid using discrete spatial derivatives (Laplacians). Then, we update the variables using a discrete time derivative.
plt.imshow(U, cmap=plt.cm.copper, interpolation='none')

# We simulate the PDE with the finite difference method.
for i in range(n):
    # We compute the Laplacian of u and v.
    deltaU = laplacian(U)
    deltaV = laplacian(V)
    # We take the values of u and v inside the grid.
    Uc = U[1:-1,1:-1]
    Vc = V[1:-1,1:-1]
    # We update the variables.
    U[1:-1,1:-1], V[1:-1,1:-1] = \
        Uc + dt * (a * deltaU - Uc*Vc*Vc + F*(1-Uc)), \
        Vc + dt * (b * deltaV + Uc*Vc*Vc - (F+k)*Vc)
    # Neumann conditions: derivatives at the edges are null.
    for Z in (U, V):
        Z[0,:] = Z[1,:]
        Z[-1,:] = Z[-2,:]
        Z[:,0] = Z[:,1]
        Z[:,-1] = Z[:,-2]
_____no_output_____
Apache-2.0
BiologicalPatternFormation/WorkingReactionDiffusion.ipynb
topatomer/IntroToBiophysics
7. Finally, we display the variable $u$ after a time $T$ of simulation.
plt.imshow(U, cmap=plt.cm.jet, extent=[-1,1,-1,1],interpolation='none');
_____no_output_____
Apache-2.0
BiologicalPatternFormation/WorkingReactionDiffusion.ipynb
topatomer/IntroToBiophysics
Statistics
import numpy as np
_____no_output_____
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Order statistics Return the minimum value of x along the second axis.
x = np.arange(4).reshape((2, 2)) print("x=\n", x) print("ans=\n", np.amin(x, 1))
x= [[0 1] [2 3]] ans= [0 2]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Return the maximum value of x along the second axis. Reduce the second axis to the dimension with size one.
x = np.arange(4).reshape((2, 2)) print("x=\n", x) print("ans=\n", np.amax(x, 1, keepdims=True))
x= [[0 1] [2 3]] ans= [[1] [3]]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Calculate the difference between the maximum and the minimum of x along the second axis.
x = np.arange(10).reshape((2, 5)) print("x=\n", x) out1 = np.ptp(x, 1) out2 = np.amax(x, 1) - np.amin(x, 1) assert np.allclose(out1, out2) print("ans=\n", out1)
x= [[0 1 2 3 4] [5 6 7 8 9]] ans= [4 4]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Compute the 75th percentile of x along the second axis.
x = np.arange(1, 11).reshape((2, 5)) print("x=\n", x) print("ans=\n", np.percentile(x, 75, 1))
x= [[ 1 2 3 4 5] [ 6 7 8 9 10]] ans= [4. 9.]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Averages and variances Compute the median of flattened x.
x = np.arange(1, 10).reshape((3, 3)) print("x=\n", x) print("ans=\n", np.median(x))
x= [[1 2 3] [4 5 6] [7 8 9]] ans= 5.0
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Compute the weighted average of x.
x = np.arange(5) weights = np.arange(1, 6) out1 = np.average(x, weights=weights) out2 = (x*(weights/weights.sum())).sum() assert np.allclose(out1, out2) print(out1)
2.6666666666666665
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Compute the mean, standard deviation, and variance of x along the second axis.
x = np.arange(5) print("x=\n",x) out1 = np.mean(x) out2 = np.average(x) assert np.allclose(out1, out2) print("mean=\n", out1) out3 = np.std(x) out4 = np.sqrt(np.mean((x - np.mean(x)) ** 2 )) assert np.allclose(out3, out4) print("std=\n", out3) out5 = np.var(x) out6 = np.mean((x - np.mean(x)) ** 2 ) assert np.allclose(out5, out6) print("variance=\n", out5)
x= [0 1 2 3 4] mean= 2.0 std= 1.4142135623730951 variance= 2.0
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Correlating Compute the covariance matrix of x and y.
x = np.array([0, 1, 2]) y = np.array([2, 1, 0]) print("ans=\n", np.cov(x, y))
ans= [[ 1. -1.] [-1. 1.]]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
In the above covariance matrix, what does the -1 mean? It means `x` and `y` correlate perfectly in opposite directions. Compute Pearson product-moment correlation coefficients of x and y.
x = np.array([0, 1, 3]) y = np.array([2, 4, 5]) print("ans=\n", np.corrcoef(x, y))
ans= [[1. 0.92857143] [0.92857143 1. ]]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
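To connect this with the covariance example above, here is a small added check (using the same x and y) showing that the correlation coefficient is simply the covariance normalised by the two standard deviations:

```python
x = np.array([0, 1, 3])
y = np.array([2, 4, 5])

cov = np.cov(x, y)
# Pearson correlation = covariance divided by the product of the standard deviations
r_xy = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
print(r_xy)                      # ~0.92857143
print(np.corrcoef(x, y)[0, 1])   # same value
```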
Compute cross-correlation of x and y.
x = np.array([0, 1, 3]) y = np.array([2, 4, 5]) print("ans=\n", np.correlate(x, y))
ans= [19]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Histograms Compute the histogram of x against the bins.
x = np.array([0.5, 0.7, 1.0, 1.2, 1.3, 2.1]) bins = np.array([0, 1, 2, 3]) print("ans=\n", np.histogram(x, bins)) import matplotlib.pyplot as plt %matplotlib inline plt.hist(x, bins=bins) plt.show()
ans= (array([2, 3, 1], dtype=int64), array([0, 1, 2, 3]))
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Compute the 2d histogram of x and y.
xedges = [0, 1, 2, 3] yedges = [0, 1, 2, 3, 4] x = np.array([0, 0.1, 0.2, 1., 1.1, 2., 2.1]) y = np.array([0, 0.1, 0.2, 1., 1.1, 2., 3.3]) H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) print("ans=\n", H) plt.scatter(x, y) plt.grid()
ans= [[3. 0. 0. 0.] [0. 2. 0. 0.] [0. 0. 1. 1.]]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Count number of occurrences of 0 through 7 in x.
x = np.array([0, 1, 1, 3, 2, 1, 7]) print("ans=\n", np.bincount(x))
ans= [1 3 1 1 0 0 0 1]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Return the indices of the bins to which each value in x belongs.
x = np.array([0.2, 6.4, 3.0, 1.6]) bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) print("ans=\n", np.digitize(x, bins))
ans= [1 4 3 2]
MIT
Statistics.ipynb
Data-science-vidhya/Numpy
Saving and Loading ModelsIn this notebook, I'll show you how to save and load models with PyTorch. This is important because you'll often want to load previously trained models to use in making predictions or to continue training on new data.
%matplotlib inline %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt import torch from torch import nn from torch import optim import torch.nn.functional as F from torchvision import datasets, transforms import helper import fc_model # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) # Download and load the training data trainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Download and load the test data testset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz Downloading http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz Processing... Done!
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
Here we can see one of the images.
image, label = next(iter(trainloader)) helper.imshow(image[0,:]);
_____no_output_____
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
Train a networkTo make things more concise here, I moved the model architecture and training code from the last part to a file called `fc_model`. Importing this, we can easily create a fully-connected network with `fc_model.Network`, and train the network using `fc_model.train`. I'll use this model (once it's trained) to demonstrate how we can save and load models.
# Create the network, define the criterion and optimizer model = fc_model.Network(784, 10, [512, 256, 128]) criterion = nn.NLLLoss() optimizer = optim.Adam(model.parameters(), lr=0.001) fc_model.train(model, trainloader, testloader, criterion, optimizer, epochs=2)
Epoch: 1/2.. Training Loss: 1.684.. Test Loss: 1.004.. Test Accuracy: 0.627 Epoch: 1/2.. Training Loss: 1.023.. Test Loss: 0.752.. Test Accuracy: 0.719 Epoch: 1/2.. Training Loss: 0.897.. Test Loss: 0.672.. Test Accuracy: 0.738 Epoch: 1/2.. Training Loss: 0.773.. Test Loss: 0.655.. Test Accuracy: 0.750 Epoch: 1/2.. Training Loss: 0.744.. Test Loss: 0.604.. Test Accuracy: 0.768 Epoch: 1/2.. Training Loss: 0.742.. Test Loss: 0.577.. Test Accuracy: 0.772 Epoch: 1/2.. Training Loss: 0.673.. Test Loss: 0.567.. Test Accuracy: 0.781 Epoch: 1/2.. Training Loss: 0.672.. Test Loss: 0.565.. Test Accuracy: 0.785 Epoch: 1/2.. Training Loss: 0.682.. Test Loss: 0.553.. Test Accuracy: 0.791 Epoch: 1/2.. Training Loss: 0.633.. Test Loss: 0.544.. Test Accuracy: 0.798 Epoch: 1/2.. Training Loss: 0.621.. Test Loss: 0.539.. Test Accuracy: 0.794 Epoch: 1/2.. Training Loss: 0.598.. Test Loss: 0.539.. Test Accuracy: 0.797 Epoch: 1/2.. Training Loss: 0.622.. Test Loss: 0.528.. Test Accuracy: 0.809 Epoch: 1/2.. Training Loss: 0.595.. Test Loss: 0.505.. Test Accuracy: 0.816 Epoch: 1/2.. Training Loss: 0.631.. Test Loss: 0.514.. Test Accuracy: 0.808 Epoch: 1/2.. Training Loss: 0.589.. Test Loss: 0.513.. Test Accuracy: 0.815 Epoch: 1/2.. Training Loss: 0.617.. Test Loss: 0.518.. Test Accuracy: 0.813 Epoch: 1/2.. Training Loss: 0.582.. Test Loss: 0.485.. Test Accuracy: 0.816 Epoch: 1/2.. Training Loss: 0.573.. Test Loss: 0.487.. Test Accuracy: 0.824 Epoch: 1/2.. Training Loss: 0.590.. Test Loss: 0.501.. Test Accuracy: 0.814 Epoch: 1/2.. Training Loss: 0.596.. Test Loss: 0.487.. Test Accuracy: 0.825 Epoch: 1/2.. Training Loss: 0.596.. Test Loss: 0.505.. Test Accuracy: 0.808 Epoch: 1/2.. Training Loss: 0.539.. Test Loss: 0.493.. Test Accuracy: 0.820 Epoch: 2/2.. Training Loss: 0.581.. Test Loss: 0.487.. Test Accuracy: 0.820 Epoch: 2/2.. Training Loss: 0.567.. Test Loss: 0.498.. Test Accuracy: 0.819 Epoch: 2/2.. Training Loss: 0.545.. Test Loss: 0.506.. Test Accuracy: 0.822 Epoch: 2/2.. Training Loss: 0.552.. Test Loss: 0.481.. Test Accuracy: 0.825 Epoch: 2/2.. Training Loss: 0.558.. Test Loss: 0.480.. Test Accuracy: 0.825 Epoch: 2/2.. Training Loss: 0.537.. Test Loss: 0.461.. Test Accuracy: 0.829 Epoch: 2/2.. Training Loss: 0.523.. Test Loss: 0.471.. Test Accuracy: 0.831 Epoch: 2/2.. Training Loss: 0.550.. Test Loss: 0.459.. Test Accuracy: 0.833 Epoch: 2/2.. Training Loss: 0.522.. Test Loss: 0.457.. Test Accuracy: 0.834 Epoch: 2/2.. Training Loss: 0.540.. Test Loss: 0.458.. Test Accuracy: 0.833 Epoch: 2/2.. Training Loss: 0.525.. Test Loss: 0.488.. Test Accuracy: 0.821 Epoch: 2/2.. Training Loss: 0.469.. Test Loss: 0.464.. Test Accuracy: 0.832 Epoch: 2/2.. Training Loss: 0.509.. Test Loss: 0.481.. Test Accuracy: 0.819 Epoch: 2/2.. Training Loss: 0.542.. Test Loss: 0.468.. Test Accuracy: 0.831 Epoch: 2/2.. Training Loss: 0.570.. Test Loss: 0.447.. Test Accuracy: 0.838 Epoch: 2/2.. Training Loss: 0.495.. Test Loss: 0.458.. Test Accuracy: 0.832 Epoch: 2/2.. Training Loss: 0.512.. Test Loss: 0.449.. Test Accuracy: 0.831 Epoch: 2/2.. Training Loss: 0.512.. Test Loss: 0.451.. Test Accuracy: 0.836 Epoch: 2/2.. Training Loss: 0.516.. Test Loss: 0.441.. Test Accuracy: 0.837 Epoch: 2/2.. Training Loss: 0.496.. Test Loss: 0.467.. Test Accuracy: 0.830 Epoch: 2/2.. Training Loss: 0.486.. Test Loss: 0.461.. Test Accuracy: 0.831 Epoch: 2/2.. Training Loss: 0.514.. Test Loss: 0.437.. Test Accuracy: 0.843 Epoch: 2/2.. Training Loss: 0.497.. Test Loss: 0.445.. Test Accuracy: 0.836
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
Saving and loading networksAs you can imagine, it's impractical to train a network every time you need to use it. Instead, we can save trained networks then load them later to train more or use them for predictions.The parameters for PyTorch networks are stored in a model's `state_dict`. We can see the state dict contains the weight and bias matrices for each of our layers.
print("Our model: \n\n", model, '\n') print("The state dict keys: \n\n", model.state_dict().keys())
Our model: Network( (hidden_layers): ModuleList( (0): Linear(in_features=784, out_features=512, bias=True) (1): Linear(in_features=512, out_features=256, bias=True) (2): Linear(in_features=256, out_features=128, bias=True) ) (output): Linear(in_features=128, out_features=10, bias=True) (dropout): Dropout(p=0.5) ) The state dict keys: odict_keys(['hidden_layers.0.weight', 'hidden_layers.0.bias', 'hidden_layers.1.weight', 'hidden_layers.1.bias', 'hidden_layers.2.weight', 'hidden_layers.2.bias', 'output.weight', 'output.bias'])
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
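If it helps to see exactly what is stored, the following added snippet (assuming the `model` defined above) prints the shape of every tensor in the state dict:

```python
# Inspect the shapes of the weight and bias tensors stored in the state dict
for name, tensor in model.state_dict().items():
    print(name, tuple(tensor.shape))
```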
The simplest thing to do is simply save the state dict with `torch.save`. For example, we can save it to a file `'checkpoint.pth'`.
torch.save(model.state_dict(), 'checkpoint.pth')
_____no_output_____
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
Then we can load the state dict with `torch.load`.
state_dict = torch.load('checkpoint.pth') print(state_dict.keys())
odict_keys(['hidden_layers.0.weight', 'hidden_layers.0.bias', 'hidden_layers.1.weight', 'hidden_layers.1.bias', 'hidden_layers.2.weight', 'hidden_layers.2.bias', 'output.weight', 'output.bias'])
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
And to load the state dict into the network, you do `model.load_state_dict(state_dict)`.
model.load_state_dict(state_dict)
_____no_output_____
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
Seems pretty straightforward, but as usual it's a bit more complicated. Loading the state dict works only if the model architecture is exactly the same as the checkpoint architecture. If I create a model with a different architecture, this fails.
# Try this
model = fc_model.Network(784, 10, [400, 200, 100])

# This will throw an error because the tensor sizes are wrong!
model.load_state_dict(state_dict)
_____no_output_____
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
This means we need to rebuild the model exactly as it was when trained. Information about the model architecture needs to be saved in the checkpoint, along with the state dict. To do this, you build a dictionary with all the information you need to completely rebuild the model.
checkpoint = {'input_size': 784,
              'output_size': 10,
              'hidden_layers': [each.out_features for each in model.hidden_layers],
              'state_dict': model.state_dict()}

torch.save(checkpoint, 'checkpoint.pth')
_____no_output_____
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
Now the checkpoint has all the necessary information to rebuild the trained model. You can easily make that a function if you want. Similarly, we can write a function to load checkpoints.
def load_checkpoint(filepath):
    checkpoint = torch.load(filepath)
    model = fc_model.Network(checkpoint['input_size'],
                             checkpoint['output_size'],
                             checkpoint['hidden_layers'])
    model.load_state_dict(checkpoint['state_dict'])
    return model

model = load_checkpoint('checkpoint.pth')
print(model)
Network( (hidden_layers): ModuleList( (0): Linear(in_features=784, out_features=400, bias=True) (1): Linear(in_features=400, out_features=200, bias=True) (2): Linear(in_features=200, out_features=100, bias=True) ) (output): Linear(in_features=100, out_features=10, bias=True) (dropout): Dropout(p=0.5) )
MIT
1. Introduction/.ipynb_checkpoints/Part 6 - Saving and Loading Models-checkpoint.ipynb
Not-A-Builder/DL-PyTorch
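The text above notes that building the checkpoint dictionary can easily be wrapped in a function as well. One possible sketch (assuming the same `fc_model.Network` architecture used in this notebook, with the input and output sizes hard-coded for simplicity) is:

```python
def save_checkpoint(model, filepath):
    """Bundle the architecture information and the trained weights and save them to disk."""
    checkpoint = {'input_size': 784,
                  'output_size': 10,
                  'hidden_layers': [each.out_features for each in model.hidden_layers],
                  'state_dict': model.state_dict()}
    torch.save(checkpoint, filepath)

save_checkpoint(model, 'checkpoint.pth')
```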
Modules, Packages and Classes When working with Python interactively, as we have thus far been doing, all functions that we define are available only within that notebook. This would similarly be the case if we were to write a simple script within an IDE. Thus, in order to write more complex programs it is important to be able to write code in carefully organised and structured files that can then be accessed from the main program. To support this, Python has a way to put definitions in a file and use them in a script or in an interactive instance. This type of file is called a _Module_. Groups of related modules can be used to create _Packages_. As Python is open source and utilised by a broad community in research and industry, a wide variety of advanced packages for machine learning and statistics have been developed, and these are well supported and documented. In this notebook we will discuss how to create modules and call them from scripts. We will list some useful modules from the Python standard library, then introduce the names of Python packages that will be used throughout the course. At the end we will introduce the topic of object-oriented programming in Python. Python .py files Similarly to MATLAB, Python (.py) files can have many uses. They can be used to encapsulate modules and classes, or they can be used to encapsulate a script that imports external modules, reads in data, then processes the data through inline code and functions. Modules We will start by creating our own module, 'simplemath.py', containing some simple math functions. In it we will provide simple math operations on two inputs:
def mysum(x, y):
    return x + y

def mult(x, y):
    return x * y

def divide(x, y):
    return x / y
_____no_output_____
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
Now we will call these functions in a separate Python script 'apply_simple_functions.py'. Open these files in your IDE and try running 'apply_simple_functions.py'. Note the initial line, which loads the module and renames it in shorthand (see also below); it is important that the module file is available in the same folder as the script you call it from, or on your system path.
from BHF_Python_workshop import simplemath as sm  # load module

# define variables
x = 2
y = 5

print('output sum of x and y:', sm.mysum(x, y))
print('output product of x and y:', sm.mult(x, y))
print('output quotient of x and y:', sm.divide(x, y))
output sum of x and y: 7 output product of x and y: 10 output quotient of x and y: 0.4
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
The functions defined in the module are now available in the script (and this notebook) by simply prefixing them with the name given to the module when it is imported. It is also possible to load only selected functions from a module using the call:
from BHF_Python_workshop.simplemath import mysum as simplesum  # note use of 'as' here, which allows renaming of functions

print('output sum of x and y:', simplesum(x, y))
output sum of x and y: 7
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
Alternatively all functions can be imported using *
from simplemath import *

print('output sum of x and y:', mysum(x, y))
print('output product of x and y:', mult(x, y))
print('output quotient of x and y:', divide(x, y))
_____no_output_____
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
Standard Modules Some modules come packaged with Python as standard. Useful examples include ```os```:
import os

dirname = '/some/path/to/directory'
filename = 'myfile.txt'

print('my file path is:', os.path.join(dirname, filename))                       # intelligent concatenation of path components
print('my file path exists:', os.path.exists(os.path.join(dirname, filename)))   # checks whether file exists
_____no_output_____
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
```os``` performs useful operations on filenames; for more examples see https://docs.python.org/3/library/os.path.html#module-os.path. Also, ```sys``` allows the addition or removal of paths from your Python search path (https://docs.python.org/3/library/sys.html#module-sys), and is useful when you want to add the location of new modules to your path, for example:
import sys

print('system path:', sys.path)

# add path to your system
sys.path.append('/some/path/')
print('after append system path:', sys.path)

# remove path from your system
sys.path.remove('/some/path/')
_____no_output_____
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
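A few more ```os.path``` helpers that often come in handy (a small added example, using a made-up path):

```python
import os

path = '/some/path/to/directory/myfile.txt'
print('directory part:', os.path.dirname(path))      # '/some/path/to/directory'
print('file name:', os.path.basename(path))          # 'myfile.txt'
print('name and extension:', os.path.splitext(os.path.basename(path)))  # ('myfile', '.txt')
```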
```random``` is a random number generator
import random

mult = 25
rand_int = random.randint(1, 10)         # random int in defined range
rand_float = random.random()             # random float between 0 and 1
rand_float_gen = random.random()*mult    # random float between 0 and 25

print('my random integer is: ', rand_int)
print('my random float (between 0 and 1) is: ', rand_float)
print('my random float (between 0 and {}) is: {}'.format(mult, rand_float_gen))
_____no_output_____
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
math is Python's standard math module:
import math

x = 2.2
y = 4

print('ceil of {} is {}'.format(x, math.ceil(x)))
print('{} to the power {} is {}'.format(x, y, math.pow(x, y)))
print('The natural log of {} is {}'.format(x, math.log(x)))
_____no_output_____
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
For an extensive list of all standard math operations see https://docs.python.org/3/library/math.html#module-math. Finally, there is ```copy```, which was introduced in the previous notebook for generating hard copies of objects in memory (https://docs.python.org/3/library/copy.html). For more examples of standard modules see https://docs.python.org/3/py-modindex.html. Classes and Objects Python is an object-oriented language. This allows code to be structured into classes, giving a clean and efficient coding style which improves code structure and reuse. The basic structure of a Python class can be written in pseudo code as ```class ClassName: . . . ```, where it is generally important to start a class with a constructor (instantiation function) such as:
class MyClass: """A simple example class""" def __init__(self): # constructor self.data = [] x=MyClass() #Β creates new instance of class
_____no_output_____
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
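The ```copy``` module mentioned above is not demonstrated in this notebook; a minimal added sketch of the difference between a shallow and a deep copy might look like this:

```python
import copy

a = [[1, 2], [3, 4]]
b = copy.copy(a)       # shallow copy: new outer list, but the inner lists are shared with a
c = copy.deepcopy(a)   # deep copy: a fully independent object

a[0][0] = 99
print(b[0][0])   # 99 -- the shallow copy still references the original inner list
print(c[0][0])   # 1  -- the deep copy is unaffected
```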
And, in practice, the statements inside a class definition will usually be method (object function) definitions e.g. :
class MyClass: """A simple example class""" def __init__(self): self.data = [] def f(self): # method return 'hello world' x=MyClass() #Β creates new instance of class print(x.f()) # now run the class sub function f
_____no_output_____
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
Understanding the structure of Python classes is essential knowledge for developing advanced Python packages. However, in this course we will stick to relatively simple scripting, and we leave investigation of more advanced features to the reader. For more material on Python classes see: https://docs.python.org/3/tutorial/classes.html **Exercise: Define a class representing a point in 2D space which has members for storing the x and y position, and a method for calculating its Cartesian length (one possible solution is sketched below).**
_____no_output_____
Apache-2.0
2.3_Modules_and_Packages.ipynb
estherpuyol/BHF_Python_workshop
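One possible sketch of a solution to the exercise, interpreting 'Cartesian length' as the Euclidean distance from the origin:

```python
import math

class Point2D:
    """A point in 2D space."""
    def __init__(self, x=0.0, y=0.0):   # constructor: store the coordinates
        self.x = x
        self.y = y

    def length(self):
        """Cartesian (Euclidean) distance of the point from the origin."""
        return math.sqrt(self.x**2 + self.y**2)

p = Point2D(3.0, 4.0)
print('length of p:', p.length())   # expected: 5.0
```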
Examples of the supported features in Autograd Before using Autograd for more complicated calculations, it might be useful to experiment with what kind of functions Autograd is capable of finding the gradient of. The following Python functions are just meant to illustrate what Autograd can do, but please feel free to experiment with other, possibly more complicated, functions as well!
import autograd.numpy as np from autograd import grad
_____no_output_____
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
Supported functions Here are some examples of supported function implementations that Autograd can differentiate. Keep in mind that this list of examples is not comprehensive, but rather explores which basic constructions one might often use.
def f1(x): return x**3 + 1 f1_grad = grad(f1) # Remember to send in float as argument to the computed gradient from Autograd! a = 1.0 # See the evaluated gradient at a using autograd: print("The gradient of f1 evaluated at a = %g using autograd is: %g"%(a,f1_grad(a))) # Compare with the analytical derivative, that is f1'(x) = 3*x**2 grad_analytical = 3*a**2 print("The gradient of f1 evaluated at a = %g by finding the analytic expression is: %g"%(a,grad_analytical))
The gradient of f1 evaluated at a = 1 using autograd is: 3 The gradient of f1 evaluated at a = 1 by finding the analytic expression is: 3
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
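As an aside (an added example, not from the original notebook), Autograd also provides `elementwise_grad`, which evaluates the derivative of a scalar function at every element of an input array in a single call:

```python
from autograd import elementwise_grad

f1_grad_elementwise = elementwise_grad(f1)

x_values = np.array([0.0, 1.0, 2.0])
print(f1_grad_elementwise(x_values))   # expected: 3*x**2 -> [ 0.  3. 12.]
```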
Functions with two (or more) arguments To differentiate with respect to two (or more) arguments of a Python function, Autograd needs to know which variable the function is being differentiated with respect to.
def f2(x1, x2):
    return 3*x1**3 + x2*(x1 - 5) + 1

# By sending the argument 0, Autograd will compute the derivative w.r.t the first variable, in this case x1
f2_grad_x1 = grad(f2, 0)

# ... and differentiate w.r.t x2 by sending 1 as an additional argument to grad
f2_grad_x2 = grad(f2, 1)

x1 = 1.0
x2 = 3.0

print("Evaluating at x1 = %g, x2 = %g"%(x1, x2))
print("-"*30)

# Compare with the analytical derivatives:
# Derivative of f2 w.r.t x1 is: 9*x1**2 + x2:
f2_grad_x1_analytical = 9*x1**2 + x2

# Derivative of f2 w.r.t x2 is: x1 - 5:
f2_grad_x2_analytical = x1 - 5

# See the evaluated derivatives:
print("The derivative of f2 w.r.t x1: %g"%( f2_grad_x1(x1, x2) ))
print("The analytical derivative of f2 w.r.t x1: %g"%( f2_grad_x1_analytical ))
print()
print("The derivative of f2 w.r.t x2: %g"%( f2_grad_x2(x1, x2) ))
print("The analytical derivative of f2 w.r.t x2: %g"%( f2_grad_x2_analytical ))
Evaluating at x1 = 1, x2 = 3 ------------------------------ The derivative of f2 w.r.t x1: 12 The analytical derivative of f2 w.r.t x1: 12 The derivative of f2 w.r.t x2: -4 The analytical derivative of f2 w.r.t x2: -4
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
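If the full gradient vector of f2 is wanted as a single object, one possible (added) approach is to wrap the two arguments in one array and differentiate the wrapper:

```python
def f2_wrapped(w):
    # w holds (x1, x2) in a single array, so grad returns the whole gradient vector
    return f2(w[0], w[1])

f2_full_grad = grad(f2_wrapped)
print(f2_full_grad(np.array([1.0, 3.0])))   # expected: [9*x1**2 + x2, x1 - 5] = [12., -4.]
```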
Note that, used this way, the grad function does not directly produce the true gradient of the function: the true gradient of a function of two or more variables is a vector, where each element is the partial derivative of the function w.r.t one variable (the sketch above and the next example show two ways to obtain it). Functions using the elements of their argument directly
def f3(x): # Assumes x is an array of length 5 or higher return 2*x[0] + 3*x[1] + 5*x[2] + 7*x[3] + 11*x[4]**2 f3_grad = grad(f3) x = np.linspace(0,4,5) # Print the computed gradient: print("The computed gradient of f3 is: ", f3_grad(x)) # The analytical gradient is: (2, 3, 5, 7, 22*x[4]) f3_grad_analytical = np.array([2, 3, 5, 7, 22*x[4]]) # Print the analytical gradient: print("The analytical gradient of f3 is: ", f3_grad_analytical)
The computed gradient of f3 is: [ 2. 3. 5. 7. 88.] The analytical gradient of f3 is: [ 2. 3. 5. 7. 88.]
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
Note that in this case, when sending an array as the input argument, the output from Autograd is another array. This is the true gradient of the function, unlike in the previous example, where separate partial derivatives were computed. By using arrays to represent the variables, the output from Autograd may be easier to work with, as it is closer to what one would expect from a gradient-evaluating function. Functions using mathematical functions from Numpy
def f4(x): return np.sqrt(1+x**2) + np.exp(x) + np.sin(2*np.pi*x) f4_grad = grad(f4) x = 2.7 # Print the computed derivative: print("The computed derivative of f4 at x = %g is: %g"%(x,f4_grad(x))) # The analytical derivative is: x/sqrt(1 + x**2) + exp(x) + cos(2*pi*x)*2*pi f4_grad_analytical = x/np.sqrt(1 + x**2) + np.exp(x) + np.cos(2*np.pi*x)*2*np.pi # Print the analytical gradient: print("The analytical gradient of f4 at x = %g is: %g"%(x,f4_grad_analytical))
The computed derivative of f4 at x = 2.7 is: 13.8759 The analytical gradient of f4 is: 13.87586944687107
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
Functions using if-else tests
def f5(x): if x >= 0: return x**2 else: return -3*x + 1 f5_grad = grad(f5) x = 2.7 # Print the computed derivative: print("The computed derivative of f5 at x = %g is: %g"%(x,f5_grad(x))) # The analytical derivative is: # if x >= 0, then 2*x # else -3 if x >= 0: f5_grad_analytical = 2*x else: f5_grad_analytical = -3 # Print the analytical derivative: print("The analytical derivative of f5 at x = %g is: %g"%(x,f5_grad_analytical))
The computed derivative of f5 is: 5.4 The analytical derivative of f5 is: 5.4
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
Functions using for- and while loops
def f6_for(x): val = 0 for i in range(10): val = val + x**i return val def f6_while(x): val = 0 i = 0 while i < 10: val = val + x**i i = i + 1 return val f6_for_grad = grad(f6_for) f6_while_grad = grad(f6_while) x = 0.5 # Print the computed derivaties of f6_for and f6_while print("The computed derivative of f6_for at x = %g is: %g"%(x,f6_for_grad(x))) print("The computed derivative of f6_while at x = %g is: %g"%(x,f6_while_grad(x))) # Both of the functions are implementation of the sum: sum(x**i) for i = 0, ..., 9 # The analytical derivative is: sum(i*x**(i-1)) f6_grad_analytical = 0 for i in range(10): f6_grad_analytical += i*x**(i-1) print("The analytical derivative of f6 at x = %g is: %g"%(x,f6_grad_analytical))
The computed derivative of f6_for at x = 0.5 is: 3.95703 The computed derivative of f6_while at x = 0.5 is: 3.95703 The analytical derivative of f6 at x = 0.5 is: 3.95703
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
Functions using recursion
def f7(n): # Assume that n is an integer if n == 1 or n == 0: return 1 else: return n*f7(n-1) f7_grad = grad(f7) n = 2.0 print("The computed derivative of f7 at n = %d is: %g"%(n,f7_grad(n))) # The function f7 is an implementation of the factorial of n. # By using the product rule, one can find that the derivative is: f7_grad_analytical = 0 for i in range(int(n)-1): tmp = 1 for k in range(int(n)-1): if k != i: tmp *= (n - k) f7_grad_analytical += tmp print("The analytical derivative of f7 at n = %d is: %g"%(n,f7_grad_analytical))
The computed derivative of f7 at n = 2 is: 1 The analytical derivative of f7 at n = 2 is: 1
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
Note that if n is equal to zero or one, Autograd will give an error message. This message appears when the output is independent of the input. Unsupported functions Autograd supports many features. However, there are some constructions that are not (yet) supported by Autograd. Assigning a value to the variable being differentiated with respect to
def f8(x): # Assume x is an array x[2] = 3 return x*2 f8_grad = grad(f8) x = 8.4 print("The derivative of f8 is:",f8_grad(x))
_____no_output_____
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
Here, Autograd tells us that an 'ArrayBox' does not support item assignment. The item assignment happens when the program tries to assign the value 3 to x[2]. However, Autograd has implemented the computation of the derivative such that this assignment is not possible (a possible workaround is sketched at the end of this section). The syntax a.dot(b) when finding the dot product
def f9(a): # Assume a is an array with 2 elements b = np.array([1.0,2.0]) return a.dot(b) f9_grad = grad(f9) x = np.array([1.0,0.0]) print("The derivative of f9 is:",f9_grad(x))
_____no_output_____
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
Here we are told that the 'dot' function does not belong to Autograd's version of a Numpy array. To overcome this, an alternative syntax which also computes the dot product can be used:
def f9_alternative(x): # Assume a is an array with 2 elements b = np.array([1.0,2.0]) return np.dot(x,b) # The same as x_1*b_1 + x_2*b_2 f9_alternative_grad = grad(f9_alternative) x = np.array([3.0,0.0]) print("The gradient of f9 is:",f9_alternative_grad(x)) # The analytical gradient of the dot product of vectors x and b with two elements (x_1,x_2) and (b_1, b_2) respectively # w.r.t x is (b_1, b_2).
The gradient of f9 is: [1. 2.]
CC0-1.0
doc/src/GradientOptim/autodiff/examples_allowed_functions.ipynb
ndavila/MachineLearningMSU
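For completeness, here is a possible (added) workaround for the earlier item-assignment example: instead of modifying the input array in place, build a new array with np.where, which Autograd can differentiate through. Note that grad also requires a scalar output, so the result is summed here; this assumes the autograd imports from the start of this section.

```python
def f8_alternative(x):
    # Assume x is an array with 3 elements.
    # Replace the element at index 2 with 3.0 by building a new array instead of assigning in place.
    mask = np.array([False, False, True])
    x_new = np.where(mask, 3.0, x)
    # grad requires a scalar output, so sum the result
    return np.sum(x_new * 2)

f8_alternative_grad = grad(f8_alternative)

x = np.array([1.0, 2.0, 5.0])
print("The gradient of f8_alternative is:", f8_alternative_grad(x))
# expected: [2., 2., 0.] -- the overwritten element does not affect the output
```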