Build the boolean masks for the requested dates
import datetime

# Boolean mask over the date columns of data_dose1 (dates before 2021-08-15)
true_false_female = []
for key in data_dose1.columns[:-1]:
    changed_key = key.split('.')[0].split('/')  # 'dd/mm/yyyy' -> [dd, mm, yyyy]
    true_false_female.append(datetime.date(int(changed_key[2]), int(changed_key[1]), int(changed_key[0])) < datetime.date(2021, 8, 15))
true_false_female.append(False)  # the last (non-date) column is excluded

# Same mask built from the data_dose2 columns
true_false_male = []
for key in data_dose2.columns[:-1]:
    changed_key = key.split('.')[0].split('/')
    true_false_male.append(datetime.date(int(changed_key[2]), int(changed_key[1]), int(changed_key[0])) < datetime.date(2021, 8, 15))
true_false_male.append(False)
_____no_output_____
MIT
Q7_Asgn1.nbconvert.ipynb
sunil-dhaka/india-covid19-cases-and-vaccination-analysis
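The same masks can also be built with pandas' datetime parsing. This is only an illustrative sketch, assuming (as in the cell above) that every column label except the last follows a 'dd/mm/yyyy' pattern, possibly with a '.N' suffix:

```python
import pandas as pd

def date_mask(df, cutoff='2021-08-15'):
    # Strip any '.N' suffix from the column labels and parse them as dd/mm/yyyy
    dates = pd.to_datetime([col.split('.')[0] for col in df.columns[:-1]],
                           format='%d/%m/%Y')
    # True for dates strictly before the cutoff; the trailing non-date column gets False
    return list(dates < pd.Timestamp(cutoff)) + [False]

# true_false_female = date_mask(data_dose1)
# true_false_male = date_mask(data_dose2)
```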
Districts
districtids = []
stateids = []
ratio = []
count1 = []
count2 = []

for i in range(len(district_ids)):
    for j in range(data_dose1.shape[0]):
        if district_ids[i] == data_dose1['District'][j+1]:  # why there is 'j+1' in this line :: due to that NaN in first row
            districtids.append(district_ids[i])
            stateids.append(district_ids[i].split('_')[0])
            # covishield doses (data_dose2) and covaxin doses (data_dose1) for this district
            count1.append((data_dose2.loc[data_dose2['District'] == district_ids[i], np.array(true_false_male)]).astype(int).sum().sum())
            count2.append((data_dose1.loc[data_dose1['District'] == district_ids[i], np.array(true_false_male)]).astype(int).sum().sum())
            if count2[-1] == 0:
                ratio.append(np.nan)  # ratio = NaN when the covaxin count is 0
            else:
                ratio.append(np.round(count1[-1] / count2[-1], 3))
            break

ratio_df = pd.DataFrame({'districtid': districtids, 'vaccinationratio': ratio})
ratio_df.sort_values('vaccinationratio', axis=0, ascending=True, kind='mergesort', inplace=True)
ratio_df.reset_index(inplace=True, drop=True)
ratio_df.to_csv('district-vaccine-type-ratio.csv', index=False)
_____no_output_____
MIT
Q7_Asgn1.nbconvert.ipynb
sunil-dhaka/india-covid19-cases-and-vaccination-analysis
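For reference, the per-district counting done in the nested loop above can also be expressed as a single groupby pass. This is only a rough sketch, assuming both dose tables carry a 'District' column alongside the masked date columns:

```python
# Sum the masked date columns per district in each table, then take the ratio
mask = np.array(true_false_male)

covishield = data_dose2.loc[:, mask].astype(int).groupby(data_dose2['District']).sum().sum(axis=1)
covaxin = data_dose1.loc[:, mask].astype(int).groupby(data_dose1['District']).sum().sum(axis=1)

# Dividing by NaN where covaxin is zero mirrors the loop's ratio = NaN convention
sketch_df = pd.DataFrame({'districtid': covishield.index,
                          'vaccinationratio': np.round(covishield / covaxin.replace(0, np.nan), 3).values})
```

Note that this sketch covers every district present in the tables, whereas the loop restricts itself to `district_ids`.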
States
ratio_df1 = pd.DataFrame({'districtid': districtids, 'covaxin': count2, 'covishield': count1, 'stateid': stateids})

unique_state_codes = np.array(np.unique(stateids))

stateid = []
ratio_state = []
covaxin_count = []
covishield_count = []
for i in range(len(unique_state_codes)):
    stateid.append(unique_state_codes[i])
    foo_df = ratio_df1.loc[ratio_df1.stateid == unique_state_codes[i]]
    covaxin_count.append(foo_df.covaxin.astype(int).sum())
    covishield_count.append(foo_df.covishield.astype(int).sum())
    if covaxin_count[-1] == 0:
        ratio_state.append(np.nan)  # ratio = NaN when the covaxin count is 0
    else:
        ratio_state.append(np.round(covishield_count[-1] / covaxin_count[-1], 3))

ratio_df_states = pd.DataFrame({'stateid': stateid, 'vaccinationratio': ratio_state})
ratio_df_states.sort_values('vaccinationratio', axis=0, ascending=True, kind='mergesort', inplace=True)
ratio_df_states.reset_index(inplace=True, drop=True)
ratio_df_states.to_csv('state-vaccine-type-ratio.csv', index=False)
_____no_output_____
MIT
Q7_Asgn1.nbconvert.ipynb
sunil-dhaka/india-covid19-cases-and-vaccination-analysis
Overall
# overall
overall_df = pd.DataFrame({'overallid': ['IN'],
                           'vaccinationratio': [np.round(sum(covishield_count) / sum(covaxin_count), 3)]})
overall_df.to_csv('overall-vaccine-type-ratio.csv', index=False)
_____no_output_____
MIT
Q7_Asgn1.nbconvert.ipynb
sunil-dhaka/india-covid19-cases-and-vaccination-analysis
Downloading MEDLINE/PubMed Data and Posting to PostgreSQL

Brandon L. Kramer - University of Virginia's Biocomplexity Institute

This notebook details the process of downloading all of [PubMed's MEDLINE data](https://www.nlm.nih.gov/databases/download/pubmed_medline.html) and posting it to a PostgreSQL database ([UVA's Rivanna OpenOnDemand](https://rivanna-portal.hpc.virginia.edu/)). To do this, we use the terminal to download all of the data into Rivanna. Next, we use the [PubMedPortable](https://github.com/KerstenDoering/PubMedPortable) package through the Python shell to parse all of the data and build up a database.

Step 1: Download PubMed Data

First, we download all of the data from [here](ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/) using `wget`.
cd /scratch/kb7hp/pubmed_new
wget --recursive --no-parent ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
Step 2: Download PubMedPortable

Second, we will clone the [PubMedPortable package from GitHub](https://github.com/KerstenDoering/PubMedPortable).
cd /home/kb7hp/git/
git clone https://github.com/KerstenDoering/PubMedPortable.git
cd PubMedPortable
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
Step 3: Populate Tables in PostgreSQL Database

Go to the [PubMedPortable](https://github.com/KerstenDoering/PubMedPortable/wiki/build-up-a-relational-database-in-postgresql) protocol:

- Skip the part on making a superuser named parser and use your Rivanna login and pwd instead
- Since `PubMedPortable` is written with the login/pwd of parser/parser, you have to update lines 704-750 of `PubMedDB.py`
- Add `import psycopg2 as pg` to the beginning of the file
- Update all the connections to: `con = 'postgresql+psycopg2://login:pwd@postgis1/sdad'` (see the sketch below)
- Update all the `print` statements to `print()` (e.g. line 728)

Go to [Rivanna OpenOnDemand](https://rivanna-portal.hpc.virginia.edu/), click on Clusters > Rivanna Shell Access, and then create a new schema using the following commands:
psql -U login -d sdad -h postgis1
CREATE SCHEMA pubmed_2021;
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
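For clarity, the `PubMedDB.py` edits described in Step 3 amount to swapping the package's hard-coded parser/parser connection for your own credentials and making the file Python 3 compatible. The snippet below only illustrates the kind of change involved; the variable names are hypothetical and not the actual contents of `PubMedDB.py`:

```python
import psycopg2 as pg
from sqlalchemy import create_engine

# Replace the package's parser/parser defaults with your Rivanna login and password
con = 'postgresql+psycopg2://login:pwd@postgis1/sdad'
engine = create_engine(con)

# Python 3 also requires print() calls rather than Python 2 print statements
print('connected to', con)
```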
Then return to the Python terminal and run this to populate the new schema:
cd /home/kb7hp/git/PubMedPortable
python PubMedDB.py -d pubmed_2021
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
Go back to the Rivanna PostgreSQL shell to check if that worked:
\dt pubmed_2021.*
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
Looks like it did, so now we can start parsing.

Step 4: Testing MEDLINE Data Upload

We don't want to start by dumping all 1062 files, so let's just start with one. We will create a pm_0001 folder and download just one of the .xml files from PubMed. Next, we had to debug the `PubMedParser.py` file by updating all of the `con` and `print` statements as we did above and updating `_next` to `__next__` on line 65 (see the sketch below). After doing this, we ran the following code to upload our first test file.

Batch 1 (0001)

Let's give this a try:
cd /home/kb7hp/git/PubMedPortable/data
mkdir pm_0001
cd pm_0001
wget ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed21n0001.xml.gz
cd /home/kb7hp/git/PubMedPortable/
python PubMedParser.py -i data/pm_0001/
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
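The `_next` to `__next__` change mentioned above is the standard Python 2 to Python 3 iterator fix: under Python 3 the iterator protocol looks for a `__next__()` method. A minimal illustration of the pattern (not the actual `PubMedParser.py` code):

```python
class FileIterator:
    def __init__(self, items):
        self._items = iter(items)

    def __iter__(self):
        return self

    def __next__(self):  # the Python 2 era method name (e.g. `_next`) no longer gets called
        return next(self._items)

for name in FileIterator(['pubmed21n0001.xml.gz']):
    print(name)
```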
It took about 8 minutes to run this one file.

Step 5: Uploading the Rest of the MEDLINE Dataset to the PostgreSQL Database in Batches

Let's add the rest of the data to Postgres. Ideally, we would just dump the whole thing at once, but Rivanna limits the amount of data we can store locally (for some reason `PubMedPortable` does not like absolute paths). Thus, we will only copy part of the data from the `/scratch` folder to our temporary local folders.

Batch 2 (0002-0011)
# move all the .xml.gz files to their own folder
cd /scratch/kb7hp/
mkdir pubmed_gz
cd /scratch/kb7hp/pubmed_new/ftp.ncbi.nlm.nih.gov/pubmed/baseline/
mv *.gz /scratch/kb7hp/pubmed_gz

# and copy 10 of those files to that new folder
cd /scratch/kb7hp/pubmed_gz/
cp pubmed21n{0002..0011}.xml.gz /home/kb7hp/git/PubMedPortable/data/pm_0002_0011

# and then we add those 10 files to our existing database
cd /home/kb7hp/git/PubMedPortable/data/
python PubMedParser.py -i data/pm_0002_0011/ -c -p 4
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
While I initially thought this process would take ~80 minutes, running these 10 files took only ~22 minutes because the 4 cores cut the time to roughly a quarter. Thus, we spun up an instance with 5 cores (1 extra, as directed by the Rivanna admins) and ran the next ~90 files with this new allocation. When I checked the `pubmed_2021.tbl_abstract` table, we had 146,854 rows, which seemed low. Yet, the notification from the `PubMedParser.py` file indicated that all files were parsed. I would later come to realize that there are fewer abstracts than total records, which can be validated in the `pubmed_2021.tbl_medline_citation` table.

Batch 3 (0012-0100)

Let's dump the next batch of citations (0012-0100). We will copy over the next batch of data and, with multiprocessing accounted for, this should take ~3 hours to complete.
cd /scratch/kb7hp/pubmed_gz/
cp pubmed21n{0012..0100}.xml.gz /home/kb7hp/git/PubMedPortable/data/pm_0012_0100
cd /home/kb7hp/git/PubMedPortable/data/
python PubMedParser.py -i data/pm_0012_0100/ -c -p 4
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
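As a sanity check on that estimate, the Batch 2 rate of ~22 minutes per 10 files (with 4 worker processes) scales to the 89 files in this batch roughly as:

\begin{align} 89 \text{ files} \times \frac{22\ \text{min}}{10\ \text{files}} \approx 196\ \text{min} \approx 3.3\ \text{hours} \end{align}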
And indeed it did! We have loaded the first 100 files and it took just over 3 hours (13:19:19-16:22:52).

Batch 4 (0101-0500)

Now, let's get a bit more ambitious. Given it's now nighttime, we can boost the allocation to 9 cores and try ~400 files. This should take around 7 hours to complete (400 files * 8 mins/file with 8 cores).
# first we will clean up the local directory
cd /home/kb7hp/git/PubMedPortable/data/
rm -r pm_0001
rm -r pm_0002_0011
rm -r pm_0012_0100

# copy over our new files
cd /scratch/kb7hp/pubmed_gz
cp pubmed21n{0101..0500}.xml.gz /home/kb7hp/git/PubMedPortable/data/pm_0101_0500

# and then run the script for the next 400 files
cd /home/kb7hp/git/PubMedPortable/
python PubMedParser.py -i data/pm_0101_0500/ -c -p 8
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
After parsing the pm_0101_0500 files, I woke up to a minor error, but it looks like the program continued running up through the very last citation of the last file. I checked the `pubmed_2021.tbl_abstract` table and had 6,388,959 entries, while `pubmed_2021.tbl_medline_citation` had 13,095,000, which is almost half of the 26 million advertised on [MEDLINE's website](https://www.nlm.nih.gov/bsd/medline.html). Thus, it does seem like everything parsed without any serious problems. I decided to finish up the rest of the file parsing since (1) I cannot address any problem in a systematic way and (2) a full database with problems is still better than a half database with problems.

Batch 5 (0501-0750)

With the space limitations, let's take a conservative approach and post the next 250 files to the database (once again using 9 cores on Rivanna).
cd /home/kb7hp/git/PubMedPortable/data
rm -r pm_0101_0500
mkdir pm_0501_0750
cd /scratch/kb7hp/pubmed_gz
cp pubmed21n{0501..0750}.xml.gz /home/kb7hp/git/PubMedPortable/data/pm_0501_0750
cd /home/kb7hp/git/PubMedPortable/
python PubMedParser.py -i data/pm_0501_0750/ -c -p 8
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
This took just over 4 hours (08:34:23-13:00:31) and worked flawlessly (no errors whatsoever). At this point, we have 12,158,748 abstracts in the `pubmed_2021.tbl_abstract` table.

Batch 6 (0751-0900)

While I thought this would be the last batch, I ran out of space again trying to dump 750-1062. Let's do up to 900 and do the last batch later today.
cd /home/kb7hp/git/PubMedPortable/data
rm -r pm_0501_0750
mkdir pm_0751_0900
cd /scratch/kb7hp/pubmed_gz
cp pubmed21n{0751..0900}.xml.gz /home/kb7hp/git/PubMedPortable/data/pm_0751_0900
cd /home/kb7hp/git/PubMedPortable/
python PubMedParser.py -i data/pm_0751_0900/ -c -p 8
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
That took __ hours and once again ran without errors.

Batch 7 (0901-1062)

We dumped the last batch with this code and we were done!
cd /home/kb7hp/git/PubMedPortable/data
rm -r pm_0751_0900
mkdir pm_0901_1062
cd /scratch/kb7hp/pubmed_gz
cp pubmed21n{0901..1062}.xml.gz /home/kb7hp/git/PubMedPortable/data/pm_0901_1062
cd /home/kb7hp/git/PubMedPortable/
python PubMedParser.py -i data/pm_0901_1062/ -c -p 8
# started this around 8:50am
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
On to the Next Step in Your Research Project

Overall, this was a surprisingly easy process. Major kudos goes out to the PubMedPortable team for this fantastic package. Now, let's get to text mining!

References

Döring, K., Grüning, B. A., Telukunta, K. K., Thomas, P., & Günther, S. (2016). PubMedPortable: A framework for supporting the development of text mining applications. PLoS ONE, 11(10), e0163794. (Link to [Article](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0163794) and [GitHub Repo](https://github.com/KerstenDoering/PubMedPortable))

National Library of Medicine. (2021). Download MEDLINE/PubMed Data. Link to [Data](ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline) and [Documentation](https://www.nlm.nih.gov/databases/download/pubmed_medline.html)
SELECT xml_file_name, COUNT(fk_pmid)
FROM pubmed_2021.tbl_pmids_in_file
GROUP BY xml_file_name;

--- looks like there was some kind of problem parsing these files
--- affected 0816, 0829, 0865, 0866, 0875, 0879, 0884, 0886, 0891
--- all of the rest were in the high 29,000s or at 30000
--- i think i parse 900:1062 and come back to these problems later
--- the best approach would be to create a view where the fk_pmids of all
--- those files is removed across all the tables and then started anew
--- another way is just to remove all duplicates from each table later
--- https://stackoverflow.com/questions/6583916/delete-duplicate-rows-from-small-table

python PubMedParser.py -i data/pubmed/ -c             # ran 0001-0100 first
python PubMedParser.py -i data/pubmed_data/ -c -p 12  # took an hour and 10 mins
# downloaded 0101-0500
python PubMedParser.py -i data/after400/ -c -p 12
python PubMedParser.py -i data/the700s/ -c -p 12
_____no_output_____
MIT
src/01_pubmed_db/.ipynb_checkpoints/02_pubmed_parser-checkpoint.ipynb
brandonleekramer/the-growth-of-diversity
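One way to follow up on the troubleshooting notes above is to pull the under-populated files programmatically. A rough sketch using `psycopg2`; the 29,000 threshold is only an assumption based on the note that complete files land near 30,000 PMIDs:

```python
import psycopg2 as pg

conn = pg.connect(dbname='sdad', user='login', password='pwd', host='postgis1')
cur = conn.cursor()

# Files whose parsed PMID count falls well below the ~30,000 expected per baseline file
cur.execute("""
    SELECT xml_file_name, COUNT(fk_pmid) AS n_pmids
    FROM pubmed_2021.tbl_pmids_in_file
    GROUP BY xml_file_name
    HAVING COUNT(fk_pmid) < 29000
    ORDER BY n_pmids;
""")

for xml_file_name, n_pmids in cur.fetchall():
    print(xml_file_name, n_pmids)

cur.close()
conn.close()
```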
Homework 2

**Instructions:** Complete the notebook below. Download the completed notebook in HTML format. Upload assignment using Canvas.

**Due:** Jan. 19 at **2pm**.

Exercise: NumPy Arrays

Follow the instructions in the following cells.
# Import numpy

# Use the 'np.arange()' function to create a variable called 'numbers1' that stores the integers
# 1 through (and including) 10

# Print the value of 'numbers1'

# Use the 'np.arange()' function to create a variable called 'numbers2' that stores the numbers
# 0 through (and including) 1 with a step increment of 0.01

# Print the value of 'numbers2'

# Print the 5th value of 'numbers2'. (Remember that the index starts counting at 0)

# Print the last value of 'numbers2'.

# Print the first 12 values of 'numbers2'.

# Print the last 12 values of 'numbers2'.

# Use the 'np.zeros()' function to create a variable called 'zeros' that is an array of 20 zeros

# Print the value of 'zeros'

# Change the second value of 'zeros' to 1 and print

# Print the value of 'zeros'
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
Exercise: Random Numbers

Follow the instructions in the following cells.
# Set the seed of NumPy's random number generator to 126

# Create a variable called 'epsilon' that is an array containing 25 draws from
# a normal distribution with mean 4 and standard deviation 2

# Print the value of epsilon

# Print the mean of 'epsilon'

# Print the standard deviation of 'epsilon'
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
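A minimal sketch of the NumPy calls these steps refer to (an illustration only, not the graded solution):

```python
import numpy as np

np.random.seed(126)

# 25 draws from a normal distribution with mean 4 and standard deviation 2
epsilon = np.random.normal(loc=4, scale=2, size=25)

print(epsilon)
print(epsilon.mean())
print(epsilon.std())
```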
Exercise: The Cobb-Douglas Production Function

The Cobb-Douglas production function can be written in per worker terms as:

\begin{align} y & = A k^{\alpha}, \end{align}

where $y$ denotes output per worker, $k$ denotes capital per worker, and $A$ denotes total factor productivity or technology.

Part (a)

On a single axis: plot the Cobb-Douglas production function for $A$ = 0.8, 1, 1.2, and 1.4 with $\alpha$ = 0.35 and $k$ ranging from 0 to 10. Each line should have a different color. Your plot must have a title and axis labels. The plot should also contain a legend that clearly indicates which line is associated with which value of $A$ and does not cover the plotted lines.
# Import the pyplot module from Matplotlib as plt

# Select the Matplotlib style sheet to use (Optional)

# Use the '%matplotlib inline' magic command to ensure that Matplotlib plots are displayed in the Notebook

# Set capital share (alpha)

# Create an array of capital values

# Plot production function for each of the given values for A

# Add x- and y-axis labels

# Add a title to the plot

# Create a legend

# Add a grid
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
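One way the plotting steps above might look, shown only as an illustrative sketch (label text and legend placement are placeholder choices):

```python
import numpy as np
import matplotlib.pyplot as plt

alpha = 0.35
k = np.arange(0, 10.001, 0.001)          # capital per worker from 0 to 10

for A in [0.8, 1.0, 1.2, 1.4]:
    plt.plot(k, A * k**alpha, label='$A$ = ' + str(A))

plt.xlabel('Capital per worker, $k$')
plt.ylabel('Output per worker, $y$')
plt.title('Cobb-Douglas production function ($\\alpha$ = 0.35)')
plt.legend(loc='lower right')
plt.grid()
plt.show()
```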
**Question**

1. *Briefly* explain in words how increasing $A$ affects the shape of the production function.

**Answer**

1. 

Part (b)

On a single axis: plot the Cobb-Douglas production function for $\alpha$ = 0.1, 0.2, 0.3, 0.4, and 0.5 with $A$ = 1 and $k$ ranging from 0 to 10. Each line should have a different color. Your plot must have a title and axis labels. The plot should also contain a legend that clearly indicates which line is associated with which value of $\alpha$ and does not cover the plotted lines.
# Set TFP (A)

# Plot production function for each of the given values for alpha

# Add x- and y-axis labels

# Add a title to the plot

# Create a legend

# Add a grid
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
**Question**

1. *Briefly* explain in words how increasing $\alpha$ affects the shape of the production function.

**Answer**

1. 

Exercise: The Cardioid

The cardioid is a shape described by the parametric equations:

\begin{align} x & = a(2\cos \theta - \cos 2\theta), \\ y & = a(2\sin \theta - \sin 2\theta). \end{align}

Construct a well-labeled graph of the cardioid for $a=4$ and $\theta$ in $[0,2\pi]$. Your plot must have a title and axis labels.
# Construct data for x and y

# Plot y against x

# Create x-axis label

# Create y-axis label

# Create title for plot

# Add a grid to the plot
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
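An illustrative sketch of the parametric plot described above, using the given equations directly:

```python
import numpy as np
import matplotlib.pyplot as plt

a = 4
theta = np.linspace(0, 2 * np.pi, 1000)

x = a * (2 * np.cos(theta) - np.cos(2 * theta))
y = a * (2 * np.sin(theta) - np.sin(2 * theta))

plt.plot(x, y)
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.title('Cardioid with $a = 4$')
plt.grid()
plt.show()
```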
Exercise: Unconstrained optimization

Consider the quadratic function:

\begin{align}f(x) & = -7x^2 + 930x + 30\end{align}

You will use analytic (i.e., pencil and paper) and numerical methods to find the value of $x$ that maximizes $f(x)$. Another name for the $x$ that maximizes $f(x)$ is the *argument of the maximum* of $f(x)$.

Part (a): Analytic solution

Use standard calculus methods to solve for the value of $x$ that maximizes $f(x)$ to **five decimal places**. Use your answer to complete the sentence in the next cell.

The value of $x$ that maximizes $f(x)$ is:

Part (b): Numerical solution

In the cells below, you will use NumPy to try to compute the argument of the maximum of $f(x)$.
# Use np.arange to create a variable called 'x' that is equal to the numbers 0 through 100
# with a spacing between numbers of 0.1

# Create a variable called 'f' that equals f(x) at each value of the array 'x' just defined

# Use np.argmax to create a variable called xstar equal to the value of 'x' that maximizes the function f(x).

# Print the value of xstar

# Use np.arange to create a variable called 'x' that is equal to the numbers 0 through 100
# with a spacing between numbers of 0.001

# Create a variable called 'f' that equals f(x) at each value of the array 'x' just defined

# Use np.argmax to create a variable called xstar equal to the value of 'x' that maximizes the function f(x).

# Print the value of xstar

# Use np.arange to create a variable called 'x' that is equal to the numbers 0 through *50*
# with a spacing between numbers of 0.001

# Create a variable called 'f' that equals f(x) at each value of the array 'x' just defined

# Use np.argmax to create a variable called xstar equal to the value of 'x' that maximizes the function f(x).

# Print the value of xstar
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
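The grid-search idea behind these cells can be sketched as follows; note that the analytic maximizer is $x = 930/14 \approx 66.42857$, so the grid's step size and range determine how close `xstar` can get:

```python
import numpy as np

def f(x):
    return -7 * x**2 + 930 * x + 30

# Coarse grid: 0 to 100 in steps of 0.1
x = np.arange(0, 100.1, 0.1)
print(x[np.argmax(f(x))])    # lands near 66.4 on this grid

# Finer grid: steps of 0.001 get much closer to 930/14
x = np.arange(0, 100.001, 0.001)
print(x[np.argmax(f(x))])

# If the grid stops at 50, the largest sampled value sits at the boundary,
# not at the true maximizer
x = np.arange(0, 50.001, 0.001)
print(x[np.argmax(f(x))])    # ~50.0
```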
Part (c): Evaluation

Provide answers to the following questions in the next cell.

**Questions**

1. How did the choice of step size in the array `x` affect the accuracy of the computed results in the first two cells of Part (b)?
2. What do you think is the drawback to decreasing the step size in `x`?
3. In the previous cell, why did NumPy return a value for `xstar` that is so different from the solution you derived in Part (a)?

**Answers**

1. 
2. 
3. 

Exercise: Utility Maximization

Recall the two-good utility maximization problem from microeconomics. Let $x$ and $y$ denote the amounts of two goods that a person consumes. The person receives utility from consumption given by:

\begin{align} u(x,y) & = x^{\alpha}y^{\beta} \end{align}

The person has income $M$ to spend on the two goods and the prices of the goods are $p_x$ and $p_y$. The consumer's budget constraint is:

\begin{align} M & = p_x x + p_y y \end{align}

Suppose that $M = 100$, $\alpha=0.25$, $\beta=0.75$, $p_x = 1$, and $p_y = 0.5$. The consumer's problem is to maximize their utility subject to the budget constraint. While this problem can easily be solved by hand, we're going to use a computational approach. You can also solve the problem by hand to verify your solution.

Part (a)

Use the budget constraint to solve for $y$ in terms of $x$, $p_x$, $p_y$, and $M$. Use the result to write the consumer's utility as a function of $x$ only. Create a variable called `x` equal to an array of values from 0 to 80 with step size equal to 0.001 and a variable called `utility` equal to the consumer's utility. Plot the consumer's utility against $x$.
# Assign values to the constants alpha, beta, M, px, py

# Create an array of x values

# Create an array of utility values

# Plot utility against x.

# x- and y-axis labels

# Title

# Add grid
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
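A sketch of Part (a) under the stated parameter values; substituting the budget constraint $y = (M - p_x x)/p_y$ gives utility as a function of $x$ alone. For reference, since $\alpha + \beta = 1$, the Cobb-Douglas demands imply $x^* = \alpha M / p_x = 25$ and $y^* = \beta M / p_y = 150$, which the numerical search below should recover:

```python
import numpy as np
import matplotlib.pyplot as plt

alpha, beta = 0.25, 0.75
M, px, py = 100, 1, 0.5

x = np.arange(0, 80.001, 0.001)
y = (M - px * x) / py                 # from the budget constraint
utility = x**alpha * y**beta

plt.plot(x, utility)
plt.xlabel('$x$')
plt.ylabel('Utility')
plt.title('Utility along the budget constraint')
plt.grid()
plt.show()

# np.argmax(utility) then recovers xstar ≈ 25, and ystar = (M - px*xstar)/py ≈ 150
```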
Part (b)

The NumPy function `np.max()` returns the highest value in an array and `np.argmax()` returns the index of the highest value. Print the highest value and index of the highest value of `utility`.
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
Part (c)

Use the index of the highest value of `utility` to find the value in `x` with the same index and store the value in a new variable called `xstar`. Print the value of `xstar`.
# Create variable 'xstar' equal to value in 'x' that maximizes utility

# Print value of 'xstar'
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
Part (d)

Use the budget constraint to find the implied utility-maximizing value of $y$ and store this in a variable called `ystar`. Print `ystar`.
# Create variable 'ystar' equal to value in 'y' that maximizes utility

# Print value of 'ystar'
_____no_output_____
MIT
Homework Notebooks/Econ126_Winter2021_Homework_02_blank.ipynb
letsgoexploring/econ126
Use Spark to recommend mitigation for car rental company with `ibm-watson-machine-learning`

This notebook contains steps and code to create a predictive model and deploy it on WML. This notebook introduces commands for pipeline creation, model training, model persistence to the Watson Machine Learning repository, model deployment, and scoring. Some familiarity with Python is helpful. This notebook uses Python 3.8 and Apache® Spark 2.4. You will use the **car_rental_training** dataset.

Learning goals

The learning goals of this notebook are:

- Load a CSV file into an Apache® Spark DataFrame.
- Explore data.
- Prepare data for training and evaluation.
- Create an Apache® Spark machine learning pipeline.
- Train and evaluate a model.
- Persist a pipeline and model in the Watson Machine Learning repository.
- Deploy a model for online scoring using the Watson Machine Learning API.
- Score sample scoring data using the Watson Machine Learning API.

Contents

This notebook contains the following parts:

1. [Setup](setup)
2. [Load and explore data](load)
3. [Create an Apache Spark machine learning model](model)
4. [Store the model in the Watson Machine Learning repository](upload)
5. [Deploy the model in the IBM Cloud](deploy)
6. [Score](score)
7. [Clean up](cleanup)
8. [Summary and next steps](summary)

**Note:** This notebook works correctly with kernel `Python 3.6 with Spark 2.4`, please **do not change kernel**.

1. Set up the environment

Before you use the sample code in this notebook, you must perform the following setup tasks:

- Create a Watson Machine Learning (WML) Service instance (a free plan is offered and information about how to create the instance can be found here).

Connection to WML

Authenticate the Watson Machine Learning service on IBM Cloud. You need to provide the platform `api_key` and instance `location`. You can use the [IBM Cloud CLI](https://cloud.ibm.com/docs/cli/index.html) to retrieve the platform API Key and instance location.

The API Key can be generated in the following way:

```
ibmcloud login
ibmcloud iam api-key-create API_KEY_NAME
```

From the output, get the value of `api_key`.

The location of your WML instance can be retrieved in the following way:

```
ibmcloud login --apikey API_KEY -a https://cloud.ibm.com
ibmcloud resource service-instance WML_INSTANCE_NAME
```

From the output, get the value of `location`.

**Tip**: Your `Cloud API key` can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam/users). From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. Give your key a name and click **Create**, then copy the created key and paste it below. You can also get a service specific url by going to the [**Endpoint URLs** section of the Watson Machine Learning docs](https://cloud.ibm.com/apidocs/machine-learning). You can check your instance location in your Watson Machine Learning (WML) Service instance details. You can also get a service specific apikey by going to the [**Service IDs** section of the Cloud Console](https://cloud.ibm.com/iam/serviceids). From that page, click **Create**, then copy the created key and paste it below.

**Action**: Enter your `api_key` and `location` in the following cell.
api_key = 'PASTE YOUR PLATFORM API KEY HERE'
location = 'PASTE YOUR INSTANCE LOCATION HERE'

wml_credentials = {
    "apikey": api_key,
    "url": 'https://' + location + '.ml.cloud.ibm.com'
}
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
Install and import the `ibm-watson-machine-learning` package

**Note:** `ibm-watson-machine-learning` documentation can be found here.
!pip install -U ibm-watson-machine-learning

from ibm_watson_machine_learning import APIClient

client = APIClient(wml_credentials)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
Working with spaces

First of all, you need to create a space that will be used for your work. If you do not have a space already created, you can use the [Deployment Spaces Dashboard](https://dataplatform.cloud.ibm.com/ml-runtime/spaces?context=cpdaas) to create one.

- Click New Deployment Space
- Create an empty space
- Select Cloud Object Storage
- Select Watson Machine Learning instance and press Create
- Copy `space_id` and paste it below

**Tip**: You can also use the SDK to prepare the space for your work. More information can be found [here](https://github.com/IBM/watson-machine-learning-samples/blob/master/cloud/notebooks/python_sdk/instance-management/Space%20management.ipynb).

**Action**: Assign space ID below
space_id = 'PASTE YOUR SPACE ID HERE'
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
You can use the `list` method to print all existing spaces.
client.spaces.list(limit=10)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
To be able to interact with all resources available in Watson Machine Learning, you need to set the **space** which you will be using.
client.set.default_space(space_id)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
**Note**: Please restart the kernel (Kernel -> Restart).

Test Spark
try:
    from pyspark.sql import SparkSession
except:
    print('Error: Spark runtime is missing. If you are using Watson Studio change the notebook runtime to Spark.')
    raise
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
2. Load and explore data

In this section you will load the data as an Apache Spark DataFrame and perform a basic exploration: read the CSV data into a Spark DataFrame and show sample records.

Load data
import os
from wget import download

sample_dir = 'spark_sample_model'
if not os.path.isdir(sample_dir):
    os.mkdir(sample_dir)

filename = os.path.join(sample_dir, 'car_rental_training_data.csv')
if not os.path.isfile(filename):
    filename = download('https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/data/cars-4-you/car_rental_training_data.csv', out=sample_dir)

spark = SparkSession.builder.getOrCreate()

df_data = spark.read\
    .format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\
    .option('header', 'true')\
    .option('inferSchema', 'true')\
    .option("delimiter", ";")\
    .load(filename)

df_data.take(3)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
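As a side note, the long `.format(...)` spelling above should be equivalent to Spark's shorthand CSV reader; a sketch of the more common form:

```python
# Equivalent shorthand for the CSV load above
df_data = spark.read.csv(filename, header=True, inferSchema=True, sep=';')
df_data.take(3)
```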
Explore data
df_data.printSchema()
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
As you can see, the data contains eleven fields. The `Action` field is the one you would like to predict using the feedback data in the `Customer_Service` field.
print("Number of records: " + str(df_data.count()))
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
As you can see, the data set contains 243 records.
df_data.select('Business_area').groupBy('Business_area').count().show()
df_data.select('Action').groupBy('Action').count().show(truncate=False)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
3. Create an Apache Spark machine learning model

In this section you will learn how to:

- [3.1 Prepare data for training a model](prep)
- [3.2 Create an Apache Spark machine learning pipeline](pipe)
- [3.3 Train a model](train)

3.1 Prepare data for training a model

In this subsection you will split your data into train and test data sets.
train_data, test_data = df_data.randomSplit([0.8, 0.2], 24)

print("Number of training records: " + str(train_data.count()))
print("Number of testing records : " + str(test_data.count()))
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
3.2 Create the pipeline

In this section you will create an Apache Spark machine learning pipeline and then train the model.
from pyspark.ml.feature import OneHotEncoder, StringIndexer, IndexToString, VectorAssembler, HashingTF, IDF, Tokenizer
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml import Pipeline, Model
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
In the following step, use the StringIndexer transformer to convert all the string fields to numeric ones.
# Index the categorical string columns
string_indexer_gender = StringIndexer(inputCol="Gender", outputCol="gender_ix")
string_indexer_customer_status = StringIndexer(inputCol="Customer_Status", outputCol="customer_status_ix")
string_indexer_status = StringIndexer(inputCol="Status", outputCol="status_ix")
string_indexer_owner = StringIndexer(inputCol="Car_Owner", outputCol="owner_ix")
string_business_area = StringIndexer(inputCol="Business_Area", outputCol="area_ix")

# Assemble the feature vector
assembler = VectorAssembler(inputCols=["gender_ix", "customer_status_ix", "status_ix", "owner_ix", "area_ix", "Children", "Age", "Satisfaction"], outputCol="features")

# Index the label column and set up the reverse mapping for predictions
string_indexer_action = StringIndexer(inputCol="Action", outputCol="label").fit(df_data)
label_action_converter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=string_indexer_action.labels)

dt_action = DecisionTreeClassifier()

pipeline_action = Pipeline(stages=[string_indexer_gender, string_indexer_customer_status, string_indexer_status, string_indexer_action, string_indexer_owner, string_business_area, assembler, dt_action, label_action_converter])

model_action = pipeline_action.fit(train_data)

predictions_action = model_action.transform(test_data)
predictions_action.select('Business_Area', 'Action', 'probability', 'predictedLabel').show(2)

evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions_action)
print("Accuracy = %g" % accuracy)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
4. Persist model

In this section you will learn how to store your pipeline and model in the Watson Machine Learning repository by using the Python client libraries.

**Note**: Apache® Spark 2.4 is required.

Save training data in your Cloud Object Storage

The ibm-cos-sdk library allows Python developers to manage Cloud Object Storage (COS).
import ibm_boto3
from ibm_botocore.client import Config
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
**Action**: Put credentials from Object Storage Service in Bluemix here.
cos_credentials = {
    "apikey": "***",
    "cos_hmac_keys": {
        "access_key_id": "***",
        "secret_access_key": "***"
    },
    "endpoints": "***",
    "iam_apikey_description": "***",
    "iam_apikey_name": "***",
    "iam_role_crn": "***",
    "iam_serviceid_crn": "***",
    "resource_instance_id": "***"
}

connection_apikey = cos_credentials['apikey']
connection_resource_instance_id = cos_credentials["resource_instance_id"]
connection_access_key_id = cos_credentials['cos_hmac_keys']['access_key_id']
connection_secret_access_key = cos_credentials['cos_hmac_keys']['secret_access_key']
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
**Action**: Define the service endpoint we will use.

**Tip**: You can find this information in the Endpoints section of your Cloud Object Storage instance's dashboard.
service_endpoint = 'https://s3.us.cloud-object-storage.appdomain.cloud'
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
You also need the IBM Cloud authorization endpoint to be able to create the COS resource object.
auth_endpoint = 'https://iam.cloud.ibm.com/identity/token'
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
We create a COS resource to be able to write data to Cloud Object Storage.
cos = ibm_boto3.resource('s3',
                         ibm_api_key_id=cos_credentials['apikey'],
                         ibm_service_instance_id=cos_credentials['resource_instance_id'],
                         ibm_auth_endpoint=auth_endpoint,
                         config=Config(signature_version='oauth'),
                         endpoint_url=service_endpoint)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
Now you will create a bucket in COS and copy the `training dataset` for the model from **car_rental_training_data.csv**.
from uuid import uuid4

bucket_uid = str(uuid4())
score_filename = "car_rental_training_data.csv"

buckets = ["car-rental-" + bucket_uid]

for bucket in buckets:
    if not cos.Bucket(bucket) in cos.buckets.all():
        print('Creating bucket "{}"...'.format(bucket))
        try:
            cos.create_bucket(Bucket=bucket)
        except ibm_boto3.exceptions.ibm_botocore.client.ClientError as e:
            print('Error: {}.'.format(e.response['Error']['Message']))

bucket_obj = cos.Bucket(buckets[0])

print('Uploading data {}...'.format(score_filename))
with open(filename, 'rb') as f:
    bucket_obj.upload_fileobj(f, score_filename)
print('{} is uploaded.'.format(score_filename))
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
Create connections to a COS bucket
datasource_type = client.connections.get_datasource_type_uid_by_name('bluemixcloudobjectstorage')

conn_meta_props = {
    client.connections.ConfigurationMetaNames.NAME: "COS connection - spark",
    client.connections.ConfigurationMetaNames.DATASOURCE_TYPE: datasource_type,
    client.connections.ConfigurationMetaNames.PROPERTIES: {
        'bucket': buckets[0],
        'access_key': connection_access_key_id,
        'secret_key': connection_secret_access_key,
        'iam_url': auth_endpoint,
        'url': service_endpoint
    }
}

conn_details = client.connections.create(meta_props=conn_meta_props)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
**Note**: The above connection can be initialized alternatively with `api_key` and `resource_instance_id`. The above cell can be replaced with:

```
conn_meta_props = {
    client.connections.ConfigurationMetaNames.NAME: f"Connection to Database - {db_name} ",
    client.connections.ConfigurationMetaNames.DATASOURCE_TYPE: client.connections.get_datasource_type_uid_by_name(db_name),
    client.connections.ConfigurationMetaNames.DESCRIPTION: "Connection to external Database",
    client.connections.ConfigurationMetaNames.PROPERTIES: {
        'bucket': bucket_name,
        'api_key': cos_credentials['apikey'],
        'resource_instance_id': cos_credentials['resource_instance_id'],
        'iam_url': 'https://iam.cloud.ibm.com/identity/token',
        'url': 'https://s3.us.cloud-object-storage.appdomain.cloud'
    }
}

conn_details = client.connections.create(meta_props=conn_meta_props)
```
connection_id = client.connections.get_uid(conn_details)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
4.2 Save the pipeline and model
training_data_references = [
    {
        "id": "car-rental-training",
        "type": "connection_asset",
        "connection": {
            "id": connection_id
        },
        "location": {
            "bucket": buckets[0],
            "file_name": score_filename,
        }
    }
]

saved_model = client.repository.store_model(
    model=model_action,
    meta_props={
        client.repository.ModelMetaNames.NAME: "CARS4U - Action Recommendation Model",
        client.repository.ModelMetaNames.TYPE: "mllib_2.4",
        client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: client.software_specifications.get_id_by_name('spark-mllib_2.4'),
        client.repository.ModelMetaNames.TRAINING_DATA_REFERENCES: training_data_references,
        client.repository.ModelMetaNames.LABEL_FIELD: "Action",
    },
    training_data=train_data,
    pipeline=pipeline_action)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
Get saved model metadata from Watson Machine Learning.
published_model_id = client.repository.get_model_uid(saved_model)
print("Model Id: " + str(published_model_id))
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
**Model Id** can be used to retrieve the latest model version from the Watson Machine Learning instance. Below you can see the stored model details.
client.repository.get_model_details(published_model_id)
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
5. Deploy model in the IBM Cloud

You can use the following command to create an online deployment in the cloud.
deployment_details = client.deployments.create(
    published_model_id,
    meta_props={
        client.deployments.ConfigurationMetaNames.NAME: "CARS4U - Action Recommendation model deployment",
        client.deployments.ConfigurationMetaNames.ONLINE: {}
    }
)

deployment_details
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
6. Score
fields = ['ID', 'Gender', 'Status', 'Children', 'Age', 'Customer_Status', 'Car_Owner', 'Customer_Service', 'Business_Area', 'Satisfaction']
values = [3785, 'Male', 'S', 1, 17, 'Inactive', 'Yes', 'The car should have been brought to us instead of us trying to find it in the lot.', 'Product: Information', 0]

import json

payload_scoring = {"input_data": [{"fields": fields, "values": [values]}]}
scoring_response = client.deployments.score(client.deployments.get_id(deployment_details), payload_scoring)

print(json.dumps(scoring_response, indent=3))
_____no_output_____
Apache-2.0
cloud/notebooks/python_sdk/deployments/spark/cars-4-you/Use Spark to recommend mitigation for car rental company.ipynb
muthukumarbala07/watson-machine-learning-samples
Check If All Class 1 Bad Pixels Are Indeed Just Noisy Pixels---
quirks_store[classes_store == 1].shape

fig = figure()  # figsize=(6,6))
ax = fig.add_subplot(111)
# ax.plot([nan,nan])

corrections = []
for cnow in np.where(classes_store == 1)[0]:
    # ax.lines.pop()
    ax.clear()
    ax.plot(quirks_store[cnow] - median(quirks_store[cnow]))
    ax.set_title('Entry:' + str(cnow) + '/ Class:' + str(classes_store[cnow]))
    fig.canvas.draw()
    display.display(plt.gcf())
    display.clear_output(wait=True)
    # checkClass = input('Is this a Noisy Pixel? ');
    # if checkClass != '':
    #     corrections.append([cnow, checkClass])

# for cnow in np.where(classes_store == 1)[0]:
#     plt.plot(quirks_store[cnow])
#     display.clear_output(wait=True)
#     display.display(plt.gcf())
#     checkClass = input('Is this a Noisy Pixel? '); print(checkClass)
#     plt.clf()
#     display.clear_output(wait=True)
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
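A non-interactive way to triage the class-1 candidates before eyeballing them is to rank each pixel's ramp by a robust noise statistic. This is only a sketch, assuming `quirks_store` is an (n_pixels, n_frames) array and `classes_store` the matching label vector:

```python
import numpy as np

def mad(arr, axis=-1):
    # Median absolute deviation: a robust stand-in for the standard deviation
    med = np.median(arr, axis=axis, keepdims=True)
    return np.median(np.abs(arr - med), axis=axis)

noisy_idx = np.where(classes_store == 1)[0]
noise_level = mad(quirks_store[noisy_idx])

# Review the most extreme candidates first
review_order = noisy_idx[np.argsort(noise_level)[::-1]]
print(review_order[:10])
```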
Check If All Class 4 Bad Pixels Are Indeed Just CR Pixels---
quirks_store[classes_store == 4].shape
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
fig = figure(figsize=(6,6))
ax = fig.add_subplot(111)

CRs = np.where(classes_store == 4)[0]

corrections = []
for cnow in CRs:
    ax.lines.pop()
    ax = fig.add_subplot(111)
    ax.plot((quirks_store[cnow] - min(quirks_store[cnow])) / (max(quirks_store[cnow]) - min(quirks_store[cnow])), lw=2)
    ax.set_title('Entry:' + str(cnow) + '/ Class:' + str(classes_store[cnow]))
    ax.annotate(str(cnow), [110, 0.5], fontsize=50)
    fig.canvas.draw()
    display.display(plt.gcf())
    time.sleep(.05)
    display.clear_output(wait=True)
    ax.lines.pop()
    ax.texts.pop()
    if cnow > 500 and cnow < 1000:
        display.display(plt.clf())
        ax = fig.add_subplot(111)
        checkClass = input('Is this a Cosmic Ray? ');
        if checkClass != '':
            corrections.append([cnow, checkClass])

for cnow in np.where(classes_store == 1)[0]:
    plt.plot(quirks_store[cnow])
    display.clear_output(wait=True)
    display.display(plt.gcf())
    checkClass = input('Is this a Noisy Pixel? '); print(checkClass)
    plt.clf()
    display.clear_output(wait=True)

corrections
np.where(classes_store == 6)[0]
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
classes_store[[140,260, 380]] = 2
plot(quirks_store[140]); plot(quirks_store[260]); plot(quirks_store[380]);

((quirks_store.T - np.min(quirks_store, axis=1)) / (np.max(quirks_store, axis=1) - np.min(quirks_store, axis=1))).shape

((quirks_store.T - np.min(quirks_store, axis=1)) / (np.max(quirks_store, axis=1) - np.min(quirks_store, axis=1))).T[classes_store == 4].T.shape

np.sum(classes_store == 2) // 100

quirk_store_norm = ((quirks_store.T - np.min(quirks_store, axis=1)) / (np.max(quirks_store, axis=1) - np.min(quirks_store, axis=1))).T

classNow = 4
k = 1
stepsize = 100
quirksNow = quirk_store_norm[classes_store == classNow][k*stepsize:(k+1)*stepsize].T
quirksNow.shape

classes_store_bak = np.copy(classes_store)
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
classNow = 5
stepsize = 50
fig = figure(figsize=(16,30))
for k in range(np.sum(classes_store == classNow) // stepsize):
    quirksNow = quirk_store_norm[classes_store == classNow][k*stepsize:(k+1)*stepsize]
    upper = np.where(quirksNow[:,-1] > 0.5)[0]
    lower = np.where(quirksNow[:,-1] < 0.5)[0]
    classes_store[classes_store == classNow][lower] = np.ones(len(classes_store[classes_store == classNow][lower]))*6
    ax = fig.add_subplot(np.int(np.ceil(np.sum(classes_store == classNow) // stepsize / 2)), 2, k+1)
    plot(quirksNow[lower].T);
fig = figure(figsize=(16,8))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)

ax1.plot(quirk_store_norm[classes_store == 5].T, lw=1);
ylims = ax1.get_ylim()
xlims = ax1.get_xlim()
xyNow = [np.min(xlims) + 0.5*diff(xlims), np.min(ylims) + 0.5*diff(ylims)]
ax1.annotate(str(5), xyNow, fontsize=75)

ax2.plot(quirk_store_norm[classes_store == 6].T, lw=1);
ylims = ax2.get_ylim()
xlims = ax2.get_xlim()
xyNow = [np.min(xlims) + 0.5*diff(xlims), np.min(ylims) + 0.5*diff(ylims)]
ax2.annotate(str(6), xyNow, fontsize=75)
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
classes_store_new = np.copy(classes_store)
classes_store_new[(classes_store == 5)*(quirk_store_norm[:,-1] < 0.5)] = 6
classes_store_new[(classes_store == 5)*(quirk_store_norm[:,-1] >= 0.5)] = classes_store[(classes_store == 5)*(quirk_store_norm[:,-1] >= 0.5)]

classes_store_new[classes_store_new == 6]

np.savetxt('myclasses_new_FINAL_for_cnaw_mask_CV3_dark_frames.txt', classes_store_new.astype(int), fmt='%d')
darks.shape

darks_trnspsd = np.transpose(darks, axes=(1,2,0))

for irow in range(len(quirks_store)):
    quirk_pp = pp.scale(quirks_store[irow])
    # print(std(quirk_pp), scale.mad(quirk_pp))
    plot(quirk_pp, alpha=0.5)  # - median(darks_trnspsd[icol,irow])))

# darks_scaled = pp.scale(darks, axis=0)

darks.shape, darks_trnspsd.shape

darks_reshaped = darks_trnspsd.reshape(darks_trnspsd.shape[0]*darks_trnspsd.shape[1], darks_trnspsd.shape[2])
darks_reshaped.shape

icol, irow = np.random.randint(0, 2048, 2)
pp.scale(darks_trnspsd[icol,irow] / median(darks_trnspsd[icol,irow]))

darks_norm = darks / median(darks, axis=0)
darks_std = std(darks_norm, axis=0)
darks_std.shape

darks_med_std = median(darks_std)

darks_flat = []
for irow in range(darks_reshaped.shape[0]):
    limit_check = std(darks_reshaped[irow] / median(darks_reshaped[irow]) - 1) < 2*darks_med_std
    # print(limit_check, std(darks_reshaped[irow] / median(darks_reshaped[irow])), darks_med_std)
    if limit_check:
        darks_flat.append(darks_reshaped[irow])

nNormals = len(darks_flat)
nNormals

darks_flat = np.array(darks_flat)
darks_flat.shape
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
darks_flat = darks_trnspsd[darks_std < 2*darks_med_std]
nNormals = len(darks_flat)
nNormals
darks_norm_trnspsd = np.transpose(darks_norm, axes=(1,2,0))
darks_norm_flat = darks_norm_trnspsd[darks_std < 2*darks_med_std]
darks_norm_flat.shape
darks_norm_flat.shape[0]
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
Simulate RTNs because the CV3 training data has None---
np.random.seed(42) saturation = 2**16 dynRange = 2**9 nSamps = 1000 nSig = 4.0 nFrames = darks_norm_flat.shape[1] rtn_syn = np.zeros((nSamps, nFrames)) rtn_classes = np.zeros(nSamps) maxRTNs = np.int(0.9*nFrames) maxWidth = 50 minWidth = 10 rtnCnt = 0 dark_inds = np.arange(darks_reshaped.shape[0]) frame_inds= np.arange(darks_reshaped.shape[1]) for irow in np.random.choice(dark_inds,nSamps,replace=False): rtn_syn[rtnCnt] = np.copy(darks_reshaped[irow]) if darks_reshaped[irow].std() > 50: print(darks_reshaped[irow].std()) nRTNs = np.random.randint(maxRTNs) coinflip = np.random.randint(0, 2) sign_rand = np.random.choice([-1,1]) minJump = nSig*std(rtn_syn[rtnCnt] - median(rtn_syn[rtnCnt])) jump = abs(np.random.normal(minJump,dynRange) - minJump) + minJump if coinflip: rtn_classes[rtnCnt] = 0 RTN_locs = np.random.choice(frame_inds, nRTNs, replace=False) for iRTN in RTN_locs: rtn_syn[rtnCnt][iRTN] += sign_rand*jump else: randWidth = np.random.randint(minWidth, maxWidth + 1) randStart = np.random.randint(minWidth, nFrames - randWidth - minWidth + 1) rtn_syn[rtnCnt][randStart:randStart+randWidth] += sign_rand*jump rtn_classes[rtnCnt] = 1 rtn_syn[rtnCnt][rtn_syn[rtnCnt] > saturation] = saturation # if not rtnCnt % 100: plot(rtn_syn[rtnCnt] - median(rtn_syn[rtnCnt])) rtnCnt = rtnCnt + 1 xlim(-1,110); # ylim(-100,100); darks_flat_med_axis0 = np.median(darks_flat,axis=0) darks_flat_med_axis1 = np.median(darks_flat,axis=1) darks_flat_std_axis0 = np.std(darks_flat,axis=0) darks_flat_std_axis1 = np.std(darks_flat,axis=1) darks_flat_med_axis0_norm = darks_flat_med_axis0[1:] / median(darks_flat_med_axis0[1:]) classLabels = {1:'Noisy', 2:'HP', 3:'IHP', 4:'LHP', 5:'SHP', 6:'CR', 7:'RTN0', 8:'RTN1'} for k in range(1,9): print(k, classLabels[k]) def kde_sklearn(x, x_grid, bandwidth=0.2, **kwargs): """Kernel Density Estimation with Scikit-learn""" kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs) kde_skl.fit(x[:, np.newaxis]) # score_samples() returns the log-likelihood of the samples log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis]) return np.exp(log_pdf) flags_loc[:,0] # x0, y0 = flags_loc[2187] fig = figure(figsize=(16,6)) ax1 = fig.add_subplot(121) ax2 = fig.add_subplot(122) ax1.clear() rtnNow = 3 leave1out = np.zeros(rtn_syn[rtnNow].size) for k in range(rtn_syn[rtnNow].size): leave1out[k] = np.std(hstack([rtn_syn[rtnNow][:k],rtn_syn[rtnNow][k+1:]])) testRand1 = np.random.normal(rtn_syn[rtnNow].mean(), 0.1*rtn_syn[rtnNow].std(), rtn_syn[rtnNow].size) testRand2 = np.random.normal(rtn_syn[rtnNow].mean(), rtn_syn[rtnNow].std(), rtn_syn[rtnNow].size) leave1out1 = np.zeros(rtn_syn[rtnNow].size) leave1out2 = np.zeros(rtn_syn[rtnNow].size) for k in range(rtn_syn[rtnNow].size): leave1out1[k] = np.std(hstack([testRand1[:k],testRand1[k+1:]])) leave1out2[k] = np.std(hstack([testRand2[:k],testRand2[k+1:]])) l1o_diffRTN = np.std(rtn_syn[rtnNow]) - leave1out l1o_diffTR1 = np.std(testRand1) - leave1out1 l1o_diffTR2 = np.std(testRand2) - leave1out2 ax1.hist(pp.scale(darks_flat_med_axis0), bins=20, normed=True, alpha=0.25, label='DarksMed Rescaled') kde2 = sm.nonparametric.KDEUnivariate(pp.scale(darks_flat_med_axis0)) kde2.fit(kernel='uni', fft=False) ax1.plot(kde2.support, kde2.density, lw=2, color=rcParams['axes.color_cycle'][1]) ax1.hist((l1o_diffRTN - l1o_diffRTN.mean()) / l1o_diffRTN.std(), bins=20, label='Leave 1 Out Std RTN', normed=True,alpha=0.5); ax1.hist((l1o_diffTR1 - l1o_diffTR1.mean()) / l1o_diffTR1.std(), bins=20, label='Leave 1 Out rand 1', normed=True,alpha=0.5); ax1.hist((l1o_diffTR2 - 
l1o_diffTR2.mean()) / l1o_diffTR2.std(), bins=20, label='Leave 1 Out rand 2', normed=True,alpha=0.5); ax2.hist((rtn_syn[rtnNow] - rtn_syn[rtnNow].mean()) / rtn_syn[rtnNow].std(), bins=20, label='RTN Now', normed=True,alpha=0.5); ax2.hist((testRand1 - testRand1.mean()) / testRand1.std(), bins=20, label='testRand1', normed=True,alpha=0.5); ax2.hist((testRand2 - testRand2.mean()) / testRand2.std() , bins=20, label='testRand2', normed=True,alpha=0.5); ax1.legend(loc=0) ax2.legend(loc=0) fig.canvas.draw() # display.display(plt.gcf()) # display.clear_output(wait=True) # x0, y0 = flags_loc[2187] fig = figure() ax = fig.add_subplot(111) rtnNow = 1 for rtnNow in range(len(rtn_syn)): ax.clear() leave1out = np.zeros(rtn_syn[rtnNow].size) for k in range(rtn_syn[rtnNow].size): leave1out[k] = np.std(hstack([rtn_syn[rtnNow][:k],rtn_syn[rtnNow][k+1:]])) ax.plot(rescale(np.std(rtn_syn[rtnNow]) - leave1out), label='Leave 1 Out Std'); ax.plot(rescale(rtn_syn[rtnNow])+1, label='RTN Now'); ax.legend(loc=0) fig.canvas.draw() display.display(plt.gcf()) display.clear_output(wait=True) x0, y0 = flags_loc[2808]#413, 176 print(x0, y0) meandark = np.mean([darks[:,x0+1, y0+0]*std(darks[:,x0+1, y0+0]),darks[:,x0-1, y0+0]*std(darks[:,x0-1, y0+0]), \ darks[:,x0+0, y0+1]*std(darks[:,x0+0, y0+1]),darks[:,x0+0, y0-1]*std(darks[:,x0+0, y0-1])], axis=0) # 160 335 # 159 335 # 161 335 # 160 334 # 160 336 # meandark = np.mean([darks[:,159, 335],darks[:,161, 335], darks[:,160, 334],darks[:,160, 336]],axis=0) # meddark = np.median([darks[:,159, 335]*std(darks[:,159, 335]),darks[:,161, 335]*std(darks[:,161, 335]), \ # darks[:,160, 334]*std(darks[:,160, 334]),darks[:,160, 336]*std(darks[:,160, 336])],axis=0) # meandark = np.mean([darks[:,159, 335]*std(darks[:,159, 335]),darks[:,161, 335]*std(darks[:,161, 335]), \ # darks[:,160, 334]*std(darks[:,160, 334]),darks[:,160, 336]*std(darks[:,160, 336])], axis=0) fig = figure(figsize=(12,6)) plot((darks[:,x0, y0] - np.min(darks[:,x0, y0])), lw=4) # plot((meandark / np.min(meandark)), lw=4) # plot((meddark - meddark.min()) / (meddark.max() - meddark.min() ), lw=4) plot((darks[:,x0+1, y0+0]) - np.min((darks[:,x0+1, y0+0]))) plot((darks[:,x0-1, y0+0]) - np.min((darks[:,x0-1, y0+0]))) plot((darks[:,x0+0, y0+1]) - np.min((darks[:,x0+0, y0+1]))) plot((darks[:,x0+0, y0-1]) - np.min((darks[:,x0+0, y0-1]))) xlim(-1,110) x0, y0 = flags_loc[2808]#413, 176 print(x0, y0) meandark = np.mean([darks[:,x0+1, y0+0]*std(darks[:,x0+1, y0+0]),darks[:,x0-1, y0+0]*std(darks[:,x0-1, y0+0]), \ darks[:,x0+0, y0+1]*std(darks[:,x0+0, y0+1]),darks[:,x0+0, y0-1]*std(darks[:,x0+0, y0-1])], axis=0) # 160 335 # 159 335 # 161 335 # 160 334 # 160 336 # meandark = np.mean([darks[:,159, 335],darks[:,161, 335], darks[:,160, 334],darks[:,160, 336]],axis=0) # meddark = np.median([darks[:,159, 335]*std(darks[:,159, 335]),darks[:,161, 335]*std(darks[:,161, 335]), \ # darks[:,160, 334]*std(darks[:,160, 334]),darks[:,160, 336]*std(darks[:,160, 336])],axis=0) # meandark = np.mean([darks[:,159, 335]*std(darks[:,159, 335]),darks[:,161, 335]*std(darks[:,161, 335]), \ # darks[:,160, 334]*std(darks[:,160, 334]),darks[:,160, 336]*std(darks[:,160, 336])], axis=0) fig = figure(figsize=(12,12)) # plot(rescale(darks[:,x0, y0]), lw=4) # plot(rescale(meandark), lw=4) # plot((meddark - meddark.min()) / (meddark.max() - meddark.min() ), lw=4) axvline(argmax(diff(rescale(darks[:,x0, y0])))+1,lw=4) plot(rescale(darks[:,x0+1, y0+0])) plot(rescale(darks[:,x0-1, y0+0])) plot(rescale(darks[:,x0+0, y0+1])) plot(rescale(darks[:,x0+0, y0-1])) def 
rescale(arr): return (arr - arr.min()) / (arr.max() - arr.min()) # quirkCheck = np.zeros(len(quirks_store)) np.savetxt('quirkCheck_save_bkup.txt', quirkCheck) quirkCheck[np.where(quirkCheck ==0)[0].min()-1] = 0.0 fig = figure(figsize=(15,15)) ax1 = fig.add_subplot(221) ax2 = fig.add_subplot(222) ax3 = fig.add_subplot(223) ax4 = fig.add_subplot(224) # fig = figure(figsize=(15,5)) # ax1 = fig.add_subplot(141) # ax2 = fig.add_subplot(142) # ax3 = fig.add_subplot(143) # ax4 = fig.add_subplot(144) darkMed = darks_flat_med_axis0 - np.min(darks_flat_med_axis0) darksMed_scaled = darks_flat_med_axis0 / median(darks_flat_med_axis0)# pp.scale(darks_flat_med_axis0) diff_darks_flat_med_axis0 = np.zeros(darks_flat_med_axis0.size) diff_darks_flat_med_axis0[1:] = diff(darks_flat_med_axis0) classLabels = {1:'Noisy', 2:'HP', 3:'IHP', 4:'LHP', 5:'SHP', 6:'CR', 7:'RTN0', 8:'RTN1'} np.savetxt('quirkCheck_save_bkup.txt', quirkCheck) for iQuirk, quirkNow in enumerate(quirks_store): if quirkCheck[iQuirk]: continue ax1.clear() ax2.clear() ax3.clear() ax4.clear() classNow = classes_store[iQuirk] classOut = classNow if classNow == 3: classOut = 7 if classNow == 4: classOut = 6 if classNow == 5: classOut = 5 if classNow == 6: classOut = 3 # ax1.plot(darks_reshaped[irow][1:] / median(darks_reshaped[irow][1:]) - darks_flat_med_axis0_norm); # Plot Subtraction frame: (Darknow - min(Darknow)) - (DarkMed - min(DarkMed)) # darkNowMinusMed = (darks_reshaped[irow][1:] - np.min(darks_reshaped[irow][1:])) - \ # (darks_flat_med_axis0[1:] - np.min(darks_flat_med_axis0[1:])) quirkNow_scaled = pp.scale(quirkNow) quirkMinusMed = (quirkNow_scaled - np.min(quirkNow_scaled)) - (darksMed_scaled - np.min(darksMed_scaled)) quirkNowRescaled = pp.scale((quirkNow - np.min(quirkNow))-(darks_flat_med_axis0 - np.min(darks_flat_med_axis0))) _, xhist = np.histogram(quirkNowRescaled, bins=20, normed=True)#, alpha=0.50) kde1 = sm.nonparametric.KDEUnivariate(quirkNowRescaled) kde1.fit(kernel='uni', bw=0.33*median(diff(xhist)), fft=False) ax1.plot(kde1.support, rescale(kde1.density), lw=2, color=rcParams['axes.color_cycle'][0], label='QuirkNow Rescaled') # if classNow == 1: #ax1.hist(pp.scale(darks_flat_med_axis0), bins=20, normed=True, alpha=0.25, label='DarksMed Rescaled') kde2 = sm.nonparametric.KDEUnivariate(pp.scale(darks_flat_med_axis0)) kde2.fit(kernel='uni', fft=False) ax1.plot(kde2.support, rescale(kde2.density), lw=2, color=rcParams['axes.color_cycle'][1], label='DarksMed Rescaled') leave1out = np.zeros(nFrames) for k in range(nFrames): leave1out[k] = np.std(hstack([quirkNow[:k],quirkNow[k+1:]])) #ax1.hist(pp.scale(np.std(quirkNow) - leave1out), bins=20, alpha=0.5, normed=True, label='Leave1Out Rescaled'); kde3 = sm.nonparametric.KDEUnivariate(pp.scale(np.std(quirkNow) - leave1out)) kde3.fit(kernel='uni', fft=False) ax1.plot(kde3.support, rescale(kde3.density), lw=2, color=rcParams['axes.color_cycle'][2], label='Leave1Out Rescaled') ax1.legend() # else: # ax1.hist((darksAvg - np.median(darksAvg)), bins=20, normed=True, alpha=0.25) # kde2 = sm.nonparametric.KDEUnivariate((darksAvg - np.median(darksAvg))) # kde2.fit(kernel='uni', fft=False) # ax1.plot(kde2.support, kde2.density, lw=2, color=rcParams['axes.color_cycle'][1]) ylims = ax1.get_ylim() xlims = ax1.get_xlim() xyNow1 = [np.min(xlims) + 0.1*diff(xlims), np.min(ylims) + 0.9*diff(ylims)] ax1.annotate(str(classOut) + ': ' + classLabels[classOut], xyNow1, fontsize=75) # ax1.plot() # ax1.axvline(median(rtnNow), linestyle='--', color='k') ax1.set_xlabel('Subtraction Hist') # 
ax1.plot(darks_reshaped[irow][1:] / median(darks_reshaped[irow][1:]) / darks_flat_med_axis0_norm - 1); # Plot Normalized Frame: DarkNow vs DarMed ax2.plot((quirkNow - np.min(quirkNow))/darksMed_scaled,'o-'); ylims = ax2.get_ylim() xlims = ax2.get_xlim() xyNow2 = [np. min(xlims) + 0.1*diff(xlims), np.min(ylims) + 0.9*diff(ylims)] ax2.annotate(str(classOut) + ': ' + classLabels[classOut], xyNow2, fontsize=75) #ax2.plot(darksMed_scaled,'o-') ax2.set_xlabel('Normalized Frame') # Plot Common Mode Correlation Frame # ax3.plot((quirkNow - np.min(quirkNow))-(darks_flat_med_axis0 - np.min(darks_flat_med_axis0)), darksMed_scaled,'o') # ax3.plot(darksMed_scaled, darksMed_scaled,'o') ax3.plot(rescale(diff(quirkNow)),'o-') ax3.plot(rescale(diff_darks_flat_med_axis0), 'o-', alpha=0.25) ax3.axhline(np.median(rescale(diff_darks_flat_med_axis0)), c='k', lw=1) ax3.axhline(np.median(rescale(diff_darks_flat_med_axis0))+np.std(rescale(diff_darks_flat_med_axis0)), c='k', lw=1,ls='--') ax3.axhline(np.median(rescale(diff_darks_flat_med_axis0))-np.std(rescale(diff_darks_flat_med_axis0)), c='k', lw=1,ls='--') ylims = ax3.get_ylim() xlims = ax3.get_xlim() xyNow3 = [np.min(xlims) + 0.1*diff(xlims), np.min(ylims) + 0.9*diff(ylims)] ax3.annotate(str(classOut) + ': ' + classLabels[classOut], xyNow3, fontsize=75) ax3.set_xlabel('Diff Mode') # Plot Raw DN minus Min Dark Ramp: DarkNow - min(DarkNow) vs DarkMed - min(DarkMed) flagNow = flags_loc[iQuirk] dark0 = quirkNow - np.min(quirkNow) # ax4.plot(rescale((dark0 + diff_darks_flat_med_axis0)),'o-', color=rcParams['axes.color_cycle'][0]) ax4.plot(rescale((dark0 - diff_darks_flat_med_axis0)),'o-', color=rcParams['axes.color_cycle'][0]) avgCnt = 0 darksAvg = np.zeros(quirkNow.size) if flagNow[0] > 0: avgCnt += 1 darksAvg += (darks[:,flagNow[0]-1, flagNow[1]+0] - diff_darks_flat_med_axis0) * std(darks[:,flagNow[0]-1, flagNow[1]+0]) # ax4.plot(rescale(darks[:,flagNow[0]-1, flagNow[1]+0]),'o-') if flagNow[0] + 1 < darks.shape[1]: avgCnt += 1 darksAvg += (darks[:,flagNow[0]+1, flagNow[1]+0] - diff_darks_flat_med_axis0) * std(darks[:,flagNow[0]+1, flagNow[1]+0]) # ax4.plot(rescale(darks[:,flagNow[0]+1, flagNow[1]+0]),'o-') if flagNow[1] > 0: avgCnt += 1 darksAvg += (darks[:,flagNow[0]+0, flagNow[1]-1] - diff_darks_flat_med_axis0) * std(darks[:,flagNow[0]+0, flagNow[1]-1]) # ax4.plot(rescale(darks[:,flagNow[0]+0, flagNow[1]-1]),'o-') if flagNow[1] + 1 < darks.shape[1]: avgCnt += 1 darksAvg += (darks[:,flagNow[0]+0, flagNow[1]+1] - diff_darks_flat_med_axis0) * std(darks[:,flagNow[0]+0, flagNow[1]+1]) # ax4.plot(rescale(darks[:,flagNow[0]+0, flagNow[1]+1]),'o-',lw=4) darksAvg = darksAvg / avgCnt ax4.plot(rescale(darksAvg), 'o-', color=rcParams['axes.color_cycle'][3]) # ax2.plot((darksAvg - np.min(darksAvg))/darksMed_scaled,'o-'); ylims = ax4.get_ylim() xlims = ax4.get_xlim() xyNow4 = [np.min(xlims) + 0.1*diff(xlims), np.min(ylims) + 0.9*diff(ylims)] ax4.annotate(str(classOut) + ': ' + classLabels[classOut], xyNow4, fontsize=75) ax4.set_xlabel('Rescaled Nearby Pixels ' + str(flagNow[0]) + ',' + str(flagNow[1])) # ax4.set_ylim(-5,5) # ax.plot(darks_flat_med_axis0[1:] / median(darks_flat_med_axis0[1:])) fig.suptitle('iQuirk: ' + str(iQuirk) + ' / ' + str(len(quirks_store)), fontsize=20) # ax1.set_ylim(ax2.get_ylim()) fig.canvas.draw() display.display(plt.gcf()) inputNow = input('[1:Noisy, 2:HP, 3:IHP, 4:LHP, 5:SHP, 6:CR, 7:RTN0, 8:RTN1]? 
') # inputNowBak = np.copy(inputNow) quirkCheck[iQuirk] = int(classOut) if inputNow == '': pass else: classOut = int(inputNow) doubleCheck = input(str(classNow) + " -> " + str(classOut) + "? ") #doubleCheck != '' and {'y':True, 'n':False}[doubleCheck.lower()[0]]: if doubleCheck.lower()[0] == 'y': print('Changed '+ str(classNow) + ': ' + classLabels[classNow] + " to " + str(classOut) + ': ' + classLabels[classOut] + "!") quirkCheck[iQuirk] = int(classNow) display.clear_output(wait=True) np.sum(quirkCheck == 0) fig = figure(figsize=(20,5)) ax1 = fig.add_subplot(141) ax2 = fig.add_subplot(142) ax3 = fig.add_subplot(143) ax4 = fig.add_subplot(144) darksMed_scaled = darks_flat_med_axis0 / median(darks_flat_med_axis0)# pp.scale(darks_flat_med_axis0) rtnCheck = [] for iRTN, rtnNow in enumerate(rtn_syn): ax1.clear() ax2.clear() ax3.clear() ax4.clear() # ax1.plot(darks_reshaped[irow][1:] / median(darks_reshaped[irow][1:]) - darks_flat_med_axis0_norm); # Plot Subtraction frame: (Darknow - min(Darknow)) - (DarkMed - min(DarkMed)) # darkNowMinusMed = (darks_reshaped[irow][1:] - np.min(darks_reshaped[irow][1:])) - \ # (darks_flat_med_axis0[1:] - np.min(darks_flat_med_axis0[1:])) rtnNow_scaled = pp.scale(rtnNow_scaled) rtnMinusMed = (rtnNow_scaled - np.min(rtnNow_scaled)) - (darksMed_scaled - np.min(darksMed_scaled)) ax1.hist((rtnNow - np.min(rtnNow))-(darks_flat_med_axis0 - np.min(darks_flat_med_axis0)), bins=20, normed=True) kde1 = sm.nonparametric.KDEUnivariate((rtnNow - np.min(rtnNow))-(darks_flat_med_axis0 - np.min(darks_flat_med_axis0))) # kde2 = sm.nonparametric.KDEUnivariate(rtnNow) kde1.fit() # kde2.fit() ax1.plot(kde1.support, kde1.density) # ax1.plot() # ax1.axvline(median(rtnNow), linestyle='--', color='k') ax1.set_title('Subtraction Hist') # ax1.plot(darks_reshaped[irow][1:] / median(darks_reshaped[irow][1:]) / darks_flat_med_axis0_norm - 1); # Plot Normalized Frame: DarkNow vs DarMed ax2.plot((rtnNow - np.min(rtnNow))/darksMed_scaled,'o-'); #ax2.plot(darksMed_scaled,'o-') ax2.set_title('Normalized Frame') # Plot Common Mode Correlation Frame ax3.plot((rtnNow - np.min(rtnNow))-(darks_flat_med_axis0 - np.min(darks_flat_med_axis0)), darksMed_scaled,'o') # ax3.plot(darksMed_scaled, darksMed_scaled,'o') ax3.set_title('Common Mode') # Plot Raw DN minus Min Dark Ramp: DarkNow - min(DarkNow) vs DarkMed - min(DarkMed) # ax4.plot(rtnNow - np.min(rtnNow),'o-') ax4.plot((rtnNow - np.min(rtnNow)),'o-') ax4.plot((darks_flat_med_axis0 - np.min(darks_flat_med_axis0)),'o-') ax4.set_title('Raw DN - Min') # ax.plot(darks_flat_med_axis0[1:] / median(darks_flat_med_axis0[1:])) fig.suptitle('Row:' + str(irow) + ' iRTN: ' + str(iRTN) + ' / ' + str(len(rtn_syn))) # ax1.set_ylim(ax2.get_ylim()) fig.canvas.draw() display.display(plt.gcf()) display.clear_output(wait=True) rtnCheck.append(input('[1:Noisy, 2:HP, 3:IHP, 4:LHP, 5:SHP, 6:CR, 7:RTN0, 8:RTN1]? 
')) np.random.seed(42) nFlatDarks = 5000 df_inds = np.arange(darks_flat.shape[0]) df_sample = np.random.choice(df_inds, nFlatDarks, replace=False) darks_flat_sample = np.copy(darks_flat[df_sample]) plot(darks_flat_sample.T - median(darks_flat_sample,axis=1)); darks_flat_sample.shape for k in range(darks_flat_sample.shape[0]): if (darks_flat_sample[k] - median(darks_flat_sample[k]) > 200).any(): print(k) std(abs(darks_flat_sample[850] / median(darks_flat_sample[850]))-1), 2*darks_med_std darks_flat_sample0 = darks_flat_sample.copy() darks_flat_sample = vstack([darks_flat_sample0[:850], darks_flat_sample0[851:]])#.shape plot(np.mean(darks_flat,axis=0)) plot(np.median(darks_flat,axis=0)) np.median(darks_flat,axis=0).shape darks_flat_std_axis1.shape darks_flat_med_axis0.shape,darks_flat_med_axis1.shape darks_flat_sample_med1 = np.median(darks_flat_sample,axis=1) print(darks_flat_med_axis0.shape, darks_flat_sample_med1.shape) darks_flat_sample.shape plot(pp.scale(darks_flat_sample).T - pp.scale(darks_flat_med_axis0));
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
Remove Common Mode variations from frame to frame (time series) --- probably related to bias drift
plot(darks_flat_med_axis0)

quirks_store_smooth = np.copy(quirks_store)  #/ darks_flat_med_axis0
rtn_syn_smooth = np.copy(rtn_syn)  #/ darks_flat_med_axis0
darks_flat_sample_smooth = np.copy(darks_flat_sample)  #/ darks_flat_med_axis0

print(quirks_store_smooth.shape, rtn_syn_smooth.shape, darks_flat_sample_smooth.shape)

plot(((quirks_store_smooth.T - np.min(quirks_store_smooth,axis=1)) / (np.max(quirks_store_smooth,axis=1) - np.min(quirks_store_smooth,axis=1))));
plot(((rtn_syn_smooth.T - np.min(rtn_syn_smooth,axis=1)) / (np.max(rtn_syn_smooth,axis=1) - np.min(rtn_syn_smooth,axis=1))));

quirksNoisy = quirks_store_smooth[classes_store==1]
plot(((quirksNoisy.T - np.min(quirksNoisy,axis=1)) / (np.max(quirksNoisy,axis=1) - np.min(quirksNoisy,axis=1))));

plot(((darks_flat_sample_smooth.T - np.min(darks_flat_sample_smooth,axis=1)) / (np.max(darks_flat_sample_smooth,axis=1) - np.min(darks_flat_sample_smooth,axis=1))));
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
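Note that the cell above only copies the arrays; the division by the median ramp is commented out. As an illustration of what actually removing the common mode could look like, here is a minimal, hypothetical sketch that divides each pixel ramp by the median ramp. The helper name `remove_common_mode` is an assumption for this sketch, not part of the original notebook.

import numpy as np

def remove_common_mode(ramps, common_mode):
    """Divide each pixel ramp by the common-mode (e.g. median) ramp.

    ramps       : 2D array, shape (n_pixels, n_frames)
    common_mode : 1D array, shape (n_frames,), e.g. the median ramp over all pixels
    """
    safe_cm = np.where(common_mode != 0, common_mode, 1.0)  # avoid division by zero
    return ramps / safe_cm[np.newaxis, :]

# Example usage with the (assumed) variable names from the cells above:
# quirks_store_smooth = remove_common_mode(quirks_store, darks_flat_med_axis0)
# rtn_syn_smooth = remove_common_mode(rtn_syn, darks_flat_med_axis0)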
Random Forest Classification --- Load scikit-learn Libraries
from sklearn.ensemble import RandomForestClassifier from sklearn.utils import shuffle from sklearn.cross_validation import train_test_split from sklearn.externals import joblib
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
darks_classes = np.zeros(darks_flat_sample_smooth.shape[0],dtype=int)
rtn_classes = rtn_classes + 3
samples_train_set = vstack([quirks_store_smooth, rtn_syn_smooth, darks_flat_sample_smooth])
classes_train_set = vstack([classes_store[:,None], rtn_classes[:,None], darks_classes[:,None]])[:,0]
classes_store[np.where(classes_store > 3)] += 1 darks_classes = np.zeros(darks_flat_sample_smooth.shape[0],dtype=int) rtn_classes = rtn_classes + 3 samples_train_set = vstack([quirks_store, rtn_syn, darks_flat_sample]) classes_train_set = vstack([classes_store[:,None], rtn_classes[:,None], darks_classes[:,None]])[:,0] samples_train_set.shape
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
sts_inds = np.arange(samples_train_set.shape[0])
unsort_sts = np.random.choice(sts_inds, sts_inds.size, replace=False)
samples_train_set_resort = shuffle(np.copy(samples_train_set), random_state=42) classes_train_resort = shuffle(np.copy(classes_train_set), random_state=42)
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
Rescale all samples from 0 to 1
samples_train_set_resort.shape samples_train_set_resort_scaled = (( samples_train_set_resort.T - np.min(samples_train_set_resort,axis=1)) / \ (np.max(samples_train_set_resort,axis=1) - np.min(samples_train_set_resort,axis=1))).T samples_train_set_resort_scaled.shape plot(samples_train_set_resort_scaled.T);
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
Establish Random Forest Classification
- 1000 trees
- OOB Score
- Multiprocessing
rfc = RandomForestClassifier(n_estimators=1000, oob_score=True, n_jobs=-1, random_state=42, verbose=True)
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
rfc2 = RandomForestClassifier(n_estimators=1000, oob_score=True, n_jobs=-1, random_state=42, verbose=True)
Split Samples into 75% Train and 25% Test
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
X_train, X_test, Y_train, Y_test = train_test_split(samples_train_set_resort_scaled.T, classes_train_resort, test_size = 0.25, random_state=42) X_train.shape, X_test.shape, Y_train.shape, Y_test.shape
Shuffle Training Data Set
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
X_train, Y_train = shuffle(X_train, Y_train, random_state=42)
Train Classifier with `rfc.fit`
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
rfc.fit(X_train, Y_train)
rfc.fit(samples_train_set_resort_scaled, classes_train_resort)
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
Score Classifier with Test Data
rfc.score(samples_train_set_resort_scaled, classes_train_resort)
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
Score Classifier with Out-of-Bag Error
rfc.oob_score_
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
Save Random Forest Classifier because 98% is AWESOME!
joblib.dump(rfc, 'trained_RF_Classifier/random_forest_classifier_trained_on_resorted_samples_train_set_RTN_CR_HP_Other_Norm.save') joblib.dump(dict(samples=samples_train_set_resort_scaled.T, classes=classes_train_resort), 'trained_RF_Classifier/RTN_CR_HP_Other_Norm_resorted_samples_train_set.save')
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
joblib.dump(rfc2, 'trained_RF_Classifier/random_forest_classifier_trained_full_set_on_resorted_samples_train_set_RTN_CR_HP_Other_Norm.save')
darks_reshaped.shape step = 0 skipsize = 100 chunkNow = arange(step*darks_reshaped.shape[0]//skipsize,min((step+1)*darks_reshaped.shape[0]//skipsize, darks_reshaped.shape[0])) darks_reshaped_chunk = darks_reshaped[chunkNow] darks_reshaped_chunk_smooth = darks_reshaped_chunk #/ darks_flat_med_axis0 darks_reshaped_chunk_scaled = ((darks_reshaped_chunk_smooth.T - np.min(darks_reshaped_chunk_smooth,axis=1)) / \ ((np.max(darks_reshaped_chunk_smooth,axis=1) - np.min(darks_reshaped_chunk_smooth,axis=1)))).T samples_train_set_resort_scaled.shape, darks_reshaped_chunk_smooth.shape plot((darks_reshaped_chunk_scaled[::100]).T); rfc_pred = np.zeros(darks_reshaped.shape[0]) rfc2_pred= np.zeros(darks_reshaped.shape[0]) step = 0 skipsize = 50 gapSize = rfc_pred.size//skipsize start = time() for step in range(skipsize): chunkNow = arange(step*gapSize,min((step+1)*gapSize, rfc_pred.size)) print(chunkNow.min(), chunkNow.max(), end=" ") # # darks_reshaped_k_scaled = ((darks_reshaped[chunkNow].T - np.min(darks_reshaped[chunkNow],axis=1)) / \ # ((np.max(darks_reshaped[chunkNow],axis=1) - np.min(darks_reshaped[chunkNow],axis=1)))).T # darks_reshaped_chunk = darks_reshaped[chunkNow] # darks_reshaped_chunk_smooth = darks_reshaped_chunk #/ darks_flat_med_axis0 darks_reshaped_chunk_scaled = ((darks_reshaped[chunkNow].T - np.min(darks_reshaped[chunkNow],axis=1)) / \ ((np.max(darks_reshaped[chunkNow],axis=1) - np.min(darks_reshaped[chunkNow],axis=1)))).T # rfc_pred[chunkNow] = rfc.predict(darks_reshaped_chunk_scaled) badPix = rfc_pred != 0.0 numBad = badPix.sum() percentBad = str(numBad / rfc_pred.size * 100)[:5] + '%' print(percentBad, rfc_pred[badPix][::numBad//10]) print('Operation Took ' + str(time() - start) + ' seconds') print(str((rfc_pred != 0.0).sum() / rfc_pred.size * 100)[:5] + '%') for k in range(6): print(k,sum(rfc_pred == k)) rfc_pred_train = rfc.predict(samples_train_set_resort_scaled) 1-np.abs(rfc_pred_train - classes_train_resort).sum() / classes_train_resort.size, rfc.score(samples_train_set_resort_scaled, classes_train_resort) fig = figure()#figsize=(6,6)) ax = fig.add_subplot(111) # ax.plot([nan,nan]) class3Check = [] for iRTN, irow in enumerate(np.where(rfc_pred == 3)[0]): # ax.lines.pop() ax.clear() ax.plot(darks_reshaped[irow][1:]); ax.set_title('Row:' + str(irow) + ' iRTN:' + str(iRTN) + ' / ' + str(sum(rfc_pred == 3 ))) fig.canvas.draw() display.display(plt.gcf()) display.clear_output(wait=True) class3Check.append(input('Could this be an RTN? 
')) class3Check_array = np.zeros(len(class3Check), dtype=bool) for k, c3cNow in enumerate(class3Check): if c3cNow.lower() in ['', 'y', 'd', 'yu']: class3Check_array[k] = True elif c3cNow.lower() in ['o']: class3Check_array[k-1] = True elif c3cNow.lower() in ['n']: class3Check_array[k] = False else: print(k, c3cNow) class3Check_array.size, len(class3Check) plot(darks_flat_med_axis0 / median(darks_flat_med_axis0)) darks_flat_med_axis0_norm = darks_flat_med_axis0[1:] / median(darks_flat_med_axis0[1:]) fig = figure(figsize=(20,5)) ax1 = fig.add_subplot(141) ax2 = fig.add_subplot(142) ax3 = fig.add_subplot(143) ax4 = fig.add_subplot(144) # ax.plot([nan,nan]) class1Check = [] for iNoisy, irow in enumerate(np.where(rfc_pred == 1)[0]): ax1.clear() ax2.clear() ax3.clear() ax4.clear() # ax1.plot(darks_reshaped[irow][1:] / median(darks_reshaped[irow][1:]) - darks_flat_med_axis0_norm); # Plot Subtraction frame: (Darknow - min(Darknow)) - (DarkMed - min(DarkMed)) # darkNowMinusMed = (darks_reshaped[irow][1:] - np.min(darks_reshaped[irow][1:])) - \ # (darks_flat_med_axis0[1:] - np.min(darks_flat_med_axis0[1:])) darkNowMinusMed = pp.scale(darks_reshaped[irow][1:]) - pp.scale(darks_flat_med_axis0_norm) ax1.plot(darkNowMinusMed,'o-') ax1.axhline(median(darkNowMinusMed), linestyle='--', color='k') ax1.set_title('Subtraction Frame') # ax1.plot(darks_reshaped[irow][1:] / median(darks_reshaped[irow][1:]) / darks_flat_med_axis0_norm - 1); # Plot Normalized Frame: DarkNow vs DarMed ax2.plot(pp.scale(darks_reshaped[irow][1:]),'o-'); ax2.plot(pp.scale(darks_flat_med_axis0_norm),'o-') ax2.set_title('Normalized Frame') # Plot Common Mode Correlation Frame ax3.plot(darks_reshaped[irow][1:] / median(darks_reshaped[irow][1:]), darks_flat_med_axis0_norm,'o') ax3.set_title('Common Mode') # Plot Raw DN minus Min Dark Ramp: DarkNow - min(DarkNow) vs DarkMed - min(DarkMed) ax4.plot(darks_reshaped[irow][1:] - np.min(darks_reshaped[irow][1:]),'o-') ax4.plot(darks_flat_med_axis0[1:] - np.min(darks_flat_med_axis0[1:]),'o-') ax4.set_title('Raw DN - Min') # ax.plot(darks_flat_med_axis0[1:] / median(darks_flat_med_axis0[1:])) #ax1.set_title('Row:' + str(irow) + ' iNoisy: ' + str(iNoisy) + ' / ' + str(sum(rfc_pred == 1))) fig.suptitle('Row:' + str(irow) + ' iNoisy: ' + str(iNoisy) + ' / ' + str(sum(rfc_pred == 1))) # ax1.set_ylim(ax2.get_ylim()) fig.canvas.draw() display.display(plt.gcf()) display.clear_output(wait=True) class1Check.append(input('Could this be an RTN? ')) fig = figure()#figsize=(6,6)) ax = fig.add_subplot(111) # ax.plot([nan,nan]) rtn_synCheck = [] for iRTN, rtnNow in enumerate(rtn_syn): # ax.lines.pop() ax.clear() ax.plot(rtnNow); ax.set_title('iRTN:' + str(iRTN) + ' / ' + str(len(rtn_syn))) fig.canvas.draw() display.display(plt.gcf()) display.clear_output(wait=True) #class3Check.append(input('Could this be an RTN? '))
_____no_output_____
BSD-3-Clause
notebooks/RTN Classification - Active.ipynb
exowanderer/BadPixelDetector
Getting Geo-coordinates for WSJ Colleges

Here we are going to use a couple of Python tools to make a database of the latitude/longitude locations for the different schools contained in the report. I'm doing this to compare the speed and accuracy of the included Power BI ArcGIS maps with hard-coding the coordinates. Our strategy is:
- Create a search string using the college name and city.
- Use `requests` to query the Google Maps API.
- Save the database as a new file.

First, we read in the WSJ data and create a search string.
geodf.head() import pandas as pd wsj = pd.read_csv('wsj_data.csv') import os if os.path.exists('wsj_locs.csv'): geodf = pd.read_csv('wsj_locs.csv', index_col='loc_string') else: geodf = pd.DataFrame() geodf.index.name = 'loc_string' wsj.head()
_____no_output_____
Apache-2.0
Geocoding_Colleges.ipynb
stkbailey/WSJ_CollegeRankings2018
For each college, we're going to create a search string as if we were looking it up in Google Maps. It's important to include as much information as we have so that the location service doesn't get confused with institutions in other countries, for example.
overwrite_loc_string = None if overwrite_loc_string: wsj['loc_string'] = wsj.apply(lambda s: '{}, {}, USA'.format(s.college, s.city_state), axis=1) wsj.to_csv('wsj_data.csv', encoding='utf-8', index=None) print(wsj.loc_string[0:5]) def getCoords(search_string): '''Takes a search term, queries Google and returns the geocoordinates.''' import requests try: query = search_string.replace(' ', '+') response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address={}'.format(query)) response_from_google = response.json() address = response_from_google['results'][0]['formatted_address'] latitude = response_from_google['results'][0]['geometry']['location']['lat'] longitude = response_from_google['results'][0]['geometry']['location']['lng'] return pd.Series(name=search_string, \ data={'Address': address, 'Latitude': latitude, 'Longitude': longitude}) except: return pd.Series(name=search_string, data={'Address': None, 'Latitude': None, 'Longitude': None}) for ind, school in wsj.loc_string.iteritems(): if (not school in geodf.index) or (geodf.loc[school, 'Address'] == None): data = getCoords(school) geodf.loc[school] = data print(school, '\n\t\t ', data) geodf.to_csv('wsj_locs.csv', encoding='utf-8')
_____no_output_____
Apache-2.0
Geocoding_Colleges.ipynb
stkbailey/WSJ_CollegeRankings2018
Global Signals in Time Series Data
By Abigail Stevens

Problem 1: Timmer and Koenig algorithm
The algorithm outlined in Timmer & Koenig 1995 lets you define the shape of your power spectrum (a power law with some slope, a Lorentzian, a sum of a couple of Lorentzians and a power law, etc.) and then generate the random phases and amplitudes of the Fourier transform to simulate light curves defined by that power spectral shape. This is a great simulation tool to have in your back pocket (or "maybe useful someday" GitHub repo).

Define some basic parameters for the power spectrum and resultant light curve
n_bins = 8192  ## number of total frequency bins in a FT segment; same as number of time bins in the light curve
dt = 1./16.    # time resolution of the output light curve
df = 1. / dt / n_bins
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
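As a quick sanity check on these choices (not part of the original workbook), the frequency resolution, Nyquist frequency, and light-curve length follow directly from `dt` and `n_bins`:

# df = 1 / (n_bins * dt) = 16 / 8192 ~ 0.00195 Hz; Nyquist = 1 / (2 dt) = 8 Hz; length = n_bins * dt = 512 s
print("frequency resolution df = %.5f Hz" % (1. / (n_bins * dt)))
print("Nyquist frequency = %.1f Hz" % (1. / (2. * dt)))
print("light curve length = %.0f s" % (n_bins * dt))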
1a. Make an array of Fourier frequencies
Yes, you can do this with scipy, but the order of frequencies in a T&K power spectrum is different from what you'd get by default from a standard FFT of a light curve. You want the zero frequency to be in the middle (at index n_bins/2) of the frequency array. The positive frequencies should have two more indices than the negative frequencies, because of the zero frequency and the Nyquist frequency. You can either do this with `np.arange` or with special options in `fftpack.fftfreq`.
#freq = fftpack.fftfreq(n_bins, d=df)
freqs = np.arange(float(-n_bins/2)+1, float(n_bins/2)+1) * df
pos_freq = freqs[np.where(freqs >= 0)]
## Positive should have 2 more than negative,
## because of the 0 freq and the nyquist freq
neg_freq = freqs[np.where(freqs < 0)]
nyquist = pos_freq[-1]
len_pos = len(pos_freq)
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
1b. Define a Lorentzian function and power law function for the shape of the power spectrum
def lorentzian(v, v_0, gamma): """ Gives a Lorentzian centered on v_0 with a FWHM of gamma """ numerator = gamma / (np.pi * 2.0) denominator = (v - v_0) ** 2 + (1.0/2.0 * gamma) ** 2 L = numerator / denominator return L def powerlaw(v, beta): """Gives a powerlaw of (1/v)^-beta """ pl = np.zeros(len(v)) pl[1:] = v[1:] ** (-beta) pl[0] = np.inf return pl
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
Now the T&K algorithm. I've transcribed the 'recipe' section of the T&K95 paper, which you will convert to lines of code.

1c. Choose a power spectrum $S(\nu)$.
We will use the sum of one Lorentzian (a QPO with a centroid frequency of 0.5 Hz and a FWHM of 0.01 Hz) and a Poisson-noise power law. The QPO should have 100 times larger amplitude than the power law.
power_shape = 100 * lorentzian(pos_freq, 0.5, 0.01) + powerlaw(pos_freq, 0)
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
1d. For each Fourier frequency $\nu_i$, draw two Gaussian-distributed random numbers, multiply them by $$\sqrt{\frac{1}{2}S(\nu_i)}$$ and use the results as the real and imaginary parts of the Fourier transform $F$ of the desired data.

In the case of an even number of data points, for reasons of symmetry $F(\nu_{Nyquist})$ is always real. Thus only one Gaussian-distributed random number has to be drawn.
from numpy.random import randn
np.random.seed(3)

rand_r = np.random.standard_normal(len_pos)
rand_i = np.random.standard_normal(len_pos-1)
rand_i = np.append(rand_i, 0.0)  # because the nyquist frequency should only have a real value

## Creating the real and imaginary values from the lists of random numbers and the frequencies
r_values = rand_r * np.sqrt(0.5 * power_shape)
i_values = rand_i * np.sqrt(0.5 * power_shape)
r_values[np.where(pos_freq == 0)] = 0
i_values[np.where(pos_freq == 0)] = 0
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
1e. To obtain a real-valued time series, choose the Fourier components for the negative frequencies according to $F(-\nu_i)=F^{*}(\nu_i)$, where the asterisk denotes complex conjugation. Append to make one Fourier transform array. Check that your T&K Fourier transform has length `n_bins`. Again, for this algorithm, the zero Fourier frequency is in the middle of the array, the negative Fourier frequencies are in the first half, and the positive Fourier frequencies are in the second half.
FT_pos = r_values + i_values*1j
FT_neg = np.conj(FT_pos[1:-1])
FT_neg = FT_neg[::-1]  ## Need to flip direction of the negative frequency FT values so that they match up correctly
FT = np.append(FT_pos, FT_neg)
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
1f. Obtain the time series by backward Fourier transformation of $F(\nu)$ from the frequency domain to the time domain.

Note: I usually use `.real` after an iFFT to get rid of any lingering 1e-10 imaginary factors.
lc = fftpack.ifft(FT).real
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
Congratulations!

1g. Plot the power spectrum of your FT (only the positive frequencies) next to the light curve it makes. Remember: $$P(\nu_i)=|F(\nu_i)|^2$$
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(12, 5)) ax1.loglog(pos_freq, np.abs(FT_pos)**2) ax2.plot(np.linspace(0, len(lc), len(lc)), lc) ax2.set_xlim(0, 200) fig.show()
/Users/rmorgan/anaconda3/lib/python3.7/site-packages/matplotlib/figure.py:457: UserWarning: matplotlib is currently using a non-GUI backend, so cannot show the figure "matplotlib is currently using a non-GUI backend, "
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
You'll want to change the x scale of your light curve plot to be like 20 seconds in length, and only use the positive Fourier frequencies when plotting the power spectrum. Yay!

1h. Play around with your new-found simulation powers (haha, it's a pun!)
Make more power spectra with different features -- try at least 5 or 6, and plot each of them next to the corresponding light curve. Try red noise, flicker noise, a few broad Lorentzians at lower frequency, multiple QPOs, a delta function, etc. Here are some other functions you can use to define shapes of power spectra. This exercise is to help build your intuition of what a time signal looks like in the Fourier domain and vice-versa.
def gaussian(v, mean, std_dev): """ Gives a Gaussian with a mean of mean and a standard deviation of std_dev FWHM = 2 * np.sqrt(2 * np.log(2))*std_dev """ exp_numerator = -(v - mean)**2 exp_denominator = 2 * std_dev**2 G = np.exp(exp_numerator / exp_denominator) return G def powerlaw_expdecay(v, beta, alpha): """Gives a powerlaw of (1/v)^-beta with an exponential decay e^{-alpha*v} """ pl_exp = np.where(v != 0, (1.0 / v) ** beta * np.exp(-alpha * v), np.inf) return pl_exp def broken_powerlaw(v, v_b, beta_1, beta_2): """Gives two powerlaws, (1/v)^-beta_1 and (1/v)^-beta_2 that cross over at break frequency v_b.""" c = v_b ** (-beta_1 + beta_2) ## scale factor so that they're equal at the break frequency pl_1 = v[np.where(v <= v_b)] ** (-beta_1) pl_2 = c * v[np.where(v > v_b)] ** (-beta_2) pl = np.append(pl_1, pl_2) return pl
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
2. More realistic simulation with T&K
Now you're able to simulate the power spectrum of a single segment of a light curve. However, as you learned this morning, we usually use multiple (~50+) segments of a light curve, take the power spectrum of each segment, and average them together.

2a. Turn the code from 1d to 1e into a function `make_TK_seg`
Make it so that you can give a different random seed to each segment.

2b. Make the Fourier transform for a given power shape (as in Problem 1)
Use a Lorentzian QPO + Poisson noise power shape at a centroid frequency of 0.5 Hz and a full width at half maximum (FWHM) of 0.01 Hz. Make the QPO 100 times stronger than the Poisson noise power-law.

2c. Put `make_TK_seg` in a loop to do this for 50 segments.
Make an array of integers that can be your random Gaussian seed for the TK algorithm (otherwise, you run the risk of creating the exact same Fourier transform every time, and that will be boring). Keep a running average of the power spectrum of each segment (like we did this morning in problem 2).

2d. Compute the error on the average power
The error on the power at index $i$ is $$ \delta P_i = \frac{P_i}{\sqrt{M}} $$ where `M` is the number of segments averaged together.

2e. Use the re-binning algorithm described in the morning's workbook to re-bin the power spectrum by a factor of 1.05. Plot the average power spectrum
Remember to use log scale for the y-axis and probably the x-axis too!
fig, ax = plt.subplots(1,1, figsize=(8,5)) ax.plot(rb_freq, rb_pow, linewidth=2.0) ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel(r'Frequency (Hz)', fontproperties=font_prop) ax.tick_params(axis='x', labelsize=16, bottom=True, top=True, labelbottom=True, labeltop=False) ax.tick_params(axis='y', labelsize=16, left=True, right=True, labelleft=True, labelright=False) plt.show()
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
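The workbook leaves 2a–2d (and the re-binning in 2e) as exercises; the plotting cell above expects `rb_freq` and `rb_pow` to already exist. Below is one minimal sketch of how those pieces could fit together, reusing the 1c–1f steps. `make_TK_seg` is the name suggested in 2a, but `rebin_log` and the exact geometric re-binning scheme are assumptions here (the morning workbook's algorithm isn't shown), so treat this as illustrative rather than the official solution.

def make_TK_seg(power_shape, pos_freq, seed):
    """One T&K segment: draw random Fourier amplitudes for power_shape and return the full FT."""
    rng = np.random.RandomState(seed)
    rand_r = rng.standard_normal(len(pos_freq))
    rand_i = rng.standard_normal(len(pos_freq) - 1)
    rand_i = np.append(rand_i, 0.0)          # Nyquist frequency is purely real
    r_values = rand_r * np.sqrt(0.5 * power_shape)
    i_values = rand_i * np.sqrt(0.5 * power_shape)
    r_values[pos_freq == 0] = 0
    i_values[pos_freq == 0] = 0
    FT_pos = r_values + 1j * i_values
    FT_neg = np.conj(FT_pos[1:-1])[::-1]     # F(-nu) = F*(nu)
    return np.append(FT_pos, FT_neg)

## 2b: same QPO + Poisson-noise shape as Problem 1
power_shape = 100 * lorentzian(pos_freq, 0.5, 0.01) + powerlaw(pos_freq, 0)

## 2c: average the power spectrum over 50 segments, each with its own integer seed
n_seg = 50
seeds = np.arange(n_seg)
avg_power = np.zeros(len(pos_freq))
for seed in seeds:
    FT = make_TK_seg(power_shape, pos_freq, seed)
    avg_power += np.abs(FT[:len(pos_freq)]) ** 2
avg_power /= n_seg

## 2d: error on the average power
err_power = avg_power / np.sqrt(n_seg)

## 2e: simple geometric re-binning by a factor of 1.05 (an assumed stand-in
## for the morning workbook's algorithm)
def rebin_log(freq, power, rebin_factor=1.05):
    rb_freq, rb_pow = [], []
    lo = 1                                   # skip the zero frequency
    while lo < len(freq):
        hi = max(lo + 1, int(np.ceil(lo * rebin_factor)))
        rb_freq.append(np.mean(freq[lo:hi]))
        rb_pow.append(np.mean(power[lo:hi]))
        lo = hi
    return np.asarray(rb_freq), np.asarray(rb_pow)

rb_freq, rb_pow = rebin_log(pos_freq, avg_power)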
2f. Re-do 2b through the plot above, but slightly change the power spectrum shape in each segment. Maybe you change the centroid frequency of the QPO, or the normalizing factors between the two components, or the slope of the power-law.

Bonus problems:
1. Use a different definition of the Lorentzian (below) to make a power spectrum. Follow the same procedure. Start off with just one segment. Use the rms as the normalizing factor.
2. Using what you learned about data visualization earlier this week, turn the plots in this notebook (and the QPO one, if you're ambitious) into clear and easy-to-digest, publication-ready plots.
def lorentz_q(v, v_peak, q, rms): """ Form of the Lorentzian function defined in terms of peak frequency v_peak and quality factor q q = v_peak / fwhm with the integrated rms of the QPO as the normalizing factor. e.g. see Pottschmidt et al. 2003, A&A, 407, 1039 for more info """ f_res = v_peak / np.sqrt(1.0+(1.0/(4.0*q**2))) r = rms / np.sqrt(0.5-np.arctan(-2.0*q)/np.pi) lorentz = ((1/np.pi)*2*r**2*q*f_res) / (f_res**2+(4*q**2*(v-f_res)**2)) return lorentz
_____no_output_____
MIT
Session9/Day4/workbook_globalsignals.ipynb
rmorgan10/LSSTC-DSFP-Sessions
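For 2f and bonus problem 1, the same machinery can be reused: vary the power-spectrum shape per segment, or build the shape from `lorentz_q` with an rms normalization. This is a hedged sketch that builds on the illustrative `make_TK_seg` and `seeds` from the sketch above; the drift size, quality factor, and rms values are arbitrary choices, not from the workbook.

## 2f: let the QPO centroid drift slightly from segment to segment
avg_power_drift = np.zeros(len(pos_freq))
for seed in seeds:
    centroid = 0.5 + np.random.normal(0, 0.005)          # small per-segment drift
    shape_seg = 100 * lorentzian(pos_freq, centroid, 0.01) + powerlaw(pos_freq, 0)
    FT = make_TK_seg(shape_seg, pos_freq, seed)
    avg_power_drift += np.abs(FT[:len(pos_freq)]) ** 2
avg_power_drift /= len(seeds)

## Bonus 1: one segment using the rms-normalized Lorentzian defined above
power_shape_q = lorentz_q(pos_freq, v_peak=0.5, q=50., rms=0.1) + powerlaw(pos_freq, 0)
FT_q = make_TK_seg(power_shape_q, pos_freq, seed=0)
lc_q = fftpack.ifft(FT_q).real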
oneM2M - Access Control
This notebook demonstrates how access control to resources can be done in oneM2M.
- Create an &lt;ACP> resource with different credentials for a new originator
- Create a second &lt;AE> resource with the new access control policy
- Succeed in adding a &lt;Container> to the second &lt;AE> resource
- Fail to update the second &lt;AE> resource

Initialization
This section imports the necessary modules and configurations.
%run init.py
_____no_output_____
BSD-3-Clause
onem2m-05-accesscontrol.ipynb
lovele0107/oneM2M-jupyter
Create an &lt;ACP> Resource
Access Control Policies are used to associate access control with credentials. They define the rules for access control to resources. Each &lt;ACP> resource has two sections:
- **pv (Privileges)** : The actual privileges defined by this policy.
- **pvs (Self-Privileges)** : The privileges necessary to access and control the &lt;ACP> resource itself.

Each section has at least the following two parameters:
- **acor (accessControlOriginators)** : This list includes the Originator information. The parameter comprises a list of domains, CSE-IDs, AE-IDs, the resource-ID of a &lt;Group> resource that contains &lt;AE> or &lt;remoteCSE> resources as members, or Role-IDs.
- **acop (accessControlOperations)** : This number represents a bit-field of privileges. The following table shows the mapping:

| Value | Interpretation |
|-------|----------------|
| 1     | CREATE         |
| 2     | RETRIEVE       |
| 4     | UPDATE         |
| 8     | DELETE         |
| 16    | NOTIFY         |
| 32    | DISCOVERY      |

The following request creates a new &lt;ACP> that allows the originator *abcxyz* only to send CREATE, RETRIEVE, NOTIFY, DELETE, and DISCOVERY requests to resources that have this &lt;ACP> resource assigned.
CREATE (                                           # CREATE request
    url,

    # Request Headers
    {
        'X-M2M-Origin' : originator,               # Set the originator
        'X-M2M-RI'     : '0',                      # Request identifier
        'Accept'       : 'application/json',       # Response shall be JSON
        'Content-Type' : 'application/json;ty=1'   # Content is JSON, and represents an <ACP> resource
    },

    # Request Body
    '''
    {   "m2m:acp": {
            "rn":"Notebook-ACP",
            "pv": {
                "acr": [{
                    "acor": [ "abcxyz" ],
                    "acop": 59
                }]
            },
            "pvs": {
                "acr": [{
                    "acor": [ "%s" ],
                    "acop": 63
                }]
            }
        }
    }
    ''' % originator
)
_____no_output_____
BSD-3-Clause
onem2m-05-accesscontrol.ipynb
lovele0107/oneM2M-jupyter
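As a quick check of the bit-field values used above (a small illustrative snippet, not part of the original notebook): acop 59 grants every operation except UPDATE, while 63 grants all six operations.

ACOP = {'CREATE': 1, 'RETRIEVE': 2, 'UPDATE': 4, 'DELETE': 8, 'NOTIFY': 16, 'DISCOVERY': 32}

def decode_acop(acop):
    """Return the operation names granted by an accessControlOperations bit-field."""
    return [name for name, bit in ACOP.items() if acop & bit]

print(decode_acop(59))   # ['CREATE', 'RETRIEVE', 'DELETE', 'NOTIFY', 'DISCOVERY'] -> no UPDATE
print(decode_acop(63))   # all six operations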
Create a second &lt;AE> Resource with the new &lt;ACP>
We now create a new &lt;AE> resource that uses the &lt;ACP> we just created.

**This should succeed.**
CREATE (                                           # CREATE request
    url,

    # Request Headers
    {
        'X-M2M-Origin' : 'C',                      # Set the originator
        'X-M2M-RI'     : '0',                      # Request identifier
        'Accept'       : 'application/json',       # Response shall be JSON
        'Content-Type' : 'application/json;ty=2'   # Content is JSON, and represents an <AE> resource
    },

    # Request Body
    '''
    {   "m2m:ae": {
            "rn": "Notebook-AE_2",
            "api": "AE",
            "acpi" : [ "%s/Notebook-ACP" ],
            "rr": true,
            "srv": ["3"]
        }
    }
    ''' % basename
)
_____no_output_____
BSD-3-Clause
onem2m-05-accesscontrol.ipynb
lovele0107/oneM2M-jupyter
Try to Create a &lt;Container> under the second &lt;AE> Resource
We will now create a &lt;Container> resource under the second &lt;AE> resource with the originator *abcxyz*.

**This should work**, since this originator is allowed to send CREATE requests.
CREATE (                                           # CREATE request
    url + '/Notebook-AE_2',

    # Request Headers
    {
        'X-M2M-Origin' : "abcxyz",                 # Set the originator
        'X-M2M-RI'     : '0',                      # Request identifier
        'Accept'       : 'application/json',       # Response shall be JSON
        'Content-Type' : 'application/json;ty=3'   # Content is JSON, and represents an <Container> resource
    },

    # Request Body
    '''
    {   "m2m:cnt": {
            "rn":"Container",
            "acpi": [ "%s/Notebook-ACP" ]
        }
    }
    ''' % basename
)
_____no_output_____
BSD-3-Clause
onem2m-05-accesscontrol.ipynb
lovele0107/oneM2M-jupyter
Try to Update the second &lt;AE> Resource
Now we try to update the new &lt;AE> resource (add a *lbl* attribute) with the other originator, *abcxyz*.

**This should fail**, since the associated &lt;ACP> doesn't allow UPDATE requests.
UPDATE (                                           # UPDATE request
    url + '/Notebook-AE_2',

    # Request Headers
    {
        'X-M2M-Origin' : 'abcxyz',                 # Set the originator
        'X-M2M-RI'     : '0',                      # Request identifier
        'Accept'       : 'application/json',       # Response shall be JSON
        'Content-Type' : 'application/json;ty=2'   # Content is JSON, and represents an <AE> resource
    },

    # Request Body
    {   "m2m:ae": {
            "lbl": [ "test:test" ]
        }
    }
)
_____no_output_____
BSD-3-Clause
onem2m-05-accesscontrol.ipynb
lovele0107/oneM2M-jupyter
[View in Colaboratory](https://colab.research.google.com/github/JacksonIsaac/colab_notebooks/blob/master/kaggle_tgs_salt_identification.ipynb)

Kaggle notebook
For the *TGS Salt Identification* competition: https://www.kaggle.com/c/tgs-salt-identification-challenge

Set up Kaggle and download the dataset
!pip install kaggle

## Load Kaggle config JSON
from googleapiclient.discovery import build
import io, os
from googleapiclient.http import MediaIoBaseDownload
from google.colab import auth

auth.authenticate_user()
drive_service = build('drive', 'v3')
results = drive_service.files().list(
    q="name = 'kaggle.json'", fields="files(id)").execute()
kaggle_api_key = results.get('files', [])

filename = "/content/.kaggle/kaggle.json"
os.makedirs(os.path.dirname(filename), exist_ok=True)

request = drive_service.files().get_media(fileId=kaggle_api_key[0]['id'])
fh = io.FileIO(filename, 'wb')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
    status, done = downloader.next_chunk()
    print("Download %d%%." % int(status.progress() * 100))

os.chmod(filename, 0o600)  # use an octal mode; a plain 600 would set unintended permission bits

!mkdir ~/.kaggle
!cp /content/.kaggle/kaggle.json ~/.kaggle/kaggle.json

!kaggle competitions download -c tgs-salt-identification-challenge
!ls
!unzip -q train.zip
_____no_output_____
MIT
kaggle_tgs_salt_identification.ipynb
JacksonIsaac/colab_notebooks
Install Dependencies
!pip install -q imageio !pip install -q torch !pip install -q ipywidgets import os import numpy as np import imageio import matplotlib.pyplot as plt import pandas as pd import torch from torch.utils import data
_____no_output_____
MIT
kaggle_tgs_salt_identification.ipynb
JacksonIsaac/colab_notebooks
Create class for input dataset
class TGSSaltDataSet(data.Dataset):
    def __init__(self, root_path, file_list):
        self.root_path = root_path
        self.file_list = file_list

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, index):
        file_id = self.file_list[index]

        # Image folder
        image_folder = os.path.join(self.root_path, 'images')
        image_path = os.path.join(image_folder, file_id + '.png')

        # Label folder
        mask_folder = os.path.join(self.root_path, 'masks')
        mask_path = os.path.join(mask_folder, file_id + '.png')

        image = np.array(imageio.imread(image_path), dtype=np.uint8)
        mask = np.array(imageio.imread(mask_path), dtype=np.uint8)
        return image, mask
_____no_output_____
MIT
kaggle_tgs_salt_identification.ipynb
JacksonIsaac/colab_notebooks
Load dataset csv
train_mask = pd.read_csv('train.csv') depth = pd.read_csv('depths.csv') train_path = './' file_list = list(train_mask['id'].values) dataset = TGSSaltDataSet(train_path, file_list)
_____no_output_____
MIT
kaggle_tgs_salt_identification.ipynb
JacksonIsaac/colab_notebooks
Visualize dataset
def plot2x2array(image, mask): fig, axs = plt.subplots(1, 2) axs[0].imshow(image) axs[1].imshow(mask) axs[0].grid() axs[1].grid() axs[0].set_title('Image') axs[1].set_title('Mask') for i in range(5): image, mask = dataset[np.random.randint(0, len(dataset))] plot2x2array(image, mask) plt.figure(figsize = (6, 6)) plt.hist(depth['z'], bins = 50) plt.title('Depth distribution')
_____no_output_____
MIT
kaggle_tgs_salt_identification.ipynb
JacksonIsaac/colab_notebooks
Convert RLE Mask to matrix
def rle_to_mask(rle_string, height, width): rows, cols = height, width try: rle_numbers = [int(numstr) for numstr in rle_string.split(' ')] rle_pairs = np.array(rle_numbers).reshape(-1, 2) img = np.zeros(rows * cols, dtype=np.uint8) for idx, length in rle_pairs: idx -= 1 img[idx:idx+length] = 255 img = img.reshape(cols, rows) img = img.T except: img = np.zeros((cols, rows)) return img def salt_proportion(img_array): try: unique, counts = np.unique(img_array, return_counts=True) return counts[1]/10201. except: return 0.0
_____no_output_____
MIT
kaggle_tgs_salt_identification.ipynb
JacksonIsaac/colab_notebooks
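A quick, hypothetical sanity check of `rle_to_mask` on a toy run-length string (not taken from the competition data); the RLE here follows the same 1-indexed, column-major convention the decoder assumes.

# '1 3 7 2' means: 3 pixels starting at (1-indexed) position 1, then 2 pixels starting at position 7,
# counted down the columns of the transposed image.
toy_mask = rle_to_mask('1 3 7 2', height=4, width=3)
print(toy_mask)

# Note: salt_proportion divides by 10201 (a 101x101 image), so on this toy mask
# the value is only a smoke test, not a meaningful proportion.
print('salt proportion:', salt_proportion(toy_mask))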
Create training mask
train_mask['mask'] = train_mask['rle_mask'].apply(lambda x: rle_to_mask(x, 101, 101)) train_mask['salt_proportion'] = train_mask['mask'].apply(lambda x: salt_proportion(x))
_____no_output_____
MIT
kaggle_tgs_salt_identification.ipynb
JacksonIsaac/colab_notebooks