# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import Bank as bank


class Main:
    bank = bank.Bank()
    running = True
    bank.load()

    while running:
        print(("Fictional Bank by <NAME>\n"
               "-----------------------------\n"
               "Enter the number corresponding to the function you want to use.\n\n"
               "\t1: Print a list of every customer.\n"
               "\t2: Add a new customer.\n"
               "\t3: Change a customer's name.\n"
               "\t4: Remove an existing customer.\n"
               "\t5: Create a new account for an existing customer.\n"
               "\t6: Remove an account.\n"
               "\t7: Deposit to an account.\n"
               "\t8: Withdraw from an account.\n"
               "\t9: Print a list of every transaction for a specified account.\n"
               "\t10: Exit.\n"))
        in_str = input()

        if in_str == "1":
            customer_list = bank.get_customers()
            for customer in customer_list:
                print(customer[0] + " " + str(customer[1]))
            print()

        elif in_str == "2":
            running2 = True
            print("What's the name of the new customer?")
            name = input()
            print()
            while running2:
                print("What's the personal number of the new customer? (YYYYMMDD)")
                pnr = input()
                print()
                running2 = not bank.add_customer(name, int(pnr))
                if running2:
                    print("A customer in this bank already has the personal number: " + pnr
                          + ". Try again with a different personal number.")
            print("Added customer: " + name + " with the personal number: " + str(pnr) + ".\n")

        elif in_str == "3":
            running3 = True
            print("What's the new name of the customer?")
            name = input()
            print()
            while running3:
                print("What's the customer's personal number? (YYYYMMDD)")
                pnr = input()
                print()
                running3 = not bank.change_customer_name(name, int(pnr))
                if running3:
                    print("No customer in this bank has the personal number: " + pnr
                          + ". Try again with a different personal number.")

        elif in_str == "4":
            print("What's the personal number of the customer? (YYYYMMDD)")
            pnr = input()
            print()
            result = bank.remove_customer(int(pnr))
            if result == -1:
                print("No customer in this bank has the personal number: " + pnr
                      + ". Try again with a different personal number.")
            else:
                print("Removed customer: ")
                print(str(result[0]) + ", " + str(result[1]) + ", with the account(s): ")
                for acc in result[2]:
                    print(str(acc[0]) + ", " + acc[1] + " with " + str(acc[2]) + "kr")
                print("Total amount paid back: " + str(result[3]))

        elif in_str == "5":
            print("What's the personal number of the customer? (YYYYMMDD)")
            pnr = input()
            print()
            result = bank.add_account(int(pnr))
            if result == -1:
                print("No customer in this bank has the personal number: " + pnr
                      + ". Try again with a different personal number.")
            else:
                print("Created a new account with the id: " + str(result) + "\n")

        elif in_str == "6":
            print("What's the personal number of the customer? (YYYYMMDD)")
            pnr = input()
            result = bank.get_customer(int(pnr))
            if result == -1:
                print("No customer in this bank has the personal number: " + pnr
                      + ". Try again with a different personal number.")
            else:
                running6 = True
                while running6:
                    print("These accounts are accessible to this customer: ")
                    for acc in result[2]:
                        print(str(acc[0]) + ", " + acc[1] + " with " + str(acc[2]) + "kr")
                    print("\nWrite the id of the account you want to remove.")
                    acc_id = input()
                    print()
                    for acc in result[2]:
                        if int(acc_id) == acc[0]:
                            running6 = False
                            balance = bank.close_account(int(pnr), int(acc_id))
                            print("Total amount paid back: " + str(balance) + "\n")
                            break
                    if running6:
                        print("Invalid id, try again with a valid id.")

        elif in_str == "7":
            print("What's the personal number of the customer? (YYYYMMDD)")
            pnr = input()
            result = bank.get_customer(int(pnr))
            if result == -1:
                print("No customer in this bank has the personal number: " + pnr
                      + ". Try again with a different personal number.")
            else:
                running7 = True
                while running7:
                    print("These accounts are accessible to this customer: ")
                    for acc in result[2]:
                        print(str(acc[0]) + ", " + acc[1] + " with " + str(acc[2]) + "kr")
                    print("\nWrite the id of the account you want to deposit to.")
                    acc_id = input()
                    print()
                    for acc in result[2]:
                        if int(acc_id) == acc[0]:
                            running7 = False
                            print("What amount do you want to deposit?")
                            amount = input()
                            print()
                            bank.deposit(int(pnr), int(acc_id), float(amount))
                            print("Deposited: " + amount + "\n")
                            break
                    if running7:
                        print("Invalid id, try again with a valid id.")

        elif in_str == "8":
            print("What's the personal number of the customer? (YYYYMMDD)")
            pnr = input()
            result = bank.get_customer(int(pnr))
            if result == -1:
                print("No customer in this bank has the personal number: " + pnr
                      + ". Try again with a different personal number.")
            else:
                running8 = True
                while running8:
                    print("These accounts are accessible to this customer: ")
                    for acc in result[2]:
                        print(str(acc[0]) + ", " + acc[1] + " with " + str(acc[2]) + "kr")
                    print("\nWrite the id of the account you want to withdraw from.")
                    acc_id = input()
                    print()
                    for acc in result[2]:
                        if int(acc_id) == acc[0]:
                            running8 = False
                            print("What amount do you want to withdraw?")
                            amount = input()
                            print()
                            bank.withdraw(int(pnr), int(acc_id), float(amount))
                            print("Withdrew: " + amount + "\n")
                            break
                    if running8:
                        print("Invalid id, try again with a valid id.")

        elif in_str == "9":
            print("What's the personal number of the customer? (YYYYMMDD)")
            pnr = input()
            result = bank.get_customer(int(pnr))
            if result == -1:
                print("No customer in this bank has the personal number: " + pnr
                      + ". Try again with a different personal number.")
            else:
                running9 = True
                while running9:
                    print("These accounts are accessible to this customer: ")
                    for acc in result[2]:
                        print(str(acc[0]) + ", " + acc[1] + " with " + str(acc[2]) + "kr")
                    print("\nWrite the id of the account you want to list every transaction for.")
                    acc_id = input()
                    print()
                    for acc in result[2]:
                        if int(acc_id) == acc[0]:
                            running9 = False
                            transaction_list = bank.get_all_transactions_by_pnr_acc_nr(int(pnr), int(acc_id))
                            for transaction in transaction_list:
                                print(str(transaction[0]) + ", " + str(transaction[3]) + ", " + str(transaction[4]))
                            print()
                            break
                    if running9:
                        print("Invalid id, try again with a valid id.")

        elif in_str == "10":
            running = False

        else:
            print("Invalid input, try again with a valid input.\n")
# -
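# + [markdown]
# The `Bank` module imported above is not included in this notebook, so the stub below only
# documents the interface the menu loop assumes. The method names come directly from the calls
# above; the return conventions (`-1` for an unknown customer, tuples for customer and account
# data) are inferred from how the results are used and should be read as assumptions, not as
# the actual implementation.

# +
class Bank:
    """Assumed interface of the Bank class used by the menu loop above."""

    def load(self):
        """Load persisted customers and accounts."""

    def get_customers(self):
        """Return a list of (name, personal_number) tuples."""

    def add_customer(self, name, pnr):
        """Return True if the customer was added, False if pnr already exists."""

    def change_customer_name(self, name, pnr):
        """Return True if a customer with pnr was found and renamed."""

    def remove_customer(self, pnr):
        """Return -1 if pnr is unknown, else (name, pnr, accounts, total_balance)."""

    def get_customer(self, pnr):
        """Return -1 if pnr is unknown, else (name, pnr, accounts), where each
        account is an (account_id, account_type, balance) tuple."""

    def add_account(self, pnr):
        """Return -1 if pnr is unknown, else the id of the new account."""

    def close_account(self, pnr, account_id):
        """Close the account and return its balance."""

    def deposit(self, pnr, account_id, amount):
        """Add amount to the account balance."""

    def withdraw(self, pnr, account_id, amount):
        """Subtract amount from the account balance."""

    def get_all_transactions_by_pnr_acc_nr(self, pnr, account_id):
        """Return a list of transaction tuples for the account."""
# -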
Bank_uppg/Bank_uppg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="SJ3haQZWn_pX" # ## Software Engineering Practices # + [markdown] id="7-MK-YrTi0nt" # ### Clean and Modular Code # + [markdown] id="5_vdtOs0i4kj" # * Production code: Software running on production servers to handle live users and data of the intended audience. Note that this is different from production-quality code, which describes code that meets expectations for production in reliability, efficiency, and other aspects. Ideally, all code in production meets these expectations, but this is not always the case. # # * Clean code: Code that is readable, simple, and concise. Clean production-quality code is crucial for collaboration and maintainability in software development. # # * Modular code: Code that is logically broken up into functions and modules. Modular production-quality code that makes your code more organized, efficient, and reusable. # # * Module: A file. Modules allow code to be reused by encapsulating them into files that can be imported into other files. # + [markdown] id="niCLApGGjGji" # #### Refactoring Code # + [markdown] id="c2esu1evjIKl" # * Refactoring: Restructuring your code to improve its internal structure without changing its external functionality. This gives you a chance to clean and modularize your program after you've got it working. # # Since it isn't easy to write your best code while you're still trying to just get it working, allocating time to do this is essential to producing high-quality code. Despite the initial time and effort required, this really pays off by speeding up your development time in the long run. # # You become a much stronger programmer when you're constantly looking to improve your code. The more you refactor, the easier it will be to structure and write good code the first time. # + [markdown] id="PMO9jnd7jWEp" # #### Writing clean code # + [markdown] id="XF8dr3S8jiTO" # Use meaningful names. # # * Be descriptive and imply type: For booleans, you can prefix with is_ or has_ to make it clear it is a condition. You can also use parts of speech to imply types, like using verbs for functions and nouns for variables. # # * Be consistent but clearly differentiate: age_list and age is easier to differentiate than ages and age. # # * Avoid abbreviations and single letters: You can determine when to make these exceptions based on the audience for your code. If you work with other data scientists, certain variables may be common knowledge. While if you work with full stack engineers, it might be necessary to provide more descriptive names in these cases as well. (Exceptions include counters and common math variables.) # Long names aren't the same as descriptive names: You should be descriptive, but only with relevant information. For example, good function names describe what they do well without including details about implementation or highly specific uses. # # Try testing how effective your names are by asking a fellow programmer to guess the purpose of a function or variable based on its name, without looking at your code. Coming up with meaningful names often requires effort to get right. # + [markdown] id="Ni7cg4-ajygK" # Use whitespace properly. # # * Organize your code with consistent indentation: the standard is to use four spaces for each indent. You can make this a default in your text editor. 
# # * Separate sections with blank lines to keep your code well organized and readable. # # * Try to limit your lines to around 79 characters, which is the guideline given in the PEP 8 style guide. In many good text editors, there is a setting to display a subtle line that indicates where the 79 character limit is. # # For more guidelines, check out the code layout section of PEP 8 in the following notes. # + [markdown] id="gQw__Q-1j_r7" # #### Writing Modular Codes # + [markdown] id="fBqM1nwgkEIZ" # * DRY (Don't Repeat Yourself). Don't repeat yourself! Modularization allows you to reuse parts of your code. Generalize and consolidate repeated code in functions or loops. # # * Abstract out logic to improve readability. Abstracting out code into a function not only makes it less repetitive, but also improves readability with descriptive function names. Although your code can become more readable when you abstract out logic into functions, it is possible to over-engineer this and have way too many modules, so use your judgement. # # * Minimize the number of entities (functions, classes, modules, etc.) There are trade-offs to having function calls instead of inline logic. If you have broken up your code into an unnecessary amount of functions and modules, you'll have to jump around everywhere if you want to view the implementation details for something that may be too small to be worth it. Creating more modules doesn't necessarily result in effective modularization. # # * Functions should do one thing. Each function you write should be focused on doing one thing. If a function is doing multiple things, it becomes more difficult to generalize and reuse. Generally, if there's an "and" in your function name, consider refactoring. # # * Arbitrary variable names can be more effective in certain functions. Arbitrary variable names in general functions can actually make the code more readable. # # * Try to use fewer than three arguments per function # Try to use no more than three arguments when possible. This is not a hard rule and there are times when it is more appropriate to use many parameters. But in many cases, it's more effective to use fewer arguments. Remember we are modularizing to simplify our code and make it more efficient. If your function has a lot of parameters, you may want to rethink how you are splitting this up # + [markdown] id="kvOdMOWUkou-" # ### Efficient Code # + [markdown] id="RLUllllXksG4" # Knowing how to write code that runs efficiently is another essential skill in software development. Optimizing code to be more efficient can mean making it: # # * Execute faster # * Take up less space in memory/storage # # The project on which you're working determines which of these is more important to optimize for your company or product. When you're performing lots of different transformations on large amounts of data, this can make orders of magnitudes of difference in performance. # + [markdown] id="tb74a7dAlCpP" # ### Documentation # + [markdown] id="uhzh5ij_lEK0" # Documentation: Additional text or illustrated information that comes with or is embedded in the code of software Documentation is helpful for clarifying complex parts of code, making your code easier to navigate, and quickly conveying how and why different components of your program are used. 
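# + [markdown]
# As a small illustration of the DRY and "functions should do one thing" guidelines above, here
# is a sketch with made-up column names: the first version repeats the same normalization once
# per column, while the refactored version abstracts it into one descriptively named function.

# +
import pandas as pd

df = pd.DataFrame({'math': [70, 80, 90], 'physics': [60, 75, 95], 'chemistry': [50, 65, 85]})

# Repetitive: the same logic is written out once per column.
df['math_norm'] = (df['math'] - df['math'].mean()) / df['math'].std()
df['physics_norm'] = (df['physics'] - df['physics'].mean()) / df['physics'].std()
df['chemistry_norm'] = (df['chemistry'] - df['chemistry'].mean()) / df['chemistry'].std()


# DRY: one small function that does one thing, reused in a loop.
def standardize(column):
    """Return the column scaled to zero mean and unit variance."""
    return (column - column.mean()) / column.std()


for subject in ['math', 'physics', 'chemistry']:
    df[subject + '_norm'] = standardize(df[subject])
# -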
# # Several types of documentation can be added at different levels of your program: # # * Inline comments - line level # * Docstrings - module and function level # * Project documentation - project level # + [markdown] id="6l02EtHklP48" # ### Inline Comments # + [markdown] id="43ffduCllR2F" # Inline comments are text following hash symbols throughout your code. They are used to explain parts of your code, and really help future contributors understand your work. # # Comments often document the major steps of complex code. Readers may not have to understand the code to follow what it does if the comments explain it. However, others would argue that this is using comments to justify bad code, and that if code requires comments to follow, it is a sign refactoring is needed. # # Comments are valuable for explaining where code cannot. For example, the history behind why a certain method was implemented a specific way. Sometimes an unconventional or seemingly arbitrary approach may be applied because of some obscure external variable causing side effects. These things are difficult to explain with code. # + [markdown] id="QParStvilWwO" # ### Docstrings # + [markdown] id="7tCuJVsIlikf" # Docstring, or documentation strings, are valuable pieces of documentation that explain the functionality of any function or module in your code. Ideally, each of your functions should always have a docstring. # # Docstrings are surrounded by triple quotes. The first line of the docstring is a brief explanation of the function's purpose. # + id="vfcPsEWvlkyM" def population_density(population, land_area): """Calculate the population density of an area.""" return population / land_area # + id="P8rQrggClmHz" def population_density(population, land_area): """Calculate the population density of an area. Args: population: int. The population of the area land_area: int or float. This function is unit-agnostic, if you pass in values in terms of square km or square miles the function will return a density in those units. Returns: population_density: population/land_area. The population density of a particular area. """ return population / land_area # + [markdown] id="I-DABcqhlphq" # The next element of a docstring is an explanation of the function's arguments. Here, you list the arguments, state their purpose, and state what types the arguments should be. Finally, it is common to provide some description of the output of the function. Every piece of the docstring is optional; however, doc strings are a part of good coding practice. # + [markdown] id="EVqoTDD-lsmV" # ### Project Documentation # + [markdown] id="nDRKXz6clwtV" # Project documentation is essential for getting others to understand why and how your code is relevant to them, whether they are potentials users of your project or developers who may contribute to your code. A great first step in project documentation is your README file. It will often be the first interaction most users will have with your project. # # Whether it's an application or a package, your project should absolutely come with a README file. At a minimum, this should explain what it does, list its dependencies, and provide sufficiently detailed instructions on how to use it. Make it as simple as possible for others to understand the purpose of your project and quickly get something working. 
# # Translating all your ideas and thoughts formally on paper can be a little difficult, but you'll get better over time, and doing so makes a significant difference in helping others realize the value of your project. Writing this documentation can also help you improve the design of your code, as you're forced to think through your design decisions more thoroughly. It also helps future contributors to follow your original intentions. # + [markdown] id="FK9BvNQQnBiJ" # ### Testing # + [markdown] id="O86evdRAnFTR" # Testing your code is essential before deployment. It helps you catch errors and faulty conclusions before they make any major impact. Today, employers are looking for data scientists with the skills to properly prepare their code for an industry setting, which includes testing their code. # + [markdown] id="1Wycf-BSnJSC" # #### Testing And Data Science # + [markdown] id="gaPJUY6xnL_0" # * Problems that could occur in data science aren’t always easily detectable; you might have values being encoded incorrectly, features being used inappropriately, or unexpected data breaking assumptions. # # * To catch these errors, you have to check for the quality and accuracy of your analysis in addition to the quality of your code. Proper testing is necessary to avoid unexpected surprises and have confidence in your results. # # * Test-driven development (TDD): A development process in which you write tests for tasks before you even write the code to implement those tasks. # # * Unit test: A type of test that covers a “unit” of code—usually a single function—independently from the rest of the program. # + [markdown] id="qqaZjOiun0DP" # #### Unit Test # + [markdown] id="jmPjVm5Hn139" # We want to test our functions in a way that is repeatable and automated. Ideally, we'd run a test program that runs all our unit tests and cleanly lets us know which ones failed and which ones succeeded. Fortunately, there are great tools available in Python that we can use to create effective unit tests! # # The advantage of unit tests is that they are isolated from the rest of your program, and thus, no dependencies are involved. They don't require access to databases, APIs, or other external sources of information. However, passing unit tests isn’t always enough to prove that our program is working successfully. To show that all the parts of our program work with each other properly, communicating and transferring data between them correctly, we use integration tests. In this lesson, we'll focus on unit tests; however, when you start building larger programs, you will want to use integration tests as well. # # To learn more about integration testing and how integration tests relate to unit tests, see Integration Testing. That article contains other very useful links as well. # # * Unit Testing Tools # # To install pytest, run pip install -U pytest in your terminal. You can see more information on getting started here. # # Create a test file starting with test_. # Define unit test functions that start with test_ inside the test file. # # Enter pytest into your terminal in the directory of your test file and it detects these tests for you. # test_ is the default; if you wish to change this, you can learn how in this pytest configuration. # # In the test output, periods represent successful unit tests and Fs represent failed unit tests. Since all you see is which test functions failed, it's wise to have only one assert statement per test. 
Otherwise, you won't know exactly how many tests failed or which tests failed. # # Your test won't be stopped by failed assert statements, but it will stop if you have syntax errors. # + [markdown] id="DsuzeLodpY9F" # #### Test-driven development and data science # + [markdown] id="ITI-n-9gn96J" # Test-driven development: Writing tests before you write the code that’s being tested. Your test fails at first, and you know you’ve finished implementing a task when the test passes. # # Tests can check for different scenarios and edge cases before you even start to write your function. When start implementing your function, you can run the test to get immediate feedback on whether it works or not as you tweak your function. # # When refactoring or adding to your code, tests help you rest assured that the rest of your code didn't break while you were making those changes. Tests also helps ensure that your function behavior is repeatable, regardless of external parameters such as hardware and time. # # Test-driven development for data science is relatively new and is experiencing a lot of experimentation and breakthroughs. You can learn more about it by exploring the following resources. # + [markdown] id="dUzrPm3mpai0" # https://www.linkedin.com/pulse/data-science-test-driven-development-sam-savage/ # + [markdown] id="kB-5QimopdWR" # http://engineering.pivotal.io/post/test-driven-development-for-data-science/ # + [markdown] id="NeMkdiGWpffq" # http://docs.python-guide.org/en/latest/writing/tests/ # + [markdown] id="Pgdp65FDpk2s" # Logging is valuable for understanding the events that occur while running your program. For example, if you run your model overnight and the results the following morning are not what you expect, log messages can help you understand more about the context in those results occurred. Let's learn about the qualities that make a log message effective. # + [markdown] id="CwaZjv2Fp3Cn" # ### Code Review # + [markdown] id="-8mst8V8p4_x" # #### Best Practices # + [markdown] id="dy522IYBp-Rx" # At my current company, we do a fair amount of code reviews. I had never done one before I started here so it was a new experience for me. I think it’s a good idea to crystalize some of the things I look for when I’m doing code reviews and talk about the best way I’ve found to approach them. # # Briefly, a code review is a discussion between two or more developers about changes to the code to address an issue. Many articles talk about the benefits of code reviews, including knowledge sharing, code quality, and developer growth. I’ve found fewer that talk about what to look for in a review and how to discuss code under review. # + [markdown] id="yYSrqOtXqBWc" # https://www.kevinlondon.com/2015/05/05/code-review-best-practices.html # + [markdown] id="bDik0QaTqJDa" # Questions to ask yourself when conducting a code review # # First, let's look over some of the questions we might ask ourselves while reviewing code. These are drawn from the concepts we've covered in these last two lessons. # # 1. Is the code clean and modular? # # * Can I understand the code easily? # * Does it use meaningful names and whitespace? # * Is there duplicated code? # * Can I provide another layer of abstraction? # * Is each function and module necessary? # * Is each function or module too long? # # # 2. Is the code efficient? # # * Are there loops or other steps I can vectorize? # * Can I use better data structures to optimize any steps? # * Can I shorten the number of calculations needed for any steps? 
# * Can I use generators or multiprocessing to optimize any steps? # # # 3. Is the documentation effective? # # * Are inline comments concise and meaningful? # * Is there complex code that's missing documentation? # * Do functions use effective docstrings? # * Is the necessary project documentation provided? # # # 4. Is the code well tested? # # * Does the code have high test coverage? # * Do tests check for interesting cases? # * Are the tests readable? # * Can the tests be made more efficient? # # # 5. Is the logging effective? # # * Are log messages clear, concise, and professional? # * Do they include all relevant and useful information? # * Do they use the appropriate logging level? # + [markdown] id="9EPzisRXqxrO" # Now that we know what we're looking for, let's go over some tips on how to actually write your code review. When your coworker finishes up some code that they want to merge to the team's code base, they might send it to you for review. You provide feedback and suggestions, and then they may make changes and send it back to you. When you are happy with the code, you approve it and it gets merged to the team's code base. # # As you may have noticed, with code reviews you are now dealing with people, not just computers. So it's important to be thoughtful of their ideas and efforts. You are in a team and there will be differences in preferences. The goal of code review isn't to make all code follow your personal preferences, but to ensure it meets a standard of quality for the whole team. # # * Tip: Use a code linter # # This isn't really a tip for code review, but it can save you lots of time in a code review. Using a Python code linter like pylint can automatically check for coding standards and PEP 8 guidelines for you. It's also a good idea to agree on a style guide as a team to handle disagreements on code style, whether that's an existing style guide or one you create together incrementally as a team. # + [markdown] id="13AFgZxwrH7B" # Rather than commanding people to change their code a specific way because it's better, it will go a long way to explain to them the consequences of the current code and suggest changes to improve it. They will be much more receptive to your feedback if they understand your thought process and are accepting recommendations, rather than following commands. They also may have done it a certain way intentionally, and framing it as a suggestion promotes a constructive discussion, rather than opposition. # + id="XgJ0vH5ErJX-" BAD: Make model evaluation code its own module - too repetitive. BETTER: Make the model evaluation code its own module. This will simplify models.py to be less repetitive and focus primarily on building models. GOOD: How about we consider making the model evaluation code its own module? This would simplify models.py to only include code for building models. Organizing these evaluation methods into separate functions would also allow us to reuse them with different models without repeating code. # + id="g0FlZmT-rTrA" BAD: I wouldn't groupby genre twice like you did here... Just compute it once and use that for your aggregations. BAD: You create this groupby dataframe twice here. Just compute it once, save it as groupby_genre and then use that to get your average prices and views. GOOD: Can we group by genre at the beginning of the function and then save that as a groupby object? We could then reference that object to get the average prices and views without computing groupby twice.
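# + [markdown]
# Returning to the pytest workflow described in the Unit Test section above (and to the
# "is the code well tested?" checklist), here is a minimal sketch; the function and file names
# are only an example. In a real project the function would live in `compute.py`, the tests in
# `test_compute.py`, and running `pytest` in that directory would collect them. Each test keeps
# to a single assert so a failure points at exactly one case.

# +
# compute.py
def nearest_square(num):
    """Return the largest perfect square that is less than or equal to num."""
    root = 0
    while (root + 1) ** 2 <= num:
        root += 1
    return root ** 2


# test_compute.py
# from compute import nearest_square  # in a separate test file, import the function under test


def test_nearest_square_5():
    assert nearest_square(5) == 4


def test_nearest_square_9():
    assert nearest_square(9) == 9


def test_nearest_square_negative():
    assert nearest_square(-12) == 0
# -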
# + [markdown] id="rQ2O3OdUrfk-" # When providing a code review, you can save the author time and make it easy for them to act on your feedback by writing out your code suggestions. This shows you are willing to spend some extra time to review their code and help them out. It can also just be much quicker for you to demonstrate concepts through code rather than explanations. # + id="itun8tKLrjHm" BAD: You can do this all in one step by using the pandas str.split method. GOOD: We can actually simplify this step to the line below using the pandas str.split method. Found this on this stack overflow post: https://stackoverflow.com/questions/14745022/how-to-split-a-column-into-two-columns df['first_name'], df['last_name'] = df['name'].str.split(' ', 1).str # + [markdown] id="aDXZHGCAr6Yw" # ### Introduction to Object-Oriented Programming # + [markdown] id="ddpIp8O3sIZK" # ### Upload a package to PyPi # + [markdown] id="Gc7B1I6msO6V" # ### Deploy a web data dashboard
Software_Engineering_Python_Practices.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/CarlBayking/LinearAlgebra/blob/main/Assignment6_Bayking.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="YMtOp6hHEY0_" # ###Linear Algebra For ECE # + [markdown] id="k9Vu2QNqEevk" # # # # # Now that you have a fundamental knowledge about representing and operating with vectors as well as the fundamental of matrices, we'll try to do the same operations with matrices and even more. # + [markdown] id="T_UkW8gqEluc" # ### Objectives # # at the end of this activity you will be able to: # # # 1. Be familiar with the fundamental matrix operations # 2. Apply the operations to solve intermediate equations # 3. Apply Matrix Algebra in engineering solutions # # # + [markdown] id="-cjiMzq9FIgV" # ### Discussion # + id="wjmJbI7qFMbt" import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # + [markdown] id="6TlPqLMKHbdV" # ### Transposition # # one of the fundamental operations in matrix algebra in Transposition. the transpose of a matrix is done by flipping the values of its elements over its diagonal. with this, the rows and columns from the original matrix will be switched. so for a matrix $A$ is denoted as matrix $A^T$. # For Example: # + [markdown] id="pT8AYTo2Htjr" # $$A = \begin{bmatrix} 1 & 2 & 5\\5 & -1 &0 \\ 0 & -3 & 3\end{bmatrix} $$ # # # # # $$A^T = \begin{bmatrix} 1 & 5 & 0\\2 & -1 & -3 \\ 5 & 0 & 3\end{bmatrix}$$ # # + colab={"base_uri": "https://localhost:8080/"} id="UnegmvoeDw0B" outputId="e6f15ec8-680f-46e4-b5bc-4d83f4105416" A= np.array([ [1, 2, 5], [5, -1, 0], [0, -3, 3] ]) A # + colab={"base_uri": "https://localhost:8080/"} id="kfUQ_kvSJ7OM" outputId="574ac02c-459d-4ac6-9873-05f0d56ebc07" AT1 = np.transpose(A) AT1 # + colab={"base_uri": "https://localhost:8080/"} id="0fzvUmMMKnj8" outputId="99c05b13-00c6-4838-cbfa-dbdf49b30c79" AT2 = A.T AT2 # + colab={"base_uri": "https://localhost:8080/"} id="kI1MmkSxKtO7" outputId="e1bf7052-f1e7-430c-cb43-0f1a4fc912d3" np.array_equiv(AT1, AT2) # + colab={"base_uri": "https://localhost:8080/"} id="rbmQLvw2K062" outputId="0f925c83-bc94-4067-d790-08fb98dffd7c" B = np.array([ [1, 2, 3, 4], [1, 0, 2, 1], ]) B.shape # + colab={"base_uri": "https://localhost:8080/"} id="ErqMecLJLORr" outputId="de9e2ae4-de60-43d0-c36d-9524df526176" np.transpose(B).shape # + colab={"base_uri": "https://localhost:8080/"} id="2rLPeWDdLVAj" outputId="7dbb9524-f202-4fc1-958c-eeba2d01d6f1" B.T.shape # + [markdown] id="pcbFbAM7L8Mi" # ### Try to Create your own matrix (you can try non-square matrix to test transposition) # + colab={"base_uri": "https://localhost:8080/"} id="f9ntMozqLZJm" outputId="14153f2b-8123-4154-9d83-cdfa4d6749bb" Carl= np.array([ [8, 2, 7, 1], [9, 1, 2, 5], [5, 2, 1, 0], ]) Carl.shape # + colab={"base_uri": "https://localhost:8080/"} id="sTvk4cdML05W" outputId="a4ea5a60-a484-4e88-ebae-1cd77b12ad8f" np.transpose(Carl).shape # + colab={"base_uri": "https://localhost:8080/"} id="T9w-K16QL4kr" outputId="397ac0e3-8482-44ca-986c-8173424b2889" Carl.T.shape # + colab={"base_uri": "https://localhost:8080/"} id="QJtSypjPQu9z" outputId="8ef5151d-9427-4f73-ac5e-ea3641598115" CarlT = Carl.T CarlT # + [markdown] id="jHq2_QXROCOm" # ### Dot Product # # -If you recall 
Dot product from Labratory Activity before, We will try to implement the same operations with matrices. In matrix dot product we are going to get the sum of products of the vectors by row column parts so if we have 2 matrices $X$ and $Y$: # + [markdown] id="HvZ0P2h7SrWs" # $$X = \begin{bmatrix}x_{(0,0)}&x_{(0,1)}\\ x_{(1,0)}&x_{(1,1)}\end{bmatrix}, Y = \begin{bmatrix}y_{(0,0)}&y_{(0,1)}\\ y_{(1,0)}&y_{(1,1)}\end{bmatrix}$$ # # The dot product will then be computed as: # $$X \cdot Y= \begin{bmatrix} x_{(0,0)}*y_{(0,0)} + x_{(0,1)}*y_{(1,0)} & x_{(0,0)}*y_{(0,1)} + x_{(0,1)}*y_{(1,1)} \\ x_{(1,0)}*y_{(0,0)} + x_{(1,1)}*y_{(1,0)} & x_{(1,0)}*y_{(0,1)} + x_{(1,1)}*y_{(1,1)} # \end{bmatrix}$$ # # So if we assign values to $X$ and $Y$: # $$X = \begin{bmatrix}1&2\\ 0&1\end{bmatrix}, Y = \begin{bmatrix}-1&0\\ 2&2\end{bmatrix}$$ # + [markdown] id="trEmdZZ5Swji" # $$X \cdot Y= \begin{bmatrix} 1*-1 + 2*2 & 1*0 + 2*2 \\ 0*-1 + 1*2 & 0*0 + 1*2 \end{bmatrix} = \begin{bmatrix} 3 & 4 \\2 & 2 \end{bmatrix}$$ # This could be achieved programmatically using `np.dot()`, `np.matmul()` or the `@` operator. # + id="l9xzEvaEOHHb" X = np.array([ [1,2], [0,1] ]) Y = np.array([ [-1,0], [2,2] ]) # + colab={"base_uri": "https://localhost:8080/"} id="qF7-rnY7OYM8" outputId="f508d802-de55-4ed5-a639-b73e882d1d43" np.dot(X ,Y) # + colab={"base_uri": "https://localhost:8080/"} id="d67OdO6UOdcI" outputId="fa3b3543-126a-4374-f8a6-ad75e43bc4a0" X.dot(Y) # + colab={"base_uri": "https://localhost:8080/"} id="8yaxjej0Og6z" outputId="b96762ed-505f-4da1-ae66-1fe4dc272e76" X @ Y # + colab={"base_uri": "https://localhost:8080/"} id="R4tItcNtOjab" outputId="a07206af-4935-4df4-a12c-be8abf71f947" np.matmul(X, Y) # + [markdown] id="qvJFEdGIOs1j" # try # + id="14Quwth5Otx6" J = np.array([ [-1,2,3], [3,1,5], [8,9,0] ]) C = np.array([ [-1,0,9], [2,2,9], [9,9,9] ]) # + colab={"base_uri": "https://localhost:8080/"} id="3j8-QAHQPYI0" outputId="5a926960-d835-4033-c37e-251ddbf66dd0" np.dot(J,C) # + colab={"base_uri": "https://localhost:8080/"} id="hnhHf1eRPeET" outputId="3556b18a-5dff-41fc-f1ad-30d9cdbcf1f4" C.dot(J) # + colab={"base_uri": "https://localhost:8080/"} id="aWiskTSIPlpr" outputId="1f5280d5-0b55-4739-941c-5d5181e84459" np.matmul(J,C) # + [markdown] id="5q9iuSfDP5cT" # In matrix dot products there are additional rules compared with vector dot products. Since vector dot products were just in one dimension there are less restrictions. Since now we are dealing with Rank 2 vectors we need to consider some rules: # # ### Rule 1: The inner dimensions of the two matrices in question must be the same. # # So given a matrix $A$ with a shape of $(a,b)$ where $a$ and $b$ are any integers. If we want to do a dot product between $A$ and another matrix $B$, then matrix $B$ should have a shape of $(b,c)$ where $b$ and $c$ are any integers. So for given the following matrices: # # $$A = \begin{bmatrix}2&4\\5&-2\\0&1\end{bmatrix}, B = \begin{bmatrix}1&1\\3&3\\-1&-2\end{bmatrix}, C = \begin{bmatrix}0&1&1\\1&1&2\end{bmatrix}$$ # # So in this case $A$ has a shape of $(3,2)$, $B$ has a shape of $(3,2)$ and $C$ has a shape of $(2,3)$. So the only matrix pairs that is eligible to perform dot product is matrices $A \cdot C$, or $B \cdot C$. 
# + id="0jFZdis7QsSb" colab={"base_uri": "https://localhost:8080/"} outputId="96179fd0-e579-4bca-8029-d08bb86e54fc" A = np.array([ [2,4], [5,-2], [0,1] ]) B = np.array([ [1,1], [3,3], [-1,-2] ]) C = np.array([ [0,1,1], [1,2,2] ]) print(A.shape) print(B.shape) print(C.shape) # + colab={"base_uri": "https://localhost:8080/"} id="9HmZ-8TfSFxs" outputId="b2206230-78b1-4942-d64c-0abba868d44a" X = np.array([ [1,2,3,0] ]) Y = np.array([ [1,0,4,-1] ]) print(X.shape) print(Y.shape) # + id="hFlHumJOTkKb" colab={"base_uri": "https://localhost:8080/"} outputId="3c2f5af3-d17c-4beb-b0a8-cb159dd4c172" A @ C # + id="unVF-RVcVCEO" colab={"base_uri": "https://localhost:8080/"} outputId="8f189715-4e1b-4464-f88c-6fbbd746636a" B @ C # + [markdown] id="MtilVmEyThJs" # If you would notice the shape of the dot product changed and its shape is not the same as any of the matrices we used. The shape of a dot product is actually derived from the shapes of the matrices used. So recall matrix $A$ with a shape of $(a,b)$ and matrix $B$ with a shape of $(b,c)$, $A \cdot B$ should have a shape $(a,c)$. # + colab={"base_uri": "https://localhost:8080/"} id="XltBk5FdVctP" outputId="b9908858-2525-4ccc-8e08-d55ca0df8175" A @ B.T # + colab={"base_uri": "https://localhost:8080/"} id="lcSWHrR_SvL9" outputId="e9e43edf-9471-40a1-c20c-987080949322" Y.T @ X # + colab={"base_uri": "https://localhost:8080/"} id="-WskpVqHSzYd" outputId="dbd8d94d-54c2-432a-f983-2579f2dc4a8a" X @ Y.T # + [markdown] id="N3t18AHtVinL" # And youcan see that when you try to multiply A and B, it returns `ValueError` pertaining to matrix shape mismatch. # + [markdown] id="tSKnE6hCS4Ji" # ### Rule 2: Dot Product has special properties # # Dot products are prevalent in matrix algebra, this implies that it has several unique properties and it should be considered when formulation solutions: # 1. $A \cdot B \neq B \cdot A$ # 2. $A \cdot (B \cdot C) = (A \cdot B) \cdot C$ # 3. $A\cdot(B+C) = A\cdot B + A\cdot C$ # 4. $(B+C)\cdot A = B\cdot A + C\cdot A$ # 5. $A\cdot I = A$ # 6. 
$A\cdot \emptyset = \emptyset$ # + [markdown] id="7xJxYPWgV5hz" # ### Try Your Own Matrix Code # + colab={"base_uri": "https://localhost:8080/"} id="6V5VGEZDS9hN" outputId="6807d8a8-963a-4a7e-fbdc-1ce10babd553" L = np.array([ [1,2,3], [4,5,6], [7,8,9] ]) M = np.array([ [9,8,7], [6,5,4], [9,2,9] ]) N = np.array([ [4,32,9], [9,5,1], [9,9,-9] ]) print(L.shape) print(M.shape) print(N.shape) # + colab={"base_uri": "https://localhost:8080/"} id="om1oLciWXjw_" outputId="089f0338-bfc1-4bcc-e7f2-c693748a1f9b" np.eye(3) # + colab={"base_uri": "https://localhost:8080/"} id="CwrSv--LXriC" outputId="ded6bbf1-4523-46c2-e6c6-2d36b7be9b4a" L.dot(np.eye(3)) # + colab={"base_uri": "https://localhost:8080/"} id="Rz_ii5dLY2-l" outputId="d7bb2b14-7d96-4169-99e5-1b0d314e1f1f" np.array_equal(L@M, M@L) # + colab={"base_uri": "https://localhost:8080/"} id="pW8-dA4kYGtU" outputId="3fd032ce-b48c-43be-8c6e-f13f12cc8b3b" James = L @ (M @ N) James # + colab={"base_uri": "https://localhost:8080/"} id="D9-3tlhZTz5d" outputId="8f49bb5f-f197-43fe-9d4b-00300347c641" Carl = L @ M Carl # + colab={"base_uri": "https://localhost:8080/"} id="8L-PuIS4UBCa" outputId="7d129617-186a-4471-e997-a7881bcc5f84" Bayking = (L @ M) @ N Bayking # + colab={"base_uri": "https://localhost:8080/"} id="IMLuhMH6YQEq" outputId="4b318adc-8aa5-47ba-b233-53b9acc43e8b" np.array_equal(James, Carl) # + colab={"base_uri": "https://localhost:8080/"} id="gWwkWB8wVXBz" outputId="4f7e091c-9e0d-490c-8e75-f7c2352a520e" np.array_equiv(James, Bayking) # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="nimf4O_ZZUia" outputId="1e486fd5-5abd-4045-bc82-7aca0ff8ab24" np.eye(L) # + colab={"base_uri": "https://localhost:8080/"} id="bP-ntDncZfiv" outputId="6127c047-aca9-4fd1-bb59-5de7d45f5e3e" L @ James # + colab={"base_uri": "https://localhost:8080/"} id="LPIbLgurZi2i" outputId="a90d23bb-b5df-4e70-cc01-77c350be065b" z_mat = np.zeros(L.shape) z_mat # + colab={"base_uri": "https://localhost:8080/"} id="rOzSuyxKZvEr" outputId="c4ea5e9b-5f3d-43a1-a2f4-fd18b98affac" a_dot_z = L.dot(np.zeros(L.shape)) a_dot_z # + colab={"base_uri": "https://localhost:8080/"} id="K-b4NnNQaQ96" outputId="5cd28307-7106-4af8-c792-e5af8ae48ac9" np.array_equal(z_mat, a_dot_z) # + colab={"base_uri": "https://localhost:8080/"} id="LanbhctMaeRK" outputId="6b4a5508-4eb9-430b-beaa-040a66b53a22" null_mat = np.empty(L.shape, dtype=float) null = np.array(null_mat,dtype=float) print(null) np.allclose(a_dot_z,null) # + [markdown] id="2muYdWd5ZAfT" # ###$ Determinant$ # # A determinant is a scalar value derived from a square matrix. The determinant is a fundamental and important value used in matrix algebra. Although it will not be evident in this laboratory on how it can be used practically, but it will be reatly used in future lessons. # # The determinant of some matrix $A$ is denoted as $det(A)$ or $|A|$. So let's say $A$ is represented as: # $$A = \begin{bmatrix}a_{(0,0)}&a_{(0,1)}\\a_{(1,0)}&a_{(1,1)}\end{bmatrix}$$ # We can compute for the determinant as: # $$|A| = a_{(0,0)}*a_{(1,1)} - a_{(1,0)}*a_{(0,1)}$$ # So if we have $A$ as: # $$A = \begin{bmatrix}1&4\\0&3\end{bmatrix}, |A| = 3$$ # # But you might wonder how about square matrices beyond the shape $(2,2)$? We can approach this problem by using several methods such as co-factor expansion and the minors method. This can be taught in the lecture of the laboratory but we can achieve the strenuous computation of high-dimensional matrices programmatically using Python. We can achieve this by using `np.linalg.det()`. 
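# + [markdown]
# Before handing the computation to NumPy in the next cells, here is a small sketch of the
# co-factor (Laplace) expansion mentioned above, showing how the $(2,2)$ formula generalizes to
# any square matrix. This is only an illustration of the method; `np.linalg.det()` remains the
# practical choice for larger matrices.

# +
def det_cofactor(M):
    """Determinant by co-factor expansion along the first row (illustrative, O(n!))."""
    n = len(M)
    if n == 1:
        return M[0][0]
    if n == 2:
        return M[0][0] * M[1][1] - M[1][0] * M[0][1]
    total = 0
    for j in range(n):
        # Minor: remove row 0 and column j, then weight by the alternating sign.
        minor = [row[:j] + row[j + 1:] for row in M[1:]]
        total += (-1) ** j * M[0][j] * det_cofactor(minor)
    return total


T = [[1, 5, 2],
     [3, -1, -1],
     [0, -2, 1]]
print(det_cofactor(T), np.linalg.det(np.array(T)))
# -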
# + colab={"base_uri": "https://localhost:8080/"} id="7eJqHj8wZCOC" outputId="c9919663-201d-4d70-f8b3-33e519d164ca" A = np.array([ [1,4], [0,3] ]) np.linalg.det(A) # + colab={"base_uri": "https://localhost:8080/"} id="17g5sf7iZqn5" outputId="7aa624d9-0542-44da-ae58-77e9d94975d3" B = np.array([ [1, 5, 2], [3, -1, -1], [0, -2, 1] ]) np.linalg.det(B) # + id="ae84du6HZ8Xi" colab={"base_uri": "https://localhost:8080/"} outputId="7c2a6fa7-c062-4d7b-fafe-aac781252e5c" B = np.array([ [1,3,5,6], [0,3,1,3], [3,1,8,2], [5,2,6,8] ]) np.linalg.det(B) # + [markdown] id="eDDkRxsGaSvI" # ### $Inverse$ # # The inverse of a matrix is another fundamental operation in matrix algebra. Determining the inverse of a matrix let us determine if its solvability and its characteristic as a system of linear equation — we'll expand on this in the nect module. Another use of the inverse matrix is solving the problem of divisibility between matrices. Although element-wise division exists but dividing the entire concept of matrices does not exists. Inverse matrices provides a related operation that could have the same concept of "dividing" matrices. # # Now to determine the inverse of a matrix we need to perform several steps. So let's say we have a matrix $M$: # $$M = \begin{bmatrix}1&7\\-3&5\end{bmatrix}$$ # First, we need to get the determinant of $M$. # $$|M| = (1)(5)-(-3)(7) = 26$$ # Next, we need to reform the matrix into the inverse form: # $$M^{-1} = \frac{1}{|M|} \begin{bmatrix} m_{(1,1)} & -m_{(0,1)} \\ -m_{(1,0)} & m_{(0,0)}\end{bmatrix}$$ # So that will be: # $$M^{-1} = \frac{1}{26} \begin{bmatrix} 5 & -7 \\ 3 & 1\end{bmatrix} = \begin{bmatrix} \frac{5}{26} & \frac{-7}{26} \\ \frac{3}{26} & \frac{1}{26}\end{bmatrix}$$ # For higher-dimension matrices you might need to use co-factors, minors, adjugates, and other reduction techinques. To solve this programmatially we can use `np.linalg.inv()`. # + colab={"base_uri": "https://localhost:8080/"} id="jy7hO16kaUwB" outputId="491dec05-60c5-4e14-d2ee-86e5ed0378ab" M = np.array([ [1, 7], [-3, 5] ]) np.array(M @ np.linalg.inv(M), dtype=int ) # + colab={"base_uri": "https://localhost:8080/"} id="-0YN6OWQa5DB" outputId="11d595f0-c01e-410c-9570-4751ea697ea6" P = np.array([ [6, 9, 0], [4, 2, -1], [3, 6, 7] ]) Q = np.linalg.inv(P) Q # + colab={"base_uri": "https://localhost:8080/"} id="devfV4sRbmD5" outputId="8d748552-5bb9-4286-ae44-88746b1e2b5b" P @ Q # + colab={"base_uri": "https://localhost:8080/"} id="1yVlIBwmdx6Q" outputId="fc5f533f-7c49-41a3-bfd2-811531b99b8a" N = np.array([ [18,5,23,1,0,33,5], [0,45,0,11,2,4,2], [5,9,20,0,0,0,3], [1,6,4,4,8,43,1], [8,6,8,7,1,6,1], [-5,15,2,0,0,6,-30], [-2,-5,1,2,1,20,12], ]) N_inv = np.linalg.inv(N) np.array(N @ N_inv,dtype=int) # + [markdown] id="KQHak_3Qd3qJ" # To validate the wether if the matric that you have solved is really the inverse, we follow this dot product property for a matrix $M$: # $$M\cdot M^{-1} = I$$ # + colab={"base_uri": "https://localhost:8080/"} id="ofzL4PyVd6xY" outputId="55297dd5-b48f-49c3-d89a-9a188fee94b4" squad = np.array([ [1.0, 1.0, 0.5], [0.7, 0.7, 0.9], [0.3, 0.3, 1.0] ]) weights = np.array([ [0.2, 0.2, 0.6] ]) p_grade = squad @ weights.T p_grade # + [markdown] id="PF3HkRQbeAx6" # ### $ACTIVITY:$ # # ### $TASK # 1:$ # Prove and implement the remaining 6 matrix multiplication properties. You may create your own matrices in which their shapes should not be lower than $(3,3)$. 
# In your methodology, create individual flowcharts for each property and discuss the property you would then present your proofs or validity of your implementation in the results section by comparing your result to present functions from NumPy. # + [markdown] id="1aRPQGNVnjFg" # $#1$ # + colab={"base_uri": "https://localhost:8080/"} id="IYE-mqhneY8x" outputId="25d760d3-93b0-4dd8-9889-e00972a3bb9e" (1) ## A.B =/= to B.A A = np.array([ [1,2,3,4], [4,5,6,7], [7,8,9,0], [4,3,2,1] ]) B = np.array([ [9,8,7,6], [6,5,4,3], [9,2,9,0], [1,2,3,4] ]) print(A.shape) print(B.shape) James = A @ B Bayking = B @ A # + colab={"base_uri": "https://localhost:8080/"} id="gi8SamFchapp" outputId="f92e23ca-df5a-4505-d56f-53fa1f9e4273" np.array_equiv(James, Bayking) # + [markdown] id="3_R0NhbunfrU" # $#2$ # + colab={"base_uri": "https://localhost:8080/"} id="qJJ4JvgAiQse" outputId="dd7e22c5-91ab-4ef5-88c3-e716336f8ac9" (2) ## A@(B@C) = (A@B)@C A_2 = np.array([ [1,2,3,4], [4,5,6,7], [72,8,8,0], [1,3,2,9] ]) B_2 = np.array([ [1,71,2,96], [9,1,2,6], [9,2,9,0], [1,2,3,4] ]) C_2 = np.array([ [1,2,3,4], [5,6,7,8], [1,0,9,9], [1,2,3,4] ]) print(A_2.shape) print(B_2.shape) print(C_2.shape) J_2 = A_2 @ (B_2 @ C_2) B_2 = (A_2 @ B_2) @ C_2 # + colab={"base_uri": "https://localhost:8080/"} id="MLhNHAVfotbv" outputId="1bbdb4d6-b64b-4c7e-f914-8ea7f03f3b24" J_2 # + colab={"base_uri": "https://localhost:8080/"} id="XoqNcaRpowa_" outputId="330d0ec7-0220-4fd4-f034-107c413de498" B_2 # + colab={"base_uri": "https://localhost:8080/"} id="l5uCmfi3o6rr" outputId="5c922193-f70d-42d8-b39c-05ca0f2f081f" np.array_equiv(J_2, B_2) # + [markdown] id="zIFdBTHgnbUJ" # $#3$ # + colab={"base_uri": "https://localhost:8080/"} id="fveodhSgis9T" outputId="549ea644-b3a0-471d-f024-9d07201acfe0" (3) ## A@(B+C) = A@B + A@C A = np.array([ [1,2,3,4], [4,5,6,7], [7,8,9,0], [4,3,2,1] ]) B = np.array([ [9,8,7,6], [6,5,4,3], [9,2,9,0], [1,2,3,4] ]) C = np.array([ [1,2,3,4], [5,6,7,8], [1,0,9,9], [1,2,3,4] ]) print(A.shape) print(B.shape) print(C.shape) James = A @ (B + C) Bayking = A @ B + A @ C James # + colab={"base_uri": "https://localhost:8080/"} id="uyyjuuAonSPq" outputId="20fc5c8b-b91e-4867-b898-b2d852c7c86e" Bayking # + colab={"base_uri": "https://localhost:8080/"} id="t0akhBs7kLLv" outputId="c163e2fd-36c0-44e3-b5e3-6c78a38374da" np.array_equal(James, Bayking) # + [markdown] id="v7bgcpE_nVvv" # $#4$ # + colab={"base_uri": "https://localhost:8080/"} id="wEbDh-Sqk3N4" outputId="f2db8e89-6a18-4748-94f5-27fcfd9b2e69" (4) ##(B+C)@A = B@A + C.A A = np.array([ [1,2,3,4], [9,9,8,8], [1,2,3,4], [1,1,1,1] ]) B = np.array([ [9,8,7,6], [2,0,1,2], [1,2,3,4], [1,2,3,4] ]) C = np.array([ [1,4,2,3], [5,6,7,8], [1,0,9,9], [7.5,9,1,2] ]) print(A.shape) print(B.shape) print(C.shape) J_4 = (B + C) @ A B_4 = B @ A + C @ A J_4 # + colab={"base_uri": "https://localhost:8080/"} id="dood33Kcm6DZ" outputId="d4f7a9ff-fcc3-4b2e-a24c-d94a042d3c92" B_4 # + colab={"base_uri": "https://localhost:8080/"} id="so2vDhn6mYA9" outputId="36b4acce-1219-47c1-aa23-9bfeb0ccc331" np.array_equal(J_4 , B_4) # + [markdown] id="0-DDatlynm7H" # $#5$ # + colab={"base_uri": "https://localhost:8080/"} id="5RjfUCGLnotb" outputId="47a6cb35-eb7e-4594-8f1c-ff9f1e91d677" (5) ## A @ I = A np.identity(4) # + id="V-glVnQ8JUTk" A_5 = np.array ([ [8,7,6,5], [1,9,0,9], [7,7,7,7], [1,2,9,8] ]) # + colab={"base_uri": "https://localhost:8080/"} id="kDr3Bsa2JVpI" outputId="9b1a5d2b-baf3-4963-b602-f7285d26b959" A_5.dot(np.identity(4)) # + colab={"base_uri": "https://localhost:8080/"} id="mnCaNb9KJiUW" 
outputId="80e40553-16a7-47eb-912f-94e8d4887e8d" np.array_equiv(A_4 , np.identity(4) @ A_4) # + [markdown] id="EGTYynpznpBn" # $#6$ # + colab={"base_uri": "https://localhost:8080/"} id="sq6KAX2en0X-" outputId="0f13ba6e-211f-4802-8595-4296611aa3fa" (6) ## A @ ∅ = ∅ A_6 = np.array([ [96,97,98,99], [99,99,99,99], [100,10000,100000,100000], [1,2,3,4] ]) print(A_6.shape) # + colab={"base_uri": "https://localhost:8080/"} id="4d3TjrQ3N6iX" outputId="f01f5b7d-4055-4fb4-bb0f-714867a47c51" np.zeros(A_6.shape) # + colab={"base_uri": "https://localhost:8080/"} id="DzNsO3TbM2fr" outputId="5f3d240b-dc73-4f81-f4c3-5c9eb69c9136" null_mat = np.empty(A_6.shape , dtype=float) null = np.array(null_mat,dtype=float) print(null) # + colab={"base_uri": "https://localhost:8080/"} id="4_M2x2SXLfqk" outputId="bce28a72-6742-4524-d777-8cf852d6d6e2" A_6.dot(np.zeros(A_6.shape)) # + colab={"base_uri": "https://localhost:8080/"} id="NPYKoAUOMktU" outputId="c8f2dfd0-08d3-417d-d449-36974bdcd317" np.array_equiv(A @ np.zeros(A_6.shape), null) # + [markdown] id="wQTJU-dRJirM" # ### $CONCLUSION:$ # # Basically, the laboratory experiment's goals were satisfied because we were able to implement the 6 matrix multiplication property. Using the phyton fundamentals that were taught to us in our earlier experiments, we were able to complete prove ,implement and run the 6 matrix properties and its result is as expected. # # Matrixes are important not only in mathematics, but also in everyday life. Matrices can also be used to represent real-world data like the world's population or a country's population and many more. They're used to make graphs, generate statistics, and perform scientific investigations and research across a wide range of topics. #
Assignment6_Bayking.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from datetime import timedelta as delta from os import path from glob import glob import numpy as np import dask import math import xarray as xr from netCDF4 import Dataset import warnings import matplotlib.pyplot as plt import pickle warnings.simplefilter('ignore', category=xr.SerializationWarning) from operator import attrgetter from parcels import AdvectionRK4 from parcels import Field from parcels import FieldSet from parcels import JITParticle, ScipyParticle from parcels import ParticleFile from parcels import ParticleSet from parcels import Variable from parcels import VectorField from parcels import ErrorCode # + #input wstokes = False #False || True #data_in_waves = "/projects/0/topios/hydrodynamic_data" data_in_mit = "../../input/modelfields/MITgcm4km" data_out = "../../input/particles" filename_out = "Unbeaching_200810" galapagos_domain = [-94, -87, -3.5, 3] seeding_distance = 1 #unit: lon/lat degree seeding_resolution = 4 #unit: gridpoints seeding_frequency = 5 #unit: days advection_duration = 90 #unit: days output_frequency = 6 #unit: hours length_simulation = 4*365 #unit: days #Get indices for Galapagos domain to run simulation def getclosest_ij(lats,lons,latpt,lonpt): """Function to find the index of the closest point to a certain lon/lat value.""" dist_lat = (lats-latpt)**2 # find squared distance of every point on grid dist_lon = (lons-lonpt)**2 minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element minindex_lon = dist_lon.argmin() return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index dfile = Dataset(data_in_mit+'/RGEMS3_Surf_grid.nc') lon = dfile.variables['XG'][:] lat = dfile.variables['YG'][:] iy_min, ix_min = getclosest_ij(lat, lon, galapagos_domain[2], galapagos_domain[0]) iy_max, ix_max = getclosest_ij(lat, lon, galapagos_domain[3], galapagos_domain[1]) #Load distance and seaborder map file = open('distance_map', 'rb') data_distance = pickle.load(file) file.close() file = open('seaborder_map', 'rb') data_seaborder = pickle.load(file) file.close() lat_high = data_distance['lat'] lon_high = data_distance['lon'] distance_map = data_distance['distance'] seaborder_map = data_seaborder['seaborder'] # + # add MITgcm field varfiles = sorted(glob(data_in_mit + "/RGEMS_20*.nc")) meshfile = glob(data_in_mit+"/RGEMS3_Surf_grid.nc") files_MITgcm = {'U': {'lon': meshfile, 'lat': meshfile, 'data': varfiles}, 'V': {'lon': meshfile, 'lat': meshfile, 'data': varfiles}} variables_MITgcm = {'U': 'UVEL', 'V': 'VVEL'} dimensions_MITgcm = {'lon': 'XG', 'lat': 'YG', 'time': 'time'} indices_MITgcm = {'lon': range(ix_min,ix_max), 'lat': range(iy_min,iy_max)} fieldset_MITgcm = FieldSet.from_mitgcm(files_MITgcm, variables_MITgcm, dimensions_MITgcm, indices = indices_MITgcm,) fieldset = fieldset_MITgcm # + # add unbeaching field file_UnBeach = 'UnbeachingUV.nc' variables_UnBeach = {'U_unbeach': 'unBeachU', 'V_unbeach': 'unBeachV'} dimensions_UnBeach = {'lon': 'XG', 'lat': 'YG'} fieldset_UnBeach = FieldSet.from_c_grid_dataset(file_UnBeach, variables_UnBeach, dimensions_UnBeach, indices = indices_MITgcm, tracer_interp_method='cgrid_velocity') fieldset.add_field(fieldset_UnBeach.U_unbeach) fieldset.add_field(fieldset_UnBeach.V_unbeach) UVunbeach = VectorField('UVunbeach', fieldset.U_unbeach, fieldset.V_unbeach) 
fieldset.add_vector_field(UVunbeach) # + # add distance and seaborder map fieldset.add_field(Field('distance', data = distance_map, lon = lon_high, lat = lat_high, mesh='spherical', interp_method = 'nearest')) fieldset.add_field(Field('island', data = seaborder_map, lon = lon_high, lat = lat_high, mesh='spherical', interp_method = 'nearest')) # + # get all lon, lat that are land fU=fieldset_MITgcm.U fieldset_MITgcm.computeTimeChunk(fU.grid.time[0], 1) lon = np.array(fU.lon[:]) lat = np.array(fU.lat[:]) LandMask = fU.data[0,:,:] LandMask = np.array(LandMask) land = np.where(LandMask == 0) # seed particles at seeding_distance from land lons = np.array(fU.lon[::seeding_resolution]) lats = np.array(fU.lat[::seeding_resolution]) yy, xx = np.meshgrid(lats,lons) xcoord = np.reshape(xx,len(lons)*len(lats)) ycoord = np.reshape(yy,len(lons)*len(lats)) startlon=[] startlat=[] for i in range(xcoord.shape[0]): dist = (xcoord[i]-lon[land[1]])**2 + (ycoord[i]-lat[land[0]])**2 minindex = dist.argmin() if dist[minindex]<seeding_distance and dist[minindex] != 0: startlon.append(xcoord[i]) startlat.append(ycoord[i]) # + #functions to add to the kernel def AdvectionRK4(particle, fieldset, time): if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1] lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2] lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3] particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt particle.beached = 2 def BeachTesting(particle, fieldset, time): if particle.beached == 2: (u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] particle.uvel = u particle.vvel = v if fabs(u) < 1e-14 and fabs(v) < 1e-14: particle.beached = 1 else: particle.beached = 0 def UnBeaching(particle, fieldset, time): if particle.beached == 1: (ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon += ub * particle.dt * 400/particle.dt particle.lat += vb * particle.dt * 400/particle.dt particle.beached = 0 particle.unbeachCount += 1 def UnBeaching2(particle, fieldset, time): if particle.beached == 1: particle.lon = particle.prevlon particle.lat = particle.prevlat particle.beached = 0 particle.unbeachCount += 1 particle.prevlon = particle.lon particle.prevlat = particle.lat def Age(fieldset, particle, time): particle.age = particle.age + math.fabs(particle.dt) if particle.age > 90*86400: particle.delete() def SampleInfo(fieldset, particle, time): particle.distance = fieldset.distance[time, particle.depth, particle.lat, particle.lon] particle.island = fieldset.island[time, particle.depth, particle.lat, particle.lon] def DeleteParticle(particle, fieldset, time): particle.delete() class GalapagosParticle(JITParticle): age = Variable('age', dtype=np.float32, initial = 0.) beached = Variable('beached', dtype=np.int32, to_write=False, initial = 0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial = 0.) distance = Variable('distance', dtype=np.float32, initial = 0.) island = Variable('island', dtype=np.int32, initial = 0.) 
prevlon = Variable('prevlon', dtype=np.float32, to_write=False, initial = attrgetter('lon')) prevlat = Variable('prevlat', dtype=np.float32, to_write=False, initial = attrgetter('lat')) vvel = Variable('vvel', dtype=np.float32, initial = 0.) uvel = Variable('uvel', dtype=np.float32, initial = 0.) # + # set particle conditions pset = ParticleSet(fieldset=fieldset, pclass=GalapagosParticle, lon=startlon, lat=startlat, repeatdt=delta(days=seeding_frequency)) beaching_kernel = 'usingUV' kernel = pset.Kernel(AdvectionRK4) + pset.Kernel(BeachTesting) if beaching_kernel == 'usingUV': kernel += pset.Kernel(UnBeaching) fname = path.join(data_out, filename_out + "_Test.nc") else: kernel += pset.Kernel(UnBeaching2) fname = path.join(data_out, filename_out + "_PrevPosition.nc") kernel += pset.Kernel(SampleInfo) + pset.Kernel(Age) outfile = pset.ParticleFile(name=fname, outputdt=delta(hours=output_frequency)) #pset.execute(kernel, # runtime=delta(days=length_simulation), # dt=delta(hours=1), # output_file=outfile, # recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle}) pset.repeatdt = None pset.execute(kernel, runtime=delta(days=advection_duration), dt=delta(hours=1), output_file=outfile, recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle}) outfile.export() outfile.close()
documentation/20.08_MakeMaps/particlesrun_fwd_MITgcm4km_unbeaching.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Imports import numpy as np import pandas as pd # Visualisation # %matplotlib inline # %config InlineBackend.figure_format ='retina' import matplotlib.pyplot as plt import seaborn as sns sns.set_style('darkgrid') sns.mpl.rcParams['figure.figsize'] = (15.0, 9.0) # Display HTML from IPython.display import Image from IPython.core.display import HTML # Validation from sklearn.model_selection import train_test_split # Regression models from sklearn.linear_model import LinearRegression # Stats import statsmodels.api as sm from statsmodels.stats.outliers_influence import variance_inflation_factor import statsmodels.stats.api as sms import statsmodels.tsa.api as smt from scipy.stats.stats import pearsonr from scipy import stats # - Image(url="https://scikit-learn.org/stable/_static/ml_map.png") https://towardsdatascience.com/verifying-the-assumptions-of-linear-regression-in-python-and-r-f4cd2907d4c0 # + from sklearn.datasets import load_boston # load data boston = load_boston() X = pd.DataFrame(boston.data, columns=boston.feature_names) X.drop('CHAS', axis=1, inplace=True) y = pd.Series(boston.target, name='MEDV') # inspect data X.head() # - # Split into train & test X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=11) # # 1. Linear Regression # ## 1a) First version linear model # + # Whole dataset or just train on verifying assumptions? # X = X_train # y = y_train # - X_constant = sm.add_constant(X) lin_reg = sm.OLS(y, X_constant).fit() lin_reg.summary() # ## 1b) Check for linearity # + def linearity_test(model, y): ''' Function for visually inspecting the assumption of linearity in a linear regression model. It plots observed vs. predicted values and residuals vs. predicted values. Args: * model - fitted OLS model from statsmodels * y - observed values ''' fitted_vals = model.predict() resids = model.resid fig, ax = plt.subplots(1,2) sns.regplot(x=fitted_vals, y=y, lowess=True, ax=ax[0], line_kws={'color': 'red'}) ax[0].set_title('Observed vs. Predicted Values', fontsize=16) ax[0].set(xlabel='Predicted', ylabel='Observed') sns.regplot(x=fitted_vals, y=resids, lowess=True, ax=ax[1], line_kws={'color': 'red'}) ax[1].set_title('Residuals vs. Predicted Values', fontsize=16) ax[1].set(xlabel='Predicted', ylabel='Residuals') linearity_test(lin_reg, y) # + active="" # The inspection of the plots shows that the linearity assumption is not satisfied. # + active="" # Potential solutions: # non-linear transformations to dependent/independent variables # adding extra features which are a transformation of the already used ones (for example squared version) # adding features that were not considered before # - # ## 1c) Expectation (mean) of residuals is zero round(lin_reg.resid.mean(), 10) # ## 1d) No (perfect) multicollinearity: Variance Inflation Factor (VI) vif = [variance_inflation_factor(X_constant.values, i) for i in range(X_constant.shape[1])] pd.DataFrame({'vif': vif[1:]}, index=X.columns).T # + active="" # If no features are correlated, then all values for VIF will be 1. # + active="" # Potential solutions: # # To deal with multicollinearity we should iteratively remove features with high values of VIF. # A rule of thumb for removal could be VIF larger than 10 (5 is also common). 
# # Another possible solution is to use PCA to reduce features to a smaller set of uncorrelated components. # # Tip: we can also look at the correlation matrix of features to identify dependencies between them. # - # ## 1e) Homoscedasticity (equal variance) of residuals # + def homoscedasticity_test(model): ''' Function for testing the homoscedasticity of residuals in a linear regression model. It plots residuals and standardized residuals vs. fitted values and runs Breusch-Pagan and Goldfeld-Quandt tests. Args: * model - fitted OLS model from statsmodels ''' fitted_vals = model.predict() resids = model.resid resids_standardized = model.get_influence().resid_studentized_internal fig, ax = plt.subplots(1,2) sns.regplot(x=fitted_vals, y=resids, lowess=True, ax=ax[0], line_kws={'color': 'red'}) ax[0].set_title('Residuals vs Fitted', fontsize=16) ax[0].set(xlabel='Fitted Values', ylabel='Residuals') sns.regplot(x=fitted_vals, y=np.sqrt(np.abs(resids_standardized)), lowess=True, ax=ax[1], line_kws={'color': 'red'}) ax[1].set_title('Scale-Location', fontsize=16) ax[1].set(xlabel='Fitted Values', ylabel='sqrt(abs(Residuals))') bp_test = pd.DataFrame(sms.het_breuschpagan(resids, model.model.exog), columns=['value'], index=['Lagrange multiplier statistic', 'p-value', 'f-value', 'f p-value']) gq_test = pd.DataFrame(sms.het_goldfeldquandt(resids, model.model.exog)[:-1], columns=['value'], index=['F statistic', 'p-value']) print('\n Breusch-Pagan test ----') print(bp_test) print('\n Goldfeld-Quandt test ----') print(gq_test) print('\n Residuals plots ----') homoscedasticity_test(lin_reg) # + active="" # We can use two statistical tests: # Breusch-Pagan # Goldfeld-Quandt # # In both of them, the null hypothesis assumes homoscedasticity: # a p-value below a certain level (like 0.05) indicates we should reject the null in favor of heteroscedasticity. # + active="" # To identify homoscedasticity in the plots: # The placement of the points should be random and no pattern. # No increase/decrease in values of residuals should be visible. # The red line in the R plots should be flat. # + active="" # Potential solutions: # Log transformation of the dependent variable # # In case of time series, deflating a series if it concerns monetary value # # Using ARCH (auto-regressive conditional heteroscedasticity) models to model the error variance. # An example might be stock market, where data can exhibit periods of increased or decreased volatility over time # - # ## 1f) No autocorrelation of residuals acf = smt.graphics.plot_acf(lin_reg.resid, lags=40 , alpha=0.05) acf.show() lin_reg.summary() # + active="" # This assumption is especially dangerous in time-series models, # where serial correlation in the residuals implies that there is room for improvement in the model. # Extreme serial correlation is often a sign of a badly misspecified model. # + active="" # This assumption also has meaning in the case of non-time-series models. # # If residuals always have the same sign under particular conditions, # it means that the model systematically underpredicts/overpredicts what happens when the predictors have a particular configuration. # + active="" # Some notes on the Durbin-Watson test: # the test statistic always has a value between 0 and 4 # value of 2 means that there is no autocorrelation in the sample # values < 2 indicate positive autocorrelation, values > 2 negative one. 
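# -

# The Durbin-Watson statistic discussed above is already reported in `lin_reg.summary()`. As a
# small added sketch (not part of the original notebook), it can also be computed directly from
# the residuals of the fitted model:

# +
from statsmodels.stats.stattools import durbin_watson

dw = durbin_watson(lin_reg.resid)  # ~2: no autocorrelation; <2: positive; >2: negative
print(f'Durbin-Watson statistic: {dw:.3f}')
# -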
# + active="" # Potential solutions: # in case of minor positive autocorrelation, there might be some room for fine-tuning the model, # for example, adding lags of the dependent/independent variables # # some seasonal components might not be captured by the model, # account for them using dummy variables or seasonally adjust the variables # # if DW < 1 it might indicate a possible problem in model specification, # consider stationarizing time-series variables by differencing, logging, and/or deflating (in case of monetary values) # # in case of significant negative correlation, # some of the variables might have been overdifferenced # # use Generalized Least Squares # # include a linear (trend) term in case of a consistent increasing/decreasing pattern in the residuals # - # ## 1g) The features and residuals are uncorrelated for column in X.columns: corr_test = pearsonr(X[column], lin_reg.resid) print(f'Variable: {column} --- correlation: {corr_test[0]:.4f}, p-value: {corr_test[1]:.4f}') # + active="" # Reports p-value for testing the LACK of correlation between the two considered series. # p > 0.05 means you CANNOT reject the null hypothesis for LACK of correlation # - # ## 1h) The number of observations must be greater than the number of features X.shape # ## 1i) There must be some variability in features X.apply(np.var, axis=0) # ## 1j) Normality of residuals # + def normality_of_residuals_test(model): ''' Function for drawing the normal QQ-plot of the residuals and running 4 statistical tests to investigate the normality of residuals. Arg: * model - fitted OLS models from statsmodels ''' sm.ProbPlot(model.resid).qqplot(line='s'); plt.title('Q-Q plot'); jb = stats.jarque_bera(model.resid) sw = stats.shapiro(model.resid) ad = stats.anderson(model.resid, dist='norm') ks = stats.kstest(model.resid, 'norm') print(f'Jarque-Bera test ---- statistic: {jb[0]:.4f}, p-value: {jb[1]}') print(f'Shapiro-Wilk test ---- statistic: {sw[0]:.4f}, p-value: {sw[1]:.4f}') print(f'Kolmogorov-Smirnov test ---- statistic: {ks.statistic:.4f}, p-value: {ks.pvalue:.4f}') print(f'Anderson-Darling test ---- statistic: {ad.statistic:.4f}, 5% critical value: {ad.critical_values[2]:.4f}') print('If the returned AD statistic is larger than the critical value, then for the 5% significance level, the null hypothesis that the data come from the Normal distribution should be rejected. ') normality_of_residuals_test(lin_reg) # + active="" # Some of the potential reasons causing non-normal residuals: # presence of a few large outliers in data # there might be some other problems (violations) with the model assumptions # another, better model specification might be better suited for this problem # + active="" # From the results above we can infer that the residuals do not follow Gaussian distribution # from the shape of the QQ plot, # as well as rejecting the null hypothesis in all statistical tests. # The reason why Kolmogorov-Smirnov from ols_test_normality shows different results is that it does not run the `two-sided` version of the test. # + active="" # Potential solutions: # # nonlinear transformation of target variable or features # # remove/treat potential outliers # # it can happen that there are two or more subsets of the data having different statistical properties, # in which case separate models might be considered # - # ## BONUS: Outliers This is not really an assumption, however, the existence of outliers in our data can lead to violations of some of the above-mentioned assumptions. 
# + active="" # I will not dive deep into outlier detection methods as there are already many articles about them. A few potential approaches: # Z-score # box plot # Leverage — a measure of how far away the feature values of a point are from the values of the different observations. # Cook’s distance — a measure of how deleting an observation impacts the regression model. # Isolation Forest — for more details see this article # - # # Lasso Regression # # Ridge Regression # # Random Forest Regressor # # Decision Tree Regressor # # Stochastic Gradient Descent Regressor
notebooks/machine_learning_algorithms/4a-Regression-Verify-Assumptions-for-Selection-of-Algorithm(s).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1>NikTrans</h1> # # Python script to create Nikola sites from a list of schools. Edits conf.py file for site name and licence. import os import json os.system('python3 nikoladu.py') os.chdir('/home/wcmckee/nik1/') os.system('nikola build') os.system('rsync -azP /home/wcmckee/nik1/* <EMAIL>:/home/wcmckee/github/wcmckee.com/output/minedujobs') opccschho = open('/home/wcmckee/ccschool/cctru.json', 'r') opcz = opccschho.read() rssch = json.loads(opcz) filrma = ('/home/wcmckee/ccschol/') for rs in rssch.keys(): hythsc = (rs.replace(' ', '-')) hylow = hythsc.lower() hybrac = hylow.replace('(', '') hybaec = hybrac.replace(')', '') os.mkdir(filrma + hybaec) os.system('nikola init -q ' + filrma + hybaec) # I want to open each of the conf.py files and replace the nanme of the site with hythsc.lower # # Dir /home/wcmckee/ccschol has all the schools folders. Need to replace in conf.py Demo Name # with folder name of school. # # Schools name missing characters - eg ardmore lisschol = os.listdir('/home/wcmckee/ccschol/') findwat = ('LICENSE = """') def replacetext(findtext, replacetext): for lisol in lisschol: filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py') f = open(filereaz,'r') filedata = f.read() f.close() newdata = filedata.replace(findtext, '"' + replacetext + '"') #print (newdata) f = open(filereaz,'w') f.write(newdata) f.close() replacetext('LICENSE = """', 'LICENSE = """<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons Attribution 4.0 International License" style="border-width:0; margin-bottom:12px;" src="https://i.creativecommons.org/l/by/4.0/88x31.png"></a>"') # + licfil = 'LICENSE = """<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons Attribution 4.0 International License" style="border-width:0; margin-bottom:12px;" src="https://i.creativecommons.org/l/by/4.0/88x31.png"></a>"' # - opwcm = ('/home/wcmckee/github/wcm.com/conf.py') for lisol in lisschol: print (lisol) rdwcm = open(opwcm, 'r') filewcm = rdwcm.read() newdata = filewcm.replace('wcmckee', lisol) rdwcm.close() #print (newdata) f = open('/home/wcmckee/ccschol/' + lisol + '/conf.py','w') f.write(newdata) f.close() for rdlin in rdwcm.readlines(): #print (rdlin) if 'BLOG_TITLE' in rdlin: print (rdlin) for lisol in lisschol: print (lisol) hythsc = (lisol.replace(' ', '-')) hylow = hythsc.lower() hybrac = hylow.replace('(', '') hybaec = hybrac.replace(')', '') filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py') f = open(filereaz,'r') filedata = f.read() f.close() newdata = filedata.replace('LICENCE = """', licfil ) #print (newdata) f = open(filereaz,'w') f.write(newdata) f.close() # + for lisol in lisschol: print (lisol) hythsc = (lisol.replace(' ', '-')) hylow = hythsc.lower() hybrac = hylow.replace('(', '') hybaec = hybrac.replace(')', '') filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py') f = open(filereaz,'r') filedata = f.read() f.close() newdata = filedata.replace('"Demo Site"', '"' + hybaec + '"') #print (newdata) f = open(filereaz,'w') f.write(newdata) f.close() # - for lisol in lisschol: print (lisol) hythsc = (lisol.replace(' ', '-')) hylow = hythsc.lower() hybrac = hylow.replace('(', '') hybaec = hybrac.replace(')', '') filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py') f = 
open(filereaz,'r') filedata = f.read() f.close() newdata = filedata.replace('"Demo Site"', '"' + hybaec + '"') #print (newdata) f = open(filereaz,'w') f.write(newdata) f.close() # Perform Nikola build of all the sites in ccschol folder # + buildnik = input('Build school sites y/N ') # - for lisol in lisschol: print (lisol) os.chdir('/home/wcmckee/ccschol/' + lisol) if 'y' in buildnik: os.system('nikola build') makerst = open('/home/wcmckee/ccs') for rs in rssch.keys(): hythsc = (rs.replace(' ', '-')) hylow = hythsc.lower() hybrac = hylow.replace('(', '-') hybaec = hybrac.replace(')', '') #print (hylow()) filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py') f = open(filereaz,'r') filedata = f.read() newdata = filedata.replace("Demo Site", hybaec) f.close() f = open(filereaz,'w') f.write(newdata) f.close()
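# Note (added sketch, not part of the original script): the `replacetext` function defined above
# loops over `lisschol` but builds the file path from `hybaec`, a variable left over from an
# earlier loop, so every iteration rewrites the same conf.py. A corrected, reusable version that
# normalises each school folder name the same way the rest of the script does might look like
# this; the paths are the same ones assumed throughout the notebook.

# +
def school_folder(name):
    """Normalise a school name to its folder name: lowercase, spaces to dashes, no parentheses."""
    return name.replace(' ', '-').lower().replace('(', '').replace(')', '')


def replace_in_conf(school, findtext, newtext, base='/home/wcmckee/ccschol/'):
    """Replace findtext with newtext in the conf.py of a single school site."""
    conf_path = base + school_folder(school) + '/conf.py'
    with open(conf_path, 'r') as f:
        filedata = f.read()
    with open(conf_path, 'w') as f:
        f.write(filedata.replace(findtext, newtext))


# Example: set each site's title to its folder name, as the later loops intend.
for school in lisschol:
    replace_in_conf(school, '"Demo Site"', '"' + school_folder(school) + '"')
# -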
posts/niktrans.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 程序中的文档 # 你在调用函数的时候,你像是函数这个产品的用户。 # # 而你写一个函数,像是做一个产品,这个产品将来可能会被很多用户使用——包括你自己。 # # 产品,就应该有产品说明书,别人用得着,你自己也用得着——很久之后的你,很可能把当初的各种来龙去脉忘得一干二净,所以也同样需要产品说明书,别看那产品曾经是你自己设计的。 # # 产品说明书最好能说明这个函数的输入是什么,输出是什么,以及需要外部了解的实现细节(如果有的话,比如基于什么论文的什么算法,可能有什么局限等等)。 # # 但是程序员往往都很懒,写程序已经很烧脑和费时间了,还要写说明书,麻烦;而且还有个最大的问题是程序和说明书的同步,程序和家电不一样,经常会改,一旦改了就要同步说明书,这真是有点强人所难了。 # # Python 在这方面很用功,把函数的“产品说明书”当作语言内部的功能,也就是你在写代码的时候给每个函数写一点注释,只要这些注释按照某种格式去写,调用函数的人就可以方便的看到这些注释,还能用专门的工具自动生成说明书文档,甚至发布到网上去给你的用户(其他程序员)看。这并不是 Python 发明的方法,但 Python 绝对是主流语言里做的最好的之一,Python 社区提供了不是一个,而是好几个这类文档工具,比较流行的有 [Sphinx](http://www.sphinx-doc.org/en/master/index.html)、[pdoc](https://github.com/BurntSushi/pdoc)、[pydoctor](https://github.com/twisted/pydoctor) 和 [Doxygen](http://www.doxygen.nl/) 几个。 # # 这些工具各有优劣,一般来说对小型项目 [pdoc](https://github.com/BurntSushi/pdoc) 是最好最快的解决方案,而对比较复杂的项目 [Sphinx](http://www.sphinx-doc.org/en/master/index.html) 是公认的强者。 # ## Docstring 简介 # *Docstrings* 就是前面我们提到的“有一定格式要求的注释”,我们可以在我们书写的函数体最开始书写这些文档注释,然后就可以使用内置的 `help()` 函数,或者 *function* 类对象的 `__doc__` 这个属性去查到这段注释。 # # 我们看看下面这个例子,这是判断给定参数是否素数的一个函数: # # > 你会发现我们的例子经常换,似乎没啥必要的情况下也会用一些没写过的东西做例子,其实这都是小练习,我们强烈的鼓励你对每一个这样的例子,除了理解我们正在讲的东西(比如这一章讲函数文档),也要顺便搞清楚这个例子里其他东西,比如怎么判断一个数是不是素数之类的。编程是一门手艺,越练越精。 # + from math import sqrt def is_prime(n): """Return a boolean value based upon whether the argument n is a prime number.""" if n < 2: return False if n in (2, 3): return True for i in range(2, int(sqrt(n)) + 1): if n % i == 0: return False return True # - is_prime(23) help(is_prime) # 如上所示,*docstring* 是紧接着 `def` 语句,写在函数体第一行的一个字符串(写在函数体其他地方无效),用三个单引号或者双引号括起来,和函数体其他部分一样缩进,可以是一行也可以写成多行。只要存在这样的字符串,用函数 `help()` 就可以提取其内容显示出来,这样调用函数的人就可以不用查看你的源代码就读到你写的“产品手册”了。 is_prime.__doc__ # 后面我们会学到,函数也是一种对象(其实 Python 中几乎所有东西都是对象),它们也有一些属性,比如 `__doc__` 这个属性就会输出函数的 *docstring*。 # ## 书写 Docstring 的基本原则 # 规范,虽然是人们最好遵守的,但其实通常是很多人并不遵守的东西。 # # 既然学,就要**像样**——这真的很重要。所以,非常有必要认真阅读 Python [PEP 257](https://www.python.org/dev/peps/pep-0257/),也就是 Python 社区关于 *docstring* 的规范。 # # 简要总结一下 PEP 257 中所强调的部分要点: # # * 无论是单行还是多行的 *docstring*,一概使用三个双引号括起来; # * 在 *docstring* 前后都不要有空行; # * 多行 *docstring*,第一行是概要,随后空一行,再写其它部分; # * 书写良好的 *docstring* 应概括描述以下内容:参数、返回值、可能触发的错误类型、可能的副作用,以及函数的使用限制等。 # # 类定义也有相应的文档规范建议,你可以阅读 [PEP 257](https://www.python.org/dev/peps/pep-0257/) 文档作为起步,并参考 [Sphinx](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html)、[numpy/scipy](https://numpydoc.readthedocs.io/en/latest/format.html)(这是非常著名的 Python 第三方库,我们以后会讲)和 [Google](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) 的文档来学习大而规范的项目文档通常是怎么要求的。 # # 而关于 *docstring* 的内容,需要**格外注意**的是: # # > *Docstring* 是**写给人看的**,所以,在复杂代码的 *docstring* 中,写清楚 **why** 要远比写 *what* 更重要,因为 *what* 往往可以通过阅读代码来了解,而 *why* 就要难很多。你先记住这点,以后的体会自然会不断加深。 # ## 文档生成工具简介 # 前面提到过,除了 `help` 和 `__doc__` 这样的内置函数和属性可以方便我们使用 *docstring*,还有一系列文档工具可以从源代码中的注释自动生成在线文档。 # # 这些工具通常需要在一个项目中做好约定才能顺利使用起来,而且有自己一套对 *docstring* 的内容与格式要求,比如 [Sphinx](http://www.sphinx-doc.org/en/master/index.html) 标准的 *docstring* 写出来大致是这个样子的: # # ```python # class Vehicle(object): # ''' # The Vehicle object contains lots of vehicles # :param arg: The arg is used for ... # :type arg: str # :param `*args`: The variable arguments are used for ... # :param `**kwargs`: The keyword arguments are used for ... 
# :ivar arg: This is where we store arg # :vartype arg: str # ''' # # # def __init__(self, arg, *args, **kwargs): # self.arg = arg # # def cars(self, distance, destination): # '''We can't travel a certain distance in vehicles without fuels, so here's the fuels # # :param distance: The amount of distance traveled # :type amount: int # :param bool destinationReached: Should the fuels be refilled to cover required distance? # :raises: :class:`RuntimeError`: Out of fuel # # :returns: A Car mileage # :rtype: Cars # ''' # pass # ``` # # 这种格式叫做 *reStructureText*,通过 Sphinx 的工具处理之后可以生成在线文档,差不多是[这个样子](https://simpleble.readthedocs.io/en/latest/simpleble.html#the-simplebledevice-class)。 # # 通过插件 Sphinx 也能处理前面提过的 Numpy 和 Google 的格式文档。 # ## 小结 # * Python 提供内置的文档工具来书写和阅读程序文档; # * 对自己写的每个函数和类写一段简明扼要的 *docstring* 是培养好习惯的开始; # * 通过扩展阅读初步了解 Python 社区对文档格式的要求。
p2-2-docstrings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <!-- # # author: <NAME> # created: 18 Aug 2016 # license: This code is released under the GNU GPL. Please feel free to use, modify, re-release at your will. You can not construct closed-source or proprietary software with this code. Please contact me if this is your desire. # # --> # ![Jupyter logo](http://jupyter.org/assets/main-logo.svg) # # # Introduction to Jupyter Notebooks and Python # # ### Welcome to your (maybe) first **Jupyter notebook**! # # In this class, we are learning to use the python language to solve problems in physics. So far, we have used an Integrated Development Enviroment (IDE) to write and execute python code. Today, we will use a Jupyter notebook to go through a brief overview of the python language. We will use both the IDE and the Jupyter notebooks to write python code this semester. # # ## Let's get started. # # From the Jupyter [documentation](http://jupyter.readthedocs.io/en/latest/index.html): # # > First and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. # # In this class, we will use the Jupyter notebook to run Python code. You should therefore have this notebook opened in a Python kernel. Look at the top right of the browser window. If you see "Python 2" or "Python 3", this is a python notebook. # # ### Code cells allow you to enter and run code # You run code by holding `Shift` + `Enter`. # # #### Try this with the python code below. # a = 6 b = 2 c = a+b print(c) # #### Now, try to change a value of `a` or `b` above and re-execute the above cell. # # ### A *comment* is a non-code note within code. In python, a comment is preceded by a `#` for single-line comments. # # Comments are useful as notes to yourself (the programmer), and/or notes to a user of your code. # # Below, the code from the first cell is reproduced with helpful comments. # # #### Run this code again and note that the addition of the comments did not change the execution of the code # The following is simple python code, with comments, for adding two variables together a = 6 # assigning the value of 6 to variable a b = 2 # assigning the value 2 to the variable b c = a+b # adding the variables a and b and storing the result in c print(c) # printing the contents of c to the screen, right below the cell # ### A *function* is a block of code that can be called at any point after it has been evaluated def add(a,b): # a function is indicated by def. We named this function add. It takes two values as arguments c = a+b # we add the two values that were passed into the function return c # and return the result answer = add(6,2) # We store the result of add into a new variable answer print(answer) answer = add(5,100) # We overwrote the variable answer with the new value print(answer) test = add('h','i') # The + sign concatenates other data types, such as characters, rather that adding them print(test) # **The python language itself does not contain all of the mathematics and visualization that we would be interested in for exploring physics.** # # **To take advantage of the math and visualization that is useful to us in this course, we can to use python packages. 
These contain *modules*, which are files that define *objects* and *functions* that we will use this semester.** # # Let's start with `SymPy`, a package for symbolic (rather than numeric) calculations. # #### We must `import` the package of interest: from sympy.interactive import printing # for pretty printing of variables, etc printing.init_printing(use_latex='mathjax') # specifying what type of pretty printing (this will be LaTeX) # now import the package and define a namespace, sym, that will precede the function calls from the sympy function import sympy as sym # #### Now, let's use the package in a simple example x = sym.symbols("x") # defining x as a variable sym.Integral(1/x,x) # Calling the Integral function within sympy, this defines our integral sym.integrate(1/x,x) # Calling the itegrate function within sympy, which symbolically integrates the function # **The three most commonly used python packages for scientific computing are `math` (for math functions), `numpy` (for arrays), and `matplotlib` (for plotting). # In fact, you will sometimes see numpy and matplotlib imported together as one package called `scipy`.** # ### Below, I have written an example of receiving user input as a starting point. h = input("height in m: ") print("the height is", h, "meters") # ## Now, some final notes about the Jupyter notebooks: # #### Jupyter notebook files are designated by the *.ipynb* extension. # #### We can download notebooks in other formats, but they will not be executable. # #### The notebooks run *locally* (on your computer) in a web browser. # #### When the notebook application is launched, a local notebook server runs. # #### For each notebook you open, an iPython *kernel* starts running. # #### The kernel does *not* stop running when the browser tab is closed. # #### Make sure to shutdown each kernel that you begin, either from the notebook itself or the dashboard. # # ## I hope you had fun working through this Jupyter notebook! # # #### Please run everything as you would like me to see it, then go to File->Rename. # #### Save this file as lastname1_lastname2_jupyter.ipynb (you do not have to enter the extension if you don't see it)
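# As a small added example (not part of the original worksheet), here is how `numpy` and
# `matplotlib` -- the two packages mentioned above -- are typically imported and used together to
# make a simple plot:

# +
import numpy as np                  # arrays and math functions
import matplotlib.pyplot as plt     # plotting

x = np.linspace(0, 2 * np.pi, 100)  # 100 evenly spaced points from 0 to 2*pi
y = np.sin(x)                       # the sine of each point

plt.plot(x, y)
plt.xlabel('x')
plt.ylabel('sin(x)')
plt.show()
# -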
Introduction_to_Jupyter_Notebooks_and_Python_Modified.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Text Similarity using Word Embeddings # # In this notebook we're going to play around with pre build word embeddings and do some fun calculations: # + tags=[] # %matplotlib inline import os from keras.utils import get_file import gensim import subprocess import numpy as np import matplotlib.pyplot as plt from IPython.core.pylabtools import figsize figsize(10, 10) from sklearn.manifold import TSNE import json from collections import Counter from itertools import chain from pathlib import Path # - # We'll start by downloading a pretrained model from Google News. We're using `zcat` to unzip the file, so you need to make sure you have that installed or replace it by something else. # + HOME_PATH = Path.home() MODELS_PATH = HOME_PATH / 'models' DATA_PATH = HOME_PATH / 'data' assert MODELS_PATH.is_dir() assert DATA_PATH.is_dir() # + tags=[] MODEL = 'GoogleNews-vectors-negative300.bin' zipped = MODEL + '.gz' URL = 'https://s3.amazonaws.com/dl4j-distribution/' + zipped path = get_file(MODELS_PATH / zipped, URL) unzipped = MODELS_PATH / MODEL if not unzipped.is_file(): with open(unzipped, 'wb') as fout: zcat = subprocess.Popen(['zcat'], stdin=open(path), stdout=fout) zcat.wait() assert unzipped.is_file() print('Model file is located at', unzipped) # - model = ( gensim .models .KeyedVectors .load_word2vec_format(unzipped, binary=True, limit=int(1e5)) ) # Let's take this model for a spin by looking at what things are most similar to espresso. As expected, coffee like items show up: model.most_similar(positive=['espresso']) # Now for the famous equation, what is like woman if king is like man? We create a quick method to these calculations here: # + def A_is_to_B_as_C_is_to(a, b, c, topn=1): a, b, c = map(lambda x: x if type(x) == list else [x], (a, b, c) ) # Converts inputs into lists res = model.most_similar(positive=b+c, negative=a, topn=topn) d = None if len(res): if topn == 1: d = res[0][0] elif topn is not None: d = [x[0] for x in res] return d A_is_to_B_as_C_is_to('man', 'woman', 'king', 5) # - # We can use this equation to acurately predict the capitals of countries by looking at what has the same relationship as Berlin has to Germany for selected countries: # + countries = [ 'Italy', 'France', 'India', 'China', 'Mexico', 'Russia', 'San_Francisco', ] for country in countries: print('%s is the capital of %s' % (A_is_to_B_as_C_is_to('Germany', 'Berlin', country), country)) # - # Or we can do the same for important products for given companies. Here we seed the products equation with two products, the iPhone for Apple and Starbucks_coffee for Starbucks. 
Note that numbers are replaced by # in the embedding model: for company in 'Google', 'IBM', 'Boeing', 'Microsoft', 'Samsung': products = A_is_to_B_as_C_is_to( ['Starbucks', 'Apple'], ['Starbucks_coffee', 'iPhone'], company, topn=5) print('%s -> %s' % (company, ', '.join(products))) # Let's do some clustering by picking three categories of items, drinks, countries and sports: # + beverages = ['espresso', 'beer', 'vodka', 'wine', 'cola', 'tea'] countries = ['Italy', 'Germany', 'Russia', 'France', 'USA', 'India'] sports = ['soccer', 'handball', 'hockey', 'cycling', 'basketball', 'cricket'] items = beverages + countries + sports len(items) # - # And looking up their vectors: item_vectors = [(item, model[item]) for item in items if item in model] len(item_vectors) # Now use TSNE for clustering: # + vectors = np.asarray([x[1] for x in item_vectors]) lengths = np.linalg.norm(vectors, axis=1) norm_vectors = (vectors.T / lengths).T # Transpose aligns vectors for broadcasting clf = TSNE(n_components=2, perplexity=10, verbose=2, random_state=0) tsne = clf.fit_transform(norm_vectors) # - # And matplotlib to show the results: # + x=tsne[:,0] y=tsne[:,1] fig, ax = plt.subplots() ax.scatter(x, y) for item, x1, y1 in zip(item_vectors, x, y): ax.annotate(item[0], (x1, y1), size=14) plt.show() # - # As you can see, the countries, sports and drinks all form their own little clusters, with arguably cricket and India attracting each other and maybe less clear, wine and France and Italy and espresso.
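# Beyond the analogy equation and the t-SNE view above, two other handy `KeyedVectors` methods
# are direct pairwise similarity and odd-one-out detection. This is a small added illustration
# using the same `model` loaded earlier; it assumes these words fall inside the 100k-word
# vocabulary loaded above.

# +
print(model.similarity('espresso', 'cappuccino'))  # cosine similarity between two words
print(model.similarity('espresso', 'hockey'))      # should be noticeably lower
print(model.doesnt_match(['espresso', 'tea', 'vodka', 'Germany']))  # pick the odd one out
# -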
notebooks/03.1 Using pre trained word embeddings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # generate toy data label 3, 2 features # + from sklearn.datasets.samples_generator import make_blobs from matplotlib.pyplot import figure import matplotlib.pyplot as plt import pandas as pd import numpy as np import os # generate 2d classification dataset f11=np.random.uniform(0,5,25) f12=np.random.uniform(5,10,20) f1213=np.random.uniform(6,7,11) f13=np.random.uniform(10,15,18) f14=np.random.uniform(0,5,33) f15=np.random.uniform(5,10,23) f16=np.random.uniform(10,15,7) f17=np.random.uniform(0,5,10) f18=np.random.uniform(5,10,11) f1819=np.random.uniform(11,12,11) f19=np.random.uniform(10,15,26) f1=np.concatenate((f11,f12,f1213,f13,f14,f15,f16,f17,f18,f1819,f19)) f21=np.random.uniform(0,5,25) f22=np.random.uniform(0,5,20) f2728=np.random.uniform(12,13,11) f23=np.random.uniform(0,5,18) f24=np.random.uniform(5,10,33) f25=np.random.uniform(5,10,23) f26=np.random.uniform(5,10,7) f27=np.random.uniform(10,15,10) f28=np.random.uniform(10,15,11) f2829=np.random.uniform(2,3,11) f29=np.random.uniform(10,15,26) f2=np.concatenate((f21,f22,f2728,f23,f24,f25,f26,f27,f28,f2829,f29)) X=np.vstack((f1, f2)).T y1=np.full((25, 1), 1,dtype = int) y2=np.full((20, 1), 2,dtype = int) y12=np.full((11, 1), 2,dtype = int) y3=np.full((18, 1), 2,dtype = int) y4=np.full((33, 1), 2,dtype = int) y5=np.full((23, 1), 3,dtype = int) y6=np.full((7, 1), 3,dtype = int) y7=np.full((10, 1), 3,dtype = int) y8=np.full((11, 1), 1,dtype = int) y89=np.full((11, 1), 1,dtype = int) y89=np.full((11, 1), 1,dtype = int) y9=np.full((26, 1), 1,dtype = int) y=np.concatenate((y1,y2,y12,y3,y4,y5,y6,y7,y8,y89,y9)) print(y) #print("y´s shape is :"+str(y.shape)) #print("x´s shape is :"+str(X.shape)) data = X=np.concatenate([X,y],axis = 1) print(data) df = pd.DataFrame(data,columns=['feature1','feature2','label']) colors = {1:'red', 2:'blue', 3:'green'} fig, ax = plt.subplots(ncols=1, nrows=1) ax.scatter(df['feature1'], df['feature2'], c=df['label'].apply(lambda x: colors[x])) figure(frameon=False,num=None, figsize=(10, 10), dpi=80) #adds a title and axes labels ax.set_title('classification data') ax.set_xlabel('feature1') ax.set_ylabel('feature2') ax.set_xticks([0, 5, 10,15]) ax.set_yticks([0, 5, 10,15]) #adds major gridlines ax.grid(color='orange', linestyle='-', linewidth=1, alpha=5) plt.show() # - # generate configuration file str_config = "algorithm = Chi Fuzzy Weighted Rule Learning Model" + "\n" str_config = str_config + "inputData = "+"\""+"../toy1/simple-10-1tra.dat" +"\""+" " str_config = str_config + "\"" + "../toy1/simple-10-1tra.dat" +"\""+" " str_config = str_config + "\"" + "../toy1/simple-10-1tst.dat" +"\""+ "\n" str_config = str_config + "outputData = "+"\""+" ../toy1/results/result0.tra" +"\""+" " str_config = str_config + "\"" + "../toy1/results/result0.tst" +"\"" +" " str_config = str_config + "\"" + "../toy1/results/result0e0.txt"+"\""+" " str_config = str_config + "\"" + "../toy1/results/result0e1.txt" +"\""+ "\n" str_config = str_config + "Number of Labels = 3" + "\n" str_config = str_config + "T-norm for the Computation of the Compatibility Degree = Product" + "\n" str_config = str_config + "Rule Weight = Penalized_Certainty_Factor" + "\n" str_config = str_config + "Fuzzy Reasoning Method = Winning_Rule" + "\n" print(str_config) # import os cwd = os.getcwd() config_file = cwd+"\\toy1\\simple_config0.txt" 
with open(config_file,'w') as configuration_file : configuration_file.write(str_config) configuration_file.close() # + # generate train # get config_max_min_array data_num = len(y) test_num = int(data_num/5) train_num = data_num - test_num print("data_num is :"+str(data_num)) print("test_num is :"+str(test_num)) print("train_num is :"+str(train_num)) data_train = np.empty((int(train_num),3)) data_train.shape=(int(train_num),3) print("data.shape:") print(data.shape) #data_train.dtype=dt print(data_train.dtype) print("data_train.shape:") print(data_train.shape) data_test = np.empty((int(test_num),3)) k_tra=0 k_tst=0 for i in range(0,len(y)): #save i/5==0 for test data if i%5==0: data_test[k_tst]=data[i] k_tst = k_tst + 1 else: data_train[k_tra]=data[i] k_tra = k_tra + 1 print(" test data number is :" +str(k_tst)) print(" train data number is :" +str(k_tra)) #df_train = pd.DataFrame.from_records(data_train,columns=['feature1','feature2','label']) config_max_min_array_train =[[None for y in range (3) ]for x in range (2)] feature_names=['feature1','feature2'] print("data_train.shape: ") print(data_train.shape) column_feature1_array = np.array(data_train[:0]) print(column_feature1_array) config_min_array_train = [None for x in range (2) ] config_max_array_train = [None for x in range (2) ] print(np.amin(data_train, axis=0)) config_min_array_train=np.amin(data_train, axis=0) config_max_array_train =np.amax(data_train, axis=0) print(np.amax(data_train, axis=0)) config_max_min_array_train[0][1]=config_min_array_train[0] config_max_min_array_train[0][2]=config_max_array_train[0] config_max_min_array_train[1][1]=config_min_array_train[1] config_max_min_array_train[1][2]=config_max_array_train[1] for i in range(0,2): # store each feature name, min, max values config_max_min_array_train[i][0]=feature_names[i] print("feature name [" + str(i) +"]"+ " is: " + config_max_min_array_train[i][0]) print("feature min [" + str(i) +"]"+ " is: " + str(config_max_min_array_train[i][1])) print("feature max [" + str(i) +"]"+ " is: " + str(config_max_min_array_train[i][2])) # data detail data_str = "@relation iris" + "\n" for i in range(0,2): data_str = data_str + "@attribute" + " " + str(config_max_min_array_train[i][0])+ " " data_str = data_str + "real"+" "+"["+str(config_max_min_array_train[i][1])+","+str(config_max_min_array_train[i][2])+"]" data_str = data_str + "\n" data_str = data_str + "@attribute class {1, 2, 3}" + "\n" data_str = data_str + "@inputs" + " " for i in range(0,2): data_str = data_str + str(config_max_min_array_train[i][0])+ "," data_str = data_str[:-1]#delete the last , data_str = data_str + "\n" data_str = data_str + "@outputs class" + "\n" data_str = data_str + "@data" + "\n" for i in range(0,k_tra): for j in range(0,3): if j==2: data_str = data_str + str(int(data_train[i][j])) + "," else: data_str = data_str + str(data_train[i][j]) + "," data_str = data_str[:-1] data_str = data_str + "\n" #print(data_str) cwd = os.getcwd() train_file = cwd+"\\toy1\\simple-10-1tra.dat" with open(train_file,'w') as trafile : trafile.write(data_str) trafile.close() # + #draw test data print(data_test.shape) df = pd.DataFrame(data_test,columns=['feature1','feature2','label']) colors = {1:'red', 2:'blue', 3:'green'} fig, ax = plt.subplots(ncols=1, nrows=1) ax.scatter(df['feature1'], df['feature2'], c=df['label'].apply(lambda x: colors[x])) figure(frameon=False,num=None, figsize=(10, 10), dpi=80) #adds a title and axes labels ax.set_title('classification test data') ax.set_xlabel('feature1') 
ax.set_ylabel('feature2') ax.set_xticks([0, 5, 10,15]) ax.set_yticks([0, 5, 10,15]) #adds major gridlines ax.grid(color='orange', linestyle='-', linewidth=1, alpha=5) plt.show() # + # draw train data print(data_train.shape) df = pd.DataFrame(data_train,columns=['feature1','feature2','label']) colors = {1:'red', 2:'blue', 3:'green'} fig, ax = plt.subplots(ncols=1, nrows=1) ax.scatter(df['feature1'], df['feature2'], c=df['label'].apply(lambda x: colors[x])) figure(frameon=False,num=None, figsize=(10, 10), dpi=80) #adds a title and axes labels ax.set_title('classification train data') ax.set_xlabel('feature1') ax.set_ylabel('feature2') ax.set_xticks([0, 5, 10,15]) ax.set_yticks([0, 5, 10,15]) #adds major gridlines ax.grid(color='orange', linestyle='-', linewidth=1, alpha=5) plt.show() # + # generate Test file data config_max_min_array_test =[[None for y in range (3) ]for x in range (2)] feature_names=['feature1','feature2'] print("data_test.shape: ") print(data_test.shape) column_feature1_array = np.array(data_train[:0]) print(column_feature1_array) config_min_array_test = [None for x in range (2) ] config_max_array_test = [None for x in range (2) ] print(np.amin(data_test, axis=0)) config_min_array_test=np.amin(data_test, axis=0) config_max_array_test =np.amax(data_test, axis=0) print(np.amax(data_test, axis=0)) config_max_min_array_test[0][1]=config_min_array_test[0] config_max_min_array_test[0][2]=config_max_array_test[0] config_max_min_array_test[1][1]=config_min_array_test[1] config_max_min_array_test[1][2]=config_max_array_test[1] for i in range(0,2): # store each feature name, min, max values config_max_min_array_test[i][0]=feature_names[i] print("feature name [" + str(i) +"]"+ " is: " + config_max_min_array_test[i][0]) print("feature min [" + str(i) +"]"+ " is: " + str(config_max_min_array_test[i][1])) print("feature max [" + str(i) +"]"+ " is: " + str(config_max_min_array_test[i][2])) # data detail data_str = "@relation iris" + "\n" for i in range(0,2): data_str = data_str + "@attribute" + " " + str(config_max_min_array_test[i][0])+ " " data_str = data_str + "real"+" "+"["+str(config_max_min_array_test[i][1])+","+str(config_max_min_array_test[i][2])+"]" data_str = data_str + "\n" data_str = data_str + "@attribute class {1, 2, 3}" + "\n" data_str = data_str + "@inputs" + " " for i in range(0,2): data_str = data_str + str(config_max_min_array_test[i][0])+ "," data_str = data_str[:-1] data_str = data_str + "\n" data_str = data_str + "@outputs class" + "\n" data_str = data_str + "@data" + "\n" for i in range(0,k_tst): for j in range(0,3): if j==2: data_str = data_str + str(int(data_test[i][j])) + "," else: data_str = data_str + str(data_test[i][j]) + "," data_str = data_str[:-1] data_str = data_str + "\n" #print(data_str) cwd = os.getcwd() train_file = cwd+"\\toy1\\simple-10-1tst.dat" with open(train_file,'w') as trafile : trafile.write(data_str) trafile.close()
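# -

# The train and test cells above duplicate the same header-building and file-writing logic. As an
# added sketch (not part of the original notebook), the KEEL-style .dat writing can be factored
# into one helper that both cells could reuse; it assumes, as above, that the last column of the
# array holds the integer class label.

# +
def write_keel_file(path, data, feature_names, relation='iris', classes=(1, 2, 3)):
    """Write a 2D array whose last column is the class label to a KEEL-style .dat file."""
    lines = ['@relation ' + relation]
    for j, name in enumerate(feature_names):
        col = data[:, j]
        lines.append('@attribute {} real [{},{}]'.format(name, col.min(), col.max()))
    lines.append('@attribute class {' + ', '.join(str(c) for c in classes) + '}')
    lines.append('@inputs ' + ','.join(feature_names))
    lines.append('@outputs class')
    lines.append('@data')
    for row in data:
        features = ','.join(str(v) for v in row[:-1])
        lines.append(features + ',' + str(int(row[-1])))
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')


write_keel_file(cwd + '\\toy1\\simple-10-1tra.dat', data_train, ['feature1', 'feature2'])
write_keel_file(cwd + '\\toy1\\simple-10-1tst.dat', data_test, ['feature1', 'feature2'])
# -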
.ipynb_checkpoints/generate_data_toy1_simple-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/rocioparra/redes-neuronales/blob/master/03-clasificacion-texto/Ejercicio_Clasificaci%C3%B3n_Texto.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={} colab_type="code" id="8mBlkilaNC2M" # Estos dos comandos evitan que haya que hacer reload cada vez que se modifica un paquete # %load_ext autoreload # %autoreload 2 # + [markdown] colab_type="text" id="4XI-bramQQLk" # # Ejercicio de clasificación de texto # # _22.45 - Redes Neuronales_ # # _2020 - Segundo cuatrimestre_ # # _<NAME>_ # # Naive Bayes es una técnica estadística que consiste en repetir el método anterior en problemas cuyos sucesos no son independientes, pero suponiendo independencia. # A lo largo de este trabajo desarrollarán un modelo de Naive Bayes para el problema de clasificación de artículos periodístios.En este caso podemos estimar la probabilidad de ocurrencia de cada palabra según la categoría a la que pertenece el artículo. # + [markdown] colab_type="text" id="zJ7IlPHXEDEJ" # ## Dataset # # + [markdown] colab_type="text" id="z8adA_beEXG7" # El primer paso es obtener el dataset que vamos a utilizar. El dataset a utilizar es el de TwentyNewsGroup(TNG) que está disponible en sklearn. # # Se puede encontrar más información del dataset en la documentación de scikit-learn. # + colab={} colab_type="code" id="HmSiAFf7L69o" from sklearn.datasets import fetch_20newsgroups from helper import autosave # autosave asegura que se guarden los datos en un archivo la primera vez que se corre, # y las veces siguiente que se llame con los mismos argumentos simplemente se lee de ahí @autosave('twenty-md={metadata}-{subset}') def get_20newsgroup(subset='train', metadata=False): if metadata: return fetch_20newsgroups(subset=subset, shuffle=True) else: # de acuerdo a recomendacion de sklearn, no considerar metadata # para obtener resultados mas representativos return fetch_20newsgroups(subset=subset, remove=('headers', 'footers', 'quotes'), shuffle=True) # + colab={} colab_type="code" id="AHaexjmQNQej" #Loading the data set - training data. twenty_train = get_20newsgroup(subset='train', metadata=False) # + [markdown] colab_type="text" id="u65YieUTEgR3" # El siguiente paso es analizar el contenido del dataset, como por ejemplo la cantidad de artículos, la cantidad de clases, etc. # + [markdown] colab_type="text" id="sx2_w9hbS9-D" # ### Preguntas # # 1) ¿Cuántos articulos tiene el dataset? # + colab={} colab_type="code" id="dmBRcNNbRvVu" len(twenty_train.data) # + [markdown] colab_type="text" id="AwXoKjPbL69y" # 2) ¿Cuántas clases tiene el dataset? # + colab={} colab_type="code" id="kMdZlCIzL69y" len(twenty_train.target_names) # + [markdown] colab_type="text" id="ZNtJzyqfL691" # 3) ¿Es un dataset balanceado? # + colab={} colab_type="code" id="EQdBhjEAL691" import numpy as np _, counts = np.unique(twenty_train.target, return_counts=True) if len(set(counts)) == 1: print('El dataset está balanceado') else: print('El dataset no está balanceado') # + [markdown] colab_type="text" id="-SIgt_PmL694" # 4) ¿Cuál es la probabilidad a priori de la clase 5? A que corresponde esta clase? 
# + colab={} colab_type="code" id="ESb00vAIL694" priori5 = counts[5]/sum(counts) print(f'La clase 5 ({twenty_train.target_names[5]}) tiene probabilidad a priori {priori5:.3}') # + [markdown] colab_type="text" id="5ec_R4C_L696" # 5) ¿Cuál es la clase con mayor probabilidad a priori? # + colab={} colab_type="code" id="CjA_Dl3XL697" max_class = np.argmax(counts) print(f'La clase {max_class} ({twenty_train.target_names[max_class]}) tiene la máxima probabilidad a priori, {counts[max_class]/sum(counts):.2}') # + [markdown] colab_type="text" id="84hQvk-UHX1V" # ## Preprocesamiento # # Para facilitar la comprensión de los algoritmos de preprocesamiento, se aplican primero a un solo artículo. # + [markdown] colab_type="text" id="l0zxZSOsFXpl" # # Mas info en: # http://text-processing.com/demo/stem/ # + colab={} colab_type="code" id="uFohGXubL69-" # %%capture # suppress output import nltk from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer from nltk.corpus import stopwords from nltk.stem import PorterStemmer from helper import print_list nltk.download('punkt') nltk.download('wordnet') nltk.download('stopwords') stemmer = PorterStemmer() # - article = twenty_train.data[0] print(f'Artículo de clase {twenty_train.target[0]} ({twenty_train.target_names[twenty_train.target[0]]})') print(article) # + [markdown] colab_type="text" id="-geItZPyUWEO" # - **Tokenization (nltk):** # + colab={} colab_type="code" id="rKGHy7YRL6-B" tok = word_tokenize(article) print_list(tok) # + [markdown] colab_type="text" id="pksduBGKFMxt" # - **Lemmatization (nltk):** # + colab={} colab_type="code" id="7Kv4WDZIL6-E" lemmatizer = WordNetLemmatizer() lem=[lemmatizer.lemmatize(x, pos='v') for x in tok] print_list(lem) # + [markdown] colab_type="text" id="-zxqqFYAFOsN" # - **Stop Words (nltk):** # # + colab={} colab_type="code" id="2EPZnKNRL6-G" stop = [x for x in lem if x not in stopwords.words('english')] print_list(stop) # + [markdown] colab_type="text" id="UjKQKWLGFQRl" # - **Stemming (nltk):** # # + colab={} colab_type="code" id="0fn3_J-hL6-K" stem = [stemmer.stem(x) for x in stop] print_list(stem) # + [markdown] colab_type="text" id="HNfMRbqmFSIR" # - **Filtrado de palabras:** # # + colab={} colab_type="code" id="j6m-gGPaL6-N" alpha = [x for x in stem if x.isalpha()] print_list(alpha) # + [markdown] colab_type="text" id="yOzGVenvIESr" # ### Preprocesamiento completo # # Utilizar o no cada uno de los métodos vistos es una decisión que dependerá del caso particular de aplicación. Para este ejercicio vamos a considerar las siguientes combinaciones: # + [markdown] colab_type="text" id="5y31VaboL6-Q" # - Tokenización # - Tokenización, Lematización, Stemming. # - Tokenización, Stop Words. # - Tokenización, Lematización, Stop Words, Stemming. # - Tokenización, Lematización, Stop Words, Stemming, Filtrado. 
# + colab={} colab_type="code" id="ay3bgiGKL6-Q" def filter_article(article, filts): filts = filts.split() if 'lem' in filts: article = [lemmatizer.lemmatize(x,pos='v') for x in article] if 'stop' in filts: article = [x for x in article if x not in stopwords.words('english')] if 'stem' in filts: article = [stemmer.stem(x) for x in article] if 'filt' in filts: article = [x for x in article if x.isalpha()] return article @autosave(fmt='{name}-{filts}') def filter_articles(name, articles, filts): filtered_articles = [] for data in articles: tok = word_tokenize(data) # always tokenize filtered_articles.append(filter_article(tok, filts)) return filtered_articles # + colab={} colab_type="code" id="3mmFlpfAL6-T" ans_fmt = """Preprocesamiento: {preproc} Longitud del vocabulario: {vocab_len} """ preprocessing = ['tok', 'tok lem stem', 'tok stop', 'tok lem stop stem', 'tok lem stop stem filt'] for preproc in preprocessing: filtered_articles = filter_articles('train-nometadata', twenty_train.data, preproc) vocab = set([word for article in filtered_articles for word in article]) print(ans_fmt.format(preproc=preproc, vocab_len=len(vocab))) # - print(f"Número de palabras en stopwords: {len(stopwords.words('english'))}") # + [markdown] colab_type="text" id="wP3n8ZwFc7_D" # ### Preguntas # + [markdown] colab_type="text" id="4Xn3CaOKL6-X" # - **Cómo cambia el tamaño del vocabulario al agregar Lematización y Stemming?** # # Sólo con tokenización: 161698 palabras # # Con lematización y stemming: 126903 palabras (21.5% menor, 34795 palabras menos). # # El vocabulario se reduce no porque la cantidad total de palabras sea menor, sino porque palabras que originalmente eran distintas ahora son iguales (por ejemplo, "is" y "was" se convierten ambas en "be"). # # La reducción del 20% sugiere que de cada 5 palabras, dos son dos "versiones" de la misma, lo cual es razonable si se tiene en cuenta que es muy común usar una misma palabra en singular y plural en un mismo contexto ("car" / "cars"), adverbios y adjetivos con la misma raíz ("real" / "really"), pronombres en distintos casos ("I" / "me"), etcétera (en español esto probablemente sería incluso más pronunciado, al haber mayor cantidad de declinaciones verbales distintas, y tener géneros para adjetivos y artículos). # + [markdown] colab_type="text" id="vGVqDY1rL6-Y" # - **Cómo cambia el tamaño del vocabulario al Stop Words?** # + [markdown] colab_type="text" id="_soRRxO9L6-Y" # Sólo con tokenización: 161698 palabras # # Con stop words: 161533 palabras (0.1% menor, 165 palabras menos) # # El vocabulario se reduce porque se remueven las palabras contenidas en el conjunto de stop words. Es razonable entonces que la reducción del vocabulario sea menor de este caso, ya que como máximo se podrán remover tantas palabras como haya en la lista de stop words (en este caso, 179). # + [markdown] colab_type="text" id="rUjaXhRLL6-Z" # - **Analice muy brevemente ventajas y desventajas del tamaño del dataset en cada caso.** # + [markdown] colab_type="text" id="RsBFRFbpL6-Z" # Para el caso de stopwords, es útil porque es razonable pensar que ese conjunto de palabras estará presente en todos los artículos, y por lo tanto no aportará demasiada información. Por ejemplo, una palabra como "car", "player" o "God" nos da más información sobre de qué se está hablando que "in", "no" o "is". # # Sin embargo, también pueden imaginarse casos donde esto no sea cierto. 
Por ejemplo, si una clase se caracteriza por narraciones en primera persona, mientras que los demás suelen ser más impersonales, se estaría ignorando la información que aportan palabras como "I", "me", etc. # # En cuanto a la lematización y stemming, puede hacerse un análisis muy similar. En muchos casos, es muy útil combinar las probabilidades de palabras similares: en un artículo de deportes, las palabras "match" y "matches" apuntan a un contexto similar. Lo mismo puede decirse en cuanto a el tiempo de los verbos: no es particularmente relevante para determinar si se está hablando de deportes, ya que podría estarse especulando sobre o anunciando eventos que sucederán en el futuro ("if they win"), o relatando eventos del pasado ("after they won"). Por lo tanto, tratar ambos casos como idénticos podría ser beneficioso. # # Sin embargo, nuevamente pueden pensarse casos donde esto no será cierto: puede que una clase se caracterice por hablar sobre eventos del pasado, mientras que otra se dedique más a explicaciones en presente, y ese matiz se perdería con lematización y stemming. # + [markdown] colab_type="text" id="c-R-LPRoIV0E" # ## Vectorización de texto # # - **Obtención del vocabulario y obtención de la probabilidad** # # Como se vió en clase, los vectorizadores cuentan con dos parámetros de ajuste. # # - max_df: le asignamos una maxima frecuencia de aparición, eliminando las palabras comunes que no aportan información. # # - min_df: le asignamos la minima cantidad de veces que tiene que aparecer una palabra. # # + [markdown] colab_type="text" id="G7ob-IgsMRS7" # ## Entrenamiento del modelo # + [markdown] colab_type="text" id="a4bVz9QwMupE" # Primero deben separar correctamente el dataset para hacer validación del modelo. # Y luego deben entrenar el modelo de NaiveBayes con el dataset de train. # # Deben utilizar un modelo de NaiveBayes Multinomial y de Bernoulli. Ambos modelos estan disponibles en sklearn. # # Finalmente comprobar el accuracy en train. 
# + colab={} colab_type="code" id="81G78exKL6-b" @autosave(fmt='{name}-joined-{filts}') def get_filtered_joined_articles(name, articles, filts): articles = filter_articles(name, articles, filts) for i in range(len(articles)): articles[i] = ' '.join(articles[i]) return articles # + colab={} colab_type="code" id="C74USIEyS3B7" from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.naive_bayes import MultinomialNB, BernoulliNB import pandas as pd import os.path OUT_FILE = 'out-nometadata.csv' vectorizers = [CountVectorizer, TfidfVectorizer] classifiers = [MultinomialNB, BernoulliNB] max_dfs = [.05, 0.1, 0.25, 0.3, 0.5, 0.75, 0.8, 0.95] min_dfs = [1, 2, 5, 10] results = [] import pandas as pd total_its = len(vectorizers)*len(classifiers)*len(min_dfs)*len(max_dfs)*len(preprocessing) i = 1 if os.path.isfile(OUT_FILE): results = pd.read_csv(OUT_FILE) else: results = [] for filts in preprocessing: data = get_filtered_joined_articles(name='train-nometadata', filts=filts) for max_df in max_dfs: for min_df in min_dfs: for Vectorizer in vectorizers: count_vect = Vectorizer(max_df=max_df, min_df=min_df) raw_data = count_vect.fit_transform(data) for Classifier in classifiers: print(f'Computing: F={filts}, M={max_df}, m={min_df}, V={Vectorizer.__name__}, C={Classifier.__name__} ({i}/{total_its})...') i += 1 clf = Classifier() clf.fit(raw_data, twenty_train.target) score = clf.score(raw_data, twenty_train.target) results.append({ 'max_df': max_df, 'min_df': min_df, 'filts': filts, 'vectorizer': Vectorizer.__name__, 'model': Classifier.__name__, 'score': score }) print(f'Score: {score:.5%}') print(50 * '-') results = pd.DataFrame(results) results.to_csv(OUT_FILE, index=False) # + [markdown] colab_type="text" id="GPPo4Fr-eSON" # ### Preguntas # + [markdown] colab_type="text" id="lKoJGowhcufh" # - **¿Con que modelo obtuvo los mejores resultados? Explique por qué cree que fue así.** # + colab={} colab_type="code" id="3iqIoNyTTYVc" best = results['score'].max() best_filt = results['score']==best best_hypers = results[best_filt] best_hypers # + colab={} colab_type="code" id="3iqIoNyTTYVc" best_hyper = best_hypers.iloc[0] # + [markdown] colab_type="text" id="tJmsffK4ehiK" # Los mejores resultados se obtenieron utilizando: # - clasificador multinomial # - vectorizador TF-IDF, con min_df=1 y max_df=0.05 # - preprocesamiento: únicamente tokenización # # No se disminuyó más el valor de max_df para evitar overfitting (quedarse sólo con palabras que estén en uno o dos artículos, y por lo tanto poder determinar la clase casi determinísticamente para el set de train, pero probablemente obteniendo un modelo no muy útil para analizar otros sets de datos). # # El valor de max_df=0.05 sugiere que se eliminan del set palabras que sean comunes en más de una clase (si bien el set no está exactamente balanceado, se vio previamente que la máxima probabilidad es de 0.053, y si fuese 0.05 todas las clases tendrían la misma). Con min_df=1 no se descarta ninguna palabra que aparezca en al menos un artículo, y utilizar solo tok hace que las palabras queden tal como estaban. Considerando la naturaleza especializada de las clases, y lo distintas que son entre sí (ateísmo, hardware IBM, MS Windows, armas, política en el medio oriente, espacial, cripto...), probablemente lo que esté sucediendo es que se conserva sólo un vocabulario lo suficientemente especializado como para detectar con la mayor certeza posible a cuál de las 20 clases pertenece. 
# # Esto es consistente con que los mejores resultados se obtengan con TF-IDF, ya que de esta manera se está penalizando a palabras que son comunes entre muchos artículos, nuevamente haciendo foco en vocabulario especializado. # + colab={} colab_type="code" id="LzO8qZS3L6-f" results.sort_values(by=['score'], ascending=False, inplace=True) # + colab={} colab_type="code" id="0nV35IRbL6-p" len(results) # + colab={} colab_type="code" id="OAJqBBkOL6-h" results[results.model=='MultinomialNB'].sort_values(by='score', ascending=True).head() # + colab={} colab_type="code" id="YUTwVo0NL6-l" results[results.model=='BernoulliNB'].head() # + [markdown] colab_type="text" id="tHjTNR_yc2YC" # Se obtuvieron mejores resultados con el modelo multinomial. De las 640 combinaciones de hiperparámetros testeadas, absolutamente todas las pruebas con el modelo de Bernoulli arrojaron peores resultados que el peor scoring obtenido para multinomial (0.6828 para el mejor caso de Bernoulli, y 0.7235 para el peor de multinomial). # # Este resultado es el esperado, ya que el modelo multinomial representa más fielmente los datos de este problema en particular: la ocurrencia de una palabra en un artículo aporta mucha menos información si se considera binariamente (palabra está presente / no está presente en el artículo). Con el modelo de Bernoulli, en este caso, se está perdiendo información. # + [markdown] colab_type="text" id="mOGoiDhGe4MH" # ## Performance de los modelos # # En el caso anterior, para medir la cantidad de artículos clasificados correctamente se utilizó el mismo subconjunto del dataset que se utilizó para entrenar. # # Esta medida no es una medida del todo útil, ya que lo que interesa de un clasificador es su capacidad de clasificación de datos que no fueron utilizados para entrenar. Es por eso que se pide, para el clasificador entrenado con el subconjunto de training, cual es el porcentaje de artículos del subconjunto de testing clasificados correctamente. Comparar con el porcentaje anterior y explicar las diferencias. # # Finalmente deben observar las diferencias y extraer conclusiones en base al accuracy obtenido, el preprocesamiento y vectorización utilizado y el modelo, para cada combinación de posibilidades. # + colab={} colab_type="code" id="fsnhfAiwg-A-" #Loading the data set - training data. twenty_test = get_20newsgroup(subset='test', metadata=False) # + colab={} colab_type="code" id="Qx3hZfcRL6-5" train_data = get_filtered_joined_articles(name='train-nometadata', articles=twenty_train.data, filts=best_hyper.filts) test_data = get_filtered_joined_articles(name='test-nometadata', articles=twenty_test.data, filts=best_hyper.filts) # + colab={} colab_type="code" id="UpphW8cAVcrX" count_vect = eval(best_hyper.vectorizer)(max_df=best_hyper.max_df, min_df=best_hyper.min_df) raw_data_train = count_vect.fit_transform(train_data) raw_data_test = count_vect.transform(test_data) # + colab={} colab_type="code" id="hacL4g4UL6-9" clf = eval(best_hyper.model)() clf.fit(raw_data_train, twenty_train.target) score = clf.score(raw_data_test, twenty_test.target) # - print(f"Score en train: {best_hyper.score:.4}") print(f"Score en test: {score:.4}") # + [markdown] colab_type="text" id="iOf-BcvpeqKp" # ### Preguntas # # - **El accuracy en el dataset de test es mayor o menor que en train? 
Explique por qué.** # # La accuracy es menor en test que en train porque los parámetros del modelo son los estimados a partir de train, y por lo tanto está garantizado que dado un set de hiperparámetros y un modelo dado, será la mejor representación de los datos posible. Esto no es cierto con test - podría darse que en algún caso en particular, la información de test se ajuste a la obtenida a partir de train, incluso mejor que el mismo train, pero esto no es una garantía (y no es lo que se espera, ni en lo que en la mayoría de los casos ocurre).
03-clasificacion-texto/2245_RN20202Q_Texto_Parra.ipynb
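The grid search in the notebook above selects TF-IDF with max_df=0.05, min_df=1 and MultinomialNB; below is a minimal, self-contained sketch of the same train/test comparison using scikit-learn's built-in fetch_20newsgroups loader. Stripping headers, footers and quotes is only a rough stand-in for the notebook's "nometadata" preprocessing, and its custom filter/autosave helpers are not reproduced here.

# Minimal sketch: fit on the 20 Newsgroups train split, score on the held-out test split.
# remove=(...) is a rough analogue of the notebook's "nometadata" filtering (assumption).
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

remove = ("headers", "footers", "quotes")
train = fetch_20newsgroups(subset="train", remove=remove)
test = fetch_20newsgroups(subset="test", remove=remove)

vect = TfidfVectorizer(max_df=0.05, min_df=1)   # best hyperparameters reported above
X_train = vect.fit_transform(train.data)        # vocabulary is fit on train only
X_test = vect.transform(test.data)              # the same vocabulary is reused on test

clf = MultinomialNB().fit(X_train, train.target)
print("train accuracy:", clf.score(X_train, train.target))
print("test accuracy:", clf.score(X_test, test.target))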
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # Create a sequence distribution plot # ================================================== # # # Objective # ------------ # Visualise how the states of a single categorical sequence are distributed across its positions. # # # Solution # ------------ # Encode the sequence as a list of integer-coded states and pass it to pysan's plot_sequence, as shown below. # # + import pysan as ps sequence = [1,1,1,2,2,3,2,2,3,3,2,1,1,2,3,3,3,2,2,2,3,2,1,1] ps.plot_sequence(sequence)
docs/auto_examples/plot_sequence_distribution.ipynb
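As a plain-Python illustration of the state distribution that the example sequence produces, here is a minimal sketch using only the standard library and matplotlib; it does not assume anything about pysan's plotting API beyond the single call shown above.

# Minimal sketch (plain Python/matplotlib, no pysan): count how often each state occurs
# in the example sequence and plot the resulting distribution as a bar chart.
from collections import Counter
import matplotlib.pyplot as plt

sequence = [1, 1, 1, 2, 2, 3, 2, 2, 3, 3, 2, 1, 1, 2, 3, 3, 3, 2, 2, 2, 3, 2, 1, 1]
counts = Counter(sequence)                              # state -> number of occurrences
states = sorted(counts)
freqs = [counts[s] / len(sequence) for s in states]     # relative frequencies

plt.bar([str(s) for s in states], freqs)
plt.xlabel("state")
plt.ylabel("relative frequency")
plt.title("State distribution of the example sequence")
plt.show()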
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Analyzing wind and stress measurements from IRGASON # # ## Experiment parameters # # * Date: 2019-11-21 # * Tank: SUSTAIN # * Start time: 16:10 UTC # * Wind only, no paddle # * Fan from 0 to 60 Hz in 5 Hz intervals, each run 600 s (10 minutes) # * Fresh water # * Mean water depth: 0.8 m # ## Loading the data from datetime import datetime, timedelta import glob import matplotlib.pyplot as plt import numpy as np from scipy.signal import detrend from sustain_drag_2020.irgason import read_irgason_from_toa5 import warnings warnings.filterwarnings('ignore') # + # data parameters DATAPATH = '/home/milan/Work/sustain/data/sustain-nsf-2019/20191121' irgason_files = glob.glob(DATAPATH + '/TOA5_SUSTAIN_Wind.FAST*.dat') # experiment parameters start_time = datetime(2019, 11, 21, 16, 10) fan = range(0, 65, 5) run_seconds = 600 # - # read IRGASON data from TOA5 files time, irg1, irg2 = read_irgason_from_toa5(irgason_files) irg1 irg2 # ## Raw velocity data fig = plt.figure(figsize=(12, 6)) plt.plot(time, irg1['u'], 'b-', lw=0.1) plt.grid() plt.xlabel('Time [UTC]') plt.ylabel('Velocity [m/s]') plt.title('u-component of wind, IRGASON 1') # This is raw u-velocity (along-tank direction), from IRGASON 1 (short fetch). Comments: # # * Some dropouts in data first appear at the beginning of the 50 Hz run, and some more at 55 and 60 Hz # * Mean wind seems to have a small negative trend, especially in higher winds. Perhaps due to water loss in the tank -- less water -> more air -> wind weakens over time? fig = plt.figure(figsize=(12, 6)) plt.plot(time, irg1['v'], 'g-', lw=0.1) plt.grid() plt.xlabel('Time [UTC]') plt.ylabel('Velocity [m/s]') plt.title('v-component of wind, IRGASON 1') # Raw values of cross-tank velocity. # # * Mean is biased and positive, which means the instrument is not perfectly aligned in the along-tank direction. This is fine -- cross-tank velocity will go into the horizontal velocity. # * Yes, some dropouts in the data here as well, although the values are less extreme than in the along-tank component. fig = plt.figure(figsize=(12, 6)) plt.plot(time, irg1['w'], 'r-', lw=0.1) plt.grid() plt.xlabel('Time [UTC]') plt.ylabel('Velocity [m/s]') plt.title('w-component of wind, IRGASON 1') # Raw values of vertical velocity: # # * Mean is biased and negative # * No apparent dropouts in the data (good!) # ## Cleaning up and rotating the data # # We can perform some basic cleaning of the data by setting some a priori maximum gust values and limiting the data at each fan speed to +/- gust range from the mean. This takes care of extreme velocities due to spray but is not ideal. 
def rotate(u, w, th): """Rotates the vector (u, w) by angle th.""" ur = np.cos(th) * u + np.sin(th) * w wr = -np.sin(th) * u + np.cos(th) * w return ur, wr def momentum_flux(irg, time, t0, t1): U, Ustd, Wstd, uw = [], [], [], [] max_u_gust = 10 max_w_gust = 5 for n in range(len(fan)): mask = (time >= t0[n]) & (time <= t1[n]) u, v, w = irg['u'][mask][:], irg['v'][mask][:], irg['w'][mask][:] # clean up um, vm, wm = np.nanmean(u), np.nanmean(v), np.nanmean(w) u[u > um + max_u_gust] = um + max_u_gust u[u < um - max_u_gust] = um - max_u_gust v[v > vm + max_u_gust] = vm + max_u_gust v[v < vm - max_u_gust] = vm - max_u_gust w[w > wm + max_w_gust] = wm + max_w_gust w[w < wm - max_w_gust] = wm - max_w_gust # horizontal velocity u = np.sqrt(u**2 + v**2) # rotate angle = np.arctan2(np.nanmean(w), np.nanmean(u)) u, w = rotate(u, w, angle) # time average um, wm = np.nanmean(u), np.nanmean(w) up, wp = u - um, w - wm U.append(um) Ustd.append(np.nanstd(u)) Wstd.append(np.nanstd(w)) uw.append(np.nanmean(up * wp)) return np.array(U), np.array(Ustd), np.array(Wstd), np.array(uw) # 9-minute time windows for each run; # we exclude the first minute (thus 9 and not 10) due to fan spinup t0 = [start_time + timedelta(seconds=n * run_seconds + 60) for n in range(len(fan))] t1 = [start_time + timedelta(seconds=(n + 1) * run_seconds) for n in range(len(fan))] U1, Ustd1, Wstd1, uw1 = momentum_flux(irg1, time, t0, t1) U2, Ustd2, Wstd2, uw2 = momentum_flux(irg2, time, t0, t1) fig = plt.figure(figsize=(8, 6)) plt.plot(fan, U1, color='tab:blue', marker='o', label='IRGASON 1') for n in range(U1.size): plt.plot([fan[n], fan[n]], [U1[n]-Ustd1[n], U1[n]+Ustd1[n]], color='tab:blue') plt.plot(fan, U2, color='tab:orange', marker='o', label='IRGASON 2') for n in range(U2.size): plt.plot([fan[n], fan[n]], [U2[n]-Ustd2[n], U2[n]+Ustd2[n]], color='tab:orange') plt.legend(loc='upper left', fancybox=True, shadow=True) plt.grid() plt.xlabel('Fan [Hz]') plt.ylabel('Wind speed [m/s]') plt.title('Mean wind speed vs. fan') # exclude some questionable data in high winds uw1[0] = np.nan uw2[0] = np.nan uw1[-2:] = np.nan uw2[-4:] = np.nan fig = plt.figure(figsize=(8, 6)) plt.plot(U1, uw1, color='tab:blue', marker='o', label='IRGASON 1') plt.plot(U2, uw2, color='tab:orange', marker='o', label='IRGASON 2') plt.legend(loc='upper left', fancybox=True, shadow=True) plt.grid() plt.xlim(0, 40) plt.ylim(0, 0.4) plt.xlabel('Wind speed [m/s]') plt.ylabel(r"$-\overline{u'w'}$ [$m^2/s^2$]") plt.title('Stress vs. mean wind speed')
irgason_stress_20191121.ipynb
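A small follow-on sketch for the stress estimates above: given the mean wind speed and the u'w' covariance returned by momentum_flux (U1/U2 and uw1/uw2 in the notebook), the friction velocity and a drag coefficient at the measurement height follow from the standard bulk relations u* = sqrt(|u'w'|) and C_D = u*^2 / U^2. The arrays below are placeholders, not measured values from this experiment.

# Minimal sketch, assuming U holds mean wind speeds [m/s] and uw holds the u'w'
# covariances [m^2/s^2] per fan setting. The numbers below are placeholders only.
import numpy as np
import matplotlib.pyplot as plt

U = np.array([5.0, 10.0, 15.0, 20.0])          # placeholder mean wind speeds
uw = np.array([-0.02, -0.08, -0.20, -0.35])    # placeholder u'w' covariances

u_star = np.sqrt(np.abs(uw))                   # friction velocity u* = sqrt(|u'w'|)
cd = u_star**2 / U**2                          # drag coefficient at measurement height

plt.plot(U, 1e3 * cd, "o-")
plt.xlabel("Wind speed [m/s]")
plt.ylabel(r"$10^3\,C_D$")
plt.title("Drag coefficient vs. mean wind speed (sketch)")
plt.show()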
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.4 64-bit # name: venv # --- # ## Rows : 55-123,141 # + import pandas as pd import numpy as np train_data = pd.read_csv('Train_Data.csv') test_data = pd.read_csv('modified_test.csv') train_data.head() # - train_data['ad'].unique() test_data['ad'].unique() train_data = train_data[train_data.ad.isin(['ad 1', 'ad 2', 'ad 3', 'ad 4', 'ad 5', 'ad 6', 'ad 7', 'ad 8', 'ad 10','ad 56', 'ad 55'])] train_data['ad'].unique() train_data['date'] = pd.to_datetime(train_data['date'], format='%d-%m-%Y', errors='coerce') test_data['date'] = pd.to_datetime(test_data['date'], format='%d-%m-%Y', errors='coerce') # + train_data['date_month'] = train_data['date'].dt.month train_data['date_day'] = train_data['date'].dt.day test_data['date_month'] = test_data['date'].dt.month test_data['date_day'] = test_data['date'].dt.day # - train_data.drop('date', axis=1, inplace=True) train_data.drop('campaign', axis=1, inplace=True) test_data.drop('date', axis=1, inplace=True) test_data.drop('campaign', axis=1, inplace=True) train_data = pd.get_dummies(train_data) test_data = pd.get_dummies(test_data) train_data.head() test_data.head() X_train = train_data.drop(['revenue'], axis='columns') y_train = train_data['revenue'] X_test = test_data # + from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) X_train_scaled X_test_scaled = scaler.transform(X_test) X_test_scaled # + import tensorflow as tf model = tf.keras.Sequential([ tf.keras.layers.Dense(70), tf.keras.layers.Dense(30), tf.keras.layers.Dense(1), ]) # 2. Compile the model model.compile( loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.Adam(), metrics = tf.keras.metrics.RootMeanSquaredError()) # 3. Fit the model model.fit(X_train_scaled, y_train, epochs= 10) # - preds = model.predict(X_test_scaled) preds = preds.astype('int64') preds[preds < 20] = 0 preds prediction = pd.DataFrame(preds, columns=['revenue']).to_csv('cleaning_1.csv', index=False) import pandas as pd import numpy as np train_data = pd.read_csv('Train_Data.csv') test_data = pd.read_csv('modified_test.csv') train_data.head() train_data.drop('date', axis=1, inplace=True) train_data.drop('campaign', axis=1, inplace=True) test_data.drop('date', axis=1, inplace=True) test_data.drop('campaign', axis=1, inplace=True) train_data.drop('ad', axis=1, inplace=True) test_data.drop('ad', axis=1, inplace=True) train_data = pd.get_dummies(train_data) test_data = pd.get_dummies(test_data) train_data.head() X_train = train_data.drop(['revenue'], axis='columns') y_train = train_data['revenue'] X_test = test_data # + from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) X_train_scaled X_test_scaled = scaler.transform(X_test) X_test_scaled # + import tensorflow as tf model = tf.keras.Sequential([ tf.keras.layers.Dense(80), tf.keras.layers.Dense(20), tf.keras.layers.Dense(20), tf.keras.layers.Dense(1), ]) # 2. Compile the model model.compile( loss = tf.keras.losses.mae, optimizer = tf.keras.optimizers.Adam(), metrics = tf.keras.metrics.RootMeanSquaredError()) # 3. 
Fit the model model.fit(X_train_scaled, y_train, epochs= 30) # - preds = model.predict(X_test_scaled) preds = preds.astype('int64') preds[preds < 20] = 0 preds prediction = pd.DataFrame(preds, columns=['revenue']).to_csv('cleaning_2.csv', index=False)
WEEK 6 (Project)/cleaning_test.ipynb
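The two Keras models above are judged only on their training loss; a minimal sketch, assuming X_train_scaled and y_train as prepared in that notebook, of adding a validation split and early stopping so the number of epochs is not chosen blindly. Layer sizes mirror the first model; this is a sketch, not the notebook's submission pipeline.

# Minimal sketch, assuming X_train_scaled and y_train exist as built above.
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(70),
    tf.keras.layers.Dense(30),
    tf.keras.layers.Dense(1),
])
model.compile(loss="mae",
              optimizer=tf.keras.optimizers.Adam(),
              metrics=[tf.keras.metrics.RootMeanSquaredError()])

# Stop when validation loss stops improving and keep the best weights seen so far.
early_stop = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=5,
                                              restore_best_weights=True)
history = model.fit(X_train_scaled, y_train,
                    validation_split=0.2,   # hold out 20% of the rows for validation
                    epochs=100,
                    callbacks=[early_stop],
                    verbose=0)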
;; --- ;; jupyter: ;; jupytext: ;; text_representation: ;; extension: .scm ;; format_name: light ;; format_version: '1.5' ;; jupytext_version: 1.14.4 ;; kernelspec: ;; display_name: MIT Scheme ;; language: scheme ;; name: mit-scheme ;; --- ;; + deletable=true editable=true 486 ;; + deletable=true editable=true (+ 137 349) ;; + deletable=true editable=true (- 1000 334) ;; + deletable=true editable=true (* 5 99) ;; + deletable=true editable=true (/ 10 5) ;; + deletable=true editable=true (+ 2.7 10) ;; + deletable=true editable=true (+ 21 35 12 7) ;; + deletable=true editable=true (* 25 4 12) ;; + deletable=true editable=true (+ (* 3 5) (- 10 5)) ;; + deletable=true editable=true (+ (* 3 (+ (* 2 4) (+ 3 5))) (+ (- 10 7) 6)) ;; + deletable=true editable=true (+ (* 3 (+ (* 2 4) (+ 3 5))) (+ (- 10 7) 6)) ;; + deletable=true editable=true (define size 2) size ;; + deletable=true editable=true (* 5 size) ;; + deletable=true editable=true (define pi 3.14159) (define radius 10) (* pi (* radius radius)) ;; + deletable=true editable=true (define circumference (* 2 pi radius)) circumference ;; + deletable=true editable=true (define (square x) (* x x)) ;; + deletable=true editable=true (square 21) ;; + deletable=true editable=true (square (+ 2 5)) ;; + deletable=true editable=true (square (square 3)) ;; + deletable=true editable=true (define (sum-of-squares x y) (+ (square x) (square y))) ;; + deletable=true editable=true (sum-of-squares 3 4) ;; + deletable=true editable=true (define (f a) (sum-of-squares (+ a 1) (* a 2))) (f 5) ;; + deletable=true editable=true (define (abs-cond x) (cond ((> x 0) x (= x 0) 0 (< x 0) (- x)))) (define (abs-if x) (if (< x 0) (- x) x)) ;; + deletable=true editable=true (define (>= x y) (and (> x y) (= x y))) ;; + deletable=true editable=true (/ (+ 5 4 (- 2 (- 3 (+ 6 (/ 4 5))))) (* 3 (- 6 2) (- 2 7)) ) ;; + deletable=true editable=true (define (ex1-3 a b c) (cond ( (and (>= a c) (>= b c)) (sum-of-squares a b) (and (>= b a) (>= c a)) (sum-of-squares b c) (else (sum-of-squares a c))))) ;; + deletable=true editable=true (define (p) (p)) (define (test x y) (if (= x 0) 0 y)) (test 0 (p)) ;; + deletable=true editable=true (define (square x) (* x x)) (define (sqrt-iter guess x) (if (good-enough? guess x) guess (sqrt-iter (improve guess x) x))) (define (improve guess x) (average guess (/ x guess))) (define (average x y) (/ (+ x y) 2)) (define (good-enough? guess x) (< (abs (- (square guess) x)) 0.001)) (define (squirt x) (sqrt-iter 1.0 x)) (squirt 0.001) ;; + deletable=true editable=true (define (square x) (* x x)) (define (sqrt-iter guess x) (if (good-enough? guess x) guess (sqrt-iter (improve guess x) x))) (define (improve guess x) (average guess (/ x guess))) (define (average x y) (/ (+ x y) 2)) (define tolerance 0.0001) (define (good-enough? guess x) (< (abs (- (square guess) x)) (* tolerance x))) (define (squirt x) (sqrt-iter 1.0 x)) (squirt 0.001) ;; + deletable=true editable=true (define (square x) (* x x)) (define (double x) (* 2 x)) (define (cube-root-iter guess x) (if (good-enough? guess x) guess (cube-root-iter (improve-cube guess x) x))) (define (improve-cube guess x) (/ (+ (/ x (square guess)) (double guess)) 3)) (define tolerance 0.0001) (define (good-enough? guess x) (< (abs (- (square guess) x)) (* tolerance x))) (define (cubrt x) (cube-root-iter 1.0 x)) (cubrt 100) ;; + deletable=true editable=true (define tolerance 0.0001) (define (average x y) (/ (+ x y) 2)) (define (square x) (* x x)) (define (squirtle x) (define (sqrt-iter guess) (if (good-enough? 
guess) guess (sqrt-iter (improve guess)))) (define (improve guess) (average guess (/ x guess))) (define (good-enough? guess) (< (abs (- (square guess) x)) (* tolerance x))) (sqrt-iter 1.0)) (squirtle 0.001)
Chapter 1 - Building Abstractions With Procedures/1.1 - Elements of Programming.ipynb
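The same square-root experiment, rewritten as a short Python sketch to make the point of the two good-enough? tests explicit: with a fixed absolute tolerance the Newton iteration stalls far from the root for small x, while a tolerance relative to x gives a sensible answer (SICP exercise 1.7, which the Scheme cells above work through with (squirt 0.001)).

# Minimal Python sketch of the tolerance comparison in the Scheme cells above.
def sqrt_iter(x, good_enough):
    guess = 1.0
    while not good_enough(guess, x):
        guess = (guess + x / guess) / 2.0    # improve: average guess and x/guess
    return guess

def absolute_ok(guess, x, eps=1e-3):
    return abs(guess * guess - x) < eps       # fixed absolute tolerance, as in the first version

def relative_ok(guess, x, tol=1e-4):
    return abs(guess * guess - x) < tol * x   # tolerance scaled by x, as in the second version

x = 0.001
print(sqrt_iter(x, absolute_ok))   # stops around 0.041, far from the true root
print(sqrt_iter(x, relative_ok))   # close to the true value 0.03162...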
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.3 64-bit (''pytorch'': conda)' # name: python38364bitpytorchconda70fdc7f787194f4c972bb3207dd25917 # --- # %matplotlib inline from d2l import torch as d2l import torch from torch import nn from torch.utils.data import DataLoader X = torch.normal(0.0, 1, (1000, 2)) A = torch.tensor([[1, 2], [-0.1, 0.5]]) b = torch.tensor([1, 2]) data = torch.mm(X, A) + b # + tags=[] d2l.set_figsize() d2l.plt.scatter(data[:100, 0].numpy(), data[:100, 1].numpy()); print(f'The covariance matrix is\n{torch.mm(A.T, A)}') # - batch_size = 8 data_iter = DataLoader(data, batch_size=batch_size) net_G = nn.Sequential(nn.Linear(2, 2)) net_D = nn.Sequential( nn.Linear(2, 5), nn.Tanh(), nn.Linear(5, 3), nn.Tanh(), nn.Linear(3, 1), nn.Sigmoid() ) def update_D(X, Z, net_D, net_G, loss, trainer_D): #@save """Update discriminator.""" batch_size = X.shape[0] ones = torch.ones(batch_size) zeros = torch.zeros(batch_size) trainer_D.zero_grad() real_Y = net_D(X) fake_X = net_G(Z) # Do not need to compute gradient for `net_G`, detach it from # computing gradients. fake_Y = net_D(fake_X.detach()) loss_D = (loss(real_Y, ones) + loss(fake_Y, zeros)) / 2 loss_D.backward() trainer_D.step() return loss_D def update_G(Z, net_D, net_G, loss, trainer_G): #@save """Update generator.""" batch_size = Z.shape[0] ones = torch.ones(batch_size) trainer_G.zero_grad() # We could reuse `fake_X` from `update_D` to save computation fake_X = net_G(Z) # Recomputing `fake_Y` is needed since `net_D` is changed fake_Y = net_D(fake_X) loss_G=loss(fake_Y,ones) loss_G.backward() trainer_G.step() return loss_G # https://atcold.github.io/pytorch-Deep-Learning/en/week09/09-3/ def train(net_D, net_G, data_iter, num_epochs, lr_D, lr_G, latent_dim, data): loss = nn.BCELoss() for w in net_D.parameters(): nn.init.normal_(w, 0, 0.02) for w in net_G.parameters(): nn.init.normal_(w, 0, 0.02) net_D.zero_grad() net_G.zero_grad() trainer_D = torch.optim.Adam(net_D.parameters(), lr=lr_D) trainer_G = torch.optim.Adam(net_G.parameters(), lr=lr_G) animator = d2l.Animator(xlabel='epoch', ylabel='loss', xlim=[1, num_epochs], nrows=2, figsize=(5, 5), legend=['discriminator', 'generator']) animator.fig.subplots_adjust(hspace=0.3) for epoch in range(num_epochs): # Train one epoch timer = d2l.Timer() metric = d2l.Accumulator(3) # loss_D, loss_G, num_examples for X in data_iter: batch_size = X.shape[0] Z = torch.normal(0, 1, size=(batch_size, latent_dim)) metric.add(update_D(X, Z, net_D, net_G, loss, trainer_D), update_G(Z, net_D, net_G, loss, trainer_G), batch_size) # Visualize generated examples Z = torch.normal(0, 1, size=(100, latent_dim)) fake_X = net_G(Z).detach().numpy() animator.axes[1].cla() animator.axes[1].scatter(data[:, 0], data[:, 1]) animator.axes[1].scatter(fake_X[:, 0], fake_X[:, 1]) animator.axes[1].legend(['real', 'generated']) # Show the losses loss_D, loss_G = metric[0]/metric[2], metric[1]/metric[2] animator.add(epoch + 1, (loss_D, loss_G)) print(f'loss_D {loss_D:.3f}, loss_G {loss_G:.3f}, ' f'{metric[2] / timer.stop():.1f} examples/sec') # + tags=[] lr_D, lr_G, latent_dim, num_epochs = 0.05, 0.005, 2, 20 train(net_D, net_G, data_iter, num_epochs, lr_D, lr_G, latent_dim, data[:100].numpy()) # -
chapter-generative-adversarial-networks/PR1308.ipynb
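Once training has run, one way to check the generator in the cells above, assuming net_G, A and b are still in scope: its single Linear layer is an affine map, so the covariance it induces on standard-normal noise is W Wᵀ, which should approach the target covariance Aᵀ A. The generator does not have to recover A itself; any W with W Wᵀ = Aᵀ A matches the data distribution.

# Minimal sketch, assuming net_G, A and b from the cells above and a finished training run.
import torch

with torch.no_grad():
    W = net_G[0].weight                      # (2, 2) weight of the generator's Linear layer
    c = net_G[0].bias                        # (2,) bias of the generator's Linear layer
    print("generator covariance:\n", W @ W.T)
    print("target covariance:\n", A.T @ A)
    print("generator mean offset:", c)
    print("target offset:", b)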
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + ## Line Graph year = [1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100] pop = [2.53, 2.57, 2.62, 2.67, 2.71, 2.76, 2.81, 2.86, 2.92, 2.97, 3.03, 3.08, 3.14, 3.2, 3.26, 3.33, 3.4, 3.47, 3.54, 3.62, 3.69, 3.77, 3.84, 3.92, 4.0, 4.07, 4.15, 4.22, 4.3, 4.37, 4.45, 4.53, 4.61, 4.69, 4.78, 4.86, 4.95, 5.05, 5.14, 5.23, 5.32, 5.41, 5.49, 5.58, 5.66, 5.74, 5.82, 5.9, 5.98, 6.05, 6.13, 6.2, 6.28, 6.36, 6.44, 6.51, 6.59, 6.67, 6.75, 6.83, 6.92, 7.0, 7.08, 7.16, 7.24, 7.32, 7.4, 7.48, 7.56, 7.64, 7.72, 7.79, 7.87, 7.94, 8.01, 8.08, 8.15, 8.22, 8.29, 8.36, 8.42, 8.49, 8.56, 8.62, 8.68, 8.74, 8.8, 8.86, 8.92, 8.98, 9.04, 9.09, 9.15, 9.2, 9.26, 9.31, 9.36, 9.41, 9.46, 9.5, 9.55, 9.6, 9.64, 9.68, 9.73, 9.77, 9.81, 9.85, 9.88, 9.92, 9.96, 9.99, 10.03, 10.06, 10.09, 10.13, 10.16, 10.19, 10.22, 10.25, 10.28, 10.31, 10.33, 10.36, 10.38, 10.41, 10.43, 10.46, 10.48, 10.5, 10.52, 10.55, 10.57, 10.59, 10.61, 10.63, 10.65, 10.66, 10.68, 10.7, 10.72, 10.73, 10.75, 10.77, 10.78, 10.79, 10.81, 10.82, 10.83, 10.84, 10.85] # Import matplotlib.pyplot as plt import matplotlib.pyplot as plt # Make a line plot: year on the x-axis, pop on the y-axis plt.plot(year, pop) # Display the plot with plt.show() plt.show() # -
Intermediate_Python_For_Data_Science/Line Chart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # <table align="left" width="100%"> <tr> # <td style="background-color:#ffffff;"><a href="https://qsoftware.lu.lv/index.php/qworld/" target="_blank"><img src="../images/qworld.jpg" width="35%" align="left"></a></td> # <td align="right" style="background-color:#ffffff;vertical-align:bottom;horizontal-align:right"> # prepared by <NAME> (<a href="http://qworld.lu.lv/index.php/qturkey/" target="_blank">QTurkey</a>) # </td> # </tr></table> # <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table> # $\newcommand{\Mod}[1]{\ (\mathrm{mod}\ #1)}$ # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\dot}[2]{ #1 \cdot #2} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ # $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ # <h1> <font color="blue"> Solutions for </font> Order Finding Algorithm</h1> # <a id="task1"></a> # <h3>Task 1</h3> # # Let $x=5$ and $N=21$. Plot $x^ i \Mod{N}$ for $i$ values between $1$ and $50$ and find the order of $x$. # <h3>Solution</h3> # + import matplotlib.pyplot as plt x=5 N=21 plt.plot([((x**i) % N) for i in range(50)]) # - # Let's check each integer and stop when we find an integer $r$ satisfying $x^r = 1 \Mod{N}$ # # $ 5^1 = 5 ~\mod 21 $ # # $ 5^2 = 4 ~\mod 21 $ # # $ 5^3 = 20 \mod 21 $ # # $ 5^4 = 16 \mod 21 $ # # $ 5^5 = 17 \mod 21 $ # # $ 5^6 = 1 ~\mod 21 $ # # Hence the order $r$ is equal to 6. # <a id="task2"></a> # <h3>Task 2 (on paper)</h3> # # Let $\ket{\psi_0}=\ket{1 \Mod{N}}+\ket{x\Mod{N}}+\ket{x^2\Mod{N}}+ \cdots + \ket{x^{r-1}\Mod{N}}$. # # What is $U_x \ket{\psi_0}$? 
What can you conclude about $\ket{\psi_0}$? # # Repeat the same task for $\ket{\psi_1}=\ket{1}+ \omega^{-1}\ket{x\Mod{N}}+\omega^{-2}\ket{x^2\Mod{N}}+ \cdots + \omega^{-(r-1)} \ket{x^{r-1}\Mod{N}}$ where $\omega=e^{-\frac{2{\pi}i}{r}}$. # <h3>Solution</h3> # \begin{align*} # U_x\ket{\psi_0} &= U_x\ket{1 \Mod{N}} + U_x \ket{x \Mod{N}}+ \dots + U_x \ket{x^{r-1} \Mod{N}}\\ # &= \ket{x \Mod{N}} + \ket{x^2\Mod{N}}+ \dots + \ket{1\Mod{N}} \\ # &= \ket{\psi_0} # \end{align*} # # Hence we can conclude that $\ket{\psi_0}$ is an eigenvector of the operator $U_x$ with eigenvalue 1. # Let's repeat the same for $\ket{\psi_1}$. # # \begin{align*} # U_x\ket{\psi_1} &= U_x\ket{1 \Mod{N}} + \omega^{-1}U_x \ket{x \Mod{N}}+ \dots +\omega^{-(r-1)} U_x \ket{x^{r-1} \Mod{N}}\\ # &= \ket{x \Mod{N}} + \omega^{-1}\ket{x^2 \Mod{N}}+ \dots + \omega^{-(r-1)}\ket{1 \Mod{N}} \\ # &= \omega( \omega^{-1}\ket{x \Mod{N}} + \omega^{-2}\ket{x^2 \Mod{N}}+ \dots + \omega^{-(r)}\ket{1 \Mod{N}}) \\ # &= \omega\ket{\psi_1} # \end{align*} # # Hence, $\ket{\psi_1}$ is an eigenvector of the operator $U_x$ with an eigenvalue $\omega$. # <a id="task3"></a> # <h3>Task 3 (on paper)</h3> # # Show that $\displaystyle \frac{1}{\sqrt{r}}\sum_{s=0}^{r-1}\ket{u_s}= \ket{1}$. # <h3>Solution</h3> # Let's replace $\ket{u_s}$ in the sum above. We have the following expression: # # $ \displaystyle \frac{1}{\sqrt{r}}\sum_{s=0}^{r-1}\ket{u_s}= \frac{1}{\sqrt{r}}\sum_{s=0}^{r-1}\frac{1}{\sqrt{r}}\sum_{k=0}^{r-1}e^{\frac{-2{\pi}i s k}{r}}\ket{{x^k} \Mod{N}}$ # If $ k=0 $, $ \displaystyle \sum_{s=0}^{r-1}e^{\frac{-2{\pi}i s k}{r}} = r $ and the state $ \ket{1} $ has amplitude 1 in which case for all other $ k $, all amplitudes are equal to 0. Hence $ \displaystyle \frac{1}{\sqrt{r}}\sum_{s=0}^{r-1}\ket{u_s}=\ket{1}.$ # # <a id="task4"></a> # # <h3>Task 4</h3> # # Find the continued fractions expression for $\frac{31}{13}$ and find the convergents first using pen and paper and then using the functions defined above. # <h3>Solution</h3> # $\frac{31}{13}$ can be expressed as $\frac{31}{13}=2+\frac{5}{13}$. Continuing like this, # # \begin{align*} # =2+\frac{1}{2+\frac{3}{5}} = 2+\frac{1}{2+\frac{1}{\frac{5}{3}}} # \end{align*} # # # The resulting expression will be # # \begin{align*} # 2+\frac{1}{2+\frac{1}{1 + \frac{1}{1+ \frac{1}{2}}}} # \end{align*} # # with the continued fraction expression $[2,2,1,1,2]$. # # The convergents are $c_1=2$, $c_2=2 + \frac{1}{2} = \frac{5}{2} $, $c_3 = 2 + \frac{1}{2 + \frac{1}{1}} = \frac{7}{3}$, $c_4 = 2+ \frac{ 1}{2 + \frac{1}{1 + \frac{1}{1}}} = \frac{12}{5}$, $c_5 = 2+\frac{1}{2+\frac{1}{1 + \frac{1}{1+ \frac{1}{2}}}} # = \frac{31}{13}$ # Let's find the continued fractions expression and convergents for $\frac{31}{13}$ using the functions defined in the notebook. # %run ../include/helpers.py cf=contFrac(31/13) print(cf) cv=convergents(cf) print(cv) # <a id="task5"></a> # # <h3>Task 5</h3> # # You are given a function named $U_x$ which implements $ U_x \ket{y} \rightarrow \ket{xy {\Mod{N}}}$ and returns its controlled version. Run the following cell to load the function. # %run operator.py from silver.orderFinding import Ux, U, CU import qiskit c = qiskit.QuantumCircuit(3) c.append(Ux(1,2), [0, 1]) c.draw() # In order to use the function you should pass $x$ and $N$ as parameter. # # ```python # CU=Ux(x,N) # ``` # Let $x=3$ and $N=20$. Use phase estimation procedure to find the estimates for ${s}/{r}$. Pick the correct values for $t$ and $L$. You can use the `qpe` function you have already implemented. 
Plot your results using a histogram. Where do the peaks occur? # <h3>Solution</h3> # # %load qpe.py import cirq def qpe(t,control, target, circuit, CU): #Apply Hadamard to control qubits circuit.append(cirq.H.on_each(control)) #Apply CU gates for i in range(t): #Obtain the power of CU gate CUi = CU**(2**i) #Apply CUi gate where t-i-1 is the control circuit.append(CUi(control[t-i-1],*target)) #Apply inverse QFT iqft(t,control,circuit) # + # # %load iqft.py import cirq from cirq.circuits import InsertStrategy from cirq import H, SWAP, CZPowGate def iqft(n,qubits,circuit): #Swap the qubits for i in range(n//2): circuit.append(SWAP(qubits[i],qubits[n-i-1]), strategy = InsertStrategy.NEW) #For each qubit for i in range(n-1,-1,-1): #Apply CR_k gates where j is the control and i is the target k=n-i #We start with k=n-i for j in range(n-1,i,-1): #Define and apply CR_k gate crk = CZPowGate(exponent = -2/2**(k)) circuit.append(crk(qubits[j],qubits[i]),strategy = InsertStrategy.NEW) k=k-1 #Decrement at each step #Apply Hadamard to the qubit circuit.append(H(qubits[i]),strategy = InsertStrategy.NEW) # - # $t$ should be picked as $2L + 1 + \big \lceil \log \big( 2 + \frac{1}{2\epsilon} \big) \big \rceil$. Let $\epsilon=0.1$. First let's find $L$. import math L=math.ceil(math.log2(20)) print(L) # $ L= \big \lceil \log N \big \rceil $ = $\big \lceil \log 20 \big \rceil =5.$ e = 0.1 num_t = 2*L+1+math.ceil(math.log2(2+1/(2*e))) print(num_t) # + import cirq import matplotlib #Create a circuit circuit = cirq.Circuit() #Assign the size of the registers t=num_t n=L #Create control and target qubits control = [cirq.LineQubit(i) for i in range(1,t+1) ] target = [cirq.LineQubit(i) for i in range(t+1,t+1+n) ] circuit.append(cirq.X(target[n-1])) #Create operator CU x=3 N=20 CU=Ux(x,N) #Call phase estimation circuit qpe(t,control, target, circuit, CU) #Measure the control register circuit.append(cirq.measure(*control, key='result')) #Sample the circuit s=cirq.Simulator() print('Sample the circuit:') samples=s.run(circuit, repetitions=1000) # Print a histogram of results results= samples.histogram(key='result') print(results) # + import matplotlib.pyplot as plt plt.bar([str(key) for key in results.keys()], results.values()) plt.show() # - # <a id="task6"></a> # <h3>Task 6</h3> # # For each one of the possible outcomes in Task 5, try to find out the value of $r$ using continued fractions algorithm. You can use the functions defined above. # <h3>Solution</h3> # The outcomes are 0, 4096, 8192 and 12288. # - From 0, we don't get any meaningful result. # # - Let's check $\frac{4096}{2^{14}}$. # %run ../include/helpers.py cf = contFrac(4096/2**14) cv = convergents(cf) print(cv) # The candidate is $s'=1$ and $r'=4$. Indeed 4 is the answer. Let's check the other cases as well. cf = contFrac(8192/2**14) cv = convergents(cf) print(cv) # From 8192, we can not get the correct result. The reason is that $s=2$ and $r=4$ which are not relatively prime and as a result we get $s'=1$ and $r'=2$. cf = contFrac(12288/2**14) cv = convergents(cf) print(cv) # The candidate is $s'=3$ and $r'=4$. We get the correct result. # <a id="task7"></a> # <h3>Task 7</h3> # # Repeat Task 5 and Task 6 with $x$=5 and $N=42$. 
# <h3>Solution</h3> # %run operator.py # # %load qpe.py import cirq def qpe(t,control, target, circuit, CU): #Apply Hadamard to control qubits circuit.append(cirq.H.on_each(control)) #Apply CU gates for i in range(t): #Obtain the power of CU gate CUi = CU**(2**i) #Apply CUi gate where t-i-1 is the control circuit.append(CUi(control[t-i-1],*target)) #Apply inverse QFT iqft(t,control,circuit) # + # # %load iqft.py import cirq from cirq.circuits import InsertStrategy from cirq import H, SWAP, CZPowGate def iqft(n,qubits,circuit): #Swap the qubits for i in range(n//2): circuit.append(SWAP(qubits[i],qubits[n-i-1]), strategy = InsertStrategy.NEW) #For each qubit for i in range(n-1,-1,-1): #Apply CR_k gates where j is the control and i is the target k=n-i #We start with k=n-i for j in range(n-1,i,-1): #Define and apply CR_k gate crk = CZPowGate(exponent = -2/2**(k)) circuit.append(crk(qubits[j],qubits[i]),strategy = InsertStrategy.NEW) k=k-1 #Decrement at each step #Apply Hadamard to the qubit circuit.append(H(qubits[i]),strategy = InsertStrategy.NEW) # - # $t$ should be picked as $2L + 1 + \big \lceil \log \big( 2 + \frac{1}{2\epsilon} \big) \big \rceil$. Let $\epsilon=0.1$. First let's find $L$. import math L=math.ceil(math.log2(42)) print(L) # $ L= \big \lceil \log N \big \rceil $ = $ L= \big \lceil \log 42 \big \rceil =6.$ e = 0.1 num_t = 2*L+1+math.ceil(math.log2(2+1/(2*e))) print(num_t) # + import cirq import matplotlib #Create a circuit circuit = cirq.Circuit() #Assign the size of the registers t=num_t n=L #Create control and target qubits control = [cirq.LineQubit(i) for i in range(1,t+1) ] target = [cirq.LineQubit(i) for i in range(t+1,t+1+n) ] circuit.append(cirq.X(target[n-1])) #Create operator CU x=5 N=42 CU=Ux(x,N) #Call phase estimation circuit qpe(t,control, target, circuit, CU) #Measure the control register circuit.append(cirq.measure(*control, key='result')) #Sample the circuit s=cirq.Simulator() print('Sample the circuit:') samples=s.run(circuit, repetitions=1000) # Print a histogram of results results= samples.histogram(key='result') print(results) # + import matplotlib.pyplot as plt plt.bar(results.keys(), results.values()) plt.show() # - # The peaks occur at 0, 10923, 21845 32768, 43691, 54613. # - From 0, we don't get any meaningful result. # # - Let's check $\frac{10923}{2^{16}}$. cf = contFrac(10923/2**16) cv = convergents(cf) print(cv) # The candidates are $s'=1$, $r'=5$ and $s''=1$, $r''=6$. Indeed the answer is 6. cf = contFrac(21845/2**16) cv = convergents(cf) print(cv) # From 21845, we can not get the correct result. The reason is that $s=2$ and $r=6$ which are not relatively prime and as a result we get $s'=1$ and $r'=3$. cf = contFrac(32768/2**16) cv = convergents(cf) print(cv) # From 32768, we can not get the correct result. The reason is that $s=3$ and $r=6$ which are not relatively prime and as a result we get $s'=1$ and $r'=2$. cf = contFrac(43691/2**16) cv = convergents(cf) print(cv) # From 43691, we can not get the correct result. The reason is that $s=4$ and $r=6$ which are not relatively prime and as a result we get $s'=2$ and $r'=3$. cf = contFrac(54613/2**16) cv = convergents(cf) print(cv) # The candidates are $s'=4$, $r'=5$ and $s''=5$, $r''=6$.
silver/D04_Order_Finding_Algorithm_Solutions.ipynb
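An alternative, standard-library route to the continued-fraction step used in Tasks 4-7: Fraction.limit_denominator(N) returns the best rational approximation of the measured phase with denominator at most N, which is exactly the s/r candidate the order-finding post-processing needs. The outcomes below are the Task 5 peaks (x = 3, N = 20, t = 14 control qubits).

# Minimal sketch: recover candidate orders r from the measured phases with the
# fractions module, as an alternative to the contFrac/convergents helpers above.
from fractions import Fraction

N = 20
t = 14
for outcome in (0, 4096, 8192, 12288):          # peaks observed in Task 5
    phase = Fraction(outcome, 2**t)             # estimate of s/r
    approx = phase.limit_denominator(N)         # best s'/r' with r' <= N
    print(f"outcome {outcome:5d}: phase {float(phase):.4f} -> s/r candidate {approx}")

# 4096 and 12288 yield denominator 4, the true order of 3 mod 20;
# 8192 reduces to 1/2 because s = 2 and r = 4 share a common factor.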
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib import matplotlib.pyplot as plt import jax import jax.numpy as np from jax.random import PRNGKey import numpyro import numpyro.distributions as dist from numpyro.infer import MCMC, NUTS, Predictive import pandas as pd import covid import covid.util as util import covid.models.SEIRD_variable_detection import covid.models.SEIRD_bridge import covid.models.SEIRD_incident # - # # Run Inference # + #data = util.load_state_data() data = util.load_data() start = '2020-03-04' end = '2020-08-02' model_type = covid.models.SEIRD_incident.SEIRD #places = ['PR'] places = ['NC-Hertford'] places = ['CO-Mineral'] places = ['NY-New York City'] places = ['NV-Clark'] places = ['MO'] for place in places: util.run_place(data, place, start=start, end=end, model_type=model_type, rw_scale=1e-1, resample_high=80, resample_low=0, num_warmup=1000, num_samples=1000) # - # + #data = util.load_state_data() data = util.load_county_data() start = '2020-03-04' end=None #places = ['PR'] places = ['NC-Hertford'] save = True # Inspect and Save Results for place in places: util.gen_forecasts(data, place, model_type=model_type, start=start, end=end, save=save)
scripts/SIR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.10 64-bit (''momo'': conda)' # name: python3710jvsc74a57bd0f8af005c6536e801c34ed1a329b6361f501640f22aff193c57cfc4ad0e1dbb64 # --- # # 2 - Updated Sentiment Analysis # # In the previous notebook, we got the fundamentals down for sentiment analysis. In this notebook, we'll actually get decent results. # # We will use: # - packed padded sequences # - pre-trained word embeddings # - different RNN architecture # - bidirectional RNN # - multi-layer RNN # - regularization # - a different optimizer # # This will allow us to achieve ~84% test accuracy. # ## Preparing Data # # As before, we'll set the seed, define the `Fields` and get the train/valid/test splits. # # We'll be using *packed padded sequences*, which will make our RNN only process the non-padded elements of our sequence, and for any padded element the `output` will be a zero tensor. To use packed padded sequences, we have to tell the RNN how long the actual sequences are. We do this by setting `include_lengths = True` for our `TEXT` field. This will cause `batch.text` to now be a tuple with the first element being our sentence (a numericalized tensor that has been padded) and the second element being the actual lengths of our sentences. # + import torch from torchtext import data SEED = 1234 torch.manual_seed(SEED) torch.backends.cudnn.deterministic = True TEXT = data.Field(tokenize = 'spacy', tokenizer_language = 'en_core_web_sm', include_lengths = True) LABEL = data.LabelField(dtype = torch.float) # - # We then load the IMDb dataset. # + from torchtext import datasets train_data, test_data = datasets.IMDB.splits(TEXT, LABEL) # - # Then create the validation set from our training set. # + import random train_data, valid_data = train_data.split(random_state = random.seed(SEED)) # - # Next is the use of pre-trained word embeddings. Now, instead of having our word embeddings initialized randomly, they are initialized with these pre-trained vectors. # We get these vectors simply by specifying which vectors we want and passing it as an argument to `build_vocab`. `TorchText` handles downloading the vectors and associating them with the correct words in our vocabulary. # # Here, we'll be using the `"glove.6B.100d" vectors"`. `glove` is the algorithm used to calculate the vectors, go [here](https://nlp.stanford.edu/projects/glove/) for more. `6B` indicates these vectors were trained on 6 billion tokens and `100d` indicates these vectors are 100-dimensional. # # You can see the other available vectors [here](https://github.com/pytorch/text/blob/master/torchtext/vocab.py#L113). # # The theory is that these pre-trained vectors already have words with similar semantic meaning close together in vector space, e.g. "terrible", "awful", "dreadful" are nearby. This gives our embedding layer a good initialization as it does not have to learn these relations from scratch. # # **Note**: these vectors are about 862MB, so watch out if you have a limited internet connection. # # By default, TorchText will initialize words in your vocabulary but not in your pre-trained embeddings to zero. We don't want this, and instead initialize them randomly by setting `unk_init` to `torch.Tensor.normal_`. This will now initialize those words via a Gaussian distribution. 
# + MAX_VOCAB_SIZE = 25_000 TEXT.build_vocab(train_data, max_size = MAX_VOCAB_SIZE, vectors = "glove.6B.100d", unk_init = torch.Tensor.normal_) LABEL.build_vocab(train_data) # - # As before, we create the iterators, placing the tensors on the GPU if one is available. # # Another thing for packed padded sequences all of the tensors within a batch need to be sorted by their lengths. This is handled in the iterator by setting `sort_within_batch = True`. # + BATCH_SIZE = 64 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits( (train_data, valid_data, test_data), batch_size = BATCH_SIZE, sort_within_batch = True, device = device) # - # ## Build the Model # # The model features the most drastic changes. # # ### Different RNN Architecture # # We'll be using a different RNN architecture called a Long Short-Term Memory (LSTM). Why is an LSTM better than a standard RNN? Standard RNNs suffer from the [vanishing gradient problem](https://en.wikipedia.org/wiki/Vanishing_gradient_problem). LSTMs overcome this by having an extra recurrent state called a _cell_, $c$ - which can be thought of as the "memory" of the LSTM - and the use use multiple _gates_ which control the flow of information into and out of the memory. For more information, go [here](https://colah.github.io/posts/2015-08-Understanding-LSTMs/). We can simply think of the LSTM as a function of $x_t$, $h_t$ and $c_t$, instead of just $x_t$ and $h_t$. # # $$(h_t, c_t) = \text{LSTM}(x_t, h_t, c_t)$$ # # Thus, the model using an LSTM looks something like (with the embedding layers omitted): # # ![image.png](attachment:image.png) # # The initial cell state, $c_0$, like the initial hidden state is initialized to a tensor of all zeros. The sentiment prediction is still, however, only made using the final hidden state, not the final cell state, i.e. $\hat{y}=f(h_T)$. # # ### Bidirectional RNN # # The concept behind a bidirectional RNN is simple. As well as having an RNN processing the words in the sentence from the first to the last (a forward RNN), we have a second RNN processing the words in the sentence from the **last to the first** (a backward RNN). At time step $t$, the forward RNN is processing word $x_t$, and the backward RNN is processing word $x_{T-t+1}$. # # In PyTorch, the hidden state (and cell state) tensors returned by the forward and backward RNNs are stacked on top of each other in a single tensor. # # We make our sentiment prediction using a concatenation of the last hidden state from the forward RNN (obtained from final word of the sentence), $h_T^\rightarrow$, and the last hidden state from the backward RNN (obtained from the first word of the sentence), $h_T^\leftarrow$, i.e. $\hat{y}=f(h_T^\rightarrow, h_T^\leftarrow)$ # # The image below shows a bi-directional RNN, with the forward RNN in orange, the backward RNN in green and the linear layer in silver. # # ![image-2.png](attachment:image-2.png) # # ### Multi-layer RNN # # Multi-layer RNNs (also called *deep RNNs*) are another simple concept. The idea is that we add additional RNNs on top of the initial standard RNN, where each RNN added is another *layer*. The hidden state output by the first (bottom) RNN at time-step $t$ will be the input to the RNN above it at time step $t$. The prediction is then made from the final hidden state of the final (highest) layer. # # The image below shows a multi-layer unidirectional RNN, where the layer number is given as a superscript. 
Also note that each layer needs their own initial hidden state, $h_0^L$. # # ![image-3.png](attachment:image-3.png) # # ### Regularization # # Although we've added improvements to our model, each one adds additional parameters. Without going into overfitting into too much detail, the more parameters you have in in your model, the higher the probability that your model will overfit (memorize the training data, causing a low training error but high validation/testing error, i.e. poor generalization to new, unseen examples). To combat this, we use regularization. More specifically, we use a method of regularization called *dropout*. Dropout works by randomly *dropping out* (setting to 0) neurons in a layer during a forward pass. The probability that each neuron is dropped out is set by a hyperparameter and each neuron with dropout applied is considered indepenently. One theory about why dropout works is that a model with parameters dropped out can be seen as a "weaker" (less parameters) model. The predictions from all these "weaker" models (one for each forward pass) get averaged together withinin the parameters of the model. Thus, your one model can be thought of as an ensemble of weaker models, none of which are over-parameterized and thus should not overfit. # # ### Implementation Details # # Another addition to this model is that we are not going to learn the embedding for the `<pad>` token. This is because we want to explitictly tell our model that padding tokens are irrelevant to determining the sentiment of a sentence. This means the embedding for the pad token will remain at what it is initialized to (we initialize it to all zeros later). We do this by passing the index of our pad token as the `padding_idx` argument to the `nn.Embedding` layer. # # To use an LSTM instead of the standard RNN, we use `nn.LSTM` instead of `nn.RNN`. Also, note that the LSTM returns the `output` and a tuple of the final `hidden` state and the final `cell` state, whereas the standard RNN only returned the `output` and final `hidden` state. # # As the final hidden state of our LSTM has both a forward and a backward component, which will be concatenated together, the size of the input to the `nn.Linear` layer is twice that of the hidden dimension size. # # Implementing bidirectionality and adding additional layers are done by passing values for the `num_layers` and `bidirectional` arguments for the RNN/LSTM. # # Dropout is implemented by initializing an `nn.Dropout` layer (the argument is the probability of dropping out each neuron) and using it within the `forward` method after each layer we want to apply dropout to. **Note**: never use dropout on the input or output layers (`text` or `fc` in this case), you only ever want to use dropout on intermediate layers. The LSTM has a `dropout` argument which adds dropout on the connections between hidden states in one layer to hidden states in the next layer. # # As we are passing the lengths of our sentences to be able to use packed padded sequences, we have to add a second argument, `text_lengths`, to `forward`. # # Before we pass our embeddings to the RNN, we need to pack them, which we do with `nn.utils.rnn.packed_padded_sequence`. This will cause our RNN to only process the non-padded elements of our sequence. The RNN will then return `packed_output` (a packed sequence) as well as the `hidden` and `cell` states (both of which are tensors). 
Without packed padded sequences, `hidden` and `cell` are tensors from the last element in the sequence, which will most probably be a pad token, however when using packed padded sequences they are both from the last non-padded element in the sequence. Note that the `lengths` argument of `packed_padded_sequence` must be a CPU tensor so we explicitly make it one by using `.to('cpu')`. # # We then unpack the output sequence, with `nn.utils.rnn.pad_packed_sequence`, to transform it from a packed sequence to a tensor. The elements of `output` from padding tokens will be zero tensors (tensors where every element is zero). Usually, we only have to unpack output if we are going to use it later on in the model. Although we aren't in this case, we still unpack the sequence just to show how it is done. # # The final hidden state, `hidden`, has a shape of _**[num layers * num directions, batch size, hid dim]**_. These are ordered: **[forward_layer_0, backward_layer_0, forward_layer_1, backward_layer 1, ..., forward_layer_n, backward_layer n]**. As we want the final (top) layer forward and backward hidden states, we get the top two hidden layers from the first dimension, `hidden[-2,:,:]` and `hidden[-1,:,:]`, and concatenate them together before passing them to the linear layer (after applying dropout). # + import torch.nn as nn class RNN(nn.Module): def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, bidirectional, dropout, pad_idx): super().__init__() self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx) self.rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers, bidirectional=bidirectional, dropout=dropout) self.fc = nn.Linear(hidden_dim * 2, output_dim) self.dropout = nn.Dropout(dropout) def forward(self, text, text_lengths): #text = [sent len, batch size] embedded = self.dropout(self.embedding(text)) #embedded = [sent len, batch size, emb dim] #pack sequence # lengths need to be on CPU! packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths.to('cpu')) packed_output, (hidden, cell) = self.rnn(packed_embedded) #unpack sequence output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output) #output = [sent len, batch size, hid dim * num directions] #output over padding tokens are zero tensors #hidden = [num layers * num directions, batch size, hid dim] #cell = [num layers * num directions, batch size, hid dim] #concat the final forward (hidden[-2,:,:]) and backward (hidden[-1,:,:]) hidden layers #and apply dropout hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)) #hidden = [batch size, hid dim * num directions] return self.fc(hidden) # - # Like before, we'll create an instance of our RNN class, with the new parameters and arguments for the number of layers, bidirectionality and dropout probability. # # To ensure the pre-trained vectors can be loaded into the model, the `EMBEDDING_DIM` must be equal to that of the pre-trained GloVe vectors loaded earlier. # # We get our pad token index from the vocabulary, getting the actual string representing the pad token from the field's `pad_token` attribute, which is `<pad>` by default. # + INPUT_DIM = len(TEXT.vocab) EMBEDDING_DIM = 100 HIDDEN_DIM = 256 OUTPUT_DIM = 1 N_LAYERS = 2 BIDIRECTIONAL = True DROPOUT = 0.5 PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token] model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, BIDIRECTIONAL, DROPOUT, PAD_IDX) # - # We'll print out the number of parameters in our model. 
# # Notice how we have almost twice as many parameters as before! # + def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') # - # The final addition is copying the pre-trained word embeddings we loaded earlier into the `embedding` layer of our model. # # We retrieve the embeddings from the field's vocab, and check they're the correct size, _**[vocab size, embedding dim]**_ # + pretrained_embeddings = TEXT.vocab.vectors print(pretrained_embeddings.shape) # - # We then replace the initial weights of the `embedding` layer with the pre-trained embeddings. # # **Note**: this should always be done on the `weight.data` and not the `weight`! model.embedding.weight.data.copy_(pretrained_embeddings) # As our `<unk>` and `<pad>` token aren't in the pre-trained vocabulary they have been initialized using `unk_init` (an $\mathcal{N}(0,1)$ distribution) when building our vocab. It is preferable to initialize them both to all zeros to explicitly tell our model that, initially, they are irrelevant for determining sentiment. # # We do this by manually setting their row in the embedding weights matrix to zeros. We get their row by finding the index of the tokens, which we have already done for the padding index. # # **Note**: like initializing the embeddings, this should be done on the `weight.data` and not the `weight`! # + UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token] model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM) model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM) print(model.embedding.weight.data) # - # We can now see the first two rows of the embedding weights matrix have been set to zeros. As we passed the index of the pad token to the `padding_idx` of the embedding layer it will remain zeros throughout training, however the `<unk>` token embedding will be learned. # ## Train the Model # Now to training the model. # # The only change we'll make here is changing the optimizer from `SGD` to `Adam`. SGD updates all parameters with the same learning rate and choosing this learning rate can be tricky. `Adam` adapts the learning rate for each parameter, giving parameters that are updated more frequently lower learning rates and parameters that are updated infrequently higher learning rates. More information about `Adam` (and other optimizers) can be found [here](http://ruder.io/optimizing-gradient-descent/index.html). # # To change `SGD` to `Adam`, we simply change `optim.SGD` to `optim.Adam`, also note how we do not have to provide an initial learning rate for Adam as PyTorch specifies a sensibile default initial learning rate. # + import torch.optim as optim optimizer = optim.Adam(model.parameters()) # - # The rest of the steps for training the model are unchanged. # # We define the criterion and place the model and criterion on the GPU (if available)... # + criterion = nn.BCEWithLogitsLoss() model = model.to(device) criterion = criterion.to(device) # - # We implement the function to calculate accuracy... def binary_accuracy(preds, y): """ Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8 """ #round predictions to the closest integer rounded_preds = torch.round(torch.sigmoid(preds)) correct = (rounded_preds == y).float() #convert into float for division acc = correct.sum() / len(correct) return acc # We define a function for training our model. 
# # As we have set `include_lengths = True`, our `batch.text` is now a tuple with the first element being the numericalized tensor and the second element being the actual lengths of each sequence. We separate these into their own variables, `text` and `text_lengths`, before passing them to the model. # # **Note**: as we are now using dropout, we must remember to use `model.train()` to ensure the dropout is "turned on" while training. def train(model, iterator, optimizer, criterion): epoch_loss = 0 epoch_acc = 0 model.train() for batch in iterator: optimizer.zero_grad() text, text_lengths = batch.text predictions = model(text, text_lengths).squeeze(1) loss = criterion(predictions, batch.label) acc = binary_accuracy(predictions, batch.label) loss.backward() optimizer.step() epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(iterator), epoch_acc / len(iterator) # Then we define a function for testing our model, again remembering to separate `batch.text`. # # **Note**: as we are now using dropout, we must remember to use `model.eval()` to ensure the dropout is "turned off" while evaluating. def evaluate(model, iterator, criterion): epoch_loss = 0 epoch_acc = 0 model.eval() with torch.no_grad(): for batch in iterator: text, text_lengths = batch.text predictions = model(text, text_lengths).squeeze(1) loss = criterion(predictions, batch.label) acc = binary_accuracy(predictions, batch.label) epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(iterator), epoch_acc / len(iterator) # And also create a nice function to tell us how long our epochs are taking. # + import time def epoch_time(start_time, end_time): elapsed_time = end_time - start_time elapsed_mins = int(elapsed_time / 60) elapsed_secs = int(elapsed_time - (elapsed_mins * 60)) return elapsed_mins, elapsed_secs # - # Finally, we train our model... # + N_EPOCHS = 5 best_valid_loss = float('inf') for epoch in range(N_EPOCHS): start_time = time.time() train_loss, train_acc = train(model, train_iterator, optimizer, criterion) valid_loss, valid_acc = evaluate(model, valid_iterator, criterion) end_time = time.time() epoch_mins, epoch_secs = epoch_time(start_time, end_time) if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save(model.state_dict(), 'tut2-model.pt') print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s') print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%') print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%') # - # ...and get our new and vastly improved test accuracy! # + model.load_state_dict(torch.load('tut2-model.pt')) test_loss, test_acc = evaluate(model, test_iterator, criterion) print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%') # - # ## User Input # # We can now use our model to predict the sentiment of any sentence we give it. As it has been trained on movie reviews, the sentences provided should also be movie reviews. # # When using a model for inference it should always be in evaluation mode. If this tutorial is followed step-by-step then it should already be in evaluation mode (from doing `evaluate` on the test set), however we explicitly set it to avoid any risk. # # Our `predict_sentiment` function does a few things: # - sets the model to evaluation mode # - tokenizes the sentence, i.e. 
splits it from a raw string into a list of tokens # - indexes the tokens by converting them into their integer representation from our vocabulary # - gets the length of our sequence # - converts the indexes, which are a Python list into a PyTorch tensor # - add a batch dimension by `unsqueeze`ing # - converts the length into a tensor # - squashes the output prediction from a real number between 0 and 1 with the `sigmoid` function # - converts the tensor holding a single value into an integer with the `item()` method # # We are expecting reviews with a negative sentiment to return a value close to 0 and positive reviews to return a value close to 1. # + import spacy nlp = spacy.load('en_core_web_sm') def predict_sentiment(model, sentence): model.eval() tokenized = [tok.text for tok in nlp.tokenizer(sentence)] indexed = [TEXT.vocab.stoi[t] for t in tokenized] length = [len(indexed)] tensor = torch.LongTensor(indexed).to(device) tensor = tensor.unsqueeze(1) length_tensor = torch.LongTensor(length) prediction = torch.sigmoid(model(tensor, length_tensor)) return prediction.item() # - # An example negative review... predict_sentiment(model, "This film is terrible") # An example positive review... predict_sentiment(model, "This film is great") # ## Next Steps # # We've now built a decent sentiment analysis model for movie reviews! In the next notebook we'll implement a model that gets comparable accuracy with far fewer parameters and trains much, much faster.
2 - Upgraded Sentiment Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + import matplotlib.pyplot as plt # %matplotlib inline import numpy as np import numexpr as ne from scipy.ndimage import correlate1d from dphutils import scale import scipy.signal from timeit import Timer # + import pyfftw # test monkey patching (it doesn't work for rfftn) a = pyfftw.empty_aligned((512, 512), dtype="complex128") b = pyfftw.empty_aligned((512, 512), dtype="complex128") a[:] = np.random.randn(512, 512) + 1j * np.random.randn(512, 512) b[:] = np.random.randn(512, 512) + 1j * np.random.randn(512, 512) t = Timer(lambda: scipy.signal.fftconvolve(a, b, "same")) print("Time with scipy.fftpack: %1.3f seconds" % t.timeit(number=10)) # Monkey patch in fftn and ifftn from pyfftw.interfaces.scipy_fftpack scipy.signal.signaltools.fftn = pyfftw.interfaces.scipy_fftpack.fftn scipy.signal.signaltools.ifftn = pyfftw.interfaces.scipy_fftpack.ifftn scipy.signal.signaltools.fftpack = pyfftw.interfaces.scipy_fftpack # can't monkey patch the rfft because it's used through np in the package. scipy.signal.fftconvolve(a, b, "same") # We cheat a bit by doing the planning first # Turn on the cache for optimum performance pyfftw.interfaces.cache.enable() print("Time with monkey patched scipy_fftpack: %1.3f seconds" % t.timeit(number=10)) # + # Testing the best method to enforce positivity constraint. a = np.random.randn(1e3, 1e3) print(a.max(), a.min()) # %timeit a[a<0] = 0 print(a.max(), a.min()) a = np.random.randn(1e3, 1e3) b = np.zeros_like(a) print(a.max(), a.min()) # %timeit c = np.minimum(a,b) print(a.max(), a.min()) # + # testing speedups for numexpr a = np.random.randn(2**9, 2**9) b = np.random.randn(2**9, 2**9) # %timeit a-b # %timeit ne.evaluate("a-b") # %timeit a/b # %timeit ne.evaluate("a/b") # + # Standard Richardson-Lucy form skimage from skimage import color, data, restoration camera = color.rgb2gray(data.camera()) from scipy.signal import convolve2d psf = np.ones((5, 5)) / 25 camera = convolve2d(camera, psf, "same") camera += 0.1 * camera.std() * np.random.poisson(size=camera.shape) deconvolved = restoration.richardson_lucy(camera, psf, 30, False) plt.matshow(camera, cmap="Greys_r") plt.matshow(deconvolved, cmap="Greys_r", vmin=camera.min(), vmax=camera.max()) # + # test monkey patching properly. from pyfftw.interfaces.numpy_fft import ifftshift, fftshift, fftn, ifftn, rfftn, irfftn from scipy.signal.signaltools import ( _rfft_lock, _rfft_mt_safe, _next_regular, _check_valid_mode_shapes, _centered, ) def fftconvolve2(in1, in2, mode="full"): if in1.ndim == in2.ndim == 0: # scalar inputs return in1 * in2 elif not in1.ndim == in2.ndim: raise ValueError("in1 and in2 should have the same dimensionality") elif in1.size == 0 or in2.size == 0: # empty arrays return array([]) s1 = np.array(in1.shape) s2 = np.array(in2.shape) complex_result = np.issubdtype(in1.dtype, complex) or np.issubdtype(in2.dtype, complex) shape = s1 + s2 - 1 if mode == "valid": _check_valid_mode_shapes(s1, s2) # Speed up FFT by padding to optimal size for FFTPACK fshape = [_next_regular(int(d)) for d in shape] fslice = tuple([slice(0, int(sz)) for sz in shape]) # Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make # sure we only call rfftn/irfftn from one thread at a time. 
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)): try: ret = irfftn(rfftn(in1, fshape) * rfftn(in2, fshape), fshape)[fslice].copy() finally: if not _rfft_mt_safe: _rfft_lock.release() else: # If we're here, it's either because we need a complex result, or we # failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and # is already in use by another thread). In either case, use the # (threadsafe but slower) SciPy complex-FFT routines instead. ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy() if not complex_result: ret = ret.real if mode == "full": return ret elif mode == "same": return _centered(ret, s1) elif mode == "valid": return _centered(ret, s1 - s2 + 1) else: raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.") # - # %timeit scipy.signal.fftconvolve(camera, psf, 'same') # %timeit fftconvolve2(camera, psf, 'same') # + def tv(im): """ Calculate the total variation image (1) <NAME>.; <NAME>.; <NAME>. Application of Regularized Richardson–Lucy Algorithm for Deconvolution of Confocal Microscopy Images. Journal of Microscopy 2011, 243 (2), 124–140. dx.doi.org/10.1111/j.1365-2818.2011.03486.x """ def m(a, b): """ As described in (1) """ return (sign(a) + sign(b)) / 2 * minimum(abs(a), abs(b)) ndim = im.ndim g = np.zeros_like(p) i = 0 # g stores the gradients of out along each axis # e.g. g[0] is the first order finite difference along axis 0 for ax in range(ndim): a = 2 * ax # backward difference g[a] = correlate1d(im, [-1, 1], ax) # forward difference g[a + 1] = correlate1d(im, [-1, 1], ax, origin=-1) eps = finfo(float).eps oym, oyp, oxm, oxp = g return oxm * oxp / sqrt(oxp**2 + m(oyp, oym) ** 2 + eps) + oym * oyp / sqrt( oyp**2 + m(oxp, oxm) ** 2 + eps ) def rl_update(convolve_method, kwargs): """ A function that represents the core rl operation: $u^{(t+1)} = u^{(t)}\cdot\left(\frac{d}{u^{(t)}\otimes p}\otimes \hat{p}\right)$ Parameters ---------- image : ndarray original image to be deconvolved u_tm1 : ndarray previous u_t u_tp1 psf convolve_method """ image = kwargs["image"] psf = kwargs["psf"] # use the prediction step to iterate on y_t = kwargs["y_t"] u_t = kwargs["u_t"] u_tm1 = kwargs["u_tm1"] g_tm1 = kwargs["g_tm1"] psf_mirror = psf[::-1, ::-1] blur = convolve_method(y_t, psf, "same") relative_blur = ne.evaluate("image / blur") blur_blur = convolve_method(relative_blur, psf_mirror, "same") u_tp1 = ne.evaluate("y_t*blur_blur") u_tp1[u_tp1 < 0] = 0 # update kwargs.update( dict( u_tm2=u_tm1, u_tm1=u_t, u_t=u_tp1, blur=blur_blur, g_tm2=g_tm1, g_tm1=ne.evaluate("u_tp1 - y_t"), ) ) def richardson_lucy(image, psf, iterations=50, clip=False): """Richardson-Lucy deconvolution. Parameters ---------- image : ndarray Input degraded image (can be N dimensional). psf : ndarray The point spread function. iterations : int Number of iterations. This parameter plays the role of regularisation. clip : boolean, optional True by default. If true, pixel value of the result above 1 or under -1 are thresholded for skimage pipeline compatibility. Returns ------- im_deconv : ndarray The deconvolved image. Examples -------- >>> from skimage import color, data, restoration >>> camera = color.rgb2gray(data.camera()) >>> from scipy.signal import convolve2d >>> psf = np.ones((5, 5)) / 25 >>> camera = convolve2d(camera, psf, 'same') >>> camera += 0.1 * camera.std() * np.random.standard_normal(camera.shape) >>> deconvolved = restoration.richardson_lucy(camera, psf, 5, False) References ---------- .. 
[1] http://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution """ # Stolen from the dev branch of skimage because stable branch is slow # compute the times for direct convolution and the fft method. The fft is of # complexity O(N log(N)) for each dimension and the direct method does # straight arithmetic (and is O(n*k) to add n elements k times) direct_time = np.prod(image.shape + psf.shape) fft_time = np.sum([n * np.log(n) for n in image.shape + psf.shape]) # see whether the fourier transform convolution method or the direct # convolution method is faster (discussed in scikit-image PR #1792) time_ratio = 40.032 * fft_time / direct_time if time_ratio <= 1 or len(image.shape) > 2: convolve_method = fftconvolve2 else: convolve_method = convolve image = image.astype(np.float) psf = psf.astype(np.float) im_deconv = 0.5 * np.ones(image.shape) psf_mirror = psf[::-1, ::-1] rl_dict = dict( image=image, u_tm2=None, u_tm1=None, g_tm2=None, g_tm1=None, u_t=None, y_t=image, psf=psf ) for i in range(iterations): # d/(u_t \otimes p) rl_update(convolve_method, rl_dict) alpha = 0 if rl_dict["g_tm1"] is not None and rl_dict["g_tm2"] is not None and i > 1: alpha = (rl_dict["g_tm1"] * rl_dict["g_tm2"]).sum() / (rl_dict["g_tm2"] ** 2).sum() alpha = max(min(alpha, 1), 0) if alpha != 0: if rl_dict["u_tm1"] is not None: h1_t = rl_dict["u_t"] - rl_dict["u_tm1"] h1_t if rl_dict["u_tm2"] is not None: h2_t = rl_dict["u_t"] - 2 * rl_dict["u_tm1"] + rl_dict["u_tm2"] else: h2_t = 0 else: h1_t = 0 else: h2_t = 0 h1_t = 0 rl_dict["y_t"] = rl_dict["u_t"] + alpha * h1_t + alpha**2 / 2 * h2_t rl_dict["y_t"][rl_dict["y_t"] < 0] = 0 im_deconv = rl_dict["u_t"] if clip: im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv < -1] = -1 return rl_dict # + deconvolved2 = richardson_lucy(camera, psf, 10) plt.matshow(camera, cmap="Greys_r") plt.matshow(np.real(deconvolved2["u_t"]), cmap="Greys_r", vmin=camera.min(), vmax=camera.max()) # - # %timeit deconvolved2 = richardson_lucy(camera, psf, 10)
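# For reference, the cell below sketches a plain, unaccelerated Richardson-Lucy loop (an addition for comparison only). It restates the core multiplicative update that `rl_update` performs, without the vector acceleration, numexpr, or the patched FFTs, so its output can be used to sanity-check the accelerated version.

# +
def richardson_lucy_plain(image, psf, iterations=10):
    """Unaccelerated Richardson-Lucy deconvolution (reference sketch)."""
    image = image.astype(float)
    psf = psf.astype(float)
    psf_mirror = psf[::-1, ::-1]
    u = 0.5 * np.ones_like(image)        # flat starting estimate
    eps = np.finfo(float).eps            # guard against division by zero
    for _ in range(iterations):
        blur = scipy.signal.fftconvolve(u, psf, "same") + eps
        relative_blur = image / blur
        u = u * scipy.signal.fftconvolve(relative_blur, psf_mirror, "same")
        u[u < 0] = 0                     # enforce non-negativity
    return u

plain_deconv = richardson_lucy_plain(camera, psf, 10)
plt.matshow(plain_deconv, cmap="Greys_r", vmin=camera.min(), vmax=camera.max())
# -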
notebooks/Lucy Richardson Deconvolution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] cell_id="00001-bcb75845-52fd-40a7-aae6-95cdd2da7845" deepnote_cell_type="markdown" output_cleared=false tags=[] # ## Comparison of the Machine Learning models performances for diabetes patients # # Mini-project to play with the classification methods: SVM, KNN and Random Forests. The goal is to understand the behavior of the classification methods and their applicability in different setups. # + [markdown] cell_id="00009-6ca87429-7ba1-400a-a878-6fa92e8a997b" deepnote_cell_type="markdown" output_cleared=false tags=[] # **Context:** # This dataset is originally from the National Institute of Diabetes and Digestive and Kidney Diseases. The objective of the dataset is to diagnostically predict whether or not a patient has diabetes, based on certain diagnostic measurements included in the dataset. Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage. # # **Content:** # The datasets consists of several medical predictor variables and one target variable, Outcome. Predictor variables includes the number of pregnancies the patient has had, their BMI, insulin level, age, and so on. # # # # **The dataset is composed of:** # - Pregnancies: Number of time pregnant # - Glucose: Plasma glucose concentration a 2 hours in an oral glucose tolerance test # - Blood Pressure: Diastolic blood pressure (mm Hg) # - SkinThickness: Triceps skinfold thickness (mm) # - Insulin: 2-Hour serum insulin (mu U/ml) # - BMI: Body mass index (weight in kg/(height in m)^2) # - Diabetes pedigree function: Diabetes pedigree function # - Age: Age(years) # - Outcome: 0 or 1 # - # ### Import packages and data # + cell_id="00001-31d7e691-baa5-4740-a6f8-ae82d1cac981" deepnote_cell_type="code" execution_millis=805 execution_start=1605894975794 output_cleared=false source_hash="614ba537" tags=[] import pandas as pd import numpy as np import math from datetime import datetime import time from sklearn.datasets import load_digits import seaborn as sns import matplotlib.pyplot as plt import missingno as msno import struct from sklearn import metrics from sklearn import svm from sklearn.preprocessing import StandardScaler from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.metrics import confusion_matrix from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score,confusion_matrix,precision_score,recall_score,f1_score,roc_auc_score,roc_curve,make_scorer from sklearn.model_selection import train_test_split,cross_val_score,RandomizedSearchCV # %matplotlib inline from sklearn.inspection import permutation_importance # - df_diabetes = pd.read_csv('diabetes.csv') # + cell_id="00006-265fcea9-ab37-4735-b589-eb61993c6dc5" deepnote_cell_type="code" execution_millis=21 output_cleared=false source_hash="7a30aae0" tags=[] df_diabetes.head() # + cell_id="00007-120d9725-c77f-4e19-9604-1191637740a5" deepnote_cell_type="code" execution_millis=1 output_cleared=false source_hash="92d690fa" tags=[] df_diabetes.shape # + cell_id="00011-3575fc9d-b271-43af-ae64-21868ff1df26" deepnote_cell_type="code" execution_millis=3 output_cleared=false 
source_hash="9a1b4753" tags=[] df_diabetes.dtypes # + cell_id="00012-f769ed14-bb73-4328-ab85-7bf5fcc34111" deepnote_cell_type="code" execution_millis=5 output_cleared=false source_hash="909689fb" tags=[] df_diabetes.describe() # + [markdown] cell_id="00013-3bd2b07c-5b79-497a-8214-993c54207275" deepnote_cell_type="markdown" output_cleared=false tags=[] # As you can see above some of the variables (Glucose,BloodPressure,SkinThickness,Insulin,BMI) have 0 as minimum value - which is impossible. # # Therefore we evaluate them as missing values and replace them by NaN and will later fill them by the median. # + cell_id="00013-bc4f97df-3fad-47f1-a0c5-1d3b3cd364e1" deepnote_cell_type="code" execution_millis=3 output_cleared=false source_hash="80e7c86f" tags=[] # Replacing 0 values by NaN df_diabetes[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']] = df_diabetes[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']].replace(0,np.NaN) # + cell_id="00009-c406f669-b426-439a-a861-c078ad321227" deepnote_cell_type="code" execution_millis=456 output_cleared=false source_hash="29751a30" tags=[] # Check for missing values msno.matrix(df_diabetes) # + cell_id="00016-d640e073-08a5-4529-9ee6-9b7183c82540" deepnote_cell_type="code" execution_millis=1 output_cleared=false source_hash="2e2e2456" tags=[] # Filling NaN values df_diabetes['Glucose'].fillna(df_diabetes['Glucose'].median(), inplace = True) df_diabetes['BloodPressure'].fillna(df_diabetes['BloodPressure'].median(), inplace = True) df_diabetes['SkinThickness'].fillna(df_diabetes['SkinThickness'].median(), inplace = True) df_diabetes['Insulin'].fillna(df_diabetes['Insulin'].median(), inplace = True) df_diabetes['BMI'].fillna(df_diabetes['BMI'].mean(), inplace = True) # + cell_id="00013-c7236a82-f9d9-4e2e-86fb-66522863071d" deepnote_cell_type="code" execution_millis=8 output_cleared=false source_hash="5f47c132" tags=[] # Check the features print("Features: ", df_diabetes.columns) # check the target outcome df_diabetes.groupby('Outcome').size() # + [markdown] cell_id="00045-1f5eef9f-5bc0-4ed3-8760-e9e698969682" deepnote_cell_type="markdown" output_cleared=false tags=[] # #### Data visualization # + cell_id="00045-7d543023-36dc-490f-bc7b-d37b69d64738" deepnote_cell_type="code" execution_millis=1098 output_cleared=false source_hash="20dfcd52" tags=[] df_diabetes.hist(figsize=(10,8)) # + cell_id="00047-455e6daa-b1be-44f3-a57c-590470a984a3" deepnote_cell_type="code" execution_millis=1053 output_cleared=false source_hash="a484e863" tags=[] fig, axarr = plt.subplots(3, 2, figsize=(12, 8)) sns.distplot(df_diabetes['Pregnancies'],ax=axarr[0][0]) sns.distplot(df_diabetes['Glucose'],ax=axarr[0][1]) sns.distplot(df_diabetes['BloodPressure'],ax=axarr[1][0]) sns.distplot(df_diabetes['SkinThickness'],ax=axarr[1][1]) sns.distplot(df_diabetes['Insulin'],ax=axarr[2][0]) sns.distplot(df_diabetes['BMI'],ax=axarr[2][1]) plt.subplots_adjust(hspace=1) # + cell_id="00047-b5a8f145-0ca3-466b-b3a9-fce9ad50bf68" deepnote_cell_type="code" execution_millis=707 output_cleared=false source_hash="93daea7" tags=[] df_diabetes.plot(kind= 'box' , subplots=True, layout=(3,3), sharex=False, sharey=False, figsize=(10,8)) # + cell_id="00048-f48f643b-ffba-4aed-9960-95be1908a56d" deepnote_cell_type="code" execution_millis=1004 output_cleared=false source_hash="8530a8e9" tags=[] #Look for outliers in the df_diabetes fig, axarr = plt.subplots(4, 2, figsize=(12, 20)) sns.boxplot(df_diabetes['Outcome'], df_diabetes['Pregnancies'],ax=axarr[0][0]) sns.boxplot(df_diabetes['Outcome'], 
df_diabetes['Glucose'],ax=axarr[0][1]) sns.boxplot(df_diabetes['Outcome'], df_diabetes['BloodPressure'],ax=axarr[1][0]) sns.boxplot(df_diabetes['Outcome'], df_diabetes['SkinThickness'],ax=axarr[1][1]) sns.boxplot(df_diabetes['Outcome'], df_diabetes['Insulin'],ax=axarr[2][0]) sns.boxplot(df_diabetes['Outcome'], df_diabetes['BMI'],ax=axarr[2][1]) sns.boxplot(df_diabetes['Outcome'], df_diabetes['Age'],ax=axarr[3][0]) sns.boxplot(df_diabetes['Outcome'], df_diabetes['DiabetesPedigreeFunction'],ax=axarr[3][1]) # + cell_id="00014-2df78184-0981-421a-817e-0b5144228819" deepnote_cell_type="code" execution_millis=10 output_cleared=false source_hash="87303280" tags=[] X = df_diabetes.drop(columns='Outcome',axis=1) #X = df_diabetes[['Pregnancies','BMI','DiabetesPedigreeFunction']] Y = df_diabetes['Outcome'] X_train_diabetes, X_test_diabetes, Y_train_diabetes, Y_test_diabetes = train_test_split(X,Y, test_size=0.2,random_state=42) # 80% training and 20% test #Standard scaling sc = StandardScaler() X_train = sc.fit_transform(X_train_diabetes) X_test = sc.transform(X_test_diabetes) # + cell_id="00016-9e17f18c-7247-4f95-b39a-7b66964096d4" deepnote_cell_type="code" execution_millis=858 output_cleared=false source_hash="2040614d" tags=[] # Getting correlations of each features corrmat = df_diabetes.corr() top_corr_features = corrmat.index plt.figure(figsize = (10,8)) g = sns.heatmap(df_diabetes[top_corr_features].corr(),annot = True,cmap = "YlGnBu") # + [markdown] cell_id="00014-71774e51-b238-4969-a492-5611dd13a767" deepnote_cell_type="markdown" output_cleared=false tags=[] # ### 1. SVM # + cell_id="00014-6fbdb9a3-3616-4e5e-a297-03e0ec12e467" deepnote_cell_type="code" execution_millis=2712 output_cleared=false source_hash="351f7c41" tags=[] svm_t_start = time.time() #Create a svm Classifier svm_diabetes = svm.SVC(kernel='linear') # Linear Kernel svm_diabetes.fit(X_train_diabetes, Y_train_diabetes) y_pred_diabetes = svm_diabetes.predict(X_test_diabetes) print(f'Training took (s): {time.time()-svm_t_start}') # + cell_id="00016-5009adab-dd4a-4bdd-8ff0-180aafb6b010" deepnote_cell_type="code" execution_millis=4 output_cleared=false source_hash="c7bbbc60" tags=[] # Model Accuracy: how often is the classifier correct? svm_accuracy = metrics.accuracy_score(Y_test_diabetes, y_pred_diabetes) print("Accuracy:",svm_accuracy) #positive predictive value svm_precision = metrics.precision_score(Y_test_diabetes, y_pred_diabetes) print("Precision:",svm_precision) #sensitivity svm_recall = metrics.recall_score(Y_test_diabetes, y_pred_diabetes) print("Recall:",svm_recall) # + cell_id="00024-0ce64c77-ed7a-4ff7-a4be-8f7bb5326c84" deepnote_cell_type="code" execution_millis=3 output_cleared=false source_hash="654c5e21" tags=[] print (metrics.classification_report(Y_test_diabetes, y_pred_diabetes)) # + cell_id="00017-8f498551-f1a0-4fa6-8c82-83b00b54955f" deepnote_cell_type="code" execution_millis=21278 output_cleared=false source_hash="1cbbf0ce" tags=[] #Apply k-fold validation here svm_cv_diabetes = cross_val_score(svm_diabetes,X_train_diabetes,Y_train_diabetes,cv=10) svm_cv_diabetes svm_cv_mean = np.mean(svm_cv_diabetes) print(svm_cv_diabetes) print('svm_cv_diabetes mean:{}'.format(svm_cv_mean)) # + [markdown] cell_id="00021-fbf1a304-35c3-469c-8dcf-733c54ee13b2" deepnote_cell_type="markdown" output_cleared=false tags=[] # #### C Parameter # # The C parameter tells the SVM optimization how much you want to avoid misclassifying each training example. 
For large values of C, the optimization will choose a smaller-margin hyperplane if that hyperplane does a better job of getting all the training points classified correctly. Conversely, a very small value of C will cause the optimizer to look for a larger-margin separating hyperplane, even if that hyperplane misclassifies more points. For very tiny values of C, you should get misclassified examples, often even if your training data is linearly separable. # + cell_id="00022-33b99017-7a18-4627-89cb-6d78d980454f" deepnote_cell_type="code" execution_millis=1 output_cleared=false source_hash="9a2715d" tags=[] def cparameter(i): model = svm.SVC(kernel='linear',C=i) model.fit(X_train_diabetes, Y_train_diabetes) y_pred = model.predict(X_test_diabetes) matrix = confusion_matrix(Y_test_diabetes, y_pred) print(metrics.accuracy_score(Y_test_diabetes, y_pred)) print (matrix) # + cell_id="00024-ff981dd3-4cf6-4d59-861e-1ac8adb0db1b" deepnote_cell_type="code" execution_millis=15276 output_cleared=false source_hash="f20e9d53" tags=[] l = [0.1,1,10] for i in l: print("for the value of c = ", i) cparameter(i) # + [markdown] cell_id="00023-0fbe9141-4d44-4eff-8032-a881150e8942" deepnote_cell_type="markdown" output_cleared=false tags=[] # #### GridSearchCV # + cell_id="00023-eae2760b-88b6-4c17-a32f-c7ee1dc94893" deepnote_cell_type="code" execution_millis=0 output_cleared=false source_hash="57f652cf" tags=[] param_grid = [ {'C': [0.1,1,10,100], 'gamma': [0.01,0.001], 'kernel': ['rbf','sigmoid']}, ] # + cell_id="00024-e9797bee-b3bf-4ee5-b4c1-debf3f2a8024" deepnote_cell_type="code" execution_millis=3767 output_cleared=false source_hash="850a38c" tags=[] grid=GridSearchCV(svm_diabetes,param_grid=param_grid, cv=10, n_jobs=-1) grid.fit(X_train_diabetes,Y_train_diabetes) y_pred=grid.predict(X_test_diabetes) # + cell_id="00025-d8de1a42-1a58-4c2b-9e9c-2f85b026a743" deepnote_cell_type="code" execution_millis=4 output_cleared=false source_hash="4fc6697" tags=[] grid.best_score_ # + cell_id="00026-7d13ea8b-6542-49dd-971f-1b1ca8acd60d" deepnote_cell_type="code" execution_millis=2 output_cleared=false source_hash="bc2baa2b" tags=[] grid.best_params_ # + [markdown] cell_id="00034-1ae7b966-608f-47d6-9836-8b6fce9de232" deepnote_cell_type="markdown" output_cleared=false tags=[] # #### AUC # + cell_id="00035-656b3468-1494-4606-a35c-fb347982a3b9" deepnote_cell_type="code" execution_millis=2 output_cleared=false source_hash="769d2ac7" tags=[] fpr, tpr, thresholds = metrics.roc_curve(Y_test_diabetes, y_pred_diabetes) svm_auc = metrics.auc(fpr, tpr) # + [markdown] cell_id="00055-6779b68d-e621-4a9c-9237-7f8a8566d045" deepnote_cell_type="markdown" output_cleared=false tags=[] # #### Feature Importance # + cell_id="00056-027682a4-9ba2-472c-8bc4-443f291576d6" deepnote_cell_type="code" execution_millis=112 output_cleared=false source_hash="9a27cbcb" tags=[] pd.Series(abs(svm_diabetes.coef_[0]), index=X_train_diabetes.columns).nlargest(10).plot(kind='barh') # + cell_id="00057-64ad2486-2b8d-40f6-99c2-b99a43ea5ca6" deepnote_cell_type="code" execution_millis=1 output_cleared=false source_hash="dac7d2c8" tags=[] svm_feature_imp = abs(svm_diabetes.coef_[0]) # + [markdown] cell_id="00030-415989c8-f86a-46c2-9b01-c78e801ac0e4" deepnote_cell_type="markdown" output_cleared=false tags=[] # ### 2. 
KNN # + cell_id="00031-e7b28bed-c622-421d-85c6-caa52cec874c" deepnote_cell_type="code" execution_millis=489 output_cleared=false source_hash="c389c087" tags=[] neighbors = np.arange(1,18) train_accuracy =np.empty(len(neighbors)) test_accuracy = np.empty(len(neighbors)) for i,k in enumerate(neighbors): knn = KNeighborsClassifier(n_neighbors=k) knn.fit(X_train_diabetes, Y_train_diabetes) train_accuracy[i] = knn.score(X_train_diabetes, Y_train_diabetes) test_accuracy[i] = knn.score(X_test_diabetes, Y_test_diabetes) # + cell_id="00027-206cdf1d-9ed2-491c-bdbf-a587cdc7cd11" deepnote_cell_type="code" execution_millis=164 output_cleared=false source_hash="f6e0c235" tags=[] plt.figure(figsize=(12,7)) plt.title('k for k-NN') plt.plot(neighbors, test_accuracy, label='Testing Accuracy') plt.plot(neighbors, train_accuracy, label='Training accuracy') plt.legend() plt.xlabel('Number of neighbors') plt.ylabel('Accuracy') plt.show() # + [markdown] cell_id="00036-2ef62b69-9720-43e6-a9fe-a6f9909b0c69" deepnote_cell_type="markdown" output_cleared=false tags=[] # Observation: We can observe above that we get maximum testing accuracy for k=12 # + cell_id="00037-705e8f2a-c27c-422e-8e8e-d700a150373b" deepnote_cell_type="code" execution_millis=9 output_cleared=false source_hash="4cb33dc" tags=[] knn_t_start = time.time() # Create KNN Classifier knn_diabetes = KNeighborsClassifier(n_neighbors=14) knn_diabetes.fit(X_train_diabetes, Y_train_diabetes) knn_y_pred_diabetes = knn_diabetes.predict(X_test_diabetes) print(f'Training took (s): {time.time()-knn_t_start}') # + cell_id="00038-ca6ef599-bd65-465f-b9cc-439e844c901b" deepnote_cell_type="code" execution_millis=5 output_cleared=false source_hash="a7fc26d8" tags=[] # Model Accuracy: how often is the classifier correct? knn_accuracy = metrics.accuracy_score(Y_test_diabetes, knn_y_pred_diabetes) print("Accuracy:",knn_accuracy) # Model Precision: what percentage of positive tuples are labeled as such? knn_precision = metrics.precision_score(Y_test_diabetes, knn_y_pred_diabetes) print("Precision:",knn_precision) # Model Recall: what percentage of positive tuples are labelled as such? 
knn_recall = metrics.recall_score(Y_test_diabetes, knn_y_pred_diabetes) print("Recall:",knn_recall) # + cell_id="00039-61f17f24-db62-436b-9721-1d5793c03092" deepnote_cell_type="code" execution_millis=45 output_cleared=false source_hash="affb6952" tags=[] #Apply k-fold validation here knn_cv_diabetes = cross_val_score(knn_diabetes,X_train_diabetes,Y_train_diabetes,cv=5) knn_cv_mean = np.mean(knn_cv_diabetes) print(knn_cv_diabetes) print('knn_cv_scores mean:{}'.format(knn_cv_mean)) # + [markdown] cell_id="00072-295c226a-24fd-4f76-85cc-e890bdf09d7e" deepnote_cell_type="markdown" output_cleared=false tags=[] # #### GridSearchCV # + cell_id="00073-4fa4f1a4-7237-420b-a5ad-23dcfc588ff7" deepnote_cell_type="code" execution_millis=1 output_cleared=false source_hash="30340992" tags=[] param_grid_knn = [ {'n_neighbors': [10,12,14], 'weights': ['uniform','distance'], 'metric': ['euclidean','manhattan']}, ] # + cell_id="00072-262a48d6-30a7-445f-9fa2-5045e12c4cc5" deepnote_cell_type="code" execution_millis=1029 output_cleared=false source_hash="5340e821" tags=[] grid_knn=GridSearchCV(knn_diabetes,param_grid=param_grid_knn, cv=10, n_jobs=-1) grid_knn.fit(X_train_diabetes,Y_train_diabetes) y_pred=grid.predict(X_test_diabetes) # + cell_id="00077-b22d1a6c-aa2c-4236-a4c3-ff7483cb3900" deepnote_cell_type="code" execution_millis=2 output_cleared=false source_hash="71d0ebe4" tags=[] grid_knn.best_score_ # + cell_id="00078-8399891b-6b85-4b5b-8910-463d9c8a3877" deepnote_cell_type="code" execution_millis=1 output_cleared=false source_hash="e44d3110" tags=[] grid_knn.best_params_ # + [markdown] cell_id="00042-a2f23f3d-0c84-4c9d-9199-d7c94b47e4da" deepnote_cell_type="markdown" output_cleared=false tags=[] # ### Feature Importance # + cell_id="00043-5e0a6efd-2e8c-4a98-9550-07e25dcbfe9e" deepnote_cell_type="code" execution_millis=908 output_cleared=false source_hash="12a152f8" tags=[] results = permutation_importance(knn_diabetes, X_train_diabetes, Y_train_diabetes, scoring='accuracy') knn_feature_imp = results.importances_mean # + cell_id="00070-d8685a32-a90d-413c-8704-74eaa7fb4213" deepnote_cell_type="code" execution_millis=5 output_cleared=false source_hash="498e9b28" tags=[] knn_feature_imp # + cell_id="00044-74a2fe65-757a-487b-9ce7-ea6446eed7d3" deepnote_cell_type="code" execution_millis=162 output_cleared=false source_hash="497b83dc" tags=[] index = np.array(X_train_diabetes.columns) df_knn_imp_features = pd.DataFrame({'score':np.array(knn_feature_imp), 'features':np.array(X_train_diabetes.columns)}, index=index) df_knn_imp_features.sort_values(by="score", ascending = False).plot.bar(rot=90) # + [markdown] cell_id="00045-a526491e-02e6-4802-a1d5-bc49272f9d1f" deepnote_cell_type="markdown" output_cleared=false tags=[] # #### AUC # + cell_id="00046-49b679aa-4be5-419d-b111-3078b4a2da85" deepnote_cell_type="code" execution_millis=1 output_cleared=false source_hash="16de4ed4" tags=[] fpr, tpr, thresholds = metrics.roc_curve(Y_test_diabetes, knn_y_pred_diabetes) knn_auc = metrics.auc(fpr, tpr) # + [markdown] cell_id="00031-dd16fc84-b8f5-438b-8f89-6647ec30ff33" deepnote_cell_type="markdown" output_cleared=false tags=[] # ### 3. 
Random Forests # + cell_id="00034-e93c213b-9c35-4904-9dfb-f6319da149f4" deepnote_cell_type="code" execution_millis=44 output_cleared=false source_hash="6877a34" tags=[] # Instantiating random forest classifier clf = RandomForestClassifier(oob_score = True,n_jobs = -1,random_state = 100) clf # + cell_id="00039-2166c79d-9912-43ee-9ab3-a20d278cafec" deepnote_cell_type="code" execution_millis=0 output_cleared=false source_hash="abc2bb8c" tags=[] # Creating Predictor Matrix X = df_diabetes.drop('Outcome',axis = 1) # Target variable y = df_diabetes['Outcome'] # + cell_id="00039-d3ef6407-fe0a-4158-ba1c-9705de83e520" deepnote_cell_type="code" execution_millis=0 output_cleared=false source_hash="25e13116" tags=[] # Splitting the matrices into random train & test subsets where test data contains 25% data and rest considered as training data X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2,random_state = 200) # + cell_id="00038-2d59f301-ed89-4aea-8215-c77761fefb41" deepnote_cell_type="code" execution_millis=2047 output_cleared=false source_hash="634ff14f" tags=[] # Performing K-fold cross validation with 5 folds scores = cross_val_score(clf,X_train,y_train,cv = 5,scoring = "f1_macro") scores.mean() # + cell_id="00039-81df645d-1b2b-4010-9add-5cd4816ffe3e" deepnote_cell_type="code" execution_millis=303 output_cleared=false source_hash="ff1ef80" tags=[] # Building a forest of trees from training set knn_t_start = time.time() clf.fit(X_train,y_train) print(f'Training took (s): {time.time()-knn_t_start}') # + cell_id="00040-519b4013-2384-4ada-95d5-5387b716603e" deepnote_cell_type="code" execution_millis=209 output_cleared=false source_hash="13fc754c" tags=[] #Predicting on classifier created train_pred = clf.predict(X_train) test_pred = clf.predict(X_test) # + cell_id="00041-1e602e3b-365f-42ec-8682-51c68d334e78" deepnote_cell_type="code" execution_millis=7 output_cleared=false source_hash="d9f3aa53" tags=[] # Finding F1 score of training and testing sets print("The training F1 score is: ",f1_score(train_pred,y_train)) print("The testing F1 score is :",f1_score(test_pred,y_test)) # + [markdown] cell_id="00047-613cac7a-7f75-48c5-ab5e-0a6cdaf52da8" deepnote_cell_type="markdown" output_cleared=false tags=[] # #### Model is overfitting as testing F1 score is lower than training. Hyperparameter tuning needs to be done and looking for high F1 score that is why scorer variable is defined. 
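# Before tuning, the out-of-bag estimate (available because the forest was instantiated with `oob_score = True`) offers another quick read on generalization. This short check is an addition to the original analysis, shown as a sketch.

# +
# OOB accuracy of the untuned forest -- an internal hold-out style estimate
# that complements the train/test F1 comparison above.
print("OOB accuracy of the untuned forest:", clf.oob_score_)
# -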
# + cell_id="00042-2cba4005-1478-4f4a-84f9-d3c6de84cbcc" deepnote_cell_type="code" execution_millis=0 output_cleared=false source_hash="464d6254" tags=[] # Tuning hyperparameters parameters = { "max_depth":[2,3,4], "n_estimators":[100,104,106], "min_samples_split":[3,4,5], "min_samples_leaf":[4,8,9] } # + cell_id="00111-c79d05c1-4e3f-46e2-bb66-3be69f859e77" deepnote_cell_type="code" execution_millis=3 output_cleared=false source_hash="82b5f633" tags=[] scorer = make_scorer(f1_score) # + cell_id="00043-06e414eb-35be-46ba-8bac-5f5fc90d629c" deepnote_cell_type="code" execution_millis=19359 output_cleared=false source_hash="b454524e" tags=[] # Using Randomized Search CV to find best optimal hyperparameter that best describe a classifier clf1 = RandomizedSearchCV(clf,parameters,scoring = scorer) # Fitting the model clf1.fit(X_train,y_train) # Getting best estimator having high score best_clf_random = clf1.best_estimator_ best_clf_random # + cell_id="00044-80f1f522-dd82-4e49-b4f9-c5fcaeb0862e" deepnote_cell_type="code" execution_millis=1812 output_cleared=false source_hash="5dbf3250" tags=[] # Again, finding cross validation score scores = cross_val_score(best_clf_random,X_train,y_train,cv = 5,scoring = "f1_macro") rf_cv_mean = scores.mean() # + [markdown] cell_id="00053-3a9d0487-b96a-413c-9424-df2affc9839c" deepnote_cell_type="markdown" output_cleared=false tags=[] # As you can see cross validation score has decreased as compared to earlier score. It should increase and for that you have to try changing hyperparameter values so that better cross validation score can be achieved. # + cell_id="00045-8cdf7440-d690-49aa-bd8e-47943f33c4d2" deepnote_cell_type="code" execution_millis=238 output_cleared=false source_hash="283fa6ad" tags=[] # Fitting the best estimator best_clf_random.fit(X_train,y_train) # + cell_id="00046-0cade367-9753-4700-9ec3-d1e9ae9d8202" deepnote_cell_type="code" execution_millis=0 output_cleared=false source_hash="6f0bb82e" tags=[] # Getting first estimator best_clf_random.estimators_[0] # + cell_id="00047-b1c3fbe1-6ef1-4a68-8945-aff76b93dc51" deepnote_cell_type="code" execution_millis=218 output_cleared=false source_hash="a4243693" tags=[] # Predicting on best estimator train_pred = best_clf_random.predict(X_train) test_pred = best_clf_random.predict(X_test) # + cell_id="00048-f937a119-f535-4d5b-9b12-804145d893ed" deepnote_cell_type="code" execution_millis=8 output_cleared=false source_hash="2bc026d5" tags=[] # Finding the F1 score of training & testing sets print("The training F1 score is: ",f1_score(train_pred,y_train)) print("The testing F1 score is :",f1_score(test_pred,y_test)) # + [markdown] cell_id="00058-fae06a1c-caa0-4351-a90c-5231274f3f11" deepnote_cell_type="markdown" output_cleared=false tags=[] # No the testing F1 score is higher than training score. 
# + cell_id="00049-a690ff4b-3979-4472-8f39-eab8df5bac65" deepnote_cell_type="code" execution_millis=4 output_cleared=false source_hash="9fa71423" tags=[] # Getting accuracy score rf_accuracy = accuracy_score(y_test,test_pred) # + cell_id="00050-c7370718-7dc0-462d-94a3-59fd3b5bba01" deepnote_cell_type="code" execution_millis=110 output_cleared=false source_hash="f7363a68" tags=[] # Computing ROC AUC from prediction scores rf_auc = roc_auc_score(y_test,best_clf_random.predict_proba(X_test)[:,1]) # + cell_id="00051-37820c14-51ba-4a82-bc0c-3c57e10702a5" deepnote_cell_type="code" execution_millis=216 output_cleared=false source_hash="c826614c" tags=[] # Plotting ROC curve fpr,tpr,thresholds = roc_curve(y_test,best_clf_random.predict_proba(X_test)[:,1]) plt.plot([0,1],[0,1],'k--') plt.plot(fpr,tpr) plt.xlabel('fpr') plt.ylabel('tpr') plt.show() # + cell_id="00052-8958d874-23ee-4888-a6e3-d942e39fe83b" deepnote_cell_type="code" execution_millis=52 output_cleared=false source_hash="17e691e7" tags=[] # Computing confusion matrix pd.crosstab(y_test,test_pred,rownames = ['True'],colnames = ['Predicted'],margins = True) # + cell_id="00053-8228f5d6-71f4-4ad8-95db-fe5b176d1074" deepnote_cell_type="code" execution_millis=249 output_cleared=false source_hash="5f334673" tags=[] # Plotting confusion matrix cnf_matrix = confusion_matrix(y_test,test_pred) p = sns.heatmap(pd.DataFrame(cnf_matrix),annot = True,cmap = "YlGnBu",fmt = 'g') plt.title("Confusion Matrix",y = 1.1) plt.xlabel('Predicted Label') plt.ylabel('Actual Label') # + cell_id="00054-dad64061-3f9b-4c53-a474-206c51504c58" deepnote_cell_type="code" execution_millis=8 output_cleared=false source_hash="915aa19d" tags=[] # Computing the precision rf_precision = precision_score(y_test,test_pred) # + cell_id="00055-d8304480-787b-4149-a883-fbacdf051d10" deepnote_cell_type="code" execution_millis=3 output_cleared=false source_hash="b3282a98" tags=[] # Computing the recall rf_recall = recall_score(y_test,test_pred) # + cell_id="00056-cc05a170-c487-45c0-a623-ffeadf4b3110" deepnote_cell_type="code" execution_millis=249 output_cleared=false source_hash="10c0a791" tags=[] # Getting feature importances rf_features = pd.Series(best_clf_random.feature_importances_,index = X.columns) rf_features.sort_values(ascending = False) # Plotting feature importances rf_features.sort_values(ascending = False).plot(kind = "bar") # + cell_id="00076-b2ee5766-c56f-4ca4-992e-f08cd68b01bf" deepnote_cell_type="code" execution_millis=0 output_cleared=false source_hash="9e96b965" tags=[] svm_f1 = 2 * (svm_precision * svm_recall) / (svm_precision + svm_recall) knn_f1 = 2 * (knn_precision * knn_recall) / (knn_precision + knn_recall) rf_f1 = 2 * (rf_precision * rf_recall) / (rf_precision + rf_recall) # + cell_id="00072-67c6b7fe-1a3a-47d2-a57c-12cae8edc543" deepnote_cell_type="code" execution_millis=156 output_cleared=false source_hash="8e1d8389" tags=[] plt.figure(figsize=(13,7)) score_name= ['Accuracy','CV Accuracy', 'AUC', 'F1 Score', 'Recall', 'Precision'] plt.plot(score_name, [svm_accuracy,svm_cv_mean,svm_f1,svm_auc,svm_recall,svm_precision], 'o', label='SVM') plt.plot(score_name, [knn_accuracy,knn_cv_mean,knn_auc,knn_f1,knn_recall,knn_precision], 'o', label='KNN') plt.plot(score_name, [rf_accuracy, rf_cv_mean, rf_auc, rf_f1, rf_recall, rf_precision], 'o', label='Random forests') plt.legend() plt.xlabel('Scores') plt.ylabel('Percentage') plt.title('Comparison of Models Scores of SVM, kNN, Random Forests') plt.show() # + [markdown] cell_id="00077-7fff2273-0047-4b09-a1b2-f27e7b815890" 
deepnote_cell_type="markdown" output_cleared=false tags=[] # ### Feature importances Comparison # + cell_id="00100-18036c1c-4269-43fa-ac03-857808bef8a2" deepnote_cell_type="code" execution_millis=324 output_cleared=false source_hash="255a9c89" tags=[] svm = svm_feature_imp knn = knn_feature_imp random_forests = best_clf_random.feature_importances_ index = np.array(X_train_diabetes.columns) df = pd.DataFrame({'SVM': svm, 'kNN': knn, 'Random Forests': random_forests}, index=index) ax = df.plot.bar(figsize=(13,7),rot=90, title='Comparison of Feature Importance of SVM, kNN, Random Forests')
notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Residual Networks # # Welcome to the second assignment of this week! You will learn how to build very deep convolutional networks, using Residual Networks (ResNets). In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by [He et al.](https://arxiv.org/pdf/1512.03385.pdf), allow you to train much deeper networks than were previously practically feasible. # # **In this assignment, you will:** # - Implement the basic building blocks of ResNets. # - Put together these building blocks to implement and train a state-of-the-art neural network for image classification. # # This assignment will be done in Keras. # # Before jumping into the problem, let's run the cell below to load the required packages. # + import numpy as np from keras import layers from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D from keras.models import Model, load_model from keras.preprocessing import image from keras.utils import layer_utils from keras.utils.data_utils import get_file from keras.applications.imagenet_utils import preprocess_input import pydot from IPython.display import SVG from keras.utils.vis_utils import model_to_dot from keras.utils import plot_model from resnets_utils import * from keras.initializers import glorot_uniform import scipy.misc from matplotlib.pyplot import imshow # %matplotlib inline import keras.backend as K K.set_image_data_format('channels_last') K.set_learning_phase(1) # - # ## 1 - The problem of very deep neural networks # # Last week, you built your first convolutional neural network. In recent years, neural networks have become deeper, with state-of-the-art networks going from just a few layers (e.g., AlexNet) to over a hundred layers. # # The main benefit of a very deep network is that it can represent very complex functions. It can also learn features at many different levels of abstraction, from edges (at the lower layers) to very complex features (at the deeper layers). However, using a deeper network doesn't always help. A huge barrier to training them is vanishing gradients: very deep networks often have a gradient signal that goes to zero quickly, thus making gradient descent unbearably slow. More specifically, during gradient descent, as you backprop from the final layer back to the first layer, you are multiplying by the weight matrix on each step, and thus the gradient can decrease exponentially quickly to zero (or, in rare cases, grow exponentially quickly and "explode" to take very large values). # # During training, you might therefore see the magnitude (or norm) of the gradient for the earlier layers descrease to zero very rapidly as training proceeds: # <img src="images/vanishing_grad_kiank.png" style="width:450px;height:220px;"> # <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Vanishing gradient** <br> The speed of learning decreases very rapidly for the early layers as the network trains </center></caption> # # You are now going to solve this problem by building a Residual Network! 
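# Before building it, here is a tiny numerical aside (an addition to the notebook, purely illustrative): if each layer scales the backpropagated gradient by an assumed average factor slightly below 1, the gradient reaching the early layers shrinks exponentially with depth.

# +
# Toy illustration of exponentially vanishing gradients with depth.
per_layer_factor = 0.9  # assumed average per-layer scaling of the gradient
for depth in [10, 50, 100]:
    print(depth, "layers -> gradient scale ~", per_layer_factor ** depth)
# -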
# ## 2 - Building a Residual Network # # In ResNets, a "shortcut" or a "skip connection" allows the gradient to be directly backpropagated to earlier layers: # # <img src="images/skip_connection_kiank.png" style="width:650px;height:200px;"> # <caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : A ResNet block showing a **skip-connection** <br> </center></caption> # # The image on the left shows the "main path" through the network. The image on the right adds a shortcut to the main path. By stacking these ResNet blocks on top of each other, you can form a very deep network. # # We also saw in lecture that having ResNet blocks with the shortcut also makes it very easy for one of the blocks to learn an identity function. This means that you can stack on additional ResNet blocks with little risk of harming training set performance. (There is also some evidence that the ease of learning an identity function--even more than skip connections helping with vanishing gradients--accounts for ResNets' remarkable performance.) # # Two main types of blocks are used in a ResNet, depending mainly on whether the input/output dimensions are same or different. You are going to implement both of them. # ### 2.1 - The identity block # # The identity block is the standard block used in ResNets, and corresponds to the case where the input activation (say $a^{[l]}$) has the same dimension as the output activation (say $a^{[l+2]}$). To flesh out the different steps of what happens in a ResNet's identity block, here is an alternative diagram showing the individual steps: # # <img src="images/idblock2_kiank.png" style="width:650px;height:150px;"> # <caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Identity block.** Skip connection "skips over" 2 layers. </center></caption> # # The upper path is the "shortcut path." The lower path is the "main path." In this diagram, we have also made explicit the CONV2D and ReLU steps in each layer. To speed up training we have also added a BatchNorm step. Don't worry about this being complicated to implement--you'll see that BatchNorm is just one line of code in Keras! # # In this exercise, you'll actually implement a slightly more powerful version of this identity block, in which the skip connection "skips over" 3 hidden layers rather than 2 layers. It looks like this: # # <img src="images/idblock3_kiank.png" style="width:650px;height:150px;"> # <caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Identity block.** Skip connection "skips over" 3 layers.</center></caption> # # Here're the individual steps. # # First component of main path: # - The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2a'`. Use 0 as the seed for the random initialization. # - The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`. # - Then apply the ReLU activation function. This has no name and no hyperparameters. # # Second component of main path: # - The second CONV2D has $F_2$ filters of shape $(f,f)$ and a stride of (1,1). Its padding is "same" and its name should be `conv_name_base + '2b'`. Use 0 as the seed for the random initialization. # - The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`. # - Then apply the ReLU activation function. This has no name and no hyperparameters. 
# # Third component of main path: # - The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid" and its name should be `conv_name_base + '2c'`. Use 0 as the seed for the random initialization. # - The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. # # Final step: # - The shortcut and the input are added together. # - Then apply the ReLU activation function. This has no name and no hyperparameters. # # **Exercise**: Implement the ResNet identity block. We have implemented the first component of the main path. Please read over this carefully to make sure you understand what it is doing. You should implement the rest. # - To implement the Conv2D step: [See reference](https://keras.io/layers/convolutional/#conv2d) # - To implement BatchNorm: [See reference](https://faroit.github.io/keras-docs/1.2.2/layers/normalization/) (axis: Integer, the axis that should be normalized (typically the channels axis)) # - For the activation, use: `Activation('relu')(X)` # - To add the value passed forward by the shortcut: [See reference](https://keras.io/layers/merge/#add) # + # GRADED FUNCTION: identity_block def identity_block(X, f, filters, stage, block): """ Implementation of the identity block as defined in Figure 3 Arguments: X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev) f -- integer, specifying the shape of the middle CONV's window for the main path filters -- python list of integers, defining the number of filters in the CONV layers of the main path stage -- integer, used to name the layers, depending on their position in the network block -- string/character, used to name the layers, depending on their position in the network Returns: X -- output of the identity block, tensor of shape (n_H, n_W, n_C) """ # defining name basis conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' # Retrieve Filters F1, F2, F3 = filters # Save the input value. You'll need this later to add back to the main path. 
X_shortcut = X # First component of main path X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X) X = Activation('relu')(X) ### START CODE HERE ### # Second component of main path (≈3 lines) X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X) X = Activation('relu')(X) # Third component of main path (≈2 lines) X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X) # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines) X = Add()([X, X_shortcut]) X = Activation('relu')(X) ### END CODE HERE ### return X # + tf.reset_default_graph() with tf.Session() as test: np.random.seed(1) A_prev = tf.placeholder("float", [3, 4, 4, 6]) X = np.random.randn(3, 4, 4, 6) A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a') test.run(tf.global_variables_initializer()) out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0}) print("out = " + str(out[0][1][1][0])) # - # **Expected Output**: # # <table> # <tr> # <td> # **out** # </td> # <td> # [ 0.94822985 0. 1.16101444 2.747859 0. 1.36677003] # </td> # </tr> # # </table> # ## 2.2 - The convolutional block # # You've implemented the ResNet identity block. Next, the ResNet "convolutional block" is the other type of block. You can use this type of block when the input and output dimensions don't match up. The difference with the identity block is that there is a CONV2D layer in the shortcut path: # # <img src="images/convblock_kiank.png" style="width:650px;height:150px;"> # <caption><center> <u> <font color='purple'> **Figure 4** </u><font color='purple'> : **Convolutional block** </center></caption> # # The CONV2D layer in the shortcut path is used to resize the input $x$ to a different dimension, so that the dimensions match up in the final addition needed to add the shortcut value back to the main path. (This plays a similar role as the matrix $W_s$ discussed in lecture.) For example, to reduce the activation dimensions's height and width by a factor of 2, you can use a 1x1 convolution with a stride of 2. The CONV2D layer on the shortcut path does not use any non-linear activation function. Its main role is to just apply a (learned) linear function that reduces the dimension of the input, so that the dimensions match up for the later addition step. # # The details of the convolutional block are as follows. # # First component of main path: # - The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid" and its name should be `conv_name_base + '2a'`. # - The first BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2a'`. # - Then apply the ReLU activation function. This has no name and no hyperparameters. # # Second component of main path: # - The second CONV2D has $F_2$ filters of (f,f) and a stride of (1,1). Its padding is "same" and it's name should be `conv_name_base + '2b'`. # - The second BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2b'`. # - Then apply the ReLU activation function. 
This has no name and no hyperparameters. # # Third component of main path: # - The third CONV2D has $F_3$ filters of (1,1) and a stride of (1,1). Its padding is "valid" and it's name should be `conv_name_base + '2c'`. # - The third BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '2c'`. Note that there is no ReLU activation function in this component. # # Shortcut path: # - The CONV2D has $F_3$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid" and its name should be `conv_name_base + '1'`. # - The BatchNorm is normalizing the channels axis. Its name should be `bn_name_base + '1'`. # # Final step: # - The shortcut and the main path values are added together. # - Then apply the ReLU activation function. This has no name and no hyperparameters. # # **Exercise**: Implement the convolutional block. We have implemented the first component of the main path; you should implement the rest. As before, always use 0 as the seed for the random initialization, to ensure consistency with our grader. # - [Conv Hint](https://keras.io/layers/convolutional/#conv2d) # - [BatchNorm Hint](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis)) # - For the activation, use: `Activation('relu')(X)` # - [Addition Hint](https://keras.io/layers/merge/#add) # + # GRADED FUNCTION: convolutional_block def convolutional_block(X, f, filters, stage, block, s = 2): """ Implementation of the convolutional block as defined in Figure 4 Arguments: X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev) f -- integer, specifying the shape of the middle CONV's window for the main path filters -- python list of integers, defining the number of filters in the CONV layers of the main path stage -- integer, used to name the layers, depending on their position in the network block -- string/character, used to name the layers, depending on their position in the network s -- Integer, specifying the stride to be used Returns: X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C) """ # defining name basis conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' # Retrieve Filters F1, F2, F3 = filters # Save the input value X_shortcut = X ##### MAIN PATH ##### # First component of main path X = Conv2D(F1, (1, 1), strides = (s,s), name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X) X = Activation('relu')(X) ### START CODE HERE ### # Second component of main path (≈3 lines) X = Conv2D(F2, (f, f), strides = (1,1), padding='same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X) X = Activation('relu')(X) # Third component of main path (≈2 lines) X = Conv2D(F3, (1, 1), strides = (1,1), padding='valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X) ##### SHORTCUT PATH #### (≈2 lines) X_shortcut = Conv2D(F3, (1, 1), strides = (s,s), padding='valid', name = conv_name_base + '1', kernel_initializer = glorot_uniform(seed=0))(X_shortcut) X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut) # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines) X = Add()([X, X_shortcut]) X = Activation('relu')(X) ### END 
CODE HERE ### return X # + tf.reset_default_graph() with tf.Session() as test: np.random.seed(1) A_prev = tf.placeholder("float", [3, 4, 4, 6]) X = np.random.randn(3, 4, 4, 6) A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a') test.run(tf.global_variables_initializer()) out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0}) print("out = " + str(out[0][1][1][0])) # - # **Expected Output**: # # <table> # <tr> # <td> # **out** # </td> # <td> # [ 0.09018463 1.23489773 0.46822017 0.0367176 0. 0.65516603] # </td> # </tr> # # </table> # ## 3 - Building your first ResNet model (50 layers) # # You now have the necessary blocks to build a very deep ResNet. The following figure describes in detail the architecture of this neural network. "ID BLOCK" in the diagram stands for "Identity block," and "ID BLOCK x3" means you should stack 3 identity blocks together. # # <img src="images/resnet_kiank.png" style="width:850px;height:150px;"> # <caption><center> <u> <font color='purple'> **Figure 5** </u><font color='purple'> : **ResNet-50 model** </center></caption> # # The details of this ResNet-50 model are: # - Zero-padding pads the input with a pad of (3,3) # - Stage 1: # - The 2D Convolution has 64 filters of shape (7,7) and uses a stride of (2,2). Its name is "conv1". # - BatchNorm is applied to the channels axis of the input. # - MaxPooling uses a (3,3) window and a (2,2) stride. # - Stage 2: # - The convolutional block uses three set of filters of size [64,64,256], "f" is 3, "s" is 1 and the block is "a". # - The 2 identity blocks use three set of filters of size [64,64,256], "f" is 3 and the blocks are "b" and "c". # - Stage 3: # - The convolutional block uses three set of filters of size [128,128,512], "f" is 3, "s" is 2 and the block is "a". # - The 3 identity blocks use three set of filters of size [128,128,512], "f" is 3 and the blocks are "b", "c" and "d". # - Stage 4: # - The convolutional block uses three set of filters of size [256, 256, 1024], "f" is 3, "s" is 2 and the block is "a". # - The 5 identity blocks use three set of filters of size [256, 256, 1024], "f" is 3 and the blocks are "b", "c", "d", "e" and "f". # - Stage 5: # - The convolutional block uses three set of filters of size [512, 512, 2048], "f" is 3, "s" is 2 and the block is "a". # - The 2 identity blocks use three set of filters of size [512, 512, 2048], "f" is 3 and the blocks are "b" and "c". # - The 2D Average Pooling uses a window of shape (2,2) and its name is "avg_pool". # - The flatten doesn't have any hyperparameters or name. # - The Fully Connected (Dense) layer reduces its input to the number of classes using a softmax activation. Its name should be `'fc' + str(classes)`. # # **Exercise**: Implement the ResNet with 50 layers described in the figure above. We have implemented Stages 1 and 2. Please implement the rest. (The syntax for implementing Stages 3-5 should be quite similar to that of Stage 2.) Make sure you follow the naming convention in the text above. 
# # You'll need to use this function: # - Average pooling [see reference](https://keras.io/layers/pooling/#averagepooling2d) # # Here're some other functions we used in the code below: # - Conv2D: [See reference](https://keras.io/layers/convolutional/#conv2d) # - BatchNorm: [See reference](https://keras.io/layers/normalization/#batchnormalization) (axis: Integer, the axis that should be normalized (typically the features axis)) # - Zero padding: [See reference](https://keras.io/layers/convolutional/#zeropadding2d) # - Max pooling: [See reference](https://keras.io/layers/pooling/#maxpooling2d) # - Fully conected layer: [See reference](https://keras.io/layers/core/#dense) # - Addition: [See reference](https://keras.io/layers/merge/#add) # + # GRADED FUNCTION: ResNet50 def ResNet50(input_shape = (64, 64, 3), classes = 6): """ Implementation of the popular ResNet50 the following architecture: CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3 -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER Arguments: input_shape -- shape of the images of the dataset classes -- integer, number of classes Returns: model -- a Model() instance in Keras """ # Define the input as a tensor with shape input_shape X_input = Input(input_shape) # Zero-Padding X = ZeroPadding2D((3, 3))(X_input) # Stage 1 X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X) X = BatchNormalization(axis = 3, name = 'bn_conv1')(X) X = Activation('relu')(X) X = MaxPooling2D((3, 3), strides=(2, 2))(X) # Stage 2 X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1) X = identity_block(X, 3, [64, 64, 256], stage=2, block='b') X = identity_block(X, 3, [64, 64, 256], stage=2, block='c') ### START CODE HERE ### # Stage 3 (≈4 lines) X = convolutional_block(X, f = 3, filters = [128, 128, 512], stage = 3, block='a', s = 2) X = identity_block(X, 3, [128, 128, 512], stage=3, block='b') X = identity_block(X, 3, [128, 128, 512], stage=3, block='c') X = identity_block(X, 3, [128, 128, 512], stage=3, block='d') # Stage 4 (≈6 lines) X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2) X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f') # Stage 5 (≈3 lines) X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='a', s = 2) X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b') X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c') # AVGPOOL (≈1 line). Use "X = AveragePooling2D(...)(X)" X = AveragePooling2D()(X) ### END CODE HERE ### # output layer X = Flatten()(X) X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X) # Create model model = Model(inputs = X_input, outputs = X, name='ResNet50') return model # - # Run the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running `model.fit(...)` below. model = ResNet50(input_shape = (64, 64, 3), classes = 6) # As seen in the Keras Tutorial Notebook, prior training a model, you need to configure the learning process by compiling the model. 
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # The model is now ready to be trained. The only thing you need is a dataset. # Let's load the SIGNS Dataset. # # <img src="images/signs_data_kiank.png" style="width:450px;height:250px;"> # <caption><center> <u> <font color='purple'> **Figure 6** </u><font color='purple'> : **SIGNS dataset** </center></caption> # # + X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() # Normalize image vectors X_train = X_train_orig/255. X_test = X_test_orig/255. # Convert training and test labels to one hot matrices Y_train = convert_to_one_hot(Y_train_orig, 6).T Y_test = convert_to_one_hot(Y_test_orig, 6).T print ("number of training examples = " + str(X_train.shape[0])) print ("number of test examples = " + str(X_test.shape[0])) print ("X_train shape: " + str(X_train.shape)) print ("Y_train shape: " + str(Y_train.shape)) print ("X_test shape: " + str(X_test.shape)) print ("Y_test shape: " + str(Y_test.shape)) # - # Run the following cell to train your model on 2 epochs with a batch size of 32. On a CPU it should take you around 5min per epoch. model.fit(X_train, Y_train, epochs = 2, batch_size = 32) # **Expected Output**: # # <table> # <tr> # <td> # ** Epoch 1/2** # </td> # <td> # loss: between 1 and 5, acc: between 0.2 and 0.5, although your results can be different from ours. # </td> # </tr> # <tr> # <td> # ** Epoch 2/2** # </td> # <td> # loss: between 1 and 5, acc: between 0.2 and 0.5, you should see your loss decreasing and the accuracy increasing. # </td> # </tr> # # </table> # Let's see how this model (trained on only two epochs) performs on the test set. preds = model.evaluate(X_test, Y_test) print ("Loss = " + str(preds[0])) print ("Test Accuracy = " + str(preds[1])) # **Expected Output**: # # <table> # <tr> # <td> # **Test Accuracy** # </td> # <td> # between 0.16 and 0.25 # </td> # </tr> # # </table> # For the purpose of this assignment, we've asked you to train the model only for two epochs. You can see that it achieves poor performances. Please go ahead and submit your assignment; to check correctness, the online grader will run your code only for a small number of epochs as well. # After you have finished this official (graded) part of this assignment, you can also optionally train the ResNet for more iterations, if you want. We get a lot better performance when we train for ~20 epochs, but this will take more than an hour when training on a CPU. # # Using a GPU, we've trained our own ResNet50 model's weights on the SIGNS dataset. You can load and run our trained model on the test set in the cells below. It may take ≈1min to load the model. model = load_model('ResNet50.h5') preds = model.evaluate(X_test, Y_test) print ("Loss = " + str(preds[0])) print ("Test Accuracy = " + str(preds[1])) # ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations. We hope you can use what you've learnt and apply it to your own classification problem to perform state-of-the-art accuracy. # # Congratulations on finishing this assignment! You've now implemented a state-of-the-art image classification system! # ## 4 - Test on your own image (Optional/Ungraded) # If you wish, you can also take a picture of your own hand and see the output of the model. To do this: # 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. # 2. Add your image to this Jupyter Notebook's directory, in the "images" folder # 3. 
Write your image's name in the following code # 4. Run the code and check if the algorithm is right! img_path = 'images/my_image.jpg' img = image.load_img(img_path, target_size=(64, 64)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) print('Input image shape:', x.shape) my_image = scipy.misc.imread(img_path) imshow(my_image) print("class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = ") print(model.predict(x)) # You can also print a summary of your model by running the following code. model.summary() # Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to "File -> Open...-> model.png". plot_model(model, to_file='model.png') SVG(model_to_dot(model).create(prog='dot', format='svg')) # <font color='blue'> # **What you should remember:** # - Very deep "plain" networks don't work in practice because they are hard to train due to vanishing gradients. # - The skip-connections help to address the Vanishing Gradient problem. They also make it easy for a ResNet block to learn an identity function. # - There are two main type of blocks: The identity block and the convolutional block. # - Very deep Residual Networks are built by stacking these blocks together. # ### References # # This notebook presents the ResNet algorithm due to He et al. (2015). The implementation here also took significant inspiration and follows the structure given in the github repository of Francois Chollet: # # - <NAME>, <NAME>, <NAME>, <NAME> - [Deep Residual Learning for Image Recognition (2015)](https://arxiv.org/abs/1512.03385) # - Francois Chollet's github repository: https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py #
4- Convolutional Neural Networks/course-materials/Week 2/ResNets/Residual+Networks+-+v2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CS5293 Spring 2020 Project 2 # ## By <NAME>/ # ## Loading packages import scipy from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.cluster import MiniBatchKMeans from sklearn.decomposition import LatentDirichletAllocation as LDA from gensim import corpora, models from sklearn.metrics import silhouette_score import networkx import random import re import pandas as pd import numpy as np import json import glob import nltk from nltk.stem import WordNetLemmatizer from nltk.corpus import wordnet from nltk.stem import PorterStemmer from sklearn.metrics import silhouette_score # ## Function to read files def readfiles(path, n): filenames=glob.glob(path) ## Get filepaths filen=len(filenames) number= random.randint(0,filen) ##Random file index percent = (n)/100 reqf=(filen) * percent end=number+reqf print("The files from index %d to %d have been taken" %(number , end)) print(int(reqf)) taken=filenames[int(number):int(end)] return taken # ## Function to normalize text def normalize(text): text = text.lower() ## Lowercase text sents=(nltk.sent_tokenize(text)) ##Sentence tokenization words=[] for sent in sents: sent.strip() words.extend(nltk.word_tokenize(sent)) ## Word tokenization stopword_list = nltk.corpus.stopwords.words('english') custom_stop_words = [ 'doi', 'preprint', 'copyright', 'peer', 'reviewed', 'org', 'https', 'et', 'al', 'author', 'figure', 'rights', 'reserved', 'permission', 'used', 'using', 'biorxiv', 'medrxiv', 'license', 'fig', 'fig.', 'al.', 'Elsevier', 'PMC', 'CZI', 'www' ] filtered_tokens = [token for token in words if token not in custom_stop_words] filtered_tokens1 = [token for token in filtered_tokens if token not in custom_stop_words] ##Stop word removal txt = ' '.join(filtered_tokens1) return txt # ## Function to create Dataframe using files list def createDB(filepath): dict_ = {'paper_id': [], 'abstract': [], 'body_text': []} ## Initializing directories for j in range(len(filepath)): with open(filepath[j]) as f: ##json text extraction data=json.load(f) paper_id = data['paper_id'] abstract = [] body_text = [] for entry in data['abstract']: abstract.append(entry['text']) for entry in data['body_text']: body_text.append(entry['text']) abstract = '\n'.join(abstract) body_text = '\n'.join(body_text) dict_['paper_id'].append(paper_id) if len(abstract) == 0: # if no abstract is provided dict_['abstract'].append("Not provided.") ## else: # abstract is provided dict_['abstract'].append(abstract) # dict_['abstract'].append(abstract) dict_['body_text'].append(body_text) df = pd.DataFrame(dict_, columns=['paper_id', 'abstract', 'body_text']) df['abstract'] = df['abstract'].apply(lambda x: re.sub('[^a-zA-z0-9\s]','',x)) ## Remove special charecters df['abstract'] = df['abstract'].apply(lambda x: normalize(x)) return df # ## Function to perform TextRank summarization def cleansed( Unfinished, txt, n): sent_tokens=nltk.sent_tokenize(txt) unfin = nltk.sent_tokenize(Unfinished) vectorizer = TfidfVectorizer(stop_words='english', max_features=2**12, smooth_idf=True, use_idf=True, ngram_range=(2,4)) docu=vectorizer.fit_transform(sent_tokens) sim_mat= docu*docu.T sim_graph= networkx.from_scipy_sparse_matrix(sim_mat) scores = networkx.pagerank(sim_graph) ranked_sentences = sorted(((score, index) for index, score in scores.items()), reverse=True) 
top_sentence_indices = [ranked_sentences[index][1] for index in range(0,n)] top_sentence_indices.sort() top_sentences = [unfin[index] for index in top_sentence_indices] summary =''.join(top_sentences) return summary # ## Function to write summary output def outputfiles(dataframe): for i in range(len(dataframe)): j=i+1 filename = ('output_%d.md'%(j)) with open(filename, 'w') as f: f.write('This is the output for cluster #%d\n\n'%(j)) for text in dataframe['summary'][i]: f.write(text) # ### Data collection filenames = readfiles('json files/*.json', 20) df = createDB(filenames) df.head() # ### Text vectorization vectorizer=TfidfVectorizer(stop_words='english', max_features=2**12, smooth_idf=True, use_idf=True, ngram_range=(2,4)) docu=vectorizer.fit_transform(df['abstract'].values) # ### Clustering using KMeans cluster=np.sqrt(int(len(filenames))/2) print(cluster) kmeans = MiniBatchKMeans(n_clusters=int(cluster),max_iter=5000, init='random') preds = kmeans.fit_predict(docu) preds # ### Dataframe with clustered text df['cluster']=preds df1 = df.groupby('cluster')['body_text'].apply(list).reset_index(name='text') df1['text'] = df1['text'].apply(lambda x: ' '.join(map(str, x)) ) df1.head() # ### Normalization of text # + df1['Normalized_text'] = 0 for j in range(len(df1)): df1['Normalized_text'][j]=normalize(df1['text'][j]) df1['summary'] = 0 # - # ### Summary creation for k in range(len(df1)): print(k) df1['summary'][k]= cleansed(df1['text'][k], df1['Normalized_text'][k], 15) # ### File output df1.head() outputfiles(df1)
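# ### Optional: checking the number of clusters
#
# `silhouette_score` is imported above but never used. The hedged sketch below shows one way it could be used to sanity-check the square-root heuristic for `n_clusters`; it reuses the TF-IDF matrix `docu` built in the clustering step.

# +
# Compare a few cluster counts on the same TF-IDF matrix; a higher silhouette is better.
for k in range(2, 8):
    km = MiniBatchKMeans(n_clusters=k, max_iter=5000, init='random', random_state=42)
    labels = km.fit_predict(docu)
    print("k = %d, silhouette score = %.4f" % (k, silhouette_score(docu, labels)))
# -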
text_summarizer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Triangular Moving Average (TRIMA) # https://www.tradingtechnologies.com/xtrader-help/x-study/technical-indicator-definitions/triangular-moving-average-trima/ # + outputHidden=false inputHidden=false import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") # fix_yahoo_finance is used to fetch data import fix_yahoo_finance as yf yf.pdr_override() # + outputHidden=false inputHidden=false # input symbol = 'AAPL' start = '2018-01-01' end = '2019-01-01' # Read data df = yf.download(symbol,start,end) # View Columns df.head() # + outputHidden=false inputHidden=false n = 7 sma = df['Adj Close'].rolling(center=False, window=n, min_periods=n - 1).mean() df['TRIMA'] = sma.rolling(center=False, window=n, min_periods=n - 1).mean() # + outputHidden=false inputHidden=false df.head(20) # + outputHidden=false inputHidden=false fig = plt.figure(figsize=(14,10)) ax1 = plt.subplot(2, 1, 1) ax1.plot(df['Adj Close']) ax1.set_title('Stock '+ symbol +' Closing Price') ax1.set_ylabel('Price') ax2 = plt.subplot(2, 1, 2) ax2.plot(df['TRIMA'], label='Triangular Moving Average', color='red') #ax2.axhline(y=0, color='blue', linestyle='--') ax2.grid() ax2.set_ylabel('Triangular Moving Average') ax2.set_xlabel('Date') ax2.legend(loc='best') # - # ## Candlestick with Triangular Moving Average # + outputHidden=false inputHidden=false from matplotlib import dates as mdates import datetime as dt dfc = df.copy() dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close'] #dfc = dfc.dropna() dfc = dfc.reset_index() dfc['Date'] = pd.to_datetime(dfc['Date']) dfc['Date'] = dfc['Date'].apply(mdates.date2num) dfc.head() # + outputHidden=false inputHidden=false from mpl_finance import candlestick_ohlc fig = plt.figure(figsize=(14,10)) ax1 = plt.subplot(2, 1, 1) candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0) ax1.xaxis_date() ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y')) ax1.grid(True, which='both') ax1.minorticks_on() ax1v = ax1.twinx() colors = dfc.VolumePositive.map({True: 'g', False: 'r'}) ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4) ax1v.axes.yaxis.set_ticklabels([]) ax1v.set_ylim(0, 3*df.Volume.max()) ax1.set_title('Stock '+ symbol +' Closing Price') ax1.set_ylabel('Price') ax2 = plt.subplot(2, 1, 2) ax2.plot(df['TRIMA'], label='Triangular Moving Average', color='red') #ax2.axhline(y=0, color='blue', linestyle='--') ax2.grid() ax2.set_ylabel('Triangular Moving Average') ax2.set_xlabel('Date') ax2.legend(loc='best')
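# ## Why the double smoothing works
#
# A triangular moving average is a simple moving average applied twice, which is equivalent to a single weighted average whose weights rise linearly towards the middle of the window and fall off again. The hedged sketch below checks that equivalence on synthetic data, so it does not depend on the Yahoo download above, and it ignores the `min_periods` edge handling used earlier.

# +
np.random.seed(0)
prices = pd.Series(100 + np.random.normal(0, 1, 200).cumsum())

n = 7
sma = prices.rolling(window=n).mean()
trima_double = sma.rolling(window=n).mean()

# Equivalent single pass: triangular weights 1,2,...,7,...,2,1 over a 2n-1 point window
weights = np.convolve(np.ones(n), np.ones(n)) / n**2   # length 13, sums to 1
trima_single = prices.rolling(window=2*n - 1).apply(lambda w: np.dot(w, weights), raw=True)

print(np.allclose(trima_double.dropna().values, trima_single.dropna().values))
# -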
Python_Stock/Technical_Indicators/TRIMA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Naver REST API
# - integrated search-term trend API
# - Papago translation API

# #### 1. Integrated search-term trend
# - https://developers.naver.com
# - 1. Get a request token: register an application to obtain an app key
# - 2. Fetch the data using the app key

import json
import requests
import pandas as pd

client_id = ""
client_secret = ""

url = "https://openapi.naver.com/v1/datalab/search"
params = {
    "startDate": "2018-01-01",
    "endDate": "2020-06-26",
    "timeUnit": "month",
    "keywordGroups": [
        {"groupName": "트위터", "keywords": ["트위터", "트윗"]},
        {"groupName": "페이스북", "keywords": ["페이스북", "페북"]},
        {"groupName": "인스타그램", "keywords": ["인스타그램", "인스타"]}
    ]
}
headers = {
    "X-Naver-Client-Id": client_id,
    "X-Naver-Client-Secret": client_secret,
    "Content-Type": "application/json"
}

# API request
response = requests.post(url, data=json.dumps(params), headers=headers)
response

response.text

datas = response.json()["results"]

# +
dfs = []

for data in datas:
    df = pd.DataFrame(data["data"])
    df["title"] = data["title"]  # Twitter, Facebook, Instagram
    dfs.append(df)
# -

result_df = pd.concat(dfs, ignore_index=True)
result_df.tail(2)

result_df.pivot("period", "title", "ratio")

# #### 2. Papago API
# - add Papago to "My Applications"

url = "https://openapi.naver.com/v1/papago/n2mt"
params = {
    "source": "ko",
    "target": "en",
    "text": "파이썬 웹 크롤링 및 자동화",
}

response = requests.post(url, json.dumps(params), headers=headers)
response

response.json()["message"]["result"]["translatedText"]
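# The single call above translates one string. A hedged sketch of a small wrapper that reuses the same endpoint and `headers` to translate several strings, and checks the HTTP status before parsing, is shown below; the helper name is illustrative.

# +
def translate_ko_to_en(texts):
    """Translate a list of Korean strings to English with the Papago endpoint above."""
    results = []
    for text in texts:
        payload = {"source": "ko", "target": "en", "text": text}
        res = requests.post("https://openapi.naver.com/v1/papago/n2mt",
                            json.dumps(payload), headers=headers)
        if res.status_code == 200:
            results.append(res.json()["message"]["result"]["translatedText"])
        else:
            results.append(None)  # e.g. bad credentials or quota exceeded
    return results

translate_ko_to_en(["파이썬 웹 크롤링 및 자동화", "데이터 분석"])
# -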
etc/crawling/200626_api_naver.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + active="" # .. _example_basic: # # .. currentmodule:: parcel_model # - # # Example: Basic Run # In this example, we will setup a simple parcel model simulation containing two aerosol modes. We will then run the model with a 1 m/s updraft, and observe how the aerosol population bifurcates into swelled aerosol and cloud droplets. # + # Suppress warnings import warnings warnings.simplefilter('ignore') import pyrcel as pm import numpy as np # %matplotlib inline import matplotlib.pyplot as plt # - # First, we indicate the parcel's initial thermodynamic conditions. P0 = 77500. # Pressure, Pa T0 = 274. # Temperature, K S0 = -0.02 # Supersaturation, 1-RH (98% here) # Next, we define the aerosols present in the parcel. The model itself is agnostic to how the aerosol are specified; it simply expects lists of the radii of wetted aerosol radii, their number concentration, and their hygroscopicity. We can make container objects (:class:`AerosolSpecies`) that wrap all of this information so that we never need to worry about it. # # Here, let's construct two aerosol modes: # # +----------+---------------------------+--------------------+---------+------------------------+ # | Mode | $\kappa$ (hygroscopicity) | Mean size (micron) | Std dev | Number Conc (cm\*\*-3) | # +==========+===========================+====================+=========+========================+ # | sulfate | 0.54 | 0.015 | 1.6 | 850 | # +----------+---------------------------+--------------------+---------+------------------------+ # | sea salt | 1.2 | 0.85 | 1.2 | 10 | # +----------+---------------------------+--------------------+---------+------------------------+ # # We'll define each mode using the :class:`Lognorm` distribution packaged with the model. sulfate = pm.AerosolSpecies('sulfate', pm.Lognorm(mu=0.015, sigma=1.6, N=850.), kappa=0.54, bins=200) sea_salt = pm.AerosolSpecies('sea salt', pm.Lognorm(mu=0.85, sigma=1.2, N=10.), kappa=1.2, bins=40) # The :class:`AerosolSpecies` class automatically computes gridded/binned representations of the size distributions. Let's double check that the aerosol distribution in the model will make sense by plotting the number concentration in each bin. # + fig = plt.figure(figsize=(10,5)) ax = fig.add_subplot(111) ax.grid(False, "minor") sul_c = "#CC0066" ax.bar(sulfate.rs[:-1], sulfate.Nis*1e-6, np.diff(sulfate.rs), color=sul_c, label="sulfate", edgecolor="#CC0066") sea_c = "#0099FF" ax.bar(sea_salt.rs[:-1], sea_salt.Nis*1e-6, np.diff(sea_salt.rs), color=sea_c, label="sea salt", edgecolor="#0099FF") ax.semilogx() ax.set_xlabel("Aerosol dry radius, micron") ax.set_ylabel("Aerosl number conc., cm$^{-3}$") ax.legend(loc='upper right') # - # Actually running the model is very straightforward, and involves just two steps: # # 1. Instantiate the model by creating a :class:`ParcelModel` object. # 2. Call the model's :method:`run` method. # # For convenience this process is encoded into several routines in the `driver` file, including both a single-strategy routine and an iterating routine which adjusts the the timestep and numerical tolerances if the model crashes. However, we can illustrate the simple model running process here in case you wish to develop your own scheme for running the model. 
# + initial_aerosols = [sulfate, sea_salt] V = 1.0 # updraft speed, m/s dt = 1.0 # timestep, seconds t_end = 250./V # end time, seconds... 250 meter simulation model = pm.ParcelModel(initial_aerosols, V, T0, S0, P0, console=False, accom=0.3) parcel_trace, aerosol_traces = model.run(t_end, dt, solver='cvode') # - # If `console` is set to `True`, then some basic debugging output will be written to the terminal, including the initial equilibrium droplet size distribution and some numerical solver diagnostics. The model output can be customized; by default, we get a DataFrame and a Panel of the parcel state vector and aerosol bin sizes as a function of time (and height). We can use this to visualize the simulation results, like in the package's [README](https://github.com/darothen/parcel_model/blob/master/README.md). # + fig, [axS, axA] = plt.subplots(1, 2, figsize=(10, 4), sharey=True) axS.plot(parcel_trace['S']*100., parcel_trace['z'], color='k', lw=2) axT = axS.twiny() axT.plot(parcel_trace['T'], parcel_trace['z'], color='r', lw=1.5) Smax = parcel_trace['S'].max()*100 z_at_smax = parcel_trace['z'].iloc[parcel_trace['S'].argmax()] axS.annotate("max S = %0.2f%%" % Smax, xy=(Smax, z_at_smax), xytext=(Smax-0.3, z_at_smax+50.), arrowprops=dict(arrowstyle="->", color='k', connectionstyle='angle3,angleA=0,angleB=90'), zorder=10) axS.set_xlim(0, 0.7) axS.set_ylim(0, 250) axT.set_xticks([270, 271, 272, 273, 274]) axT.xaxis.label.set_color('red') axT.tick_params(axis='x', colors='red') axS.set_xlabel("Supersaturation, %") axT.set_xlabel("Temperature, K") axS.set_ylabel("Height, m") sulf_array = aerosol_traces['sulfate'].values sea_array = aerosol_traces['sea salt'].values ss = axA.plot(sulf_array[:, ::10]*1e6, parcel_trace['z'], color=sul_c, label="sulfate") sa = axA.plot(sea_array*1e6, parcel_trace['z'], color=sea_c, label="sea salt") axA.semilogx() axA.set_xlim(1e-2, 10.) axA.set_xticks([1e-2, 1e-1, 1e0, 1e1], [0.01, 0.1, 1.0, 10.0]) axA.legend([ss[0], sa[0]], ['sulfate', 'sea salt'], loc='upper right') axA.set_xlabel("Droplet radius, micron") for ax in [axS, axA, axT]: ax.grid(False, 'both', 'both') # - # In this simple example, the sulfate aerosol population bifurcated into interstitial aerosol and cloud droplets, while the entire sea salt population activated. A peak supersaturation of about 0.63% was reached a few meters above cloud base, where the ambient relative humidity hit 100%. # # How many CDNC does this translate into? We can call upon helper methods from the `activation` package to perform these calculations for us: # + from pyrcel import binned_activation sulf_trace = aerosol_traces['sulfate'] sea_trace = aerosol_traces['sea salt'] ind_final = int(t_end/dt) - 1 T = parcel_trace['T'].iloc[ind_final] eq_sulf, kn_sulf, alpha_sulf, phi_sulf = \ binned_activation(Smax/100, T, sulf_trace.iloc[ind_final], sulfate) eq_sulf *= sulfate.total_N eq_sea, kn_sea, alpha_sea, phi_sea = \ binned_activation(Smax/100, T, sea_trace.iloc[ind_final], sea_salt) eq_sea *= sea_salt.total_N print(" CDNC(sulfate) = {:3.1f}".format(eq_sulf)) print(" CDNC(sea salt) = {:3.1f}".format(eq_sea)) print("------------------------") print(" total = {:3.1f} / {:3.0f} ~ act frac = {:1.2f}".format( eq_sulf+eq_sea, sea_salt.total_N+sulfate.total_N, (eq_sulf+eq_sea)/(sea_salt.total_N+sulfate.total_N) ))
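# The same two-step interface (create a :class:`ParcelModel`, then call `run`) also makes it easy to explore sensitivities. The hedged sketch below re-runs the model for a few updraft speeds and records the peak supersaturation; it reuses `initial_aerosols`, `T0`, `S0` and `P0` from above and keeps `accom=0.3`, so it takes several times longer than the single run.

# +
updrafts = [0.5, 1.0, 2.0]   # m/s
smax_by_V = {}
for V_i in updrafts:
    model_i = pm.ParcelModel(initial_aerosols, V_i, T0, S0, P0, console=False, accom=0.3)
    trace_i, _ = model_i.run(250. / V_i, 1.0, solver='cvode')
    smax_by_V[V_i] = trace_i['S'].max() * 100.

for V_i, smax in sorted(smax_by_V.items()):
    print("V = {:.1f} m/s -> Smax = {:.2f}%".format(V_i, smax))
# -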
doc/examples/basic_run.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_pytorch_p36 # language: python # name: conda_pytorch_p36 # --- # # Plagiarism Detection Model # # Now that you've created training and test data, you are ready to define and train a model. Your goal in this notebook, will be to train a binary classification model that learns to label an answer file as either plagiarized or not, based on the features you provide the model. # # This task will be broken down into a few discrete steps: # # * Upload your data to S3. # * Define a binary classification model and a training script. # * Train your model and deploy it. # * Evaluate your deployed classifier and answer some questions about your approach. # # To complete this notebook, you'll have to complete all given exercises and answer all the questions in this notebook. # > All your tasks will be clearly labeled **EXERCISE** and questions as **QUESTION**. # # It will be up to you to explore different classification models and decide on a model that gives you the best performance for this dataset. # # --- # ## Load Data to S3 # # In the last notebook, you should have created two files: a `training.csv` and `test.csv` file with the features and class labels for the given corpus of plagiarized/non-plagiarized text data. # # >The below cells load in some AWS SageMaker libraries and creates a default bucket. After creating this bucket, you can upload your locally stored data to S3. # # Save your train and test `.csv` feature files, locally. To do this you can run the second notebook "2_Plagiarism_Feature_Engineering" in SageMaker or you can manually upload your files to this notebook using the upload icon in Jupyter Lab. Then you can upload local files to S3 by using `sagemaker_session.upload_data` and pointing directly to where the training data is saved. import pandas as pd import boto3 import sagemaker import os # + """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # session and role sagemaker_session = sagemaker.Session() role = sagemaker.get_execution_role() # create an S3 bucket bucket = sagemaker_session.default_bucket() # - # ## EXERCISE: Upload your training data to S3 # # Specify the `data_dir` where you've saved your `train.csv` file. Decide on a descriptive `prefix` that defines where your data will be uploaded in the default S3 bucket. Finally, create a pointer to your training data by calling `sagemaker_session.upload_data` and passing in the required parameters. It may help to look at the [Session documentation](https://sagemaker.readthedocs.io/en/stable/session.html#sagemaker.session.Session.upload_data) or previous SageMaker code examples. # # You are expected to upload your entire directory. Later, the training script will only access the `train.csv` file. # + # should be the name of directory you created to save your features data data_dir = 'plagiarism_data' # set prefix, a descriptive name for a directory prefix = 'project-plagiarism' # upload all data to S3 train_location = sagemaker_session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix) test_location = sagemaker_session.upload_data(os.path.join(data_dir, 'test.csv'), key_prefix=prefix) # - # ### Test cell # # Test that your data has been successfully uploaded. The below cell prints out the items in your S3 bucket and will throw an error if it is empty. 
You should see the contents of your `data_dir` and perhaps some checkpoints. If you see any other files listed, then you may have some old model files that you can delete via the S3 console (though, additional files shouldn't affect the performance of model developed in this notebook). # + """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # confirm that data is in S3 bucket empty_check = [] for obj in boto3.resource('s3').Bucket(bucket).objects.all(): empty_check.append(obj.key) print(obj.key) assert len(empty_check) !=0, 'S3 bucket is empty.' print('Test passed!') # - # --- # # # Modeling # # Now that you've uploaded your training data, it's time to define and train a model! # # The type of model you create is up to you. For a binary classification task, you can choose to go one of three routes: # * Use a built-in classification algorithm, like LinearLearner. # * Define a custom Scikit-learn classifier, a comparison of models can be found [here](https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html). # * Define a custom PyTorch neural network classifier. # # It will be up to you to test out a variety of models and choose the best one. Your project will be graded on the accuracy of your final model. # # --- # # ## EXERCISE: Complete a training script # # To implement a custom classifier, you'll need to complete a `train.py` script. You've been given the folders `source_sklearn` and `source_pytorch` which hold starting code for a custom Scikit-learn model and a PyTorch model, respectively. Each directory has a `train.py` training script. To complete this project **you only need to complete one of these scripts**; the script that is responsible for training your final model. # # A typical training script: # * Loads training data from a specified directory # * Parses any training & model hyperparameters (ex. nodes in a neural network, training epochs, etc.) # * Instantiates a model of your design, with any specified hyperparams # * Trains that model # * Finally, saves the model so that it can be hosted/deployed, later # # ### Defining and training a model # Much of the training script code is provided for you. Almost all of your work will be done in the `if __name__ == '__main__':` section. To complete a `train.py` file, you will: # 1. Import any extra libraries you need # 2. Define any additional model training hyperparameters using `parser.add_argument` # 2. Define a model in the `if __name__ == '__main__':` section # 3. Train the model in that same section # # Below, you can use `!pygmentize` to display an existing `train.py` file. Read through the code; all of your tasks are marked with `TODO` comments. # # **Note: If you choose to create a custom PyTorch model, you will be responsible for defining the model in the `model.py` file,** and a `predict.py` file is provided. If you choose to use Scikit-learn, you only need a `train.py` file; you may import a classifier from the `sklearn` library. 
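#
# For orientation, a completed Scikit-learn `train.py` often ends up looking roughly like the hedged sketch below. It mirrors the starter code's structure (SageMaker paths read from environment variables, `train.csv` with the label in the first column, a joblib save); the choice of classifier and the `SM_*` defaults are illustrative assumptions, not the graded solution.

# +
# Hedged sketch of source_sklearn/train.py (illustration only)
import argparse
import os

import joblib
import pandas as pd
from sklearn.svm import SVC


def model_fn(model_dir):
    """Load the fitted model for inference."""
    return joblib.load(os.path.join(model_dir, "model.joblib"))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Assumed SageMaker conventions: paths come in through environment variables.
    parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument('--data-dir', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    # Example of an extra, tunable hyperparameter
    parser.add_argument('--C', type=float, default=1.0)
    args = parser.parse_args()

    # train.csv has no header; the label is in the first column
    train_data = pd.read_csv(os.path.join(args.data_dir, 'train.csv'), header=None, names=None)
    train_y = train_data.iloc[:, 0]
    train_x = train_data.iloc[:, 1:]

    model = SVC(C=args.C, gamma='auto')
    model.fit(train_x, train_y)

    joblib.dump(model, os.path.join(args.model_dir, "model.joblib"))
# -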
# directory can be changed to: source_sklearn or source_pytorch # !pygmentize source_sklearn/train.py # ### Provided code # # If you read the code above, you can see that the starter code includes a few things: # * Model loading (`model_fn`) and saving code # * Getting SageMaker's default hyperparameters # * Loading the training data by name, `train.csv` and extracting the features and labels, `train_x`, and `train_y` # # If you'd like to read more about model saving with [joblib for sklearn](https://scikit-learn.org/stable/modules/model_persistence.html) or with [torch.save](https://pytorch.org/tutorials/beginner/saving_loading_models.html), click on the provided links. # --- # # Create an Estimator # # When a custom model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained; the `train.py` function you specified above. To run a custom training script in SageMaker, construct an estimator, and fill in the appropriate constructor arguments: # # * **entry_point**: The path to the Python script SageMaker runs for training and prediction. # * **source_dir**: The path to the training script directory `source_sklearn` OR `source_pytorch`. # * **role**: Role ARN, which was specified, above. # * **train_instance_count**: The number of training instances (should be left at 1). # * **train_instance_type**: The type of SageMaker instance for training. Note: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types. # * **sagemaker_session**: The session used to train on Sagemaker. # * **hyperparameters** (optional): A dictionary `{'name':value, ..}` passed to the train function as hyperparameters. # # Note: For a PyTorch model, there is another optional argument **framework_version**, which you can set to the latest version of PyTorch, `1.0`. # # ## EXERCISE: Define a Scikit-learn or PyTorch estimator # # To import your desired estimator, use one of the following lines: # ``` # from sagemaker.sklearn.estimator import SKLearn # ``` # ``` # from sagemaker.pytorch import PyTorch # ``` from sagemaker.pytorch import PyTorch # your import and estimator code, here output_path = 's3://{}/{}'.format(bucket, prefix) estimator = PyTorch(entry_point='train.py', source_dir='source_pytorch', role=role, framework_version='1.0', py_version='py3', train_instance_count=1, train_instance_type='ml.c4.xlarge', output_path=output_path, sagemaker_session=sagemaker_session, hyperparameters={ 'input_features': 4, 'hidden_dim': 10, 'output_dim': 1, 'epochs': 80 }) # ## EXERCISE: Train the estimator # # Train your estimator on the training data stored in S3. This should create a training job that you can monitor in your SageMaker console. # + # %%time # Train your estimator on S3 training data estimator.fit({'train': train_location}) # - # ## EXERCISE: Deploy the trained model # # After training, deploy your model to create a `predictor`. If you're using a PyTorch model, you'll need to create a trained `PyTorchModel` that accepts the trained `<model>.model_data` as an input parameter and points to the provided `source_pytorch/predict.py` file as an entry point. # # To deploy a trained model, you'll use `<model>.deploy`, which takes in two arguments: # * **initial_instance_count**: The number of deployed instances (1). # * **instance_type**: The type of SageMaker instance for deployment. 
# # Note: If you run into an instance error, it may be because you chose the wrong training or deployment instance_type. It may help to refer to your previous exercise code to see which types of instances we used. # + # %%time # uncomment, if needed from sagemaker.pytorch import PyTorchModel model = PyTorchModel(model_data=estimator.model_data, role = role, framework_version='1.0', py_version='py3', entry_point='predict.py', source_dir='source_pytorch') # deploy your model to create a predictor predictor = model.deploy(initial_instance_count=1, instance_type='ml.t2.medium') # - # --- # # Evaluating Your Model # # Once your model is deployed, you can see how it performs when applied to our test data. # # The provided cell below, reads in the test data, assuming it is stored locally in `data_dir` and named `test.csv`. The labels and features are extracted from the `.csv` file. # + """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import os # read in test data, assuming it is stored locally test_data = pd.read_csv(os.path.join(data_dir, "test.csv"), header=None, names=None) # labels are in the first column test_y = test_data.iloc[:,0] test_x = test_data.iloc[:,1:] # - # ## EXERCISE: Determine the accuracy of your model # # Use your deployed `predictor` to generate predicted, class labels for the test data. Compare those to the *true* labels, `test_y`, and calculate the accuracy as a value between 0 and 1.0 that indicates the fraction of test data that your model classified correctly. You may use [sklearn.metrics](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics) for this calculation. # # **To pass this project, your model should get at least 90% test accuracy.** # + import numpy as np # + # First: generate predicted, class labels test_y_preds = np.squeeze(np.round(predictor.predict(test_x))) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ # test that your model generates the correct number of labels assert len(test_y_preds)==len(test_y), 'Unexpected number of predictions.' print('Test passed!') # + # Second: calculate the test accuracy tp = np.logical_and(test_y, test_y_preds).sum() fp = np.logical_and(1-test_y, test_y_preds).sum() tn = np.logical_and(1-test_y, 1-test_y_preds).sum() fn = np.logical_and(test_y, 1-test_y_preds).sum() accuracy = (tp + tn) / (tp + fp + tn + fn) print(accuracy) ## print out the array of predicted and true labels, if you want print('\nPredicted class labels: ') print(test_y_preds) print('\nTrue class labels: ') print(test_y.values) # - {'TP': tp, 'FP': fp, 'FN': fn, 'TN': tn, 'Accuracy': accuracy} test_data # ### Question 1: How many false positives and false negatives did your model produce, if any? And why do you think this is? # **Answer**: # # My model didn't produce any false positives or negatives, it got 100% accuracy. I noticed in `test_x` that whenever the second feature, which is `c_3` (containment N=3), is below 0.1 the sample is classified as non plagiarism. I think this feature has a higher weight inside the model. # # ### Question 2: How did you decide on the type of model to use? # **Answer**: # # I decided for a pytorch because I had more experience with torch than scikit. As per layers, I just used one liner input and one hidden input because of simplicity and faster response. # # # ---- # ## EXERCISE: Clean up Resources # # After you're done evaluating your model, **delete your model endpoint**. You can do this with a call to `.delete_endpoint()`. 
You need to show, in this notebook, that the endpoint was deleted. Any other resources, you may delete from the AWS console, and you will find more instructions on cleaning up all your resources, below. # uncomment and fill in the line below! # <name_of_deployed_predictor>.delete_endpoint() predictor.delete_endpoint() # ### Deleting S3 bucket # # When you are *completely* done with training and testing models, you can also delete your entire S3 bucket. If you do this before you are done training your model, you'll have to recreate your S3 bucket and upload your training data again. # + # deleting bucket, uncomment lines below bucket_to_delete = boto3.resource('s3').Bucket(bucket) bucket_to_delete.objects.all().delete() # - # ### Deleting all your models and instances # # When you are _completely_ done with this project and do **not** ever want to revisit this notebook, you can choose to delete all of your SageMaker notebook instances and models by following [these instructions](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html). Before you delete this notebook instance, I recommend at least downloading a copy and saving it, locally. # --- # ## Further Directions # # There are many ways to improve or add on to this project to expand your learning or make this more of a unique project for you. A few ideas are listed below: # * Train a classifier to predict the *category* (1-3) of plagiarism and not just plagiarized (1) or not (0). # * Utilize a different and larger dataset to see if this model can be extended to other types of plagiarism. # * Use language or character-level analysis to find different (and more) similarity features. # * Write a complete pipeline function that accepts a source text and submitted text file, and classifies the submitted text as plagiarized or not. # * Use API Gateway and a lambda function to deploy your model to a web application. # # These are all just options for extending your work. If you've completed all the exercises in this notebook, you've completed a real-world application, and can proceed to submit your project. Great job!
Project_Plagiarism_Detection/3_Training_a_Model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 8.1 FGSM 공격 # # 정상 이미지와 노이즈를 더해 머신러닝 모델을 헷갈리게 하는 이미지가 # 바로 적대적 예제(Adversarial Example) 입니다. # 이 프로젝트에선 Fast Gradient Sign Method, 즉 줄여서 FGSM이라는 방식으로 # 적대적 예제를 생성해 미리 학습이 완료된 딥러닝 모델을 공격해보도록 하겠습니다. # # FGSM 학습이 필요 없지만 공격 목표를 정할 수 없는 Non-Targeted 방식의 공격입니다. # 또, 공격하고자 하는 모델의 정보가 필요한 White Box 방식입니다. # # 공격이 어떻게 진행되는지 단계별로 설명하도록 하겠습니다. # + import torch import torchvision.models as models import torchvision.transforms as transforms import numpy as np from PIL import Image import json # + # %matplotlib inline import matplotlib.pyplot as plt torch.manual_seed(1) # - # ## 학습된 모델 불러오기 # # `torchvison`은 `AlexNet`, `VGG`, `ResNet`, `SqueezeNet`, `DenseNet`, `Inception`등 여러가지 학습된 모델들을 제공합니다. # 대부분 ImageNet이라는 데이터셋으로 학습된 모델이며, # 컬러 이미지를 다루는 컴퓨터 비전 분야의 대표적인 데이터셋입니다. # # 간단하게 사용하고자 하는 모델을 고르고, # 함수 내에 `pretrained=True`를 명시하면 # 학습된 모델을 가져옵니다. # 이미 학습된 모델이므로 재학습을 시킬 필요 없이 우리가 원하는 # 이미지를 분류하게 할 수 있습니다. # # 본 예제에선 `ResNet101`이라는 모델을 사용하고 있습니다. # 너무 복잡하지도 않고, 너무 간단하지도 않은 적당한 모델이라 생각하여 채택하게 되었습니다. # ImageNet 테스트 데이터셋을 돌려보았을때 # Top-1 error 성능은 22.63, # Top-5 error는 6.44로 성능도 좋게 나오는 편입니다. # 모델을 바꾸고 싶다면 이름만 바꾸면 됩니다. # 성능을 더 끌어올리고 싶다면 `DenseNet`이나 `Inception v3`같은 모델을 사용하고, # 노트북 같은 컴퓨터를 사용해야된다면 `SqueezeNet`같이 가벼운 모델을 사용하면 됩니다. # model = models.resnet50(pretrained=True) model.eval() print(model) # ## 데이터셋 불러오기 # # 방금 불러온 모델을 그대로 사용할 수 있지만, # 실제 예측값을 보면 0부터 1000까지의 숫자를 내뱉을 뿐입니다. # 이건 ImageNet 데이터셋의 클래스들의 지정 숫자(인덱스) 입니다. # 사람이 각 클래스 숫자가 무엇을 의미하는지 알아보기 위해선 # 숫자와 클래스 이름을 이어주는 작업이 필요합니다. # # 미리 준비해둔 `imagenet_classes.json`이라는 파일에 각 숫자가 어떤 클래스 제목을 의미하는지에 대한 정보가 담겨있습니다. # `json`파일을 파이썬 사용자들에게 좀더 친숙한 # 딕셔너리 자료형으로 만들어 언제든 사용할 수 있도록 # 인덱스에서 클래스로 매핑해주는 `idx2class`와 # 반대로 클래스 이름을 숫자로 변환해주는`class2idx`을 만들어보겠습니다. CLASSES = json.load(open('./imagenet_samples/imagenet_classes.json')) idx2class = [CLASSES[str(i)] for i in range(1000)] class2idx = {v:i for i,v in enumerate(idx2class)} # ## 공격용 이미지 불러오기 # # 모델이 준비되었으니 공격하고자 하는 이미지를 불러오겠습니다. # 실제 공격에 사용될 데이터는 학습용 데이터에 존재하지 않을 것이므로 # 우리도 데이터셋에 존재하지 않는 이미지를 새로 준비해야 합니다. # # 인터넷에 존재하는 이미지는 다양한 사이즈가 있으므로 # 새로운 입력은 `torchvision`의 `transforms`를 이용하여 # 이미지넷과 같은 사이즈인 224 x 224로 바꿔주도록 하겠습니다. # 그리고 파이토치 텐서로 변환하고, 노말라이즈를 하는 기능을 추가하여 # `img_transforms`를 통과시키면 어떤 이미지던 입력으로 사용할 수 있도록 합니다. img_transforms = transforms.Compose( [transforms.Resize((224, 224), Image.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) # 이미지넷 데이터셋에는 치와와(Chihuahua)라는 클래스가 존재합니다. # 그래서 약간 부담스럽지만 귀여운 치와와 사진을 준비해보았습니다. # + img = Image.open('imagenet_samples/chihuahua.jpg') img_tensor = img_transforms(img) plt.figure(figsize=(10,5)) plt.imshow(np.asarray(img)) # - # ## 공격 전 성능 확인하기 # # 공격을 하기 전에 우리가 준비한 학습용 데이터에 없는 # 이미지를 얼마나 잘 분류하나 확인하겠습니다. # 분류하는 것은 매우 간단한데, # 아까 준비한 모델에 이미지를 통과시키기만 하면 됩니다. # 모델에서 나온 값에 `Softmax`를 씌우면 # 각각의 레이블에 대한 확률 예측값으로 환산됩니다. # # ```python # out = model(img_tensor.unsqueeze(0)) # probs = softmax(out) # ``` # # 그리고 `argmax`를 이용하여 가장 큰 확률을 갖고 있는 인덱스, # 즉, 모델이 가장 확신하는 예측값을 가져올 수 있습니다. # # 우리가 준비한 ResNet101 모델은 정확하게 치와와라고 분류하는 것을 볼 수 있습니다. # 신뢰도도 99.87%로 매우 치와와라고 확신하고 있네요. 
# # ``` # 151:Chihuahua:18.289345:0.9987244 # ``` softmax = torch.nn.Softmax() img_tensor.requires_grad_(True) out = model(img_tensor.unsqueeze(0)) probs = softmax(out) cls_idx = np.argmax(out.data.numpy()) print(str(cls_idx) + ":" + idx2class[cls_idx] + ":" + str(out.data.numpy()[0][cls_idx]) + ":" + str(probs.data.numpy()[0][cls_idx])) # ### 이미지 변환하기 # # # 입력에 사용되는 이미지는 노말라이즈되어 있으므로, # 다시 사람의 눈에 보이게 하기 위해서는 반대로 변환시켜주는 작업이 필요합니다. # `norm`함수는 Normalize를, `unnorm`함수는 다시 사람의 눈에 보이게 # 복원시켜주는 역활을 합니다. # + def norm(x): return 2.*(x/255.-0.5) def unnorm(x): un_x = 255*(x*0.5+0.5) un_x[un_x > 255] = 255 un_x[un_x < 0] = 0 un_x = un_x.astype(np.uint8) return un_x # - # ## 적대적 예제 시각화 하기 # # 적대적 예제의 목적중에 하나가 바로 사람의 눈에는 다름이 없어야 함으로 # 시각화를 하여 결과물을 확인하는 것도 중요합니다. def draw_result(img, noise, adv_img): fig, ax = plt.subplots(1, 3, figsize=(15, 10)) orig_class, attack_class = get_class(img), get_class(adv_img) ax[0].imshow(reverse_trans(img[0])) ax[0].set_title('Original image: {}'.format(orig_class.split(',')[0])) ax[1].imshow(noise[0].cpu().numpy().transpose(1, 2, 0)) ax[1].set_title('Attacking noise') ax[2].imshow(reverse_trans(adv_img[0])) ax[2].set_title('Adversarial example: {}'.format(attack_class)) for i in range(3): ax[i].set_axis_off() plt.tight_layout() plt.show() # ## 모델 정보 추출하기 criterion = F.cross_entropy def fgsm_attack(model, x, y, eps): x_adv = x.clone().requires_grad_() h_adv = model(x_adv) cost = F.cross_entropy(h_adv, y) model.zero_grad() out[0,class2idx['wooden spoon']].backward() # + img_grad = img_tensor.grad img_tensor = img_tensor.detach() grad_sign = np.sign(img_grad.numpy()).astype(np.uint8) epsilon = 0.05 new_img_array = np.asarray(unnorm(img_tensor.numpy()))+epsilon*grad_sign new_img_array[new_img_array>255] = 255 new_img_array[new_img_array<0] = 0 new_img_array = new_img_array.astype(np.uint8) plt.figure(figsize=(10,5)) plt.subplot(1,2,1) plt.imshow(unnorm(img_tensor.numpy()).transpose(1,2,0)) plt.subplot(1,2,2) plt.imshow(new_img_array.transpose(1,2,0)) new_img_array = norm(new_img_array) new_img_var = torch.FloatTensor(new_img_array) new_img_var.requires_grad_(True) new_out = model(new_img_var.unsqueeze(0)) new_out_np = new_out.data.numpy() new_probs = softmax(new_out) new_cls_idx = np.argmax(new_out_np) print(str(new_cls_idx) + ":" + idx2class[new_cls_idx] + ":" + str(new_probs.data.numpy()[0][new_cls_idx])) # -
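# ## A self-contained FGSM helper (sketch)
#
# The `fgsm_attack` function sketched above is incomplete: `F` (`torch.nn.functional`) is never imported and the function never forms or returns the adversarial image. The cell below is a hedged, self-contained version of the same epsilon-times-sign-of-gradient recipe used in this notebook; the helper name and the commented usage lines are illustrative, not part of the original code.

# +
import torch
import torch.nn.functional as F


def fgsm_attack_full(model, x, y, eps):
    """One FGSM step: move x by eps * sign(dLoss/dx) to *increase* the loss.

    x: normalized input batch, shape (N, C, H, W)
    y: integer class labels, shape (N,)
    eps: perturbation size on the same scale as x
    """
    x_adv = x.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(x_adv), y)
    model.zero_grad()
    loss.backward()
    x_adv = x_adv + eps * x_adv.grad.sign()   # non-targeted: go up the loss gradient
    return x_adv.detach()


# Example usage with the objects defined earlier in this notebook:
# label = torch.LongTensor([cls_idx])                     # original "Chihuahua" index
# adv = fgsm_attack_full(model, img_tensor.unsqueeze(0), label, eps=0.05)
# print(idx2class[model(adv).argmax(dim=1).item()])       # likely no longer Chihuahua
# -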
08-Hacking-Deep-Learning/01-fgsm-attack.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # # AI Hub Open API 서비스 # # https://aiopen.etri.re.kr/service_list.php # # ## 오픈 AI API·DATA 서비스 # # - ETRI에서 과학기술정보통신부 R&D 과제를 통해 개발된 최첨단 인공지능 기술들을 오픈 API 형태로 개발 # - 중소·벤처 기업, 학교, 개인 개발자 등의 다양한 사용자들에게 제공 # # # > API(Application Programming Interface): # 컴퓨터나 컴퓨터 프로그램 사이의 연결을 할 수 있도록 제공 # - # # 위키백과 QA API 란? # # > 자연어로 기술된 질문의 의미를 분석하여, 위키백과 문서에서 정답과 신뢰도 및 검색 단락을 추론하여 제공하는 API 입니다. # > # > 위키백과 QA는 질문에서 물어보고자 하는 대상의 정의와 속성에 대해 답을 제공할 수 있을 뿐만 아니라 인물, 장소, 작품 등 다양한 단답형 정답을 묻는 질문에도 답을 할 수 있습니다. 보다 정확한 답을 제공하기 위해 정보검색을 기반으로 언어분석된 결과로부터 정답후보를 찾는 NLP 기반 질의응답 시스템과 기계독해 기반의 딥러닝 기반 질의응답 시스템, 지식베이스 기반의 질의응답 시스템을 통합하여 위키백과 QA 시스템을 구성하였습니다. 정답을 찾는 데이터로는 한국어 위키피디아(2018년 10월 버전, 43만건 문서로 구성) 및 우리말샘 사전을 사용하였습니다. # > # > 위키백과QA API는HTTP 기반의 REST API 인터페이스로 JSON 포맷 기반의 입력 및 출력을 지원하며 ETRI에서 제공하는 API Key 인증을 통해 사용할 수 있는 Open API 입니다. # > # > https://aiopen.etri.re.kr/guide_wikiQA.php # # # ```python # #-*- coding:utf-8 -*- # import urllib3 # import json # # openApiURL = "http://aiopen.etri.re.kr:8000/WikiQA" # accessKey = "YOUR_ACCESS_KEY" # question = "YOUR_QUESTION" # type = "ENGINE_TYPE" # # requestJson = { # "access_key": accessKey, # "argument": { # "question": question, # "type": type # } # } # # http = urllib3.PoolManager() # response = http.request( # "POST", # openApiURL, # headers={"Content-Type": "application/json; charset=UTF-8"}, # body=json.dumps(requestJson) # ) # # print("[responseCode] " + str(response.status)) # print("[responBody]") # print(str(response.data,"utf-8")) # ``` def get_key(filename): mod = int(input("decryption code:")) with open(filename, 'r') as f: key_str = f.read() r_key = ([chr(ord(i)-mod) for i in key_str]) r_key = "".join(r_key) return r_key # + # https://aiopen.etri.re.kr/guide_wikiQA.php def wiki_qa(question:str): import urllib3 import json access_key = get_key('key.txt') openApiURL = "http://aiopen.etri.re.kr:8000/WikiQA" question = question engine_type = "hybridqa" requestJson = { "access_key": access_key, "argument": { "question": question, "type": engine_type } } http = urllib3.PoolManager() response = http.request( "POST", openApiURL, headers={"Content-Type": "application/json; charset=UTF-8"}, body=json.dumps(requestJson) ) response_json = json.loads(response.data) print("-------------------") print("[responseCode] " + str(response.status)) print("[responBody]") print(json.dumps(response_json, indent=2)) print("-------------------") if response_json['result'] == -1: return None return response_json # - response = wiki_qa("대한민국의 수도는?") # sample response # ```json # ------------------- # [responseCode] 200 # [responBody] # { # "result": 0, # "return_object": { # "WiKiInfo": { # "IRInfo": [ # { # "wiki_title": "", # "sent": "", # "url": "" # } # ], # "AnswerInfo": [ # { # "rank": 1.0, # "answer": "\uc11c\uc6b8\ud2b9\ubcc4\uc2dc(\uc11c\uc6b8\u7279\u5225\u5e02)\ub294 \ub300\ud55c\ubbfc\uad6d\uc758 \uc218\ub3c4\uc774\uc790 \ub3c4\uc2dc\uc774\ub2e4. 
\ubc31\uc81c\uc758 \uccab \uc218\ub3c4\uc778 \uc704\ub840\uc131\uc774\uc5c8\uace0, \uace0\ub824 \ub54c\ub294 \ub0a8\uacbd(\u5357\u4eac)\uc774\uc5c8\uc73c\uba70, \uc870\uc120\uc758 \uc218\ub3c4\uac00 \ub41c \uc774\ud6c4\ub85c \ud604\uc7ac\uae4c\uc9c0 \ub300\ud55c\ubbfc\uad6d \uc815\uce58\u00b7\uacbd\uc81c\u00b7\uc0ac\ud68c\u00b7\ubb38\ud654\uc758 \uc911\uc2ec\uc9c0 \uc5ed\ud560\uc744 \ud558\uace0 \uc788\ub2e4. \uc911\uc559\uc73c\ub85c \ud55c\uac15\uc774 \ud750\ub974\uace0, \ubd81\ud55c\uc0b0, \uad00\uc545\uc0b0, \ub3c4\ubd09\uc0b0, \ubd88\uc554\uc0b0, \uc778\ub2a5\uc0b0, \uc778\uc655\uc0b0, \uccad\uacc4\uc0b0 \ub4f1\uc758 \uc5ec\ub7ec \uc0b0\ub4e4\ub85c \ub458\ub7ec\uc2f8\uc778 \ubd84\uc9c0 \uc9c0\ud615\uc758 \ub3c4\uc2dc\uc774\ub2e4. \ub113\uc774\ub294 605.2 km\u00b2\uc73c\ub85c \ub300\ud55c\ubbfc\uad6d \uc804 \uad6d\ud1a0\uc758 0.6%\ub3c4 \ubabb \ubbf8\uce58\uc9c0\ub9cc, \ucc9c\ub9cc \uba85 \uc815\ub3c4\uc758 \uc778\uad6c\uac00 \uc0b4\uace0 \uc788\uc5b4 \uc778\uad6c\ubc00\ub3c4\ub294 \ud604\uc800\ud788 \ub192\ub2e4.", # "confidence": 0.0, # "url": [ # "https://ko.wikipedia.org/wiki/\uc11c\uc6b8\ud2b9\ubcc4\uc2dc" # ] # } # ] # } # } # } # ``` answer = response['return_object']['WiKiInfo']['AnswerInfo'][0]['answer'] print("정답:", answer) # # 사람 상태 이해 API 란? # # # > 영상 내에 존재하는 모든 사람 영역을 자동으로 검출하고 해당 사람의 상태를 판단하여 사용자에게 그 결과를 출력해주게 됩니다. 본 기술은 도심에서 주취, 기절 등과 같이 쓰러져 도움이 필요한 사람을 자동으로 검출하여 위험 상황 발생 전에 선제적으로 대응할 수 있는 시스템 개발에 사용 될 수 있습니다. 사람 상태 이해 API는 HTTP 기반의 REST API 인터페이스로 JSON 포맷 기반의 입력 및 출력을 지원하며 ETRI에서 제공하는 API Key 인증을 통해 사용할 수 있는 Open API입니다 # > # > https://aiopen.etri.re.kr/guide_humanstatus.php # # ```python # # #-*- coding:utf-8 -*- # import urllib3 # import json # import base64 # openApiURL = "http://aiopen.etri.re.kr:8000/HumanStatus" # accessKey = "YOUR_ACCESS_KEY" # imageFilePath = "IMAGE_FILE_PATH" # type = "IMAGE_FILE_TYPE" # # file = open(imageFilePath, "rb") # imageContents = base64.b64encode(file.read()).decode("utf8") # file.close() # # requestJson = { # "access_key": accessKey, # "argument": { # "type": type, # "file": imageContents # } # } # # http = urllib3.PoolManager() # response = http.request( # "POST", # openApiURL, # headers={"Content-Type": "application/json; charset=UTF-8"}, # body=json.dumps(requestJson) # ) # # print("[responseCode] " + str(response.status)) # print("[responBody]") # print(response.data) # ``` # aihub 에서 응답받은 데이터를 기반으로 그림 그리기 def draw_status(image_file, status_list, confidence_threshold=0.6): import matplotlib.pyplot as plt import matplotlib.patches as patches from PIL import Image im = Image.open(image_file) plt.imshow(im) ax = plt.gca() for status in status_list: confidence = float(status['confidence']) if confidence < confidence_threshold: continue x = float(status['x']) y = float(status['y']) width = float(status['width']) height = float(status['height']) text = f"{status['class']}:{confidence:.2f}" rect = patches.Rectangle((x,y), width, height, linewidth=2, edgecolor='blue', fill = False) ax.add_patch(rect) ax.text(x+5,y+20, text, c="blue") plt.show() # aihub 의 API 호출 def human_status(filename:str): #-*- coding:utf-8 -*- import urllib3 import json import base64 openApiURL = "http://aiopen.etri.re.kr:8000/HumanStatus" accessKey = get_key("key.txt") imageFilePath = filename file_type = "png" file = open(imageFilePath, "rb") imageContents = base64.b64encode(file.read()).decode("utf8") file.close() requestJson = { "access_key": accessKey, "argument": { "type": file_type, "file": imageContents } } http = urllib3.PoolManager() response = 
http.request( "POST", openApiURL, headers={"Content-Type": "application/json; charset=UTF-8"}, body=json.dumps(requestJson) ) response_json = json.loads(response.data) print("-------------------") print("[responseCode] " + str(response.status)) print("[responBody]") print(json.dumps(response_json, indent=2)) print("-------------------") if response_json['result'] == -1: return None return response_json image_file = "sample1.png" status_list = human_status(image_file)['return_object'][0]["data"][1:] # sample response # ```json # [responseCode] 200 # [responBody] # { # "result": 0, # "return_object": [ # { # "data": [ # "/home/deepview/ai-human-status-detect/src/../upload/c9238d0b-5441-43bc-9c58-6ebe5e47bf19/0.png", # { # "class": "Standing", # "confidence": "0.9608369469642639", # "x": 49.54057312011719, # "y": 8.193740844726562, # "width": 60.741249084472656, # "height": 200.3055419921875 # }, # { # "class": "Standing", # "confidence": "0.9430365562438965", # "x": 254.0928955078125, # "y": 11.717720985412598, # "width": 85.68212890625, # "height": 207.81676387786865 # }, # { # "class": "Standing", # "confidence": "0.939252495765686", # "x": 429.7633972167969, # "y": 4.128190040588379, # "width": 69.19580078125, # "height": 219.653883934021 # }, # { # "class": "Standing", # "confidence": "0.7528595328330994", # "x": 505.0583190917969, # "y": 32.893653869628906, # "width": 70.85635375976562, # "height": 179.47747039794922 # } # ] # } # ] # } # ``` draw_status(image_file, status_list, confidence_threshold=0.7)
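# `get_key` above reads `key.txt` and undoes a simple per-character shift. For completeness, a hedged sketch of the matching helper that would have produced such a file from a raw API key is shown below; the function name and the example key are hypothetical.

# +
def save_key(filename, raw_key, mod):
    """Store an API key with each character shifted up by `mod`,
    so that get_key's chr(ord(c) - mod) recovers the original key."""
    shifted = "".join(chr(ord(c) + mod) for c in raw_key)
    with open(filename, 'w') as f:
        f.write(shifted)

# Example (hypothetical key value):
# save_key('key.txt', 'my-etri-access-key', 3)
# -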
practice/day5/answer/3.aihub.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Worksheet 1
#
# First worksheet for COMSM0075 Information processing and the brain

import math
import numpy as np
from numpy import where

# ## Q1
#
# Work out the marginal probability distributions and a conditional distribution

# +
D = np.array([
    [1/16, 1/2],
    [0, 1/4],
    [1/16, 1/8]
])

# marginal distributions: P(Y) over the rows 1, 2, 3 and P(X) over the columns a, b
D.sum(1), D.sum(0)

# +
# conditional distribution P(Y|X=a)
p_y_given_a = D[:,0]
p_y_given_a / p_y_given_a.sum()
# -

# ## Q2
#
# Work out entropy

# +
# H(X), with X the column variable {a, b}: P(X) = (1/8, 7/8)
X_d = D.sum(0)
-(X_d * np.log2(X_d)).sum()

# +
# H(Y)
Y_d = D.sum(1)
-(Y_d * np.log2(Y_d)).sum()

# +
# H(X|Y)
D_given_y = np.vstack((
    D[0,:] / D[0,:].sum(),
    D[1,:] / D[1,:].sum(),
    D[2,:] / D[2,:].sum()
))
-(D * where(D_given_y != 0, np.log2(D_given_y), 0)).sum()

# +
# H(Y|X)
# First work out the conditional distribution P(Y|X)
D_given_x = np.column_stack((
    D[:,0] / D[:,0].sum(),
    D[:,1] / D[:,1].sum()
))
# Then sum the joint probabilities weighted by log2 of the conditionals
-(D * where(D_given_x != 0, np.log2(D_given_x), 0)).sum()

# +
# H(X,Y)
-(D * where(D != 0, np.log2(D), 0)).sum()

# +
# H(Y) - H(Y|X)
H_y = -(Y_d * np.log2(Y_d)).sum()
H_y_given_x = -(D * where(D_given_x != 0, np.log2(D_given_x), 0)).sum()
H_y - H_y_given_x

# +
# I(X;Y) = H(Y) - H(Y|X), i.e. the quantity computed in the previous cell
H_y - H_y_given_x
# -

# ## Q3
#
# Working out entropy (pt 2?)

# +
D = np.array([
    [1/2**4, 1/2**4],
    [4*1/2**5, 4*1/2**5],
    [10*1/2**6, 10*1/2**6],
    [20*1/2**7, 20*1/2**7]
])

D_per_series = np.array([
    [1/16],
    [1/32],
    [1/64],
    [1/128]
])

# +
# H(X)
X_d = D.sum(1)
-(X_d * np.log2(D_per_series.T)).sum()

# +
# H(Y)
Y_d = D.sum(1)
-(Y_d * np.log2(Y_d)).sum()

# +
# H(X|Y) = H(X,Y) - H(Y) = H(X) - H(Y), since H(Y|X) = 0
(-(X_d * np.log2(D_per_series.T)).sum()) - (-(Y_d * np.log2(Y_d)).sum())

# +
# H(Y|X) is equal to 0 since the outcome X determines the series length Y
0
# -

# ## Q4
#
# The average entropy
#
# For a distribution with two events $\{x_1,x_2\}$, with
#
# $p(x_1)=p$ and $p(x_2)=1-p$, under the assumption that each value of $p$ is equally likely:
#
# ---
#
# $H(X) = - \left(p \log_2 p + (1-p)\log_2 (1-p)\right)$
#
# To get the average entropy we need
#
# $\displaystyle\langle H\rangle_p = \int_0^1 H(p)\,\text{d}p = - \int_0^1 p\log_2 p\,\text{d}p-\int_0^1(1-p)\log_2(1-p)\,\text{d}p$
#
# Given that the choice of which probability to call $p$ and which to call $1-p$ is arbitrary, you may expect that the two integrals are equal:
#
# $\displaystyle\int_0^1 p\log_2 p\,\text{d}p = \int_0^1(1-p)\log_2(1-p)\,\text{d}p$
#
# This is easy to check by substituting $q=1-p$. Hence:
#
# $\displaystyle\langle H\rangle_p = -2 \int_0^1 p\log_2 p\,\text{d}p$
#
# To make the integration easier, let us switch to the natural log
#
# $\displaystyle\langle H_e\rangle_p = -2 \int_0^1 p\ln p\,\text{d}p$
#
# It is easy to convert back since $\log_2 p = \frac{\ln p}{\ln 2}$. Now use integration by parts with $u = \ln p$ and $\text{d}v = p\,\text{d}p$:
#
# $\displaystyle\int_0^1 p\ln p\,\text{d}p = \left[\frac{p^2}{2}\ln p\right]_0^1 - \int_0^1\frac{p^2}{2}\frac{1}{p}\,\text{d}p = 0 - \left[\frac{p^2}{4}\right]_0^1 = -\frac{1}{4}$
#
# Therefore:
#
# $\displaystyle\langle H_e\rangle_p = -2 \times -\frac{1}{4} = \frac{1}{2}$
#
# and hence, converting back to bits:
#
# $\displaystyle\langle H\rangle_p = \frac{1}{2\ln 2}\approx 0.72$
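#
# As a quick sanity check of the derivation, the result can also be obtained numerically by averaging the binary entropy over a fine grid of $p$ values.

# +
ps = np.linspace(1e-9, 1 - 1e-9, 100001)
H = -(ps * np.log2(ps) + (1 - ps) * np.log2(1 - ps))
print(H.mean())               # numerical average, ~0.7213
print(1 / (2 * np.log(2)))    # exact value 1/(2 ln 2), ~0.7213
# -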
worksheets/worksheet-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Data Preparation # ## Importing Data dan Inspection # + import pandas as pd df = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/data_retail.csv', sep=';') print('Lima data teratas:') print(df.head()) print('\nInfo dataset:') print(df.info()) # - # ## Data Cleansing # + # Kolom First_Transaction df['First_Transaction'] = pd.to_datetime(df['First_Transaction']/1000, unit='s', origin='1970-01-01') # Kolom Last_Transaction df['Last_Transaction'] = pd.to_datetime(df['Last_Transaction']/1000, unit='s', origin='1970-01-01') print('Lima data teratas:') print(df.head()) print('\nInfo dataset:') print(df.info()) # - # ## Churn Customers # + # Pengecekan transaksi terakhir dalam dataset print(max(df['Last_Transaction'])) # Klasifikasikan customer yang berstatus churn atau tidak dengan boolean df.loc[df['Last_Transaction'] <= '2018-08-01', 'is_churn'] = True df.loc[df['Last_Transaction'] > '2018-08-01', 'is_churn'] = False print('Lima data teratas:') print(df.head()) print('\nInfo dataset:') print(df.info()) # - # ## Menghapus kolom yang tidak diperlukan # + # Hapus kolom-kolom yang tidak diperlukan del df['no'] del df['Row_Num'] # Cetak lima data teratas print(df.head()) # - # # Data Visualization # ## Customer acquisition by year # + import matplotlib.pyplot as plt # Kolom tahun transaksi pertama df['Year_First_Transaction'] = df['First_Transaction'].dt.year # Kolom tahun transaksi terakhir df['Year_Last_Transaction'] = df['Last_Transaction'].dt.year df_year = df.groupby(['Year_First_Transaction'])['Customer_ID'].count() df_year.plot(x='Year_First_Transaction', y='Customer_ID', kind='bar', title='Graph of Customer Acquisition') plt.xlabel('Year_First_Transaction') plt.ylabel('Num_of_Customer') plt.tight_layout() plt.show() # - # ## Transaction by year plt.clf() df_year = df.groupby(['Year_First_Transaction'])['Count_Transaction'].sum() df_year.plot(x='Year_First_Transaction', y='Count_Transaction', kind='bar', title='Graph of Transaction Customer') plt.xlabel('Year_First_Transaction') plt.ylabel('Num_of_Transaction') plt.tight_layout() plt.show() # ## Average transaction amount by year # + import seaborn as sns plt.clf() sns.pointplot(data = df.groupby(['Product', 'Year_First_Transaction']).mean().reset_index(), x='Year_First_Transaction', y='Average_Transaction_Amount', hue='Product') plt.tight_layout() plt.show() # - # ## Proporsi churned customer untuk setiap produk plt.clf() # Melakukan pivot data dengan pivot_table df_piv = df.pivot_table(index='is_churn', columns='Product', values='Customer_ID', aggfunc='count', fill_value=0) # Mendapatkan Proportion Churn by Product plot_product = df_piv.count().sort_values(ascending=False).head(5).index # Plot pie chartnya df_piv = df_piv.reindex(columns=plot_product) df_piv.plot.pie(subplots=True, figsize=(10, 7), layout=(-1, 2), autopct='%1.0f%%', title='Proportion Churn by Product') plt.tight_layout() plt.show() # ## Distribusi kategorisasi count transaction # + plt.clf() # Kategorisasi jumlah transaksi def func(row): if row['Count_Transaction'] == 1: val = '1. 1' elif (row['Count_Transaction'] > 1 and row['Count_Transaction'] <= 3): val ='2. 2 - 3' elif (row['Count_Transaction'] > 3 and row['Count_Transaction'] <= 6): val ='3. 
4 - 6' elif (row['Count_Transaction'] > 6 and row['Count_Transaction'] <= 10): val ='4. 7 - 10' else: val ='5. > 10' return val # Tambahkan kolom baru df['Count_Transaction_Group'] = df.apply(func, axis=1) df_year = df.groupby(['Count_Transaction_Group'])['Customer_ID'].count() df_year.plot(x='Count_Transaction_Group', y='Customer_ID', kind='bar', title='Customer Distribution by Count Transaction Group') plt.xlabel('Count_Transaction_Group') plt.ylabel('Num_of_Customer') plt.tight_layout() plt.show() # - # ## Distribusi kategorisasi average transaction amount # + plt.clf() # Kategorisasi rata-rata besar transaksi def f(row): if (row['Average_Transaction_Amount'] >= 100000 and row['Average_Transaction_Amount'] <=200000): val ='1. 100.000 - 250.000' elif (row['Average_Transaction_Amount'] >250000 and row['Average_Transaction_Amount'] <= 500000): val ='2. >250.000 - 500.000' elif (row['Average_Transaction_Amount'] >500000 and row['Average_Transaction_Amount'] <= 750000): val ='3. >500.000 - 750.000' elif (row['Average_Transaction_Amount'] >750000 and row['Average_Transaction_Amount'] <= 1000000): val ='4. >750.000 - 1.000.000' elif (row['Average_Transaction_Amount'] >1000000 and row['Average_Transaction_Amount'] <= 2500000): val ='5. >1.000.000 - 2.500.000' elif (row['Average_Transaction_Amount'] >2500000 and row['Average_Transaction_Amount'] <= 5000000): val ='6. >2.500.000 - 5.000.000' elif (row['Average_Transaction_Amount'] >5000000 and row['Average_Transaction_Amount'] <= 10000000): val ='7. >5.000.000 - 10.000.000' else: val ='8. >10.000.000' return val # Tambahkan kolom baru df['Average_Transaction_Amount_Group'] = df.apply(f, axis=1) df_year = df.groupby(['Average_Transaction_Amount_Group'])['Customer_ID'].count() df_year.plot(x='Average_Transaction_Amount_Group', y='Customer_ID', kind='bar', title='Customer Distribution by Average Transaction Amount Group') plt.xlabel('Average_Transaction_Amount_Group') plt.ylabel('Num_of_Customer') plt.tight_layout() plt.show() # - # # Modelling # ## Feature Columns dan Target # + # Feature column: Year_Diff df['Year_Diff'] = df['Year_Last_Transaction'] - df['Year_First_Transaction'] # Nama-nama feature columns feature_columns = ['Average_Transaction_Amount', 'Count_Transaction', 'Year_Diff'] # Features variable X = df[feature_columns] # Target variable y = df['is_churn'].astype('int') # Update I am using Python version 3.9.9 add "astype('int')", on DQLab Python 3.5.2 ignore that. 
# - # ## Split X dan y ke dalam bagian training dan testing # + from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0) # - # ## Train, predict dan evaluate # + from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix # Inisiasi model logreg logreg = LogisticRegression() # fit the model with data logreg.fit(X_train, y_train) # Predict model y_pred = logreg.predict(X_test) # Evaluasi model menggunakan confusion matrix cnf_matrix = confusion_matrix(y_test, y_pred) print('Confusion Matrix:\n', cnf_matrix) # - # ## Visualisasi Confusion Matrix # + # import required modules import numpy as np plt.clf() # name of classes class_names = [0, 1] fig, ax = plt.subplots() tick_marks = np.arange(len(class_names)) plt.xticks(tick_marks, class_names) plt.yticks(tick_marks, class_names) # create heatmap sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap='YlGnBu', fmt='g') ax.xaxis.set_label_position('top') plt.title('Confusion Matrix', y=1.1) plt.ylabel('Actual') plt.xlabel('Predicted') plt.tight_layout() plt.show() # - # ## Accuracy, Precision, dan Recall # + from sklearn.metrics import accuracy_score, precision_score, recall_score #Menghitung Accuracy, Precision, dan Recall print('Accuracy :', accuracy_score(y_test, y_pred)) print('Precision:', precision_score(y_test, y_pred, average='micro')) print('Recall :', recall_score(y_test, y_pred, average='micro'))
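# A minimal sketch for reading the metrics above (illustration only, not part of the
# original project): on a binary, single-label problem, `average='micro'` precision and
# recall reduce to plain accuracy, so a per-class breakdown is usually more informative.
# The `y_true`/`y_pred` arrays below are made-up values, not the notebook's predictions.

# +
import numpy as np
from sklearn.metrics import (accuracy_score, classification_report,
                             precision_score, recall_score)

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])   # hypothetical churn labels
y_pred = np.array([0, 1, 1, 1, 0, 0, 1, 0])   # hypothetical model predictions

# micro-averaged precision/recall equal accuracy for binary single-label data
print(accuracy_score(y_true, y_pred))
print(precision_score(y_true, y_pred, average='micro'))
print(recall_score(y_true, y_pred, average='micro'))

# per-class precision/recall for the churn and non-churn classes separately
print(classification_report(y_true, y_pred, target_names=['not churn', 'churn']))
# -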
Data Analyst Project - Business Decision Research.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# # Working with Text Data
#
# > Status: **STABLE**
#
# Frictionless supports loading textual data.
#
# ## Reading Text Data
#
# You can read Text Data using the `Package/Resource` or `Table` API, for example:

# +
from frictionless import Resource

resource = Resource(path='text://id,name\n1,english\n2,german.csv')
print(resource.read_rows())
# -

# ## Writing Text Data
#
# The same approach works for writing Text Data:

# +
from frictionless import Resource

resource = Resource(data=[['id', 'name'], [1, 'english'], [2, 'german']])
resource.write(scheme='text', format='csv')
# -

# ## Configuring Text Data
#
# There are no options available in `TextControl`.
#
# References:
# - [Text Control](https://frictionlessdata.io/tooling/python/controls-reference/#text)
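# Purely as an illustration of the scheme (this is plain Python, not the frictionless
# API): the payload after `text://` is ordinary CSV text, so splitting it on newlines
# and commas shows the same rows that `read_rows()` returns above. The trailing `.csv`
# in the path above is presumably there so the format can be detected.

# +
payload = 'id,name\n1,english\n2,german'
rows = [line.split(',') for line in payload.splitlines()]
print(rows)  # [['id', 'name'], ['1', 'english'], ['2', 'german']]
# -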
docs/build/working-with-text/README.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import pandas as pd import numpy as np from nltk.stem import WordNetLemmatizer lemmatizer = WordNetLemmatizer() pd.options.display.float_format = '{:,.3f}'.format import argparse pd.options.mode.chained_assignment = None import seaborn as sns # %matplotlib inline sns.set(style="whitegrid", font_scale = 1.5) sns.set_context(rc={"lines.markersize": 10}) # controls size of style markers in line plots import matplotlib import pickle as pkl from matplotlib import pyplot as plt new_plot_col=list(range(1800,2010,20)) from scipy.stats.stats import pearsonr from scipy.stats.stats import pearsonr from functools import reduce # + br_to_us=pd.read_excel("../data/Book.xlsx",skiprows=[0]) br_to_us_dict=dict(zip(br_to_us.UK.tolist(),br_to_us.US.tolist())) spelling_replacement={'modifier':br_to_us_dict,'head':br_to_us_dict} def lemma_maker(x, y): #print(lemmatizer.lemmatize(x,y)) return lemmatizer.lemmatize(x,y) # + parser = argparse.ArgumentParser(description='Compute features from embeddings') parser.add_argument('--temporal', type=int, help='Value to bin the temporal information: 0 (remove temporal information), 1 (no binning), 10 (binning to decades), 20 (binning each 20 years) or 50 (binning each 50 years)') parser.add_argument('--cutoff', type=int, default=50, help='Cut-off frequency for each compound per time period : none (0), 20, 50 and 100') parser.add_argument('--contextual', action='store_true', help='Is the model contextual') args = parser.parse_args('--temporal 10 --cutoff 20'.split()) # - print(f'Cutoff: {args.cutoff}') print(f'Time span: {args.temporal}') temp_cutoff_str=str(args.temporal)+'_'+str(args.cutoff) if args.contextual: comp_df_path='../../datasets/compounds_CompoundAware_'+temp_cutoff_str+'_300.pkl' mod_df_path='../../datasets/modifiers_CompoundAware_'+temp_cutoff_str+'_300.pkl' head_df_path='../../datasets/heads_CompoundAware_'+temp_cutoff_str+'_300.pkl' features_df_path='../../datasets/features_CompoundAware_'+temp_cutoff_str+'_300.pkl' else: comp_df_path='../../datasets/compounds_CompoundAgnostic_'+temp_cutoff_str+'_300.pkl' mod_df_path='../../datasets/constituents_CompoundAgnostic_'+temp_cutoff_str+'_300.pkl' head_df_path='../../datasets/constituents_CompoundAgnostic_'+temp_cutoff_str+'_300.pkl' features_df_path='../../datasets/features_CompoundAgnostic_'+temp_cutoff_str+'_300.pkl' # + heads=pd.read_pickle(head_df_path) if args.temporal!=0: heads.index.set_names('time', level=1,inplace=True) heads.index.set_names('head',level=0,inplace=True) # + modifiers=pd.read_pickle(mod_df_path) if args.temporal!=0: modifiers.index.set_names('time', level=1,inplace=True) modifiers.index.set_names('modifier',level=0,inplace=True) # + compounds=pd.read_pickle(comp_df_path) if args.temporal!=0: compounds.index.set_names('time', level=2,inplace=True) compounds.drop(['common'],axis=1,inplace=True) compounds=compounds+1 # - if args.temporal!=0: all_comps=compounds.reset_index()[['modifier','head','time']] mod_prod=compounds.groupby(['modifier','time']).size().to_frame() mod_prod.columns=['mod_prod'] head_prod=compounds.groupby(['head','time']).size().to_frame() head_prod.columns=['head_prod'] prod1=pd.merge(all_comps,mod_prod.reset_index(),how='left',on=['modifier','time']) productivity=pd.merge(prod1,head_prod.reset_index(),how='left',on=['head','time']) 
productivity.set_index(['modifier','head','time'],inplace=True) else: all_comps=compounds.reset_index()[['modifier','head']] mod_prod=compounds.groupby(['modifier']).size().to_frame() mod_prod.columns=['mod_prod'] head_prod=compounds.groupby(['head']).size().to_frame() head_prod.columns=['head_prod'] prod1=pd.merge(all_comps,mod_prod.reset_index(),how='left',on=['modifier']) productivity=pd.merge(prod1,head_prod.reset_index(),how='left',on=['head']) productivity.set_index(['modifier','head'],inplace=True) # + if args.temporal!=0: compound_decade_counts=compounds.groupby('time').sum().sum(axis=1).to_frame() compound_decade_counts.columns=['N'] XY=compounds.groupby(['modifier','head','time']).sum().sum(axis=1).to_frame() X_star=compounds.groupby(['modifier','time']).sum().sum(axis=1).to_frame() Y_star=compounds.groupby(['head','time']).sum().sum(axis=1).to_frame() else: XY=compounds.groupby(['modifier','head']).sum().sum(axis=1).to_frame() X_star=compounds.groupby(['modifier']).sum().sum(axis=1).to_frame() Y_star=compounds.groupby(['head']).sum().sum(axis=1).to_frame() XY.columns=['a'] X_star.columns=['x_star'] Y_star.columns=['star_y'] if args.temporal!=0: merge1=pd.merge(XY.reset_index(),X_star.reset_index(),on=['modifier','time']) information_feat=pd.merge(merge1,Y_star.reset_index(),on=['head','time']) else: merge1=pd.merge(XY.reset_index(),X_star.reset_index(),on=['modifier']) information_feat=pd.merge(merge1,Y_star.reset_index(),on=['head']) information_feat['b']=information_feat['x_star']-information_feat['a'] information_feat['c']=information_feat['star_y']-information_feat['a'] if args.temporal!=0: information_feat=pd.merge(information_feat,compound_decade_counts.reset_index(),on=['time']) else: information_feat['N']=compounds.reset_index().drop(['modifier','head'],axis=1).sum().sum() information_feat['d']=information_feat['N']-(information_feat['a']+information_feat['b']+information_feat['c']) information_feat['x_bar_star']=information_feat['N']-information_feat['x_star'] information_feat['star_y_bar']=information_feat['N']-information_feat['star_y'] if args.temporal!=0: information_feat.set_index(['modifier','head','time'],inplace=True) else: information_feat.set_index(['modifier','head'],inplace=True) information_feat.replace(0,0.0001,inplace=True) information_feat['log_ratio']=2*(information_feat['a']*np.log((information_feat['a']*information_feat['N'])/(information_feat['x_star']*information_feat['star_y']))+\ information_feat['b']*np.log((information_feat['b']*information_feat['N'])/(information_feat['x_star']*information_feat['star_y_bar']))+\ information_feat['c']*np.log((information_feat['c']*information_feat['N'])/(information_feat['x_bar_star']*information_feat['star_y']))+\ information_feat['d']*np.log((information_feat['d']*information_feat['N'])/(information_feat['x_bar_star']*information_feat['star_y_bar']))) information_feat['ppmi']=np.log2((information_feat['a']*information_feat['N'])/(information_feat['x_star']*information_feat['star_y'])) information_feat['local_mi']=information_feat['a']*information_feat['ppmi'] information_feat.ppmi.loc[information_feat.ppmi<=0]=0 information_feat.drop(['a','x_star','star_y','b','c','d','N','d','x_bar_star','star_y_bar'],axis=1,inplace=True) # - information_feat # + new_compounds=compounds-1 compound_modifier_sim=new_compounds.multiply(modifiers.reindex(new_compounds.index, method='ffill')).sum(axis=1).to_frame() compound_modifier_sim.columns=['sim_with_modifier'] 
compound_head_sim=new_compounds.multiply(heads.reindex(new_compounds.index, method='ffill')).sum(axis=1).to_frame() compound_head_sim.columns=['sim_with_head'] prod_mod=compound_modifier_sim.groupby('modifier').size().to_frame() prod_mod.columns=['modifier_prod'] prod_head=compound_modifier_sim.groupby('head').size().to_frame() prod_head.columns=['head_prod'] if args.temporal!=0: constituent_sim=new_compounds.reset_index()[['modifier','head','time']].merge(modifiers.reset_index(),how='left',on=['modifier','time']) constituent_sim.set_index(['modifier','head','time'],inplace=True) else: constituent_sim=new_compounds.reset_index()[['modifier','head']].merge(modifiers.reset_index(),how='left',on=['modifier']) constituent_sim.set_index(['modifier','head'],inplace=True) constituent_sim=constituent_sim.multiply(heads.reindex(constituent_sim.index, method='ffill')).sum(axis=1).to_frame() constituent_sim.columns=['sim_bw_constituents'] # - dfs = [constituent_sim, compound_head_sim, compound_modifier_sim, information_feat,productivity] compounds_final = reduce(lambda left,right: pd.merge(left,right,left_index=True, right_index=True), dfs) # + if args.temporal!=0: compounds_final=pd.pivot_table(compounds_final.reset_index(), index=['modifier','head'], columns=['time']) compounds_final.fillna(0,inplace=True) compounds_final -= compounds_final.min() compounds_final /= compounds_final.max() compounds_final_1=compounds_final.columns.get_level_values(0) compounds_final_2=compounds_final.columns.get_level_values(1) cur_year=0 new_columns=[] for year in compounds_final_2: new_columns.append(str(year)+"_"+compounds_final_1[cur_year]) cur_year+=1 compounds_final.columns=new_columns else: #compounds_final = reduce(lambda left,right: pd.merge(left,right,on=['modifier','head']), dfs) #compounds_final.drop(['head_denom','modifier_denom'],axis=1,inplace=True) compounds_final.set_index(['modifier','head'],inplace=True) compounds_final.fillna(0,inplace=True) compounds_final -= compounds_final.min() compounds_final /= compounds_final.max() # - reddy_comp=pd.read_csv("../data/reddy_compounds.txt",sep="\t") #print(reddy_comp.columns) reddy_comp.columns=['compound','to_divide'] reddy_comp['modifier_mean'],reddy_comp['modifier_std'],reddy_comp['head_mean'],reddy_comp['head_std'],reddy_comp['compound_mean'],reddy_comp['compound_std'],_=reddy_comp.to_divide.str.split(" ",7).str reddy_comp['modifier'],reddy_comp['head']=reddy_comp['compound'].str.split(" ",2).str reddy_comp.modifier=reddy_comp.modifier.str[:-2] reddy_comp['head']=reddy_comp['head'].str[:-2] reddy_comp.drop(['compound','to_divide'],axis=1,inplace=True) reddy_comp['modifier']=np.vectorize(lemma_maker)(reddy_comp['modifier'],'n') reddy_comp['head']=np.vectorize(lemma_maker)(reddy_comp['head'],'n') reddy_comp.replace(spelling_replacement,inplace=True) #reddy_comp['modifier']=reddy_comp['modifier']+"_noun" #reddy_comp['head']=reddy_comp['head']+"_noun" reddy_comp=reddy_comp.apply(pd.to_numeric, errors='ignore') #reddy_comp.set_index(['modifier','head'],inplace=True) comp_90=pd.read_csv("../data/compounds90.txt",sep="\t") comp_90['mod_pos'],comp_90['head_pos']=comp_90.compound_lemmapos.str.split('_').str comp_90['modifier'],comp_90['mod_pos']=comp_90.mod_pos.str.split('/').str comp_90['head'],comp_90['head_pos']=comp_90.head_pos.str.split('/').str comp_90=comp_90.loc[~(comp_90.mod_pos=="ADJ")] comp_90=comp_90.loc[:,['avgModifier','stdevModifier','avgHead','stdevHeadModifier','compositionality','stdevHeadModifier','modifier','head']] 
comp_90.columns=reddy_comp.columns # + comp_ext=pd.read_csv("../data/compounds_ext.txt",sep="\t") comp_ext['mod_pos'],comp_ext['head_pos']=comp_ext.compound_lemmapos.str.split('_').str comp_ext['modifier'],comp_ext['mod_pos']=comp_ext.mod_pos.str.split('/').str comp_ext['head'],comp_ext['head_pos']=comp_ext.head_pos.str.split('/').str comp_ext=comp_ext.loc[~(comp_ext.mod_pos=="ADJ")] comp_ext=comp_ext.loc[:,['avgModifier','stdevModifier','avgHead','stdevHeadModifier','compositionality','stdevHeadModifier','modifier','head']] comp_ext.columns=reddy_comp.columns # - all_compounds=pd.concat([reddy_comp,comp_ext,comp_90],ignore_index=True) all_compounds['modifier']=all_compounds['modifier']+"_noun" all_compounds['head']=all_compounds['head']+"_noun" all_compounds compounds_final['1800_index'].value_counts() # + merge_df=all_compounds.merge(compounds_final.reset_index(),on=['modifier','head'],how='inner') merge_df.set_index(["modifier", "head"], inplace = True) merge_df.to_csv(features_df_path,sep='\t')
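# A small, self-contained sketch of the association measures computed above, using a
# single made-up contingency cell (the counts a, b, c, d are hypothetical and not taken
# from the compound dataset): a is the joint count of a (modifier, head) pair,
# x_star = a + b and star_y = a + c are the marginals, and N = a + b + c + d.

# +
import numpy as np

a, b, c, d = 40.0, 60.0, 100.0, 9800.0
N = a + b + c + d
x_star, star_y = a + b, a + c
x_bar_star, star_y_bar = N - x_star, N - star_y

# same formulas as the dataframe version above, just on scalars
log_ratio = 2 * (a * np.log(a * N / (x_star * star_y)) +
                 b * np.log(b * N / (x_star * star_y_bar)) +
                 c * np.log(c * N / (x_bar_star * star_y)) +
                 d * np.log(d * N / (x_bar_star * star_y_bar)))
ppmi = np.log2(a * N / (x_star * star_y))
local_mi = a * ppmi
ppmi = max(ppmi, 0)          # negative associations are clipped to zero, as above

print(round(log_ratio, 3), round(ppmi, 3), round(local_mi, 3))
# -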
src/Notebooks/Dimensionality_Reduction_Non_Contextual_Temporal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd #This is the Richmond USGS Data gage river_richmnd = pd.read_csv('JR_Richmond02037500.csv') river_richmnd.dropna(); #Hurricane data for the basin - Names of Relevant Storms - This will be used for getting the storms from the larger set JR_stormnames = pd.read_csv('gis_match.csv') # Bring in the Big HURDAT data, from 1950 forward (satellites and data quality, etc.) HURDAT = pd.read_csv('hurdatcleanva_1950_present.csv') VA_JR_stormmatch = JR_stormnames.merge(HURDAT) # + # Now the common storms for the James Basin have been created. We now have time and storms together for the basin #checking some things about the data # - # How many unique storms within the basin since 1950? 62 here and 53 in the Data on the Coast.NOAA.gov's website. #I think we are close enough here, digging may show some other storms, but I think we have at least captured the ones #from NOAA len(VA_JR_stormmatch['Storm Number'].unique()); #double ck the lat and long parameters print(VA_JR_stormmatch['Lat'].min(), VA_JR_stormmatch['Lon'].min(), VA_JR_stormmatch['Lat'].max(), VA_JR_stormmatch['Lon'].max()) #Make a csv of this data VA_JR_stormmatch.to_csv('storms_in_basin.csv', sep=',',encoding = 'utf-8') #names of storms len(VA_JR_stormmatch['Storm Number'].unique()) VA_JR_stormmatch['Storm Number'].unique() numbers = VA_JR_stormmatch['Storm Number'] # + #grab a storm from this list and lok at the times #Bill = pd.DataFrame(VA_JR_stormmatch['Storm Number'=='AL032003']) storm = VA_JR_stormmatch[(VA_JR_stormmatch["Storm Number"] == 'AL081955')] storm #so this is the data for a storm named Bill that had a pth through the basin * BILL WAS A BACKDOOR Storm # + # plotting for the USGS river Gage data import matplotlib import matplotlib.pyplot as plt from climata.usgs import DailyValueIO from datetime import datetime from pandas.plotting import register_matplotlib_converters import numpy as np register_matplotlib_converters() plt.style.use('ggplot') plt.rcParams['figure.figsize'] = (20.0, 10.0) # set parameters nyears = 1 ndays = 365 * nyears station_id = "02037500" param_id = "00060" datelist = pd.date_range(end=datetime.today(), periods=ndays).tolist() #take an annual average for the river annual_data = DailyValueIO( start_date="1955-01-01", end_date="1956-01-01", station=station_id, parameter=param_id,) for series in annual_data: flow = [r[1] for r in series.data] si_flow_annual = np.asarray(flow) * 0.0283168 flow_mean = np.mean(si_flow_annual) #now for the storm dischg = DailyValueIO( start_date="1955-09-10", end_date="1955-09-28", station=station_id, parameter=param_id,) #create lists of date-flow values for series in dischg: flow = [r[1] for r in series.data] si_flow = np.asarray(flow) * 0.0283168 dates = [r[0] for r in series.data] plt.plot(dates, si_flow) plt.axhline(y=flow_mean, color='r', linestyle='-') plt.xlabel('Date') plt.ylabel('Discharge (m^3/s)') plt.title("HU Ione - 1955 (Atlantic)") plt.xticks(rotation='vertical') plt.show() # - percent_incr= (abs(max(si_flow)-flow_mean)/abs(flow_mean))*100 percent_incr #take an annual average for the river annual_data = DailyValueIO( start_date="1955-03-01", end_date="1955-10-01", station=station_id, parameter=param_id,) for series in annual_data: flow = [r[1] for r in series.data] si_flow_annual = np.asarray(flow) * 0.0283168 
flow_mean_season = np.mean(si_flow_annual) print(abs(flow_mean-flow_mean_season))
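# A small helper (an illustration, not part of the original analysis) that wraps the
# two steps used repeatedly above: convert a discharge series from cubic feet per
# second to cubic metres per second, then express its peak as a percentage increase
# over a baseline mean. The example values in the call are made up.

# +
import numpy as np

CFS_TO_CMS = 0.0283168  # 1 ft^3/s in m^3/s, the same factor used throughout

def peak_percent_increase(flow_cfs, baseline_cfs):
    """Percent increase of the peak of flow_cfs over the mean of baseline_cfs."""
    flow = np.asarray(flow_cfs) * CFS_TO_CMS
    baseline = np.mean(np.asarray(baseline_cfs) * CFS_TO_CMS)
    return abs(flow.max() - baseline) / abs(baseline) * 100

print(peak_percent_increase([900, 4200, 12500, 3100], [800, 950, 1100, 700]))
# -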
HURDAT_JRDISCHG-Ione 1955.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/BatoolMM/fastai/blob/master/Chapter_5_Image_Classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="0Xc176TUEdu5" # # ## **Chapter 5: Image Classification** # # + id="ehmMdnRwAKqR" # !pip install -Uqq fastbook # !pip install fastai # !pip install utils # + id="EHSowlbUEs-A" from utils import * from fastai.vision.all import * from fastai.vision.widgets import ImageClassifierCleaner import matplotlib.pyplot as plt # + id="HLjBovZuPvTN" outputId="06084635-e80b-4263-adce-1867900b3736" colab={"base_uri": "https://localhost:8080/", "height": 17} path = untar_data(URLs.PETS) Path.BASE_PATH = path # + id="LZkvWPHxQAcT" outputId="ac2b8f6e-ea17-4dcd-cbe2-d09ae7d8b148" colab={"base_uri": "https://localhost:8080/", "height": 34} path.ls() # + id="GZRbZnSUQAiI" outputId="280dd14e-611a-4aae-9482-72c8266540d3" colab={"base_uri": "https://localhost:8080/", "height": 54} (path/"images").ls() # + id="AhTR-5RRQAlx" fname = (path/"images").ls()[0] # + id="OZC3ZFyHQApa" outputId="58db161d-52f6-4176-ff40-bd99699e5e27" colab={"base_uri": "https://localhost:8080/", "height": 34} re.findall(r'(.+)_\d+.jpg$', fname.name) # + id="mdbC5vEuQA3T" pets = DataBlock(blocks = (ImageBlock, CategoryBlock), get_items=get_image_files, splitter=RandomSplitter(seed=42), get_y=using_attr(RegexLabeller(r'(.+)_\d+.jpg$'), 'name'), item_tfms=Resize(460), batch_tfms=aug_transforms(size=224, min_scale=0.75)) dls = pets.dataloaders(path/"images") # + id="2RUuHFo3QT5r" outputId="b23173b9-6ab8-459b-e3dd-7f0500fede26" colab={"base_uri": "https://localhost:8080/", "height": 195} dls.show_batch(nrows=1, ncols=3) # + id="bHoZk0_bQgB0" outputId="f7a8e4a9-7abb-4b6f-d106-1706b049d5cb" colab={"base_uri": "https://localhost:8080/", "height": 240, "referenced_widgets": ["003ccdd73c3b449fa1c6a43f2303dbd0", "959477314f8945ea870f3f47407dbaf6", "b888ee04e23c4bf182f9536e618727a4", "<KEY>", "6250ad34e9ce460fa192dcb4f34dba47", "0e3d5aaee9a348d385bfeba673766a93", "4a28519194ed41dba4981c55e025db86", "<KEY>"]} learn = cnn_learner(dls, resnet34, metrics=error_rate) learn.fine_tune(2)
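# A quick standalone check (illustration only) of the labelling regex used in the
# `DataBlock` above: the captured group strips the trailing "_<index>.jpg", so the
# breed name becomes the category. The two filenames below are example strings, not
# files read from the dataset.

# +
import re

pattern = r'(.+)_\d+.jpg$'
for name in ['great_pyrenees_173.jpg', 'Siamese_12.jpg']:
    print(re.findall(pattern, name))   # ['great_pyrenees'], then ['Siamese']
# -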
Chapter_5_Image_Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Neural Nets with Keras # In this notebook you will learn how to implement neural networks using the Keras API. We will use TensorFlow's own implementation, *tf.keras*, which comes bundled with TensorFlow. # Don't hesitate to look at the documentation at [keras.io](https://keras.io/). All the code examples should work fine with tf.keras, the only difference is how to import Keras: # # ```python # # keras.io code: # from keras.layers import Dense # output_layer = Dense(10) # # # corresponding tf.keras code: # from tensorflow.keras.layers import Dense # output_layer = Dense(10) # # # or: # from tensorflow import keras # output_layer = keras.layers.Dense(10) # ``` # # In this notebook, we will not use any TensorFlow-specific code, so everything you see would run just the same way on [keras-team](https://github.com/keras-team/keras) or any other Python implementation of the Keras API (except for the imports). # ## Imports # %matplotlib inline # %load_ext tensorboard import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import sklearn import sys import tensorflow as tf from tensorflow import keras # tf.keras import time print("python", sys.version) for module in mpl, np, pd, sklearn, tf, keras: print(module.__name__, module.__version__) assert sys.version_info >= (3, 5) # Python ≥3.5 required assert tf.__version__ >= "2.0" # TensorFlow ≥2.0 required # ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg) # ## Exercise 1 – TensorFlow Playground # Visit the [TensorFlow Playground](http://playground.tensorflow.org). # * **Layers and patterns**: try training the default neural network by clicking the "Run" button (top left). Notice how it quickly finds a good solution for the classification task. Notice that the neurons in the first hidden layer have learned simple patterns, while the neurons in the second hidden layer have learned to combine the simple patterns of the first hidden layer into more complex patterns). In general, the more layers, the more complex the patterns can be. # * **Activation function**: try replacing the Tanh activation function with the ReLU activation function, and train the network again. Notice that it finds a solution even faster, but this time the boundaries are linear. This is due to the shape of the ReLU function. # * **Local minima**: modify the network architecture to have just one hidden layer with three neurons. Train it multiple times (to reset the network weights, just add and remove a neuron). Notice that the training time varies a lot, and sometimes it even gets stuck in a local minimum. # * **Too small**: now remove one neuron to keep just 2. Notice that the neural network is now incapable of finding a good solution, even if you try multiple times. The model has too few parameters and it systematically underfits the training set. # * **Large enough**: next, set the number of neurons to 8 and train the network several times. Notice that it is now consistently fast and never gets stuck. This highlights an important finding in neural network theory: large neural networks almost never get stuck in local minima, and even when they do these local optima are almost as good as the global optimum. However, they can still get stuck on long plateaus for a long time. 
# * **Deep net and vanishing gradients**: now change the dataset to be the spiral (bottom right dataset under "DATA"). Change the network architecture to have 4 hidden layers with 8 neurons each. Notice that training takes much longer, and often gets stuck on plateaus for long periods of time. Also notice that the neurons in the highest layers (i.e. on the right) tend to evolve faster than the neurons in the lowest layers (i.e. on the left). This problem, called the "vanishing gradients" problem, can be alleviated using better weight initialization and other techniques, better optimizers (such as AdaGrad or Adam), or using Batch Normalization. # * **More**: go ahead and play with the other parameters to get a feel of what they do. In fact, after this course you should definitely play with this UI for at least one hour, it will grow your intuitions about neural networks significantly. # ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg) # ## Exercise 2 – Image classification with tf.keras # ### Load the Fashion MNIST dataset # Let's start by loading the fashion MNIST dataset. Keras has a number of functions to load popular datasets in `keras.datasets`. The dataset is already split for you between a training set and a test set, but it can be useful to split the training set further to have a validation set: fashion_mnist = keras.datasets.fashion_mnist (X_train_full, y_train_full), (X_test, y_test) = ( fashion_mnist.load_data()) X_valid, X_train = X_train_full[:5000], X_train_full[5000:] y_valid, y_train = y_train_full[:5000], y_train_full[5000:] # The training set contains 55,000 grayscale images, each 28x28 pixels: X_train.shape # Each pixel intensity is represented by a uint8 (byte) from 0 to 255: X_train[0] # You can plot an image using Matplotlib's `imshow()` function, with a `'binary'` # color map: plt.imshow(X_train[0], cmap="binary") plt.show() # The labels are the class IDs (represented as uint8), from 0 to 9: y_train # Here are the corresponding class names: class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"] # So the first image in the training set is a coat: class_names[y_train[0]] # The validation set contains 5,000 images, and the test set contains 10,000 images: X_valid.shape X_test.shape # Let's take a look at a sample of the images in the dataset: n_rows = 5 n_cols = 10 plt.figure(figsize=(n_cols*1.4, n_rows * 1.6)) for row in range(n_rows): for col in range(n_cols): index = n_cols * row + col plt.subplot(n_rows, n_cols, index + 1) plt.imshow(X_train[index], cmap="binary", interpolation="nearest") plt.axis('off') plt.title(class_names[y_train[index]]) plt.show() # This dataset has the same structure as the famous MNIST dataset (which you can load using `keras.datasets.mnist.load_data()`), except the images represent fashion items rather than handwritten digits, and it is much more challenging. A simple linear model can reach 92% accuracy on MNIST, but only 83% on fashion MNIST. # ### Build a classification neural network with Keras # ### 2.1) # Build a `Sequential` model (`keras.models.Sequential`), without any argument, then and add four layers to it by calling its `add()` method: # * a `Flatten` layer (`keras.layers.Flatten`) to convert each 28x28 image to a single row of 784 pixel values. Since it is the first layer in your model, you should specify the `input_shape` argument, leaving out the batch size: `[28, 28]`. 
# * a `Dense` layer (`keras.layers.Dense`) with 300 neurons (aka units), and the `"relu"` activation function. # * Another `Dense` layer with 100 neurons, also with the `"relu"` activation function. # * A final `Dense` layer with 10 neurons (one per class), and with the `"softmax"` activation function to ensure that the sum of all the estimated class probabilities for each image is equal to 1. model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28,28])) model.add(keras.layers.Dense(300, activation="relu")) # ### 2.2) # Alternatively, you can pass a list containing the 4 layers to the constructor of the `Sequential` model. The model's `layers` attribute holds the list of layers. # ### 2.3) # Call the model's `summary()` method and examine the output. Also, try using `keras.utils.plot_model()` to save an image of your model's architecture. Alternatively, you can uncomment the following code to display the image within Jupyter. # **Warning**: you will need `pydot` and `graphviz` to use `plot_model()`. # ### 2.4) # After a model is created, you must call its `compile()` method to specify the `loss` function and the `optimizer` to use. In this case, you want to use the `"sparse_categorical_crossentropy"` loss, and the `keras.optimizers.SGD(lr=1e-3)` optimizer (stochastic gradient descent with a learning rate of 1e-3). Moreover, you can optionally specify a list of additional metrics that should be measured during training. In this case you should specify `metrics=["accuracy"]`. **Note**: you can find more loss functions in `keras.losses`, more metrics in `keras.metrics` and more optimizers in `keras.optimizers`. # ### 2.5) # Now your model is ready to be trained. Call its `fit()` method, passing it the input features (`X_train`) and the target classes (`y_train`). Set `epochs=10` (or else it will just run for a single epoch). You can also (optionally) pass the validation data by setting `validation_data=(X_valid, y_valid)`. If you do, Keras will compute the loss and the additional metrics (the accuracy in this case) on the validation set at the end of each epoch. If the performance on the training set is much better than on the validation set, your model is probably overfitting the training set (or there is a bug, such as a mismatch between the training set and the validation set). # **Note**: the `fit()` method will return a `History` object containing training stats. Make sure to preserve it (`history = model.fit(...)`). # ### 2.6) # Try running `pd.DataFrame(history.history).plot()` to plot the learning curves. To make the graph more readable, you can also set `figsize=(8, 5)`, call `plt.grid(True)` and `plt.gca().set_ylim(0, 1)`. # ### 2.7) # Try running `model.fit()` again, and notice that training continues where it left off. # ### 2.8) # call the model's `evaluate()` method, passing it the test set (`X_test` and `y_test`). This will compute the loss (cross-entropy) on the test set, as well as all the additional metrics (in this case, the accuracy). Your model should achieve over 80% accuracy on the test set. # ### 2.9) # Define `X_new` as the first 10 instances of the test set. Call the model's `predict()` method to estimate the probability of each class for each instance (for better readability, you may use the output array's `round()` method): # ### 2.10) # Often, you may only be interested in the most likely class. Use `np.argmax()` to get the class ID of the most likely class for each instance. **Tip**: you want to set `axis=1`. 
# ### 2.11) # Call the model's `predict_classes()` method for `X_new`. You should get the same result as above. # ### 2.12) # (Optional) It is often useful to know how confident the model is for each prediction. Try finding the estimated probability for each predicted class using `np.max()`. # ### 2.13) # (Optional) It is frequent to want the top k classes and their estimated probabilities rather just the most likely class. You can use `np.argsort()` for this. # ![Exercise solution](https://camo.githubusercontent.com/250388fde3fac9135ead9471733ee28e049f7a37/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f302f30362f46696c6f735f736567756e646f5f6c6f676f5f253238666c69707065642532392e6a7067) # ## Exercise 2 - Solution # ### 2.1) # Build a `Sequential` model (`keras.models.Sequential`), without any argument, then and add four layers to it by calling its `add()` method: # * a `Flatten` layer (`keras.layers.Flatten`) to convert each 28x28 image to a single row of 784 pixel values. Since it is the first layer in your model, you should specify the `input_shape` argument, leaving out the batch size: `[28, 28]`. # * a `Dense` layer (`keras.layers.Dense`) with 300 neurons (aka units), and the `"relu"` activation function. # * Another `Dense` layer with 100 neurons, also with the `"relu"` activation function. # * A final `Dense` layer with 10 neurons (one per class), and with the `"softmax"` activation function to ensure that the sum of all the estimated class probabilities for each image is equal to 1. model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) model.add(keras.layers.Dense(300, activation="relu")) model.add(keras.layers.Dense(100, activation="relu")) model.add(keras.layers.Dense(10, activation="softmax")) # ### 2.2) # Alternatively, you can pass a list containing the 4 layers to the constructor of the `Sequential` model. The model's `layers` attribute holds the list of layers. model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28]), keras.layers.Dense(300, activation="relu"), keras.layers.Dense(100, activation="relu"), keras.layers.Dense(10, activation="softmax") ]) model.layers # ### 2.3) # Call the model's `summary()` method and examine the output. Also, try using `keras.utils.plot_model()` to save an image of your model's architecture. Alternatively, you can uncomment the following code to display the image within Jupyter. model.summary() keras.utils.plot_model(model, "my_mnist_model.png", show_shapes=True) # **Warning**: at the present, you need `from tensorflow.python.keras.utils.vis_utils import model_to_dot`, instead of simply `keras.utils.model_to_dot`. See [TensorFlow issue 24639](https://github.com/tensorflow/tensorflow/issues/24639). from IPython.display import SVG from tensorflow.python.keras.utils.vis_utils import model_to_dot SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg')) # ### 2.4) # After a model is created, you must call its `compile()` method to specify the `loss` function and the `optimizer` to use. In this case, you want to use the `"sparse_categorical_crossentropy"` loss, and the `keras.optimizers.SGD(lr=1e-3)` optimizer (stochastic gradient descent with learning rate of 1e-3). Moreover, you can optionally specify a list of additional metrics that should be measured during training. In this case you should specify `metrics=["accuracy"]`. 
**Note**: you can find more loss functions in `keras.losses`, more metrics in `keras.metrics` and more optimizers in `keras.optimizers`. model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) # ### 2.5) # Now your model is ready to be trained. Call its `fit()` method, passing it the input features (`X_train`) and the target classes (`y_train`). Set `epochs=10` (or else it will just run for a single epoch). You can also (optionally) pass the validation data by setting `validation_data=(X_valid, y_valid)`. If you do, Keras will compute the loss and the additional metrics (the accuracy in this case) on the validation set at the end of each epoch. If the performance on the training set is much better than on the validation set, your model is probably overfitting the training set (or there is a bug, such as a mismatch between the training set and the validation set). # **Note**: the `fit()` method will return a `History` object containing training stats. Make sure to preserve it (`history = model.fit(...)`). history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid)) # ### 2.6) # Try running `pd.DataFrame(history.history).plot()` to plot the learning curves. To make the graph more readable, you can also set `figsize=(8, 5)`, call `plt.grid(True)` and `plt.gca().set_ylim(0, 1)`. def plot_learning_curves(history): pd.DataFrame(history.history).plot(figsize=(8, 5)) plt.grid(True) plt.gca().set_ylim(0, 1) plt.show() plot_learning_curves(history) # ### 2.7) # Try running `model.fit()` again, and notice that training continues where it left off. history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid)) # ### 2.8) # Call the model's `evaluate()` method, passing it the test set (`X_test` and `y_test`). This will compute the loss (cross-entropy) on the test set, as well as all the additional metrics (in this case, the accuracy). Your model should achieve over 80% accuracy on the test set. model.evaluate(X_test, y_test) # ### 2.9) # Define `X_new` as the first 10 instances of the test set. Call the model's `predict()` method to estimate the probability of each class for each instance (for better readability, you may use the output array's `round()` method): n_new = 10 X_new = X_test[:n_new] y_proba = model.predict(X_new) y_proba.round(2) # ### 2.10) # Often, you may only be interested in the most likely class. Use `np.argmax()` to get the class ID of the most likely class for each instance. **Tip**: you want to set `axis=1`. y_pred = y_proba.argmax(axis=1) y_pred # ### 2.11) # Call the model's `predict_classes()` method for `X_new`. You should get the same result as above. y_pred = model.predict_classes(X_new) y_pred # ### 2.12) # (Optional) It is often useful to know how confident the model is for each prediction. Try finding the estimated probability for each predicted class using `np.max()`. y_proba.max(axis=1).round(2) # ### 2.13) # (Optional) It is frequent to want the top k classes and their estimated probabilities rather just the most likely class. You can use `np.argsort()` for this. 
k = 3 top_k = np.argsort(-y_proba, axis=1)[:, :k] top_k row_indices = np.tile(np.arange(len(top_k)), [k, 1]).T y_proba[row_indices, top_k].round(2) # ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg) # ## Exercise 3 – Scale the features # ### 3.1) # When using Gradient Descent, it is usually best to ensure that the features all have a similar scale, preferably with a Normal distribution. Try to standardize the pixel values and see if this improves the performance of your neural network. # # **Tips**: # * For each feature (pixel intensity), you must subtract the `mean()` of that feature (across all instances, so use `axis=0`) and divide by its standard deviation (`std()`, again `axis=0`). Alternatively, you can use Scikit-Learn's `StandardScaler`. # * Make sure you compute the means and standard deviations on the training set, and use these statistics to scale the training set, the validation set and the test set (you should not fit the validation set or the test set, and computing the means and standard deviations counts as "fitting"). # ### 3.2) # Plot the learning curves. Do they look better than earlier? # ![Exercise solution](https://camo.githubusercontent.com/250388fde3fac9135ead9471733ee28e049f7a37/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f302f30362f46696c6f735f736567756e646f5f6c6f676f5f253238666c69707065642532392e6a7067) # ## Exercise 3 – Solution # ### 3.1) # When using Gradient Descent, it is usually best to ensure that the features all have a similar scale, preferably with a Normal distribution. Try to standardize the pixel values and see if this improves the performance of your neural network. pixel_means = X_train.mean(axis = 0) pixel_stds = X_train.std(axis = 0) X_train_scaled = (X_train - pixel_means) / pixel_stds X_valid_scaled = (X_valid - pixel_means) / pixel_stds X_test_scaled = (X_test - pixel_means) / pixel_stds # + from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28) X_valid_scaled = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28) X_test_scaled = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28) # - model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28]), keras.layers.Dense(300, activation="relu"), keras.layers.Dense(100, activation="relu"), keras.layers.Dense(10, activation="softmax") ]) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(1e-3), metrics=["accuracy"]) history = model.fit(X_train_scaled, y_train, epochs=20, validation_data=(X_valid_scaled, y_valid)) model.evaluate(X_test_scaled, y_test) # ### 3.2) # Plot the learning curves. Do they look better than earlier? plot_learning_curves(history) # ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg) # ## Exercise 4 – Use Callbacks # ### 4.1) # The `fit()` method accepts a `callbacks` argument. Try training your model with a large number of epochs, a validation set, and with a few callbacks from `keras.callbacks`: # * `TensorBoard`: specify a log directory. It should be a subdirectory of a root logdir, such as `./my_logs/run_1`, and it should be different every time you train your model. You can use a timestamp in the subdirectory's path to ensure that it changes at every run. 
# * `EarlyStopping`: specify `patience=5` # * `ModelCheckpoint`: specify the path of the checkpoint file to save (e.g., `"my_mnist_model.h5"`) and set `save_best_only=True` # # Notice that the `EarlyStopping` callback will interrupt training before it reaches the requested number of epochs. This reduces the risk of overfitting. root_logdir = os.path.join(os.curdir, "my_logs") # ### 4.2) # The Jupyter plugin for tensorboard was loaded at the beginning of this notebook (`%load_ext tensorboard`), so you can now simply start it by using the `%tensorboard` magic command. Explore the various tabs available, in particular the SCALARS tab to view learning curves, the GRAPHS tab to view the computation graph, and the PROFILE tab which is very useful to identify bottlenecks if you run into performance issues. # %tensorboard --logdir=./my_logs # ### 4.3) # The early stopping callback only stopped training after 10 epochs without progress, so your model may already have started to overfit the training set. Fortunately, since the `ModelCheckpoint` callback only saved the best models (on the validation set), the last saved model is the best on the validation set, so try loading it using `keras.models.load_model()`. Finally evaluate it on the test set. # ### 4.4) # Look at the list of available callbacks at https://keras.io/callbacks/ # ![Exercise solution](https://camo.githubusercontent.com/250388fde3fac9135ead9471733ee28e049f7a37/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f302f30362f46696c6f735f736567756e646f5f6c6f676f5f253238666c69707065642532392e6a7067) # ## Exercise 4 – Solution # ### 4.1) # The `fit()` method accepts a `callbacks` argument. Try training your model with a large number of epochs, a validation set, and with a few callbacks from `keras.callbacks`: # * `TensorBoard`: specify a log directory. It should be a subdirectory of a root logdir, such as `./my_logs/run_1`, and it should be different every time you train your model. You can use a timestamp in the subdirectory's path to ensure that it changes at every run. # * `EarlyStopping`: specify `patience=5` # * `ModelCheckpoint`: specify the path of the checkpoint file to save (e.g., `"my_mnist_model.h5"`) and set `save_best_only=True` # # Notice that the `EarlyStopping` callback will interrupt training before it reaches the requested number of epochs. This reduces the risk of overfitting. model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28]), keras.layers.Dense(300, activation="relu"), keras.layers.Dense(100, activation="relu"), keras.layers.Dense(10, activation="softmax") ]) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(1e-3), metrics=["accuracy"]) # + logdir = os.path.join(root_logdir, "run_{}".format(time.time())) callbacks = [ keras.callbacks.TensorBoard(logdir), keras.callbacks.EarlyStopping(patience=5), keras.callbacks.ModelCheckpoint("my_mnist_model.h5", save_best_only=True), ] history = model.fit(X_train_scaled, y_train, epochs=50, validation_data=(X_valid_scaled, y_valid), callbacks=callbacks) # - # ### 4.2) # Done # ### 4.3) # The early stopping callback only stopped training after 10 epochs without progress, so your model may already have started to overfit the training set. Fortunately, since the `ModelCheckpoint` callback only saved the best models (on the validation set), the last saved model is the best on the validation set, so try loading it using `keras.models.load_model()`. Finally evaluate it on the test set. 
model = keras.models.load_model("my_mnist_model.h5") model.evaluate(X_valid_scaled, y_valid) # ### 4.4) # Look at the list of available callbacks at https://keras.io/callbacks/ # ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg) # ## Exercise 5 – A neural net for regression # ### 5.1) # Load the California housing dataset using `sklearn.datasets.fetch_california_housing`. This returns an object with a `DESCR` attribute describing the dataset, a `data` attribute with the input features, and a `target` attribute with the labels. The goal is to predict the price of houses in a district (a census block) given some stats about that district. This is a regression task (predicting values). # ### 5.2) # Split the dataset into a training set, a validation set and a test set using Scikit-Learn's `sklearn.model_selection.train_test_split()` function. # ### 5.3) # Scale the input features (e.g., using a `sklearn.preprocessing.StandardScaler`). Once again, don't forget that you should not fit the validation set or the test set, only the training set. # ### 5.4) # Now build, train and evaluate a neural network to tackle this problem. Then use it to make predictions on the test set. # # **Tips**: # * Since you are predicting a single value per district (the median house price), there should only be one neuron in the output layer. # * Usually for regression tasks you don't want to use any activation function in the output layer (in some cases you may want to use `"relu"` or `"softplus"` if you want to constrain the predicted values to be positive, or `"sigmoid"` or `"tanh"` if you want to constrain the predicted values to 0-1 or -1-1). # * A good loss function for regression is generally the `"mean_squared_error"` (aka `"mse"`). When there are many outliers in your dataset, you may prefer to use the `"mean_absolute_error"` (aka `"mae"`), which is a bit less precise but less sensitive to outliers. # ![Exercise solution](https://camo.githubusercontent.com/250388fde3fac9135ead9471733ee28e049f7a37/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f302f30362f46696c6f735f736567756e646f5f6c6f676f5f253238666c69707065642532392e6a7067) # ## Exercise 5 – Solution # ### 5.1) # Load the California housing dataset using `sklearn.datasets.fetch_california_housing`. This returns an object with a `DESCR` attribute describing the dataset, a `data` attribute with the input features, and a `target` attribute with the labels. The goal is to predict the price of houses in a district (a census block) given some stats about that district. This is a regression task (predicting values). from sklearn.datasets import fetch_california_housing housing = fetch_california_housing() print(housing.DESCR) housing.data.shape housing.target.shape # ### 5.2) # Split the dataset into a training set, a validation set and a test set using Scikit-Learn's `sklearn.model_selection.train_test_split()` function. # + from sklearn.model_selection import train_test_split X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target, random_state=42) X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=42) # - len(X_train), len(X_valid), len(X_test) # ### 5.3) # Scale the input features (e.g., using a `sklearn.preprocessing.StandardScaler`). Once again, don't forget that you should not fit the validation set or the test set, only the training set. 
# + from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) X_valid_scaled = scaler.transform(X_valid) X_test_scaled = scaler.transform(X_test) # - # ### 5.4) # Now build, train and evaluate a neural network to tackle this problem. Then use it to make predictions on the test set. model = keras.models.Sequential([ keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]), keras.layers.Dense(1) ]) model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(1e-3)) callbacks = [keras.callbacks.EarlyStopping(patience=10)] history = model.fit(X_train_scaled, y_train, validation_data=(X_valid_scaled, y_valid), epochs=100, callbacks=callbacks) model.evaluate(X_test_scaled, y_test) model.predict(X_test_scaled) plot_learning_curves(history) # ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg) # ## Exercise 6 – Hyperparameter search # ### 6.1) # Try training your model multiple times, with different a learning rate each time (e.g., 1e-4, 3e-4, 1e-3, 3e-3, 3e-2), and compare the learning curves. For this, you need to create a `keras.optimizers.SGD` optimizer and specify the `learning_rate` in its constructor, then pass this `SGD` instance to the `compile()` method using the `optimizer` argument. # ### 6.2) # Let's look at a more sophisticated way to tune hyperparameters. Create a `build_model()` function that takes three arguments, `n_hidden`, `n_neurons`, `learning_rate`, and builds, compiles and returns a model with the given number of hidden layers, the given number of neurons and the given learning rate. It is good practice to give a reasonable default value to each argument. # ### 6.3) # Create a `keras.wrappers.scikit_learn.KerasRegressor` and pass the `build_model` function to the constructor. This gives you a Scikit-Learn compatible predictor. Try training it and using it to make predictions. Note that you can pass the `n_epochs`, `callbacks` and `validation_data` to the `fit()` method. # ### 6.4) # Use a `sklearn.model_selection.RandomizedSearchCV` to search the hyperparameter space of your `KerasRegressor`. # # **Tips**: # * create a `param_distribs` dictionary where each key is the name of a hyperparameter you want to fine-tune (e.g., `"n_hidden"`), and each value is the list of values you want to explore (e.g., `[0, 1, 2, 3]`), or a Scipy distribution from `scipy.stats`. # * You can use the reciprocal distribution for the learning rate (e.g, `reciprocal(3e-3, 3e-2)`). # * Create a `RandomizedSearchCV`, passing the `KerasRegressor` and the `param_distribs` to its constructor, as well as the number of iterations (`n_iter`), and the number of cross-validation folds (`cv`). If you are short on time, you can set `n_iter=10` and `cv=3`. You may also want to set `verbose=2`. # * Finally, call the `RandomizedSearchCV`'s `fit()` method on the training set. Once again you can pass it `n_epochs`, `validation_data` and `callbacks` if you want to. # * The best parameters found will be available in the `best_params_` attribute, the best score will be in `best_score_`, and the best model will be in `best_estimator_`. # ### 6.5) # Evaluate the best model found on the test set. You can either use the best estimator's `score()` method, or get its underlying Keras model *via* its `model` attribute, and call this model's `evaluate()` method. Note that the estimator returns the negative mean square error (it's a score, not a loss, so higher is better). 
# ### 6.6) # Finally, save the best Keras model found. **Tip**: it is available via the best estimator's `model` attribute, and just need to call its `save()` method. # **Tip**: while a randomized search is nice and simple, there are more powerful (but complex) options available out there for hyperparameter search, for example: # * [Hyperopt](https://github.com/hyperopt/hyperopt) # * [Hyperas](https://github.com/maxpumperla/hyperas) # * [Sklearn-Deap](https://github.com/rsteca/sklearn-deap) # * [Scikit-Optimize](https://scikit-optimize.github.io/) # * [Spearmint](https://github.com/JasperSnoek/spearmint) # * [PyMC3](https://docs.pymc.io/) # * [GPFlow](https://gpflow.readthedocs.io/) # * [Yelp/MOE](https://github.com/Yelp/MOE) # * Commercial services such as: [Google Cloud ML Engine](https://cloud.google.com/ml-engine/docs/tensorflow/using-hyperparameter-tuning), [Arimo](https://arimo.com/) or [Oscar](http://oscar.calldesk.ai/) # ![Exercise solution](https://camo.githubusercontent.com/250388fde3fac9135ead9471733ee28e049f7a37/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f302f30362f46696c6f735f736567756e646f5f6c6f676f5f253238666c69707065642532392e6a7067) # ## Exercise 6 – Solution # ### 6.1) # Try training your model multiple times, with different a learning rate each time (e.g., 1e-4, 3e-4, 1e-3, 3e-3, 3e-2), and compare the learning curves. For this, you need to create a `keras.optimizers.SGD` optimizer and specify the `learning_rate` in its constructor, then pass this `SGD` instance to the `compile()` method using the `optimizer` argument. learning_rates = [1e-4, 3e-4, 1e-3, 3e-3, 1e-2, 3e-2] histories = [] for learning_rate in learning_rates: model = keras.models.Sequential([ keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]), keras.layers.Dense(1) ]) optimizer = keras.optimizers.SGD(learning_rate) model.compile(loss="mean_squared_error", optimizer=optimizer) callbacks = [keras.callbacks.EarlyStopping(patience=10)] history = model.fit(X_train_scaled, y_train, validation_data=(X_valid_scaled, y_valid), epochs=100, callbacks=callbacks) histories.append(history) for learning_rate, history in zip(learning_rates, histories): print("Learning rate:", learning_rate) plot_learning_curves(history) # ### 6.2) # Let's look at a more sophisticated way to tune hyperparameters. Create a `build_model()` function that takes three arguments, `n_hidden`, `n_neurons`, `learning_rate`, and builds, compiles and returns a model with the given number of hidden layers, the given number of neurons and the given learning rate. It is good practice to give a reasonable default value to each argument. def build_model(n_hidden=1, n_neurons=30, learning_rate=3e-3): model = keras.models.Sequential() options = {"input_shape": X_train.shape[1:]} for layer in range(n_hidden + 1): model.add(keras.layers.Dense(n_neurons, activation="relu", **options)) options = {} model.add(keras.layers.Dense(1, **options)) optimizer = keras.optimizers.SGD(learning_rate) model.compile(loss="mse", optimizer=optimizer) return model # ### 6.3) # Create a `keras.wrappers.scikit_learn.KerasRegressor` and pass the `build_model` function to the constructor. This gives you a Scikit-Learn compatible predictor. Try training it and using it to make predictions. Note that you can pass the `n_epochs`, `callbacks` and `validation_data` to the `fit()` method. 
keras_reg = keras.wrappers.scikit_learn.KerasRegressor(build_model) keras_reg.fit(X_train_scaled, y_train, epochs=100, validation_data=(X_valid_scaled, y_valid), callbacks=[keras.callbacks.EarlyStopping(patience=10)]) keras_reg.predict(X_test_scaled) # ### 6.4) # Use a `sklearn.model_selection.RandomizedSearchCV` to search the hyperparameter space of your `KerasRegressor`. # + from scipy.stats import reciprocal param_distribs = { "n_hidden": [0, 1, 2, 3], "n_neurons": np.arange(1, 100), "learning_rate": reciprocal(3e-4, 3e-2), } # + from sklearn.model_selection import RandomizedSearchCV rnd_search_cv = RandomizedSearchCV(keras_reg, param_distribs, n_iter=10, cv=3, verbose=2) # - rnd_search_cv.fit(X_train_scaled, y_train, epochs=100, validation_data=(X_valid_scaled, y_valid), callbacks=[keras.callbacks.EarlyStopping(patience=10)]) rnd_search_cv.best_params_ rnd_search_cv.best_score_ rnd_search_cv.best_estimator_ # ### 6.5) # Evaluate the best model found on the test set. You can either use the best estimator's `score()` method, or get its underlying Keras model *via* its `model` attribute, and call this model's `evaluate()` method. Note that the estimator returns the negative mean square error (it's a score, not a loss, so higher is better). rnd_search_cv.score(X_test_scaled, y_test) model = rnd_search_cv.best_estimator_.model model.evaluate(X_test_scaled, y_test) # ### 6.6) # Finally, save the best Keras model found. **Tip**: it is available via the best estimator's `model` attribute, and just need to call its `save()` method. model.save("my_fine_tuned_housing_model.h5") # ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg) # ## Exercise 7 – The functional API # Not all neural network models are simply sequential. Some may have complex topologies. Some may have multiple inputs and/or multiple outputs. For example, a Wide & Deep neural network (see [paper](https://ai.google/research/pubs/pub45413)) connects all or part of the inputs directly to the output layer, as shown on the following diagram: # <img src="images/wide_and_deep_net.png" title="Wide and deep net" width=300 /> # ### 7.1) # Use Keras' functional API to implement a Wide & Deep network to tackle the California housing problem. # # **Tips**: # * You need to create a `keras.layers.Input` layer to represent the inputs. Don't forget to specify the input `shape`. # * Create the `Dense` layers, and connect them by using them like functions. For example, `hidden1 = keras.layers.Dense(30, activation="relu")(input)` and `hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)` # * Use the `keras.layers.concatenate()` function to concatenate the input layer and the second hidden layer's output. # * Create a `keras.models.Model` and specify its `inputs` and `outputs` (e.g., `inputs=[input]`). # * Then use this model just like a `Sequential` model: you need to compile it, display its summary, train it, evaluate it and use it to make predictions. # ### 7.2) # After the Sequential API and the Functional API, let's try the Subclassing API: # * Create a subclass of the `keras.models.Model` class. # * Create all the layers you need in the constructor (e.g., `self.hidden1 = keras.layers.Dense(...)`). # * Use the layers to process the `input` in the `call()` method, and return the output. # * Note that you do not need to create a `keras.layers.Input` in this case. # * Also note that `self.output` is used by Keras, so you should use another name for the output layer (e.g., `self.output_layer`). 
# # **When should you use the Subclassing API?** # * Both the Sequential API and the Functional API are declarative: you first declare the list of layers you need and how they are connected, and only then can you feed your model with actual data. The models that these APIs build are just static graphs of layers. This has many advantages (easy inspection, debugging, saving, loading, sharing, etc.), and they cover the vast majority of use cases, but if you need to build a very dynamic model (e.g., with loops or conditional branching), or if you want to experiment with new ideas using an imperative programming style, then the Subclassing API is for you. You can pretty much do any computation you want in the `call()` method, possibly with loops and conditions, using Keras layers of even low-level TensorFlow operations. # * However, this extra flexibility comes at the cost of less transparency. Since the model is defined within the `call()` method, Keras cannot fully inspect it. All it sees is the list of model attributes (which include the layers you define in the constructor), so when you display the model summary you just see a list of unconnected layers. Consequently, you cannot save or load the model without writing extra code. So this API is best used only when you really need the extra flexibility. # + class MyModel(keras.models.Model): def __init__(self): super(MyModel, self).__init__() # create layers here def call(self, input): # write any code here, using layers or even low-level TF code return output model = MyModel() # - # ### 7.3) # Now suppose you want to send only features 0 to 4 directly to the output, and only features 2 to 7 through the hidden layers, as shown on the following diagram. Use the functional API to build, train and evaluate this model. # # **Tips**: # * You need to create two `keras.layers.Input` (`input_A` and `input_B`) # * Build the model using the functional API, as above, but when you build the `keras.models.Model`, remember to set `inputs=[input_A, input_B]` # * When calling `fit()`, `evaluate()` and `predict()`, instead of passing `X_train_scaled`, pass `(X_train_scaled_A, X_train_scaled_B)` (two NumPy arrays containing only the appropriate features copied from `X_train_scaled`). # <img src="images/multiple_inputs.png" title="Multiple inputs" width=300 /> # ### 7.4) # Build the multi-input and multi-output neural net represented in the following diagram. # # <img src="images/multiple_inputs_and_outputs.png" title="Multiple inputs and outputs" width=400 /> # # **Why?** # # There are many use cases in which having multiple outputs can be useful: # * Your task may require multiple outputs, for example, you may want to locate and classify the main object in a picture. This is both a regression task (finding the coordinates of the object's center, as well as its width and height) and a classification task. # * Similarly, you may have multiple independent tasks to perform based on the same data. Sure, you could train one neural network per task, but in many cases you will get better results on all tasks by training a single neural network with one output per task. This is because the neural network can learn features in the data that are useful across tasks. # * Another use case is as a regularization technique (i.e., a training constraint whose objective is to reduce overfitting and thus improve the model's ability to generalize). 
For example, you may want to add some auxiliary outputs in a neural network architecture (as shown in the diagram) to ensure that that the underlying part of the network learns something useful on its own, without relying on the rest of the network. # # **Tips**: # * Building the model is pretty straightforward using the functional API. Just make sure you specify both outputs when creating the `keras.models.Model`, for example `outputs=[output, aux_output]`. # * Each output has its own loss function. In this scenario, they will be identical, so you can either specify `loss="mse"` (this loss will apply to both outputs) or `loss=["mse", "mse"]`, which does the same thing. # * The final loss used to train the whole network is just a weighted sum of all loss functions. In this scenario, you want most to give a much smaller weight to the auxiliary output, so when compiling the model, you must specify `loss_weights=[0.9, 0.1]`. # * When calling `fit()` or `evaluate()`, you need to pass the labels for all outputs. In this scenario the labels will be the same for the main output and for the auxiliary output, so make sure to pass `(y_train, y_train)` instead of `y_train`. # * The `predict()` method will return both the main output and the auxiliary output. # ![Exercise solution](https://camo.githubusercontent.com/250388fde3fac9135ead9471733ee28e049f7a37/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f302f30362f46696c6f735f736567756e646f5f6c6f676f5f253238666c69707065642532392e6a7067) # ## Exercise 7 – Solution # ### 7.1) # Use Keras' functional API to implement a Wide & Deep network to tackle the California housing problem. input = keras.layers.Input(shape=X_train.shape[1:]) hidden1 = keras.layers.Dense(30, activation="relu")(input) hidden2 = keras.layers.Dense(30, activation="relu")(hidden1) concat = keras.layers.concatenate([input, hidden2]) output = keras.layers.Dense(1)(concat) model = keras.models.Model(inputs=[input], outputs=[output]) model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(1e-3)) model.summary() history = model.fit(X_train_scaled, y_train, epochs=10, validation_data=(X_valid_scaled, y_valid)) model.evaluate(X_test_scaled, y_test) model.predict(X_test_scaled) # ### 7.2) # After the Sequential API and the Functional API, let's try the Subclassing API: # * Create a subclass of the `keras.models.Model` class. # * Create all the layers you need in the constructor (e.g., `self.hidden1 = keras.layers.Dense(...)`). # * Use the layers to process the `input` in the `call()` method, and return the output. # * Note that you do not need to create a `keras.layers.Input` in this case. # * Also note that `self.output` is used by Keras, so you should use another name for the output layer (e.g., `self.output_layer`). 
# + class MyModel(keras.models.Model): def __init__(self): super(MyModel, self).__init__() self.hidden1 = keras.layers.Dense(30, activation="relu") self.hidden2 = keras.layers.Dense(30, activation="relu") self.output_ = keras.layers.Dense(1) def call(self, input): hidden1 = self.hidden1(input) hidden2 = self.hidden2(hidden1) concat = keras.layers.concatenate([input, hidden2]) output = self.output_(concat) return output model = MyModel() # - model.compile(loss="mse", optimizer=keras.optimizers.SGD(1e-3)) history = model.fit(X_train_scaled, y_train, epochs=10, validation_data=(X_valid_scaled, y_valid)) model.summary() model.evaluate(X_test_scaled, y_test) model.predict(X_test_scaled) # ### 7.3) # Now suppose you want to send only features 0 to 4 directly to the output, and only features 2 to 7 through the hidden layers, as shown on the diagram. Use the functional API to build, train and evaluate this model. input_A = keras.layers.Input(shape=[5]) input_B = keras.layers.Input(shape=[6]) hidden1 = keras.layers.Dense(30, activation="relu")(input_B) hidden2 = keras.layers.Dense(30, activation="relu")(hidden1) concat = keras.layers.concatenate([input_A, hidden2]) output = keras.layers.Dense(1)(concat) model = keras.models.Model(inputs=[input_A, input_B], outputs=[output]) model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(1e-3)) model.summary() X_train_scaled_A = X_train_scaled[:, :5] X_train_scaled_B = X_train_scaled[:, 2:] X_valid_scaled_A = X_valid_scaled[:, :5] X_valid_scaled_B = X_valid_scaled[:, 2:] X_test_scaled_A = X_test_scaled[:, :5] X_test_scaled_B = X_test_scaled[:, 2:] history = model.fit([X_train_scaled_A, X_train_scaled_B], y_train, epochs=10, validation_data=([X_valid_scaled_A, X_valid_scaled_B], y_valid)) model.evaluate([X_test_scaled_A, X_test_scaled_B], y_test) model.predict([X_test_scaled_A, X_test_scaled_B]) # ### 7.4) # Build the multi-input and multi-output neural net represented in the diagram. input_A = keras.layers.Input(shape=X_train_scaled_A.shape[1:]) input_B = keras.layers.Input(shape=X_train_scaled_B.shape[1:]) hidden1 = keras.layers.Dense(30, activation="relu")(input_B) hidden2 = keras.layers.Dense(30, activation="relu")(hidden1) concat = keras.layers.concatenate([input_A, hidden2]) output = keras.layers.Dense(1)(concat) aux_output = keras.layers.Dense(1)(hidden2) model = keras.models.Model(inputs=[input_A, input_B], outputs=[output, aux_output]) model.compile(loss="mean_squared_error", loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(1e-3)) model.summary() history = model.fit([X_train_scaled_A, X_train_scaled_B], [y_train, y_train], epochs=10, validation_data=([X_valid_scaled_A, X_valid_scaled_B], [y_valid, y_valid])) model.evaluate([X_test_scaled_A, X_test_scaled_B], [y_test, y_test]) y_pred, y_pred_aux = model.predict([X_test_scaled_A, X_test_scaled_B]) y_pred y_pred_aux # ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg) # ## Exercise 8 – Deep Nets # Let's go back to Fashion MNIST and build deep nets to tackle it. We need to load it, split it and scale it. 
fashion_mnist = keras.datasets.fashion_mnist (X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data() X_valid, X_train = X_train_full[:5000], X_train_full[5000:] y_valid, y_train = y_train_full[:5000], y_train_full[5000:] # + from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28) X_valid_scaled = scaler.transform(X_valid.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28) X_test_scaled = scaler.transform(X_test.astype(np.float32).reshape(-1, 28 * 28)).reshape(-1, 28, 28) # - # ### 8.1) # Build a sequential model with 20 hidden dense layers, with 100 neurons each, using the ReLU activation function, plus the output layer (10 neurons, softmax activation function). Try to train it for 10 epochs on Fashion MNIST and plot the learning curves. Notice that progress is very slow. # ### 8.2) # Update the model to add a `BatchNormalization` layer after every hidden layer. Notice that performance progresses much faster per epoch, although computations are much more intensive. Display the model summary and notice all the non-trainable parameters (the scale $\gamma$ and offset $\beta$ parameters). # ### 8.3) # Try moving the BN layers before the hidden layers' activation functions. Does this affect the model's performance? # ### 8.4) # Remove all the BN layers, and just use the SELU activation function instead (always use SELU with LeCun Normal weight initialization). Notice that you get better performance than with BN but training is much faster. Isn't it marvelous? :-) # ### 8.5) # Try training for 10 additional epochs, and notice that the model starts overfitting. Try adding a Dropout layer (with a 50% dropout rate) just before the output layer. Does it reduce overfitting? What about the final validation accuracy? # # **Warning**: you should not use regular Dropout, as it breaks the self-normalizing property of the SELU activation function. Instead, use AlphaDropout, which is designed to work with SELU. # ![Exercise solution](https://camo.githubusercontent.com/250388fde3fac9135ead9471733ee28e049f7a37/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f302f30362f46696c6f735f736567756e646f5f6c6f676f5f253238666c69707065642532392e6a7067) # ## Exercise 8 – Solution # ### 8.1) # Build a sequential model with 20 hidden dense layers, with 100 neurons each, using the ReLU activation function, plus the output layer (10 neurons, softmax activation function). Try to train it for 10 epochs on Fashion MNIST and plot the learning curves. Notice that progress is very slow. model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) for _ in range(20): model.add(keras.layers.Dense(100, activation="relu")) model.add(keras.layers.Dense(10, activation="softmax")) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(1e-3), metrics=["accuracy"]) history = model.fit(X_train_scaled, y_train, epochs=10, validation_data=(X_valid_scaled, y_valid)) plot_learning_curves(history) # ### 8.2) # Update the model to add a `BatchNormalization` layer after every hidden layer. Notice that performance progresses much faster per epoch, although computations are much more intensive. Display the model summary and notice all the non-trainable parameters (the scale $\gamma$ and offset $\beta$ parameters). 
model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) for _ in range(20): model.add(keras.layers.Dense(100, activation="relu")) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Dense(10, activation="softmax")) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(1e-3), metrics=["accuracy"]) history = model.fit(X_train_scaled, y_train, epochs=10, validation_data=(X_valid_scaled, y_valid)) plot_learning_curves(history) model.summary() # ### 8.3) # Try moving the BN layers before the hidden layers' activation functions. Does this affect the model's performance? model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) for _ in range(20): model.add(keras.layers.Dense(100)) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Activation("relu")) model.add(keras.layers.Dense(10, activation="softmax")) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(1e-3), metrics=["accuracy"]) history = model.fit(X_train_scaled, y_train, epochs=10, validation_data=(X_valid_scaled, y_valid)) plot_learning_curves(history) # ### 8.4) # Remove all the BN layers, and just use the SELU activation function instead (always use SELU with LeCun Normal weight initialization). Notice that you get better performance than with BN but training is much faster. Isn't it marvelous? :-) model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) for _ in range(20): model.add(keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal")) model.add(keras.layers.Dense(10, activation="softmax")) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(1e-3), metrics=["accuracy"]) history = model.fit(X_train_scaled, y_train, epochs=10, validation_data=(X_valid_scaled, y_valid)) plot_learning_curves(history) # ### 8.5) # Try training for 10 additional epochs, and notice that the model starts overfitting. Try adding a Dropout layer (with a 50% dropout rate) just before the output layer. Does it reduce overfitting? What about the final validation accuracy? history = model.fit(X_train_scaled, y_train, epochs=10, validation_data=(X_valid_scaled, y_valid)) plot_learning_curves(history) model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) for _ in range(20): model.add(keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal")) model.add(keras.layers.AlphaDropout(rate=0.5)) model.add(keras.layers.Dense(10, activation="softmax")) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(1e-3), metrics=["accuracy"]) history = model.fit(X_train_scaled, y_train, epochs=20, validation_data=(X_valid_scaled, y_valid)) plot_learning_curves(history)
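# To make the answer to the last part of 8.5 explicit, you can read the final validation
# accuracy straight from the `History` object and, once you are done tuning, evaluate on the
# test set. This is a minimal sketch assuming `model`, `history` and the scaled splits above
# are still in scope (the history key is "val_acc" rather than "val_accuracy" in older Keras
# versions, hence the fallback).

# +
val_acc_key = "val_accuracy" if "val_accuracy" in history.history else "val_acc"
print("Final validation accuracy:", history.history[val_acc_key][-1])
model.evaluate(X_test_scaled, y_test)
# -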
01_neural_nets_with_keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Marc-Serenio/OOP-1-1/blob/main/Midterm_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="SyMf8O51Hec_" outputId="905586ea-5887-494b-ef4e-ea20a300e5d0" class TemperatureConversion: def __init__(self, temp=1): self._temp = temp class CelsiusToFahrenheit(TemperatureConversion): def conversion(self): return (self._temp * 9) / 5 + 32 class CelsiusToKelvin(TemperatureConversion): def conversion(self): return self._temp + 273.15 class FahrenheitToCelsius(TemperatureConversion): def conversion(self): return (self._temp - 32)*5/9 class KelvinToCelsius (TemperatureConversion): def conversion(self): return self._temp - 273.15 tempInCelsius = float(input("Enter the temperature in Celsius:")) convert = CelsiusToKelvin(tempInCelsius) print(str(convert.conversion()) + "Kelvin") convert = CelsiusToFahrenheit(tempInCelsius) print(str(convert.conversion()) + "Fahrenheit") tempInFahrenheit = float(input("Enter the temperature in Fahrenheit:")) convert = FahrenheitToCelsius(tempInFahrenheit) print(str(convert.conversion()) + "Celsius") tempInKelvin = float(input("Enter the temperature in Kelvin:")) convert = KelvinToCelsius(tempInKelvin) print(str(convert.conversion()) + "Celsius")
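# The inheritance pattern above makes it easy to add more conversions without touching the
# existing classes, and to treat every converter through the same `conversion()` interface.
# The `FahrenheitToKelvin` class and the loop below are an illustrative sketch only; they are
# not part of the original exam code.

# +
class FahrenheitToKelvin(TemperatureConversion):
    def conversion(self):
        return (self._temp - 32) * 5 / 9 + 273.15

# polymorphism: each subclass supplies its own conversion() implementation
for converter in (CelsiusToFahrenheit(100), CelsiusToKelvin(100), FahrenheitToKelvin(212)):
    print(type(converter).__name__, "->", converter.conversion())
# -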
Midterm_Exam.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # <div style="width:900px;background:#F9EECF;border:1px solid black;text-align:left;padding:20px;"> # <span style="color:purple;font-size:13pt"><b>Data Dictionary</span></b> # </div> # # <div style="width:900px;float:left;padding:20px;align:left;"> # Descriptors: # <br><br> # <style type="text/css"> # .tg {border-collapse:collapse;border-spacing:0;} # .tg td{font-family:Arial, sans-serif;font-size:14px;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;} # .tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;} # .tg .tg-2lp6{font-weight:bold;background-color:#bbdaff;vertical-align:top} # .tg .tg-amwm{font-weight:bold;text-align:center;vertical-align:top} # .tg .tg-36xf{font-weight:bold;background-color:#bbdaff} # .tg .tg-yw4l{vertical-align:top} # .tg .tg-yw42{vertical-align:top;color:blue} # </style> # <table class="tg"> # # <tr> # <th class="tg-36xf">Feature Name</th> # <th class="tg-2lp6">Description</th> # <th class="tg-2lp6">Metrics</th> # </tr> # # <tr> # <td class="tg-yw4l">RecordID</td> # <td class="tg-yw4l">A unique integer for each ICU stay </td> # <td class="tg-yw4l">Integer</td> # </tr> # <tr> # <td class="tg-yw4l">Age</td> # <td class="tg-yw4l">Age</td> # <td class="tg-yw4l">(years)</td> # </tr> # # <tr> # <td class="tg-yw4l">Height</td> # <td class="tg-yw4l">Height</td> # <td class="tg-yw4l">(cm)</td> # </tr> # # <tr> # <td class="tg-yw4l">ICUtype</td> # <td class="tg-yw4l">ICU Type</td> # <td class="tg-yw4l">(1: Coronary Care Unit, 2: Cardiac Surgery Recovery Unit, <br>3: Medical ICU, or 4: Surgical ICU)</td> # </tr> # # <tr> # <td class="tg-yw4l">Gender</td> # <td class="tg-yw4l">Gender</td> # <td class="tg-yw4l">(0: female, or 1: male)</td> # </tr> # # </table> # <br><br> # These 37 variables may be observed once, more than once, or not at all in some cases: # <br><br> # # <table class="tg"> # <tr> # <th class="tg-36xf">Feature Name</th> # <th class="tg-2lp6">Description</th> # <th class="tg-2lp6">Metrics</th> # </tr> # # <tr> # <td class="tg-yw4l">Albumin</td> # <td class="tg-yw4l">Albumin </td> # <td class="tg-yw4l">(g/dL)</td> # </tr> # <tr> # <td class="tg-yw4l">ALP</td> # <td class="tg-yw4l">Alkaline phosphatase</td> # <td class="tg-yw4l">(IU/L)</td> # </tr> # <tr> # <td class="tg-yw4l">ALT</td> # <td class="tg-yw4l">Alanine transaminase</td> # <td class="tg-yw4l">(IU/L)</td> # </tr> # <tr> # <td class="tg-yw4l">AST</td> # <td class="tg-yw4l">Aspartate transaminase</td> # <td class="tg-yw4l">(IU/L)</td> # </tr> # <tr> # <td class="tg-yw4l">Bilirubin</td> # <td class="tg-yw4l">Bilirubin</td> # <td class="tg-yw4l">(mg/dL)</td> # </tr> # <tr> # <td class="tg-yw4l">BUN</td> # <td class="tg-yw4l">Blood urea nitrogen</td> # <td class="tg-yw4l">(mg/dL)</td> # </tr> # <tr> # <td class="tg-yw4l">Cholesterol</td> # <td class="tg-yw4l">Cholesterol</td> # <td class="tg-yw4l">(mg/dL)</td> # </tr> # <tr> # <td class="tg-yw4l">Creatinine</td> # <td class="tg-yw4l">Serum creatinine</td> # <td class="tg-yw4l">(mg/dL)</td> # </tr> # # <tr> # <td class="tg-yw4l">DiasABP <td class="tg-yw4l">Invasive diastolic arterial blood pressure <td class="tg-yw4l">(mmHg)</tr> # # <tr><td class="tg-yw4l">FiO2 <td class="tg-yw4l">Fractional 
inspired O2 <td class="tg-yw4l">(0-1)</tr> # <tr><td class="tg-yw4l">GCS <td class="tg-yw4l">Glasgow Coma Score <td class="tg-yw4l">(3-15)</tr> # <tr><td class="tg-yw4l">Glucose <td class="tg-yw4l">Serum glucose <td class="tg-yw4l">(mg/dL)</tr> # <tr><td class="tg-yw4l">HCO3 <td class="tg-yw4l">Serum bicarbonate <td class="tg-yw4l">(mmol/L)</tr> # <tr><td class="tg-yw4l">HCT <td class="tg-yw4l">Hematocrit <td class="tg-yw4l">(%)</tr> # <tr><td class="tg-yw4l">HR <td class="tg-yw4l">Heart rate <td class="tg-yw4l">(bpm)</tr> # <tr><td class="tg-yw4l">K <td class="tg-yw4l">Serum potassium <td class="tg-yw4l">(mEq/L)</tr> # <tr><td class="tg-yw4l">Lactate <td class="tg-yw4l">Lactate<td class="tg-yw4l">(mmol/L)</tr> # <tr><td class="tg-yw4l">Mg <td class="tg-yw4l">Serum magnesium <td class="tg-yw4l">(mmol/L)</tr> # <tr><td class="tg-yw4l">MAP <td class="tg-yw4l">Invasive mean arterial blood pressure <td class="tg-yw4l">(mmHg)</tr> # <tr><td class="tg-yw4l">MechVent <td class="tg-yw4l">Mechanical ventilation respiration <td class="tg-yw4l">(0:false, or 1:true)</tr> # <tr><td class="tg-yw4l">Na <td class="tg-yw4l">Serum sodium <td class="tg-yw4l">(mEq/L)</tr> # <tr><td class="tg-yw4l">NIDiasABP <td class="tg-yw4l">Non-invasive diastolic arterial blood pressure <td class="tg-yw4l">(mmHg)</tr> # <tr><td class="tg-yw4l">NIMAP <td class="tg-yw4l">Non-invasive mean arterial blood pressure <td class="tg-yw4l">(mmHg)</tr> # <tr><td class="tg-yw4l">NISysABP <td class="tg-yw4l">Non-invasive systolic arterial blood pressure <td class="tg-yw4l">(mmHg)</tr> # <tr><td class="tg-yw4l">PaCO2 <td class="tg-yw4l">partial pressure of arterial CO2 <td class="tg-yw4l">(mmHg)</tr> # <tr><td class="tg-yw4l">PaO2 <td class="tg-yw4l">Partial pressure of arterial O2 <td class="tg-yw4l">(mmHg)</tr> # <tr><td class="tg-yw4l">pH <td class="tg-yw4l">Arterial pH <td class="tg-yw4l">(0-14)</tr> # <tr><td class="tg-yw4l">Platelets <td class="tg-yw4l">Platelets<td class="tg-yw4l">(cells/nL)</tr> # <tr><td class="tg-yw4l">RespRate <td class="tg-yw4l">Respiration rate <td class="tg-yw4l">(bpm)</tr> # <tr><td class="tg-yw4l">SaO2 <td class="tg-yw4l">O2 saturation in hemoglobin <td class="tg-yw4l">(%)</tr> # <tr><td class="tg-yw4l">SysABP <td class="tg-yw4l">Invasive systolic arterial blood pressure <td class="tg-yw4l">(mmHg)</tr> # <tr><td class="tg-yw4l">Temp <td class="tg-yw4l">Temperature <td class="tg-yw4l">(°C)</tr> # <tr><td class="tg-yw4l">TropI <td class="tg-yw4l">Troponin-I <td class="tg-yw4l">(μg/L)</tr> # <tr><td class="tg-yw4l">TropT <td class="tg-yw4l">Troponin-T <td class="tg-yw4l">(μg/L)</tr> # <tr><td class="tg-yw4l">Urine <td class="tg-yw4l">Urine output <td class="tg-yw4l">(mL)</tr> # <tr><td class="tg-yw4l">WBC <td class="tg-yw4l">White blood cell count <td class="tg-yw4l">(cells/nL)</tr> # <tr><td class="tg-yw4l">Weight <td class="tg-yw4l">Weight<td class="tg-yw4l">(kg)</tr></table> # # <br><br> # Outcomes-Related Descriptors: # # <table class="tg"> # <tr> # <th class="tg-36xf">Outcomes</th> # <th class="tg-2lp6">Description</th> # <th class="tg-2lp6">Metrics</th> # </tr> # # <tr> # <td class="tg-yw4l">SAPS-I score</td> # <td class="tg-yw4l">(Le Gall et al., 1984) </td> # <td class="tg-yw4l">between 0 to 163</td> # </tr> # # <tr> # <td class="tg-yw4l">SOFA score</td> # <td class="tg-yw4l">(Ferreira et al., 2001) </td> # <td class="tg-yw4l">between 0 to 4</td> # </tr> # # <tr> # <td class="tg-yw4l">Length of stay</td> # <td class="tg-yw4l">Length of stay </td> # <td class="tg-yw4l">(days)</td> # </tr> # # <tr> # 
<td class="tg-yw4l">Survival</td> # <td class="tg-yw4l">Survival</td> # <td class="tg-yw4l">(days)</td> # </tr> # # <tr> # <td class="tg-yw42"><b>In-hospital death</b></td> # <td class="tg-yw42"><b>Target Variable</b></td> # <td class="tg-yw42"><b>(0: survivor, or 1: died in-hospital)</b></td> # </tr> # # <div style="width:900px;background:#F9EECF;border:1px solid black;text-align:left;padding:20px;"> # # <span style="color:purple;font-size:13pt"><b>Import Packages</span></b> # # </div> # + # Import packages import pandas as pd import numpy as np import seaborn as sns from sklearn.preprocessing import RobustScaler, OneHotEncoder from sklearn.pipeline import FeatureUnion from sklearn.base import BaseEstimator, TransformerMixin from statsmodels.stats.outliers_influence import variance_inflation_factor pd.set_option('display.max_columns', 200) pd.set_option('display.max_rows',200) sns.set_style('whitegrid') sns.set(rc={"figure.figsize": (15, 8)}) # %config InlineBackend.figure_format = 'retina' # %matplotlib inline # + # Simple cleaning on csv file mortality = pd.read_csv('mortality_filled_median.csv') mortality.drop(['Unnamed: 0'],axis=1,inplace=True) mortality.set_index('recordid',inplace=True) # - X = mortality.drop('in-hospital_death',axis=1) # identify predictors # <div style="width:900px;background:#F9EECF;border:1px solid black;text-align:left;padding:20px;"> # # <p> # # <span style="color:purple;font-size:13pt"><b>Feature Selection</span></b> # <br><br> # <b>Step 1: Preprocess</b> # <br> # Get dummy variables for categorical features # <br> # Scale numerical features with Robust Scaler # <br> # (Scaled with median and the interquartile range so that outliers can be taken into account) # <br><br> # <b>Step 2: Variance Inflation Factor</b> # <br>Drop columns that are multi-colinear with each other # <br><br> # <b>Step 3: Decision Trees on Feature Importance</b> # <br> Rank features based on importance # <br><br> # <b>Step 4: Reduce dimensionality using PCA</b> # </div> class Categorical_Extractor(BaseEstimator, TransformerMixin): def __init__(self): pass def get_dummy_(self,df): icu_dummy = pd.get_dummies(df['icutype'],drop_first=True) df = pd.concat((df['gender'],icu_dummy),axis=1) return df.values.reshape(-1, 4) def transform(self, X, *args): X = self.get_dummy_(X) return X def fit(self, X, *args): return self class Numerical_Extractor(BaseEstimator, TransformerMixin): def __init__(self): pass def robust_scale_(self,df,threshold=10): num_col = [] for col, num in df.nunique().iteritems(): if num > threshold: num_col.append(col) df = df[num_col] rscaler = RobustScaler() array = rscaler.fit_transform(df) return array.reshape(-1,array.shape[1]) def transform(self, X, *args): X = self.robust_scale_(X) return X def fit(self, X, *args): return self feature_union = FeatureUnion([ ('numerical_scaler',Numerical_Extractor()), ('categorical_transform',Categorical_Extractor()) ]) transformed_X = feature_union.transform(X) new_columns = list(X.columns) new_columns.extend(['icutype3','icutype4']) transformed_X = pd.DataFrame(transformed_X,columns=new_columns,index=X.index) transformed_X.rename(columns={'icutype':'icutype2'},inplace=True) transformed_X.head() transformed_X.shape # <div style="width:900px;background:#F9EECF;border:1px solid black;text-align:left;padding:20px;"> # # We have a total of 110 features now. 
# # </div> # + # Variance Inflation Factor vif = [variance_inflation_factor(transformed_X.iloc[:,:-4].values,i) for i in range(transformed_X.iloc[:,:-4].shape[1])] # + sns.set_style('whitegrid') sns.set(rc={"figure.figsize": (15, 15)}) vif_df = pd.DataFrame(transformed_X.iloc[:,:-4].columns,columns=['Features']) vif_df['VIF'] = vif sns.barplot(x='VIF',y='Features',data=vif_df) # - remove_col = list(vif_df[vif_df['VIF']>5]['Features']) selected_X = transformed_X.drop(remove_col,axis=1) selected_X.shape
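# Steps 3 and 4 of the plan above (tree-based feature importance and PCA) are not shown in
# this excerpt. The cell below is a hedged sketch of how they could look, using a random
# forest for the tree-based ranking; the hyperparameters and the 95% variance threshold are
# illustrative choices, not part of the original notebook.

# +
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA

y = mortality['in-hospital_death']

# Step 3: rank the remaining features by importance
rf = RandomForestClassifier(n_estimators=200, random_state=0)
rf.fit(selected_X, y)
importances = pd.Series(rf.feature_importances_, index=selected_X.columns).sort_values(ascending=False)
print(importances.head(15))

# Step 4: reduce dimensionality with PCA, keeping roughly 95% of the variance
pca = PCA(n_components=0.95)
X_pca = pca.fit_transform(selected_X)
print(X_pca.shape)
# -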
__Project Files/.ipynb_checkpoints/FeatureSelection_withoutclass-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %pylab inline
matplotlib.rcParams['figure.figsize'] = [16, 8]
plt.rcParams.update({'font.size': 15})
import numpy as np

plt.title("wealth levels for 3 generations")
plt.plot(np.load("wealth_1984.npy"), label = "1984gen")
plt.plot(np.load("wealth_1994.npy"), label = "1994gen")
plt.plot(np.load("wealth_2004.npy"), label = "2004gen")
plt.legend()

plt.title("stock experience ratios for 3 generations")
plt.plot(np.load("stockExp_1984.npy"), label = "1984gen")
plt.plot(np.load("stockExp_1994.npy"), label = "1994gen")
plt.plot(np.load("stockExp_2004.npy"), label = "2004gen")
plt.legend()

plt.title("house ownership ratios for 3 generations")
plt.plot(np.load("H_1984.npy"), label = "1984gen")
plt.plot(np.load("H_1994.npy"), label = "1994gen")
plt.plot(np.load("H_2004.npy"), label = "2004gen")
plt.legend()
20211003/table.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Scale Way # # Set legend title and labels with a scale function. # # + import pandas as pd from lets_plot import * LetsPlot.setup_html() # - df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv') labels = ['premium', 'regular', 'ethanol', 'diesel', 'natural gas'] ggplot(df, aes(x='fl')) + \ geom_bar(aes(fill='fl')) + \ scale_fill_discrete(name='fuel type', labels=labels)
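# Before relabelling, it can help to check which distinct `fl` codes actually appear in the
# data, so that the five labels above cover them all. This small pandas check is an added
# illustration, not part of the original example.
sorted(df['fl'].unique())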
docs/_downloads/cde8aecaf34a6578746784bac955ae8a/plot__scale_way.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Summary: Three ways to do SVD in python
# **The first is for ordinary (dense) matrix SVD using numpy.linalg;
# the second and the third are specifically for sparse SVD.
# There are 2 differences between the second and the third:
# 1) the second's singular values are in increasing order while the third's are in descending order;
# 2) the left singular vectors of the second are the transpose of the third's.**

from __future__ import division
import numpy as np
import random
from numpy import linalg as la
from sparsesvd import sparsesvd
from scipy.sparse.linalg import norm
import scipy.sparse as ss
import scipy.io

# ### Parameter Setting
# - n1, n2 are the row and column counts of a random matrix, respectively
# - m is the number of sampled entries
# - Omega is the sample space
# - P_Omega_M is the sparse matrix we need to decompose

n1, n2, r = 150, 300, 10
m = r*(n1+n2-r)
M = np.random.random((n1,r)).dot(np.random.random((r,n2)))
ind = random.sample(range(n1*n2),m)
Omega = np.unravel_index(ind, (n1,n2))
data = M[Omega]
P_Omega_M = ss.csr_matrix((data,Omega),shape = (n1,n2))

# 1st
U,s,V = la.svd(M)
print (U.shape,s.shape,V.shape)
S = np.zeros(M.shape)
index = s.shape[0]
S[:index, :index] = np.diag(s)
np.dot(U,np.dot(S,V))

# 2nd
u1,s1,v1 = ss.linalg.svds(P_Omega_M,6)
print (u1.shape,s1.shape,v1.shape)
print (s1)
(u1*s1).dot(v1)

# 3rd
ut, s, vt = sparsesvd(ss.csc_matrix(P_Omega_M),6)
print (ut.shape,s.shape,vt.shape)
print (s)
(ut.T*s).dot(vt)
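# A quick numerical check of the two differences listed in the summary above (an added
# sketch, not part of the original comparison): reversing the second method's singular
# values should reproduce the third's, and the second method's left singular vectors should
# match the transpose of the third's up to column order and sign.
print(np.allclose(s1[::-1], s))
print(np.allclose(np.abs(u1[:, ::-1]), np.abs(ut.T)))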
SVT/res/Three ways to do SVD in python .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to Python # # In this session, we're going to be following chapter 1 of scipy-lectures (with some small changes and omissions). # # http://scipy-lectures.org # ## Basic Stuff # comment print("hello") a = 5 print(a) # Here are some numerical *types* # + a = 4 print(type(a)) b = 2.1 print(type(b)) c = True print(type(c)) # - # You can do some basic math with these types # + a = 7 * 3 # multiplication print(a) b = 2 ** 4 # exponentiation (note it's not ^) print(b) c = 3 / 4 # division print(c) # - # ## Containers # ### Lists # # Lists are ordered collections of objects. They may have different types. l = [1, 2, 3, 4.0, 'hello'] print(l) # You can access elements of lists by their index. # # **Note**: indexes start at 0 in Python a = l[3] print(a) # Lists can be *sliced*, meaning you can access multiple elements at once. # # Slicing works like `l[start:stop:stride]`, and all slicing parameter are optional. # # **Note**: the index specified by `stop` is *not* included s = l[1:4] # start = 1, stop = 4, stride = default (1) print(s) s = l[:3] # start = default (0), stop = 3, stride = default (1) print(s) s = l[::2] # start = default (0), stop = default (end), stride = 2 print(s) # Lists are *mutable*, meaning they can be changed. # # **Note**: if you change your lists in a Jupyter notebook and run your cells out of order, you might get unexpected results! l[1] = 5 print(l) # Lists can be appended to and expanded. # + l = [1, 2, 3] print(l) l.append(4) # append single elements print(l) l.extend([-1, -2]) # extend a list print(l) # - # Arithmetic operations also work on lists, but they *don't* do "matrix math" (we'll use numpy for that). # + a = [1, 2, 3] b = [6, 7, 8] c = a + b # concatenates a and b into a single list print(c) d = 2 * a print(d) # - # ### Strings # # Strings can be specified in a few different ways. # + s = "hello" # double quotes print(s) t = 'hello' # single quotes print(t) # - # Strings can be used flexibly to print variables neatly. a = 1 b = 2.4397248 print("a is {} and b is {:.2f}".format(a, b)) # ### Dictionaries # # Dictionaries store a mapping from *keys* to *values*. # # You access the elements in a dictionary by their *key* instead of an index. # + # syntax: dictionary = {key : value, key: value, ...} d = {'first': 'Kenneth', 'last': 'Lyons', 'age': 25} print(d) last_name = d['last'] print(last_name) # - # ## Control Flow # ### if/elif/else # # Conditionally execute blocks of code. # # **Note**: whitespace (indentation) is important in Python a = 5 if a == 3: print("a is three") elif a < 6: print("a is less than six, but it is not equal to three") else: print("a is greater than or equal to 6") # ### for # # You can iterate over things with `for` my_list = [1, 3, 6, 2] for element in my_list: # element refers to the current element in my_list print(element) # The `range` function can be used to iterate over a specific set of numbers without creating a list for index in range(1, 10, 3): # range takes start (default is 0), stop, and stride parameters (default is 1) print(index) # The `break` command can be used to exit a loop l = [4, 2, 7, 8, 210, 1000] for element in l: print(element) if element > 10: break # The `continue` command can be used to skip the rest of the loop body and move on to the next element. 
for element in l: if element == 7: continue # don't run the print statement below print(element) # #### Useful tip # # You pretty much never need to create indexing variables and increment them yourself. # # You can keep track of an index variable in a loop using `enumerate` l = [1, 2, 3, 4] for index, element in enumerate(l): print("index is {}, element is {}".format(index, element)) # ## functions # ### Defining Functions # # Use `def` keyword to define a function. # # Function blocks must be indented like in `if`/`elif`/`else` and `for` blocks. # + def function(): # this is the "function block" print("running function") # call the function as much as you want function() function() # - # Functions can take inputs (arguments) and return things. # + def disk_area(radius): area = 3.14 * radius**2 return area print(disk_area(1.5)) print(disk_area(2.3)) # - # Functions can also take *optional* arguments that you specify a default value for. # + def multiply(num, multiplier=1): return multiplier * num a = multiply(4) # by default, it just multiplies by 1 print(a) b = multiply(4, multiplier=3) # specify a non-default value print(b) # - # **Note**: functions that take in a list and modify it affect the original list you pass in # + def modify_list(list_to_modify): list_to_modify[0] = 0 a = [1, 2, 3] print(a) modify_list(a) print(a) # - # ### Docstrings # # Docstrings are special comments in function blocks which allow you to describe what the function does. def function(arg): """One-line sentence describing the function. More detailed description of what it does. Docstrings are a very important part of writing code that someone else can use...sometimes "someone else" means you in the future. You'll usually also want to describe the arguments and the return value of the function. """ return 1 # Docstrings allow you to read about what a function does without reading the source code. help(function) # Jupyter notebooks also allow you to use a question mark to read the help in a separate window. # + # function? # - # ## Re-Using Code # ### Scripts # # You don't have to write Python code in a jupyter notebook. You can write files with the extension `.py` and run them as scripts. # Create a file called `script.py` # # ```python # print("hello") # # a = 1.3 # b = 4 * a # # print(b) # ``` # %run script.py # ### Modules # # Modules are a great way to re-use code between separate notebooks and/or scripts. # # Python has many modules built -- its "standard library" is extensive # # https://docs.python.org/3/library/ # + import time current_time = time.asctime() print(current_time) # - # You can (and should!) write your own modules. They are imported the same way. # Create a file called `module.py` # # ```python # some_variable = 6 # # def square(x): # return x**2 # ``` # + import module # you can access variables defined in the module print(module.some_variable) # you can use functions defined in the module two_squared = module.square(2) print(two_squared) # - # ## Input and Output # ### Reading a File # # You can read a file into a string # Create a file `file.txt` # # ``` # first line # second line # third line # ``` f = open('file.txt', 'r') # 'r' stands for "read" file_contents = f.read() print(file_contents) f.close() # remember to close the file when you're done with it # You can also iterate over a file line by line f = open('file.txt', 'r') for line in f: print(line) f.close()
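# A common idiom that takes care of the "remember to close the file" step for you is the
# `with` statement: the file is closed automatically when the block ends, even if an error
# occurs inside it. (Added here as a small illustrative example.)
with open('file.txt', 'r') as f:
    for line in f:
        print(line)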
content/materials/notebooks/intro_to_python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split # + # beta - transmission rate # N - number of individuals # - def run_model(T=800, S=500, I=10, R=0, beta=0.05, D=0.005): N = S + I + R S = [S] I = [I] R = [R] for i in range(T-1): newly_infected = (beta * S[-1] * I[-1]) / N newly_resistant = I[-1] * D R.append(R[-1] + newly_resistant) I.append(I[-1] + newly_infected - newly_resistant) S.append(S[-1] - newly_infected) return np.column_stack([S, I, R]) model = run_model() # + plt.figure(figsize=(8,6)) plt.subplot(211) plt.plot(model[:,0], '-g', label="Susceptibles") plt.plot(model[:,2], '-k', label="Resistant") plt.xlabel("Time") plt.ylabel("Susceptibles and Resistant") plt.legend(loc=0) plt.subplot(212) plt.plot(model[:,1], '-r', label='Infected') plt.xlabel("Time") plt.ylabel("Infected") plt.show() # + observed = 100 observed_data = model[0:observed] test_frac = 0.2 train_index = int(observed * (1-test_frac)) train = model[0:train_index,:] test = model[train_index:observed] # - plt.clf() plt.plot(model[:,1], '-r', label='Infected', alpha=0.3) plt.plot(np.arange(0, train_index), train[:,1], '-g', label='Infected') plt.plot(np.arange(train_index, train_index + test.shape[0]), test[:,1], '-m', label='Infected') plt.xlabel("Time") plt.ylabel("Infected") plt.show() def I_loss(beta, D, test): model = run_model(T=test.shape[0], S=train[-1,0], I=train[-1,1], R=train[-1,2], beta=beta, D=D) mse = np.sqrt((model[:,1] - test[:,1])**2).mean() return mse loss = I_loss(test=test,beta=0.05, D=0.005) print("MSE loss: {:.2f}".format(loss)) loss = I_loss(test=test,beta=0.1, D=0.02335) print("MSE loss: {:.2f}".format(loss)) # + model2 = run_model(T=720, S=train[-1,0], I=train[-1,1], R=train[-1,2],beta=0.1, D=0.02335) model3 = run_model(T=720, S=train[-1,0], I=train[-1,1], R=train[-1,2],beta=0.1, D=0.02235) model4 = run_model(T=720, S=train[-1,0], I=train[-1,1], R=train[-1,2],beta=0.1, D=0.02348) model5 = run_model(T=720, S=train[-1,0], I=train[-1,1], R=train[-1,2],beta=0.1, D=0.02551) model6 = run_model(T=720, S=train[-1,0], I=train[-1,1], R=train[-1,2],beta=0.05, D=0.007) model7 = run_model(T=720, S=train[-1,0], I=train[-1,1], R=train[-1,2],beta=0.05, D=0.01) plt.clf() plt.plot(model[:,1], '-r', label='Infected', alpha=0.3) plt.plot(observed_data[:,1], '-r', label='Infected') plt.plot(np.arange(train_index, train_index + model2.shape[0]), model2[:,1], '-b', alpha=0.3, label='Infected') plt.plot(np.arange(train_index, train_index + model2.shape[0]), model3[:,1], '-b', alpha=0.3, label='Infected') plt.plot(np.arange(train_index, train_index + model2.shape[0]), model4[:,1], '-b', alpha=0.3, label='Infected') plt.plot(np.arange(train_index, train_index + model2.shape[0]), model5[:,1], '-b', alpha=0.3, label='Infected') plt.plot(np.arange(train_index, train_index + model2.shape[0]), model6[:,1], '-b', alpha=0.3, label='Infected') plt.plot(np.arange(train_index, train_index + model2.shape[0]), model7[:,1], '-b', alpha=0.3, label='Infected') plt.xlabel("Time") plt.ylabel("Infected") plt.show() # -
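# The cells above compare a handful of hand-picked (beta, D) values. The cell below is a
# minimal sketch of automating that comparison with a coarse grid search over `I_loss`; the
# grid boundaries are illustrative and not part of the original notebook.

# +
best = None
for beta in np.linspace(0.01, 0.2, 20):
    for D in np.linspace(0.001, 0.05, 50):
        loss = I_loss(beta=beta, D=D, test=test)
        if best is None or loss < best[0]:
            best = (loss, beta, D)
print("Best loss {:.2f} at beta={:.3f}, D={:.4f}".format(*best))
# -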
MLCONF2021/SIR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=[] # # Introduction to programming # *Developed by <NAME>* # # info: https://docs.python.org/3/tutorial/introduction.html # + [markdown] tags=[] # ## Class 1 - Introduction # + [markdown] tags=[] # ### 1. What is programming? # - # The process or activity of "writing" computer programs. # + [markdown] tags=[] # #### 1.1 For 'writing' we need what? # - # A language, a pen (keyboard) and a paper (text editor). # + [markdown] tags=[] # ##### 1.1.2 Languages # - # In Programming there are many languages, C/C++, Python, Java, etc. It all depends on 'how'(high or low level) and 'what'(hardware) we need to write on. # In this course we will be mainly using python as is a high level language and the most "Portable, powerful, and a breeze to use". Python can be used to do pretty much anything, scripting applications and standalone programs. # But we will also compare the same code with C language to understand how different languages behave (semantics and syntax). Has this one is a more low level language that is general-purpose and provides constructs that map efficiently to typical machine instructions. Applications include operating systems and various application software for computer architectures that range from supercomputers to PLCs and embedded systems. # + [markdown] tags=[] # ##### 1.1.1 Text editor & IDE (SETUP) # - # Text editor or an IDE an integrated development environment: # - write, # - save, # - interpret/compile # - and debug the language, like this jupyter notebook. # <details open> # <summary>Install</summary> # # Please first install [miniconda](https://docs.conda.io/en/latest/miniconda.html) and [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). # <!-- $ sudo apt update && apt install -y libgl1-mesa-glx libsm6 libxext6 libxrender-dev --> # # ```bash # $ conda create -n iclasses python=3 -y # $ conda activate iclasses # $ git clone https://github.com/nmc-costa/iclasses.git # $ cd iclasses # $ conda install numpy scipy matplotlib pandas jupyterlab -y # $ jupyter lab # ``` # # After installing you can use start_jupyterlab_iclasses.bat for faster deployment. # # </details> # + [markdown] tags=[] # ### 2. Writing Basics: Variables and functions # + [markdown] tags=[] # #### 2.1 Calculate numbers. # Position your cursor in the code cell below and hit [shift][enter]. # + tags=[] 10 + 2 # - 10 * 2 10 // 3 10 / 3 10 - 2 # Operators: +(sum), - (subtraction), * (multiplication), // (integer division), / (float division) # + [markdown] tags=[] # #### 2.2 What is a variable? # - # When you are programming, you want to store your values in variables (memory allocation) for a period of time inside the program. variable_1 = 10 variable_2 = 2 variable_1 variable_2 variable_1 + variable_2 variable_1 * (variable_2 + 10) # A variable can be raised to a power by using `**` # (a hat `^`, as used in some other languages, doesn't work). variable_1 ** variable_2 # + [markdown] tags=[] # #### 2.2 What is a function? # - # A function is a set of code that solves a determinated problem. # Functions receive input parameters and return results. # In this case we will be using `print` function that solves the problem of print our results to the console whenever we want. 
print(variable_1 + variable_2) # print(): a function to print the value to the console print(variable_1 * variable_2) # print(): a function to print the value to the console # It also solves the problem of adding text to the print result. If you input more parameters a = "parameter_1" b = variable_1 * variable_2 c = "parameter_3" print("Before", a, b, c, " after") # + [markdown] tags=[] # ### <a name="ex1"></a> Exercise 1 # Compute the value of the polynomial $y=ax^2+bx+c$ at $x=1$, $x=2$, and $x=3$ using $a=1$, $b=1$, $c=6$ and print the results to the screen. # - # <a href="#ex1answer">Answer to Exercise 1</a> # + [markdown] tags=[] # ### 3. Potential # - # https://github.com/bycloudai/StyleCLIP-e4e-colab # ### Answers for the exercises # <a name="ex1answer">Answer to Exercise 1</a> a = 1 b = 1 c = 6 x = 1 y = a * x ** 2 + b * x + c print('y evaluated at x = 1 is', y) x = 2 y = a * x ** 2 + b * x + c print('y evaluated at x = 2 is', y) x = 3 y = a * x ** 2 + b * x + c print('y evaluated at x = 3 is', y) # Other way to represent the results : # + a = 1 b = 1 c = 6 #3 different results x1 = 1 x2 = 2 x3 = 3 y1 = a * x1 ** 2 + b*x1 + c #print em x1=-2 print(y1) y2 = a * x2 ** 2 + b*x2 + c #print em x2=-2 print(y2) y3 = a * x3 ** 2 + b*x3 + c #print em x3=-2 print(y3) print(y1, y2, y3) # - # Represent the results in a plot:: # %matplotlib inline import matplotlib.pyplot as plt plt.plot([x1, x2, x3], [y1, y2, y3], '--bo') # How to know how a function works: help(print) # <a href="#ex1">Back to Exercise 1</a> # <a name="ex1answer">Answer to Exercise 1</a> a = 1 b = 1 c = -6 x = -2 y = a * x ** 2 + b * x + c print('y evaluated at x = -2 is', y) x = 0 y = a * x ** 2 + b * x + c print('y evaluated at x = 0 is', y) x = 2.1 y = a * x ** 2 + b * x + c print('y evaluated at x = 2 is', y) # <a href="#ex1">Back to Exercise 1</a> # + [markdown] tags=[] # ## Extra Resources # - # Some good resources for learning jupyter in notebooks: # # https://jupyter.brynmawr.edu/hub/login # # Some good resources on learning Python in notebooks: # # http://mbakker7.github.io/exploratory_computing_with_python/
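# As a small look-ahead that ties Exercise 1 to the idea above that functions receive input
# parameters and return results, the same polynomial can be written as a function. This is an
# optional sketch, not part of the original class material.

# +
def polynomial(x, a=1, b=1, c=6):
    return a * x ** 2 + b * x + c

for x_value in [1, 2, 3]:
    print('y evaluated at x =', x_value, 'is', polynomial(x_value))
# -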
intro_to_programming_1st_year/c01_introduction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] tags=[]
# ## Load Data
# -

import pandas as pd
sample_person = pd.read_csv('sample_person.csv')
sample_company = pd.read_csv('sample_company.csv')

# + [markdown] tags=[]
# ## Col-mapping Senzing
# - [Full Glossary](https://senzing.zendesk.com/hc/en-us/article_attachments/4405461248915/Senzing_Generic_Entity_Specification_v2.8.1.pdf)
# -

# - RECORD_ID is a unique identifier per data source
# - Only rename the columns you want Senzing to map, and discard the other columns

# ![Mapping Examples](Schema.png)

# ## Load Data into Senzing

# curl -X 'POST' \
#   'http://192.168.180.237:8250/data-sources?dataSource=CIVIL&dataSource=WATCHLIST&withRaw=false' \
#   -H 'accept: application/json; charset=UTF-8' \
#   -H 'Content-Type: application/json; charset=UTF-8'

# ## Extract ER-ised data from Senzing
# [Export Senzing Data](https://senzing.zendesk.com/hc/en-us/articles/115004915547)
# - Access the container
#
# sudo docker exec -it senzing-sshd /bin/bash<br/>
# python3 /opt/senzing/g2/python/G2Export.py -o <some_dir>myExport.csv<br/>
# exit<br/>
# sudo docker cp senzing-sshd:<some_dir>myExport.csv <local_drive><br/>

# ## Exported Data

df = pd.read_csv('data/myExport.csv')
df['RECORD_ID'] = df['RECORD_ID'].astype(str)

# ![Export Data fields](data/export_file.png)

# __We just want to load the relationship data__
mask = (df['MATCH_LEVEL']==0) | (df['MATCH_LEVEL']==1)
df[mask].groupby(by='RESOLVED_ENTITY_ID')['RECORD_ID'].apply(','.join)
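# To use the resolved entities downstream, the export is typically joined back onto the
# source table on RECORD_ID. The cell below is a hedged sketch only: it assumes that
# `sample_person` also carries a RECORD_ID column after the column mapping described above,
# which is not shown in this excerpt.

# +
sample_person['RECORD_ID'] = sample_person['RECORD_ID'].astype(str)
person_resolved = sample_person.merge(
    df[mask][['RECORD_ID', 'RESOLVED_ENTITY_ID']],
    on='RECORD_ID', how='left')
person_resolved.head()
# -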
myscripts/sample/Senzing101.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: env37 # language: python # name: env37 # --- # + # default_exp data.core # + # hide import sys sys.path.append("..") import pandas as pd # - # hide from nbdev.showdoc import * # # Data Core # # > Using the fastai2 `Datasets` to make an time series dataset. # # For now all is univerable but in the future I would also like to add multiplevariable. # # export from fastseq.data.load import * from fastseq.core import * from fastcore.all import * from fastcore.imports import * from fastai2.basics import * from fastai2.data.transforms import * from fastai2.tabular.core import * # + # hide # just check previous nb still works. Sometimes import errors horizon,lookback = 2,5 ints = L(np.arange(7)[None,:],np.arange(7,15)[None,:],np.arange(15,25)[None,:]).map(tensor) dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=2) test_eq(dl.n, len(dl._ids)) test_eq(dl.n, 7) test_eq(len(dl),4) for o in dl:pass # - # # Transforms # # > Basic function to process time-series data before assembling it in a `DataLoaders`. # + # export class NormalizeTS(ItemTransform): "Normalize the Time-Series." def __init__(self, verbose=False, make_ones=True, eps=1e-7, mean = None): """ `make_ones` will make the std 1 if the std is smaller than `10*eps`. This is for blok seqences to not magnify the `y` part of the data. `mean` will set a mean instead of the mean of the x value. """ store_attr(self,'verbose, make_ones, eps, mean') self.m, self.s = 0, 0 def encodes(self, o): self.m, self.s = torch.mean(o[0],-1,keepdim=True), o[0].std(-1,keepdim=True) +self.eps if self.verbose: print('encodes',type(o),[a.shape for a in o], self.m,self.s) if self.mean: self.m = o[0][self.mean] if self.make_ones: self.s[self.s < self.eps*10] = 1 if self.verbose: print(o[0]) print(f"made {self.s < self.eps*10} to ones due to setting `make_ones`") print(f"m:{self.m}\n s:{self.s}") return Tuple([(o[i]-self.m)/self.s for i in range(len(o))]) def decodes(self, o): if o[0].is_cuda: self.m, self.s = to_device(self.m,'cuda'), to_device(self.s,'cuda') if sum([a.is_cuda for a in o]) != len(o): o = Tuple([to_device(a,'cuda') for a in o]) else: if sum([a.is_cuda==False for a in o]) != len(o): o = Tuple([to_cpu(a) for a in o]) self.m, self.s = to_cpu(self.m), to_cpu(self.s) if self.verbose: print('decodes',type(o),[a.shape for a in o], 'shape m/s',self.m.shape) return Tuple([(o[i]*self.s)+self.m for i in range(len(o))]) # + # hide # testing the flatline with a bumb (block sequence) from fastseq.data.load import * horizon,lookback = 2,5 a = np.ones(7)[None,:] a[:,-3:] = 1000 ints = L(a).map(tensor) dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=16, after_batch=NormalizeTS(), min_seq_len=6, num_workers = 1) for o in dl: test_close(o[0].mean(),0) test_close(o[0].std(),1,.5) test_eq(type(o),Tuple) o[0].show() o_new = dl.after_batch[0].decode(o) test_eq(o_new[0].mean()!=0, True) test_eq(o_new[0].std()!=1, True) test_eq(type(o_new),Tuple) test_eq(type(dl.one_batch()),Tuple) dl.show_batch() # + # hide from fastseq.data.load import * horizon,lookback = 2,5 ints = L( np.arange(7)[None,:]*1+100, np.arange(8)[None,:]*100+1,).map(tensor) dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=16, after_batch=NormalizeTS(), min_seq_len=6, num_workers = 1) for o in dl: test_close(o[0].mean(),0) test_close(o[0].std(),1,.5) test_eq(type(o),Tuple) o_new = 
dl.after_batch[0].decode(o) test_eq(o_new[0].mean()!=0, True) test_eq(o_new[0].std()!=1, True) test_eq(type(o_new),Tuple) test_eq(type(dl.one_batch()),Tuple) dl.show_batch() # + norm = NormalizeTS() o = (TSTensorSeq(torch.arange(10.)),TSTensorSeqy( torch.arange(10,15),x_len=10)) o_en = norm(o) test_eq(o_en[0].mean(), 0) test_eq(o_en[1].mean()==0, False) dec_o = norm.decode(o_en) test_eq(dec_o[0],o[0]) f,axs = plt.subplots(1,3, sharey=True) ax = o[0].show(axs[0]) o[1].show(ax) ax.plot([0,15],[0,0],'--') ax = o_en[0].show(axs[1]) o_en[1].show(ax) ax.plot([0,15],[0,0],'--') ax = dec_o[0].show(axs[2]) dec_o[1].show(ax) ax.plot([0,15],[0,0],'--') # + norm = NormalizeTS(mean=9) o = (TSTensorSeq(torch.arange(10.)),TSTensorSeqy( torch.arange(10,15),x_len=10)) o_en = norm(o) test_eq(o_en[0][-1], 0) test_eq(o_en[1][-1]==0, False) dec_o = norm.decode(o_en) test_eq(dec_o[0],o[0]) f,axs = plt.subplots(1,3, sharey=True) ax = o[0].show(axs[0]) o[1].show(ax) ax.plot([0,15],[0,0],'--') ax = o_en[0].show(axs[1]) o_en[1].show(ax) ax.plot([0,15],[0,0],'--') ax = dec_o[0].show(axs[2]) dec_o[1].show(ax) ax.plot([0,15],[0,0],'--') # + # hide if torch.cuda.is_available(): from fastseq.data.load import * horizon,lookback = 2,5 ints = L(tensor(o) for o in [ np.arange(7)[None,:]*1+100, np.arange(8)[None,:]*100+1,]) dl = TSDataLoader(ints, horizon = horizon, lookback = lookback, bs=16, after_batch=NormalizeTS(), num_workers = 1, device=torch.device('cuda')) for o in dl: # test if data on cuda will will stay there test_eq(o[0].is_cuda, True) dec_o = dl.after_batch[0].decode(o) test_eq(dec_o[0].is_cuda, True) # test if o is already moved to cpu dec_o = dl.after_batch[0].decode(to_cpu(o)) test_eq(dec_o[0].is_cuda, False) dl.show_batch() # - # hide # test if o is cuda but m,s are on cpu if torch.cuda.is_available(): norm = NormalizeTS() o = (TSTensorSeq(torch.arange(10.)),TSTensorSeqy( torch.arange(10.,15),x_len=10)) o_en = norm(o) test_eq(o_en[0].mean(), 0) test_eq(o_en[1].mean()==0, False) dec_o = norm.decode(to_device(o_en)) test_eq(dec_o[0],to_device(o[0])) # test if o_enc[0] and o_enc[1] are on different devices o_enc = Tuple([to_device(o_en[0],'cpu'),to_device(o_en[1],'cuda'),]) dec_o = norm.decode(o_enc) test_eq(dec_o[0],o[0]) o_enc = Tuple([to_device(o_en[0],'cuda'),to_device(o_en[1],'cpu'),]) dec_o = norm.decode(o_enc) test_eq(dec_o[0],to_device(o[0])) norm.m, norm.s = to_device(norm.m), to_device(norm.s) o_enc = Tuple([to_device(o_en[0],'cuda'),to_device(o_en[1],'cpu'),]) dec_o = norm.decode(o_enc) test_eq(dec_o[0],to_device(o[0])) # # TSDataLoaders # ## Utils # export def concat_ts_list(train, val): items=L() assert len(train) == len(val) for t, v in zip(train, val): items.append(np.concatenate([t,v],1)) return items a = [np.random.randn(3,10)]*50 b = [np.random.randn(3,5)]*50 r = concat_ts_list(a,b) test_eq(r[0].shape,(3,15)) test_eq(r[0], np.concatenate([a[0],b[0]],1)) # + # export def make_test(items:L(), horizon:int, lookback:int, keep_lookback:bool = False): """Splits the every ts in `items` based on `horizon + lookback`*, where the last part will go into `val` and the first in `train`. *if `keep_lookback`: it will only remove `horizon` from `train` otherwise also lookback. 
""" train, val = L(), L() for ts in items: val.append(ts[:, -(horizon+lookback):]) if keep_lookback: train.append(ts[:, :-(horizon)]) else: train.append(ts[:, :-(horizon+lookback)]) return train, val def make_test_pct(items:L(), pct:float): """Splits the every ts in `items` based on `pct`(percentage) of the length of the timeserie, where the last part will go into `val` and the first in `train`. """ train, val = L(), L() for ts in items: split_idx = int((1-pct)*ts.shape[1]) train.append(ts[:,:split_idx]) val.append(ts[:,split_idx:]) return train, val # + a = [np.random.randn(3,15)]*50 train, val = make_test(a,5,5) test_eq(train[0],a[0][:,:-10]) test_eq(val[0],a[0][:,-10:]) train, val = make_test(a,5,5,True) test_eq(train[0],a[0][:,:-5]) test_eq(val[0],a[0][:,-10:]) # - # ## Dataloaders d = {} d.pop('k',1) #export class TSDataLoaders(DataLoaders): @classmethod @delegates(TSDataLoader.__init__) def from_folder(cls, data_path:Path, valid_pct=.5, seed=None, horizon=None, lookback=None, step=1, nrows=None, skiprows=None, incl_test = True, path:Path='.', device=None, norm=True, **kwargs): """Create from M-compition style in `path` with `train`,`test` csv-files. The `DataLoader` for the test set will be save as an attribute under `test` """ train, test = get_ts_files(data_path, nrows=nrows, skiprows=skiprows) items = concat_ts_list(train, test).map(tensor) horizon = ifnone(horizon, len(test[0])) lookback = ifnone(lookback, horizon * 3) return cls.from_items(items, horizon, lookback = lookback, step = step, incl_test=incl_test, path=path, device=device, norm= norm,**kwargs) @classmethod @delegates(TSDataLoader.__init__) def from_items(cls, items:L, horizon:int, valid_pct=1.5, seed=None, lookback=None, step=1, incl_test = True, path:Path='.', device=None, norm=True, **kwargs): """Create an list of time series. 
The `DataLoader` for the test set will be save as an attribute under `test` """ if len(items[0].shape)==1: items = [i[None,:] for i in items] print(items[0].shape) lookback = ifnone(lookback, horizon * 4) if incl_test: items, test = make_test(items, horizon, lookback, keep_lookback = True) train, valid = make_test(items, horizon + int(valid_pct*horizon), lookback , keep_lookback = True) if norm: make_ones = kwargs.pop('make_ones',True) kwargs.update({'after_batch':L(kwargs.get('after_batch',None))+L(NormalizeTS(make_ones=make_ones))}) db = DataLoaders(*[TSDataLoader(items, horizon=horizon, lookback=lookback, step=step, **kwargs) for items in [train,valid]], path=path, device=device) if device is None: db.cuda() if incl_test: db.test = TSDataLoader(test, horizon=horizon, lookback=lookback, step=step, name='test') print(f"Train:{db.train.n}; Valid: {db.valid.n}; Test {db.test.n}") else: print(f"Train:{db.train.n}; Valid: {db.valid.n}") # TODO add with test_dl, currently give buges return db # hide horizon, lookback = 7,10 items = L(np.arange(-5,100)[None,:],np.arange(500,550)[None,:],np.arange(-110,-56)[None,:]).map(tensor) data = TSDataLoaders.from_items(items, horizon = horizon, lookback=lookback, step=5) test_eq(data.valid.dataset[0].shape[-1]>lookback+horizon, True) # hide horizon,lookback = 2,5 items = L(np.arange(20),np.arange(25,48),np.arange(8,27)).map(tensor) dbunch = TSDataLoaders.from_items(items, horizon=horizon, lookback = lookback, incl_test=False) test_eq(hasattr(dbunch,'test'),False) # hide path = untar_data(URLs.m4_daily) if torch.cuda.is_available(): dbunch = TSDataLoaders.from_folder(path, horizon = 14, step=5, bs=64, nrows=10) for o in dbunch.train.one_batch(): test_eq(o.is_cuda, True) break for o in dbunch.valid: test_eq(o[0].is_cuda, True) break # hide dbunch = TSDataLoaders.from_folder(path, horizon = 14, step=5, bs=64, nrows=10, device = 'cpu') for o in dbunch.train: test_eq(o[0].is_cuda, False) break for o in dbunch.valid: test_eq(o[0].is_cuda, False) break # hide test_eq(dbunch.train.n,1255) test_eq(dbunch.valid.n,50) test_eq(dbunch.test.n,10) # hide dl = dbunch.new(shuffle=True) dbunch = TSDataLoaders.from_folder(path, horizon = 14, step=5, bs=64, nrows=10, device = 'cpu', after_batch=noop) for o in dbunch[0]: test_close(o[0].mean(),0) test_close(o[0].std(),1,eps=.1) show_doc(TSDataLoaders.from_items, name='TSDataLoaders.from_items') show_doc(TSDataLoaders.from_folder, name='TSDataLoaders.from_folder') dbunch = TSDataLoaders.from_folder(path, horizon = 14, step=5, bs=64, nrows=100) dbunch.train.show_batch(max_n=4) dbunch.test.show_batch(max_n=4) # + # hide from nbdev.export import * notebook2script() # -
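# The per-window normalization above can be illustrated outside of fastseq with a few lines of
# plain PyTorch (a minimal sketch, not the fastseq API; `normalize_window` is a made-up name).
# The window is scaled by the mean/std of the lookback only, and near-constant windows keep a
# scale of 1, mirroring the `make_ones` behaviour of `NormalizeTS`.

# +
import torch

def normalize_window(x, y, eps=1e-7, make_ones=True):
    "Scale the lookback `x` and the horizon `y` by the mean/std of `x` alone."
    m = x.mean(-1, keepdim=True)
    s = x.std(-1, keepdim=True) + eps
    if make_ones:
        s = torch.where(s < eps * 10, torch.ones_like(s), s)  # near-flat windows keep scale 1
    return (x - m) / s, (y - m) / s, (m, s)

x = torch.arange(5.)[None, :]          # a lookback window of 5 steps
y = torch.arange(5., 7.)[None, :]      # the 2-step horizon that follows it
x_n, y_n, (m, s) = normalize_window(x, y)
assert torch.isclose(x_n.mean(), torch.tensor(0.))
assert torch.allclose(x_n * s + m, x)  # "decoding" restores the original values
# -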
nbs/03_data.core.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.10 64-bit (''env'': venv)' # name: python3 # --- # # Introduction to Seaborn # # ## About the Data # In this notebook, we will be working with 2 datasets: # - Facebook's stock price throughout 2018 (obtained using the [`stock_analysis` package](https://github.com/stefmolin/stock-analysis)) # - Earthquake data from September 18, 2018 - October 13, 2018 (obtained from the US Geological Survey (USGS) using the [USGS API](https://earthquake.usgs.gov/fdsnws/event/1/)) # # ## Setup # + # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns fb = pd.read_csv( 'data/fb_stock_prices_2018.csv', index_col='date', parse_dates=True ) quakes = pd.read_csv('data/earthquakes.csv') # - # ## Categorical data # A [7.5 magnitude earthquake on September 28, 2018 near Palu, Indonesia caused a devastating tsunami afterwards](https://www.livescience.com/63721-tsunami-earthquake-indonesia.html). Let's take a look at some visualizations to understand which magnitude techniques (`magType`) are used in Indonesia, the range of magnitudes there, and how many of the earthquakes are accompanied by a tsunami. quakes\ .assign( time=lambda x: pd.to_datetime(x.time, unit='ms') )\ .set_index('time')\ .loc['2018-09-28']\ .query( 'parsed_place == "Indonesia" and tsunami and mag == 7.5' ) # ### `stripplot()` # The `stripplot()` function helps us visualize categorical data on one axis and numerical data on the other. We also now have the option of coloring our points using a column of our data (with the `hue` parameter). Using a strip plot, we can see points for each earthquake that was measured with a given `magType` and what its magnitude was; however, it isn't too easy to see density of the points due to overlap: sns.stripplot( x='magType', y='mag', hue='tsunami', data=quakes.query('parsed_place == "Indonesia"') ) # ### `swarmplot()` # The bee swarm plot helps address this issue by keeping the points from overlapping. Notice how many more points we can see for the blue section of the `mb` column: sns.swarmplot( x='magType', y='mag', hue='tsunami', data=quakes.query('parsed_place == "Indonesia"'), size=3.5 # point size ) # ### `boxenplot()` # The `boxenplot()` function creates an enhanced box plot that shows additional quantiles: sns.boxenplot( x='magType', y='mag', data=quakes[['magType', 'mag']] ) plt.title('Comparing earthquake magnitude by magType') # *Read more about this plot type in this paper: [Letter-value plots: Boxplots for large data](https://vita.had.co.nz/papers/letter-value-plot.html)* # # ### `violinplot()` # Violin plots combine box plots and KDEs: # + fig, axes = plt.subplots(figsize=(10, 5)) sns.violinplot( x='magType', y='mag', data=quakes[['magType', 'mag']], ax=axes, scale='width' # all violins have same width ) plt.title('Comparing earthquake magnitude by magType') # - # ## Correlations and Heatmaps # # ### `heatmap()` # Last chapter, we saw how to create a correlation matrix heatmap using `pandas` and `matplotlib` (in the [`2-plotting_with_pandas.ipynb`](../ch_05/2-plotting_with_pandas.ipynb) notebook). 
An easier way is to use `seaborn`: sns.heatmap( fb\ .sort_index()\ .assign( log_volume=np.log(fb.volume), max_abs_change=fb.high - fb.low )\ .corr(), annot=True, center=0, vmin=-1, vmax=1 ) # ### `pairplot()` # The pair plot is seaborn's answer to the scatter matrix we saw in chapter 5 (`pandas.plotting.scatter_matrix()`) in the [`3-pandas_plotting_module.ipynb`](../ch_05/3-pandas_plotting_module.ipynb) notebook: sns.pairplot(fb) # Just as with `pandas` we can specify what to show along the diagonal; however, `seaborn` also allows us to color the data based on another column (or other data with the same shape): sns.pairplot( fb.assign(quarter=lambda x: x.index.quarter), diag_kind='kde', hue='quarter' ) # ### `jointplot()` # The joint plot allows us to visualize the relationship between two variables, like a scatter plot. However, we get the added benefit of being able to visualize their distributions at the same time (as a histogram or KDE). The default options give us a scatter plot in the center and histograms on the sides: sns.jointplot( x='log_volume', y='max_abs_change', data=fb.assign( log_volume=np.log(fb.volume), max_abs_change=fb.high - fb.low ) ) # We can pass `kind='hex'` for hexbins: sns.jointplot( x='log_volume', y='max_abs_change', kind='hex', data=fb.assign( log_volume=np.log(fb.volume), max_abs_change=fb.high - fb.low ) ) # If we pass `kind='kde'`, we get a contour plot of the joint density estimate with KDEs along the sides: sns.jointplot( x='log_volume', y='max_abs_change', kind='kde', data=fb.assign( log_volume=np.log(fb.volume), max_abs_change=fb.high - fb.low ) ) # If we specify `kind='reg'` instead, we get a regression line in the center and KDEs on the sides: sns.jointplot( x='log_volume', y='max_abs_change', kind='reg', data=fb.assign( log_volume=np.log(fb.volume), max_abs_change=fb.high - fb.low ) ) # If we pass `kind='resid'`, we get the residuals from the aforementioned regression: # + sns.jointplot( x='log_volume', y='max_abs_change', kind='resid', data=fb.assign( log_volume=np.log(fb.volume), max_abs_change=fb.high - fb.low ) ) # update y-axis label (discussed in the next notebook) plt.ylabel('residuals') # - # ## Regression plots # We are going to use `seaborn` to visualize a linear regression between the log of the volume traded in Facebook stock and the maximum absolute daily change (daily high stock price - daily low stock price). To do so, we first need to isolate this data: fb_reg_data = fb\ .assign( log_volume=np.log(fb.volume), max_abs_change=fb.high - fb.low )\ .iloc[:,-2:] # Since we want to visualize each column as the regressor, we need to look at permutations of their order. Permutations and combinations (among other things) are made easy in Python with `itertools`, so let's import it: import itertools # `itertools` gives us efficient iterators. Iterators are objects that we loop over, exhausting them. This is an iterator from `itertools`; notice how the second loop doesn't do anything: # + iterator = itertools.repeat("I'm an iterator", 2) for i in iterator: print(f'-->{i}') print('This printed once because the iterator has been exhausted') for i in iterator: print(f'-->{i}') # - # Iterables are objects that can be iterated over. When entering a loop, an iterator is made from the iterable to handle the iteration. Iterators are iterables, but not all iterables are iterators. A list is an iterable. 
If we turn that iterator into an iterable (a list in this case), the second loop runs: # + iterable = list(itertools.repeat("I'm an iterable", 2)) for i in iterable: print(f'-->{i}') print('This prints again because it\'s an iterable:') for i in iterable: print(f'-->{i}') # - # The `reg_resid_plots()` function from the `viz.py` module in this folder uses `regplot()` and `residplot()` from seaborn along with `itertools` to plot the regression and residuals side-by-side: # + from viz import reg_resid_plots # reg_resid_plots?? # - # Let's see what the output looks like for the Facebook data we isolated: from viz import reg_resid_plots reg_resid_plots(fb_reg_data) # We can use `lmplot()` to split our regression across subsets of our data. For example, we can perform a regression per quarter on the Facebook stock data: sns.lmplot( x='log_volume', y='max_abs_change', data=fb.assign( log_volume=np.log(fb.volume), max_abs_change=fb.high - fb.low, quarter=lambda x: x.index.quarter ), col='quarter' ) # ## Faceting # We can create subplots across subsets of our data by faceting. First, we create a `FacetGrid` specifying how to lay out the plots (which categorical column goes along the rows and which one along the columns). Then, we call the `map()` method of the `FacetGrid` and pass in the plotting function we want to use (along with any additional arguments). # # Let's use a facet grid to show the distribution of earthquake magnitudes in Indonesia and Papua New Guinea by `tsunami` and `parsed_place`: g = sns.FacetGrid( quakes.query( 'parsed_place.isin(["Indonesia", "Papua New Guinea"]) ' 'and magType == "mb"' ), row='tsunami', col='parsed_place', height=4 ) g = g.map( sns.histplot, 'mag', kde=True ) # <hr> # <div> # <a href="../ch_05/3-pandas_plotting_module.ipynb"> # <button>&#8592; Chapter 5</button> # </a> # <a href="./2-formatting_plots.ipynb"> # <button style="float: right;">Next Notebook &#8594;</button> # </a> # </div> # <hr>
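# The plots above all read from the Facebook and earthquake CSV files. As a minimal sketch you
# can reproduce the same call patterns on a small synthetic frame (the column names `x`, `y`
# and `group` below are made up for illustration only):

# +
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(0)
demo = pd.DataFrame({
    'x': rng.normal(size=300),
    'group': rng.choice(['a', 'b'], size=300),
})
demo['y'] = 2 * demo.x + rng.normal(scale=0.5, size=300)

sns.jointplot(x='x', y='y', kind='reg', data=demo)  # scatter + regression + marginals
g = sns.FacetGrid(demo, col='group')                # one panel per group
g.map(sns.histplot, 'x', kde=True)
# -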
ch_06/1-introduction_to_seaborn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # NNabla by Examples # This tutorial demonstrates how you can write a script to train a neural network by using a simple hand digits classification task. # Note: This tutorial notebook requires [scikit-learn](http://scikit-learn.org) and [matplotlib](https://matplotlib.org/) installed in your Python environment. # First let us prepare some dependencies. # + # If you run this notebook on Google Colab, uncomment and run the following to set up dependencies. # # !pip install nnabla-ext-cuda100 # # !git clone https://github.com/sony/nnabla.git # # %cd nnabla/tutorial # - # Python2/3 compatibility from __future__ import print_function from __future__ import absolute_import from __future__ import division # + import nnabla as nn import nnabla.functions as F import nnabla.parametric_functions as PF import nnabla.solvers as S from nnabla.monitor import tile_images import numpy as np import matplotlib.pyplot as plt import tiny_digits # %matplotlib inline np.random.seed(0) imshow_opt = dict(cmap='gray', interpolation='nearest') # - # The `tiny_digits` module is located under this folder. It provides some utilities for loading a handwritten-digit classification dataset (MNIST) available in scikit-learn. # ## Logistic Regression # # We will first start by defining a computation graph for logistic regression. (For details on logistic regression, see Appendix A.) # # The training will be done by gradient descent, where gradients are calculated using the error backpropagation algorithm (backprop). # ### Preparing a Toy Dataset # This section just prepares a dataset to be used for demonstration of NNabla usage. digits = tiny_digits.load_digits(n_class=10) tiny_digits.plot_stats(digits) # The next block creates a dataset loader which is a generator providing images and labels as minibatches. Note that this dataset is just an example purpose and not a part of NNabla. data = tiny_digits.data_iterator_tiny_digits(digits, batch_size=64, shuffle=True) # A minibatch is as follows. `img` and `label` are in `numpy.ndarray`. img, label = data.next() plt.imshow(tile_images(img), **imshow_opt) print("labels:", label.reshape(8, 8)) print("Label shape:", label.shape) # ### Preparing the Computation Graph # # NNabla provides two different ways for backprop-based gradient descent optimization. One is with a static graph, and another is with a dynamic graph. We are going to show a static version first. # Forward pass x = nn.Variable(img.shape) # Define an image variable with nn.parameter_scope("affine1"): y = PF.affine(x, 10) # Output is 10 class # This code block shows one of the most important features in graph building in NNabla, the **parameter scope**. The first line defines an input variable `x`. The second line creates a **parameter scope**. The third line then applies `PF.affine` - an affine transform - to `x`, and creates a variable `y` holding that result. Here, the `PF` (parametric_function) module provides functions that contain learnable parameters, such as affine transforms (which contains weights), convolution (which contains kernels) and batch normalization (which contains transformation factors and coefficients). We will call these functions as **parametric functions**. 
The parameters are created and initialized randomly at function call, and registered by a name "affine1" using `parameter_scope` context. # Building a loss graph t = nn.Variable(label.shape) # Define an target variable loss = F.mean(F.softmax_cross_entropy(y, t)) # Softmax Xentropy fits multi-class classification problems # The remaining lines shown above define a target variable and attach functions for loss at the end of the graph. Note that the static graph build doesn't execute any computation, but the shapes of output variables are inferred. Therefore, we can inspect the shapes of each variable at this time: print("Printing shapes of variables") print(x.shape) print(y.shape) print(t.shape) print(loss.shape) # empty tuple means scalar # ### Executing a static graph # # You can execute the computation of the graph by calling the `forward()` method in a sink variable. Inputs can be set via `.d` accessor. It will borrow CPU array references as `numpy.ndarray`. # Set data x.d = img t.d = label # Execute a forward pass loss.forward() # Showing results print("Prediction score of 0-th image:", y.d[0]) print("Loss:", loss.d) # The output doesn't make sense since the network is just randomly initialized. # ### Backward propagation through the graph # # The parameters registered by `parameter_scope` management function can be queried by `get_parameters()` as a dict format. print(nn.get_parameters()) # Before executing backpropagation, we should initialize gradient buffers of all parameter to zeros. for param in nn.get_parameters().values(): param.grad.zero() # Then, you can execute backprop by calling `backward()` method at the sink variable. # Compute backward loss.backward() # Showing gradients. for name, param in nn.get_parameters().items(): print(name, param.shape, param.g.flat[:20]) # Showing first 20. # Gradient is stored in grad field of `Variable`. `.g` accessor can be used to access grad data in `numpy.ndarray` format. # ### Optimizing parameters (=Training) # # To optimize parameters, we provide solver module (aliased as S here). The solver module contains a bunch of optimizer implementations such as SGD, SGD with momentum, Adam etc. The below block creates SGD solver and sets parameters of logistic regression to it. # Create a solver (gradient-based optimizer) learning_rate = 1e-3 solver = S.Sgd(learning_rate) solver.set_parameters(nn.get_parameters()) # Set parameter variables to be updated. # In the next block, we demonstrate a single step of optimization loop. `solver.zero_grad()` line does equivalent to calling `.grad.zero()` for all parameters as we shown above. After backward computation, we apply weight decay, then applying gradient descent implemented in Sgd solver class as follows # # $$ # \theta \leftarrow \theta - \eta \nabla_{\theta} L(\theta, X_{\mathrm minibatch}) # $$ # # where $\eta$ denotes learning rate. # One step of training x.d, t.d = data.next() loss.forward() solver.zero_grad() # Initialize gradients of all parameters to zero. loss.backward() solver.weight_decay(1e-5) # Applying weight decay as an regularization solver.update() print(loss.d) # Next block iterates optimization steps, and shows the loss decreases. for i in range(1000): x.d, t.d = data.next() loss.forward() solver.zero_grad() # Initialize gradients of all parameters to zero. 
loss.backward() solver.weight_decay(1e-5) # Applying weight decay as an regularization solver.update() if i % 100 == 0: # Print for each 10 iterations print(i, loss.d) # ### Show prediction # # The following code displays training results. x.d, t.d = data.next() # Here we predict images from training set although it's useless. y.forward() # You can execute a sub graph. plt.imshow(tile_images(x.d), **imshow_opt) print("prediction:") print(y.d.argmax(axis=1).reshape(8, 8)) # Taking a class index based on prediction score. # ### Dynamic graph construction support # This is another way of running computation graph in NNabla. This example doesn't show how useful dynamic graph is, but shows a bit of flavor. # The next block just define computation graph building as functions for later use. # + def logreg_forward(x): with nn.parameter_scope("affine1"): y = PF.affine(x, 10) return y def logreg_loss(y, t): loss = F.mean(F.softmax_cross_entropy(y, t)) # Softmax Xentropy fits multi-class classification problems return loss # - # To run a computation graph dynamically during creation, you use `nnabla.auto_forward()` context as you see in the below block. By this, computation is fired immediately at functions are called. (You can also use `nnabla.set_auto_forward(auto)` to set the auto-forward state globally.) x = nn.Variable(img.shape) t = nn.Variable(label.shape) x.d, t.d = data.next() with nn.auto_forward(): # Graph are executed y = logreg_forward(x) loss = logreg_loss(y, t) print("Loss:", loss.d) plt.imshow(tile_images(x.d), **imshow_opt) print("prediction:") print(y.d.argmax(axis=1).reshape(8, 8)) # Backward computation can be done on a dynamically constructed graph. solver.zero_grad() loss.backward() # ## Multi-Layer Perceptron (MLP) # In this section, you see an example of MLP graph building and training. # # Before starting, we clear all parameters registered in the logistic regression example. nn.clear_parameters() # Clear all parameters # Here is the function that builds a MLP with an arbitrary depth and width for 10 class classification. def mlp(x, hidden=[16, 32, 16]): hs = [] with nn.parameter_scope("mlp"): # Parameter scope can be nested h = x for hid, hsize in enumerate(hidden): with nn.parameter_scope("affine{}".format(hid + 1)): h = F.tanh(PF.affine(h, hsize)) hs.append(h) with nn.parameter_scope("classifier"): y = PF.affine(h, 10) return y, hs # Construct a MLP graph y, hs = mlp(x) print("Printing shapes") print("x:", x.shape) for i, h in enumerate(hs): print("h{}:".format(i + 1), h.shape) print("y:", y.shape) # + # Training loss = logreg_loss(y, t) # Reuse logreg loss function. # Copied from the above logreg example. def training(steps, learning_rate): solver = S.Sgd(learning_rate) solver.set_parameters(nn.get_parameters()) # Set parameter variables to be updated. for i in range(steps): x.d, t.d = data.next() loss.forward() solver.zero_grad() # Initialize gradients of all parameters to zero. 
loss.backward() solver.weight_decay(1e-5) # Applying weight decay as an regularization solver.update() if i % 100 == 0: # Print for each 10 iterations print(i, loss.d) # Training training(1000, 1e-2) # + # Showing responses for each layer num_plot = len(hs) + 2 gid = 1 def scale01(h): return (h - h.min()) / (h.max() - h.min()) def imshow(img, title): global gid plt.subplot(num_plot, 1, gid) gid += 1 plt.title(title) plt.imshow(img, **imshow_opt) plt.axis('off') plt.figure(figsize=(2, 5)) imshow(x.d[0, 0], 'x') for hid, h in enumerate(hs): imshow(scale01(h.d[0]).reshape(-1, 8), 'h{}'.format(hid + 1)) imshow(scale01(y.d[0]).reshape(2, 5), 'y') # - # ## Convolutional Neural Network with CUDA acceleration # # Here we demonstrates a CNN with CUDA GPU acceleration. nn.clear_parameters() def cnn(x): with nn.parameter_scope("cnn"): # Parameter scope can be nested with nn.parameter_scope("conv1"): c1 = F.tanh(PF.batch_normalization( PF.convolution(x, 4, (3, 3), pad=(1, 1), stride=(2, 2)))) with nn.parameter_scope("conv2"): c2 = F.tanh(PF.batch_normalization( PF.convolution(c1, 8, (3, 3), pad=(1, 1)))) c2 = F.average_pooling(c2, (2, 2)) with nn.parameter_scope("fc3"): fc3 = F.tanh(PF.affine(c2, 32)) with nn.parameter_scope("classifier"): y = PF.affine(fc3, 10) return y, [c1, c2, fc3] # To enable CUDA extension in NNabla, you have to install nnabla-ext-cuda package first. See [the install guide](http://nnabla.readthedocs.io/en/latest/python/installation.html). # After installing the CUDA extension, you can easily switch to run on CUDA by specifying a context before building a graph. We strongly recommend using a CUDNN context that is fast. Although the context class can be instantiated by `nn.Context()`, specifying a context descriptor might be a bit complicated for users. There for we recommend create a context by using a helper function `get_extension_context()` found in the `nnabla.ext_utils` module. NNabla officially supports `cpu` and `cudnn` as a context specifier passed to the first argument (extension name). NOTE: By setting the cudnn context as a global default context, Functions and solves created are instantiated with CUDNN (preferred) mode. You can also specify a context using `with nn.context_scope()`. See [API reference](http://nnabla.readthedocs.io/en/latest/python/api/common.html#context) for details. # Run on CUDA from nnabla.ext_utils import get_extension_context cuda_device_id = 0 ctx = get_extension_context('cudnn', device_id=cuda_device_id) print("Context:", ctx) nn.set_default_context(ctx) # Set CUDA as a default context. y, hs = cnn(x) loss = logreg_loss(y, t) training(1000, 1e-1) # Showing responses for each layer num_plot = len(hs) + 2 gid = 1 plt.figure(figsize=(2, 8)) imshow(x.d[0, 0], 'x') imshow(tile_images(hs[0].d[0][:, None]), 'conv1') imshow(tile_images(hs[1].d[0][:, None]), 'conv2') imshow(hs[2].d[0].reshape(-1, 8), 'fc3') imshow(scale01(y.d[0]).reshape(2, 5), 'y') # `nn.save_parameters` writes parameters registered in `parameter_scope` system in HDF5 format. We use it a later example. path_cnn_params = "tmp.params.cnn.h5" nn.save_parameters(path_cnn_params) # ## Recurrent Neural Network (Elman RNN) # This is an example of recurrent neural network training. nn.clear_parameters() def rnn(xs, h0, hidden=32): hs = [] with nn.parameter_scope("rnn"): h = h0 # Time step loop for x in xs: # Note: Parameter scopes are reused over time # which means parameters are shared over time. 
with nn.parameter_scope("x2h"): x2h = PF.affine(x, hidden, with_bias=False) with nn.parameter_scope("h2h"): h2h = PF.affine(h, hidden) h = F.tanh(x2h + h2h) hs.append(h) with nn.parameter_scope("classifier"): y = PF.affine(h, 10) return y, hs # It is not meaningful, but just a demonstration purpose. We split an image into 2 by 2 grids, and feed them sequentially into RNN. def split_grid4(x): x0 = x[..., :4, :4] x1 = x[..., :4, 4:] x2 = x[..., 4:, :4] x3 = x[..., 4:, 4:] return x0, x1, x2, x3 hidden = 32 seq_img = split_grid4(img) seq_x = [nn.Variable(subimg.shape) for subimg in seq_img] h0 = nn.Variable((img.shape[0], hidden)) # Initial hidden state. y, hs = rnn(seq_x, h0, hidden) loss = logreg_loss(y, t) # + # Copied from the above logreg example. def training_rnn(steps, learning_rate): solver = S.Sgd(learning_rate) solver.set_parameters(nn.get_parameters()) # Set parameter variables to be updated. for i in range(steps): minibatch = data.next() img, t.d = minibatch seq_img = split_grid4(img) h0.d = 0 # Initialize as 0 for x, subimg in zip(seq_x, seq_img): x.d = subimg loss.forward() solver.zero_grad() # Initialize gradients of all parameters to zero. loss.backward() solver.weight_decay(1e-5) # Applying weight decay as an regularization solver.update() if i % 100 == 0: # Print for each 10 iterations print(i, loss.d) training_rnn(1000, 1e-1) # - # Showing responses for each layer num_plot = len(hs) + 2 gid = 1 plt.figure(figsize=(2, 8)) imshow(x.d[0, 0], 'x') for hid, h in enumerate(hs): imshow(scale01(h.d[0]).reshape(-1, 8), 'h{}'.format(hid + 1)) imshow(scale01(y.d[0]).reshape(2, 5), 'y') # ## Siamese Network # This example show how to embed an image in a categorical dataset into 2D space using deep learning. This also demonstrates how to reuse a pretrained network. # # First, we load parameters learned in the CNN example. nn.clear_parameters() # Loading CNN pretrained parameters. _ = nn.load_parameters(path_cnn_params) # We define embedding function. Note that the network structure and parameter hierarchy is identical to the previous CNN example. That enables you to reuse the saved parameters and finetune from it. # + def cnn_embed(x, test=False): # Note: Identical configuration with the CNN example above. # Parameters pretrained in the above CNN example are used. with nn.parameter_scope("cnn"): with nn.parameter_scope("conv1"): c1 = F.tanh(PF.batch_normalization(PF.convolution(x, 4, (3, 3), pad=(1, 1), stride=(2, 2)), batch_stat=not test)) with nn.parameter_scope("conv2"): c2 = F.tanh(PF.batch_normalization(PF.convolution(c1, 8, (3, 3), pad=(1, 1)), batch_stat=not test)) c2 = F.average_pooling(c2, (2, 2)) with nn.parameter_scope("fc3"): fc3 = PF.affine(c2, 32) # Additional affine for map into 2D. with nn.parameter_scope("embed2d"): embed = PF.affine(c2, 2) return embed, [c1, c2, fc3] def siamese_loss(e0, e1, t, margin=1.0, eps=1e-4): dist = F.sum(F.squared_error(e0, e1), axis=1) # Squared distance # Contrastive loss sim_cost = t * dist dissim_cost = (1 - t) * \ (F.maximum_scalar(margin - (dist + eps) ** (0.5), 0) ** 2) return F.mean(sim_cost + dissim_cost) # - # We build two stream CNNs and compare them with the contrastive loss function defined above. Note that both CNNs have the same parameter hierarchy, which means both parameters are shared. 
x0 = nn.Variable(img.shape) x1 = nn.Variable(img.shape) t = nn.Variable((img.shape[0],)) # Same class or not e0, hs0 = cnn_embed(x0) e1, hs1 = cnn_embed(x1) # NOTE: parameters are shared loss = siamese_loss(e0, e1, t) def training_siamese(steps): for i in range(steps): minibatchs = [] for _ in range(2): minibatch = data.next() minibatchs.append((minibatch[0].copy(), minibatch[1].copy())) x0.d, label0 = minibatchs[0] x1.d, label1 = minibatchs[1] t.d = (label0 == label1).astype(np.int).flat loss.forward() solver.zero_grad() # Initialize gradients of all parameters to zero. loss.backward() solver.weight_decay(1e-5) # Applying weight decay as an regularization solver.update() if i % 100 == 0: # Print for each 10 iterations print(i, loss.d) learning_rate = 1e-2 solver = S.Sgd(learning_rate) with nn.parameter_scope("embed2d"): # Only 2d embedding affine will be updated. solver.set_parameters(nn.get_parameters()) training_siamese(2000) # Decay learning rate solver.set_learning_rate(solver.learning_rate() * 0.1) training_siamese(2000) # We visualize embedded training images as following. You see the images from the same class embedded near each other. all_image = digits.images[:512, None] all_label = digits.target[:512] x_all = nn.Variable(all_image.shape) x_all.d = all_image with nn.auto_forward(): embed, _ = cnn_embed(x_all, test=True) plt.figure(figsize=(16, 9)) for i in range(10): c = plt.cm.Set1(i / 10.) # Maybe it doesn't work in an older version of Matplotlib where color map lies in [0, 256) plt.plot(embed.d[all_label == i, 0].flatten(), embed.d[ all_label == i, 1].flatten(), '.', c=c) plt.legend(list(map(str, range(10)))) plt.grid() # ## Appendix # # ### A. Logistic Regression # Here we demonstrate how to train the simplest neural network, logistic regression (single layer perceptron). Logistic regression is a linear classifier $f : {\cal R}^{D\times 1} \rightarrow {\cal R}^{K\times 1}$ # # $$ # \mathbf f(\mathbf x, \mathbf \Theta) = \mathbf W \mathbf x + \mathbf b # $$ # # where $\mathbf x \in {\cal R}^{D \times 1}$ is an input image flattened to a vector, $t \in \{0, 1, \cdots, K\}$ is a target label, $\mathbf W \in {\cal R}^{K \times D}$ is a weight matrix, $\mathbf b \in {\cal R}^{K \times 1}$ is a bias vector and $\mathbf \Theta \equiv \left\{\mathbf W, \mathbf b\right\}$. Loss function is defined as # # $$ # \mathbf L(\mathbf \Theta, \mathbf X) = \frac{1}{N} \sum_{\mathbf x, t \subset \mathbf X} # -log \left(\left[\sigma\left(f(\mathbf x, \mathbf \Theta)\right)\right]_{t}\right) # $$ # # where $\mathbf X \equiv \left\{\mathbf x_1, t_1, \cdots, \mathbf x_N, t_N\right\}$ denotes a dataset the network trained on, $\sigma(\mathbf z)$ is softmax operation defined as $\frac{\exp(-\mathbf z)}{\sum_{z \subset \mathbf z} \exp(-z)}$, and $\left[\mathbf z\right]_i$ denotes i-th element of $\mathbf z$.
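# The logistic regression of Appendix A can also be written out in a few lines of NumPy, which
# makes the shapes concrete: `W` is $K \times D$, an input `x` is $D \times 1$, and the loss is
# the negative log of the probability the softmax assigns to the true class. This is an
# illustrative sketch independent of the NNabla graphs built earlier (here $D=64$ for the
# flattened 8x8 digits and $K=10$ classes).

# +
import numpy as np

rng = np.random.default_rng(0)
D, K = 64, 10
W, b = rng.normal(scale=0.01, size=(K, D)), np.zeros((K, 1))

def softmax(z):
    z = z - z.max(axis=0)        # subtract the max for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=0)

x_in = rng.normal(size=(D, 1))   # one flattened input image
t_true = 3                       # its target label
p = softmax(W @ x_in + b)        # class probabilities, shape (K, 1)
loss = -np.log(p[t_true, 0])
print(p.shape, float(loss))
# -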
tutorial/by_examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GabrielaRodriguez29/daa_2021_1/blob/master/4noviembre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="CT81jGWv0uGO" outputId="0f3e2807-8dfd-4f80-ffc7-0e24c2f4d24e" colab={"base_uri": "https://localhost:8080/"}
# Count the palindromic times on a 24-hour clock (HH:MM reads the same forwards and backwards).
contador = 0
horas = []
for hor_izq in range(3):               # tens digit of the hour (0-2)
    for hor_der in range(10):          # units digit of the hour
        if hor_izq == 2 and hor_der >= 4:
            continue                   # skip the invalid hours 24-29
        for min_izq in range(6):       # tens digit of the minute (0-5)
            for min_der in range(10):  # units digit of the minute
                if hor_izq == min_der and hor_der == min_izq:
                    print(f" {hor_izq}{hor_der}:{min_izq}{min_der}")
                    contador += 1
print(f"Found {contador} palindromic times")
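# An equivalent way to count these mirrored times is to format every valid HH:MM combination
# and compare the four digits with their reverse; this alternative is a sketch added for
# illustration and yields the same count as the digit-by-digit loop above.

# +
palindromes = []
for hour in range(24):
    for minute in range(60):
        digits = f"{hour:02d}{minute:02d}"
        if digits == digits[::-1]:
            palindromes.append(f"{hour:02d}:{minute:02d}")
print(len(palindromes), palindromes)
# -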
4noviembre.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: commons # language: python # name: commons # --- # + import pandas as pd train = pd.read_csv("../../data/interim/train.csv") train.head() # - from sklearn.preprocessing import KBinsDiscretizer kbins = KBinsDiscretizer(n_bins=5, encode='ordinal', strategy='quantile') # ‘uniform’, ‘quantile’, ‘kmeans’ train['amount_discretized'] = kbins.fit_transform(train[['amount']].values) agg_values = train.groupby(by=['amount_discretized']).mean() columns_to_agg = ['v1'] agg_values = agg_values[columns_to_agg] agg_values.columns = [x + "_mean_given_amount" for x in agg_values.columns] train = train.merge(agg_values, how='left', on=['amount_discretized']) train.drop(['amount_discretized'], axis=1, inplace=True) print(train.shape) train.head() from sklearn.base import BaseEstimator, TransformerMixin class AggByAmount(BaseEstimator, TransformerMixin): # Inputs: bins, encode, strategy ('uniform', 'quantile', 'kmeans'), number of top features, mean/max/min # Top features order: ['v1', 'v4', 'v10', 'v7', 'v18', 'v11', 'v20', 'amount', 'v3', 'v16', 'v13', 'v14', 'v8', 'v9', 'v19', 'v2', 'v5', 'v12', 'v26', 'v24', 'v25', 'v27', 'v17', 'v22', 'v23', 'v6', 'v15', 'v21'] def __init__(self, n_bins=5, encode='ordinal', strategy='quantile', columns_to_agg=['v1']): self.n_bins = n_bins self.encode = encode self.strategy = strategy self.columns_to_agg = columns_to_agg self.kbins = None self.initial_columns = None def fit(self, X, y=None): self.kbins = KBinsDiscretizer(n_bins=self.n_bins, encode=self.encode, strategy=self.strategy) self.kbins.fit(X[['amount']].values) self.initial_columns = list(X.columns) return self def transform(self, X, y=None): X['amount_discretized'] = self.kbins.transform(X[['amount']].values) agg_values = X.groupby(by=['amount_discretized']).mean() agg_values = agg_values[self.columns_to_agg] agg_values.columns = [x + "_mean_given_amount" for x in agg_values.columns] X = X.merge(agg_values, how='left', on=['amount_discretized']) X.drop(self.initial_columns + ['amount_discretized'], axis=1, inplace=True) return X agg_by_amount = AggByAmount() agg_by_amount.fit(train) agg_by_amount.transform(train).head()
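# Because `AggByAmount` follows the standard scikit-learn fit/transform contract, it can be
# exercised on a small synthetic frame without the project's `train.csv`. The sketch below only
# assumes the two column names the class expects (`amount` and `v1`); the data itself is made up
# for illustration.

# +
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
toy = pd.DataFrame({'amount': rng.exponential(scale=100, size=200),
                    'v1': rng.normal(size=200)})

agg = AggByAmount(n_bins=4, columns_to_agg=['v1'])
features = agg.fit_transform(toy.copy())  # pass a copy: transform() adds a helper column to its input
print(features.columns.tolist())          # ['v1_mean_given_amount']
print(features.shape)                     # (200, 1)
# -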
notebooks/main/4.aggregation_exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # Working with Tables # # When working with Python, there are two ways to do actions on DynamoDB tables - `DynamoDB.Client` and `DynamoDB.Resource`. `DynamoDB.Client` is a low-level client representing DynamoDB and matching SDK APIs as one on one. `DynamoDB.Resource`, on the other hand, provides abstract ways to communicate SDK APIs and more user-friendly. # # In this hands on, we're going to use `DynamoDB.Resources` to get our hands dirty. No worries because the two ways provide similar methods and if you get familiar with one of them, I believe you can do the other one easily as well. # # Python library, named `boto3`, are all documented [Boto3 Docs](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html). Please always open this documentation to get familiar with APIs and parameters during this hands on. # ## Creating a Table # # You will create `Starbucks` table which stores all Starbucks locations in the world by the provided information in [Kaggle](https://www.kaggle.com/starbucks/store-locations). The provided file in the given link has been uploaded to `data/starbucks.csv`. # # When opening the file, you will see the csv-formatted data as follows. # # ```csv # Brand,Store Number,Store Name,Ownership Type,Street Address,City,State/Province,Country,Postcode,Phone Number,Timezone,Longitude,Latitude # Starbucks,47323-257470,Korea press center,Joint Venture,"Taepyungro, 25, Taepyungro1ga, Junggu, Seoul",Seoul,11,KR,4520,,GMT+09:00 Asia/Seoul,126.98,37.57 # Starbucks,20937-209004,Seoul Womens Univ.,Joint Venture,"621 Hwarang-ro Nowon-gu, Seoul, Korea",Seoul,11,KR,139-774,,GMT+09:00 Asia/Seoul,127.53,37.37 # [..] # ``` # # `Store Number` is the unique ID of branches and this can be the good candidate for primary key. We'll use the column as partition key and not set sort key. Here is a simple description for the table. # # ``` # Table Name: Starbucks # Partition Key: StoreNumber # ``` # # As you learnt, there are two capacity modes - provisioned and on-demand. In this hands on, let's use on-demand in all examples to make it simple because our goal is just getting accustomed to the data operation. # + # import and get dynamodb resource import boto3 from boto3.dynamodb.conditions import Key, Attr from botocore.exceptions import ClientError from pprint import pprint from decimal import Decimal dynamodb = boto3.resource('dynamodb') # + # Create StarbucksLocations table starbucks = dynamodb.create_table( TableName='Starbucks', AttributeDefinitions=[ { 'AttributeName': 'StoreNumber', 'AttributeType': 'S' } ], KeySchema=[ { 'AttributeName': 'StoreNumber', 'KeyType': 'HASH' } ], BillingMode='PROVISIONED', ProvisionedThroughput={ 'ReadCapacityUnits': 100, 'WriteCapacityUnits': 100 } ) print(starbucks) # - # Wait until the table has created starbucks.wait_until_exists() # ## Describing a Table # # To view details about a table, use the `DescribeTable` operation. If `DynamoDB.Resource` variable is set, the table variable itself contains the information as attributes. 
# # - archival_summary # - attribute_definitions # - billing_mode_summary # - creation_date_time # - global_secondary_indexes # - global_table_version # - item_count # - key_schema # - latest_stream_arn # - latest_stream_label # - local_secondary_indexes # - provisioned_throughput # - replicas # - restore_summary # - sse_description # - stream_specification # - table_arn # - table_id # - table_name # - table_size_bytes # - table_status # Refresh the table information starbucks.load() # Check various table information print('Capacity mode:') pprint(starbucks.billing_mode_summary) print('Provisioned capacity:') pprint(starbucks.provisioned_throughput) print('Key schema:') pprint(starbucks.key_schema) # ## Updating a Table # # Assume that you want to change the table's capacity mode from on-demand to provisioned mode. The mode is changeable and the change is allowed once in 24 hours. # # Here is a sample command to update the table's capacity mode. # Change capacity mode to provisioned # It will take roughly 5 mins to complete starbucks = starbucks.update( BillingMode='PAY_PER_REQUEST' ) # + # Reload table information and check capacity mode starbucks.load() print('Capacity mode:') pprint(starbucks.billing_mode_summary) print('Provisioned capacity:') pprint(starbucks.provisioned_throughput) print('Table status:') pprint(starbucks.table_status) # - starbucks.wait_until_exists() # As the application developers are requesting that they need a new access pattern to the table. They want to search the table with `Country` and `State#City` attributes. # # To accomodate the request, you need to create a GSI as described below. # # ``` # GSI Name: GSI_01_Locations # Partition Key: Country # Sort Key: StateCity (concatenation of State and City, e.g. AZ#Abu Dhabi) # ``` # Create a GSI with updating the table starbucks = starbucks.update( AttributeDefinitions=[ { 'AttributeName': 'Country', 'AttributeType': 'S' }, { 'AttributeName': 'StateCity', 'AttributeType': 'S' } ], GlobalSecondaryIndexUpdates=[ { 'Create': { 'IndexName': 'GSI_01_Locations', 'KeySchema': [ { 'AttributeName': 'Country', 'KeyType': 'HASH' }, { 'AttributeName': 'StateCity', 'KeyType': 'RANGE' } ], 'Projection': { 'ProjectionType': 'ALL' } } } ] ) # + # Reload table information and check capacity mode starbucks.load() print('Index status:') pprint(starbucks.global_secondary_indexes) # - # ## Deleting a Table # # You can remove an unused table with the `DeleteTable` operation. It is unrecoverable. # Just check the command, do not delete it actually starbucks.delete()
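# Looking back at the `GSI_01_Locations` index created above: once the table exists and has been
# loaded with the CSV data (loading items is outside the scope of this notebook), a query against
# the index might look like the sketch below. The key values are illustrative only.

from boto3.dynamodb.conditions import Key

# Query the stores in Seoul, South Korea through the GSI
response = starbucks.query(
    IndexName='GSI_01_Locations',
    KeyConditionExpression=Key('Country').eq('KR') & Key('StateCity').begins_with('11#Seoul')
)
print(response['Count'])  # number of matching items (0 until items are loaded)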
01_Working with Tables.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Deploying Machine Learning Models on GCP Kubernetes (with RBAC) # # <img src="images/deploy-graph.png" alt="predictor with canary" title="ml graph"/> # ## Prerequisites # - You need a running GCP cluster with kubernetes>1.8 with kubectl configured to use. # - If you wish to test the JSON schema checks you will need presently to enbale "alpha features" for your cluster (Jan 2018). # - [Git clone of Seldon Core](https://github.com/SeldonIO/seldon-core) # - [Helm](https://github.com/kubernetes/helm) # - [python grpc tools](https://grpc.io/docs/quickstart/python.html) # ## Install helm # !kubectl -n kube-system create sa tiller # !kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller # !helm init --service-account tiller # ## Start Seldon-Core # !helm install ../helm-charts/seldon-core-crd --name seldon-core-crd \ # --set usage_metrics.enabled=true # !kubectl create namespace seldon # !helm install ../helm-charts/seldon-core --name seldon-core \ # --namespace seldon # ## Set up REST and gRPC methods # # **Ensure you port forward the seldon api-server REST and GRPC ports**: # # REST: # ``` # kubectl port-forward $(kubectl get pods -n seldon -l app=seldon-apiserver-container-app -o jsonpath='{.items[0].metadata.name}') -n seldon 8002:8080 # ``` # # GRPC: # ``` # kubectl port-forward $(kubectl get pods -n seldon -l app=seldon-apiserver-container-app -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:5000 # ``` # !cp ../proto/prediction.proto ./proto # !python -m grpc.tools.protoc -I. --python_out=. --grpc_python_out=. ./proto/prediction.proto from seldon_utils import * API_GATEWAY_REST="localhost:8002" API_GATEWAY_GRPC="localhost:8003" # ## Normal Operation # ### Create Seldon Deployment # !kubectl create -f resources/model.json -n seldon # Get the status of the SeldonDeployment. **When ready the replicasAvailable should be 1**. # !kubectl get seldondeployments seldon-model -o jsonpath='{.status}' -n seldon # ### Get Predictions # #### REST Request rest_request_api_gateway("oauth-key","oauth-secret",API_GATEWAY_REST) # #### gRPC Request grpc_request_api_gateway("oauth-key","oauth-secret",API_GATEWAY_REST,API_GATEWAY_GRPC) # ## Update Deployment with Canary # !kubectl apply -f resources/model_with_canary.json -n seldon # Check the status of the deployments. Note: **Might need to run several times until replicasAvailable is 1 for both predictors**. # !kubectl get seldondeployments seldon-model -o jsonpath='{.status}' -n seldon rest_request_api_gateway("oauth-key","oauth-secret",API_GATEWAY_REST) grpc_request_api_gateway("oauth-key","oauth-secret",API_GATEWAY_REST,API_GATEWAY_GRPC) # ## Tear Down # !kubectl delete -f resources/model.json -n seldon # !helm delete seldon-core --purge # !helm delete seldon-core-crd --purge
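# The `rest_request_api_gateway` helper used above wraps a two-step flow: fetch an OAuth token
# from the Seldon API gateway, then POST a prediction payload. A rough sketch of that flow with
# `requests` is shown below; the endpoint paths and payload layout are assumptions based on the
# classic Seldon Core OAuth gateway and may differ between versions, so treat this as
# illustrative rather than as the project's API.
import requests

def rest_predict_sketch(key, secret, host):
    # exchange the OAuth key/secret for an access token (assumed endpoint)
    token = requests.post(
        f"http://{host}/oauth/token",
        auth=(key, secret),
        data={"grant_type": "client_credentials"},
    ).json()["access_token"]
    payload = {"data": {"ndarray": [[1.0, 2.0, 3.0]]}}  # toy feature vector
    # send the prediction request with the bearer token (assumed endpoint)
    return requests.post(
        f"http://{host}/api/v0.1/predictions",
        headers={"Authorization": f"Bearer {token}"},
        json=payload,
    ).json()

# rest_predict_sketch("oauth-key", "oauth-secret", API_GATEWAY_REST)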
notebooks/kubectl_demo_gcp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# 1 Smaller of two evens: Write a function that returns the smaller of two given numbers if both
# numbers are even, but returns the larger one if either of them is odd. Example:
# menor_de_dois_pares(2,4) --> 2
# menor_de_dois_pares(2,5) --> 5

# +
def menor_de_dois_pares(num01, num02):
    if num01 % 2 == 0 and num02 % 2 == 0:
        if num01 > num02:
            return num02
        else:
            return num01
    else:
        if num01 > num02:
            return num01
        else:
            return num02

print(menor_de_dois_pares(20,10))
print(menor_de_dois_pares(16,11))
print(menor_de_dois_pares(23,18))
print(menor_de_dois_pares(9,20))
# -

# 2 Same letter: Write a function that receives a string with two words and returns True if both
# words start with the same letter. Example:
# mesma_letra('<NAME>') -> True
# mesma_letra('<NAME>') -> False

# +
def iniciais_iguais(palavra):
    partes = palavra.split(" ")
    if partes[0][0] == partes[1][0]:
        return True
    return False

print(iniciais_iguais("<NAME>"))
print(iniciais_iguais("<NAME>"))
print(iniciais_iguais("<NAME>"))
print(iniciais_iguais("Scoot Summer"))
# -

print(iniciais_iguais('<NAME>'))

# 3 Master Yoda: Given a sentence, the function must return the sentence with its words in
# reverse order. Example:
# mestre_yoda('Eu estou em casa') --> 'casa em estou Eu'
# mestre_yoda('Estamos prontos') --> 'prontos Estamos'

# +
def frase_invertida(frase):
    lista = frase.split(" ")
    return lista[::-1]

print(frase_invertida("juro solenemente não fazer nada de bom"))
print(frase_invertida("mal feito feito"))
print(frase_invertida("Alter all this time ... ALWAYS"))
print(frase_invertida("babuinos bobocas balbuciando em bando"))
# -

# 4 Has 33: Write a function that returns True if, in a given list of integers, there is a 3 next
# to another 3 at some position in the list. Example:
# tem_33([1,3,3]) --> True
# tem_33([1,3,1,3]) --> False
# tem_33([3,1,3]) --> False

# +
def tem_33(lista):
    status = "Nada"
    for simbolo in lista:
        if simbolo == 3:
            if status == "Nada":
                status = "Tres"
            elif status == "Tres":
                return True
        else:
            if status == "Tres":
                status = "Nada"
    return False

print(tem_33([1, 3, 3, 5, 6, 3]))
print(tem_33([1, 6, 1, 3, 3]))
print(tem_33([0, 9, 3]))
# -

# 5 Blackjack: Write a function that receives 3 integers between 1 and 11. If their sum is at
# most 21, return the sum. If it is greater than 21 and one of the numbers is an 11, subtract 10
# from the sum before presenting the result. If the sum goes over 21, return 'ESTOUROU'. Example:
# blackjack(5,6,7) --> 18
# blackjack(9,9,9) --> 'ESTOUROU'
# blackjack(9,9,11) --> 19

# +
def blackjack(a, b, c):
    soma = a + b + c
    if soma <= 21:
        return soma
    else:
        if 11 in [a, b, c]:
            return soma - 10
        else:
            return ('ESTOUROU', soma)

print(blackjack(4, 5, 10))
print(blackjack(9, 9, 11))
print(blackjack(7, 7, 7))
print(blackjack(9, 9, 9))
# -

# 6 Spy: Write a function that receives a list of integers and returns True if it contains a
# 0, 0, 7 in that order, even if not contiguous. Example:
# espiao([1,2,4,0,0,7,5]) --> True
# espiao([1,0,2,4,0,5,7]) --> True
# espiao([1,7,2,4,0,5,0]) --> False
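# Exercise 6 is stated above but left unsolved in this notebook. One possible solution is the
# sketch below: walk the list and tick off 0, 0, 7 in order (not necessarily adjacent).

# +
def espiao(lista):
    codigo = [0, 0, 7]
    for numero in lista:
        if codigo and numero == codigo[0]:
            codigo.pop(0)
    return len(codigo) == 0

print(espiao([1, 2, 4, 0, 0, 7, 5]))  # True
print(espiao([1, 0, 2, 4, 0, 5, 7]))  # True
print(espiao([1, 7, 2, 4, 0, 5, 0]))  # False
# -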
Ex04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Mathematically Formalizing the Conviction Voting Algorithm # # ## Background # --- # Conviction Voting is an approach to organizing a community's preferences into discrete decisions in the management of that community's resources. Strictly speaking conviction voting is less like voting and more like signal processing. Framing the approach and the initial algorithm design was done by <NAME> and published in a short research proposal [Social Sensor Fusion](https://github.com/BlockScience/conviction/blob/master/social-sensorfusion.pdf). This work is based on a dynamic resource allocation algorithm presented in Dr. Zargham's PhD Thesis. # # The work proceeded in collaboration with the Commons Stack, including expanding on the python implementation to makeup part of the Commons Simulator game. An implemention of Conviction Voting as a smart contract within the Aragon Framework was developed by [1Hive](https://1hive.org/) and is currently being used for community decision making around allocations of their community currency, Honey. # # ## Defining the Word Problem # ___ # # Suppose a group of people want to coordinate to make a collective decision. Social dynamics such as discussions, signaling, and even changing ones mind based on feedback from other's input play an important role in these processes. While the actual decision making process involves a lot of informal processes, in order to be fair the ultimate decision making process still requires a set of formal rules that the community collecively agrees to, which serves to functionally channel a plurality of preferences into discrete outcomes. In our case we are interested in a procedure which supports asynchronous interactions, and provides visibility into likely outcomes prior to their resolution to serve as a driver of good faith, debate, and healthy forms of coalition building. Furthermore, participations should be able to show support for multiple initiatives, and to vary the level of support shown. Participants have a quantity of signaling power which may be fixed or variable, homogenous or heterogenous. For the purpose of this document, we'll focus on the case where the discrete decisions to be made are decisions to allocate funds from a shared funding pool towards projects of interest to the community. # # ## Converting to a Math Problem # ___ # # Let's start taking these words and constructing a mathematical representation that supports a design that meets the description above. To start we need to define participants. # # ### Defining the Participants # # ___ # # Let $\mathcal{A}$ be the set of participants. Consider a participant $a\in \mathcal{A}$. Any participant $a$ has some capacity to participate in the voting process $h[a]$. In a fixed quantity, homogenous system $h[a] = h$ for all $a\in \mathcal{A}$ where $h$ is a constant. The access control process managing how one becomes a participant determines the total supply of "votes" $S = \sum_{a\in \mathcal{A}} = n\cdot h$ where the number of participants is $n = |\mathcal{A}|$. In a smart contract setting, the set $\mathcal{A}$ is a set of addresses, and $h[a]$ is a quantity of tokens held by each address $a\in \mathcal{A}$. # # ### Defining Proposals & Shared Resources # # ___ # # Next, we introduce the idea of proposals. 
Consider a proposal $i\in \mathcal{C}$. Any proposal $i$ is associated with a request for resources $r[i]$. Those requested resources would be allocated from a constrained pool of communal resources currently totaling $R$. The pool of resources may become depleted because when a proposal $i$ passes $R^+= R-r[i]$. Therefore it makes sense for us to consider what fraction of the shared resources are being requested $\mu_i = \frac{r[i]}{R}$, which means that the resource depletion from passing proposals can be bounded by requiring $\mu_i < \mu$ where $\mu$ is a constant representing the maximum fraction of the shared resources which can be dispersed by any one proposal. In order for the system to be sustainable a source of new resources is required. In the case where $R$ is funding, new funding can come from revenues, donations, or in some DAO use cases minting tokens. # # ### Defining Participants Preferences for Proposals # # ___ # # Most of the interesting information in this system is distributed amongst the participants and it manifests as preferences over the proposals. This can be thought of as a matrix $W\in \mathbb{R}^{n \times m}$. # ![image of participant/proposal affinity matrix](https://i.imgur.com/vERr8Zv.png) # # These private hidden signals drive discussions and voting actions. Each participant individually decides how to allocate their votes across the available proposals. Participant $a$ supports proposal $i$ by setting $x[a,i]>0$ but they are limited by their capacity $\sum_{k\in \mathcal{C}} x[a,k] \le h[a]$. Assuming each participant chooses a subset of the proposals to support, a support graph is formed. # ![pic](https://i.imgur.com/KRh8tKn.png) # # ## Aggregating Information # # ___ # # In order to break out of the synchronous ad-hoc voting model, a dynamical systems model of this system is introduced that fuses collective community preferences into a single signal. The mathematical derivation of this process can be found below. # # ### Participants Allocate Voting Power # # ___ # # ![pic](https://i.imgur.com/DZRDwk6.png) # In the above diagram, we examine the participant view. Participant $a$ with holdings $h$ at time $t$ supports proposals $i$ and $j$ with $x$ conviction. The sum of all conviction asserted by participant $a$ is between 0 and the total holdings of participant $a$. # # ### System Accounting of Proposal Conviction # # ___ # # ![pic](https://i.imgur.com/euAei5R.png) # # In the above diagram, we examine the proposal view. Proposal $j$ with total conviction $y$ at time $t$ is supported by participants $a$, $b$ and $c$ with $x$ conviction. The total conviction $y$ at time $t+1$ is equal to the total conviction at time $t$ decremented by an exponential decay $\\alpha$ plus the sum of all conviction from $k$ agents in time step $t$. # # ### Understanding the Alpha Parameter # ___ # For a deeper exploration of the $alpha$ parameter, please read more in the [Deriving Alpha notebook](https://nbviewer.jupyter.org/github/BlockScience/Aragon_Conviction_Voting/blob/master/models/v3/Deriving_Alpha.ipynb) # # # ## Converting Signals to Discrete Decisions # ___ # # Conviction can be considered like a fluctuating kinetic energy, with the Trigger function acting as a required activation energy for proposals to pass. This is the mechanism by which a continuous community preference turns into a discrete action event: passing a proposal. 
# # ### The Trigger Function # ___ # For a deeper exploration of the trigger function, please read more in the [Trigger Function Explainer notebook](https://nbviewer.jupyter.org/github/BlockScience/Aragon_Conviction_Voting/blob/master/models/v3/Trigger_Function_Explanation.ipynb) # # # ### Resolving Passed Proposals # ___ # # ![pic](https://i.imgur.com/lmOl9HE.png) # # ## Social Systems Modeling # ___ # # In the conviction voting model, multiple graph structures are used to represent participants and proposals to represent a subjective, exploratory modeling of the social system interacting. # # ### Sentiment: # # * Global Sentiment denotes the outside world appreciating the output of the community. # * Local Sentiment denotes the agents within the system feeling good about output of the community. # * Sentiment increases when proposals pass and work is completed in the community, and decreases when proposals fail and community progress stalls. # # ### Relationships between Participants: # # * Edges from participant to participant denote influence (to represent subjective social influence) and are assigned randomly via mixing processes. # # ![Participant Influence Graph](https://i.imgur.com/zQziRA7.png) # # ### Relationships between Proposals # # * Edges from proposal to proposal represent conflict, which can be positive or negative. # * Some proposals are synergistic (passing one makes the other more desirable). # * Some proposals are (partially) substitutable (passing one makes the other less desirable). # # ![Proposal Conflict Graph](https://i.imgur.com/CNjLhmr.png) # # ## Glossary of Notation # ___ # ### Summary of State Variables # # | Notation | Definition| # |--- | --- | # |$\mathcal{A}_t$ | The set of all governance agents/participants at time t | # |$\mathcal{C}_t$ | The set of all candidate proposals at time t | # |$n_t$ | The number of agents/participants at time t | # |$m_t$ | The number of candidate proposals at time t | # |$W_t$ | The matrix private preferences of n agents over m proposals | # |$h_t$ | The active token holdings of an agent at time t (Note: the sum of h over all agents is equivalent to the effective supply) | # |$x_t$ | The sum of tokens supporting a proposal at time t | # |$X_t$ | The matrix of tokens from n agents supporting m proposals at time t | # |$y_t$ | Total community conviction for a proposal at time t| # |$y^*_t$ | Trigger function threshold for a proposal at time t | # |$R_t$ | Total available resources in the proposal funding pool| # |$S_t$ | Effective supply of tokens available for community governance| # # <br> # <br> # # ## Summary Laws of Motion / State Transition # # * A new address $a$ joins the community of participants: # $\mathcal{A}_{t+1} = \mathcal{A}_t \cup \{a\}$ # $h_{t+1}[a]= \Delta h >0$ # # * An address $a$ leaves the community of participants: # $\mathcal{A}_{t+1} = \mathcal{A}_t \backslash \{a\}$ # $h_{t+1}[a]= 0$ # # * A proposal $i$ is added to the set of candidates # $\mathcal{C}_{t+1} = \mathcal{C}_t \cup \{i\}$ # # * A proposal $i$ is removed from the set of candidates # $\mathcal{C}_{t+1} = \mathcal{C}_t \backslash\{i\}$ # # * Resources are added to the shared resource pool # $R_{t+1}= R_t+ \Delta r$ # # * Update Conviction Required to pass proposals # $y^*_{t+1} = [\cdots ,f(\mu_i), \cdots]$ # where $\mu_i = \frac{r[i]}{R_t}$ # # * A participant allocates their support # $X_{t+1}[a,: ] = [\cdots,x[a,i],\cdots]$ # s.t. 
#
# * A proposal is passed given $y_t[i] \ge y^*_t[i]$
# $\mathcal{C}_{t+1} = \mathcal{C}_t \backslash\{i\}$
# $R_{t+1}= R_t- r[i]$
#
# * Update Conviction
# $y_{t+1}[i] =\alpha\cdot y_t[i] + \sum_{a\in \mathcal{A}_t} x[a, i]$
#
# <br>
#
# ## Parameters
#
# | Notation | Definition|
# |--- | --- |
# |$\alpha$ | The decay rate for previously accumulated conviction |
# |$\beta$ | Upper bound on the share of funds dispersed in the example trigger function|
# |$f(z)$| Trigger function that determines when a proposal has sufficient conviction to pass|
# |$\rho$ | Scale parameter for the example trigger function |
#
# Recall that the trigger function $f(z)$ satisfies $f:[0,1]\rightarrow \mathbb{R}_+$,
# e.g. $f(z) = \frac{\rho S }{(1-\alpha)(z-\beta)^2}$
#
# <br>
#
# ## Additional Considerations when Deploying CV
#
# * Timescales
#   * Whether the system operates in block time or in more human-understandable timescales such as hours, days, or weeks needs to be factored into the model.
# * Minimum candidacy times
#   * Proposals should be active for a minimum period to ensure appropriate dialog occurs within the community, regardless of the level of support.
# * Minimum conviction required for small proposals
#   * To prevent small-proposal spam from draining the communal funding pool, all proposals should require some minimum conviction to pass.
# * Effective supply
#   * To avoid slow conviction aggregation due to "inactive" tokens (e.g. tokens locked in cold storage or a liquidity pool, without active participation in governance), the effective supply is the portion of tokens that are active in community governance.
# * Proposal approval & feedback process
#   * The proposal process could make use of additional mechanisms such as fund escrow, a proposal backlog, reviews/validation, and disputability/contestation to keep the incentive to game the system to a minimum through responsible community oversight.
#
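#
# The example trigger function given in the Parameters section can be made concrete with a short sketch (not part of the original model code; the parameter values are arbitrary assumptions, and treating requests at or above $\beta$ as unpassable is an interpretation of $\beta$ as the upper bound on dispersed funds). For a proposal requesting a share $\mu_i = r[i]/R$ of the pool, conviction must reach $y^* = f(\mu_i)$ for the proposal to pass.
# +
def trigger_threshold(requested, funds, supply, alpha, beta, rho):
    """Example trigger function y* = rho * S / ((1 - alpha) * (share - beta)**2);
    larger requests (share closer to beta) need far more conviction."""
    share = requested / funds
    if share >= beta:
        return float("inf")          # assumed: requests above the beta cap can never pass
    return rho * supply / ((1 - alpha) * (share - beta) ** 2)

# Assumed illustrative values
alpha_p, beta_p, rho_p = 0.9, 0.2, 0.001
R_pool, S_supply = 10_000.0, 50_000.0
y_star = trigger_threshold(requested=500.0, funds=R_pool, supply=S_supply,
                           alpha=alpha_p, beta=beta_p, rho=rho_p)
y_current = 280.0
print(y_star, y_current >= y_star)   # a proposal passes only when y_t[i] >= y*_t[i]
# -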
algorithm_overview.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/cjsutton77/ML_toy_examples/blob/master/wineload.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="_n6DX3B7QyhO" import pandas as pd import numpy as np import torch from torch.utils.data import Dataset from torch.utils.data import DataLoader # + id="2tl0jSY_Rgxm" wine = pd.read_csv('wine.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 305} id="FHGJM1lgRnKG" outputId="d5484453-c878-4b06-fdeb-3dfebcabbe43" wine.head() # + id="IZlpSC7mYgmJ" wine['Class'].unique() w = {'one':1, 'two':2, 'three':3 } wine['Class'] = wine['Class'].apply(lambda x: w[x]) # + colab={"base_uri": "https://localhost:8080/", "height": 305} id="jlkq54bJY_QM" outputId="7444dde8-60df-426d-9e65-b946e14dc993" wine.head() # + id="ZGEkbK2-R4eA" class MyDataset(Dataset): def __init__(self,df): self.full = torch.from_numpy(df.to_numpy()) self.label = self.full[:,-1] self.data = self.full[:,:-1] def __getitem__(self,index): return self.data[index],self.label[index] def __len__(self): return len(self.data) # + id="ZgnFTsXhS0kJ" md = MyDataset(wine) # + colab={"base_uri": "https://localhost:8080/"} id="J6ZDDsOhUm9S" outputId="391d56a9-d997-4ef4-a980-74cbf0620097" len(md) # + id="ka5qF4R3XFyL" x,y = md[0] # + colab={"base_uri": "https://localhost:8080/"} id="5vKHxPkKXmAU" outputId="bc20bbfc-4ce3-4a1b-dac3-1dd2ee5de791" x # + colab={"base_uri": "https://localhost:8080/"} id="6aDPKRtNX51b" outputId="135f4add-42c1-4023-9048-31fd09f007d9" y # + colab={"base_uri": "https://localhost:8080/"} id="ZB1vwd3SZTFk" outputId="35397874-1c93-4974-8e66-02debebe7ebf" # !lscpu # + id="n_OdRZZwX6Sn" dataloader = DataLoader(md,batch_size=4,shuffle=True,num_workers=2) # + id="4i13qZaKZc2d" dataiter = iter(dataloader) # + id="SOw-wsFeZlKo" data = dataiter.next() # + id="XRegiwt0ZpyZ" features,labels = data # + colab={"base_uri": "https://localhost:8080/"} id="zM4XbVGBZqS4" outputId="e789025d-8397-4cca-8df8-60ead9a2f42a" features # + id="6gy-Ah7JZxQy"
wineload.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Creator Name: <NAME>, <NAME>, <NAME> # Create Date: Aug 2020 # Module Name: Server.py # Project Name: Secure File Sharing import os import socket import hashlib import base64 import secrets from Crypto.Util import Counter from Crypto import Random import string import json import pandas as pd import pyodbc import random from threading import Thread from uuid import uuid4 from datetime import datetime from Crypto.Cipher import AES from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes,serialization from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.kdf.hkdf import HKDF from password_strength import PasswordPolicy, PasswordStats # + conn = pyodbc.connect('Driver={SQL Server};' 'Server=DESKTOP-DH5KE9Q\SQLEXPRESS;' 'Database=SecureFileSharing;' 'Trusted_Connection=yes;') policy = PasswordPolicy.from_names( length=8, # min length: 8 uppercase=1, # need min. 2 uppercase letters numbers=1, # need min. 2 digits special=1, # need min. 2 special characters nonletters=1, # need min. 2 non-letter characters (digits, specials, anything) entropybits=30, # need a password that has minimum 30 entropy bits (the power of its alphabet) ) # + AouthCode = '0' def func(connection_sock): print(str(connection_sock.getsockname()[0])) key = key_exchange(connection_sock) if key == None: print('Connection has been blocked. Failed to set session key') return 0 while(1): global AouthCode data = connection_sock.recv(5000) if not data: c_IP , c_port = connection_sock.getpeername() print('client with ip = {} and port = {} has been disconnected at time {}' .format(c_IP, c_port, str(datetime.now()))) connection_sock.shutdown() connection_sock.close() return 1 data = data.decode('utf-8') text = decrypt(data, key) print('received_msg', text) content = '' text = text.split("\n") command = text[0].split() for i in range(1, len(text) - 1): content += text[i] AouthCode = text[len(text) - 1] print('AouthCode', AouthCode) print('command', command) print('content', content) msg = '' ############################################################################## if command[0] == "register" and len(command) == 5: userID = check_username(command[1], conn) register_status = 0 if userID == None: pass_str = CheckPasswordStrength(command[1], command[2]) if pass_str == '1': register_status = user_registeration(command[1], command[2], command[3], command[4], conn) msg = str(register_status) + " register " + command[1] + " " + str(datetime.now()) else: msg = "-3 register " + command[1] + " " + str(datetime.now()) + '\n' + pass_str else: msg = "-2 register " + command[1] + " " + str(datetime.now()) # register log add_registre_logs(command[1], command[3], command[4], str(register_status)) cipher_text = encrypt(msg, key) if cipher_text != None: connection_sock.send(cipher_text.encode('utf-8')) ############################################################################## elif command[0] == "login" and len(command) == 3: ip = str(connection_sock.getsockname()[0]) port = str(connection_sock.getsockname()[1]) ban_min = check_ban(command[1], conn) if ban_min < 0: msg = "-4 login " + command[1] + " " + str(datetime.now()) + '\n' + str(-ban_min) + '\n0' else: pass_status = check_password(command[1], command[2], conn) login_status = 0 
AuthCode = 0 if pass_status: login_status, AuthCode = user_login(command[1], ip, port, conn) if login_status == 1: msg = "1 login " + command[1] + " " + str(datetime.now()) + '\n\n' + str(AuthCode) else: msg = "0 login " + command[1] + " " + str(datetime.now()) + '\n\n0' else: msg = "-1 login " + command[1] + " " + str(datetime.now()) + '\n\n0' #login log add_login_logs(command[1], command[2], ip, port, AuthCode, str(login_status)) update_ban_state(command[1], login_status, conn) cipher_text = encrypt(msg, key) if cipher_text != None: connection_sock.send(cipher_text.encode('utf-8')) ############################################################################## elif command[0] == "grant" and len(command) == 4: ip = str(connection_sock.getsockname()[0]) port = str(connection_sock.getsockname()[1]) fileID = check_file(command[1], conn) userID = check_AouthCode(ip, port, AouthCode, conn) owner_status = check_access(userID, fileID, 'o' , conn) userID_g = check_username(command[2], conn) file_conf, file_int = get_file_lables(fileID, conn) grant_status = 0 if userID != None: if fileID != None: if owner_status: insert_access(userID_g, fileID, command[3], conn) insert_access(userID_g, fileID, command[3], conn) msg = "1 grant " + command[1] + " " + str(datetime.now()) else: msg = "0 grant " + command[1] + " " + str(datetime.now()) else: msg = "-2 grant " + command[1] + " " + str(datetime.now()) else: msg = "-1 grant " + command[1] + " " + str(datetime.now()) cipher_text = encrypt(msg, key) if cipher_text != None: connection_sock.send(cipher_text.encode('utf-8')) ############################################################################## elif command[0] == "revoce" and len(command) == 4: ip = str(connection_sock.getsockname()[0]) port = str(connection_sock.getsockname()[1]) fileID = check_file(command[1], conn) userID = check_AouthCode(ip, port, AouthCode, conn) owner_status = check_access(userID, fileID, 'o' , conn) userID_g = check_username(command[2], conn) file_conf, file_int = get_file_lables(fileID, conn) grant_status = 0 if userID != None: if fileID != None: if owner_status: revoc_access(userID_g, fileID, command[3], conn) revoc_access(userID_g, fileID, command[3], conn) msg = "1 revoce " + command[1] + " " + str(datetime.now()) else: msg = "0 revoce " + command[1] + " " + str(datetime.now()) else: msg = "-2 revoce " + command[1] + " " + str(datetime.now()) else: msg = "-1 revoce " + command[1] + " " + str(datetime.now()) cipher_text = encrypt(msg, key) if cipher_text != None: connection_sock.send(cipher_text.encode('utf-8')) ############################################################################## elif command[0] == "put" and len(command) == 5: ip = str(connection_sock.getsockname()[0]) port = str(connection_sock.getsockname()[1]) creatorID = check_AouthCode(ip, port, AouthCode, conn) fileID = check_file(command[1], conn) put_status = 0 if creatorID != None: if fileID == None: put_status = put_file(command[1], content, creatorID, command[2], command[3], command[4], conn) fileID = check_file(command[1], conn) msg = str(put_status) + " put " + command[1] + " " + str(datetime.now()) if put_status: insert_access(creatorID, fileID, 'w', conn) insert_access(creatorID, fileID, 'r', conn) else: msg = "-2 put " + command[1] + " " + str(datetime.now()) else: msg = "-1 get " + command[1] + " " + str(datetime.now()) #put log add_put_logs(creatorID, fileID, command[1], command[2], command[3], content, str(put_status)) cipher_text = encrypt(msg, key) if cipher_text != None: 
connection_sock.send(cipher_text.encode('utf-8')) ############################################################################## elif command[0] == "get" and len(command) == 2: ip = str(connection_sock.getsockname()[0]) port = str(connection_sock.getsockname()[1]) userID = check_AouthCode(ip, port, AouthCode, conn) fileID = check_file(command[1], conn) owner_status = check_access(userID, fileID, 'o' , conn) file_conf, file_int = get_file_lables(fileID, conn) if userID != None: if fileID != None: if owner_status: content = get_file(command[1], conn) msg = "1 get " + command[1] + " " + str(datetime.now()) + "\n" + content revoc_access(userID, fileID, 'w', conn) revoc_access(userID, fileID, 'r', conn) else: msg = "0 get " + command[1] + " " + str(datetime.now()) + "\n " else: msg = "-2 get " + command[1] + " " + str(datetime.now()) + "\n " else: msg = "-1 get " + command[1] + " " + str(datetime.now()) + "\n " #get logs add_get_logs(userID, fileID, command[1], file_conf, file_int, str(owner_status)) cipher_text = encrypt(msg, key) if cipher_text != None: connection_sock.send(cipher_text.encode('utf-8')) ############################################################################## elif command[0] == "read" and len(command) == 2: ip = str(connection_sock.getsockname()[0]) port = str(connection_sock.getsockname()[1]) userID = check_AouthCode(ip, port, AouthCode, conn) fileID = check_file(command[1], conn) file_conf, file_int = get_file_lables(fileID, conn) read_status = 0 if userID != None: if fileID != None: cond, BIBA_status, BLP_status, ACL = check_access_mode(userID, fileID, 'r' , conn) if cond: content = read_file(command[1], conn) if content == None: msg = "0 1 1 read " + command[1] + " " + str(datetime.now()) + "\n " else: read_status = 1 msg = "1 1 1 read " + command[1] + " " + str(datetime.now()) + "\n" + content else: msg = "0 " + str(BLP_status) + " " + str(BIBA_status) + " read " + command[1] + " " + str(datetime.now()) + "\n " else: msg = "-2 read " + command[1] + " " + str(datetime.now()) + "\n " else: msg = "-1 read " + command[1] + " " + str(datetime.now()) + "\n " #read logs add_read_logs(userID, fileID, command[1], file_conf, file_int, str(read_status)) cipher_text = encrypt(msg, key) if cipher_text != None: connection_sock.send(cipher_text.encode('utf-8')) ############################################################################## elif command[0] == "write" and len(command) == 2: ip = str(connection_sock.getsockname()[0]) port = str(connection_sock.getsockname()[1]) userID = check_AouthCode(ip, port, AouthCode, conn) fileID = check_file(command[1], conn) file_conf, file_int = get_file_lables(fileID, conn) write_status = 0 if userID != None: if fileID != None: cond, BIBA_status, BLP_status, ACL = check_access_mode(userID, fileID, 'w' , conn) if cond: write_status = write_file(command[1], content, conn) if write_status == 0: msg = "0 1 1 write " + command[1] + " " + str(datetime.now()) else: msg = "1 1 1 write " + command[1] + " " + str(datetime.now()) else: msg = "0 " + str(BLP_status) + " " + str(BIBA_status) + " write " + command[1] + " " + str(datetime.now()) else: msg = "-2 write " + command[1] + " " + str(datetime.now()) else: msg = "-1 write " + command[1] + " " + str(datetime.now()) #write logs add_write_logs(userID, fileID, command[1], file_conf, file_int, content, str(write_status)) cipher_text = encrypt(msg, key) if cipher_text != None: connection_sock.send(cipher_text.encode('utf-8')) ############################################################################## 
elif command[0] == "list" and len(command) == 1: userID = check_AouthCode(ip, port, AouthCode, conn) ip = str(connection_sock.getsockname()[0]) port = str(connection_sock.getsockname()[1]) if userID == None: msg = "-1 list " + str(datetime.now()) else: msg = "1 list " + str(datetime.now()) + "\n " files = list_files() msg += files cipher_text = encrypt(msg, key) if cipher_text != None: connection_sock.send(cipher_text.encode('utf-8')) elif command[0] == "logout" and len(command) == 1: userID = check_AouthCode(ip, port, AouthCode, conn) ip = str(connection_sock.getsockname()[0]) port = str(connection_sock.getsockname()[1]) if userID != None: logout_status = user_logout(userID, ip, port, AouthCode, conn) else: msg = "-1 logout " + command[1] + " " + str(datetime.now()) msg = str(logout_status) + " logout " + str(datetime.now()) cipher_text = encrypt(msg, key) if cipher_text != None: connection_sock.send(cipher_text.encode('utf-8')) print(msg) # + #Socket handling ######################################################################################################### def setup_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if s == 0: print ('error in server socket creation\n') server_ip = socket.gethostname() server_port = 8500 s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((server_ip, server_port)) s.listen(5) print("server is listening for any connection ... ") return s def server_listening(s): connection_socket , client_addr = s.accept() print("client with ip = {} has been connected at time {}".format(client_addr, str(datetime.now()))) return connection_socket # + #Cryptography ######################################################################################################### def key_exchange(client_sock): # first makes a private key, sends and receives public keys, then derives the secret key try: backend = default_backend() client_rcv_pub = client_sock.recv(200) # client_rcv_pub is client received public key in bytes client_pub = serialization.load_pem_public_key(client_rcv_pub, backend) # client_pub is client public key in curve object server_pr = ec.generate_private_key(ec.SECP256R1(), backend) server_pub = server_pr.public_key() client_sock.send(server_pub.public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo)) shared_data = server_pr.exchange(ec.ECDH(), client_pub) secret_key = HKDF(hashes.SHA256(), 32, None, b'Key Exchange', backend).derive(shared_data) session_key = secret_key[-16:] # to choose the last 128-bit (16-byte) of secret key print('key exchanged successfully.') return session_key except: print('error in key exchange') return None def encrypt(plain_text, key): # returns a json containing ciphertext and nonce for CTR mode nonce1 = Random.get_random_bytes(8) countf = Counter.new(64, nonce1) cipher = AES.new(key, AES.MODE_CTR, counter=countf) cipher_text_bytes = cipher.encrypt(plain_text.encode('utf-8')) nonce = base64.b64encode(nonce1).decode('utf-8') cipher_text = base64.b64encode(cipher_text_bytes).decode('utf-8') result = json.dumps({'nonce':nonce, 'ciphertext':cipher_text}) return result def decrypt(data, key): try: b64 = json.loads(data) nonce = base64.b64decode(b64['nonce'].encode('utf-8')) cipher_text = base64.b64decode(b64['ciphertext']) countf = Counter.new(64, nonce) cipher = AES.new(key, AES.MODE_CTR, counter=countf) plain_text = cipher.decrypt(cipher_text) return plain_text.decode('utf-8') except ValueError: return None 
######################################################################################################### # + def check_access_mode(userID, fileID, access_type , conn): MyQuery = ''' select AccessMode from ValidFiles where FileID = ? ''' sql_query = pd.read_sql( MyQuery ,conn, params={fileID,} ) acc_mode = int(str(sql_query['AccessMode']).split()[1]) cond = 1 BIBA = check_BIBA(userID, fileID, access_type, conn) BLP = check_BLP(userID, fileID, access_type, conn) ACL = check_access(userID, fileID, access_type, conn) if acc_mode % 2 == 0: cond = cond & BLP if acc_mode % 3 == 0: cond = cond & BIBA if acc_mode % 5 == 0: cond = cond & ACL return cond, BIBA, BLP, ACL def check_AouthCode(ip, port, AouthCode, conn): MyQuery = ''' select UserID from ValidConnections where [AouthCode] = ? and [Ip] = ? and [Port] = ? ''' sql_query = pd.read_sql( MyQuery ,conn, params=(AouthCode, ip, port,) ) if str(sql_query).split()[0] == 'Empty': return None else: return str(sql_query['UserID']).split()[1] def list_files(): MyQuery = ''' select FileName, LastModifiedDate from ValidFiles ''' sql_query = pd.read_sql( MyQuery ,conn ) return str(sql_query) def read_file(filename, conn): MyQuery = ''' select Content from ValidFiles where FileName = ? ''' sql_query = pd.read_sql( MyQuery ,conn, params={filename,} ) if str(sql_query).split()[0] == 'Empty': return None else: return str(sql_query['Content'][0]) def write_file(filename, content, conn): MyQuery = ''' update Files set Content = ? where [FileName] = ? and [Status] = '1' ''' cursor = conn.cursor() cursor.execute( MyQuery, content, filename) conn.commit() cursor.close() new_content = read_file(filename, conn) if content == new_content: return 1 return 0 def get_file(filename, conn): content = read_file(filename, conn) MyQuery = ''' update Files set [Status] = '0' where [FileName] = ? and [Status] = '1' ''' cursor = conn.cursor() cursor.execute( MyQuery, filename) conn.commit() cursor.close() return content # + def check_username(username, conn): MyQuery = ''' select UserID from Users where UserName = ? ''' sql_query = pd.read_sql( MyQuery ,conn, params={username,} ) if str(sql_query).split()[0] == 'Empty': return None else: return str(sql_query['UserID']).split()[1] #conf_range = ['Top Secret', 'Secret', 'Confidential', 'Unclassified'] #int_range = ['Very Trusted', 'Trusted', 'Slightly Trusted', 'UnTrusted'] int_range = ['1', '2', '3', '4'] conf_range = ['1', '2', '3', '4'] def user_registeration(username, password, conf_label, integrity_label, conn): if username != "all": salt = ''.join(secrets.choice(string.ascii_letters) for _ in range(25)) pass_hash = hashlib.sha256((password + salt).encode('utf-8')).hexdigest() pass_hash = str(pass_hash) MyQuery = ''' insert into Users ( UserName, PasswordHash, Salt, ConfLable, IntegrityLable) values (?, ?, ?, ?, ? 
); ''' if conf_label in conf_range and integrity_label in int_range: cursor = conn.cursor() cursor.execute( MyQuery, username, pass_hash,salt, conf_label, integrity_label) conn.commit() cursor.close() if check_username(username, conn) == None: return 0 return 1 return 0 # + def CheckPasswordStrength(username, password): if password.find(username) != -1: return "password should not include username" #check condition 1 Condition1 = policy.test(password) if Condition1: return ''.join(str(Condition1)) #check condition 2 f = open("PwnedPasswordsTop100k.txt","r") for x in f: x = str(x).strip() if x == password: return "Password is in top 100,000 pwned passwords" return '1' def check_password(username, password, conn): MyQuery1 = ''' select [Salt] from Users where UserName = ? ''' MyQuery2 = ''' select dbo.CheckPassHash(?, ?) as correctness ''' sql_query = pd.read_sql( MyQuery1 ,conn, params={username,} ) salt = str(sql_query['Salt']).split()[1] print('salt',salt) pass_h = str(hashlib.sha256((password + salt).encode('utf-8')).hexdigest()) print('pass_h',pass_h) print(username) sql_query2 = pd.read_sql( MyQuery2 ,conn, params=(username, pass_h) ) print(sql_query2) if str(sql_query2['correctness']).split()[1] == '1': return 1 else: return 0 # + def check_file(filename, conn): MyQuery = ''' select FileID from ValidFiles where FileName = ? ''' sql_query = pd.read_sql( MyQuery ,conn, params={filename,} ) if str(sql_query).split()[0] == 'Empty': return None else: return str(sql_query['FileID']).split()[1] def put_file(filename, content, creatorID, conf_label, integrity_label, access_mode , conn): MyQuery = ''' insert into Files ([FileName], [FileCreatorID], [ConfLable], [IntegrityLable], [AccessMode], [Content], [Status]) values (?, ?, ?, ?, ?, ?, ?) ''' cursor = conn.cursor() cursor.execute(MyQuery, filename, creatorID, conf_label, integrity_label, access_mode, content, '1') conn.commit() cursor.close() if check_file(filename, conn) == None: return 0 return 1 def update_ban_state(username, status, conn): MyQuery1 = '''UPDATE BanUser SET StartBanTime = CURRENT_TIMESTAMP, BanLvl= ? where UserID = (select UserID from Users where UserName= ?) ''' cursor = conn.cursor() if status: cursor.execute(MyQuery1, 0, username) else: MyQuery2 = ''' select dbo.FindLastFailedLogin(?) as lastfail ''' sql_query2 = pd.read_sql( MyQuery2 ,conn, params={username,} ) lastfail = str(sql_query2['lastfail']).split()[1] lastfail = int(lastfail) if lastfail % 3 == 0: cursor.execute(MyQuery1, lastfail//3, username) conn.commit() cursor.close() def check_ban(username, conn): MyQuery2 = ''' select dbo.IsBan(?) 
as ban_min ''' sql_query2 = pd.read_sql( MyQuery2 ,conn, params={username,} ) return int(str(sql_query2['ban_min']).split()[1]) # + #Logs ############################################################################## def add_registre_logs(username, conf_lable, integrity_lable, status): MyQuery = ''' INSERT INTO RegisterLogs ( UserName, ConfLable, IntegrityLable, [Status]) VALUES (?, ?, ?, ?); ''' cursor = conn.cursor() cursor.execute(MyQuery, username, conf_lable, integrity_lable, status) conn.commit() cursor.close() def add_login_logs(username, password, ip, port, AouthCode, status): MyQuery = ''' INSERT INTO LoginLogs ( UserName, [password], ConnectionIp, ConnectionPort, AuthenticationCode, [Status]) VALUES (?, ?, ?, ?, ?, ?); ''' cursor = conn.cursor() cursor.execute(MyQuery, username, password, ip, port, AouthCode, status) conn.commit() cursor.close() def add_put_logs(creatorID, fileID, filename, file_conf, file_int, content, status): MyQuery = ''' INSERT INTO PutLogs ( CreatorID, FileID, FileName ,CurFileConfLable, CurFileIntegrityLable, Content, [Status]) VALUES (?, ?, ?, ?, ?, ?, ?); ''' if fileID == None: fileID = 0 cursor = conn.cursor() cursor.execute(MyQuery, creatorID, fileID, filename, file_conf, file_int, content, status) conn.commit() cursor.close() def add_get_logs(userID, fileID, filename, file_conf, file_int, status): MyQuery = ''' INSERT INTO GetLogs ( UserID, FileID, FileName ,CurFileConfLable, CurFileIntegrityLable, [Status]) VALUES (?, ?, ?, ?, ?, ?); ''' if fileID == None: fileID = 0 cursor = conn.cursor() cursor.execute(MyQuery, userID, fileID, filename, file_conf, file_int, status) conn.commit() cursor.close() def add_read_logs(userID, fileID, filename, file_conf, file_int, status): MyQuery = ''' INSERT INTO ReadLogs ( UserID, FileID, FileName, CurFileConfLable, CurFileIntegrityLable, [Status]) VALUES (?, ?, ?, ?, ?, ?); ''' if fileID == None: fileID = 0 print(userID, fileID, filename, file_conf, file_int, status) cursor = conn.cursor() cursor.execute(MyQuery, userID, fileID, filename, file_conf, file_int, status) conn.commit() cursor.close() def add_write_logs(userID, fileID, filename, file_conf, file_int, content, status): MyQuery = ''' INSERT INTO WriteLogs ( UserID, FileID, FileName, CurFileConfLable, CurFileIntegrityLable, Content, [Status]) VALUES (?, ?, ?, ?, ?, ?, ?); ''' if fileID == None: fileID = 0 cursor = conn.cursor() cursor.execute(MyQuery, userID, fileID, filename, file_conf, file_int, content, status) conn.commit() cursor.close() ############################################################################## # + #Access Control ############################################################################## def insert_access(userID, fileID, access_type, conn): MyQuery = ''' insert into AccessList ([UserID], [FileID], [AccessType]) values (?, ?, ?) ''' if check_access(userID, fileID, access_type, conn) == 0: cursor = conn.cursor() cursor.execute(MyQuery, userID, fileID, access_type) conn.commit() cursor.close() def revoc_access(userID, fileID, access_type, conn): MyQuery = ''' delete AccessList where UserID = ? and FileID = ? and AccessType = ? ''' cursor = conn.cursor() cursor.execute(MyQuery, userID, fileID, access_type) conn.commit() cursor.close() def check_access(userID, fileID, access_type, conn): if access_type == 'o': MyQuery = ''' select * from Files where FileID = ? and FileCreatorID = ? ''' sql_query = pd.read_sql( MyQuery ,conn, params=(fileID, userID,) ) else: MyQuery = ''' select * from AccessList where FileID = ? and UserID = ? 
and AccessType = ? ''' sql_query = pd.read_sql( MyQuery ,conn, params=(fileID, userID, access_type,) ) if str(sql_query).split()[0] == 'Empty': return 0 else: return 1 def get_file_lables(fileID, conn): MyQuery = ''' select[IntegrityLable], [ConfLable] from Files where FileID = ? ''' sql_query = pd.read_sql( MyQuery ,conn, params={fileID,} ) if str(sql_query).split()[0] == 'Empty': file_conf = '' file_int = '' else: file_conf = str(sql_query['ConfLable']).split()[1] file_int = str(sql_query['IntegrityLable']).split()[1] return file_conf, file_int def check_BLP(userID, fileID, access_type, conn): MyQuery1 = ''' select[ConfLable] from Users where UserID = ? ''' sql_query1 = pd.read_sql( MyQuery1 ,conn, params={userID,} ) MyQuery2 = ''' select[ConfLable] from Files where FileID = ? ''' sql_query2 = pd.read_sql( MyQuery2 ,conn, params={fileID,} ) user_conf = int(str(sql_query1['ConfLable']).split()[1]) file_conf = int(str(sql_query2['ConfLable']).split()[1]) if access_type == 'w': if(user_conf > file_conf): return 0 else: return 1 elif access_type == 'r': if(user_conf < file_conf): return 0 else: return 1 def check_BIBA(userID, fileID, access_type, conn): MyQuery1 = ''' select[IntegrityLable] from Users where UserID = ? ''' sql_query1 = pd.read_sql( MyQuery1 ,conn, params={userID,} ) MyQuery2 = ''' select[IntegrityLable] from Files where FileID = ? ''' sql_query2 = pd.read_sql( MyQuery2 ,conn, params={fileID,} ) user_int = int(str(sql_query1['IntegrityLable']).split()[1]) file_int = int(str(sql_query2['IntegrityLable']).split()[1]) if access_type == 'w': if(user_int < file_int): return 0 else: return 1 elif access_type == 'r': if(user_int > file_int): return 0 else: return 1 ############################################################################## # + def user_login(username, Ip, Port, conn): userID = check_username(username, conn) MyQuery = ''' insert into Connections ([UserID], [Ip], [Port], [AouthCode], [ConnectionDate], [Status]) values (?, ?, ?, ?, ?, ?) ''' AouthCode = uuid4() ConnectionDate = str(datetime.now())[:-3] cursor = conn.cursor() cursor.execute( MyQuery, userID, Ip, Port, AouthCode, ConnectionDate, '1') conn.commit() cursor.close() MyQuery2 = ''' select [UserID], [Ip], [Port], [AouthCode], [ConnectionDate], [Status] from connections where [UserID] = ? and [Ip] = ? and [Port] = ? and [AouthCode] = ? and [ConnectionDate] = ? and [Status] = '1' ''' sql_query2 = pd.read_sql( MyQuery2 ,conn, params=(userID, Ip, Port, AouthCode, ConnectionDate,) ) if str(sql_query2).split()[0] == 'Empty': return 0 , None return 1, AouthCode def user_logout(userID, Ip, Port, AouthCode, conn): MyQuery = ''' update connections set [Status] = '0', ConnectionCloseDate = ? where [UserID] = ? and [Ip] = ? and [Port] = ? and [AouthCode] = ? and [Status] = '1' ''' ConnectionCloseDate = str(datetime.now()) cursor = conn.cursor() cursor.execute( MyQuery, ConnectionCloseDate, userID, Ip, Port, AouthCode) conn.commit() cursor.close() MyQuery2 = ''' select [Status] from connections where [UserID] = ? and [Ip] = ? and [Port] = ? and [AouthCode] = ? and [Status] = '0' and ConnectionCloseDate = ? ''' sql_query2 = pd.read_sql( MyQuery2 ,conn, params=(userID, Ip, Port, AouthCode, ConnectionCloseDate) ) if str(sql_query2).split()[0] == 'Empty': return 0 return 1 # + if __name__ == "__main__": s = setup_server() while True: connection_sock = server_listening(s) try: Thread(target=func,args=(connection_sock,)).start() except: print('Server is busy. Unable to create more threads.') s.shutdown() s.close()
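# A client must mirror the handshake above to talk to this server: generate an ECDH key on SECP256R1, swap PEM-encoded public keys, derive the session key with the same HKDF parameters, and then exchange AES-CTR-encrypted JSON messages. The sketch below is an illustration only and is not part of the original project; the host/port and the wire format of the login message are inferred from the server's parsing code above.
# +
import socket, json, base64
from Crypto.Cipher import AES
from Crypto.Util import Counter
from Crypto import Random
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

def client_key_exchange(sock):
    # Mirrors key_exchange() above: swap PEM public keys, ECDH, HKDF, keep last 16 bytes
    backend = default_backend()
    client_pr = ec.generate_private_key(ec.SECP256R1(), backend)
    sock.send(client_pr.public_key().public_bytes(
        serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo))
    server_pub = serialization.load_pem_public_key(sock.recv(200), backend)
    shared = client_pr.exchange(ec.ECDH(), server_pub)
    return HKDF(hashes.SHA256(), 32, None, b'Key Exchange', backend).derive(shared)[-16:]

def client_encrypt(plain_text, key):
    # Same JSON/CTR envelope that the server's decrypt() expects
    nonce = Random.get_random_bytes(8)
    cipher = AES.new(key, AES.MODE_CTR, counter=Counter.new(64, nonce))
    return json.dumps({'nonce': base64.b64encode(nonce).decode('utf-8'),
                       'ciphertext': base64.b64encode(cipher.encrypt(plain_text.encode('utf-8'))).decode('utf-8')})

# Usage sketch (assumed host/port; message layout inferred from the server:
# "<command line>\n<content lines...>\n<AuthCode>"):
# sock = socket.create_connection((socket.gethostname(), 8500))
# key = client_key_exchange(sock)
# sock.send(client_encrypt("login alice S3cure!Pass1\n\n0", key).encode('utf-8'))
# -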
SecureComputing/Project(Secure-File-Sharing)/Server.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.7 64-bit (''base'': conda)' # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression train=pd.read_csv("titanic/train.csv") test=pd.read_csv("titanic/test.csv") train.head() train.isna().values.any() train.isna().values.sum() test.isna().values.any() test.isna().values.sum() test.head() train=train.drop(["Name"],axis=1) test=test.drop(["Name"],axis=1) (train.shape,test.shape) train.info() num_cols_train=[col for col in train.columns if train[col].dtype!=object] obj_cols_train=[col for col in train.columns if train[col].dtype==object] num_cols_test=[col for col in test.columns if test[col].dtype!=object] obj_cols_test=[col for col in test.columns if test[col].dtype==object] # + fill_train={} for col in train.columns: if train[col].dtype==object: fill_train[col]=train[col].value_counts().index[0] else: fill_train[col]=train[col].mean() # - len(fill_train) # + fill_test={} for col in test.columns: if test[col].dtype==object: fill_test[col]=test[col].value_counts().index[0] else: fill_test[col]=test[col].mean() # - len(fill_test) train.fillna(fill_train,inplace=True) test.fillna(fill_test,inplace=True) (train.isnull().values.any(),test.isnull().values.any()) train test encoded_cols=[col for col in obj_cols_train if train[col].nunique()==test[col].nunique()] encoded_cols f_train=pd.get_dummies(train,columns=encoded_cols) f_test=pd.get_dummies(test,columns=encoded_cols) f_train f_test factor_cols=list(set(obj_cols_train)-set(encoded_cols)) factor_cols for col in factor_cols: f_train[col],_=pd.factorize(f_train[col]) f_test[col],_=pd.factorize(f_test[col]) f_train f_test droppedcol=f_train.pop('Survived') y_train=droppedcol.to_numpy() x_train=f_train.values (x_train.shape,y_train.shape) f_test.shape from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(x_train) x_train_std = scaler.transform(x_train) model=LogisticRegression() model.fit(x_train_std,y_train) model.score(x_train_std,y_train) x_test=f_test.values x_test_std=scaler.transform(x_test) preds=model.predict(x_test_std) preds y_test=pd.read_csv("titanic/gender_submission.csv") y_test y_test=y_test.drop(["PassengerId"],axis=1) y_test=y_test.to_numpy() model.score(x_test_std,y_test) type(preds) data=pd.DataFrame(f_test["PassengerId"]) data data["Survived"]=preds data data.to_csv("gender_submission.csv",index=False)
Logistic Regression/titanic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Licensed under the MIT License. # # Copyright (c) 2021-2031. All rights reserved. # # # LGBM Baseline # # * Using default LGBM settings, the performance serves as the baseline result to compare with later param tuning performance # * The performance is evaluated through cross validation # + import pandas as pd import numpy as np from tqdm import tqdm # tqdm._instances.clear() # run this when tqdm shows multiple bars in your notebook from matplotlib import pylab as plt from matplotlib.offsetbox import AnchoredText import lightgbm as lgb from sklearn.model_selection import train_test_split from sklearn.model_selection import StratifiedKFold from sklearn.metrics import balanced_accuracy_score # + df = pd.read_csv('../crystal_ball/data_collector/structured_data/leaf.csv') print(df.shape) df.head() # - def plot_performance_lst(performance_lst, y_label, title): plt.figure(figsize=(15,7)) ax = plt.gca() ax.set_ylim([0, 1]) # set y-axis range x = [i+1 for i in range(len(performance_lst))] y = performance_lst ax.plot(x, y, color='g') # anchor text to show text in the plot anchored_text = AnchoredText(f'Average {y_label} is {round(np.mean(performance_lst), 4)}', loc=3, prop={'size': 12}) # the location code: https://matplotlib.org/3.1.0/api/offsetbox_api.html ax.add_artist(anchored_text) # annotate y_value along the line for i,j in zip(x,y): ax.annotate(str(round(j, 4)),xy=(i,j)) plt.xlabel('epoch #') plt.ylabel(y_label) plt.title(title) plt.show() # + y = df['species'] X = df.drop('species', axis=1) # baseline performance through cross validation folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=10) # small dataset, only use 5 folds lgbm = lgb.LGBMClassifier(objective='multiclass', random_state=10) metrics_lst = [] for train_idx, val_idx in tqdm(folds.split(X, y), total=folds.get_n_splits()): X_train, y_train = X.iloc[train_idx], y.iloc[train_idx] X_val, y_val = X.iloc[val_idx], y.iloc[val_idx] lgbm.fit(X_train, y_train) y_pred = lgbm.predict(X_val) cv_balanced_accuracy = balanced_accuracy_score(y_val, y_pred) metrics_lst.append(cv_balanced_accuracy) plot_performance_lst(metrics_lst, 'balanced_accuracy', 'Baseline LightGBM Performance through Cross Valication') # - lgb.plot_importance(lgbm)
code/queen_lotus/lgbm_baseline.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ##Tirmzi Analysis # n=1000 m+=1000 nm-=120 istep= 4 min=150 max=700 import sys sys.path import matplotlib.pyplot as plt import numpy as np import os from scipy import signal # ls import capsol.newanalyzecapsol as ac ac.get_gridparameters import glob folders = glob.glob("*NewTirmzi_large_range*/") folders all_data= dict() for folder in folders: params = ac.get_gridparameters(folder + 'capsol.in') data = ac.np.loadtxt(folder + 'C-Z.dat') process_data = ac.process_data(params, data, smoothing=False, std=5*10**-9, fortran=False) all_data[folder]= (process_data) all_params= dict() for folder in folders: params=ac.get_gridparameters(folder + 'capsol.in') all_params[folder]= (params) all_data all_data.keys() # + for key in {key: params for key, params in all_params.items() if params['Thickness_sample'] == 9.98}: data=all_data[key] thickness =all_params[key]['Thickness_sample'] rtip= all_params[key]['Rtip'] er=all_params[key]['eps_r'] plt.plot(data['z'], data['c'], label= f'{rtip} nm, {er}, {thickness} nm') plt.title('C v. Z for 1nm thick sample') plt.ylabel("C(m)") plt.xlabel("Z(m)") plt.legend() plt.savefig("C' v. Z for 1nm thick sample 06-28-2021.png") # - # cut off last experiment because capacitance was off the scale # + for key in {key: params for key, params in all_params.items() if params['Thickness_sample'] == 9.98}: data=all_data[key] thickness=all_params[key]['Thickness_sample'] rtip= all_params[key]['Rtip'] er=all_params[key]['eps_r'] s=slice(4,-3) plt.plot(data['z'][s], data['cz'][s], label=f'{rtip} nm, {er}, {thickness} nm' ) plt.title('Cz vs. Z for 1.0nm') plt.ylabel("Cz") plt.xlabel("Z(m)") plt.legend() plt.savefig("Cz v. Z for varying sample thickness, 06-28-2021.png") # + for key in {key: params for key, params in all_params.items() if params['Thickness_sample'] == 9.98}: data=all_data[key] thickness=all_params[key]['Thickness_sample'] rtip= all_params[key]['Rtip'] er=all_params[key]['eps_r'] s=slice(5,-5) plt.plot(data['z'][s], data['czz'][s], label=f'{rtip} nm, {er}, {thickness} nm' ) plt.title('Czz vs. Z for 1.0nm') plt.ylabel("Czz") plt.xlabel("Z(m)") plt.legend() plt.savefig("Czz v. Z for varying sample thickness, 06-28-2021.png") # - params # + for key in {key: params for key, params in all_params.items() if params['Thickness_sample'] == 9.98}: data=all_data[key] thickness=all_params[key]['Thickness_sample'] rtip= all_params[key]['Rtip'] er=all_params[key]['eps_r'] s=slice(8,-8) plt.plot(data['z'][s], data['alpha'][s], label=f'{rtip} nm, {er}, {thickness} nm' ) plt.title('alpha vs. Z for 1.0nm') plt.ylabel("$\\alpha$") plt.xlabel("Z(m)") plt.legend() plt.savefig("Alpha v. Z for varying sample thickness, 06-28-2021.png") # - data from scipy.optimize import curve_fit def Cz_model(z, a, n, b,): return(a*z**n + b) all_data.keys() data= all_data['capsol-calc\\0001-capsol\\'] z= data['z'][1:-1] cz= data['cz'][1:-1] popt, pcov= curve_fit(Cz_model, z, cz, p0=[cz[0]*z[0], -1, 0]) a=popt[0] n=popt[1] b=popt[2] std_devs= np.sqrt(pcov.diagonal()) sigma_a = std_devs[0] sigma_n = std_devs[1] model_output= Cz_model(z, a, n, b) rmse= np.sqrt(np.mean((cz - model_output)**2)) f"a= {a} ± {sigma_a}" f"n= {n}± {sigma_n}" model_output "Root Mean Square Error" rmse/np.mean(-cz)
data/Output-Python/852021_FOriginalTirmzi-Copy2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + papermill={"duration": 1.734567, "end_time": "2022-03-27T05:32:30.562389", "exception": false, "start_time": "2022-03-27T05:32:28.827822", "status": "completed"} tags=[] import os, json, sys, time, random import numpy as np import torch from easydict import EasyDict from math import floor from easydict import EasyDict from steves_utils.vanilla_train_eval_test_jig import Vanilla_Train_Eval_Test_Jig from steves_utils.torch_utils import get_dataset_metrics, independent_accuracy_assesment from steves_models.configurable_vanilla import Configurable_Vanilla from steves_utils.torch_sequential_builder import build_sequential from steves_utils.lazy_map import Lazy_Map from steves_utils.sequence_aggregator import Sequence_Aggregator from steves_utils.stratified_dataset.traditional_accessor import Traditional_Accessor_Factory from steves_utils.cnn_do_report import ( get_loss_curve, get_results_table, get_parameters_table, get_domain_accuracies, ) from steves_utils.torch_utils import ( confusion_by_domain_over_dataloader, independent_accuracy_assesment ) from steves_utils.utils_v2 import ( per_domain_accuracy_from_confusion, get_datasets_base_path ) # from steves_utils.ptn_do_report import TBD # + papermill={"duration": 0.021679, "end_time": "2022-03-27T05:32:30.603245", "exception": false, "start_time": "2022-03-27T05:32:30.581566", "status": "completed"} tags=[] required_parameters = { "experiment_name", "lr", "device", "dataset_seed", "seed", "labels", "domains_target", "domains_source", "num_examples_per_domain_per_label_source", "num_examples_per_domain_per_label_target", "batch_size", "n_epoch", "patience", "criteria_for_best", "normalize_source", "normalize_target", "x_net", "NUM_LOGS_PER_EPOCH", "BEST_MODEL_PATH", "pickle_name_source", "pickle_name_target", "torch_default_dtype", } # + papermill={"duration": 0.035725, "end_time": "2022-03-27T05:32:30.655236", "exception": false, "start_time": "2022-03-27T05:32:30.619511", "status": "completed"} tags=["parameters"] from steves_utils.ORACLE.utils_v2 import ( ALL_SERIAL_NUMBERS, ALL_DISTANCES_FEET_NARROWED, ) standalone_parameters = {} standalone_parameters["experiment_name"] = "MANUAL CORES CNN" standalone_parameters["lr"] = 0.0001 standalone_parameters["device"] = "cuda" standalone_parameters["dataset_seed"] = 1337 standalone_parameters["seed"] = 1337 standalone_parameters["labels"] = ALL_SERIAL_NUMBERS standalone_parameters["domains_source"] = [8,32,50] standalone_parameters["domains_target"] = [14,20,26,38,44,] standalone_parameters["num_examples_per_domain_per_label_source"]=-1 standalone_parameters["num_examples_per_domain_per_label_target"]=-1 standalone_parameters["pickle_name_source"] = "oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl" standalone_parameters["pickle_name_target"] = "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl" standalone_parameters["torch_default_dtype"] = "torch.float32" standalone_parameters["batch_size"]=128 standalone_parameters["n_epoch"] = 3 standalone_parameters["patience"] = 10 standalone_parameters["criteria_for_best"] = "target_accuracy" standalone_parameters["normalize_source"] = False standalone_parameters["normalize_target"] = False standalone_parameters["x_net"] = [ {"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}}, {"class": "Conv2d", "kargs": { 
"in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features":256}}, {"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features":80}}, {"class": "Flatten", "kargs": {}}, {"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm1d", "kargs": {"num_features":256}}, {"class": "Linear", "kargs": {"in_features": 256, "out_features": len(standalone_parameters["labels"])}}, ] standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10 standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth" # + papermill={"duration": 0.031245, "end_time": "2022-03-27T05:32:30.708979", "exception": false, "start_time": "2022-03-27T05:32:30.677734", "status": "completed"} tags=["injected-parameters"] # Parameters parameters = { "experiment_name": "cnn_2:oracle.run2", "labels": [ "3123D52", "3123D65", "3123D79", "3123D80", "3123D54", "3123D70", "3123D7B", "3123D89", "3123D58", "3123D76", "3123D7D", "3123EFE", "3123D64", "3123D78", "3123D7E", "3124E4A", ], "domains_source": [8, 32, 50, 14, 20, 26, 38, 44], "domains_target": [8, 32, 50, 14, 20, 26, 38, 44], "pickle_name_source": "oracle.Run2_10kExamples_stratified_ds.2022A.pkl", "pickle_name_target": "oracle.Run2_10kExamples_stratified_ds.2022A.pkl", "device": "cuda", "lr": 0.0001, "batch_size": 128, "normalize_source": False, "normalize_target": False, "num_examples_per_domain_per_label_source": -1, "num_examples_per_domain_per_label_target": -1, "torch_default_dtype": "torch.float32", "n_epoch": 50, "patience": 3, "criteria_for_best": "target_accuracy", "x_net": [ {"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}}, { "class": "Conv2d", "kargs": { "in_channels": 1, "out_channels": 256, "kernel_size": [1, 7], "bias": False, "padding": [0, 3], }, }, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features": 256}}, { "class": "Conv2d", "kargs": { "in_channels": 256, "out_channels": 80, "kernel_size": [2, 7], "bias": True, "padding": [0, 3], }, }, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm2d", "kargs": {"num_features": 80}}, {"class": "Flatten", "kargs": {}}, {"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}}, {"class": "ReLU", "kargs": {"inplace": True}}, {"class": "BatchNorm1d", "kargs": {"num_features": 256}}, {"class": "Linear", "kargs": {"in_features": 256, "out_features": 16}}, ], "NUM_LOGS_PER_EPOCH": 10, "BEST_MODEL_PATH": "./best_model.pth", "dataset_seed": 500, "seed": 500, } # + papermill={"duration": 0.023881, "end_time": "2022-03-27T05:32:30.753693", "exception": false, "start_time": "2022-03-27T05:32:30.729812", "status": "completed"} tags=[] # Set this to True if you want to run this template directly STANDALONE = False if STANDALONE: print("parameters not injected, running with standalone_parameters") parameters = standalone_parameters if not 'parameters' in locals() and not 'parameters' in globals(): raise Exception("Parameter injection failed") #Use an easy dict for all the parameters p = EasyDict(parameters) supplied_keys = set(p.keys()) if supplied_keys != required_parameters: print("Parameters are incorrect") if len(supplied_keys - required_parameters)>0: 
print("Shouldn't have:", str(supplied_keys - required_parameters)) if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys)) raise RuntimeError("Parameters are incorrect") # + papermill={"duration": 0.021742, "end_time": "2022-03-27T05:32:30.794910", "exception": false, "start_time": "2022-03-27T05:32:30.773168", "status": "completed"} tags=[] ################################### # Set the RNGs and make it all deterministic ################################### np.random.seed(p.seed) random.seed(p.seed) torch.manual_seed(p.seed) torch.use_deterministic_algorithms(True) # + papermill={"duration": 0.019946, "end_time": "2022-03-27T05:32:30.832820", "exception": false, "start_time": "2022-03-27T05:32:30.812874", "status": "completed"} tags=[] torch.set_default_dtype(eval(p.torch_default_dtype)) # + papermill={"duration": 0.057248, "end_time": "2022-03-27T05:32:30.906513", "exception": false, "start_time": "2022-03-27T05:32:30.849265", "status": "completed"} tags=[] ################################### # Build the network(s) # Note: It's critical to do this AFTER setting the RNG ################################### x_net = build_sequential(p.x_net) # + papermill={"duration": 0.020104, "end_time": "2022-03-27T05:32:30.946131", "exception": false, "start_time": "2022-03-27T05:32:30.926027", "status": "completed"} tags=[] start_time_secs = time.time() # + papermill={"duration": 36.745343, "end_time": "2022-03-27T05:33:07.708065", "exception": false, "start_time": "2022-03-27T05:32:30.962722", "status": "completed"} tags=[] def wrap_in_dataloader(p, ds): return torch.utils.data.DataLoader( ds, batch_size=p.batch_size, shuffle=True, num_workers=1, persistent_workers=True, prefetch_factor=50, pin_memory=True ) taf_source = Traditional_Accessor_Factory( labels=p.labels, domains=p.domains_source, num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_source, pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name_source), seed=p.dataset_seed ) train_original_source, val_original_source, test_original_source = \ taf_source.get_train(), taf_source.get_val(), taf_source.get_test() taf_target = Traditional_Accessor_Factory( labels=p.labels, domains=p.domains_target, num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_source, pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name_target), seed=p.dataset_seed ) train_original_target, val_original_target, test_original_target = \ taf_target.get_train(), taf_target.get_val(), taf_target.get_test() # For CNN We only use X and Y. And we only train on the source. # Properly form the data using a transform lambda and Lazy_Map. 
Finally wrap them in a dataloader transform_lambda = lambda ex: ex[:2] # Strip the tuple to just (x,y) train_processed_source = wrap_in_dataloader( p, Lazy_Map(train_original_source, transform_lambda) ) val_processed_source = wrap_in_dataloader( p, Lazy_Map(val_original_source, transform_lambda) ) test_processed_source = wrap_in_dataloader( p, Lazy_Map(test_original_source, transform_lambda) ) train_processed_target = wrap_in_dataloader( p, Lazy_Map(train_original_target, transform_lambda) ) val_processed_target = wrap_in_dataloader( p, Lazy_Map(val_original_target, transform_lambda) ) test_processed_target = wrap_in_dataloader( p, Lazy_Map(test_original_target, transform_lambda) ) datasets = EasyDict({ "source": { "original": {"train":train_original_source, "val":val_original_source, "test":test_original_source}, "processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source} }, "target": { "original": {"train":train_original_target, "val":val_original_target, "test":test_original_target}, "processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target} }, }) # + papermill={"duration": 5.307648, "end_time": "2022-03-27T05:33:13.037871", "exception": false, "start_time": "2022-03-27T05:33:07.730223", "status": "completed"} tags=[] ep = next(iter(test_processed_target)) ep[0].dtype # + papermill={"duration": 0.070255, "end_time": "2022-03-27T05:33:13.126854", "exception": false, "start_time": "2022-03-27T05:33:13.056599", "status": "completed"} tags=[] model = Configurable_Vanilla( x_net=x_net, label_loss_object=torch.nn.NLLLoss(), learning_rate=p.lr ) # + papermill={"duration": 1340.379965, "end_time": "2022-03-27T05:55:33.527211", "exception": false, "start_time": "2022-03-27T05:33:13.147246", "status": "completed"} tags=[] jig = Vanilla_Train_Eval_Test_Jig( model=model, path_to_best_model=p.BEST_MODEL_PATH, device=p.device, label_loss_object=torch.nn.NLLLoss(), ) jig.train( train_iterable=datasets.source.processed.train, source_val_iterable=datasets.source.processed.val, target_val_iterable=datasets.target.processed.val, patience=p.patience, num_epochs=p.n_epoch, num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH, criteria_for_best=p.criteria_for_best ) # + papermill={"duration": 0.066248, "end_time": "2022-03-27T05:55:33.652283", "exception": false, "start_time": "2022-03-27T05:55:33.586035", "status": "completed"} tags=[] total_experiment_time_secs = time.time() - start_time_secs # + papermill={"duration": 114.979955, "end_time": "2022-03-27T05:57:28.694097", "exception": false, "start_time": "2022-03-27T05:55:33.714142", "status": "completed"} tags=[] source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test) target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test) source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val) target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val) history = jig.get_history() total_epochs_trained = len(history["epoch_indices"]) val_dl = wrap_in_dataloader(p, Sequence_Aggregator((datasets.source.original.val, datasets.target.original.val))) confusion = confusion_by_domain_over_dataloader(model, p.device, val_dl, forward_uses_domain=False) per_domain_accuracy = per_domain_accuracy_from_confusion(confusion) # Add a key to per_domain_accuracy for if it was a source domain for domain, accuracy in per_domain_accuracy.items(): per_domain_accuracy[domain] = { 
"accuracy": accuracy, "source?": domain in p.domains_source } # Do an independent accuracy assesment JUST TO BE SURE! # _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device) # _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device) # _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device) # _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device) # assert(_source_test_label_accuracy == source_test_label_accuracy) # assert(_target_test_label_accuracy == target_test_label_accuracy) # assert(_source_val_label_accuracy == source_val_label_accuracy) # assert(_target_val_label_accuracy == target_val_label_accuracy) ################################### # Write out the results ################################### experiment = { "experiment_name": p.experiment_name, "parameters": p, "results": { "source_test_label_accuracy": source_test_label_accuracy, "source_test_label_loss": source_test_label_loss, "target_test_label_accuracy": target_test_label_accuracy, "target_test_label_loss": target_test_label_loss, "source_val_label_accuracy": source_val_label_accuracy, "source_val_label_loss": source_val_label_loss, "target_val_label_accuracy": target_val_label_accuracy, "target_val_label_loss": target_val_label_loss, "total_epochs_trained": total_epochs_trained, "total_experiment_time_secs": total_experiment_time_secs, "confusion": confusion, "per_domain_accuracy": per_domain_accuracy, }, "history": history, "dataset_metrics": get_dataset_metrics(datasets, "cnn"), } # + papermill={"duration": 0.63181, "end_time": "2022-03-27T05:57:29.453972", "exception": false, "start_time": "2022-03-27T05:57:28.822162", "status": "completed"} tags=[] get_loss_curve(experiment) # + papermill={"duration": 0.429064, "end_time": "2022-03-27T05:57:29.947289", "exception": false, "start_time": "2022-03-27T05:57:29.518225", "status": "completed"} tags=[] get_results_table(experiment) # + papermill={"duration": 0.360813, "end_time": "2022-03-27T05:57:30.376155", "exception": false, "start_time": "2022-03-27T05:57:30.015342", "status": "completed"} tags=[] get_domain_accuracies(experiment) # + papermill={"duration": 0.073915, "end_time": "2022-03-27T05:57:30.519928", "exception": false, "start_time": "2022-03-27T05:57:30.446013", "status": "completed"} tags=[] print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"]) print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"]) # + papermill={"duration": 0.071997, "end_time": "2022-03-27T05:57:30.659363", "exception": false, "start_time": "2022-03-27T05:57:30.587366", "status": "completed"} tags=["experiment_json"] json.dumps(experiment)
experiments/cnn_2/oracle.run2/trials/4/trial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Trajectory equations: # %matplotlib inline import matplotlib.pyplot as plt from sympy import * init_printing() Bx, By, Bz, B = symbols("B_x, B_y, B_z, B") x, y, z = symbols("x, y, z" ) x_0, y_0, z_0 = symbols("x_0, y_0, z_0") vx, vy, vz, v = symbols("v_x, v_y, v_z, v") vx_0, vy_0, vz_0 = symbols("v_x0, v_y0, v_z0") t = symbols("t") q, m = symbols("q, m") c, eps0 = symbols("c, epsilon_0") # The equation of motion: # $$ # \begin{gather*} # m \frac{d^2 \vec{r} }{dt^2} = \frac{q}{c} [ \vec{v} \vec{B} ] # \end{gather*} # $$ # For the case of a uniform magnetic field along the $z$-axis: # $$ \vec{B} = B_z = B, \quad B_x = 0, \quad B_y = 0 $$ # In Cartesian coordinates: eq_x = Eq( Derivative(x(t), t, 2), q / c / m * Bz * Derivative(y(t),t) ) eq_y = Eq( Derivative(y(t), t, 2), - q / c / m * Bz * Derivative(x(t),t) ) eq_z = Eq( Derivative(z(t), t, 2), 0 ) display( eq_x, eq_y, eq_z ) # Motion is uniform along the $z$-axis: z_eq = dsolve( eq_z, z(t) ) vz_eq = Eq( z_eq.lhs.diff(t), z_eq.rhs.diff(t) ) display( z_eq, vz_eq ) # The constants of integration can be found from the initial conditions $z(0) = z_0$ and $v_z(0) = v_{z0}$: # + c1_c2_system = [] initial_cond_subs = [(t, 0), (z(0), z_0), (diff(z(t),t).subs(t,0), vz_0) ] c1_c2_system.append( z_eq.subs( initial_cond_subs ) ) c1_c2_system.append( vz_eq.subs( initial_cond_subs ) ) c1, c2 = symbols("C1, C2") c1_c2 = solve( c1_c2_system, [c1, c2] ) c1_c2 # - # So that z_sol = z_eq.subs( c1_c2 ) vz_sol = vz_eq.subs( c1_c2 ).subs( [( diff(z(t),t), vz(t) ) ] ) display( z_sol, vz_sol ) # For some reason I have not been able to solve the system of differential equations for $x$ and $y$ directly # with Sympy's `dsolve` function: # + #dsolve( [eq_x, eq_y], [x(t),y(t)] ) # - # It is necessary to resort to the manual solution. The method is to differentiate one of them over # time and substitute the other. This will result in oscillator-type second-order equations for $v_y$ and $v_x$. Their solution is known. Integrating one more time, it is possible to obtain laws of motion $x(t)$ and $y(t)$. # + v_subs = [ (Derivative(x(t),t), vx(t)), (Derivative(y(t),t), vy(t)) ] eq_vx = eq_x.subs( v_subs ) eq_vy = eq_y.subs( v_subs ) display( eq_vx, eq_vy ) eq_d2t_vx = Eq( diff(eq_vx.lhs,t), diff(eq_vx.rhs,t)) eq_d2t_vx = eq_d2t_vx.subs( [(eq_vy.lhs, eq_vy.rhs)] ) display( eq_d2t_vx ) # - # The solution of the last equation is C1, C2, Omega = symbols( "C1, C2, Omega" ) vx_eq = Eq( vx(t), C1 * cos( Omega * t ) + C2 * sin( Omega * t )) display( vx_eq ) omega_eq = Eq( Omega, Bz * q / c / m ) display( omega_eq ) # where $\Omega$ is a cyclotron frequency. 
# + display( vx_eq ) vy_eq = Eq( vy(t), solve( Eq( diff(vx_eq.rhs,t), eq_vx.rhs ), ( vy(t) ) )[0] ) vy_eq = vy_eq.subs( [(Omega*c*m / Bz / q, omega_eq.rhs * c * m / Bz / q)]).simplify() display( vy_eq ) # - # For initial conditions $v_x(0) = v_{x0}, v_y(0) = v_{y0}$: # + initial_cond_subs = [(t,0), (vx(0), vx_0), (vy(0), vy_0) ] vx0_eq = vx_eq.subs( initial_cond_subs ) vy0_eq = vy_eq.subs( initial_cond_subs ) display( vx0_eq, vy0_eq ) c1_c2 = solve( [vx0_eq, vy0_eq] ) c1_c2_subs = [ ("C1", c1_c2[c1]), ("C2", c1_c2[c2]) ] vx_eq = vx_eq.subs( c1_c2_subs ) vy_eq = vy_eq.subs( c1_c2_subs ) display( vx_eq, vy_eq ) # - # These equations can be integrated to obtain the laws of motion: x_eq = vx_eq.subs( vx(t), diff(x(t),t)) x_eq = dsolve( x_eq ) y_eq = vy_eq.subs( vy(t), diff(y(t),t)) y_eq = dsolve( y_eq ).subs( C1, C2 ) display( x_eq, y_eq ) # For nonzero $\Omega$: x_eq = x_eq.subs( [(Omega, 123)] ).subs( [(123, Omega)] ).subs( [(Rational(1,123), 1/Omega)] ) y_eq = y_eq.subs( [(Omega, 123)] ).subs( [(123, Omega)] ).subs( [(Rational(1,123), 1/Omega)] ) display( x_eq, y_eq ) # For initial conditions $x(0) = x_0, y(0) = y_0$: # + initial_cond_subs = [(t,0), (x(0), x_0), (y(0), y_0) ] x0_eq = x_eq.subs( initial_cond_subs ) y0_eq = y_eq.subs( initial_cond_subs ) display( x0_eq, y0_eq ) c1_c2 = solve( [x0_eq, y0_eq] ) c1_c2_subs = [ ("C1", c1_c2[0][c1]), ("C2", c1_c2[0][c2]) ] x_eq = x_eq.subs( c1_c2_subs ) y_eq = y_eq.subs( c1_c2_subs ) display( x_eq, y_eq ) # - x_eq = x_eq.simplify() y_eq = y_eq.simplify() x_eq = x_eq.expand().collect(Omega) y_eq = y_eq.expand().collect(Omega) display( x_eq, y_eq ) # Finally display( x_eq, y_eq, z_sol ) display( vx_eq, vy_eq, vz_sol ) display( omega_eq )
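# As a quick numerical sanity check (not part of the original derivation), the closed-form
# solution can be evaluated for arbitrary sample values of the charge, mass, field and initial
# conditions; for nonzero $\Omega$ it should trace a circle. The formulas below are the standard
# cyclotron orbit written out by hand and should agree with the `x_eq` and `y_eq` obtained above.

# +
import numpy as np
import matplotlib.pyplot as plt

q_n, m_n, c_n, B_n = 1.0, 1.0, 1.0, 2.0          # sample charge, mass, speed of light, field
Omega_n = q_n * B_n / (c_n * m_n)                # cyclotron frequency
x0_n, y0_n, vx0_n, vy0_n = 0.0, 0.0, 1.0, 0.0    # sample initial conditions

t_n = np.linspace(0.0, 2.0 * np.pi / Omega_n, 200)
x_n = x0_n + vx0_n / Omega_n * np.sin(Omega_n * t_n) + vy0_n / Omega_n * (1.0 - np.cos(Omega_n * t_n))
y_n = y0_n - vx0_n / Omega_n * (1.0 - np.cos(Omega_n * t_n)) + vy0_n / Omega_n * np.sin(Omega_n * t_n)

plt.plot(x_n, y_n)
plt.gca().set_aspect("equal")
plt.xlabel("x")
plt.ylabel("y")
plt.title("Orbit over one cyclotron period")
plt.show()
# -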
examples/single_particle_in_magnetic_field/Single Particle in Uniform Magnetic Field.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # name: python385jvsc74a57bd0aec5ca42d32fca8a47903ca8af468e4bc5918afec374554979b05205766de186 # --- # <style type="text/css"> # .align-center {text-align: center; margin: auto;} # h1 { # color: #D41212; # font-family: Impact; # font-weight: bold; # font-size: 280%; # text-align: center; # text-transform: uppercase; # } # h2 { # color: #11A6D9; # font-family: Tahoma; # font-size: 230%; # } # h3 { # color: #14D65F; # font-family: Verdana; # font-size: 160%; # margin-left: 1.5em; # } # h4 { # color: #FBC623; # font-family: Verdana; # font-size: 160%; # margin-left: 3em; # } # </style> # <body style = "text-align:justify"> # <div class="align-center"> # <img src="https://minas.medellin.unal.edu.co/images/Escudo_color.png" title="UNAL" alt="UNAL" width="400"> # </div> # # #### Autor # # + [<NAME>](https://jeison-alarcon.netlify.app/) # # ___ # ___ # + [markdown] id="XDCc5ufwjQC0" # # **HOUSING VALUES IN SUBURBS OF BOSTON** # + [markdown] id="GlbgkIWz0KCk" # <center> # <img src = "https://journal.firsttuesday.us/wp-content/uploads/CA-Sales-Home-Volume.png" # alt = "Abc" # height = "320" # title = "California Housing Data"> # </center> # # El objetivo de este ejercicio es crear un modelo de precios de vivienda en California utilizando los datos del censo. Estos datos tienen métricas como la población, el ingreso medio, el precio medio de la vivienda, etc. para cada grupo de bloques en California. Los grupos de bloques son la unidad geográfica más pequeña para la cual la Oficina del Censo de EE. UU. publica datos de muestra (*un grupo de bloques generalmente tiene una población de $600$ a $3000$ personas*). Simplemente los llamaremos "distritos" para abreviar. # # Lo anterior es claramente una tarea típica de aprendizaje supervisado, ya que se le brindan ejemplos de capacitación etiquetados (*cada instancia viene con el resultado esperado, es decir, el precio medio de la vivienda del distrito*). Además, también es una tarea de regresión típica, ya que se le pide que prediga un valor. Más específicamente, este es un problema de regresión múltiple ya que el sistema usará múltiples características para hacer una predicción (*usará la población del distrito, el ingreso medio, etc.*). 
# + [markdown] id="GM<KEY>" # ## *Paquetes y Librerías* # + id="XLAPidYpftjG" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619667387445, "user_tz": 300, "elapsed": 2653, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="5976a54d-cab3-401a-dab3-ba4629266785" import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import tensorflow as tf print("Versión de Tensorflow = ", tf.__version__) import tensorflow.keras as keras from keras.models import Sequential from keras.layers import Dense, Activation, Flatten, Dropout from tensorflow.keras.utils import plot_model, to_categorical # + [markdown] id="fXOqxFHMzRj7" # ## *Lectura de los Datos* # + id="nNnUcDhfzUk8" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1619667387447, "user_tz": 300, "elapsed": 2652, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="dd27efd9-d0b9-4fed-8ad0-2fcbf778cad6" from google.colab import drive drive.mount('/content/drive') Data = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/DATA/HousingData.csv") Data.head() # + [markdown] id="emeOcdy60Ov9" # ## *Procesamiento de los Datos* # + id="AGjKtERS0RSm" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619667387447, "user_tz": 300, "elapsed": 2649, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="ff81ebf6-2d4b-4577-9a4f-d0848c6520a6" # Detección y tratamiento de valores faltantes print(Data.isna().sum()) Data = Data.dropna() # + [markdown] id="w9fdPPzV2TT8" # ## *División del Dataset* # + id="RMiBrlRf2ZJz" executionInfo={"status": "ok", "timestamp": 1619667387448, "user_tz": 300, "elapsed": 2649, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} X_train = Data.sample(frac = 0.8, random_state = 2021) X_test = Data.drop(X_train.index) # + [markdown] id="pfucKCk4o-pL" # ## *Análisis Descriptivo* # + colab={"base_uri": "https://localhost:8080/", "height": 483} id="g8hBEOaFpIVA" executionInfo={"status": "ok", "timestamp": 1619667387449, "user_tz": 300, "elapsed": 2648, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="3fcc5ed5-c0d8-46d5-a775-eae3e17a4974" X_train.describe().transpose() # + colab={"base_uri": "https://localhost:8080/", "height": 920} id="pWNRczxgjCc0" executionInfo={"status": "ok", "timestamp": 1619667398560, "user_tz": 300, "elapsed": 13757, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="333cdb3d-d6aa-473e-df78-c69e9ceb507d" Columns = ["RM", "AGE", "DIS", "LSTAT", "MEDV"] sns.pairplot(Data[Columns], plot_kws = dict(marker = ".", linewidth = 0.5)) # + colab={"base_uri": "https://localhost:8080/", "height": 394} id="sq7hOBCqjHnu" executionInfo={"status": "ok", "timestamp": 1619667398880, "user_tz": 300, "elapsed": 14075, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="e0455420-a9c5-4517-c6e1-88bfb93585d2" corrMatrix = Data[Columns].corr() _, ax = plt.subplots(figsize = (12, 6)) mask = np.triu(np.ones_like(corrMatrix, dtype = np.bool)) mask = mask[1:, :-1] corr = corrMatrix.iloc[1:,:-1].copy() sns.heatmap(corr, mask = mask, annot = True, fmt = ".2f", vmin = -1, vmax = 1, linewidths = 3, cmap = "coolwarm", cbar_kws = {"shrink": .8}, square = True) plt.title("CORRELATION MATRIX", 
loc = "center", fontsize = 18) plt.show() # + id="gtUmaLGv2vpJ" executionInfo={"status": "ok", "timestamp": 1619667398882, "user_tz": 300, "elapsed": 14076, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} # Separación del vector objetivo y_train = X_train.pop("MEDV") y_test = X_test.pop("MEDV") # Extracción del promedio de cada variable X_train_stats = X_train.describe().transpose() # + [markdown] id="GQB9DzGn3dOd" # ## *Normalización de los Datos* # + id="O_JJeLR43jG4" executionInfo={"status": "ok", "timestamp": 1619667398882, "user_tz": 300, "elapsed": 14074, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} def Normalizacion(x): return (x - X_train_stats["mean"]) / X_train_stats["std"] Normed_Train = Normalizacion(X_train) Normed_Test = Normalizacion(X_test) # + [markdown] id="1A9HHqV-4wyw" # ## *Construcción del Modelo* # + [markdown] id="21tBajJlRyoO" # A continuación, utilizaremos un modelo secuencial con dos capas ocultas densamente conectadas y una capa de salida que devuelve un único valor continuo. # + id="qpV6hCZQR2Do" executionInfo={"status": "ok", "timestamp": 1619667398883, "user_tz": 300, "elapsed": 14072, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} Modelo = Sequential( [ Dense(128, activation = "relu", input_shape = (Normed_Train.shape[1], )), Dropout(0.4), Dense(64, activation = "relu"), Dropout(0.2), Dense(1) ] ) # + id="eVFQtpt-S3Uu" executionInfo={"status": "ok", "timestamp": 1619667398883, "user_tz": 300, "elapsed": 14070, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} # Compilación Optimizador = tf.keras.optimizers.RMSprop(learning_rate = 0.001) Modelo.compile(loss = "mse", optimizer = Optimizador, metrics = ["mae", "mse"] ) # + colab={"base_uri": "https://localhost:8080/", "height": 950} id="AX6euktmThNP" executionInfo={"status": "ok", "timestamp": 1619667399413, "user_tz": 300, "elapsed": 14598, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="67eedada-a8de-4deb-f7c1-3c2b45c7dd0e" # Resumen y plot del modelo Modelo.summary() from tensorflow.keras.utils import plot_model plot_model(Modelo, show_shapes = True, show_dtype = True) # + id="iNBQfSpp47nW" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619667427733, "user_tz": 300, "elapsed": 42916, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="75e6605e-dc11-4a64-8bb1-182a8504ccab" # Mostrar el progreso del entrenamiento imprimiendo un solo punto para cada época completada class PrintDots(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0: print("") print(".", end = "") # Entrenamiento Epocas = 500 History = Modelo.fit(Normed_Train, y_train, epochs = Epocas, validation_split = 0.2, verbose = 0, callbacks = [PrintDots()] ) # + colab={"base_uri": "https://localhost:8080/", "height": 553} id="ahSuwwAFXKHe" executionInfo={"status": "ok", "timestamp": 1619667428282, "user_tz": 300, "elapsed": 43464, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="eb68ff8a-6b57-4cfd-d2d0-94c4d396cb13" def History_Plot(historia): hist = pd.DataFrame(historia.history) hist["epoch"] = historia.epoch plt.figure() plt.xlabel("Época"); plt.ylabel("Error Absoluto Medio") plt.plot(hist["epoch"], hist["mae"], label = "Error en el entrenamiento") plt.plot(hist["epoch"], hist["val_mae"], label 
= "Valor del error") plt.ylim([0,5]) plt.legend() plt.figure() plt.xlabel("Época"); plt.ylabel("Error Cuadrático Medio") plt.plot(hist["epoch"], hist["mse"], label = "Error en el entrenamiento") plt.plot(hist["epoch"], hist["val_mse"], label = "Valor del error") plt.legend() plt.ylim([0,20]) plt.show() History_Plot(History) # + [markdown] id="ft603OzXuEZC" # ## *Predicciones* # # Finalmente, se predice los valores de MEDV utilizando datos en el conjunto de pruebas: # + id="Xe7RXH3N3CWU" colab={"base_uri": "https://localhost:8080/", "height": 279} executionInfo={"status": "ok", "timestamp": 1619667428688, "user_tz": 300, "elapsed": 43868, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="0712af4d-eebd-4306-892e-5a8f65a63eda" y_predictions = Modelo.predict(Normed_Test).flatten() plt.scatter(y_test, y_predictions) plt.xlabel("True Values [MEDV]") plt.ylabel("Predictions [MEDV]") plt.axis("equal") plt.axis("square") plt.xlim([0, plt.xlim()[1]]) plt.ylim([0, plt.ylim()[1]]) _ = plt.plot([-100, 100], [-100, 100]) # + [markdown] id="19wyogbOSU5t" # + Parece que nuestro modelo predice razonablemente bien. Echemos un vistazo a la distribución de errores. # + id="f-OHX4DiXd8x" colab={"base_uri": "https://localhost:8080/", "height": 279} executionInfo={"status": "ok", "timestamp": 1619667428874, "user_tz": 300, "elapsed": 44051, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="55ca5f52-1bdb-4f1c-b8bb-ccc5e678181d" Errores = y_predictions - y_test plt.hist(Errores, bins = 25) plt.xlabel("Prediction Error [MEDV]") _ = plt.ylabel("Count") # + [markdown] id="m0CB5tBjSU5w" # + No es del todo gaussiano, pero podríamos esperar eso porque el número de muestras es muy pequeño. # + [markdown] id="vgGQuV-yqYZH" # ## *Conclusión* # + [markdown] id="SGxKULw6tIot" # Este cuaderno introdujo algunas técnicas para manejar un problema de regresión. # # + El error cuadrático medio (MSE) es una función de pérdida común utilizada para problemas de regresión (*se utilizan diferentes funciones de pérdida para problemas de clasificación*). # + Del mismo modo, las métricas de evaluación utilizadas para la regresión difieren de la clasificación. Una métrica de regresión común es el error absoluto medio (MAE). # + Cuando las características de datos de entrada numéricos tienen valores con diferentes rangos, cada característica debe escalarse independientemente al mismo rango. # + Si no hay muchos datos de entrenamiento, una técnica es preferir una red pequeña con pocas capas ocultas para evitar el sobreajuste. # + La detención temprana es una técnica útil para evitar el sobreajuste. # + id="jakUHzwVlLQo" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619667429121, "user_tz": 300, "elapsed": 44296, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="c2cdd20f-ea46-4437-e4dc-18c0c52fdef0" Loss, MAE, MSE = Modelo.evaluate(Normed_Test, y_test, verbose = 2) print("Error Absoluto Medio en el Conjunto de Testeo: {:5.2f} MEDV".format(MAE)) # + [markdown] id="Yu1VZ0Y4gFFb" # ___ # # # **BREAST CANCER WISCONSIN (DIAGNOSTIC)** # + [markdown] id="D11qlYZcgFUh" # <center> # <img src = "https://static.vecteezy.com/system/resources/previews/000/335/278/non_2x/breast-cancer-awareness-month-banner-vector.jpg" # alt = "Cáncer de mama." 
# height = "380" # title = "Cáncer de mama."> # </center> # # > Acerca del conjunto de datos: # # + Los conjuntos de datos de cáncer de mama están disponibles en el repositorio de aprendizaje automático UCI mantenido por la Universidad de California, Irvine. # + El conjunto de datos contiene $569$ muestras de células tumorales malignas y benignas. # + Las dos primeras columnas del conjunto de datos almacenan los números de identificación únicos de las muestras y el diagnóstico correspondiente (*M = maligno, B = benigno*), respectivamente. # + Las columnas $3$ a $32$ contienen $30$ características de valor real que se han calculado a partir de imágenes digitalizadas de los núcleos celulares, que se pueden utilizar para construir un modelo para predecir si un tumor es benigno o maligno. # + 1 = Maligno (*canceroso*) - Presente (M) # + 0 = Benigno (*no canceroso*) -Ausente (B) # # Se calculan diez características de valor real para cada núcleo celular: # + Radio (*media de las distancias desde el centro hasta los puntos del # perímetro*). # + Textura (*desviación estándar de los valores de escala de grises*). # + Perímetro. # + Área. # + Suavidad (*variación local en longitudes de radio*). # + Compacidad (*perímetro^2/área - 1.0*). # + Concavidad (*gravedad de las porciones cóncavas del contorno*). # + Puntos cóncavos (*número de porciones cóncavas del contorno*). # + Simetría. # + Dimensión fractal ("aproximación de la línea costera" - 1). # + [markdown] id="zaF0CLZbRaNi" # ## *Paquetes y Librerías* # + colab={"base_uri": "https://localhost:8080/"} id="BA1fqGPRRaNi" executionInfo={"status": "ok", "timestamp": 1619667429122, "user_tz": 300, "elapsed": 44295, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="f119b2df-a0d3-4ce7-9f4f-1c2b20af7537" import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import tensorflow as tf print("Versión de Tensorflow = ", tf.__version__) import tensorflow.keras as keras from keras.models import Sequential from keras.layers import Dense, Activation, Flatten, Dropout from sklearn.model_selection import train_test_split from tensorflow.keras.utils import plot_model, to_categorical from sklearn.preprocessing import StandardScaler, LabelEncoder from sklearn.metrics import confusion_matrix # + [markdown] id="4IiyRbahRaNk" # ## *Lectura de los Datos* # + colab={"base_uri": "https://localhost:8080/", "height": 241} id="fhiSjfSTUQtV" executionInfo={"status": "ok", "timestamp": 1619667429123, "user_tz": 300, "elapsed": 44293, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="6e7ea170-2aa0-4248-ffde-de6c04b916ad" Data = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/DATA/Breast_Cancer_Wisconsin.csv") del Data["Unnamed: 32"] Data.head() # + id="j9r04perZ_P6" executionInfo={"status": "ok", "timestamp": 1619667429326, "user_tz": 300, "elapsed": 44493, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} # Convirtiendo los objetos a tensores numpy x = Data.iloc[:,2:].values y = Data.iloc[:,1].values # Recodificando la variable objetivo y = LabelEncoder().fit_transform(y) # Dividiendo el conjunto en entrenamiento y prueba X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 0.1, random_state = 2021) # + [markdown] id="n010HuOtRaNm" # ## *Normalización de los Datos* # + id="NKrW2aYQ8a9P" executionInfo={"status": "ok", "timestamp": 1619667429327, "user_tz": 300, "elapsed": 44492, "user": 
{"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} Scaler = StandardScaler() # Escala training y test X_train = Scaler.fit_transform(X_train) X_test = Scaler.transform(X_test) # + [markdown] id="fZkM-FBNRaNo" # ## *Construcción del Modelo* # + id="ZsEz4MEBNtyB" executionInfo={"status": "ok", "timestamp": 1619667429327, "user_tz": 300, "elapsed": 44491, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} Modelo = Sequential( [ Dense(units = 16, activation = "relu", input_shape = (30, )), Dropout(0.1), Dense(units = 16, activation = "relu"), Dropout(0.1), Dense(units = 1, activation = "sigmoid") ] ) # + colab={"base_uri": "https://localhost:8080/", "height": 950} id="EspB6OkSRaNo" executionInfo={"status": "ok", "timestamp": 1619667429766, "user_tz": 300, "elapsed": 44928, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="f8483c3d-e57f-483a-b3a8-08c218a7d407" # Resumen y plot del modelo Modelo.summary() plot_model(Modelo, show_shapes = True, show_dtype = True) # + id="f4-9O51uRaNp" executionInfo={"status": "ok", "timestamp": 1619667429767, "user_tz": 300, "elapsed": 44927, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} # Compilación Modelo.compile(loss = "binary_crossentropy", optimizer = "adam", metrics = ["accuracy"] ) # + colab={"base_uri": "https://localhost:8080/"} id="08fJVARgRaNp" executionInfo={"status": "ok", "timestamp": 1619667442411, "user_tz": 300, "elapsed": 57569, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="4e98409d-b404-4d93-ba8d-ac63967f7a47" # Mostrar el progreso del entrenamiento imprimiendo un solo punto para cada época completada class PrintDots(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0: print("") print(".", end = "") # Entrenamiento Epocas = 200 History = Modelo.fit(X_train, y_train, epochs = Epocas, batch_size = 32, validation_split = 0.2, verbose = 0, callbacks = [PrintDots()] ) # + colab={"base_uri": "https://localhost:8080/", "height": 577} id="dFDD0C-6RaNp" executionInfo={"status": "ok", "timestamp": 1619667442997, "user_tz": 300, "elapsed": 58152, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="c6ebe0e3-421f-4ce8-b097-a57515f73208" def Metrics_Plot(historia, metrica): Train_metrics = historia.history[metrica] Valid_metrics = historia.history["val_" + metrica] Epocas = range(1, len(Train_metrics) + 1) plt.plot(Epocas, Train_metrics, 'bo--', markersize = 1) plt.plot(Epocas, Valid_metrics, 'ro-', markersize = 1) plt.title("Entrenamiento y Validación " + metrica) plt.xlabel("Épocas"); plt.ylabel(metrica) plt.legend(["Train_" + metrica, "val_" + metrica]) plt.show() Metrics_Plot(History, "loss") Metrics_Plot(History, "accuracy") # + [markdown] id="MkDmJEJNO3rA" # ## *Predicción* # + id="SChYpP2U39jO" executionInfo={"status": "ok", "timestamp": 1619667443219, "user_tz": 300, "elapsed": 58371, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} # Predecir los resultados en el conjunto de prueba y_pred = Modelo.predict(X_test) y_pred[y_pred > 0.5] = 1 y_pred[y_pred <=0.5] = 0 # + [markdown] id="L_O2DOuiO-rb" # ## *Matriz de Confusión* # + id="cAswJ1Mo39jO" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619667443220, "user_tz": 300, "elapsed": 58371, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": 
"06942422129677613067"}} outputId="6626a5f7-2a0f-4ab9-c396-bae7838b2c24" CM = confusion_matrix(y_test, y_pred) print("Nuestra precisión es {}%".format(((CM[0][0] + CM[1][1])/y_test.shape[0])*100)) # + colab={"base_uri": "https://localhost:8080/", "height": 282} id="6cEX9VkdPaB8" executionInfo={"status": "ok", "timestamp": 1619667443221, "user_tz": 300, "elapsed": 58370, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06942422129677613067"}} outputId="b19dca4d-6048-4a01-fe70-2be27146b354" sns.heatmap(CM, annot = True)
NOTEBOOKS/Classification_Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="g1Crjf89r-g8" colab_type="code" colab={} import numpy as np import tensorflow as tf import matplotlib.pyplot as plt # %matplotlib inline # + id="FPD9We4Rt-LA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 530} outputId="eb1ca966-67bd-4ee8-bc69-174464cffa77" executionInfo={"status": "ok", "timestamp": 1558917801763, "user_tz": 240, "elapsed": 3316, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-SDFi60AR5z8/AAAAAAAAAAI/AAAAAAAAABM/CA8giw-9K4M/s64/photo.jpg", "userId": "17902127106864715936"}} from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', one_hot=True) # this is deprecated. # + id="2u5RYXgYudz_" colab_type="code" colab={} image_count = 10 data = mnist.train.next_batch(image_count) # + [markdown] id="n1YKeY5Vv7qc" colab_type="text" # ![](https://www.easy-tensorflow.com/images/NN/linear_classifier.png) # + id="b7SPByAewDd-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6c929fb9-7858-43cb-f47f-4dcfc9ef550f" executionInfo={"status": "ok", "timestamp": 1558917957728, "user_tz": 240, "elapsed": 281, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-SDFi60AR5z8/AAAAAAAAAAI/AAAAAAAAABM/CA8giw-9K4M/s64/photo.jpg", "userId": "17902127106864715936"}} data[0].shape # + id="TG7cGehYu5Zj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 303} outputId="6da86d9f-c7d0-4096-ff69-aff0ad808cff" executionInfo={"status": "ok", "timestamp": 1558917975599, "user_tz": 240, "elapsed": 670, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-SDFi60AR5z8/AAAAAAAAAAI/AAAAAAAAABM/CA8giw-9K4M/s64/photo.jpg", "userId": "17902127106864715936"}} images = data[0] labels = data[1] for index, image in enumerate(images): print('Label:', labels[index]) print('Digit in the image', np.argmax(labels[index])) plt.imshow(image.reshape(28,28),cmap='gray') plt.show() break # + [markdown] id="XK5GD0YssTyd" colab_type="text" # # Optimization # + id="AKwdj4s4teQo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="d1ec74a3-a2f4-4178-afeb-49c43dda4f29" executionInfo={"status": "ok", "timestamp": 1558918126272, "user_tz": 240, "elapsed": 771, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-SDFi60AR5z8/AAAAAAAAAAI/AAAAAAAAABM/CA8giw-9K4M/s64/photo.jpg", "userId": "17902127106864715936"}} MNIST = input_data.read_data_sets("MNIST_data", one_hot=True) # + id="ViMiDNA7wteA" colab_type="code" colab={} # Define parameters for linear model learning_rate = 0.01 batch_size = 128 n_epochs = 25 # Create placeholders X = tf.placeholder(tf.float32, [batch_size, 784], name="image") Y = tf.placeholder(tf.float32, [batch_size, 10], name="label") # Create weights and bias w = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name="weights") b = tf.Variable(tf.zeros([1,10]), name='bias') # calculate scores logits = tf.matmul(X, w) + b # Entropy cost function and loss entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y) loss = tf.reduce_mean(entropy) # Define optimizer optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss) # + id="e7jGLNbFwv25" colab_type="code" 
colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fad40ada-e01a-4457-f157-8dd00f9bacbf" executionInfo={"status": "ok", "timestamp": 1558918761273, "user_tz": 240, "elapsed": 365335, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-SDFi60AR5z8/AAAAAAAAAAI/AAAAAAAAABM/CA8giw-9K4M/s64/photo.jpg", "userId": "17902127106864715936"}} # Run optimization and test loss_history = [] acc_history = [] init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) n_batches = int(MNIST.train.num_examples/batch_size) for i in range(n_epochs): for _ in range(n_batches): X_batch, Y_batch = MNIST.train.next_batch(batch_size) _, loss_value = sess.run([optimizer, loss], feed_dict={X: X_batch, Y:Y_batch}) loss_history.append(loss_value) # Check validation accuracy n_v_batches = int(MNIST.validation.num_examples/batch_size) total_correct_preds = 0 for j in range(n_v_batches): X_batch, Y_batch = MNIST.validation.next_batch(batch_size) _, loss_batch, logits_batch = sess.run([optimizer, loss, logits], feed_dict={X: X_batch, Y:Y_batch}) preds = tf.nn.softmax(logits_batch) correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1)) accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) total_correct_preds += sess.run(accuracy) validation_accuracy = total_correct_preds/MNIST.validation.num_examples acc_history.append(validation_accuracy) # Test the model n_batches = int(MNIST.test.num_examples/batch_size) total_correct_preds = 0 for i in range(n_batches): X_batch, Y_batch = MNIST.test.next_batch(batch_size) logits_batch = sess.run(logits, feed_dict={X: X_batch, Y:Y_batch}) preds = tf.nn.softmax(logits_batch) correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1)) accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) total_correct_preds += sess.run(accuracy) print("Test accuracy is {0}".format(total_correct_preds/MNIST.test.num_examples)) # + id="NBnkBJS7xtRz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 621} outputId="fe3a24dd-e5ce-4403-e7f9-87058faa6e8b" executionInfo={"status": "ok", "timestamp": 1558918855272, "user_tz": 240, "elapsed": 1060, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-SDFi60AR5z8/AAAAAAAAAAI/AAAAAAAAABM/CA8giw-9K4M/s64/photo.jpg", "userId": "17902127106864715936"}} plt.subplot(2,1,1) plt.plot(loss_history, '-o', label='Loss value') plt.title('Training Loss') plt.xlabel('Epoch') plt.ylabel('Loss Value') plt.legend(ncol=2, loc='upper right') plt.subplot(2,1,2) plt.plot(acc_history, '-o', label='Accuracy value') plt.title('Training Accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.legend(ncol=2, loc='lower right') plt.gcf().set_size_inches(10, 10) plt.show() # + id="5F-CRbTAzfXw" colab_type="code" colab={}
Resources/Colab/DL/tf_mnist_lin_regr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Tce3stUlHN0L" # ##### Copyright 2020 The TensorFlow Authors. # + cellView="form" id="tuOe1ymfHZPu" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="qFdPvlXBOdUN" # # Introduction to gradients and automatic differentiation # + [markdown] id="MfBg1C5NB3X0" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/autodiff"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/autodiff.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/autodiff.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/autodiff.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="r6P32iYYV27b" # ## Automatic Differentiation and Gradients # # [Automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation) # is useful for implementing machine learning algorithms such as # [backpropagation](https://en.wikipedia.org/wiki/Backpropagation) for training # neural networks. # # In this guide, you will explore ways to compute gradients with TensorFlow, especially in [eager execution](eager.ipynb). # + [markdown] id="MUXex9ctTuDB" # ## Setup # + id="IqR2PQG4ZaZ0" import numpy as np import matplotlib.pyplot as plt import tensorflow as tf # + [markdown] id="xHxb-dlhMIzW" # ## Computing gradients # # To differentiate automatically, TensorFlow needs to remember what operations happen in what order during the *forward* pass. Then, during the *backward pass*, TensorFlow traverses this list of operations in reverse order to compute gradients. # + [markdown] id="1CLWJl0QliB0" # ## Gradient tapes # # TensorFlow provides the `tf.GradientTape` API for automatic differentiation; that is, computing the gradient of a computation with respect to some inputs, usually `tf.Variable`s. # TensorFlow "records" relevant operations executed inside the context of a `tf.GradientTape` onto a "tape". TensorFlow then uses that tape to compute the gradients of a "recorded" computation using [reverse mode differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation). 
# # Here is a simple example: # + id="Xq9GgTCP7a4A" x = tf.Variable(3.0) with tf.GradientTape() as tape: y = x**2 # + [markdown] id="CR9tFAP_7cra" # Once you've recorded some operations, use `GradientTape.gradient(target, sources)` to calculate the gradient of some target (often a loss) relative to some source (often the model's variables): # + id="LsvrwF6bHroC" # dy = 2x * dx dy_dx = tape.gradient(y, x) dy_dx.numpy() # + [markdown] id="Q2_aqsO25Vx1" # The above example uses scalars, but `tf.GradientTape` works as easily on any tensor: # + id="vacZ3-Ws5VdV" w = tf.Variable(tf.random.normal((3, 2)), name='w') b = tf.Variable(tf.zeros(2, dtype=tf.float32), name='b') x = [[1., 2., 3.]] with tf.GradientTape(persistent=True) as tape: y = x @ w + b loss = tf.reduce_mean(y**2) # + [markdown] id="i4eXOkrQ-9Pb" # To get the gradient of `loss` with respect to both variables, you can pass both as sources to the `gradient` method. The tape is flexible about how sources are passed and will accept any nested combination of lists or dictionaries and return the gradient structured the same way (see `tf.nest`). # + id="luOtK1Da_BR0" [dl_dw, dl_db] = tape.gradient(loss, [w, b]) # + [markdown] id="Ei4iVXi6qgM7" # The gradient with respect to each source has the shape of the source: # + id="aYbWRFPZqk4U" print(w.shape) print(dl_dw.shape) # + [markdown] id="dI_SzxHsvao1" # Here is the gradient calculation again, this time passing a dictionary of variables: # + id="d73cY6NOuaMd" my_vars = { 'w': w, 'b': b } grad = tape.gradient(loss, my_vars) grad['b'] # + [markdown] id="HZ2LvHifEMgO" # ## Gradients with respect to a model # # It's common to collect `tf.Variables` into a `tf.Module` or one of its subclasses (`layers.Layer`, `keras.Model`) for [checkpointing](checkpoint.ipynb) and [exporting](saved_model.ipynb). # # In most cases, you will want to calculate gradients with respect to a model's trainable variables. Since all subclasses of `tf.Module` aggregate their variables in the `Module.trainable_variables` property, you can calculate these gradients in a few lines of code: # + id="JvesHtbQESc-" layer = tf.keras.layers.Dense(2, activation='relu') x = tf.constant([[1., 2., 3.]]) with tf.GradientTape() as tape: # Forward pass y = layer(x) loss = tf.reduce_mean(y**2) # Calculate gradients with respect to every trainable variable grad = tape.gradient(loss, layer.trainable_variables) # + id="PR_ezr6UFrpI" for var, g in zip(layer.trainable_variables, grad): print(f'{var.name}, shape: {g.shape}') # + [markdown] id="f6Gx6LS714zR" # <a id="watches"></a> # # ## Controlling what the tape watches # + [markdown] id="N4VlqKFzzGaC" # The default behavior is to record all operations after accessing a trainable `tf.Variable`. The reasons for this are: # # * The tape needs to know which operations to record in the forward pass to calculate the gradients in the backwards pass. # * The tape holds references to intermediate outputs, so you don't want to record unnecessary operations. # * The most common use case involves calculating the gradient of a loss with respect to all a model's trainable variables. # # For example, the following fails to calculate a gradient because the `tf.Tensor` is not "watched" by default, and the `tf.Variable` is not trainable: # + id="Kj9gPckdB37a" # A trainable variable x0 = tf.Variable(3.0, name='x0') # Not trainable x1 = tf.Variable(3.0, name='x1', trainable=False) # Not a Variable: A variable + tensor returns a tensor. 
x2 = tf.Variable(2.0, name='x2') + 1.0 # Not a variable x3 = tf.constant(3.0, name='x3') with tf.GradientTape() as tape: y = (x0**2) + (x1**2) + (x2**2) grad = tape.gradient(y, [x0, x1, x2, x3]) for g in grad: print(g) # + [markdown] id="RkcpQnLgNxgi" # You can list the variables being watched by the tape using the `GradientTape.watched_variables` method: # + id="hwNwjW1eAkib" [var.name for var in tape.watched_variables()] # + [markdown] id="NB9I1uFvB4tf" # `tf.GradientTape` provides hooks that give the user control over what is or is not watched. # # To record gradients with respect to a `tf.Tensor`, you need to call `GradientTape.watch(x)`: # + id="tVN1QqFRDHBK" x = tf.constant(3.0) with tf.GradientTape() as tape: tape.watch(x) y = x**2 # dy = 2x * dx dy_dx = tape.gradient(y, x) print(dy_dx.numpy()) # + [markdown] id="qxsiYnf2DN8K" # Conversely, to disable the default behavior of watching all `tf.Variables`, set `watch_accessed_variables=False` when creating the gradient tape. This calculation uses two variables, but only connects the gradient for one of the variables: # + id="7QPzwWvSEwIp" x0 = tf.Variable(0.0) x1 = tf.Variable(10.0) with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(x1) y0 = tf.math.sin(x0) y1 = tf.nn.softplus(x1) y = y0 + y1 ys = tf.reduce_sum(y) # + [markdown] id="TRduLbE1H2IJ" # Since `GradientTape.watch` was not called on `x0`, no gradient is computed with respect to it: # + id="e6GM-3evH1Sz" # dys/dx1 = exp(x1) / (1 + exp(x1)) = sigmoid(x1) grad = tape.gradient(ys, {'x0': x0, 'x1': x1}) print('dy/dx0:', grad['x0']) print('dy/dx1:', grad['x1'].numpy()) # + [markdown] id="2g1nKB6P-OnA" # ## Intermediate results # # You can also request gradients of the output with respect to intermediate values computed inside the `tf.GradientTape` context. # + id="7XaPRAwUyYms" x = tf.constant(3.0) with tf.GradientTape() as tape: tape.watch(x) y = x * x z = y * y # Use the tape to compute the gradient of z with respect to the # intermediate value y. # dz_dx = 2 * y, where y = x ** 2 print(tape.gradient(z, y).numpy()) # + [markdown] id="ISkXuY7YzIcS" # By default, the resources held by a `GradientTape` are released as soon as the `GradientTape.gradient` method is called. To compute multiple gradients over the same computation, create a gradient tape with `persistent=True`. This allows multiple calls to the `gradient` method as resources are released when the tape object is garbage collected. For example: # + id="zZaCm3-9zVCi" x = tf.constant([1, 3.0]) with tf.GradientTape(persistent=True) as tape: tape.watch(x) y = x * x z = y * y print(tape.gradient(z, x).numpy()) # 108.0 (4 * x**3 at x = 3) print(tape.gradient(y, x).numpy()) # 6.0 (2 * x) # + id="j8bv_jQFg6CN" del tape # Drop the reference to the tape # + [markdown] id="O_ZY-9BUB7vX" # ## Notes on performance # # * There is a tiny overhead associated with doing operations inside a gradient tape context. For most eager execution this will not be a noticeable cost, but you should still use tape context around the areas only where it is required. # # * Gradient tapes use memory to store intermediate results, including inputs and outputs, for use during the backwards pass. # # For efficiency, some ops (like `ReLU`) don't need to keep their intermediate results and they are pruned during the forward pass. However, if you use `persistent=True` on your tape, *nothing is discarded* and your peak memory usage will be higher. 
# + [markdown] id="9dLBpZsJebFq" # ## Gradients of non-scalar targets # + [markdown] id="7pldU9F5duP2" # A gradient is fundamentally an operation on a scalar. # + id="qI0sDV_WeXBb" x = tf.Variable(2.0) with tf.GradientTape(persistent=True) as tape: y0 = x**2 y1 = 1 / x print(tape.gradient(y0, x).numpy()) print(tape.gradient(y1, x).numpy()) # + [markdown] id="COEyYp34fxj4" # Thus, if you ask for the gradient of multiple targets, the result for each source is: # # * The gradient of the sum of the targets, or equivalently # * The sum of the gradients of each target. # + id="o4a6_YOcfWKS" x = tf.Variable(2.0) with tf.GradientTape() as tape: y0 = x**2 y1 = 1 / x print(tape.gradient({'y0': y0, 'y1': y1}, x).numpy()) # + [markdown] id="uvP-mkBMgbym" # Similarly, if the target(s) are not scalar the gradient of the sum is calculated: # + id="DArPWqsSh5un" x = tf.Variable(2.) with tf.GradientTape() as tape: y = x * [3., 4.] print(tape.gradient(y, x).numpy()) # + [markdown] id="flDbx68Zh5Lb" # This makes it simple to take the gradient of the sum of a collection of losses, or the gradient of the sum of an element-wise loss calculation. # # If you need a separate gradient for each item, refer to [Jacobians](advanced_autodiff.ipynb#jacobians). # + [markdown] id="iwFswok8RAly" # In some cases you can skip the Jacobian. For an element-wise calculation, the gradient of the sum gives the derivative of each element with respect to its input-element, since each element is independent: # + id="JQvk_jnMmTDS" x = tf.linspace(-10.0, 10.0, 200+1) with tf.GradientTape() as tape: tape.watch(x) y = tf.nn.sigmoid(x) dy_dx = tape.gradient(y, x) # + id="e_f2QgDPmcPE" plt.plot(x, y, label='y') plt.plot(x, dy_dx, label='dy/dx') plt.legend() _ = plt.xlabel('x') # + [markdown] id="6kADybtQzYj4" # ## Control flow # # Because a gradient tape records operations as they are executed, Python control flow is naturally handled (for example, `if` and `while` statements). # # Here a different variable is used on each branch of an `if`. The gradient only connects to the variable that was used: # + id="ciFLizhrrjy7" x = tf.constant(1.0) v0 = tf.Variable(2.0) v1 = tf.Variable(2.0) with tf.GradientTape(persistent=True) as tape: tape.watch(x) if x > 0.0: result = v0 else: result = v1**2 dv0, dv1 = tape.gradient(result, [v0, v1]) print(dv0) print(dv1) # + [markdown] id="HKnLaiapsjeP" # Just remember that the control statements themselves are not differentiable, so they are invisible to gradient-based optimizers. # # Depending on the value of `x` in the above example, the tape either records `result = v0` or `result = v1**2`. The gradient with respect to `x` is always `None`. # + id="8k05WmuAwPm7" dx = tape.gradient(result, x) print(dx) # + [markdown] id="egypBxISAHhx" # ## Getting a gradient of `None` # # When a target is not connected to a source you will get a gradient of `None`. # # + id="CU185WDM81Ut" x = tf.Variable(2.) y = tf.Variable(3.) with tf.GradientTape() as tape: z = y * y print(tape.gradient(z, x)) # + [markdown] id="sZbKpHfBRJym" # Here `z` is obviously not connected to `x`, but there are several less-obvious ways that a gradient can be disconnected. # + [markdown] id="eHDzDOiQ8xmw" # ### 1. Replaced a variable with a tensor # # In the section on ["controlling what the tape watches"](#watches) you saw that the tape will automatically watch a `tf.Variable` but not a `tf.Tensor`. # # One common error is to inadvertently replace a `tf.Variable` with a `tf.Tensor`, instead of using `Variable.assign` to update the `tf.Variable`. 
Here is an example: # + id="QPKY4Tn9zX7_" x = tf.Variable(2.0) for epoch in range(2): with tf.GradientTape() as tape: y = x+1 print(type(x).__name__, ":", tape.gradient(y, x)) x = x + 1 # This should be `x.assign_add(1)` # + [markdown] id="3gwZKxgA97an" # ### 2. Did calculations outside of TensorFlow # # The tape can't record the gradient path if the calculation exits TensorFlow. # For example: # + id="jmoLCDJb_yw1" x = tf.Variable([[1.0, 2.0], [3.0, 4.0]], dtype=tf.float32) with tf.GradientTape() as tape: x2 = x**2 # This step is calculated with NumPy y = np.mean(x2, axis=0) # Like most ops, reduce_mean will cast the NumPy array to a constant tensor # using `tf.convert_to_tensor`. y = tf.reduce_mean(y, axis=0) print(tape.gradient(y, x)) # + [markdown] id="p3YVfP3R-tp7" # ### 3. Took gradients through an integer or string # # Integers and strings are not differentiable. If a calculation path uses these data types there will be no gradient. # # Nobody expects strings to be differentiable, but it's easy to accidentally create an `int` constant or variable if you don't specify the `dtype`. # + id="9jlHXHqfASU3" x = tf.constant(10) with tf.GradientTape() as g: g.watch(x) y = x * x print(g.gradient(y, x)) # + [markdown] id="RsdP_mTHX9L1" # TensorFlow doesn't automatically cast between types, so, in practice, you'll often get a type error instead of a missing gradient. # + [markdown] id="WyAZ7C8qCEs6" # ### 4. Took gradients through a stateful object # # State stops gradients. When you read from a stateful object, the tape can only observe the current state, not the history that lead to it. # # A `tf.Tensor` is immutable. You can't change a tensor once it's created. It has a _value_, but no _state_. All the operations discussed so far are also stateless: the output of a `tf.matmul` only depends on its inputs. # # A `tf.Variable` has internal state—its value. When you use the variable, the state is read. It's normal to calculate a gradient with respect to a variable, but the variable's state blocks gradient calculations from going farther back. For example: # # + id="C1tLeeRFE479" x0 = tf.Variable(3.0) x1 = tf.Variable(0.0) with tf.GradientTape() as tape: # Update x1 = x1 + x0. x1.assign_add(x0) # The tape starts recording from x1. y = x1**2 # y = (x1 + x0)**2 # This doesn't work. print(tape.gradient(y, x0)) #dy/dx0 = 2*(x1 + x0) # + [markdown] id="xKA92-dqF2r-" # Similarly, `tf.data.Dataset` iterators and `tf.queue`s are stateful, and will stop all gradients on tensors that pass through them. # + [markdown] id="HHvcDGIbOj2I" # ## No gradient registered # + [markdown] id="aoc-A6AxVqry" # Some `tf.Operation`s are **registered as being non-differentiable** and will return `None`. Others have **no gradient registered**. # # The `tf.raw_ops` page shows which low-level ops have gradients registered. # # If you attempt to take a gradient through a float op that has no gradient registered the tape will throw an error instead of silently returning `None`. This way you know something has gone wrong. # # For example, the `tf.image.adjust_contrast` function wraps `raw_ops.AdjustContrastv2`, which could have a gradient but the gradient is not implemented: # # + id="HSb20FXc_V0U" image = tf.Variable([[[0.5, 0.0, 0.0]]]) delta = tf.Variable(0.1) with tf.GradientTape() as tape: new_image = tf.image.adjust_contrast(image, delta) try: print(tape.gradient(new_image, [image, delta])) assert False # This should not happen. 
except LookupError as e: print(f'{type(e).__name__}: {e}') # + [markdown] id="pDoutjzATiEm" # If you need to differentiate through this op, you'll either need to implement the gradient and register it (using `tf.RegisterGradient`) or re-implement the function using other ops. # + [markdown] id="GCTwc_dQXp2W" # ## Zeros instead of None # + [markdown] id="TYDrVogA89eA" # In some cases it would be convenient to get 0 instead of `None` for unconnected gradients. You can decide what to return when you have unconnected gradients using the `unconnected_gradients` argument: # + id="U6zxk1sf9Ixx" x = tf.Variable([2., 2.]) y = tf.Variable(3.) with tf.GradientTape() as tape: z = y**2 print(tape.gradient(z, x, unconnected_gradients=tf.UnconnectedGradients.ZERO))
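# + [markdown]
# As a hedged aside on the "No gradient registered" section above: instead of registering a
# gradient for a raw op with `tf.RegisterGradient`, a Python-level function can carry its own
# hand-written gradient via `tf.custom_gradient`. A minimal sketch (the function and its gradient
# here are illustrative, not TensorFlow's implementation of any existing op):

# +
@tf.custom_gradient
def scale(x, k):
  # Forward pass: y = x * k
  def grad(upstream):
    # dy/dx = k and dy/dk = x, each scaled by the upstream gradient
    return upstream * k, upstream * x
  return x * k, grad

x = tf.constant(2.0)
k = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch([x, k])
  y = scale(x, k)

print(tape.gradient(y, [x, k]))
# -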
site/en/guide/autodiff.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# <img src="Logo.png" width="100" align="left"/>
#
#
# <center> Unit 3 Project </center>
#
# <center> First section : Data cleaning </center>
#
# In this notebook you will clean your dataset and make sure it is fully ready for modeling.
#
# The dataset used is the [Hepatitis C dataset](https://www.kaggle.com/fedesoriano/hepatitis-c-dataset)

# ## Data preparation
# As a first step we need to prepare the data:
#
# 1. Read the dataset as a data frame (the file name is HepatitisCdata.csv)
# 2. Here are the columns of your dataset:
# 1) X (Patient ID/No.)
# 2) Category (diagnosis) (values: '0=Blood Donor', '0s=suspect Blood Donor', '1=Hepatitis', '2=Fibrosis', '3=Cirrhosis') (the target)
# 3) Age (in years)
# 4) Sex (f, m)
# Attributes 5 to 14 refer to laboratory data:
# 5) ALB : Albumin Blood Test
# 6) ALP : Alkaline phosphatase
# 7) ALT : Alanine Transaminase
# 8) AST : Aspartate Transaminase
# 9) BIL : Bilirubin
# 10) CHE : Acetylcholinesterase
# 11) CHOL : Cholesterol
# 12) CREA : Creatinine
# 13) GGT : Gamma-Glutamyl Transferase
# 14) PROT : Proteins
#
# 3. Remember that your model only accepts numbers, so make sure you deal properly with the missing values and the data types, and justify your choices
#
# 4. Make sure the dataset shape in the end is: 615 rows and 14 columns
#
# 5. Once finished, save the cleaned dataset as a "clean_HepatitisC.csv" file
#

import pandas as pd
import numpy as np

#To-Do: read the dataset
data = pd.read_csv("HepatitisCdata.csv")
data.info()

#To-Do Start investigating the data types and correcting them
# Provide explanation about each step in Markdown cells
data.head()

# 1. Dealing with the 'Category' column: converting the type from object to int

# We notice that the first characters are different:
data.Category.unique()

# - Replacing each value with its first character:
# '0s=suspect Blood Donor' is first mapped to '4' so that it does not collapse onto the same character '0' as '0=Blood Donor'
data['Category'].replace({'0s=suspect Blood Donor': '4'},inplace=True)
data['Category'] = data['Category'].astype(str).str[0]

# change the type to int
data['Category'] = data['Category'].astype("int")

# 2. Dealing with the 'Sex' column

# Replacing 'm' with 0 and 'f' with 1
data['Sex'].replace({'m': 0},inplace=True)
data['Sex'].replace({'f': 1},inplace=True)

# - Checking the changes
data.info()

data.Category.unique()

data.Sex.unique()

# > Data types are all numeric now! Next we need to deal with missing values for the feature columns
#

#To-Do list all the columns that contain missing values along with their counts
# Which columns have missing values?
data.columns[data.isna().any()]

# +
# To-Do Start providing solutions for each column that has missing data
# Treat each case separately
# Hint : no data row should be deleted
# Provide evidence that you filled those missing values after each step
# -

data['ALB'].fillna(data['ALB'].mean(), inplace= True)
data['ALB'].isna().sum().sum()

data['ALP'].fillna(data['ALP'].mean(), inplace= True)
data['ALP'].isna().sum().sum()

data['ALT'].fillna(data['ALT'].mean(), inplace= True)
data['ALT'].isna().sum().sum()

data['CHOL'].fillna(data['CHOL'].mean(), inplace= True)
data['CHOL'].isna().sum().sum()

data['PROT'].fillna(data['PROT'].mean(), inplace= True)
data['PROT'].isna().sum().sum()

# Are there any other missing values left?
data.isna().sum().sum()

# > We have no more missing data

# As an optional step we can also rename the first column to 'index' or "ID" instead of "Unnamed: 0"

data.columns

data.rename(columns={'Unnamed: 0': "ID"}, inplace=True)

data.columns.tolist()

# ### 6. Save the clean dataset:

# Save under the name required above ("clean_HepatitisC.csv") so the raw HepatitisCdata.csv file
# is not overwritten, and don't forget to drop the index
data.to_csv('clean_HepatitisC.csv', index=False)
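# As a final sanity check (a small sketch, not required by the assignment): verify that the cleaned
# frame matches the target shape of 615 rows and 14 columns, contains no missing values, and is
# fully numeric before it is used for modeling.

# +
assert data.shape == (615, 14), f"unexpected shape: {data.shape}"
assert data.isna().sum().sum() == 0, "there are still missing values"
assert all(pd.api.types.is_numeric_dtype(t) for t in data.dtypes), "non-numeric column found"
print("Cleaned dataset looks good:", data.shape)
# -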
1. Data cleaning.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .cs // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: .NET (C#) // language: C# // name: .net-csharp // --- // + [markdown] dotnet_interactive={"language": "csharp"} // # What's new? // // - implementation of a saddle point problem into *BoSSS* (e.g. the Poisson equation as a system) // - central-difference-form and strong form // - comparison of both formulations // - advanced: algebraic reduction of the poisson problem // // # Prerequisites // // - implementation of numerical fluxes -> chapter *NumFlux* // - spatial operator -> chapter *SpatialOperator* // // // # 1 Problem statement // // Within this exercise, we are going to investigate // the discretization of a Poisson equation as a system. // Obviously, it is possible to discretize the Poisson equation as a system of // first-order-PDE's, introducing a vector field $\vec{\sigma}$: // $$ // \begin{align} // \vec{\sigma} + \nabla u & = 0, & & \text{ in } \Omega // \\ // \operatorname{div}(\vec{\sigma}) & = g_{\Omega}, & & \text{ in } \Omega // \\ // u & = g_D, & & \text{ on } \Gamma_D \\ // - \vec{\sigma} \cdot \vec{n}_{\partial \Omega} & = g_N, & & \text{ on } \Gamma_N // \end{align} // $$ // resp. in matrix-notation: // $$ // \begin{align*} // \begin{bmatrix} // 1 & \nabla \\ // \operatorname{div} & 0 \\ // \end{bmatrix}\cdot // \begin{bmatrix} // \vec{\sigma}\\ // u // \end{bmatrix}= // \begin{bmatrix} // 0 \\ // g_{\Omega} // \end{bmatrix} // \end{align*} // $$ // This exercise, together with the previous one, // will form the foundation for an incompressible Stokes- resp. Navier-Stokes solver. // // # 2 Solution within the BoSSS framework // + dotnet_interactive={"language": "csharp"} #r "BoSSSpad.dll" using System; using System.Collections.Generic; using System.Linq; using ilPSP; using ilPSP.Utils; using BoSSS.Platform; using BoSSS.Foundation; using BoSSS.Foundation.Grid; using BoSSS.Foundation.Grid.Classic; using BoSSS.Foundation.IO; using BoSSS.Solution; using BoSSS.Solution.Control; using BoSSS.Solution.GridImport; using BoSSS.Solution.Statistic; using BoSSS.Solution.Utils; using BoSSS.Solution.Gnuplot; using BoSSS.Application.BoSSSpad; using BoSSS.Application.XNSE_Solver; using static BoSSS.Application.BoSSSpad.BoSSSshell; Init(); // + dotnet_interactive={"language": "csharp"} using ilPSP.LinSolvers; using BoSSS.Solution.Tecplot; using ilPSP.Connectors.Matlab; // + [markdown] dotnet_interactive={"language": "csharp"} // // # 2.1 Tests on the divergence // // ## Common base-class for $\text{div}$-implementations // We are going to implement two different formulations of the // divergence-operator for which going to show equivalence. // We implement a common base-class for both formulations: // + dotnet_interactive={"language": "csharp"} abstract public class BaseDivergence : BoSSS.Foundation.IEdgeForm, // edge integrals BoSSS.Foundation.IVolumeForm // volume integrals { /// We don't use parameters (e.g. variable viscosity, ...) /// at this point: so the parameter list can be null, resp. 
empty: public IList<string> ParameterOrdering { get { return null; } } /// But we have a vector argument variable, /// $ [ \sigma_1, \sigma_2 ] = \vec{\sigma} $ /// (our trial function): public IList<String> ArgumentOrdering { get { return new string[] { "sigma1", "sigma2" }; } } public TermActivationFlags VolTerms { get { return TermActivationFlags.AllOn; } } public TermActivationFlags InnerEdgeTerms { get { return (TermActivationFlags.AllOn); } } public TermActivationFlags BoundaryEdgeTerms { get { return TermActivationFlags.AllOn; } } /// The following functions cover the actual math. /// For any discretization of the divergence-operator, we have to specify: /// \begin{itemize} /// \item a volume integrand, /// \item an edge integrand for inner edges, i.e. on $ \Gamma_i$, /// \item an edge integrand for boundary edges, /// i.e. on $\partial \Omega$. /// \end{itemize} /// These functions are declared as \code{abstract}, meaning that one has /// to specify them in classes derived from \code{BaseLaplace}. abstract public double VolumeForm(ref CommonParamsVol cpv, double[] U, double[,] GradU, double V, double[] GradV); abstract public double InnerEdgeForm(ref CommonParams inp, double[] U_IN, double[] U_OT, double[,] GradU_IN, double[,] GradU_OT, double V_IN, double V_OT, double[] GradV_IN, double[] GradV_OT); abstract public double BoundaryEdgeForm(ref CommonParamsBnd inp, double[] U_IN, double[,] GradU_IN, double V_IN, double[] GradV_OT); } // + dotnet_interactive={"language": "csharp"} /// We are going to use both, Dirichlet- and Neumann-boundary conditions /// in this exercise; the function \code{IsDirichletBndy} is used to /// specify the type of boundary condition at point \code{X}: static class BndyMap { static public Func<double[],bool> IsDirichletBndy = delegate(double[] X) { double x = X[0]; double y = X[1]; if(Math.Abs(x - (-1.0)) < 1.0e-8) return true; if(Math.Abs(y - (-1.0)) < 1.0e-8) return true; return false; }; } // + [markdown] dotnet_interactive={"language": "csharp"} // ## Formulation (i): Central-difference-form of $\text{div}$ // The implementation of the central-difference form is as follows: // + dotnet_interactive={"language": "csharp"} class Divergence_cendiff : BaseDivergence { /// The volume form is equal to /// $ -\vec{\sigma} \cdot \nabla v$: override public double VolumeForm(ref CommonParamsVol cpv, double[] Sigma, double[,] GradSigma, double V, double[] GradV) { double Acc = 0; for(int d = 0; d < cpv.D; d++) { Acc -= Sigma[d]*GradV[d]; } return Acc; } /// At the cell boundaries, we use a central-difference-flux, /// i.e. $\mean{\vec{\sigma}} \cdot \vec{n}_{\Gamma} \jump{v}$: override public double InnerEdgeForm(ref CommonParams inp, double[] Sigma_IN, double[] Sigma_OT, double[,] GradSigma_IN, double[,] GradSigma_OT, double V_IN, double V_OT, double[] GradV_IN, double[] GradV_OT) { double Acc = 0; for(int d = 0; d < inp.D; d++) { Acc += 0.5*(Sigma_IN[d] + Sigma_OT[d])*inp.Normal[d]*(V_IN - V_OT); } return Acc; } override public double BoundaryEdgeForm(ref CommonParamsBnd inp, double[] Sigma_IN, double[,] GradSigma_IN, double V_IN, double[] GradV_OT) { double Acc = 0; if(BndyMap.IsDirichletBndy(inp.X)) { /// Dirichlet-boundary: by taking the inner value of $\vec{\sigma}$, /// this is a free boundary with respect to $\vec{\sigma}$. 
for(int d = 0; d < inp.D; d++) { Acc += Sigma_IN[d]*inp.Normal[d]*V_IN; } } else { /// Neumann-boundary double gNeu = 0.0; Acc += gNeu*V_IN; } return Acc; } } // - // ## Formulation (ii): 'Strong' form of $\text{div}$: // Here, we use the form // $$ // b(\vec{\sigma},v) = // \oint_{\Gamma \backslash \Gamma_D} // M(v) J(\vec{\sigma}) \cdot \vec{n}_\Gamma // dA // - // \int_{\Omega} \text{div}(\vec{\sigma}) \cdot v dV // $$ // where M,J denote the mean and jump operator, respectively. This is actually the negative divergence, which will be more useful // later on. // + dotnet_interactive={"language": "csharp"} class Divergence_strong : BaseDivergence { /// We have to implement \code{VolumeForm}, /// \emph{InnerEdgeForm} and \code{BoundaryEdgeForm}: override public double VolumeForm(ref CommonParamsVol cpv, double[] Sigma, double[,] GradSigma, double V, double[] GradV) { double Acc = 0; for(int d = 0; d < cpv.D; d++) { Acc -= GradSigma[d,d]*V; } return Acc; } override public double InnerEdgeForm(ref CommonParams inp, double[] Sigma_IN, double[] Sigma_OT, double[,] GradSigma_IN, double[,] GradSigma_OT, double V_IN, double V_OT, double[] GradV_IN, double[] GradV_OT) { double Acc = 0; for(int d = 0; d < inp.D; d++) { Acc += 0.5*(V_IN + V_OT)*(Sigma_IN[d] - Sigma_OT[d])*inp.Normal[d]; } return Acc; } override public double BoundaryEdgeForm(ref CommonParamsBnd inp, double[] Sigma_IN, double[,] GradSigma_IN, double V_IN, double[] GradV_OT) { double Acc = 0; if(BndyMap.IsDirichletBndy(inp.X)) { Acc = 0; } else { double gNeu = 0.0; for(int d = 0; d < inp.D; d++) { Acc += Sigma_IN[d]*inp.Normal[d]*V_IN; } Acc -= gNeu*V_IN; } return Acc; } } // + [markdown] dotnet_interactive={"language": "csharp"} // // # 3 Equality test // We are going to test the equivalence of both formulationt // on a 2D grid, using a DG basis of degree 1: // + dotnet_interactive={"language": "csharp"} var grd2D = Grid2D.Cartesian2DGrid(GenericBlas.Linspace(-1,1,6), GenericBlas.Linspace(-1,1,7)); var b = new Basis(grd2D, 1); SinglePhaseField sigma1 = new SinglePhaseField(b,"sigma1"); SinglePhaseField sigma2 = new SinglePhaseField(b,"sigma2"); CoordinateVector sigma = new CoordinateVector(sigma1,sigma2); var TrialMapping = sigma.Mapping; var TestMapping = new UnsetteledCoordinateMapping(b); // + dotnet_interactive={"language": "csharp"} /// We create the matrix of the central-difference formulation: var OpDiv_cendiff = (new Divergence_cendiff()).Operator(); var MtxDiv_cendiff = OpDiv_cendiff.ComputeMatrix(TrialMapping, null, TestMapping); // - // We create the matrix of the strong formulation and show that the matrices of both formulations are equal. // // We use the \code{InfNorm(...)}-method to identify whether a matrix is (approximately) zero or not. // + dotnet_interactive={"language": "csharp"} var OpDiv_strong = (new Divergence_strong()).Operator(); var MtxDiv_strong = OpDiv_strong.ComputeMatrix(TrialMapping, null, TestMapping); var TestP = MtxDiv_cendiff + MtxDiv_strong; TestP.InfNorm(); // - // // # 4 The gradient-operator // // For the variational formulation of the gradient operator, a vector-valued // test-function is required. Unfourtunately, this is not supported by // *BoSSS*. Therefore we have to discretize the gradent component-wise, // i.e. as $\partial_{x}$ and $\partial_y$. // // A single derivative // can obviously be expressed as a divergence by the // identity $ \partial_{x_d} = \text{div}( \vec{e}_d u ) $. 
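// To see why this identity holds, note that $\vec{e}_d$ is a constant unit vector, so (a one-line check)
// $$
//   \text{div}( \vec{e}_d u ) = \sum_{k} \partial_{x_k} \left( (\vec{e}_d)_k \, u \right) = \partial_{x_d} u ,
// $$
// which is also why the fluxes below mirror the central-difference fluxes used for the divergence.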
// + dotnet_interactive={"language": "csharp"} class Gradient_d : BoSSS.Foundation.IEdgeForm, // edge integrals BoSSS.Foundation.IVolumeForm // volume integrals { public Gradient_d(int _d) { this.d = _d; } /// The component index of the gradient: int d; /// As ususal, we do not use parameters: public IList<string> ParameterOrdering { get { return null; } } /// We have one argument $u$: public IList<String> ArgumentOrdering { get { return new string[] { "u" }; } } public TermActivationFlags VolTerms { get { return TermActivationFlags.AllOn; } } public TermActivationFlags InnerEdgeTerms { get { return (TermActivationFlags.AllOn); } } public TermActivationFlags BoundaryEdgeTerms { get { return TermActivationFlags.AllOn; } } /// Now, we implement /// \begin{itemize} /// \item the volume form $u \vec{e}_d \cdot \nabla v$ /// \item the boundary form /// $\mean{u \ \vec{e}_d} \cdot \vec{n}_\Gamma \jump{v}$ /// \end{itemize} public double VolumeForm(ref CommonParamsVol cpv, double[] U, double[,] GradU, double V, double[] GradV) { double Acc = 0; Acc -= U[0]*GradV[this.d]; return Acc; } public double InnerEdgeForm(ref CommonParams inp, double[] U_IN, double[] U_OT, double[,] GradU_IN, double[,] GradU_OT, double V_IN, double V_OT, double[] GradV_IN, double[] GradV_OT) { double Acc = 0; Acc += 0.5*(U_IN[0] + U_OT[0])*inp.Normal[this.d]*(V_IN - V_OT); return Acc; } public double BoundaryEdgeForm(ref CommonParamsBnd inp, double[] U_IN, double[,] GradU_IN, double V_IN, double[] GradV_OT) { double Acc = 0; if(BndyMap.IsDirichletBndy(inp.X)) { double u_Diri = 0.0; Acc += u_Diri*inp.Normal[this.d]*V_IN; } else { Acc += U_IN[0]*inp.Normal[this.d]*V_IN; } return Acc; } } // - // Now, we are ready to assemble the full $\nabla$ operator // as $\left[ \begin{array}{c} \partial_x \\ \partial_y \end{array} \right]$. 
// + dotnet_interactive={"language": "csharp"} var OpGrad = new SpatialOperator(1,2,QuadOrderFunc.Linear(),"u","c1","c2"); OpGrad.EquationComponents["c1"].Add(new Gradient_d(0)); OpGrad.EquationComponents["c2"].Add(new Gradient_d(1)); OpGrad.Commit(); // - // As an additional test, we create the gradient-matrix and verify that // its transpose // is equal to the negative **MtxDiv**-matrix: // + dotnet_interactive={"language": "csharp"} var MtxGrad = OpGrad.ComputeMatrix(TestMapping, null, TrialMapping); var Test2 = MtxGrad.Transpose() - MtxDiv_strong; Test2.InfNorm(); // + [markdown] dotnet_interactive={"language": "csharp"} // // # 5 The complete Poisson-system // ## Assembly of the system // // We also need the identity-matrix in the top-left corner // of the Poisson-system: // + dotnet_interactive={"language": "csharp"} public class Identity : BoSSS.Foundation.IVolumeForm { public IList<string> ParameterOrdering { get { return new string[0]; } } public string component; public IList<String> ArgumentOrdering { get { return new string[] { component }; } } public TermActivationFlags VolTerms { get { return TermActivationFlags.AllOn; } } public double VolumeForm(ref CommonParamsVol cpv, double[] U, double[,] GradU, double V, double[] GradV) { return U[0]*V; } } // - // // We are going to implement the linear Poisson-operator // $$ // \left[ \begin{array}{ccc} // 1 & 0 & \partial_x \\ // 0 & 1 & \partial_y \\ // -\partial_x & -\partial_y & 0 // \end{array} \right] // \cdot // \left[ \begin{array}{c} \sigma_0 \\ \sigma_1 \\ u \end{array} \right] // = // \left[ \begin{array}{c} c_0 \\ c_1 \\ c_2 \end{array} \right] // $$ // The variables $c_0$, $c_1$ and $c_2$, which correspond to the // test functions are also called co-domain variables of the operator. // We are using the negative divergence, since this will lead to a // symmetric matrix, instead of a anti-symmetric one. // By doing so, we can e.g. use a Cholesky-factorization to determine // whether the system is definite or not. // + dotnet_interactive={"language": "csharp"} var OpPoisson = new SpatialOperator(3, 3, QuadOrderFunc.Linear(), "sigma1", "sigma2", "u", // the domain-variables "c1", "c2", "c3"); // the co-domain variables /// Now we add all required components to \code{OpPoisson}: OpPoisson.EquationComponents["c1"].Add(new Gradient_d(0)); OpPoisson.EquationComponents["c1"].Add(new Identity() { component = "sigma1" }); OpPoisson.EquationComponents["c2"].Add(new Gradient_d(1)); OpPoisson.EquationComponents["c2"].Add(new Identity() { component = "sigma2" }); OpPoisson.EquationComponents["c3"].Add(new Divergence_strong()); OpPoisson.Commit(); // - // We create mappings $[\sigma_1, \sigma_2, u ]$: // three different combinations of DG orders will be investigated: // // - equal order: the same polynomial degree for $u$ and $\vec{\sigma}$ // - mixed order: the degree of $u$ is lower than the degree // of $\vec{\sigma}$. // - `strange' order: the degree of $u$ is higher than the degree of // $\vec{\sigma}$. 
// // + dotnet_interactive={"language": "csharp"} var b3 = new Basis(grd2D, 3); var b2 = new Basis(grd2D, 2); var b4 = new Basis(grd2D, 4); var EqualOrder = new UnsetteledCoordinateMapping(b3,b3,b3); var MixedOrder = new UnsetteledCoordinateMapping(b4,b4,b3); var StrngOrder = new UnsetteledCoordinateMapping(b2,b2,b3); // + dotnet_interactive={"language": "csharp"} var MtxPoisson_Equal = OpPoisson.ComputeMatrix(EqualOrder, null, EqualOrder); var MtxPoisson_Mixed = OpPoisson.ComputeMatrix(MixedOrder, null, MixedOrder); var MtxPoisson_Strng = OpPoisson.ComputeMatrix(StrngOrder, null, StrngOrder); // + [markdown] dotnet_interactive={"language": "csharp"} // We show that the matrices are symmetric // (use e.g. **SymmetryDeviation(...)**), but indefinite // (use e.g. **IsDefinite(...)**). // + dotnet_interactive={"language": "csharp"} double symDev_Equal = MtxPoisson_Equal.SymmetryDeviation(); symDev_Equal // + dotnet_interactive={"language": "csharp"} double symDev_Mixed = MtxPoisson_Mixed.SymmetryDeviation(); symDev_Mixed // + dotnet_interactive={"language": "csharp"} double symDev_Strng = MtxPoisson_Strng.SymmetryDeviation(); symDev_Strng // + dotnet_interactive={"language": "csharp"} MtxPoisson_Equal.IsDefinite() // + dotnet_interactive={"language": "csharp"} MtxPoisson_Mixed.IsDefinite() // + dotnet_interactive={"language": "csharp"} MtxPoisson_Strng.IsDefinite() // + dotnet_interactive={"language": "csharp"} /// BoSSScmdSilent BoSSSexeSilent NUnit.Framework.Assert.LessOrEqual(symDev_Equal, 1.0e-8); NUnit.Framework.Assert.LessOrEqual(symDev_Mixed, 1.0e-8); NUnit.Framework.Assert.LessOrEqual(symDev_Strng, 1.0e-8); // - // # 6 Advanced topics // + dotnet_interactive={"language": "csharp"} // + [markdown] dotnet_interactive={"language": "csharp"} // ## Algebraic reduction // // Since the top-left corner of our matrix // $$ // \left[ \begin{array}{cc} // 1 & B \\ // B^T & 0 // \end{array} \right] // $$ // is actually very easy to eliminate the variable $\vec{\sigma}$ // from our system algebraically. // The matrix of the reduces system is obviously $B^T \cdot B$. // - // # Extraction of sub-matrices and elimination // From the mapping, we can actually obtain index-lists for each variable, // which can then be used to extract sub-matrices from // **MtxPoisson\_Equal**, **MtxPoisson\_Mixed**, resp. // **MtxPoisson\_Strng**. // + dotnet_interactive={"language": "csharp"} long[] SigmaIdx_Equal = EqualOrder.GetSubvectorIndices(true, 0,1); long[] uIdx_Equal = EqualOrder.GetSubvectorIndices(true, 2); long[] SigmaIdx_Mixed = MixedOrder.GetSubvectorIndices(true, 0,1); long[] uIdx_Mixed = MixedOrder.GetSubvectorIndices(true, 2); long[] SigmaIdx_Strng = StrngOrder.GetSubvectorIndices(true, 0,1); long[] uIdx_Strng = StrngOrder.GetSubvectorIndices(true, 2); // - // The extraction of the sub-matrix and the elimination, for the equal order: // + dotnet_interactive={"language": "csharp"} var MtxPoissonRed_Equal = MtxPoisson_Equal.GetSubMatrix(uIdx_Equal, SigmaIdx_Equal) // -Divergence * MtxPoisson_Equal.GetSubMatrix(SigmaIdx_Equal, uIdx_Equal); // Gradient // + [markdown] dotnet_interactive={"language": "csharp"} // Finally, we also // create the reduced system for the mixed and the strange // order, test for the definiteness of the reduced system. 
//
// Equal and mixed order are positive definite, while the strange order
// is indefinite - a clear indication that something is wrong:

// + dotnet_interactive={"language": "csharp"}
var MtxPoissonRed_Mixed = MtxPoisson_Mixed.GetSubMatrix(uIdx_Mixed, SigmaIdx_Mixed)  // -Divergence
                        * MtxPoisson_Mixed.GetSubMatrix(SigmaIdx_Mixed, uIdx_Mixed); // Gradient
var MtxPoissonRed_Strng = MtxPoisson_Strng.GetSubMatrix(uIdx_Strng, SigmaIdx_Strng)  // -Divergence
                        * MtxPoisson_Strng.GetSubMatrix(SigmaIdx_Strng, uIdx_Strng); // Gradient

// + dotnet_interactive={"language": "csharp"}
bool isdef_red_Equal = MtxPoissonRed_Equal.IsDefinite();
isdef_red_Equal

// + dotnet_interactive={"language": "csharp"}
bool isdef_red_Mixed = MtxPoissonRed_Mixed.IsDefinite();
isdef_red_Mixed

// + dotnet_interactive={"language": "csharp"}
bool isdef_red_Strng = MtxPoissonRed_Strng.IsDefinite();
isdef_red_Strng

// + dotnet_interactive={"language": "csharp"}
/// BoSSScmdSilent BoSSSexeSilent
NUnit.Framework.Assert.IsTrue(isdef_red_Equal);
NUnit.Framework.Assert.IsTrue(isdef_red_Mixed);
NUnit.Framework.Assert.IsFalse(isdef_red_Strng);

// + [markdown] dotnet_interactive={"language": "csharp"}
// We compute the condition number of all three matrices; we observe that
// the mixed- as well as the equal-order discretization result in rather
// moderate condition numbers.
//
// For the strange order, the condition number
// of the system is far too high:

// + dotnet_interactive={"language": "csharp"}
double condest_Mixed = MtxPoissonRed_Mixed.condest();
condest_Mixed

// + dotnet_interactive={"language": "csharp"}
double condest_Equal = MtxPoissonRed_Equal.condest();
condest_Equal

// + dotnet_interactive={"language": "csharp"}
double condest_Strng = MtxPoissonRed_Strng.condest();
condest_Strng

// + dotnet_interactive={"language": "csharp"}
/// BoSSScmdSilent BoSSSexeSilent
NUnit.Framework.Assert.LessOrEqual(condest_Mixed, 1e5);
NUnit.Framework.Assert.LessOrEqual(condest_Equal, 1e5);
NUnit.Framework.Assert.Greater(condest_Strng, 1e10);

// + dotnet_interactive={"language": "csharp"}
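// A small optional cross-check, added here as an illustration (it assumes that the
// extension methods used above, e.g. SymmetryDeviation(), also apply to the reduced
// matrices): since the reduced operator is (-div)*(grad), i.e. essentially B^T*B,
// it should again be symmetric, which we can verify just as we did for the full
// saddle-point matrix.
double symDevRed_Equal = MtxPoissonRed_Equal.SymmetryDeviation();
symDevRed_Equal
// -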
doc/handbook/tutorial10-PoissonSystem/Poisson.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Week 6 # # Phew. Is it week 6 already? # # Last week we had an intro to machine learning and regression and this week we continue with some more ML but focusing on classification instead. There are lots of courses on machine learning at DTU. And across many research areas, people use ML for all kinds of things. So there's a good chance you're already familiar with what's going to happen today. # # In the following, we continue introducing fundamentals of ML, decision trees and start with some prediction tasks on crime data. You might ask, why are we doing this? Well, a couple of reasons: # # 1. It ties nicely with how we started this course: do you remember all we learnt about predictive policing in Week 1? So, today it is our turn to make predictions and see how well we can do with the data we have been exploring. # # 2. Visualization **AND** machine learning is a powerful combination. A combination that is pretty rare. # - Usually it's the case that people are either good at machine learning or data viz, but not both. # - So what we will be able to do in this class is an unusual combo: We can use ML to understand data and then visualize the outputs of the machine-learning. # # The plan for today is as follows: # # 1. In part 1, we go more in depth on fundamentals of machine learning; # 2. In part 2, we get an introduction to Decision Trees; # 3. In part 3, we put everything together to predict criminal recidivism. # ## Part 1: Fundamentals of machine learning # We continue with a couple of lectures from <NAME> about model selection and feature extraction. These connect nicely with what you should have already read in DSFS Chaper 11. If you did not read the chater yet, it is time for you to do it. # # Find it on DTU Learn under 'Course content' $\rightarrow$ 'Content' $\rightarrow$ 'Lecture 6 reading' # # **Model selection** # [![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/MHhlAtw3Ces/0.jpg)](https://www.youtube.com/watch?v=MHhlAtw3Ces) # # **Feature extraction and selection** # [![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/RZmitKn220Q/0.jpg)](https://www.youtube.com/watch?v=RZmitKn220Q) # > *Exercise 1*: A few questions about machine learning to see whether you've read the text and watched the videos. # > # > * What do we mean by a 'feature' in a machine learning model? # # <br> "Features are whatever inputs we provide to our model. + e.g. In the simplest case, features are simply given to you. If you want to predict someone’s salary based on her years of experience, then years of experience is the only feature you have. (maybe squaring, cubing.. your input features might improve your model performance) + feature can be categorical or numerical -> different models need different input (Naive Bayes classifier [yes-no-features], Regression models [numeric features], decision trees [numeric or categorical features] + might be hard to choose the right amount" (reading 1 week 6, chapter "Feature Extraction and Selection") # # > * What is the main problem with overfitting? # # <br> "A common danger in machine learning is overfitting—producing a model that performs well on the data you train it on but that generalizes poorly to any new data. This could involve learning noise in the data. 
Or it could involve learning to identify # specific inputs rather than whatever factors are actually predictive for the desired output (complex models lead to overfitting -> train (often 2/3-s of the data), test and validation split) -> overfitting goof performance on train data, but bad performance on test data (if so good hint, but maybe model still overfitts for the overall problem as there was a common patter in the train and test data + another issue: choosing the right model -> "split the data into three parts: a training set for building models, a validation set for choosing among trained models, and a test set for judging the final model.)." (reading 1 week 6, chapter "Overfitting, Underfitting") # # <br> the model complexity should reflect the complexity of the data + overfitting = fitting to the noice of the data (+ little data -> we need to recycle the data e.g. cross validation, nested cross validation) # # [Overfitting Error Model Complexitiy](https://github.com/Philipp-Otter/socialdata2022/blob/main/files/overfitting_underfitting_error.png) # # [Data Split](https://github.com/Philipp-Otter/socialdata2022/blob/main/files/data_split.png) # # [Cross Validation](https://github.com/Philipp-Otter/socialdata2022/blob/main/files/cross_validation.png) # # # > * Explain the connection between the bias-variance trade-off and overfitting/underfitting. # # <br> "Both are measures of what would happen if you were to retrain your model many times on different sets of training data (from the same larger population). + For example, the degree 0 model in “Overfitting and Underfitting” on page 142 will make a lot of mistakes for pretty much any training set (drawn from the same population), which means that it has a high bias. However, any two randomly chosen # training sets should give pretty similar models (since any two randomly chosen training sets should have pretty similar average values). So we say that it has a low variance. High bias and low variance typically correspond to underfitting. + On the other hand, the degree 9 model fit the training set perfectly. It has very low bias but very high variance (since any two training sets would likely give rise to very different models). This corresponds to overfitting. + If your model has high bias (which means it performs poorly even on your training data) then one thing to try is adding more features. Going from the degree 0 model in “Overfitting and Underfitting” on page 142 to the degree 1 model was a big improvement. + If your model has high variance, then you can similarly remove features. But another solution is to obtain more data (if you can). + Holding model complexity constant, the more data you have, the harder it is to overfit. + On the other hand, more data won’t help with bias. If your model doesn’t use enough features to capture regularities in the data, throwing more data at it won’t help." (reading 1 week 6, chapter "The Bias-Variance Trade-off") # # > * The `Luke is for leukemia` on page 145 in the reading is a great example of why accuracy is not a good measure in very unbalanced problems. Try to come up with a similar example based on a different type of data (either one you are interested in or one related to the SF crime dataset). # # <br> a kid named Chris becoming good at soccer # ## Part 2: Decision Tree Intro # Now we turn to decision trees. This is a fantastically useful supervised machine-learning method, that we use all the time in research. 
To get started on the decision trees, we asked you to read DSFS, chapter 17 (if you didn't read it you can find it in DTU Learn). # # And our little session on decision trees wouldn't be complete without hearing from Ole about these things. # # [![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/LAA_CnkAEx8/0.jpg)](https://www.youtube.com/watch?v=LAA_CnkAEx8) # > *Exercise 2:* Just a few questions to make sure you've read the text (DSFS chapter 17) and/or watched the video. # > # > * There are two main kinds of decision trees depending on the type of output (numeric vs. categorical). What are they? # # <br> "Most people divide decision trees into classification trees (which produce categorical outputs) and regression trees (which produce numeric outputs)." (reading 2 week 6, chapter "What Is a Decision Tree?") # # > * Explain in your own words: Why is entropy useful when deciding where to split the data? # # <br> "Ideally, we’d like to choose questions whose answers give a lot of information about what our tree should predict. If there’s a single yes/no question for which “yes” answers always correspond to True outputs and “no” answers to False outputs (or vice versa), this would be an awesome question to pick. Conversely, a yes/no question for which neither answer gives you much new information about what the prediction should be is probably not a good choice. We capture this notion of “how much information” with entropy. You have probably heard this used to mean disorder. We use it to represent the uncertainty associated with data. + Imagine that we have a set S of data, each member of which is labeled as belonging to one of a finite number of classes C1, ..., Cn. If all the data points belong to a single class, then there is no real uncertainty, which means we’d like there to be low entropy. If the data points are evenly spread across the classes, there is a lot of uncertainty and we’d like there to be high entropy." (reading 2 week 6, chapter "Entropy") # <br> "Correspondingly, we’d like some notion of the entropy that results from partitioning a set of data in a certain way. We want a partition to have low entropy if it splits the data into subsets that themselves have low entropy (i.e., are highly certain), and high # entropy if it contains subsets that (are large and) have high entropy (i.e., are highly uncertain)." (reading 2 week 6, chapter "The Entropy of a Partition") # # > * Why are trees prone to overfitting? # # <br> "One problem with this approach is that partitioning by an attribute with many different values will result in a very low entropy due to overfitting. -> e.g. partitioning on social security numbers will produce one-person subsets with zero entropy -> based on social security number, so cannot be generalised beyond the training set" (reading 2 week 6, chapter "The Entropy of a Partition") # # # > * Explain (in your own words) how random forests help prevent overfitting. # # <br> "One way of avoiding overfitting is a technique called random forests, in which we build multiple decision trees and let them vote on how to classify inputs. + Since each tree is built using different data, each tree will be different from every other tree. + A second source of randomness involves changing the way we chose the best_attribute to split on. Rather than looking at all the remaining attributes, we first choose a random subset of them and then split on whichever of those is best." 
(reading 2 week 6, chapter "Random Forests") # **In the following I added some additional material for you to explore decision trees through some fantastic *visual* introductions.** # # *Decision Trees 1*: The visual introduction to decision trees on this webpage is AMAZING. Take a look to get an intuitive feel for how trees work. Do not miss this one, it's a treat! http://www.r2d3.us/visual-intro-to-machine-learning-part-1/ # # *Decision Trees 2*: the second part of the visual introduction is about the topic of model selection, and bias/variance tradeoffs that we looked into earlier during this lesson. But once again, here those topics are visualized in a fantastic and inspiring way, that will make it stick in your brain better. So check it out http://www.r2d3.us/visual-intro-to-machine-learning-part-2/ # # # # *Decision tree tutorials*: And of course the best way to learn how to get this stuff rolling in practice, is to work through a tutorial or two. We recommend the ones below: # * https://jakevdp.github.io/PythonDataScienceHandbook/05.08-random-forests.html # * https://towardsdatascience.com/random-forest-in-python-24d0893d51c0 (this one also has good considerations regarding the one-hot encodings) # # (But there are many other good ones out there.) # ## Part 3: Predicting criminal recidivism # It is now time to put everything together and use the models we have read about for prediction. Today, we are still going to focus on crimes, but with a different dataset. # # The dataset is related to an algorithm used by judges and parole officers for scoring criminal defendant’s likelihood of reoffending (recidivism). It consists of information about defendants and variables used to measure recidivism. # # I'll provide you with more information about this data and its source next week. But, for now I don't want to give you more spoilers (you'll know why next week 😇), so let's get started. In the next exercises, we will try to **loosely** recreate the algorithm to predict whether a person is going to re-commit a crime in the future. # > *Exercise 3.1:* Getting the data ready. Before getting to predictions, we need to get the data, select the features, and define the target. Follow these steps for success: # > # > * Download the dataset from [GitHub](https://raw.githubusercontent.com/suneman/socialdata2022/main/files/recidivism_dataset_sub.csv) and load it in a `pandas` dataframe. # > * Select the variables of interest. Here, a description of which one and their meaning: # > 1. `age`: age (in years) of the person,; # > 2. `sex`: either "Female" or "Male"; # > 3. `race`: a variable encoding the race of the person; # > 4. `juv_fel_count`: the number of previous juvenile felonies; # > 5. `juv_misd_count`: the number of previous juvenile misdemeanors; # > 6. `juv_other_count`: the number of prior juvenile convictions that are not considered either felonies or misdemeanors; # > 7. `priors_count`: the number of prior crimes committed; # > 8. `is_recid`: if the defendent has recommit a crime; # > 9. `days_b_screening_arrest`: Days between the arrest and screening. # > 9. `c_charge_degree`: Degree of the crime. It is either M (Misdemeanor), F (Felony), or O (not causing jail) # > # > * Finally, we need a target: # > * `two_year_recid` is what we want to predict. Its current values are $\in\left[0,1\right]$, where $0$ means the defendant did not recommit a crime within two years, and $1$ means the defendant recommitted a crime within two years. 
# + import pandas as pd import numpy as np import matplotlib.pyplot as plt # to see all columns when e.g. calling the head function pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) # - # import data recidivism = pd.read_csv('https://raw.githubusercontent.com/suneman/socialdata2022/main/files/recidivism_dataset_sub.csv') # Alright, we now have the data, but we still need a bit of **preprocessing** before we can get to the actual prediction. # # At the beginning, I wanted you to embed everything into a unique pipeline. I later found that it sometimes have issues (throw errors, takes long time when cross-validating, etc.). Thus, I have excluded this step from today's class. However, if you want to know more about pipelines, here, a nice optional tutorial for you: # # * https://towardsdatascience.com/step-by-step-tutorial-of-sci-kit-learn-pipeline-62402d5629b6 # > *Exercise 3.2:* Data preprocessing and label encoding. # > # > * To preprocess the data follow these steps: # > * filter out records where the `is_recid` feature is not known (i.e. where it is equal to -1); # > * only keep records that cause jail time; # > * only keep records that have between $-30$ and $30$ days between the arrest and screening. # > * Finally, drop `is_recid`, `c_charge_degree`, `days_b_screening_arrest` for the upcoming analysis. # > * Before we move on, let's explore the data with a few visualizations. Use the variable `two_year_recid` and create a plot with the following subplots: # > * A bar plot with the number of recommitted and non-recommitted crimes, e.g., number of 0s and 1s in `two_year_recid`. Now a couple of questions: What is the fraction of recommitted crimes over the total number of records? Is it balanced? # > * A bar plot with the fraction of recommitted crimes over total number of records per `sex`, e.g., the number of Females that recommitted a crime over the number of all female records. What do you observe? # > * A bar plot with the fraction of recommitted crimes over total number of records per `race` (compute as above). What do you observe? # > * A bar plot with the fraction of recommitted crimes over total number of records per `age` group (group ages as <20, 20-30, 30-40, etc. and compute as above). What do you observe? # > * Some features we are working with are categorical, so we need to deal with them by using encoders. There are many different types, but we will focus on the `OneHotEncoder` and the `LabelEncoder`: # > * Describe what these encoder do and choose one. Which one did you choose? Why? # > * What variables need to be transformed? # # <mark> **Note** The data source that I was using has changed, so the data currently doesn't include `is_recid=-1`and `c_charge_degree='O'`. Please, write the code as if you were filtering those variables anyway, it is a way for you to practice with `pandas`.</mark> # + # 3.2.1 Preprocessing # 0. drop all columns except 'age', 'sex', 'race', 'juv_fel_count', 'juv_misd_count', 'juv_other_count', 'priors_count', 'is_recid', # 'days_b_screening_arrest', 'c_charge_degree', 'two_year_recid' recidivism = recidivism[['age', 'sex', 'race', 'juv_fel_count', 'juv_misd_count', 'juv_other_count', 'priors_count', 'is_recid', 'days_b_screening_arrest', 'c_charge_degree', 'two_year_recid']] # 1. filter out records where the `is_recid` feature is not known (i.e. where it is equal to -1) recidivism = recidivism.drop(recidivism[recidivism.is_recid == -1].index) # 2. 
only keep records that cause jail time recidivism = recidivism.drop(recidivism[recidivism.c_charge_degree == 'O'].index) # 3. only keep records that have between $-30$ and $30$ days between the arrest and screening recidivism = recidivism.drop(recidivism[(recidivism.days_b_screening_arrest <= -30) | (recidivism.days_b_screening_arrest >= 30) | \ (recidivism.days_b_screening_arrest.isna())].index) # 4. drop `is_recid`, `c_charge_degree`, `days_b_screening_arrest` for the upcoming analysis recidivism = recidivism.drop(columns = ['is_recid', 'c_charge_degree', 'days_b_screening_arrest']) # + # 3.2.2 Some visualizations to explore the data -> Use the variable `two_year_recid` and create a plot with the following subplots fig = plt.figure(figsize=(15, 15)) # 1. subplot: A bar plot with the number of recommitted and non-recommitted crimes, e.g., number of 0s and 1s in `two_year_recid`. recidivism_count = recidivism.groupby(['two_year_recid']).agg({'two_year_recid':'count'}).rename(columns={'two_year_recid':'Count'})\ .reset_index() ax = fig.add_subplot(2, 2, 1) ax.bar(['Non-Recommitted', 'Recommitted'], recidivism_count['Count']) ax.set_ylabel('Counts') ax.set_title('Number of Recommitted and Non-Recommitted Crimes') # 2. subplot: A bar plot with the fraction of recommitted crimes over total number of records per `sex`, # e.g., the number of Females that recommitted a crime over the number of all female records. recidivism_by_sex = recidivism.groupby(['sex', 'two_year_recid']).agg({'two_year_recid':'count'}).\ rename(columns={'two_year_recid':'Fraction'}).reset_index() recidivism_by_sex['Fraction'] = recidivism_by_sex.Fraction / recidivism_by_sex.groupby("sex")["Fraction"].transform("sum") recidivism_by_sex = recidivism_by_sex.drop(recidivism_by_sex[recidivism_by_sex.two_year_recid == 0].index) ax = fig.add_subplot(2, 2, 2) ax.bar(recidivism_by_sex['sex'], recidivism_by_sex['Fraction']) ax.set_ylabel('Fraction') ax.set_title('Fraction of Recommited Crimes by Gender') # # 3. subplot: A bar plot with the fraction of recommitted crimes over total number of records per `race` (compute as above). recidivism_by_race = recidivism.groupby(['race', 'two_year_recid']).agg({'two_year_recid':'count'}).\ rename(columns={'two_year_recid':'Fraction'}).reset_index() recidivism_by_race['Fraction'] = recidivism_by_race.Fraction / recidivism_by_race.groupby("race")["Fraction"].transform("sum") recidivism_by_race = recidivism_by_race.drop(recidivism_by_race[recidivism_by_race.two_year_recid == 0].index) ax = fig.add_subplot(2, 2, 3) ax.bar(recidivism_by_race['race'], recidivism_by_race['Fraction']) ax.set_ylabel('Fraction') ax.set_title('Fraction of Recommited Crimes by Race') # # 4. subplot: A bar plot with the fraction of recommitted crimes over total number of records per `age` group # # (group ages as <20, 20-30, 30-40, etc. and compute as above). 
bins = [0, 19, 29, 39, 49, 59, 69, 79, 89, 99, 200] labels = ['<20', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79', '80-89', '90-99', '<99'] recidivism['age_range'] = pd.cut(recidivism.age, bins, labels = labels,include_lowest = True) recidivism_by_age_group = recidivism.groupby(['age_range', 'two_year_recid']).agg({'two_year_recid':'count'})\ .rename(columns={'two_year_recid':'Fraction'}).reset_index() recidivism_by_age_group['Fraction'] = recidivism_by_age_group.Fraction / recidivism_by_age_group.groupby("age_range")["Fraction"].\ transform("sum") recidivism_by_age_group = recidivism_by_age_group.drop(recidivism_by_age_group[recidivism_by_age_group.two_year_recid == 0].index) ax = fig.add_subplot(2, 2, 4) ax.bar(recidivism_by_age_group['age_range'], recidivism_by_age_group['Fraction']) ax.set_ylabel('Fraction') ax.set_title('Fraction of Recommited Crimes by Age Group') recidivism = recidivism.drop(columns = ['age_range']) # plt.rcParams.update({'font.size': 9}) fig.tight_layout(rect=[0, 0.03, 1, 0.95]) fig.suptitle('Different Bar Plots of Exercise 3.2') plt.show() # clear variables # del ax, bins, fig, labels, recidivism_by_age_group, recidivism_by_race, recidivism_count, recidivism_by_sex # - # **Question subplot 1: What is the fraction of recommitted crimes over the total number of records? Is it balanced?** # <br> slightly less recommitted crimes # # **Question subplot 2: What do you observe?** # <br> way more men recommitted a crime # # **Question subplot 3: What do you observe?** # <br> weird that there is no bar for white americans # <br> graphs are not that meaningful as you do not have an idea about the sample size # # **Question subplot 4: What do you observe?** # <br> the younger the people the more people recommit a crime, but smaller 20 and 90-99 have rather small sample size # + # 3.2.3 Feature Engineering -> one hot encoding applied on the race column and label encoding on the sex column # Get one hot encoding of column race one_hot = pd.get_dummies(recidivism['race'], prefix = 'race') # Drop column race as it is now encoded recidivism = recidivism.drop('race',axis = 1) # Join the encoded df recidivism = recidivism.join(one_hot) # label encoding on the sex column (0 -> Female; 1 -> Male) recidivism['sex'] = recidivism['sex'].astype('category') recidivism['sex'] = recidivism['sex'].cat.codes # - # **Describe what these encoders (`OneHotEncoder` and `LabelEncoder`) do and choose one. Which one did you choose? Why?** # <br> Label-encoding assigns an ascending numerical value to the unique categorical values of a column. The problem is that the model will misunderstand the results, interpreting, for example, an order like 0 < 1 < 2. One-hot-encoding overcomes this problem by assigning a column to all unique categorical values of a column. If the sample (row) belongs to a certain categorical value, the value in this specific column will be one and in all the others zero. # # **What variables need to be transformed?** # <br> sex could be one hot encoded, but only two values, so label encoding is fine as well # <br> race should be one hot encoded # **We are almost there! It is now time to make predictions.** # > *Exercise 3.3:* Build a Decision Tree or a Random Forest. Now we are going to build a Decision Tree (or a Random Forest) classifier that takes as input the features defined above and predicts if a person is going to recommit the crime within two years. # > * Split the data in Train/Test sets. 
You can do this with `train_test_split` in `sklearn`, I used a 70/30 split, but you are free to try different ones. # > * **Note:** create a balanced dataset, that is, **grab an equal number of examples** from each target value. # > * Fit a model to your Train set. A good option is the `DecisionTreeClassifier` (or even better a [Random Forest](https://jakevdp.github.io/PythonDataScienceHandbook/05.08-random-forests.html), here is [another tutorial for Random Forests](https://towardsdatascience.com/random-forest-in-python-24d0893d51c0)). # > * Evaluate the performance of model on the test set (look at Accuracy, Precision, and Recall). What are your thoughts on these metrics? Is accuracy a good measure? # > * **hint:** Since you have created a balanced dataset, the baseline performance (random guess) is 50%. # > * Are your results tied to the specific training data/hyperparameter set you used? Try to perform a `RandomizedSearchCV` and recompute the performance metric above with the hyperparameters found. [Here](https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74) a nice tutorial for you! And here one on [cross-validation](https://towardsdatascience.com/cross-validation-in-machine-learning-72924a69872f) for those of you who crave for more. # > * Visualize the tree. There are different options to do so. The easiest one is to use `plot_tree`, but there are other [options](https://mljar.com/blog/visualize-decision-tree/). If you chose Random Forest, you can visualize a tree as well by extracting a single tree with `model.estimators_[n]` (n is the index of the estimator you want to select). # > * Visualize the Feature Importance. What do you observe? # > * **(Optional)** If you find yourself with extra time, come back to this exercise and tweak the encoder, model, and variables you use to see if you can improve the performance of the tree. **Note**: It's not 100% given that adding variables will improve your predictive performance. # + # Exercise 3.3 # 3.3.1 # a) # split the data set in train and test set (70/30 split) -> balanced data set grab equal amount of each target value # train_test_split by sklearn X = recidivism.loc[:, ~recidivism.columns.isin(['two_year_recid'])] #0 = defendant did not recommit a crime within two years; 1 = the defendant recommitted a crime within two years y = recidivism['two_year_recid'] from sklearn.model_selection import train_test_split from imblearn.under_sampling import RandomUnderSampler rus = RandomUnderSampler(random_state=42, sampling_strategy=1) X_res, y_res = rus.fit_resample(X, y) X_train, X_test, y_train, y_test = train_test_split(X_res, y_res, test_size=0.3, stratify=y_res, random_state=42) # to check if equal amount of each target value print(np.unique(y_train, return_counts=True)) print(np.unique(y_test, return_counts=True)) # b) # fit randomforest or DecisionTreeClassifier # Import the model we are using from sklearn.ensemble import RandomForestClassifier # Instantiate model with 1000 decision trees rf = RandomForestClassifier(n_estimators = 1000) # Train the model on training data rf.fit(X_train, y_train) # + # 3.3.2 # Are your results tied to the specific training data/hyperparameter set you used? 
-> I guess # feature importance feature_imp = pd.Series(rf.feature_importances_,index=X.columns).sort_values(ascending=False) print(feature_imp) # drop features with low importance -> X = recidivism.loc[:, recidivism.columns.isin(['age', 'priors_count'])] might be enough # prediction y_pred=rf.predict(X_test) # Calculate the absolute errors errors = abs(y_pred - y_test) # Print out the mean absolute error (mae) print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.') from sklearn import metrics print("Accuracy:",metrics.accuracy_score(y_test, y_pred).round(2)) # precision and recall print(metrics.classification_report(y_test, y_pred)) # !!! Confusion matrix would be also helpful !!! # - # **3.2.2 Thoughts on accuracy, precision and recall?** # (**hint:** Since you have created a balanced dataset, the baseline performance (random guess) is 50%.) # # <br> **accuracy:** "Accuracy is one metric for evaluating classification models. Informally, accuracy is the fraction of predictions our model got right." -> only use it, when data set is balanced! -> seems rather low as it only 10 percent better then flipping a coin # <br> **precision:** "Precision is the number of correctly-identified members of a class divided by all the times the model predicted that class. In the case of Aspens, the precision score would be the number of correctly-identified Aspens divided by the total number of times the classifier predicted “Aspen,” rightly or wrongly." -> # <br> **recall:** "Recall is the number of members of a class that the classifier identified correctly divided by the total number of members in that class. For Aspens, this would be the number of actual Aspens that the classifier correctly identified as such." -> # <br> **F1-score:** "F1 score is a little less intuitive because it combines precision and recall into one metric. If precision and recall are both high, F1 will be high, too. If they are both low, F1 will be low. If one is high and the other low, F1 will be low. F1 is a quick way to tell whether the classifier is actually good at identifying members of a class, or if it is finding shortcuts (e.g., just identifying everything as a member of a large class)." -> the model seems equally bad at predicting if the person recommitted a crime within in the next two years or not # # https://medium.com/analytics-vidhya/evaluating-a-random-forest-model-9d165595ad56 # + # 3.2.3 Results toed to specific training data/ hyperparameter set? 
# RandomizedSearchCV -> recompute the performance metric above with the hyperparameters found # Look at parameters used by our current forest print('Parameters currently in use:\n') print(rf.get_params()) # n_estimators = number of trees in the foreset # max_features = max number of features considered for splitting a node # max_depth = max number of levels in each decision tree # min_samples_split = min number of data points placed in a node before the node is split # min_samples_leaf = min number of data points allowed in a leaf node # bootstrap = method for sampling data points (with or without replacement) # Random Hyperparameter Grid # Number of trees in random forest (current random forest model n_estimators = 1000) n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 1000)] # Number of features to consider at every split max_features = ['auto', 'sqrt'] # Maximum number of levels in tree max_depth = [int(x) for x in np.linspace(10, 110, num = 11)] max_depth.append(None) # Minimum number of samples required to split a node min_samples_split = [2, 5, 10] # Minimum number of samples required at each leaf node min_samples_leaf = [1, 2, 4] # Method of selecting samples for training each tree bootstrap = [True, False] # Create the random grid random_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap} # Use the random grid to search for best hyperparameters # First create the base model to tune rf = RandomForestClassifier() # Random search of parameters, using 3 fold cross validation, # search across 100 different combinations, and use all available cores from sklearn.model_selection import RandomizedSearchCV rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter = 100, cv = 3, verbose=2, random_state=42, n_jobs = -1) # Fit the random search model rf_random.fit(X_train, y_train) print(rf_random.best_params_) ### Evaluate Random Search def evaluate(model, X_test, y_test): y_pred = model.predict(X_test) errors = abs(y_pred - y_test) accuracy = metrics.accuracy_score(y_test, y_pred).round(2) print('Model Performance') print('Average Error: {:0.4f} degrees.'.format(np.mean(errors))) print('Accuracy = {:0.2f}%.'.format(accuracy)) return accuracy base_model = RandomForestClassifier(n_estimators = 1000, random_state = 42) base_model.fit(X_train, y_train) base_accuracy = evaluate(base_model, X_test, y_test) best_random = rf_random.best_estimator_ random_accuracy = evaluate(best_random, X_test, y_test) print('Improvement of {:0.2f}%.'.format( 100 * (random_accuracy - base_accuracy) / base_accuracy)) # + # continuation of 3.2.3 # based on rf_random.best_params_ from sklearn.model_selection import GridSearchCV # Create the parameter grid based on the results of random search param_grid = { 'bootstrap': [True], 'max_depth': [10], 'max_features': ['sqrt'], 'min_samples_leaf': [1], 'min_samples_split': [10], 'n_estimators': [162] } # Create a based model rf_grid = RandomForestClassifier() # Instantiate the grid search model grid_search = GridSearchCV(estimator = rf_grid, param_grid = param_grid, cv = 3, n_jobs = -1, verbose = 2) # Fit the grid search to the data grid_search.fit(X_train, y_train) print(grid_search.best_params_) best_grid = grid_search.best_estimator_ grid_accuracy = evaluate(best_grid, X_test, y_test) print('Improvement of {:0.2f}%.'.format( 100 * (grid_accuracy - base_accuracy) / 
base_accuracy)) # + # 3.2.4 Visualize the tree # use model.estimators_[n] as I used the random forest estimator from sklearn import tree rf.fit(X_train, y_train) plt.figure(figsize=(20,20)) _ = tree.plot_tree(rf.estimators_[0], feature_names=X.columns, filled=True) # + # 3.2.5 feature importance feature_imp = pd.Series(rf.feature_importances_,index=X.columns).sort_values(ascending=False) print(feature_imp) # drop features with low importance -> X = recidivism.loc[:, recidivism.columns.isin(['age', 'priors_count'])] might be enough # - # --- # Before you go, please, have a look at the following two activities: # # 1) # # <mark> Take a minute (it is really one minute) to fill this [form](https://forms.gle/9RwhFc96na4E2Fmg7). It is really important for me to continue improving and give you better feedbacks. </mark> # # --- # 2) # # <mark> Some of you consider this course too easy. So, it's time to spice things up: once you have the best model you could find, go to DTU Learn and submit your code together with your final accuracy/precision/recall scores under DTU-Learn $\rightarrow$ Assignments. I'll make a Leaderboard and we'll see who's gonna win 🥇!!</mark> # # **Constraints:** Use a 70/30 train/test split, and `random_seed=42`. # # **Note 1:** Even if it is in the form of an assignment on DTU Learn it is **not** going to be evaluated. So, take it really as an opportunity to play around with your model and see how well you can do. # # **Note 2:** You have time until **Thursday at 23.59** to submit your model/performance score.
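# ---
#
# **Appendix (added sketch):** the note in the evaluation cell of Exercise 3.3 above remarks that a confusion matrix would also be helpful. Below is a minimal sketch of how it could be computed and plotted for the random-forest predictions already stored in `y_pred` and `y_test`; it only relies on `sklearn.metrics` and `matplotlib`, both already imported above.

# +
from sklearn.metrics import ConfusionMatrixDisplay

# Rows are true labels, columns are predicted labels
# (0 = did not recommit within two years, 1 = recommitted within two years).
cm = metrics.confusion_matrix(y_test, y_pred)
print(cm)

# The same table as a plot.
ConfusionMatrixDisplay(confusion_matrix=cm,
                       display_labels=['Non-Recommitted', 'Recommitted']).plot()
plt.show()
# -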
lectures/Week6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Erasmus+ ICCT project (2018-1-SI01-KA203-047081)
# Toggle cell visibility

from IPython.display import HTML
tag = HTML('''<script>
code_show=true;
function code_toggle() {
    if (code_show){
        $('div.input').hide()
    } else {
        $('div.input').show()
    }
    code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
Toggle cell visibility <a href="javascript:code_toggle()">here</a>.''')
display(tag)

# Hide the code completely
# from IPython.display import HTML
# tag = HTML('''<style>
# div.input {
#     display:none;
# }
# </style>''')
# display(tag)
# -

# ## Building an observer for the mass-spring-damper system
#
# This example shows how to develop an observer for the mass-spring-damper system. The mass-spring-damper system was introduced in the previous interactive lessons, and its equations in state-space form are:
#
# \begin{cases}
# \begin{bmatrix}
# \dot{x_1} \\
# \dot{x_2}
# \end{bmatrix}=\underbrace{\begin{bmatrix}
# 0 && 1 \\
# -\frac{k}{m} && -\frac{c}{m}
# \end{bmatrix}}_{A}\begin{bmatrix}
# x_1 \\
# x_2
# \end{bmatrix}+\underbrace{\begin{bmatrix}
# 0 \\
# \frac{1}{m}
# \end{bmatrix}}_{B}u \\
# y = \underbrace{\begin{bmatrix}1&0\end{bmatrix}}_{C}\begin{bmatrix}
# x_1 \\
# x_2
# \end{bmatrix}
# \end{cases}
# with $m=1\,$kg, $k=2\,$N/m and $c=1\,$Ns/m. The corresponding eigenvalues are $\lambda_{1,2} = -\frac{c}{2m} \pm \frac{\sqrt{c^2 - 4km}}{2m} = -\frac{1}{2} \pm i\frac{\sqrt{7}}{2}$.
#
# The observability matrix has full rank and is equal to:
# $$
# \begin{bmatrix}C\\CA\end{bmatrix} = \begin{bmatrix}1&0\\0&1\end{bmatrix},
# $$
# so the system is observable and a state observer can be designed. For the estimate to converge in a reasonable time, it is appropriate to make the error dynamics at least 10 times faster than the dynamics of the system itself. The chosen eigenvalues are $\lambda_{\text{err} 1,2}=-10\sqrt{\left(\frac{1}{2}\right)^2+\left(\frac{\sqrt{7}}{2}\right)^2}=-10\sqrt{2}$.
#
# The structure of the observer is:
#
# $$
# \dot{\hat{\textbf{x}}}=A\hat{\textbf{x}}+B\textbf{u}+L(\textbf{y}-C\hat{\textbf{x}}),
# $$
#
# with the matrix $L$ defined as $L = \begin{bmatrix}l_1&l_2\end{bmatrix}^T$. The values required to achieve the desired convergence of the estimation error ($\dot{\textbf{e}}=(A-LC)\textbf{e}$) are the following:
#
# \begin{cases}
# l_1 = -c/m + 20\sqrt{2} = -1+20\sqrt{2}\\
# l_2 = \frac{c^2}{m^2} - 20\sqrt{2}\frac{c}{m} - \frac{k}{m} + 200 = 199-20\sqrt{2}
# \end{cases}
#
# and they are obtained by imposing $\text{det}(\lambda I_{2\text{x}2}-A+LC) = \left(\lambda+10\sqrt{2}\right)^2$.
#
# ### How to use this interactive example?
# The observer designed above is simulated below; the interactive interface lets you change all of the values and see how the behaviour changes.
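# Before the interactive part, here is a small self-contained sanity check (added as an illustration; it assumes only `numpy` and the python-`control` package, both of which the notebook imports again below). It recomputes the observer gain by pole placement on the dual system, using the same `control.acker` call that the interactive code relies on, and it can be compared with the closed-form gains given above.

# +
import numpy as np
import control

m, k, c = 1.0, 2.0, 1.0
A = np.array([[0.0, 1.0], [-k/m, -c/m]])
C = np.array([[1.0, 0.0]])

# Desired (repeated) observer eigenvalue; acker handles repeated poles.
lam = -10.0*np.sqrt(2.0)
L = control.acker(A.T, C.T, [lam, lam]).T
print('L =', np.asarray(L).ravel())

# Both eigenvalues of the error dynamics A - L*C should lie at -10*sqrt(2).
print('eig(A - LC) =', np.linalg.eigvals(A - L @ C))
# -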
# + # %matplotlib inline import control as control import numpy import sympy as sym from IPython.display import display, Markdown import ipywidgets as widgets import matplotlib.pyplot as plt #print a matrix latex-like def bmatrix(a): """Returns a LaTeX bmatrix - by <NAME> (ICCT project) :a: numpy array :returns: LaTeX bmatrix as a string """ if len(a.shape) > 2: raise ValueError('bmatrix can at most display two dimensions') lines = str(a).replace('[', '').replace(']', '').splitlines() rv = [r'\begin{bmatrix}'] rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines] rv += [r'\end{bmatrix}'] return '\n'.join(rv) # Display formatted matrix: def vmatrix(a): if len(a.shape) > 2: raise ValueError('bmatrix can at most display two dimensions') lines = str(a).replace('[', '').replace(']', '').splitlines() rv = [r'\begin{vmatrix}'] rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines] rv += [r'\end{vmatrix}'] return '\n'.join(rv) #matrixWidget is a matrix looking widget built with a VBox of HBox(es) that returns a numPy array as value ! class matrixWidget(widgets.VBox): def updateM(self,change): for irow in range(0,self.n): for icol in range(0,self.m): self.M_[irow,icol] = self.children[irow].children[icol].value #print(self.M_[irow,icol]) self.value = self.M_ def dummychangecallback(self,change): pass def __init__(self,n,m): self.n = n self.m = m self.M_ = numpy.matrix(numpy.zeros((self.n,self.m))) self.value = self.M_ widgets.VBox.__init__(self, children = [ widgets.HBox(children = [widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px')) for i in range(m)] ) for j in range(n) ]) #fill in widgets and tell interact to call updateM each time a children changes value for irow in range(0,self.n): for icol in range(0,self.m): self.children[irow].children[icol].value = self.M_[irow,icol] self.children[irow].children[icol].observe(self.updateM, names='value') #value = Unicode('<EMAIL>', help="The email value.").tag(sync=True) self.observe(self.updateM, names='value', type= 'All') def setM(self, newM): #disable callbacks, change values, and reenable self.unobserve(self.updateM, names='value', type= 'All') for irow in range(0,self.n): for icol in range(0,self.m): self.children[irow].children[icol].unobserve(self.updateM, names='value') self.M_ = newM self.value = self.M_ for irow in range(0,self.n): for icol in range(0,self.m): self.children[irow].children[icol].value = self.M_[irow,icol] for irow in range(0,self.n): for icol in range(0,self.m): self.children[irow].children[icol].observe(self.updateM, names='value') self.observe(self.updateM, names='value', type= 'All') #self.children[irow].children[icol].observe(self.updateM, names='value') #overlaod class for state space systems that DO NOT remove "useless" states (what "professor" of automatic control would do this?) 
class sss(control.StateSpace): def __init__(self,*args): #call base class init constructor control.StateSpace.__init__(self,*args) #disable function below in base class def _remove_useless_states(self): pass # + # Preparatory cell A = numpy.matrix('0 1; -2 -1') B = numpy.matrix('0; 1') C = numpy.matrix('1 0') X0 = numpy.matrix('2; 2') L = numpy.matrix([[-1+20*numpy.sqrt(2)],[197-20*numpy.sqrt(2)]]) sol1 = numpy.linalg.eig(A) Aw = matrixWidget(2,2) Aw.setM(A) Bw = matrixWidget(2,1) Bw.setM(B) Cw = matrixWidget(1,2) Cw.setM(C) X0w = matrixWidget(2,1) X0w.setM(X0) Lw = matrixWidget(2,1) Lw.setM(L) eig1o = matrixWidget(1,1) eig2o = matrixWidget(2,1) eig1o.setM(numpy.matrix([-10*numpy.sqrt(2)])) eig2o.setM(numpy.matrix([[-10*numpy.sqrt(2)],[0]])) # + # Interactive widgets m = widgets.FloatSlider( value=1, min=0.1, max=10.0, step=0.1, description='m [kg]:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', ) k = widgets.FloatSlider( value=2, min=0, max=10.0, step=0.1, description='k [N/m]:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', ) c = widgets.FloatSlider( value=1, min=0, max=10.0, step=0.1, description='c [Ns/m]:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', ) # Define the values of the input u = widgets.FloatSlider( value=1, min=0, max=20.0, step=0.1, description='ulaz u:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', ) period = widgets.FloatSlider( value=0.5, min=0.0, max=1, step=0.05, description='Period: ', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.2f', ) #create dummy widget DW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px')) #create button widget START = widgets.Button( description='Test', disabled=False, button_style='', # 'success', 'info', 'warning', 'danger' or '' tooltip='Test', icon='check' ) def on_start_button_clicked(b): #This is a workaround to have intreactive_output call the callback: # force the value of the dummy widget to change if DW.value> 0 : DW.value = -1 else: DW.value = 1 pass START.on_click(on_start_button_clicked) # Define type of method selm = widgets.Dropdown( options= ['Postavi L', 'Postavi svojstvene vrijednosti'], value= 'Postavi L', description='', disabled=False ) # Define the number of complex eigenvalues for the observer selo = widgets.Dropdown( options= ['0 kompleksnih svojstvenih vrijednosti', '2 kompleksne svojstvene vrijednosti'], value= '0 kompleksnih svojstvenih vrijednosti', description='Svojstvene vrijednosti:', disabled=False ) #define type of ipout selu = widgets.Dropdown( options=['impuls', 'step', 'sinus', 'Pravokutni val'], value='impuls', description='Tip ulaza:', disabled=False ) # + # Support functions def eigen_choice(selo): if selo == '0 kompleksnih svojstvenih vrijednosti': eig1o.children[0].children[0].disabled = False eig2o.children[1].children[0].disabled = True eigo = 0 if selo == '2 kompleksne svojstvene vrijednosti': eig1o.children[0].children[0].disabled = True eig2o.children[1].children[0].disabled = False eigo = 2 return eigo def method_choice(selm): if selm == 'Postavi L': method = 1 selo.disabled = True if selm == 'Postavi svojstvene vrijednosti': method = 2 selo.disabled = False return method # + def main_callback(m, k, c, X0w, L, eig1o, eig2o, u, period, selm, selo, selu, DW): A = numpy.matrix([[0,1],[-k/m,-c/m]]) eigo = 
eigen_choice(selo) method = method_choice(selm) if method == 1: sol = numpy.linalg.eig(A-L*C) if method == 2: if eigo == 0: L = control.acker(A.T, C.T, [eig1o[0,0], eig2o[0,0]]).T Lw.setM(L) if eigo == 2: L = control.acker(A.T, C.T, [numpy.complex(eig2o[0,0],eig2o[1,0]), numpy.complex(eig2o[0,0],-eig2o[1,0])]).T Lw.setM(L) sol = numpy.linalg.eig(A-L*C) print('Svojstvene vrijednosti sustava su:',round(sol1[0][0],4),'i',round(sol1[0][1],4)) print('Svojstvene vrijednosti promatrača su:',round(sol[0][0],4),'i',round(sol[0][1],4)) sys = sss(A,B,C,0) syso = sss(A-L*C, numpy.concatenate((B,L),axis=1), numpy.eye(2), numpy.zeros(4).reshape((2,2))) T = numpy.linspace(0, 6, 1000) if selu == 'impuls': #selu U = [0 for t in range(0,len(T))] U[0] = u T, yout, xout = control.forced_response(sys,T,U,X0w) T, youto, xouto = control.forced_response(syso,T,numpy.matrix([U,yout]),[[0],[0]]) if selu == 'step': U = [u for t in range(0,len(T))] T, yout, xout = control.forced_response(sys,T,U,X0w) T, youto, xouto = control.forced_response(syso,T,numpy.matrix([U,yout]),[[0],[0]]) if selu == 'sinus': U = u*numpy.sin(2*numpy.pi/period*T) T, yout, xout = control.forced_response(sys,T,U,X0w) T, youto, xouto = control.forced_response(syso,T,numpy.matrix([U,yout]),[[0],[0]]) if selu == 'Pravokutni val': U = u*numpy.sign(numpy.sin(2*numpy.pi/period*T)) T, yout, xout = control.forced_response(sys,T,U,X0w) T, youto, xouto = control.forced_response(syso,T,numpy.matrix([U,yout]),[[0],[0]]) fig = plt.figure(num='Simulacija', figsize=(16,10)) fig.add_subplot(211) plt.ylabel('Pozicija vs Pozicija_procjena (izlaz sustava)') plt.plot(T,xout[0]) plt.plot(T,xouto[0]) plt.xlabel('vrijeme [s]') plt.legend(['Stvarno','Procjena']) plt.axvline(x=0,color='black',linewidth=0.8) plt.axhline(y=0,color='black',linewidth=0.8) plt.grid() fig.add_subplot(212) plt.ylabel('Brzina vs Brzina_procjena') plt.plot(T,xout[1]) plt.plot(T,xouto[1]) plt.xlabel('vrijeme [s]') plt.legend(['Stvarno','Procjena']) plt.axvline(x=0,color='black',linewidth=0.8) plt.axhline(y=0,color='black',linewidth=0.8) plt.grid() alltogether = widgets.VBox([widgets.HBox([m, k, c]), widgets.HBox([selm, selo, selu]), widgets.Label(' ',border=3), widgets.HBox([widgets.Label('L:',border=3), Lw, widgets.Label(' ',border=3), widgets.Label(' ',border=3), widgets.Label('Eigenvalues:',border=3), eig1o, eig2o, widgets.Label(' ',border=3), widgets.Label(' ',border=3), widgets.Label('X0:',border=3), X0w]), widgets.Label(' ',border=3), widgets.HBox([u, period, START])]) out = widgets.interactive_output(main_callback, {'m':m, 'k':k, 'c':c, 'X0w':X0w, 'L':Lw, 'eig1o':eig1o, 'eig2o':eig2o, 'u':u, 'period':period, 'selm':selm, 'selo':selo, 'selu':selu, 'DW':DW}) out.layout.height = '640px' display(out, alltogether)
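# A minimal, self-contained check of the pole-placement step used in the callback above,
# assuming the default slider values m=1 kg, k=2 N/m, c=1 Ns/m and an example complex
# observer pole pair. Note that `numpy.complex`, used when two complex eigenvalues are
# requested, was removed in NumPy 1.24; Python's built-in `complex` is a drop-in
# replacement there.

# +
import numpy
import control

m, k, c = 1.0, 2.0, 1.0
A = numpy.matrix([[0, 1], [-k/m, -c/m]])
C = numpy.matrix('1 0')

# place a complex-conjugate observer pole pair at -10*sqrt(2) +/- 2j (example values)
re, im = -10*numpy.sqrt(2), 2.0
poles = [complex(re, im), complex(re, -im)]
L_check = numpy.matrix(control.acker(A.T, C.T, poles)).T

print('L =', numpy.asarray(L_check).ravel())
print('eig(A - L*C) =', numpy.linalg.eig(A - L_check*C)[0])
# -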
ICCT_hr/examples/04/SS-28-Promatrac_za_sustav_masa_opruga_prigusivac.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project 1: Trading with Momentum # ## Instructions # Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity. # # ## Packages # When you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code. # # The other packages that we're importing are `helper`, `project_helper`, and `project_tests`. These are custom packages built to help you solve the problems. The `helper` and `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems. # # ### Install Packages import sys # !{sys.executable} -m pip install -r requirements.txt # ### Load Packages import pandas as pd import numpy as np import helper import project_helper import project_tests # ## Market Data # ### Load Data # The data we use for most of the projects is end of day data. This contains data for many stocks, but we'll be looking at stocks in the S&P 500. We also made things a little easier to run by narrowing down our range of time period instead of using all of the data. # + df = pd.read_csv('../../data/project_1/eod-quotemedia.csv', parse_dates=['date'], index_col=False) close = df.reset_index().pivot(index='date', columns='ticker', values='adj_close') print('Loaded Data') # - close.head() # ### View Data # Run the cell below to see what the data looks like for `close`. project_helper.print_dataframe(close) # ### Stock Example # Let's see what a single stock looks like from the closing prices. For this example and future display examples in this project, we'll use Apple's stock (AAPL). If we tried to graph all the stocks, it would be too much information. apple_ticker = 'AAPL' project_helper.plot_stock(close[apple_ticker], '{} Stock'.format(apple_ticker)) # ## Resample Adjusted Prices # # The trading signal you'll develop in this project does not need to be based on daily prices, for instance, you can use month-end prices to perform trading once a month. To do this, you must first resample the daily adjusted closing prices into monthly buckets, and select the last observation of each month. # # Implement the `resample_prices` to resample `close_prices` at the sampling frequency of `freq`. close_prices_ pd.DataFrame(close_prices_.resample('M').mean()) # + def resample_prices(close_prices, freq='M'): """ Resample close prices for each ticker at specified frequency. 
Parameters ---------- close_prices : DataFrame Close prices for each ticker and date freq : str What frequency to sample at For valid freq choices, see http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases Returns ------- prices_resampled : DataFrame Resampled prices for each ticker and date """ return pd.DataFrame(close_prices.resample(freq).last()) project_tests.test_resample_prices(resample_prices) # - # ### View Data # Let's apply this function to `close` and view the results. monthly_close = resample_prices(close) project_helper.plot_resampled_prices( monthly_close.loc[:, apple_ticker], close.loc[:, apple_ticker], '{} Stock - Close Vs Monthly Close'.format(apple_ticker)) # ## Compute Log Returns # # Compute log returns ($R_t$) from prices ($P_t$) as your primary momentum indicator: # # $$R_t = log_e(P_t) - log_e(P_{t-1})$$ # # Implement the `compute_log_returns` function below, such that it accepts a dataframe (like one returned by `resample_prices`), and produces a similar dataframe of log returns. Use Numpy's [log function](https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html) to help you calculate the log returns. prices_ = None prices_ np.log(prices_) - np.log(prices_.shift(1)) # + def compute_log_returns(prices): """ Compute log returns for each ticker. Parameters ---------- prices : DataFrame Prices for each ticker and date Returns ------- log_returns : DataFrame Log returns for each ticker and date """ # TODO: Implement Function # global prices_ # prices_ = prices return np.log(prices) - np.log(prices.shift(1)) project_tests.test_compute_log_returns(compute_log_returns) # - # ### View Data # Using the same data returned from `resample_prices`, we'll generate the log returns. monthly_close_returns = compute_log_returns(monthly_close) project_helper.plot_returns( monthly_close_returns.loc[:, apple_ticker], 'Log Returns of {} Stock (Monthly)'.format(apple_ticker)) # ## Shift Returns # Implement the `shift_returns` function to shift the log returns to the previous or future returns in the time series. For example, the parameter `shift_n` is 2 and `returns` is the following: # # ``` # Returns # A B C D # 2013-07-08 0.015 0.082 0.096 0.020 ... # 2013-07-09 0.037 0.095 0.027 0.063 ... # 2013-07-10 0.094 0.001 0.093 0.019 ... # 2013-07-11 0.092 0.057 0.069 0.087 ... # ... ... ... ... ... # ``` # # the output of the `shift_returns` function would be: # ``` # Shift Returns # A B C D # 2013-07-08 NaN NaN NaN NaN ... # 2013-07-09 NaN NaN NaN NaN ... # 2013-07-10 0.015 0.082 0.096 0.020 ... # 2013-07-11 0.037 0.095 0.027 0.063 ... # ... ... ... ... ... # ``` # Using the same `returns` data as above, the `shift_returns` function should generate the following with `shift_n` as -2: # ``` # Shift Returns # A B C D # 2013-07-08 0.094 0.001 0.093 0.019 ... # 2013-07-09 0.092 0.057 0.069 0.087 ... # ... ... ... ... ... ... # ... ... ... ... ... ... # ... NaN NaN NaN NaN ... # ... NaN NaN NaN NaN ... # ``` # _Note: The "..." represents data points we're not showing._ # + def shift_returns(returns, shift_n): """ Generate shifted returns Parameters ---------- returns : DataFrame Returns for each ticker and date shift_n : int Number of periods to move, can be positive or negative Returns ------- shifted_returns : DataFrame Shifted returns for each ticker and date """ # TODO: Implement Function return returns.shift(shift_n) project_tests.test_shift_returns(shift_returns) # - # ### View Data # Let's get the previous month's and next month's returns. 
# + prev_returns = shift_returns(monthly_close_returns, 1) lookahead_returns = shift_returns(monthly_close_returns, -1) project_helper.plot_shifted_returns( prev_returns.loc[:, apple_ticker], monthly_close_returns.loc[:, apple_ticker], 'Previous Returns of {} Stock'.format(apple_ticker)) project_helper.plot_shifted_returns( lookahead_returns.loc[:, apple_ticker], monthly_close_returns.loc[:, apple_ticker], 'Lookahead Returns of {} Stock'.format(apple_ticker)) # - # ## Generate Trading Signal # # A trading signal is a sequence of trading actions, or results that can be used to take trading actions. A common form is to produce a "long" and "short" portfolio of stocks on each date (e.g. end of each month, or whatever frequency you desire to trade at). This signal can be interpreted as rebalancing your portfolio on each of those dates, entering long ("buy") and short ("sell") positions as indicated. # # Here's a strategy that we will try: # > For each month-end observation period, rank the stocks by _previous_ returns, from the highest to the lowest. Select the top performing stocks for the long portfolio, and the bottom performing stocks for the short portfolio. # # Implement the `get_top_n` function to get the top performing stock for each month. Get the top performing stocks from `prev_returns` by assigning them a value of 1. For all other stocks, give them a value of 0. For example, using the following `prev_returns`: # # ``` # Previous Returns # A B C D E F G # 2013-07-08 0.015 0.082 0.096 0.020 0.075 0.043 0.074 # 2013-07-09 0.037 0.095 0.027 0.063 0.024 0.086 0.025 # ... ... ... ... ... ... ... ... # ``` # # The function `get_top_n` with `top_n` set to 3 should return the following: # ``` # Previous Returns # A B C D E F G # 2013-07-08 0 1 1 0 1 0 0 # 2013-07-09 0 1 0 1 0 1 0 # ... ... ... ... ... ... ... ... # ``` # *Note: You may have to use Panda's [`DataFrame.iterrows`](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.DataFrame.iterrows.html) with [`Series.nlargest`](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.Series.nlargest.html) in order to implement the function. This is one of those cases where creating a vecorization solution is too difficult.* prev_ = None prev_ for index, row in prev_.iterrows(): prev_.at[index] = prev_.loc[index].nlargest(3) prev_ prev_.notnull().astype('int') # + def get_top_n(prev_returns, top_n): """ Select the top performing stocks Parameters ---------- prev_returns : DataFrame Previous shifted returns for each ticker and date top_n : int The number of top performing stocks to get Returns ------- top_stocks : DataFrame Top stocks for each ticker and date marked with a 1 """ # TODO: Implement Function prev_ = prev_returns.copy() for index, row in prev_returns.iterrows(): prev_.at[index] = prev_.loc[index].nlargest(3) return prev_.notnull().astype('int') project_tests.test_get_top_n(get_top_n) # - # ### View Data # We want to get the best performing and worst performing stocks. To get the best performing stocks, we'll use the `get_top_n` function. To get the worst performing stocks, we'll also use the `get_top_n` function. However, we pass in `-1*prev_returns` instead of just `prev_returns`. Multiplying by negative one will flip all the positive returns to negative and negative returns to positive. Thus, it will return the worst performing stocks. 
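# Before applying this to the full stock universe below, here is a tiny illustration of the
# sign trick described above, using the made-up return values from the example table
# (columns A, B, C only): negating the returns turns "largest" into "smallest", so the same
# top-n selection picks the worst performers.

# +
toy = pd.DataFrame(
    {'A': [0.015, 0.037], 'B': [0.082, 0.095], 'C': [0.096, 0.027]},
    index=pd.to_datetime(['2013-07-08', '2013-07-09']))

best_per_row = toy.apply(lambda row: row.nlargest(1).index[0], axis=1)
worst_per_row = (-1 * toy).apply(lambda row: row.nlargest(1).index[0], axis=1)
print(best_per_row)   # C on 2013-07-08, B on 2013-07-09 (highest returns)
print(worst_per_row)  # A on 2013-07-08, C on 2013-07-09 (lowest returns)
# -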
top_bottom_n = 50 df_long = get_top_n(prev_returns, top_bottom_n) df_short = get_top_n(-1*prev_returns, top_bottom_n) project_helper.print_top(df_long, 'Longed Stocks') project_helper.print_top(df_short, 'Shorted Stocks') # ## Projected Returns # It's now time to check if your trading signal has the potential to become profitable! # # We'll start by computing the net returns this portfolio would return. For simplicity, we'll assume every stock gets an equal dollar amount of investment. This makes it easier to compute a portfolio's returns as the simple arithmetic average of the individual stock returns. # # Implement the `portfolio_returns` function to compute the expected portfolio returns. Using `df_long` to indicate which stocks to long and `df_short` to indicate which stocks to short, calculate the returns using `lookahead_returns`. To help with calculation, we've provided you with `n_stocks` as the number of stocks we're investing in a single period. # + def portfolio_returns(df_long, df_short, lookahead_returns, n_stocks): """ Compute expected returns for the portfolio, assuming equal investment in each long/short stock. Parameters ---------- df_long : DataFrame Top stocks for each ticker and date marked with a 1 df_short : DataFrame Bottom stocks for each ticker and date marked with a 1 lookahead_returns : DataFrame Lookahead returns for each ticker and date n_stocks: int The number number of stocks chosen for each month Returns ------- portfolio_returns : DataFrame Expected portfolio returns for each ticker and date """ # TODO: Implement Function return None project_tests.test_portfolio_returns(portfolio_returns) # - # ### View Data # Time to see how the portfolio did. expected_portfolio_returns = portfolio_returns(df_long, df_short, lookahead_returns, 2*top_bottom_n) project_helper.plot_returns(expected_portfolio_returns.T.sum(), 'Portfolio Returns') # ## Statistical Tests # ### Annualized Rate of Return # + expected_portfolio_returns_by_date = expected_portfolio_returns.T.sum().dropna() portfolio_ret_mean = expected_portfolio_returns_by_date.mean() portfolio_ret_ste = expected_portfolio_returns_by_date.sem() portfolio_ret_annual_rate = (np.exp(portfolio_ret_mean * 12) - 1) * 100 print(""" Mean: {:.6f} Standard Error: {:.6f} Annualized Rate of Return: {:.2f}% """.format(portfolio_ret_mean, portfolio_ret_ste, portfolio_ret_annual_rate)) # - # The annualized rate of return allows you to compare the rate of return from this strategy to other quoted rates of return, which are usually quoted on an annual basis. # # ### T-Test # Our null hypothesis ($H_0$) is that the actual mean return from the signal is zero. We'll perform a one-sample, one-sided t-test on the observed mean return, to see if we can reject $H_0$. # # We'll need to first compute the t-statistic, and then find its corresponding p-value. The p-value will indicate the probability of observing a t-statistic equally or more extreme than the one we observed if the null hypothesis were true. A small p-value means that the chance of observing the t-statistic we observed under the null hypothesis is small, and thus casts doubt on the null hypothesis. It's good practice to set a desired level of significance or alpha ($\alpha$) _before_ computing the p-value, and then reject the null hypothesis if $p < \alpha$. # # For this project, we'll use $\alpha = 0.05$, since it's a common value to use. # # Implement the `analyze_alpha` function to perform a t-test on the sample of portfolio returns. 
We've imported the `scipy.stats` module for you to perform the t-test. # # Note: [`scipy.stats.ttest_1samp`](https://docs.scipy.org/doc/scipy-1.0.0/reference/generated/scipy.stats.ttest_1samp.html) performs a two-sided test, so divide the p-value by 2 to get 1-sided p-value # + from scipy import stats def analyze_alpha(expected_portfolio_returns_by_date): """ Perform a t-test with the null hypothesis being that the expected mean return is zero. Parameters ---------- expected_portfolio_returns_by_date : Pandas Series Expected portfolio returns for each date Returns ------- t_value T-statistic from t-test p_value Corresponding p-value """ # TODO: Implement Function return None project_tests.test_analyze_alpha(analyze_alpha) # - # ### View Data # Let's see what values we get with our portfolio. After you run this, make sure to answer the question below. t_value, p_value = analyze_alpha(expected_portfolio_returns_by_date) print(""" Alpha analysis: t-value: {:.3f} p-value: {:.6f} """.format(t_value, p_value)) # ### Question: What p-value did you observe? And what does that indicate about your signal? # *#TODO: Put Answer In this Cell* # ## Submission # Now that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.
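# One way the two `# TODO` stubs above (`portfolio_returns` and `analyze_alpha`) could be
# filled in, following only the descriptions given in this notebook: equal-dollar long/short
# positions averaged over `n_stocks`, and a one-sided one-sample t-test obtained by halving
# scipy's two-sided p-value. This is a sketch, not the graded reference solution.

# +
from scipy import stats

def portfolio_returns_sketch(df_long, df_short, lookahead_returns, n_stocks):
    # long positions contribute +1, short positions -1, all other stocks 0;
    # dividing by n_stocks gives the equal-weight portfolio return per stock and date
    return (df_long - df_short) * lookahead_returns / n_stocks

def analyze_alpha_sketch(expected_portfolio_returns_by_date):
    # null hypothesis: the actual mean return from the signal is zero
    t_value, p_value = stats.ttest_1samp(expected_portfolio_returns_by_date, 0.0)
    return t_value, p_value / 2  # one-sided p-value
# -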
project1/.ipynb_checkpoints/project_1_starter-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''zonmw'': conda)' # name: python3 # --- # # Stats about Train / Dev / Test sets # + import pandas as pd import sys sys.path.insert(0, '../') from utils.config import PATHS from utils.latex import show_latex, TABLES # - domains=['ADM', 'ATT', 'BER', 'ENR', 'ETN', 'FAC', 'INS', 'MBW', 'STM'] # # Domains classifier # ## Load data datapath = PATHS.getpath('data_expr_sept') train = pd.read_pickle(datapath / 'clf_domains/train_excl_bck_add_pilot.pkl') test = pd.read_pickle(datapath / 'clf_domains/test.pkl') dev = pd.read_pickle(datapath / 'clf_domains/dev.pkl') # ## Check correct split # # i.e. there are no notes that appear in more than one set print(test.NotitieID.isin(train.NotitieID).any()) print(dev.NotitieID.isin(train.NotitieID).any()) print(dev.NotitieID.isin(test.NotitieID).any()) # ## Number of sentences # # - A sentence can contain more than one domain and therefore be counted more than once. # - The last column is the total number of sentences in the dataset (incl. all negative examples) # + caption = "Domain classification: datasets, sentence-level" label = "domains_datasets_sents" data = pd.concat([ train.assign(dataset = 'train'), test.assign(dataset = 'test'), dev.assign(dataset = 'dev'), ]) balance = pd.DataFrame( index = pd.MultiIndex.from_frame(data[['dataset', 'pad_sen_id']]), columns = domains, data = data.labels.to_list() ) dataset_sizes = balance.pivot_table( index='dataset', aggfunc='size', ).rename('n_sentences') piv = balance.pivot_table( index='dataset', aggfunc='sum', ).join(dataset_sizes) piv.loc['total'] = piv.sum() piv.pipe(show_latex, caption=caption, label=label) # - # ## Number of notes # + caption = "Domain classification: datasets, note-level" label = "domains_datasets_notes" data = pd.concat([ train.assign(dataset = 'train'), test.assign(dataset = 'test'), dev.assign(dataset = 'dev'), ]) balance = pd.DataFrame( index = pd.MultiIndex.from_frame(data[['dataset', 'NotitieID']]), columns = domains, data = data.labels.to_list() ).groupby(level=[0,1]).any() dataset_sizes = balance.pivot_table( index='dataset', aggfunc='size', ).rename('n_notes') piv = balance.pivot_table( index='dataset', aggfunc='sum', ).join(dataset_sizes) piv.loc['total'] = piv.sum() piv.pipe(show_latex, caption=caption, label=label) # - # # Levels classifiers # ## Number of sentences # + caption = "Levels classification: datasets, sentence-level" label = "levels_datasets_sents" table = pd.DataFrame(index=['train', 'dev', 'test']) for dom in domains: datapath = PATHS.getpath('data_expr_sept') / f'clf_levels_{dom}_sents' train = pd.read_pickle(datapath / 'train.pkl') test = pd.read_pickle(datapath / 'test.pkl') dev = pd.read_pickle(datapath / 'dev.pkl') table.loc['train', dom] = len(train) table.loc['dev', dom] = len(dev) table.loc['test', dom] = len(test) table.astype(int).pipe(show_latex, caption=caption, label=label) # - # ## Number of notes # + caption = "Levels classification: datasets, note-level" label = "levels_datasets_notes" table = pd.DataFrame(index=['train', 'dev', 'test']) for dom in domains: datapath = PATHS.getpath('data_expr_sept') / f'clf_levels_{dom}_sents' train = pd.read_pickle(datapath / 'train.pkl') test = pd.read_pickle(datapath / 'test.pkl') dev = pd.read_pickle(datapath / 'dev.pkl') table.loc['train', dom] = train.NotitieID.nunique() table.loc['dev', dom] = dev.NotitieID.nunique() 
table.loc['test', dom] = test.NotitieID.nunique() table.astype(int).pipe(show_latex, caption=caption, label=label) # - # # Save tables prefix = 'ml_datasets' for idx, table in enumerate(TABLES): with open(f'./tables/{prefix}_{idx}.tex', 'w', encoding='utf8') as f: f.write(table)
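# An equivalent, assertion-based version of the "check correct split" step above, assuming
# the train/dev/test DataFrames (each with a `NotitieID` column) are still in memory.

# +
def assert_disjoint(train, dev, test):
    tr, dv, te = set(train.NotitieID), set(dev.NotitieID), set(test.NotitieID)
    assert not (tr & dv), 'train/dev share notes'
    assert not (tr & te), 'train/test share notes'
    assert not (dv & te), 'dev/test share notes'

assert_disjoint(train, dev, test)
# -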
nb_data_analysis/datasets_for_ml.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Reinforcement Learning: Double Deep Q Networks using Pytorch # ### Custom Environment to train our model on # + import gym from gym import spaces import numpy as np import random from copy import deepcopy class gridworld_custom(gym.Env): """Custom Environment that follows gym interface""" metadata = {'render.modes': ['human']} def __init__(self, *args, **kwargs): super(gridworld_custom, self).__init__() self.current_step = 0 self.reward_range = (-10, 100) self.action_space = spaces.Discrete(2) self.observation_space = spaces.Box(low=np.array( [0, 0]), high=np.array([4, 4]), dtype=np.int64) self.target_coord = (4, 4) self.death_coord = [(3, 1), (4, 2)] def Reward_Function(self, obs): if (obs[0] == self.target_coord[0] and obs[1] == self.target_coord[1]): return 20 elif (obs[0] == self.death_coord[0][0] and obs[1] == self.death_coord[0][1]) or \ (obs[0] == self.death_coord[1][0] and obs[1] == self.death_coord[1][1]): return -10 else: return -1 return 0 def reset(self): self.current_step = 0 self.prev_obs = [random.randint(0, 4), random.randint(0, 4)] if (self.prev_obs[0] == self.target_coord[0] and self.prev_obs[1] == self.target_coord[1]): return self.reset() return self.prev_obs def step(self, action): action = int(action) self.current_step += 1 obs = deepcopy(self.prev_obs) if(action == 0): if(self.prev_obs[0] < 4): obs[0] = obs[0] + 1 else: obs[0] = obs[0] if(action == 1): if(self.prev_obs[0] > 0): obs[0] = obs[0] - 1 else: obs[0] = obs[0] if(action == 2): if(self.prev_obs[1] < 4): obs[1] = obs[1] + 1 else: obs[1] = obs[1] if(action == 3): if(self.prev_obs[1] > 0): obs[1] = obs[1] - 1 else: obs[1] = obs[1] reward = self.Reward_Function(obs) if (obs[0] == self.target_coord[0] and obs[1] == self.target_coord[1]) or (self.current_step >= 250): done = True else: done = False self.prev_obs = obs return obs, reward, done, {} def render(self, mode='human', close=False): for i in range(0, 5): for j in range(0, 5): if i == self.prev_obs[0] and j == self.prev_obs[1]: print("*", end=" ") elif i == self.target_coord[0] and j == self.target_coord[1]: print("w", end=" ") elif (i == self.death_coord[0][0] and j == self.death_coord[0][1]) or \ (i == self.death_coord[1][0] and j == self.death_coord[1][1]): print("D", end=" ") else: print("_", end=" ") print() print() print() # - # ### Import required Packages # + import numpy as np import matplotlib.pyplot as plt from copy import deepcopy from statistics import mean import pandas as pd # + import torch import torch.nn as nn import torch.nn.functional as F from tqdm.auto import tqdm #from tqdm import tqdm # - # ### Build The neuralnet class NeuralNetwork(nn.Module): def __init__(self): super(NeuralNetwork, self).__init__() self.layer1 = nn.Linear(2, 8) self.layer2 = nn.Linear(8, 8) self.layer3 = nn.Linear(8, 4) def forward(self, x): l1 = self.layer1(x) l1 = F.relu(l1) l2 = self.layer2(l1) l2 = F.relu(l2) l3 = self.layer3(l2) output = l3 return output # #### Check to see if there is a GPU which can be used to accelerate the workflows # + device = 'cuda' if torch.cuda.is_available() else 'cpu' ## Force Use a Device #device = 'cuda' #for GPU #device = 'cpu' #for CPU print(f'Using {device} device') # - # ### Initialize the neuralnets # + q_network = NeuralNetwork().to(device) target_network = deepcopy(q_network) loss_function 
= nn.MSELoss() optimizer = torch.optim.Adam(q_network.parameters(), lr = 1e-3) # - # ### Initialise the environment env = gridworld_custom() # #### Check up the functionality of epsilon greedy. Just for reference. # + epsilon = 1 epsilon_decay = 0.999 episodes = 5000 epsilon_copy = deepcopy(epsilon) eps = [] for i in range(episodes): epsilon_copy = epsilon_copy * epsilon_decay eps.append(epsilon_copy) plt.plot(eps) plt.show() # - # ### Run everything # + gamma = 0.99 batch_size = 32 pbar = tqdm(range(episodes)) last_loss = 0.0 target_network_update_freq = 10 losses_array = [] rewards_array = [] for episode in pbar: prev_obs = env.reset() done = False mem_size = 0 curr_state_mem = np.array([[0,0]] * batch_size) prev_state_mem = np.array([[0,0]] * batch_size) action_mem = np.array([0] * batch_size) reward_mem = np.array([0] * batch_size) rewards = [] epsilon = epsilon * epsilon_decay while not(done) : if(random.uniform(0, 1) > epsilon): with torch.no_grad(): prev_q = q_network(torch.tensor(prev_obs, device=device).float()) prev_q = prev_q.cpu().detach().numpy() action = np.argmax(prev_q) else: action = random.randint(0,3) obs, reward, done, _ = env.step(action) rewards.append(reward) prev_state_mem[mem_size] = prev_obs curr_state_mem[mem_size] = obs action_mem[mem_size] = action reward_mem[mem_size] = reward mem_size = mem_size + 1 prev_obs = obs if(mem_size == batch_size): with torch.no_grad(): target_q = target_network(torch.tensor(curr_state_mem, device=device).float()).max(1)[0].detach() expected_q_mem = torch.tensor(reward_mem, device=device).float() + ( gamma * target_q ) network_q_mem = q_network(torch.tensor(prev_state_mem, device=device).float()).gather(1, torch.tensor(action_mem, device=device).type(torch.int64).unsqueeze(1)).squeeze(1) loss = loss_function(network_q_mem, expected_q_mem) last_loss = "{:.3f}".format(loss.item()) mem_size = 0 optimizer.zero_grad() loss.backward() optimizer.step() if episode % target_network_update_freq == 0: target_network = deepcopy(q_network) pbar.set_description("loss = %s" % last_loss) losses_array.append(last_loss) rewards_array.append(mean(rewards)) # - # ### Plot Losses plt.plot(losses_array, label="loss") plt.legend() plt.show() # ### Plot Loss Trend # + resolution = 50 cumsum_losses = np.array(pd.Series(np.array(losses_array)).rolling(window=resolution).mean() ) plt.plot(cumsum_losses, label="loss") plt.legend() plt.show() # - # ### Plot Rewards plt.plot(rewards_array, label="rewards") plt.legend() plt.show() # ### Plot reward trend # + resolution = 50 cumsum_rewards = np.array(pd.Series(np.array(rewards_array)).rolling(window=resolution).mean() ) plt.plot(cumsum_rewards, label="rewards") plt.legend() plt.show() # - # ### Test the trained model prev_obs = env.reset() done = False env.render() while not(done): with torch.no_grad(): prev_q = q_network(torch.tensor(prev_obs, device=device).float()) prev_q = prev_q.cpu().detach().numpy() action = np.argmax(prev_q) obs, reward, done, _ = env.step(action) prev_obs = obs env.render()
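# ### Double DQN target (variant)
#
# The update in the training loop above bootstraps from `target_network(...).max(1)[0]`,
# which is the standard DQN target. The "double" variant referenced in the title decouples
# action selection (online `q_network`) from action evaluation (`target_network`), which
# reduces overestimation of Q-values. A sketch of that target, reusing the batch arrays
# from the loop (`curr_state_mem`, `reward_mem`) and the same `gamma`:

# +
with torch.no_grad():
    next_states = torch.tensor(curr_state_mem, device=device).float()
    # pick the greedy action with the online network ...
    best_actions = q_network(next_states).argmax(dim=1, keepdim=True)
    # ... but evaluate it with the target network
    double_q = target_network(next_states).gather(1, best_actions).squeeze(1)
    expected_q_mem_double = torch.tensor(reward_mem, device=device).float() + gamma * double_q

print(expected_q_mem_double)
# -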
Week1/4_DQN_Torch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# <a
# href="https://colab.research.google.com/github/LearnPythonWithRune/FinancialDataAnalysisWithPython/blob/main/colab/starter/06 - Simple Calculations - Volatility, SMA, and EMA.ipynb"
# target="_parent">
# <img
# src="https://colab.research.google.com/assets/colab-badge.svg"
# alt="Open In Colab"/>
# </a>

# # Calculating simple metrics
# - Pct change
# - Log returns
# - Standard deviation (Volatility)
# - Rolling
# - Simple Moving Average
# - Exponential Moving Average

# ### Standard deviation
#
# - $\sigma_{p} = \sigma_{daily}\times \sqrt{p}$
# - $\sigma_{annually} = \sigma_{daily}\times \sqrt{252}$
#
# *(252 trading days per year)*

import pandas as pd
import matplotlib.pyplot as plt

# %matplotlib notebook

remote_file = "https://raw.githubusercontent.com/LearnPythonWithRune/FinancialDataAnalysisWithPython/main/files/AAPL.csv"
data = pd.read_csv(remote_file, index_col=0, parse_dates=True)
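# ### Sketch of the calculations
#
# A sketch of the calculations listed above, assuming the CSV exposes a `Close` column
# (adjust the column name to whatever the loaded data actually contains).

# +
import numpy as np

pct_change = data['Close'].pct_change()
log_returns = np.log(data['Close'] / data['Close'].shift(1))

daily_vol = log_returns.std()
annual_vol = daily_vol * np.sqrt(252)  # sigma_annually = sigma_daily * sqrt(252)

sma_20 = data['Close'].rolling(window=20).mean()          # simple moving average
ema_20 = data['Close'].ewm(span=20, adjust=False).mean()  # exponential moving average

print(f"Daily volatility: {daily_vol:.4f}, annualized: {annual_vol:.4f}")
# -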
Financial Data Analysis With Python/colab/starter/06 - Simple Calculations - Volatility, SMA, and EMA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Converting BEL graphs into inputs for GAT2VEC. # # ## Imports # + import os import bio2bel_phewascatalog from GAT2VEC import paths as gat2vec_paths from networkx import DiGraph, write_adjlist from networkx.relabel import convert_node_labels_to_integers from pybel import from_url, from_path from pybel.dsl import BaseEntity, gene, protein, rna from pybel.struct.mutation.collapse import collapse_to_genes, collapse_all_variants from pybel_tools.assembler.reified_graph import assembler import guilty_phewas.utils as phewas_utils # - # ## Define the file names. # + basedir = 'C:/Users/Mauricio/Thesis/bel_data' url = { 'tau': 'https://raw.githubusercontent.com/neurommsig/neurommsig-knowledge' + \ '/master/neurommsig_knowledge/Tau%20protein%20subgraph.bel', 'gsk3': 'https://raw.githubusercontent.com/neurommsig/neurommsig-knowledge/' + \ 'master/neurommsig_knowledge/GSK3%20subgraph.bel', 'alzh': 'https://raw.githubusercontent.com/neurommsig/neurommsig-knowledge/' + \ 'master/neurommsig_knowledge/GSK3%20subgraph.bel' } def get_base_dir(basedir, path): return os.path.join(basedir, path) def get_local_bel_file(basedir, path): dir_ = get_base_dir(basedir, path) return os.path.join(dir_, path + '.bel') def get_struct_file(basedir, path): dir_ = get_base_dir(basedir, path) return os.path.join(dir_, path + '_graph.adjlist') def get_attr_file(basedir, path): dir_ = get_base_dir(basedir, path) return os.path.join(dir_, path + '_na.adjlist') def get_labels_file(basedir, path): dir_ = get_base_dir(basedir, path) return os.path.join(dir_, 'labels_maped.txt') bel = 'alzh' # bel = 'tau' # + # possible inputs # url = param_dict['url'] local = get_local_bel_file(basedir, bel) # Output files struct_file = get_struct_file(basedir, bel) attr_file = get_attr_file(basedir, bel) labels_file = get_labels_file(basedir, bel) # - # ## Read and reify a BEL graph. # + # graph = from_url(url[bel]) graph = from_path(local) print("Starting nodes", len(graph.nodes)) print("Starting edges", len(graph.edges)) # Collapse all variants is removing also the pmod(X) from the BaseEntity collapse_all_variants(graph) # TODO collapse_to_genes removes the ptm information before converting # collapse_to_genes(graph) print("Nodes after collapse", len(graph.nodes)) print("Edges after collapse", len(graph.edges)) # Reify the edges rbg = assembler.reify_bel_graph(graph) print("Nodes after edge reification", len(rbg.nodes)) print("Edges after edge reification", len(rbg.edges)) # - # ## Assess the converted entity and predicate nodes. # + qty_predicate = {} qty_prot, qty_rna, qty_gene = 0, 0, 0 for i in rbg.nodes: if isinstance(i, BaseEntity): if isinstance(i, rna): qty_rna += 1 elif isinstance(i, protein): qty_prot += 1 elif isinstance(i, gene): qty_gene += 1 else: if rbg.nodes[i]['label'] in qty_predicate: qty_predicate[rbg.nodes[i]['label']] += 1 else: qty_predicate[rbg.nodes[i]['label']] = 1 print("Predicates") print(qty_predicate) print(f"Proteins {qty_prot}") print(f"RNA {qty_rna}") print(f"Gene {qty_gene}") # - # ## Adding PheWAS annotation (as attributes). 
# # ### Load the links between Phenotype and genes phewas_manager = bio2bel_phewascatalog.Manager() pw_dict = phewas_manager.to_dict() # ### Save the disease list into the graph # + start_ind = len(rbg.nodes) phenotypes_list = set([phe for _list in pw_dict.values() for odds, phe in _list]) # unique_phenotypes = set().union(*pw_dict.values()) enum_phenotypes = enumerate(phenotypes_list, start=start_ind) att_mappings = {phenot: num for num, phenot in enum_phenotypes} annotated_graph = rbg.copy() phewas_utils.add_disease_attribute(annotated_graph, pw_dict) # - # ## Genarate the labels for classification. phewas_utils.download_for_disease('EFO_0000249', 'ot_entrez.txt') targets = phewas_utils.parse_gene_list('ot_entrez.txt', rbg) print('Targets found in the network:', len(targets)) # ### Targets not present at the network with open('ot_entrez.txt') as f: alltargets = set(f.read().split()) print(alltargets.difference(targets)) # Targets found in the network: 86, without taking non causal correlations # Targets found in the network: 91, taking non causal correlations # missing targets were: """'IMPA1', 'ADORA3', 'PDE3A', 'ADRA2A', 'GNRHR', 'ADRA1B', 'HTR3A', 'HTR3D', 'SCN8A', 'APH1B', 'CSF2RA', 'FAAH', 'ADRA2C', 'MAOA', 'SCN2A', 'ALDH5A1', 'AGTR1', 'DDB1', 'DRD3', 'SCN11A', 'SCN1A', 'HTR3C', 'VKORC1', 'CACNA1B', 'SV2A', 'SCN5A', 'PAH', 'SCN3A', 'SCN9A', 'SLC6A2', 'ADORA1', 'FGFR3', 'HTR2C', 'HTR3B', 'PGR', 'RXRB', 'ADRA1A', 'SCN4A', 'DRD2', 'ADORA2B', 'SCN10A', 'CUL4A', 'SLC6A3', 'RARG', 'RBX1', 'PPARD', 'IFNAR2', 'SLC5A2', 'ADRA1D', 'ADRA2B', 'HTR3E', 'MAOB', 'CRBN', 'KIT', 'RARA', 'ATP4A', 'HRH3', 'CACNA2D1', 'SCN7A', 'SLC6A4'"""; # ## Create adjacency list and labels files for GAT2VEC # + import logging # Structure graph out_rbg = convert_node_labels_to_integers(annotated_graph, # first_label=1, label_attribute='old_label') write_adjlist(out_rbg, struct_file) # print(att_mappings) # Attribute graph phewas_utils.write_adj_file_attribute(out_rbg, attr_file, att_mappings) # - phewas_utils.write_labels_file(labels_file, out_rbg, targets)
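# A toy illustration of the structure-file step above: GAT2VEC expects plain integer node
# ids, so nodes are relabeled before the adjacency list is written. This uses a throwaway
# graph and file name, not the project data, and relies on the networkx helpers already
# imported at the top of this notebook.

# +
toy = DiGraph([('geneA', 'increases_1'), ('increases_1', 'geneB')])
toy_int = convert_node_labels_to_integers(toy, label_attribute='old_label')
write_adjlist(toy_int, 'toy_graph.adjlist')
print(dict(toy_int.nodes(data='old_label')))
# -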
notebooks/BEL2Gat2Vec.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # default_exp load_data # # %reload_ext autoreload # # %autoreload 2 from nbdev.showdoc import show_doc # !date # # Load data # > Prepare photons and exposure, given a source # This module defines the function `load_source_data`. # # Depends on module Exposure for SC data # + #export import pickle, healpy import pandas as pd import numpy as np import matplotlib.pyplot as plt from wtlike.config import (Config, UTC, MJD) from wtlike.exposure import binned_exposure, sc_data_selection from wtlike.data_man import get_week_files # - # export def _get_photons_near_source(config, source, week): #tzero, photon_df): """ Select the photons near a source - source : a PointSource object - week : dict with - tzero : start time for the photon - photon_df : DataFrame with photon data Returns a DF with - `band` index, - `time` in MJD (added tstart and converted from MET) - `pixel` index, nest indexing - `radius` distance in deg from source direction """ def _cone(config, source, nest=True): # cone geometry stuff: get corresponding pixels and center vector l,b,radius = source.l, source.b, config.radius cart = lambda l,b: healpy.dir2vec(l,b, lonlat=True) conepix = healpy.query_disc(config.nside, cart(l,b), np.radians(radius), nest=nest) center = healpy.dir2vec(l,b, lonlat=True) return center, conepix center, conepix = _cone(config,source) df = week['photons'] tstart = week['tstart'] allpix = df.nest_index.values # select by comparing high-order pixels (faster) shift=11 a = np.right_shift(allpix, shift) c = np.unique(np.right_shift(conepix, shift)) incone = np.isin(a,c) if sum(incone)<2: if config.verbose>1: print(f'\nWeek starting {UTC(MJD(tstart))} has 0 or 1 photons') return if config.verbose>2: a, b = sum(incone), len(allpix) print(f'Select photons for source {source.name}:\n\tPixel cone cut: select {a} from {b} ({100*a/b:.1f}%)') # cut df to entries in the cone dfc = df[incone].copy() if 'trun' in dfc: time = dfc.run_id.astype(float) + dfc.trun * config.offset_size else: # old: convert to float, add tstart, convert to MJD time = np.array(dfc.time, float)+tstart dfc.loc[:,'time'] = MJD(time) # assemble the DataFrame, remove those outside the radius out_df = dfc # make sure times are monotonic by sorting (needed since runs not in order in most # week-files after March 2018) out_df = dfc.sort_values(by='time') if config.verbose>2: print(f'selected photons:\n{out_df.head()}') return out_df # exporti class ProcessWeek(object): """ Process a week's photon and livetime info into the source-related photon and exposure tables. 
""" def __init__(self, config, source, week_file): """ """ with open(week_file, 'rb') as inp: week = pickle.load(inp) pdf = week['photons'] sc_data = edf = week['sc_data'] self.start = MJD(week['tstart']) self.config = config if config.verbose>1: print(f'Opened week file "{week_file.name}" of {UTC(self.start)}') print(f'\tFound {len(pdf):,} photons, {len(edf):,} SC entries)') self.sc_df = sc_df = sc_data_selection(config, source, sc_data) # interleaved start/stop self.stime = np.empty(2*len(sc_df.start)) self.stime[0::2]=sc_df.start.values self.stime[1::2]=sc_df.stop.values assert np.all(np.diff(self.stime)>=0), 'Time-ordering failure' self.lt = sc_df.livetime.values self.ct = sc_df.cos_theta.values pdf = _get_photons_near_source(config,source, week) if pdf is None or len(pdf)<3 : self.photons = None else: assert pdf is not None and len(pdf)>0 # set weights from the weight table, removing those with no weight pdf = source.wtman.add_weights(pdf) # finally set the time and the exposure per remaining photons self.photons = self.photon_times( pdf ) def __str__(self): return f'Data for week of {UTC(self.start)}: {len(self.photons):,} photons' def __repr__(self): return self.__str__() def photon_times(self, pdf): # construct the time from the run number and offset ptime = MJD(pdf.run_id.astype(float) + pdf.trun * self.config.offset_size) pdf.loc[:,'time'] = ptime # select the subset with exposure info tk = np.searchsorted(self.stime, ptime) good_exp = np.mod(tk,2)==1 pdfg = pdf[good_exp].copy() if len(pdfg)==0: return None pdfg.drop(columns=['trun'], inplace=True) # time edges-- same for each band #xp = np.append(self.stime[0::2],self.stime[-1]) return pdfg def hist_spacecraft(self): self.sc_df.hist('livetime cos_theta exp'.split(), bins=100, layout=(1,3), figsize=(12,3)); def hist_photons(self): self.photons.hist('band time'.split(), bins=100, log=True, figsize=(12,3), layout=(1,3)); def __call__(self): return dict( start= self.start, photons=self.photons, exposure=self.sc_df, ) # + #exporti class TWeek(): # This is a funtcor wrapping ProcessWeek needed to be global. def __init__(self, config, source): self.config=config self.source=source def __call__(self, wkf): print('.', end='') eman = ProcessWeek( self.config, self.source, wkf) return (eman.photons, eman.sc_df) def multiprocess_week_data(config, source, week_range, processes=None): """ Manage processing of set of week files with multiprocessing """ from multiprocessing import Pool processes = processes or config.pool_size week_files = get_week_files(config, week_range) txt = f', using {processes} processes ' if processes>1 else '' if config.verbose>0: print(f'\tProcessing {len(week_files)} week files{txt}', end='', flush=True) process_week = TWeek(config, source) if processes>1: with Pool(processes=processes) as pool: week_data = pool.map(process_week, week_files) else: week_data = map(process_week, week_files) print('\n') pp = [] ee = [] for wk in week_data: # append week data to photons, weighted exposure, band exposure pdf,edf = wk if pdf is not None and len(pdf)>2: pp.append(pdf) if len(edf)>0: ee.append(edf) return pp,ee # - #export def load_source_data(config, source, week_range=None, key='', clear=False): """ This is a client of SourceData. 
- week_range [None] -- if None, select all weeks - key [''] -- key to use for cache, construct from name if not set - clear [False] Returns a tuple of - photons - exposure """ if config.datapath/'data_files' is None and key not in config.cache: raise Exception(f'Data for {source.name} is not cached, and config.datapath/"data_files" is not set') def load_from_weekly_data(config, source, week_range=None): pp, ee = multiprocess_week_data(config, source, week_range) # concatenate the two lists of DataFrames p_df = pd.concat(pp, ignore_index=True) p_df.loc[:,'run_id'] = pd.Categorical(p_df.run_id) e_df = pd.concat(ee, ignore_index=True) return p_df, e_df description=f'SourceData: {source.name}' used_key = None # change if used cache if week_range is not None or key is None: # always load directly if weeks specified or key set to None print(description) r = load_from_weekly_data(config, source, week_range=week_range) else: # use the cache used_key = f'{source.filename}_data' if key=='' else key r = config.cache(used_key, load_from_weekly_data, config, source, week_range=None, overwrite=clear, description=description) # append key used for retrieval return list(r) + [used_key] # ### Cache check # If the most recent week has been changed, load it and replace only the most recent part in the cache. # # Test with Geminga # + # from wtlike.sources import PointSource # from wtlike.data_man import FermiData # source = PointSource('Geminga') # config = Config() # ret = load_source_data(config, source) # stop = ret[1].iloc[-1].stop # print(f'Cache ends at {stop:.1f} [UTC {UTC(stop)}] , in week {FermiData().find_week(stop)}') # + # from wtlike.data_man import check_data # ff, ww, days = check_data() # eman = ProcessWeek( config, source, ff[-1]) # eman.photons.head() # x = ret[0].run_id.dtype # A = set(x.categories); len(A) # rr = eman.photons.run_id # B = set(rr.dtype.categories); len(B) # A & B # - # #### Test multiprocessing # + # #hide # from wtlike.sources import PointSource # from wtlike.config import Timer # config=Config(); # if config.valid: # test_weeks = (9,108) # source =PointSource('Geminga', config=config) # with Timer() as t1: # config.pool_size=processes=4 # sd1 = load_source_data(config, source, week_range=test_weeks, key=None ) # print(t1) # with Timer() as t2: # config.pool_size=1 # sd2 = load_source_data(config, source, week_range=test_weeks, key=None ) # print(t2) # print(f'Timing ratio using {processes} proceses: {t2.elapsed/t1.elapsed:.2f}') # assert( len(sd1[0])==len(sd2[0]) ), 'Fail multiprocessing test' # - # #### Test with a Geminga week #hide config=Config(); config.verbose=2 name="Geminga" # name='PSR B1259-63' if config.valid: # testing from wtlike.sources import PointSource source =PointSource(name, config=config) week_files = get_week_files(config, (665,665)); week_file = week_files[0] self = ProcessWeek(config, source, week_file) self.hist_photons() self.hist_spacecraft() config.use_kerr=False nk = ProcessWeek(config, source, week_file) nk.hist_spacecraft() # ### Look at the ratio of Geminga exposures vs. $\cos\theta$. expratio = nk.sc_df.exp/self.sc_df.exp ax = plt.gca() ax.plot(self.sc_df.cos_theta, expratio, '.'); ax.grid(alpha=0.5); ax.set(xlabel='cos theta', ylabel='power law / source spectrum', xlim=(0.4,1), ylim=(1.0, 1.6) ); ax.axhline(1,color='grey'); #hide from nbdev.export import notebook2script notebook2script() # !date
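# #### Why `TWeek` is a class
#
# `multiprocessing.Pool.map` pickles the callable it distributes, so a closure or lambda
# capturing `config` and `source` would fail; a module-level class instance holding that
# state (like `TWeek` above) pickles fine. A minimal stand-alone illustration of the same
# pattern (assumes a fork-capable platform, e.g. Linux, when run from a notebook):

# +
from multiprocessing import Pool

class Scale:
    """Picklable callable carrying fixed state, like TWeek carries (config, source)."""
    def __init__(self, factor):
        self.factor = factor
    def __call__(self, x):
        return self.factor * x

with Pool(processes=2) as pool:
    print(pool.map(Scale(10), [1, 2, 3]))  # [10, 20, 30]
# -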
nbs/04_load_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Given two words word1 and word2, find the minimum number of operations required to convert word1 to word2. # # You have the following 3 operations permitted on a word: # # Insert a character # Delete a character # Replace a character # Example 1: # # Input: word1 = "horse", word2 = "ros" # Output: 3 # Explanation: # horse -> rorse (replace 'h' with 'r') # rorse -> rose (remove 'r') # rose -> ros (remove 'e') # Example 2: # # Input: word1 = "intention", word2 = "execution" # Output: 5 # Explanation: # intention -> inention (remove 't') # inention -> enention (replace 'i' with 'e') # enention -> exention (replace 'n' with 'x') # exention -> exection (replace 'n' with 'c') # exection -> execution (insert 'u') # - [Edit Distance 编辑距离](https://www.cnblogs.com/grandyang/p/4344107.html) # + class Solution: def minDistance(self, word1: str, word2: str) -> int: len1 = len(word1) + 1 len2 = len(word2) + 1 dp = [[0 for i in range(len2)] for j in range(len1)] for i in range(len1): dp[i][0] = i for j in range(len2): dp[0][j] = j for i in range(1, len1): for j in range(1, len2): if word1[i - 1] == word2[j - 1]: dp[i][j] = dp [i -1][j - 1] else: dp[i][j] = min(dp[i - 1][j - 1], min(dp[i - 1][j], dp[i][j - 1])) + 1 return dp[-1][-1] # test word1 = "<PASSWORD>" word2 = "<PASSWORD>" Solution().minDistance(word1, word2)
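# The two worked examples from the problem statement above double as a quick sanity check
# of the DP implementation.
assert Solution().minDistance("horse", "ros") == 3
assert Solution().minDistance("intention", "execution") == 5
print("edit-distance checks passed")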
DSA/dp/minDistance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="punL79CN7Ox6" # ##### Copyright 2020 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="_ckMIh7O7s6D" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="QrxSyyyhygUR" # # Tweaking the Model # + [markdown] colab_type="text" id="S5Uhzt6vVIB2" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l09c05_nlp_tweaking_the_model.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l09c05_nlp_tweaking_the_model.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # </table> # + [markdown] colab_type="text" id="xiWacy71Cu54" # In this colab, you'll investigate how various tweaks to data processing and the model itself can impact results. At the end, you'll once again be able to visualize how the network sees the related sentiment of each word in the dataset. # + [markdown] colab_type="text" id="hY-fjvwfy2P9" # ## Import TensorFlow and related functions # + colab={} colab_type="code" id="drsUfVVXyxJl" import tensorflow as tf from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences # + [markdown] colab_type="text" id="ZIf1N46jy6Ed" # ## Get the dataset # # We'll once again use the dataset containing Amazon and Yelp reviews. This dataset was originally extracted from [here](https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set). # + colab={} colab_type="code" id="m83g42sJzGO0" # !wget --no-check-certificate \ # https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P \ # -O /tmp/sentiment.csv # + colab={} colab_type="code" id="y4e6GG2CzJUq" import numpy as np import pandas as pd dataset = pd.read_csv('/tmp/sentiment.csv') sentences = dataset['text'].tolist() labels = dataset['sentiment'].tolist() # Separate out the sentences and labels into training and test sets training_size = int(len(sentences) * 0.8) training_sentences = sentences[0:training_size] testing_sentences = sentences[training_size:] training_labels = labels[0:training_size] testing_labels = labels[training_size:] # Make labels into numpy arrays for use with the network later training_labels_final = np.array(training_labels) testing_labels_final = np.array(testing_labels) # + [markdown] colab_type="text" id="drDkTFMuzW6N" # ## Tokenize the dataset (with tweaks!) # # Now, we'll tokenize the dataset, but we can make some changes to this from before. 
Previously, we used: # ``` # vocab_size = 1000 # embedding_dim = 16 # max_length = 100 # trunc_type='post' # padding_type='post' # ``` # # How might changing the `vocab_size`, `embedding_dim` or `max_length` affect how the model performs? # + colab={} colab_type="code" id="hjPUJFhQzuee" vocab_size = 500 embedding_dim = 16 max_length = 50 trunc_type='post' padding_type='post' oov_tok = "<OOV>" tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok) tokenizer.fit_on_texts(training_sentences) word_index = tokenizer.word_index training_sequences = tokenizer.texts_to_sequences(training_sentences) training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type) testing_sequences = tokenizer.texts_to_sequences(testing_sentences) testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type) # + [markdown] colab_type="text" id="FwFjO1kg0UUK" # ## Train a Sentiment Model (with tweaks!) # # We'll use a slightly different model here, using `GlobalAveragePooling1D` instead of `Flatten()`. # + colab={} colab_type="code" id="ectP92fl0dFO" model = tf.keras.Sequential([ tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length), tf.keras.layers.GlobalAveragePooling1D(), tf.keras.layers.Dense(6, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy']) model.summary() # + colab={} colab_type="code" id="7TQIaGjs073w" num_epochs = 30 history = model.fit(training_padded, training_labels_final, epochs=num_epochs, validation_data=(testing_padded, testing_labels_final)) # + [markdown] colab_type="text" id="alAlYort7gWV" # ## Visualize the training graph # # You can use the code below to visualize the training and validation accuracy while you try out different tweaks to the hyperparameters and model. # + colab={} colab_type="code" id="o9l5vBeU71vH" import matplotlib.pyplot as plt def plot_graphs(history, string): plt.plot(history.history[string]) plt.plot(history.history['val_'+string]) plt.xlabel("Epochs") plt.ylabel(string) plt.legend([string, 'val_'+string]) plt.show() plot_graphs(history, "accuracy") plot_graphs(history, "loss") # + [markdown] colab_type="text" id="SZzXE-pT8K57" # ## Get files for visualizing the network # # The code below will download two files for visualizing how your network "sees" the sentiment related to each word. Head to http://projector.tensorflow.org/ and load these files, then click the checkbox to "sphereize" the data. # # Note: You may run into errors with the projection if your `vocab_size` earlier was larger than the actual number of words in the vocabulary, in which case you'll need to decrease this variable and re-train in order to visualize. 
# + colab={} colab_type="code" id="2Ex4o7Lc8Njl" # First get the weights of the embedding layer e = model.layers[0] weights = e.get_weights()[0] print(weights.shape) # shape: (vocab_size, embedding_dim) # + colab={} colab_type="code" id="bUL1zk5p8WIV" import io # Create the reverse word index reverse_word_index = dict([(value, key) for (key, value) in word_index.items()]) # Write out the embedding vectors and metadata out_v = io.open('vecs.tsv', 'w', encoding='utf-8') out_m = io.open('meta.tsv', 'w', encoding='utf-8') for word_num in range(1, vocab_size): word = reverse_word_index[word_num] embeddings = weights[word_num] out_m.write(word + "\n") out_v.write('\t'.join([str(x) for x in embeddings]) + "\n") out_v.close() out_m.close() # + colab={} colab_type="code" id="lqyV8QYnD46U" # Download the files try: from google.colab import files except ImportError: pass else: files.download('vecs.tsv') files.download('meta.tsv') # + [markdown] colab_type="text" id="XUXAlNNk59gG" # ## Predicting Sentiment in New Reviews # # Below, we've again included some example new reviews you can test your results on. # + colab={} colab_type="code" id="JbFTTcaK6Dan" # Use the model to predict a review fake_reviews = ['I love this phone', 'I hate spaghetti', 'Everything was cold', 'Everything was hot exactly as I wanted', 'Everything was green', 'the host seated us immediately', 'they gave us free chocolate cake', 'not sure about the wilted flowers on the table', 'only works when I stand on tippy toes', 'does not work when I stand on my head'] print(fake_reviews) # Create the sequences padding_type='post' sample_sequences = tokenizer.texts_to_sequences(fake_reviews) fakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length) print('\nHOT OFF THE PRESS! HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\n') classes = model.predict(fakes_padded) # The closer the class is to 1, the more positive the review is deemed to be for x in range(len(fake_reviews)): print(fake_reviews[x]) print(classes[x]) print('\n') # Try adding reviews of your own # Add some negative words (such as "not") to the good reviews and see what happens # For example: # they gave us free chocolate cake and did not charge us
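# One way to see the effect of the pooling choice mentioned earlier: with `Flatten()`, the
# first Dense layer grows with `max_length`, whereas `GlobalAveragePooling1D` always feeds
# it `embedding_dim` inputs regardless of the padded length. A quick parameter-count
# comparison using the same hyperparameters as above:

# +
flatten_model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
print('Parameters with Flatten:                ', flatten_model.count_params())
print('Parameters with GlobalAveragePooling1D: ', model.count_params())
# -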
courses/udacity_intro_to_tensorflow_for_deep_learning/l09c05_nlp_tweaking_the_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 3. CNN # ## Run name # + import time project_name = 'DigitRecognizer' step_name = 'Preprocess' date_str = time.strftime("%Y%m%d", time.localtime()) time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime()) run_name = '%s_%s_%s' % (project_name, step_name, time_str) print('run_name: %s' % run_name) t0 = time.time() # - # ## Important Params # + from multiprocessing import cpu_count batch_size = 8 random_state = 2019 print('cpu_count:\t', cpu_count()) print('batch_size:\t', batch_size) print('random_state:\t', random_state) # - # ## Import PKGs # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.image as mpimg # %matplotlib inline from IPython.display import display import os import gc import math import shutil import zipfile import pickle import h5py from PIL import Image from tqdm import tqdm from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, accuracy_score # - from keras.utils.np_utils import to_categorical # convert to one-hot-encoding from keras.models import Sequential from keras.layers import Dense, Dropout, Input, Flatten, Conv2D, MaxPooling2D, BatchNormalization from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import LearningRateScheduler, TensorBoard # ## Basic folders # + cwd = os.getcwd() input_folder = os.path.join(cwd, 'input') log_folder = os.path.join(cwd, 'log') model_folder = os.path.join(cwd, 'model') output_folder = os.path.join(cwd, 'output') print('input_folder: \t\t%s' % input_folder) print('log_folder: \t\t%s' % log_folder) print('model_folder: \t\t%s' % model_folder) print('output_folder: \t\t%s'% output_folder) train_csv_file = os.path.join(input_folder, 'train.csv') test_csv_file = os.path.join(input_folder, 'test.csv') print('\ntrain_csv_file: \t%s' % train_csv_file) print('test_csv_file: \t\t%s' % test_csv_file) processed_data_file = os.path.join(input_folder, '%s_%s.p' % (project_name, step_name)) print('processed_data_file: \t%s' % processed_data_file) # - # ## Basic functions def show_data_images(rows, fig_column, y_data, *args): columns = len(args) figs, axes = plt.subplots(rows, columns, figsize=(rows, fig_column*columns)) print(axes.shape) for i, ax in enumerate(axes): y_data_str = '' if type(y_data) != type(None): y_data_str = '_' + str(y_data[i]) ax[0].set_title('28x28' + y_data_str) for j, arg in enumerate(args): ax[j].imshow(arg[i]) # ## Preview data # + # %%time raw_data = np.loadtxt(train_csv_file, skiprows=1, dtype='int', delimiter=',') x_data = raw_data[:,1:] y_data = raw_data[:,0] x_test = np.loadtxt(test_csv_file, skiprows=1, dtype='int', delimiter=',') print(x_data.shape) print(y_data.shape) print(x_test.shape) # + x_data = x_data/255. x_test = x_test/255. 
y_data_cat = to_categorical(y_data) describe(x_data) describe(x_test) describe(y_data) describe(y_data_cat) x_data = x_data.reshape(-1, 28, 28, 1) x_test = x_test.reshape(-1, 28, 28, 1) describe(x_data) describe(x_test) # print(x_data[0]) print(y_data[0: 10]) # + index = 0 fig, ax = plt.subplots(2, 2, figsize=(12, 6)) ax[0, 0].plot(x_data[index].reshape(784,)) ax[0, 0].set_title('784x1 data') ax[0, 1].imshow(x_data[index].reshape(28, 28), cmap='gray') ax[0, 1].set_title('28x28 data => ' + str(y_data[index])) ax[1, 0].plot(x_test[index].reshape(784,)) ax[1, 0].set_title('784x1 data') ax[1, 1].imshow(x_test[index].reshape(28, 28), cmap='gray') ax[1, 1].set_title('28x28 data') # - # ## Split train and val # + x_train, x_val, y_train_cat, y_val_cat = train_test_split(x_data, y_data_cat, test_size=0.1, random_state=random_state) print(x_train.shape) print(y_train_cat.shape) print(x_val.shape) print(y_val_cat.shape) # - # ## Build model def build_model(input_shape): model = Sequential() # Block 1 model.add(Conv2D(filters = 32, kernel_size = (3, 3), activation='relu', padding = 'Same', input_shape = input_shape)) model.add(BatchNormalization()) model.add(Conv2D(filters = 32, kernel_size = (3, 3), activation='relu', padding = 'Same')) model.add(BatchNormalization()) model.add(MaxPooling2D(strides=(2,2))) model.add(Dropout(0.25)) # Block 2 model.add(Conv2D(filters = 64, kernel_size = (3, 3), activation='relu', padding = 'Same')) model.add(BatchNormalization()) model.add(Conv2D(filters = 64, kernel_size = (3, 3), activation='relu', padding = 'Same')) model.add(BatchNormalization()) model.add(MaxPooling2D(strides=(2,2))) model.add(Dropout(0.25)) # Output model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.25)) model.add(Dense(128, activation='relu')) model.add(Dropout(0.25)) model.add(Dense(10, activation='softmax')) return model model = build_model(x_train.shape[1:]) model.compile(loss='categorical_crossentropy', optimizer = Adam(lr=1e-4), metrics=["accuracy"]) # + train_datagen = ImageDataGenerator( zoom_range = 0.2, rotation_range = 20, height_shift_range = 0.2, width_shift_range = 0.2 ) val_datagen = ImageDataGenerator() # + # annealer = LearningRateScheduler(lambda x: 1e-4 * 0.995 ** x) def get_lr(x): if x <= 10: return 1e-4 elif x <= 20: return 3e-5 else: return 1e-5 [print(get_lr(x), end=' ') for x in range(1, 31)] annealer = LearningRateScheduler(get_lr) callbacks = [annealer] # + # %%time steps_per_epoch = x_train.shape[0] / batch_size print('steps_per_epoch:\t', steps_per_epoch) hist = model.fit_generator( train_datagen.flow(x_train, y_train_cat, batch_size=batch_size, seed=random_state), steps_per_epoch=steps_per_epoch, epochs=2, #Increase this when not on Kaggle kernel verbose=1, #1 for ETA, 0 for silent callbacks=callbacks, max_queue_size=batch_size*4, workers=cpu_count(), validation_steps=100, validation_data=val_datagen.flow(x_val, y_val_cat, batch_size=batch_size, seed=random_state) ) # -
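# ## Evaluate and save (added sketch)
#
# The `processed_data_file` path defined above is never used in this excerpt. The cell
# below is an added sketch, not part of the original notebook: it checks the validation
# accuracy of the freshly trained model and pickles the split arrays under that path so a
# later step can reload them.

# +
val_loss, val_acc = model.evaluate(x_val, y_val_cat, verbose=0)
print('val_loss: %.4f, val_acc: %.4f' % (val_loss, val_acc))

with open(processed_data_file, 'wb') as f:
    pickle.dump({
        'x_train': x_train, 'y_train_cat': y_train_cat,
        'x_val': x_val, 'y_val_cat': y_val_cat,
        'x_test': x_test
    }, f)
print('saved processed data to', processed_data_file)

print('done in %.1fs' % (time.time() - t0))
# -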
digit-recognizer/3. CNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.10 ('base')
#     language: python
#     name: python3
# ---

# # Deploying a Lookup Analytic in the Browser
#
# In this notebook, we will show how to create a `.air` file to perform a simple lookup of analytic results from an analytic that has already been run. To do this, we will create a simple example which will utilize a regular expression to identify strings of digits which could potentially be phone numbers. Once those are identified, the phone numbers will be stripped of all punctuation (characters such as parentheses and hyphens) and looked up in a fictional customer database to see if there is any additional information about that customer.

# ## Dependencies
#
# For this notebook, the following dependencies are required:
#
# - `aisquared`
#
# This package is available on [pypi](https://pypi.org) via `pip`. The following cell runs the command to install this dependency and imports it into the notebook environment.

# +
# ! pip install aisquared
import aisquared
import json
# -

# ## Analytic Creation
#
# Now that the required packages have been installed and imported, it is time to create the results of the analytic. These results are going to be recorded in JSON form via a Python dictionary, with the top-level key being the digits of the phone number and the lower-level keys being the associated individual's name and customer ID.
#
# This dictionary will then be saved to a JSON file, and we will then build the `.air` configuration around it.

# +
# Configure the example results
results = {
    '1111111111' : {
        'name' : '<NAME>',
        'customerID' : 11111
    },
    '2222222222' : {
        'name' : '<NAME>',
        'customerID' : 22222
    },
    '3333333333' : {
        'name' : '<NAME>',
        'customerID' : 33333
    },
    '4444444444' : {
        'name' : '<NAME>',
        'customerID' : 44444
    },
    '5555555555' : {
        'name' : '<NAME>',
        'customerID' : 55555
    },
    '6666666666' : {
        'name' : '<NAME>',
        'customerID' : 66666
    },
    '7777777777' : {
        'name' : '<NAME>',
        'customerID' : 77777
    },
    '8888888888' : {
        'name' : '<NAME>',
        'customerID' : 88888
    },
    '9999999999' : {
        'name' : '<NAME>',
        'customerID' : 99999
    }
}

# Save the analytic as a JSON file
file_name = 'analytic.json'
with open(file_name, 'w') as f:
    json.dump(results, f)
# -

# ## Create the ModelConfiguration
#
# In the following cell, we configure the harvesting, preprocessing, analytic, postprocessing, and rendering steps. Once those are created, we add them all to the `ModelConfiguration` object and compile them into the `.air` file.

# +
# Harvesting
# For harvesting, we need to harvest using a regular expression
# that identifies possible phone numbers.
The following lines of # code configure such harvesting regex = '/^[\.-)( ]*([0-9]{3})[\.-)( ]*([0-9]{3})[\.-)( ]*([0-9]{4})$/' harvester = aisquared.config.harvesting.TextHarvester( how = 'regex', regex = regex ) # + # Preprocessing # The only preprocessing step that has to be conducted is the # removal of all punctuation, since the phone numbers in the # analytic JSON are all strictly digits step = aisquared.config.preprocessing.RemoveCharacters( remove_digits = False, remove_punctuation = True ) preprocesser = aisquared.config.preprocessing.TextPreprocessor() preprocesser.add_step(step) # + # Analytic # The analytic for this configuration is going to be a LocalAnalytic # class, where we pass the saved file to the object analytic = aisquared.config.analytic.LocalAnalytic(file_name) # + # Postprocessing # No postprocessing steps are needed, so we can set that value to None postprocesser = None # + # Rendering # To render results, we are going to use the WordRendering class to # initialize the rendering of badges for individual words. By default, # the WordRendering class renders the specific words/tokens that were # input into the analytics renderer = aisquared.config.rendering.WordRendering() # + # Putting it all together # Finally, we will take the previous objects and put them all # together into a single ModelConfiguration object, which is then # compiled into the .air file config = aisquared.config.ModelConfiguration( name = 'PhoneNumberLookup', harvesting_steps = harvester, preprocessing_steps = preprocesser, analytic = analytic, postprocessing_steps = postprocesser, rendering_steps = renderer, version = None, description = 'Phone number lookup which shows name and customer ID (if present) for phone numbers found', mlflow_uri = None, mlflow_user = None, mlflow_token = None ) # compile to create .air file config.compile()
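# ## Sanity-checking the lookup locally (added)
#
# This last cell is an addition to the original notebook: before shipping the `.air`
# file, it can be handy to mimic what the browser-side pipeline will do, namely strip
# punctuation from a harvested string and look the remaining digits up in the `results`
# dictionary. Only the Python standard library is used here; the aisquared objects above
# are untouched.

# +
import re

def lookup_phone_number(text):
    # keep the digits only, mirroring the RemoveCharacters preprocessing step
    digits = re.sub(r'\D', '', text)
    return results.get(digits, 'no match')

for candidate in ['(111) 111-1111', '222.222.2222', '123-456-7890']:
    print(candidate, '->', lookup_phone_number(candidate))
# -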
LookupAnalytic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + language="javascript"
# MathJax.Hub.Config({
#     TeX: { equationNumbers: { autoNumber: "AMS" } }
# });
#
#
# -

# <img src="optimal_transport.png">

# Optimal transport studies how to transform one measure into another while minimizing some cost of transportation. It started in the seventeenth century with practical problems, and the theory behind optimal transport developed extensively in the nineteenth and twentieth centuries.
# In this post, I review the main theoretical ideas of optimal transport and their interpretations.

# There are two major historical milestones in optimal transport, marked by the names _Monge (1781)_ and _Kantorovich (1940)_. Compressed into one sentence: Monge looked for a deterministic transport map, while Kantorovich also allows a probabilistic map.

# We start with the definition of the _image measure_, which will recur throughout this post.

# **Definition 1.** _Given spaces $X, Y$, take $T:X\to Y$ and $\mu\in\mathcal{P}(X)$. The image measure $T_{\#}\mu\in\mathcal{P}(Y)$ is defined as_
# \begin{equation}
# \forall A\in \mathcal{B}(Y): \quad (T_\#\mu)(A):= \mu(T^{-1}(A))
# \end{equation}

# It can be shown that $T_\#\mu$ is a probability measure on $Y$. Notice that this definition is one-way, i.e., the pullback operator $S^\#$ does not necessarily give a probability measure.

# One practical way to check $\nu\stackrel{?}{=}T_\#\mu$ is to integrate all Borel and bounded maps $\varphi:Y\to\mathbb{R}$ against the measures on both sides
# \begin{equation}
# \int_{Y} \varphi(y) d \nu(y)=\int_{X} \varphi(T(x)) d \mu(x)
# \end{equation}

# As a corollary, the _push-forward_ condition can be written as
# \begin{equation}\label{eq:push_forward_condition}
# \nu=T_{\#} \mu \Rightarrow \int_{Y} \varphi d\left(T_{\#} \mu\right)=\int_{X} \varphi \circ T d \mu
# \end{equation}
# for any $\varphi:Y\to\mathbb{R}$ Borel and bounded.
#
# Measures can be pushed forward via the composition of functions
# \begin{equation}
# (S \circ T)_{\#} \mu=S_{\#}\left(T_{\#} \mu\right)
# \end{equation}

# **Definition 2.** _Given $\mu\in\mathcal{P}(X)$ and $\nu\in\mathcal{P}(Y)$, a map $T:X\to Y$ is called a **transport map** from $\mu$ to $\nu$ if $T_\#\mu=\nu$._

# With this definition, given $\mu$ and $\nu$, there might be no map that transports $\mu$ to $\nu$; for instance, if $\mu=\delta_x$ is a Dirac mass and $\nu$ is not, then $T_\#\delta_x=\delta_{T(x)}$ is always a Dirac mass and can never equal $\nu$.
# To generalize a little bit, a **coupling** is defined as a probabilistic map.

# **Definition 3.** _$\gamma\in\mathcal{P}(X\times Y)$ is called a **coupling** of $\mu$ and $\nu$ if_
# \begin{equation}
# \left(\pi_{X}\right)_{\#} \gamma=\mu \text { and }\left(\pi_{Y}\right)_{\#} \gamma=\nu
# \end{equation}
# where
# \begin{equation}
# \pi_{X}(x, y)=x, \quad \pi_{Y}(x, y)=y \quad \forall(x, y) \in X \times Y
# \end{equation}

# According to the push-forward condition \eqref{eq:push_forward_condition}, Borel bounded functions $\varphi$ and $\psi$ can be used to verify a coupling as
# \begin{equation}
# \forall \varphi: X \rightarrow \mathbb{R} \quad \int_{X \times Y} \varphi(x) d \gamma(x, y)=\int_{X \times Y} \varphi \circ \pi_{X}(x, y) d \gamma(x, y)=\int_{X} \varphi(x) d \mu(x)
# \end{equation}
# and
# \begin{equation}
# \forall \psi: Y \rightarrow \mathbb{R} \quad \int_{X \times Y} \psi(y) d \gamma(x, y)=\int_{X \times Y} \psi \circ \pi_{Y}(x, y) d \gamma(x, y)=\int_{Y} \psi(y) d \nu(y)
# \end{equation}

# Let us denote the set of all couplings of the measures $\mu$ and $\nu$ by $\Gamma(\mu, \nu)$. Unlike the transport map, which may fail to exist for a given $\mu$ and $\nu$, the set $\Gamma(\mu, \nu)$ is always non-empty, because it contains at least the product coupling $\gamma=\mu \otimes \nu$.
#
# Any transport map $T$ induces a coupling, denoted by $\gamma_T$, defined as
#

# + language="javascript"
# MathJax.Hub.Queue(
#     ["resetEquationNumbers", MathJax.InputJax.TeX],
#     ["PreProcess", MathJax.Hub],
#     ["Reprocess", MathJax.Hub]
# );
# -
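# The post breaks off before spelling out $\gamma_T$. As a quick numerical illustration
# (an addition to the original text, not part of it), the cell below builds two discrete
# measures, checks that the product coupling $\mu \otimes \nu$ has the right marginals,
# and shows the coupling induced by a deterministic map $T$ on a common finite support.

# +
import numpy as np

# two discrete probability measures on {0, 1, 2}
mu = np.array([0.2, 0.5, 0.3])
nu = np.array([0.5, 0.2, 0.3])

# product coupling: gamma[i, j] = mu[i] * nu[j]
gamma_prod = np.outer(mu, nu)
print(np.allclose(gamma_prod.sum(axis=1), mu))  # first marginal is mu
print(np.allclose(gamma_prod.sum(axis=0), nu))  # second marginal is nu

# a deterministic map T on indices: T(0)=1, T(1)=0, T(2)=2
T = np.array([1, 0, 2])

# induced coupling gamma_T: all mass of atom i goes to atom T(i)
gamma_T = np.zeros((3, 3))
gamma_T[np.arange(3), T] = mu
print(np.allclose(gamma_T.sum(axis=1), mu))  # first marginal is always mu
print(np.allclose(gamma_T.sum(axis=0), nu))  # second marginal equals nu only if T pushes mu to nu
# -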
_jupyter/.ipynb_checkpoints/Optimal Transport-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Limpieza de datos # # Vamos a aplicar el [checklist del Banco Mundial](https://dimewiki.worldbank.org/wiki/Checklist:_Data_Cleaning) en Python usando # el [SciPy stack](https://www.scipy.org/stackspec.html), principalmente pandas # # ## 0. Prerequisitos # # Aclaración: La próxima celda es para compatilibidad con Colab, NO ES RECOMENDADO realizar pip install desde un notebook. # + import sys import pandas as pd in_colab = 'google.colab' in sys.modules if in_colab: BASE_DIR = "https://github.com/DiploDatos/AnalisisYCuracion/raw/master/" file_16 = BASE_DIR + '/input/kickstarter-projects/ks-projects-201612.csv' file_18 = BASE_DIR + '/input/kickstarter-projects/ks-projects-201801.csv' else: BASE_DIR = "https://github.com/DiploDatos/AnalisisYCuracion/raw/master/" file_16 = BASE_DIR + '/input/kickstarter-projects/ks-projects-201612.csv' file_18 = BASE_DIR + '/input/kickstarter-projects/ks-projects-201801.csv' if 'ftfy' not in sys.modules: # !pip install 'ftfy<5.6' # + [markdown] colab_type="text" id="XzR8-UpTIS8S" # # ## 2. Pasos necesarios # # + colab={} colab_type="code" id="jY_rB6dIIXTS" #import pandas as pd kickstarter_2018 = pd.read_csv( file_18, index_col='ID', parse_dates=['deadline','launched'] ) # + colab={"base_uri": "https://localhost:8080/", "height": 452} colab_type="code" id="dXFOyYeiIXTV" outputId="cb3596c7-8b19-4671-ea94-85942ce603fa" kickstarter_2018.describe(include='all', datetime_is_numeric=True) # + [markdown] colab_type="text" id="OUA2hGvQIXTX" # ### 2.1. Etiquetas de variables/columnas: no usar caracteres especiales # # # + colab={} colab_type="code" id="BkdI2pVDIXTX" # helpful character encoding module import chardet # + [markdown] colab_type="text" id="lfyjvGsmIXTZ" # ¿Por qué? Por que aun hay limitaciones para trabajar con estos caracteres. # # ¿Cúales son los caracteres "normales"? # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="fTc3NFPpIXTZ" outputId="af8cd308-845a-4a51-8ca4-d60b994aea86" import string string.ascii_letters + string.digits # + [markdown] colab_type="text" id="tWbcEuejIXTb" # ¿Qué es ASCII? Es una de las primeras codificaciones (o encoding) disponibles. # Ver https://en.wikipedia.org/wiki/ASCII # # Un encoding es un mapa de caracteres a una representación en bits # (por ejemplo 1000001). ASCII es uno de los primeros estandares para # interoperatividad entre computadoras (antes cada fabricante de computadora usa # su propia codificación), contempla sólo los caracteres ingleses y usa 7 bits, # por ejemplo *1000001* codifica el caracter *A* # # Cómo sólo representaban los caracteres ingleses, empiezan a aparecer variantes # para distintos idiomas, Microsoft hizo los propios para internacionalizar # Windows, hubo otro estandar ISO 8859, hasta que llegó el éstandar Unicode que # hizo múltiples encodings pero cada uno contempla múltiples idiomas e incluso # es extensible a futuro. UTF-8 es de esos encodings el más utilizado. # Ver https://en.wikipedia.org/wiki/Unicode # # En particular, Python 3 utiliza UTF-8 por defecto # (lo que no sucedía en Python 2). 
# # Veamos un ejemplo práctico # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="uiUtEXG1IXTb" outputId="9a4ffa22-e792-4bb0-d75d-6c11109f7ee0" # start with a string before = "This is the euro symbol: €" # check to see what datatype it is type(before) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ISgFFMiiIXTc" outputId="b7565a62-ad57-4f86-fab5-51846579acf4" # encode it to a different encoding, replacing characters that raise errors after = before.encode("utf-8", errors = "replace") # check the type type(after) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="psYCTaZCIXTe" outputId="9e1e2a5f-c069-4d55-8f67-3e513d0fd721" # take a look at what the bytes look like after # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="fkbXONwzIXTf" outputId="d887c0f7-be63-4443-c42c-484740f198fe" # convert it back to utf-8 print(after.decode("utf-8")) # - # try to decode our bytes with the ascii encoding # # ```python # print(after.decode("ascii")) # --------------------------------------------------------------------------- # UnicodeDecodeError Traceback (most recent call last) # <ipython-input-10-50fd8662e3ae> in <module> # 1 # try to decode our bytes with the ascii encoding # ----> 2 print(after.decode("ascii")) # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 25: ordinal not in range(128) # ``` # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="3RRKGvArIXTk" outputId="f1aa113f-7489-40f6-ad58-591770a2dbbe" # start with a string before = "This is the euro symbol: €" # encode it to a different encoding, replacing characters that raise errors after = before.encode("ascii", errors = "replace") # convert it back to utf-8 print(after.decode("ascii")) # We've lost the original underlying byte string! It's been # replaced with the underlying byte string for the unknown character :( # + [markdown] colab_type="text" id="IkaMZurZIXTm" # Este error ya lo vimos... # # ```python # kickstarter_2016 = pd.read_csv("../input/ks-projects-201612.csv") # ----------------------------------------------------------------- # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0x99 in position 11: invalid start byte # ``` # El error `UnicodeDecodeError` nos indica que el archivo no estaría en Unicode. # Si no está en algún metadato o no fue indicado es dificil detectar el encoding, # por suerte hay un paquete para esto en Python, que aplica unas heurísticas # para detectar encoding. 
# - # !wget "https://github.com/DiploDatos/AnalisisYCuracion/raw/master/input/kickstarter-projects/ks-projects-201612.csv" \ # -P ../input/ # + colab={"base_uri": "https://localhost:8080/", "height": 306} colab_type="code" id="KVSigmfDIXTv" outputId="21ac2cca-8522-4b51-f71e-cbb4bd1d9ec0" DOWN_DIR = "../input/" # look at the first ten thousand bytes to guess the character encoding with open(DOWN_DIR + "/ks-projects-201612.csv", 'rb') as rawdata: result = chardet.detect(rawdata.read(10000)) # check what the character encoding might be result # + [markdown] colab_type="text" id="BRvRjn15IXTy" # Entonces le podemos indicar el encoding al leer el archivos # + colab={"base_uri": "https://localhost:8080/", "height": 340} colab_type="code" id="HpHQldSDIXTz" outputId="2bcd5bf7-63cd-4a0c-a034-570a4eca9af2" # read in the file with the encoding detected by chardet kickstarter_2016 = pd.read_csv( "../input/ks-projects-201612.csv", encoding='Windows-1252', low_memory=False) # look at the first few lines kickstarter_2016.head() # + [markdown] colab_type="text" id="EaBiRboHIXT6" # Hay otro problema con los caracteres que es más sutíl: **[Mojibake](https://en.wikipedia.org/wiki/Mojibake)** # # Sucede al leer UTF-8 como ASCII extendido (ie. `Windows-*`, `iso-8859-*` entre otros) # # Ejemplo de mojibake: # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="5dFhIonJIXT6" outputId="11408e32-6f3d-4609-9eef-b7d9659c479a" "cigüeña".encode('utf-8').decode('iso-8859-1') # + [markdown] colab_type="text" id="GYrNcEWGIXT8" # Para esto vamos a usar un módulo específico [ftfy](https://ftfy.readthedocs.io/en/latest/). # # Que nos permite ordenar cadenas por rareza # + colab={"base_uri": "https://localhost:8080/", "height": 1345} colab_type="code" id="qRTGjyZ-IXT-" outputId="0ba0a077-6a0f-415b-beab-70dc792b6df9" import ftfy.badness as bad def weird(val): if isinstance(val, float): return 0 return bad.sequence_weirdness(val) kickstarter_2018['name_weirdness'] = kickstarter_2018['name'].apply(weird) kickstarter_2018[kickstarter_2018['name_weirdness'] > 1] # + [markdown] colab_type="text" id="IWvV8C1LIXUA" # Volviendo a la consigna original, chequeamos que no haya caracteres fuera de a-Z, 0-9 y _ en los nombres de columnas # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="dll59dNxIXUA" outputId="d6fea973-d48c-446b-904c-68cd07f7ba22" kickstarter_2018.columns[~kickstarter_2018.columns.str.match(r'^(\w+)$')] # - # ```python # kickstarter_2018.usd_pledged.head() # # --------------------------------------------------------------------------- # AttributeError Traceback (most recent call last) # <ipython-input-16-82c7184671dd> in <module> # ----> 1 kickstarter_2018.usd_pledged.head() # # ~/development/dipdata/curacion/cura/lib/python3.8/site-packages/pandas/core/generic.py in __getattr__(self, name) # 5272 if self._info_axis._can_hold_identifiers_and_holds_name(name): # 5273 return self[name] # -> 5274 return object.__getattribute__(self, name) # 5275 # 5276 def __setattr__(self, name: str, value) -> None: # # AttributeError: 'DataFrame' object has no attribute 'usd_pledged' # ``` # + colab={"base_uri": "https://localhost:8080/", "height": 320} colab_type="code" id="qq6sydy8IXUG" outputId="d6175b2a-d48d-4302-bc2c-2561ebfb2e8b" kickstarter_2018.columns = kickstarter_2018.columns.str.replace(' ', '_') kickstarter_2018.usd_pledged.head() # + [markdown] colab_type="text" id="gpE1h9G0ae55" # #### Ejercicio 3 # # Comparar la cantidad de nombres raros en kickstarter_2018 
con la que obtenemos # al cargar 'ks-projects-201801.csv' con encoding iso-8859-1. # - kick_code_18 = pd.read_csv(file_18, encoding='iso-8859-1') kick_code_18.columns kick_code_18['name_weirdness'] = kick_code_18['name'].apply(weird) kick_code_18[kick_code_18['name_weirdness'] > 1] # + [markdown] colab_type="text" id="ahRhnkZ7IXUI" # ### 2.2. Tratar valores faltantes # # Veamos cuantos valores nulos tenemos # + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="UkbDCMZtIXUJ" outputId="d36346da-cc9b-4415-a256-fc748a51e829" # get the number of missing data points per column missing_values_count = kickstarter_2018.isnull().sum() # look at the # of missing points in the first ten columns missing_values_count[missing_values_count > 0] # + [markdown] colab_type="text" id="93q9UdaVae56" # Esta medida en sí, no dice nada más que con cuantos valores debemos lidiar. # # Primero tenemos que ver si en términos relativos su impacto. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="1WC9vl6mIXUK" outputId="3bf2bc45-1574-4d25-c367-9d060ecbf213" len(kickstarter_2018.dropna())/len(kickstarter_2018) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="eqv839nVIXUN" outputId="b3bcb873-de86-4518-8f97-a141bf0a8ee2" len(kickstarter_2018.dropna(subset=['name']))/len(kickstarter_2018) # + [markdown] colab_type="text" id="9uGFPKTzae5-" # También debemos ver que significan en términos de representatividad e # importancia de nuestro análisis. # + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="2Aa4DZm1IXUO" outputId="93ae971f-c2a5-45f6-f9a6-b16fb8a5e844" kickstarter_2018[kickstarter_2018.name.isnull()] # + [markdown] colab_type="text" id="1OrFMBLYae6A" # Ejemplo, cuantos de los proyectos suspendidos no tienen nombre # + colab={} colab_type="code" id="-kfl2l31ae6A" outputId="1132a889-bedd-4de8-f96e-0afdbfa17845" len( kickstarter_2018[kickstarter_2018.state == 'suspended'].dropna(subset=['name']) ) / len(kickstarter_2018[kickstarter_2018.state == 'suspended']) # + [markdown] colab_type="text" id="gspm76pfae6C" # En este caso, creemos que es seguro descartar los proyectos sin nombre # + colab={} colab_type="code" id="7ig47EETIXUP" kickstarter_2018 = kickstarter_2018.dropna(subset=['name']) # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="_mguXGJ7IXUQ" outputId="1dfb6e32-f432-4e44-b738-d99af0d475af" missing_values_count = kickstarter_2018.isnull().sum() missing_values_count[missing_values_count > 0] # + colab={"base_uri": "https://localhost:8080/", "height": 3020} colab_type="code" id="Xi85q0ytIXUR" outputId="524ed410-bde2-423f-dcd5-991af88b6d93" kickstarter_2018[kickstarter_2018.usd_pledged.isnull()] # + colab={"base_uri": "https://localhost:8080/", "height": 503} colab_type="code" id="hIHOdVWuIXUU" outputId="a0d80c14-4daa-4a76-b320-caa884d79b84" kickstarter_2018[kickstarter_2018.usd_pledged.isnull()].describe( include='all', datetime_is_numeric=True) # + [markdown] colab_type="text" id="n-b461kdae6J" # Interesante, todos los datos pertenecen a un país indeterminado. # # Todos los datos de ese "país" no tienen dinero prometido? 
# + colab={"base_uri": "https://localhost:8080/", "height": 80} colab_type="code" id="V1fJwrLFIXUW" outputId="0a6d1cf9-6e63-4da3-a7f9-d94f56ecc11e" kickstarter_2018[ (kickstarter_2018.country == 'N,0"') & ~(kickstarter_2018.usd_pledged.isnull()) ].head() # + [markdown] colab_type="text" id="_9KOajlwae6L" # A priori como los proyectos no tienen patrocinantes, completamos en 0 los valores # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="GRx81S7wIXUX" outputId="03b695a4-f982-4eb4-b42a-7a4e788cf943" kickstarter_2018 = kickstarter_2018.fillna(0) missing_values_count = kickstarter_2018.isnull().sum() missing_values_count[missing_values_count > 0] # + colab={} colab_type="code" id="A8t17SOLae6N" outputId="a376fb14-3bd6-47ad-c739-2186eab54f04" kickstarter_2018.shape # + [markdown] colab_type="text" id="kMX1NiqXIXUY" # Más métodos disponibles en # https://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-missing y # http://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing # + [markdown] colab_type="text" id="wg5g7VivIXUY" # ## 2.3. Codificar variables # # Para trabajar con los algoritmos de aprendizaje automático, las variables # categóricas estas deben ser codificadas como variables numéricas, no como # cadenas. # # Para esta tarea también hay diferentes estrategias, dos comunes son: asociar # cadena a número y asociar cadena a columna. # + colab={} colab_type="code" id="vSA_t4gkIXUY" from sklearn import preprocessing kickstarter_2018.describe(include=['O']) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Furd8rzbIXUa" outputId="283bc357-ca50-48db-e52d-70bf49c94999" column='main_category' # Create a label (category) encoder object le = preprocessing.LabelEncoder() # Fit the encoder to the pandas column le.fit(kickstarter_2018[column]) # + colab={"base_uri": "https://localhost:8080/", "height": 272} colab_type="code" id="cit8woaJIXUb" outputId="d14afbbe-48da-444d-8b38-6704a5e33bdb" # View encoder mapping dict(zip(le.classes_,le.transform(le.classes_))) # + colab={"base_uri": "https://localhost:8080/", "height": 320} colab_type="code" id="ugnDG6WNIXUc" outputId="a536dada-2601-40b0-bb2e-7565d4a924d4" # Apply the fitted encoder to the pandas column kickstarter_2018[column] = le.transform(kickstarter_2018[column]) kickstarter_2018.head() # + colab={"base_uri": "https://localhost:8080/", "height": 320} colab_type="code" id="04eBH7hoIXUd" outputId="327ca7dd-3980-4037-c730-79092375407a" # Reversing encoding kickstarter_2018[column] = le.inverse_transform(kickstarter_2018[column]) kickstarter_2018.head() # + colab={"base_uri": "https://localhost:8080/", "height": 359} colab_type="code" id="CwNKqbgbIXUd" outputId="d50111ef-7fe6-4fc6-f6cc-3425b21c3098" from sklearn.preprocessing import LabelBinarizer other_column = 'state' lb = LabelBinarizer() lb_results = lb.fit_transform(kickstarter_2018[other_column]) pd.DataFrame(lb_results, columns=((other_column + '_') + pd.Series(lb.classes_))).head(10) # + [markdown] colab_type="text" id="8N1YbkwSIXUe" # More about preprocessing in # http://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing # + [markdown] colab_type="text" id="rAM7Yk6Sae6b" # #### Ejercicio 4 # # Codificar `currency` con ambas estrategias # + column='currency' # Create a label (category) encoder object lcurrency = preprocessing.LabelEncoder() # Fit the encoder to the pandas column lcurrency.fit(kickstarter_2018[column]) print(dict(zip(lcurrency.classes_, 
lcurrency.transform(lcurrency.classes_)))) print() # Apply the fitted encoder to the pandas column kickstarter_2018[column] = lcurrency.transform(kickstarter_2018[column]) kickstarter_2018.head() # + kickstarter_2018[column] = lcurrency.inverse_transform(kickstarter_2018[column]) lb_curr = LabelBinarizer() lb_results = lb_curr.fit_transform(kickstarter_2018[column]) pd.DataFrame( lb_results, columns=((column + '_') + pd.Series(lb_curr.classes_)) ).head(10) # + [markdown] colab_type="text" id="c2qVOSsQIXUf" # ### 2.4. No cambiar los nombres de las variables de la fuente de origen # # + [markdown] colab_type="text" id="k9XvL-yMIXUf" # ### 2.5. Verificar la consistencia de las variables # Aplicar reglas de integridad # # + colab={"base_uri": "https://localhost:8080/", "height": 320} colab_type="code" id="y5W24vJwIXUf" outputId="251b5a15-f3a7-4155-bce2-8a2a2ee1d837" kickstarter_2018.head() # + [markdown] colab_type="text" id="4EAwb-5BIXUh" # ¿Hay proyectos sin patrocinadores pero con plata prometida? # + colab={"base_uri": "https://localhost:8080/", "height": 3020} colab_type="code" id="x0YWqWntIXUh" outputId="a9702107-e115-44d7-a76e-0bd59ab1d2a5" kickstarter_2018[ (kickstarter_2018.backers == 0) & (kickstarter_2018.usd_pledged > 0)] # + [markdown] colab_type="text" id="8oFs6r9CIXUj" # ¿Hay proyecto no exitosos que pasaron el objetivo? # + colab={"base_uri": "https://localhost:8080/", "height": 3037} colab_type="code" id="_5lPvwqYIXUj" outputId="f432680c-543b-4d52-9eb4-35bc293e714e" kickstarter_2018[ (kickstarter_2018.state != 'successful') & (kickstarter_2018.pledged > kickstarter_2018.goal) ] # + [markdown] colab_type="text" id="JcmsJSYXIXUl" # ¿Cómo es, en promedio, la correlación entre el objetivo y la plata obtenida? # + colab={"base_uri": "https://localhost:8080/", "height": 266} colab_type="code" id="vAI0_9m-IXUl" outputId="4d83d960-b7f4-4a52-e101-eeaff781f207" df = kickstarter_2018.groupby(['state'])[['goal','pledged']].agg('mean') df['pledged'] / df['goal'] # + [markdown] colab_type="text" id="0alq6UDrIXUn" # ¿Cuánta plata se obtiene en promedio por estado y categoría? # + colab={"base_uri": "https://localhost:8080/", "height": 266} colab_type="code" id="ZDDTnENBIXUn" outputId="0008cab9-4365-41a5-fa9f-b4e9dac4e7ba" kickstarter_2018.pivot_table( values='usd_pledged', index='state', columns='main_category' ) # + [markdown] colab_type="text" id="zXXhkv4Lae6j" # #### Ejercicio 5 # # 1. ¿Hay proyecto éxitosos que no consiguieron el objetivo? Si hay, ¿Qué porcentaje sí y cuál no? # 2. Calcular una tabla con la cantidad de proyectos por categoría principal y estado. # # #### Respuesta 5 # 1. ¿Hay proyecto éxitosos que no consiguieron el objetivo? Si hay, ¿Qué porcentaje sí y cuál no? # - kickstarter_2018[ (kickstarter_2018.state == 'successful') & (kickstarter_2018.pledged < kickstarter_2018.goal) ] # Porcentajes de proyectos que fueron exitosos que no consiguieron el objetivo. # # + print('Porcentaje que SI consiguieron', len(kickstarter_2018[ (kickstarter_2018.state == 'successful') & (kickstarter_2018.pledged < kickstarter_2018.goal)] ) / len( kickstarter_2018[kickstarter_2018.pledged < kickstarter_2018.goal] ) ) print('Porcentaje que NO consiguieron', len(kickstarter_2018[ (kickstarter_2018.state != 'successful') & (kickstarter_2018.pledged < kickstarter_2018.goal)] ) / len( kickstarter_2018[kickstarter_2018.pledged < kickstarter_2018.goal] ) ) # - # 2. Calcular una tabla con la cantidad de proyectos por categoría principal y estado. 
cols = ['name', 'state', 'main_category'] kickstarter_2018[cols].pivot_table( values='name', index='state', columns='main_category', aggfunc='count' ) # + [markdown] colab_type="text" id="3ci4iq7MIXUq" # ### 2.6. Identificar y documentar valores atípicos/outliers # # + [markdown] colab_type="text" id="CFiuuPAlIXUq" # Queremos analizar las características de los proyectos que obtienen dinero pero sin considerar los casos atípicos. # + colab={"base_uri": "https://localhost:8080/", "height": 299} colab_type="code" id="krohU7VxIXUq" outputId="bd6552ce-322e-4b09-fb31-fb13ad2bffa1" import matplotlib.pyplot as plt fig = plt.figure(figsize=(8, 8)) ax = plt.subplot(1, 1, 1) kickstarter_2018.usd_pledged.hist(ax=ax); ax.set_title('Histogram') ax.set_xlabel('usd_pledged') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 294} colab_type="code" id="4KNUgM_xIXUr" outputId="1c71222e-4be8-43b6-a0a9-15aad2567016" fig = plt.figure(figsize=(8, 8)) ax = plt.subplot(1, 1, 1) kickstarter_2018.boxplot(column='usd_pledged', ax=ax); ax.set_title('boxplot') plt.show() # + [markdown] colab_type="text" id="2JzBu_TmIXUw" # "There are many rules of thumb for how to define an outlier but there is no # silver bullet. One rule of thumb is that any data point that is three standard # deviations away from the mean of the same data point for all observations." # + colab={"base_uri": "https://localhost:8080/", "height": 3088} colab_type="code" id="crqXG6cQIXUx" outputId="af30db2b-96fc-4ded-afbe-c2583ea86d5f" outliers = kickstarter_2018[ kickstarter_2018.usd_pledged > (kickstarter_2018.usd_pledged.mean() + 3 * kickstarter_2018.usd_pledged.std()) ] outliers # + colab={} colab_type="code" id="5Jq7qqFyae6p" outputId="911ce68e-c9ba-48d1-ebf9-705c451e2a37" fig = plt.figure(figsize=(8, 8)) ax = plt.subplot(1, 1, 1) kickstarter_2018.drop(outliers.index).boxplot(column='usd_pledged', ax=ax); ax.set_title('boxplot') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 318} colab_type="code" id="GUIlg7xXIXUz" outputId="e16433c9-d25e-43bf-bad0-93ea16572fb4" fig = plt.figure(figsize=(8, 8)) ax = plt.subplot(1, 1, 1) kickstarter_2018.drop(outliers.index).boxplot( column='usd_pledged', by='state', ax=ax); plt.show() # + [markdown] colab_type="text" id="QV1qvLDRae6t" # Analizar outliers por categorías # + colab={"base_uri": "https://localhost:8080/", "height": 318} colab_type="code" id="Q3sawM-JIXUx" outputId="06ecba4e-1c03-47fe-8ee0-efb68c166309" fig = plt.figure(figsize=(12, 6)) ax1 = plt.subplot(1, 2, 1) kickstarter_2018.boxplot(column='usd_pledged',by='state', ax=ax1); ax2 = plt.subplot(1, 2, 2) kickstarter_2018.boxplot(column='usd_goal_real',by='state', ax=ax2); plt.show() # + [markdown] colab_type="text" id="-sAHsb1vae6u" # #### Ejercicio 6 # # Calcular los valores atípicos de 'usd_goal_real' y graficar los boxplots, con y sin estos valores por categoría # + fig = plt.figure(figsize=(12, 6)) ax1 = plt.subplot(1, 2, 1) kickstarter_2018.boxplot(column='usd_goal_real', ax=ax1); ax1.set_title('CON valores atipicos') ax1.set_ylim((0, 1.2e8)) ax2 = plt.subplot(1, 2, 2) kickstarter_2018[ kickstarter_2018.usd_goal_real <= (kickstarter_2018.usd_goal_real.mean() + 3 * kickstarter_2018.usd_goal_real.std()) ].boxplot(column='usd_goal_real', ax=ax2); ax2.set_title('SIN valores atipicos') ax2.set_ylim((0, 1.2e8)) plt.show() # + [markdown] colab_type="text" id="oGI_4tR0IXUz" # ## 2.7. Evaluar cómo comprimir los datos # para su almacenamiento más eficiente. 
# # En la actualidad, en las nubes el almacenamiento no suele ser problema sino la # velocidad para leerlo que suele ser determinante a la hora de determinar el # formato de almacenamiento. # # El formato preferido es `feather` por la velocidad de carga acorde a este # [post](https://towardsdatascience.com/the-best-format-to-save-pandas-data-414dca023e0d) # Pero no tiene soporte para python 3.8+, el segundo formato es `pickle` # - df.to_pickle('../output/ks-projects-201801-for-pandas.pickle') # + [markdown] colab_type="text" id="9y8QxZwdIXU0" # ## 2.8. Guardar el set de datos con un nombre informativo. # # + [markdown] colab_type="text" id="3aaOkntbIXU0" # EL problema de la ingeniería de software
curacion/notebooks/03-importando_datos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # The DFT: Numerical Aspects # # In this notebook we will look at some numerical issues associated to the DFT; in particular we we will look at the differences in precision between the "naive" way of computing the DFT and the FFT algorithm. # # As a quick reminder, the definitions of the direct and inverse DFT for a length-$N$ signal are: # # \begin{align*} # X[k] &= \sum_{n=0}^{N-1} x[n]\, e^{-j\frac{2\pi}{N}nk}, \quad k=0, \ldots, N-1 \\ # x[n] &= \frac{1}{N}\sum_{k=0}^{N-1} X[k]\, e^{j\frac{2\pi}{N}nk}, \quad n=0, \ldots, N-1 # \end{align*} # # The DFT produces a complex-valued vector that we can represent either via its real and imaginary parts or via its magnitude $|X[k]|$ and phase $\angle X[k] = \arctan \frac{\text{Im}\{X[k]\}}{\text{Re}\{X[k]\}}$. # ## Direct Implementation # # ### Numerical errors in real and imaginary parts # # The DFT can be easily implemented using the change of basis matrix ${W}_N$. This is an $N\times N$ complex-valued matrix whose elements are # # $$ # {W}_N(n,k)=e^{-j\frac{2\pi}{N}nk} # $$ # # so that the DFT of a vector $\mathbf{x}$ is simply $\mathbf{X} = W_N\mathbf{x}$. Note that the inverse DFT can be obtained by simply conjugating ${W}_N$ so that $\mathbf{x} = W_N^*\mathbf{X}$. # # We can easily generate the matrix ${W}_N$ in Python like so: # first our usual bookkeeping # %matplotlib inline import matplotlib import matplotlib.pyplot as plt import numpy as np plt.rcParams["figure.figsize"] = (14,4) def dft_matrix(N): # create a 1xN matrix containing indices 0 to N-1 a = np.expand_dims(np.arange(N), 0) # take advantage of numpy broadcasting to create the matrix W = np.exp(-2j * (np.pi / N) * (a.T * a)) return W # Let's try it out on a short signal and verify the invertibility of the transform # + x = np.array([5, 7, 9]) # DFT matrix N = len(x) W = dft_matrix(N); # DFT X = np.dot(W, x) # inverse DFT x_hat = np.dot(W.T.conjugate(), X) / N print(x-x_hat) # - # As you can see, the difference between the original vector and the "reconstructed" vector is not exactly zero. This is due to the small numerical errors that accumulate in the $N^2$ multiplications and additions needed by the direct and inverse transforms. # # While minor in this case, this numerical imprecision can be very annoying if we switch to a magnitude/phase representation, as we will see now. # ### Numerical errors in phase # # Let's first define a more interesting signal such as a length-128 step signal: # # $$ # x[n] = \begin{cases} # 1 & \mbox{for $0 \leq n < 64$} \\ # 0 & \mbox{for $64 \leq n < 128$} # \end{cases} # $$ # # Conveniently, we can compute its DFT analytically (it's just a geometric series) and we have # # $$ # X[k] = \begin{cases} # 64 & \mbox{for $k=0$} \\ # 0 & \mbox{for $k \neq 0$, $k$ even} \\ # \frac{(-1)^{(k-1)/2}\,e^{-j\pi\frac{63}{128}k}}{\sin(\frac{\pi}{128}k)} & \mbox{for $k$ odd} # \end{cases} # $$ # # From this it's easy to compute the phase; we will set its value to zero whenever the magnitude is zero (i.e. 
for even-indexed values) and we have # # $$ # \angle X[k] = \begin{cases} # 0 & \mbox{for $k$ even} \\ # -\pi + \frac{\pi}{128}k & \mbox{for $k$ odd} # \end{cases} # $$ # # However, let's see what happens if we compute all of this numerically: # + N = 128 x = np.zeros(N) x[0:64] = 1 plt.stem(x); # + W = dft_matrix(N); # DFT X = np.dot(W, x) plt.stem(abs(X)); plt.show(); plt.stem(np.angle(X)); # - # Clearly we have a problem with the phase, although the magnitude looks nice. This is inherent to the fact that the phase is computed by taking the arctangent of a ratio. When the computed DFT values are close to zero, the denominator of the ratio will be also close to zero and any numerical error in its value will lead to large errors in the phase. As we will see in the next section, this problem can be alleviated by using smarter algorithms than the direct naive method. # # Let's still verify the inverse DFT: # + x_hat = np.dot(W.T.conjugate(), X) / N plt.stem(np.real(x_hat - x)); plt.show(); plt.stem(np.imag(x_hat)); # - # Again, the error is very small but clearly not zero. # ## The FFT Algorithm # # The FFT algorithm computes the DFT recursively by successively splitting the data vector into smaller pieces and recombining the results. The most well-known version of the FFT operates on data lengths that are a power of two but efficient algorithms exist for all lengths that are factorizable into powers of small primes. # # The FFT algorithm is not only much faster than the direct method but it's also better conditioned numerically. This is because in the FFT implementation great care is applied to minimizing the number of trigonometric factors. # # As you can see in the examples below, the phase is now accurate and the reconstruction error is almost two orders of magnitude smaller, basically equal to the numerical precision of floating point variables. # + X = np.fft.fft(x) x_hat = np.fft.ifft(X) plt.stem(np.angle(X)); plt.show(); plt.stem(np.real(x_hat - x)); plt.show(); plt.stem(np.imag(x_hat));
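# To put a number on the difference between the two implementations, we can compare the
# worst-case reconstruction error directly. This cell is a small addition to the notebook
# and only reuses the `x`, `W` and `N` variables defined above.

# +
# direct (matrix) DFT round trip
X_direct = np.dot(W, x)
x_direct = np.dot(W.T.conjugate(), X_direct) / N

# FFT round trip
x_fft = np.fft.ifft(np.fft.fft(x))

print('max error, direct DFT:', np.max(np.abs(x_direct - x)))
print('max error, FFT       :', np.max(np.abs(x_fft - x)))
# -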
week3/DFTprecision.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Exercise 1.3: Build a Linear Regression model to predict Australia temperature
#
# #### 1. Import necessary Python packages for the exercise

import pandas as pd
import sklearn

# #### 2. Read the csv file into a dataframe.

df = pd.read_csv("tas_1991_2016_AUS.csv")
df.head()

# #### 3. Rename the columns of the DataFrame

df.columns = ['temperature', 'year', 'month', 'country', 'cty']
df.head()

# #### 4. Drop the country and cty columns

# +
copy_df = df.copy()
df = df.drop(['country', 'cty'], axis=1)
df.head()
# -

# #### 5. Apply feature engineering to the dataframe

# ##### a. Convert the year column’s datatype to Category

year_cat = list(range(1991, 2017))  # <-- the ordered year categories
df['year'] = df['year'].astype('category', categories=year_cat, ordered=True).cat.codes
df.head()

for i in range(len(df.year.unique())):
    print(f'{df.year.unique()[i]} => {year_cat[i]}')

# ##### b. Encode the month column to a number

# +
from sklearn.preprocessing import LabelEncoder

dic = dict()
for colname in df.columns:
    if df[colname].dtype == 'object':
        dic[colname] = LabelEncoder()
        df[colname] = dic[colname].fit_transform(df[colname])
df.head()
# -

# #### 6. Split the dataframe into training and test sets

# +
from sklearn.model_selection import train_test_split

X = df[['year', 'month']]
y = df['temperature']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# -

# #### 7. Use the training set to train a regression model

# +
from sklearn.linear_model import LinearRegression

model = LinearRegression()
model.fit(x_train, y_train)
# -

# #### 8. Use the model to predict temperature

predictions = model.predict(x_test)
predictions[:10]

# #### 9. Check the performance of the model

# +
from sklearn.metrics import mean_absolute_error

mae = mean_absolute_error(y_test, predictions)
mae
# -

# #### 10. Save the model

from sklearn.externals import joblib
joblib.dump(model, 'model.pkl')
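# #### (Added) Reload the saved model and score one observation
#
# A short added sketch showing that the pickled model can be reloaded and used for
# scoring. The features must be encoded exactly as above (year as its category code,
# month through the fitted `LabelEncoder`), so the simplest check is to rescore a row
# that is already encoded, here the first row of the test split.

# +
loaded_model = joblib.load('model.pkl')
sample = x_test.iloc[[0]]  # one already-encoded (year, month) row
print(sample)
print('predicted temperature:', loaded_model.predict(sample)[0])
# -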
Chapter01-Python_Overview_and_Main_Packages/Exercise1.03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + raw_mimetype="text/restructuredtext" active=""
# .. _nb_gradients:
# -

# # Gradients

# If the problem is implemented using autograd, then the gradients obtained through automatic differentiation are available out of the box. Let us consider the following problem definition for a simple quadratic function without any constraints:

# + tags=[]
import autograd.numpy as anp

from pymoo.core.problem import Problem
from pymoo.problems.bounds_as_constr import BoundariesAsConstraints
from pymoo.problems.autodiff import AutomaticDifferentiation


class MyProblem(Problem):

    def __init__(self):
        super().__init__(n_var=10, n_obj=1, n_constr=0, xl=-5, xu=5)

    def _evaluate(self, x, out, *args, **kwargs):
        out["F"] = anp.sum(anp.power(x, 2), axis=1)


problem = AutomaticDifferentiation(MyProblem())
# -

# The gradients can be retrieved by appending `dF` to the `return_values_of` parameter:

# + tags=[]
X = anp.array([anp.arange(10)])
F, dF = problem.evaluate(X, return_values_of=["F", "dF"])
# -

# The resulting gradients are stored in `dF` and the shape is (n_rows, n_objective, n_vars):

# + tags=[]
print(X, F)
print(dF.shape)
print(dF)
# -

# Analogously, the gradient of constraints can be retrieved by appending `dG`.
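# Following the same pattern, here is a small sketch of a constrained variant (an
# addition to this page, not part of the original): the constraint is written into
# `out["G"]`, and its gradient is requested with the `dG` key. The shape of `dG` is
# assumed to be (n_rows, n_constr, n_vars), analogous to `dF` above.

# + tags=[]
class MyConstrainedProblem(Problem):

    def __init__(self):
        super().__init__(n_var=10, n_obj=1, n_constr=1, xl=-5, xu=5)

    def _evaluate(self, x, out, *args, **kwargs):
        out["F"] = anp.sum(anp.power(x, 2), axis=1)
        # a single inequality constraint: the sum of the variables should not exceed 10
        out["G"] = anp.sum(x, axis=1) - 10


constr_problem = AutomaticDifferentiation(MyConstrainedProblem())

G, dG = constr_problem.evaluate(X, return_values_of=["G", "dG"])
print(dG.shape)
# -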
source/problems/gradients.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.7 64-bit (''OptimalTransport'': conda)' # language: python # name: python37764bitoptimaltransportconda38cd42aab3c640dc8d34473d4e53c23f # --- # + import os import ot import json import pprint import networkx import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from utils.preprocessing import feature_scaling from utils.preprocessing import feature_normalization from utils.plot import generate_hierarchical_graph_json from utils.plot import generate_links_json from domain_adaptation import compute_mass_flow from domain_adaptation import undesired_mass_flow_index plt.rcParams['mathtext.fontset'] = 'custom' plt.rcParams['mathtext.rm'] = 'Bitstream Vera Sans' plt.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic' plt.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold' plt.rcParams['font.size'] = 16 plt.rcParams['mathtext.fontset'] = 'stix' plt.rcParams['font.family'] = 'STIXGeneral' plt.style.use('science') palette = [ 'royalblue', 'firebrick', 'darkgoldenrod', 'darkseagreen', 'grey', 'darkslategray', 'crimson', 'midnightblue', 'saddlebrown', 'orange', 'seagreen', 'dodgerblue', 'black' ] # + data_mat = np.load('./data/cstr_rawdata.npy') X = data_mat[:, :-4] y = data_mat[:, -4] d = data_mat[:, -3] domain = 1 Xs, ys = X[np.where(d == 0)[0]], y[np.where(d == 0)[0]] # - for domain in np.unique(d)[1:]: print("Starting calculations for Domain {}".format(domain)) Xt, yt = X[np.where(d == domain)[0]], y[np.where(d == domain)[0]] all_y = np.concatenate([ys, yt]) custom_order_index = np.concatenate([ np.where(all_y == i)[0] for i in range(13) ]) ns = Xs.shape[0] nt = Xt.shape[0] a = ot.unif(ns) b = ot.unif(nt) M = ot.dist(feature_normalization(Xs), feature_normalization(Xt), metric='cityblock') M = M / np.max(M) Gemd = ot.emd(a, b, M) Aemd = np.zeros([ns + nt, ns + nt]) Gmax = np.max(Gemd) Gmin = np.min(Gemd) for i in range(ns): for j in range(nt): if Gemd[i, j] > 0.0: Aemd[i, ns + j] = Gemd[i, j] Aemd[ns + j, i] = Gemd[i, j] nodes = ['Src{}'.format(i) for i in range(ns)] + ['Tgt{}'.format(j) for j in range(nt)] faults = [] for yi in np.concatenate([ys, yt], axis=0): if yi != 12: faults.append('Fault{}'.format(int(yi))) else: faults.append('NormalOperation') hierarchical_graph = generate_hierarchical_graph_json(nodes, faults, Aemd) links = generate_links_json(nodes, faults, Aemd) with open('./results/HierarchicalGraph_EMD_Target{}.json'.format(domain), 'w') as f: f.write(json.dumps(hierarchical_graph)) with open('./results/Links_EMD_Target{}.json'.format(domain), 'w') as f: f.write(json.dumps(links)) # ## Distributional Shift vs. 
Undesired Mass Flow data_mat = np.load('./data/cstr_rawdata.npy') X = data_mat[:, :-4] y = data_mat[:, -4] d = data_mat[:, -3] # + umf_emd = [] umf_sink = [] wdists_emd = [] wdists_sink = [] for domain in np.unique(d)[1:]: Xs, ys = X[np.where(d == 0)[0]], y[np.where(d == 0)[0]] Xt, yt = X[np.where(d == domain)[0]], y[np.where(d == domain)[0]] ns = Xs.shape[0] nt = Xt.shape[0] a = ot.unif(ns) b = ot.unif(nt) M = ot.dist(feature_normalization(Xs), feature_normalization(Xt), metric='cityblock') M = M / np.max(M) Gemd = ot.emd(a, b, M) Gsink = ot.sinkhorn(a, b, M, reg=1e-2) wdists_emd.append(np.sum(M * Gemd)) wdists_sink.append(np.sum(M * Gsink)) umf_emd.append(undesired_mass_flow_index(Gemd, ys, yt)) umf_sink.append(undesired_mass_flow_index(Gsink, ys, yt)) # + fig, ax = plt.subplots(figsize=(9, 2.5)) bar_pos1 = np.arange(6) * 3 bar_pos2 = np.arange(6) * 3 + 1 ax.bar(bar_pos1, umf_emd, width=1) ax.bar(bar_pos2, umf_sink, width=1) for pos, umf in zip(bar_pos1, umf_emd): ax.text(pos, umf + 1e-2, s="{}".format(np.round(umf, 2)), ha='center') for pos, umf in zip(bar_pos2, umf_sink): ax.text(pos, umf + 1e-2, s="{}".format(np.round(umf, 2)), ha='center') ax.set_ylabel('UMF$(\gamma)$') ax.set_xticks(.5 * (bar_pos1 + bar_pos2)) ax.set_xticklabels([r'$\epsilon=0.10$' + '\n' + r'$N = 1.0$', r'$\epsilon=0.15$' + '\n' + r'$N = 1.0$', r'$\epsilon=0.20$' + '\n' + r'$N = 1.0$', r'$\epsilon=0.15$' + '\n' + r'$N = 0.5$', r'$\epsilon=0.15$' + '\n' + r'$N = 1.5$', r'$\epsilon=0.15$' + '\n' + r'$N = 2.0$']) ax.set_ylim([0.0, 0.46]) # plt.savefig('./Figures/Ch6/UndesiredMassFlow.pdf') # + fig, ax = plt.subplots(figsize=(8, 4)) bar_pos1 = np.arange(6) * 3 ax.bar(bar_pos1, umf_emd, width=2) for pos, umf in zip(bar_pos1, umf_emd): ax.text(pos, umf + 1e-2, s="{}".format(np.round(umf, 2)), ha='center') ax.set_ylabel('UMF$(\gamma)$') ax.set_xticks(bar_pos1) ax.set_xticklabels([r'$\epsilon=0.10$' + '\n' + r'$N = 1.0$', r'$\epsilon=0.15$' + '\n' + r'$N = 1.0$', r'$\epsilon=0.20$' + '\n' + r'$N = 1.0$', r'$\epsilon=0.15$' + '\n' + r'$N = 0.5$', r'$\epsilon=0.15$' + '\n' + r'$N = 1.5$', r'$\epsilon=0.15$' + '\n' + r'$N = 2.0$']) ax.set_ylim([0.0, 0.46]) plt.savefig('./Figures/Presentation/UndesiredMassFlow.pdf') # - # ## Classification Performance vs. 
UMF # + data_mat = np.load('./data/cstr_rawdata.npy') X = data_mat[:, :-4] y = data_mat[:, -4] d = data_mat[:, -3] Xs, ys = X[np.where(d == 0)[0]], y[np.where(d == 0)[0]] dataset = np.load('./data/DistributionalShiftData/raw_cstr_distrshift.npy') X = dataset[:, :-3] y = dataset[:, -3] E = dataset[:, -2] N = dataset[:, -1] targets_E = [ (eps, X[np.intersect1d(np.where(E == eps)[0], np.where(N == 1))], y[np.intersect1d(np.where(E == eps)[0], np.where(N == 1))]) for eps in [i / 10 for i in range(1, 10)] ] targets_N = [ (Ni, X[np.intersect1d(np.where(E == 0.15)[0], np.where(N == Ni))], y[np.intersect1d(np.where(E == 0.15)[0], np.where(N == Ni))]) for Ni in [0.25, 0.5, 0.75, 1, 1.25, 1.75, 2.0] ] # + umf_emd = [] emd_acc = [] umf_sink = [] sink_acc = [] clf = SVC(kernel='linear', max_iter=1e+6) emd = ot.da.EMDTransport(norm='max') sink = ot.da.SinkhornTransport(reg_e=1e-3, norm='max') for param, Xt, yt in targets_E + targets_N: Xtr = feature_normalization(Xs) Xts = feature_normalization(Xt) emd.fit(Xs=Xtr, Xt=Xts) sink.fit(Xs=Xtr, Xt=Xts) Gemd = emd.coupling_ Gsink = sink.coupling_ umf_emd.append(undesired_mass_flow_index(Gemd, ys, yt)) umf_sink.append(undesired_mass_flow_index(Gsink, ys, yt)) TXtr = emd.transform(Xtr) clf.fit(TXtr, ys) yp = clf.predict(Xts) emd_acc.append(accuracy_score(yt, yp)) TXtr = sink.transform(Xtr) clf.fit(TXtr, ys) yp = clf.predict(Xts) sink_acc.append(accuracy_score(yt, yp)) # + umf_emd = [] emd_acc = [] umf_sink = [] sink_acc = [] clf = SVC(kernel='linear', max_iter=1e+6) for param, Xt, yt in targets_E + targets_N: Xtr = feature_normalization(Xs) Xts = feature_normalization(Xt) ns = Xs.shape[0] nt = Xt.shape[0] a = ot.unif(ns) b = ot.unif(nt) M = ot.dist(Xtr, Xts, metric='cityblock') M = M / np.max(M) Gemd = ot.emd(a, b, M) Gsink = ot.sinkhorn(a, b, M, reg=1e-3) umf_emd.append(undesired_mass_flow_index(Gemd, ys, yt)) umf_sink.append(undesired_mass_flow_index(Gsink, ys, yt)) TXtr = ns * np.dot(Gemd, Xts) clf.fit(TXtr, ys) yp = clf.predict(Xts) emd_acc.append(accuracy_score(yt, yp)) print(param, emd_acc[-1], umf_emd[-1]) TXtr = ns * np.dot(Gsink, Xts) clf.fit(TXtr, ys) yp = clf.predict(Xts) sink_acc.append(accuracy_score(yt, yp)) # + fig, ax = plt.subplots(figsize=(9, 5)) sns.regplot(umf_emd, 1 - np.array(emd_acc), ax=ax, label='LP') sns.regplot(umf_sink, 1 - np.array(sink_acc), ax=ax, label='Sinkhorn') ax.set_ylabel('$\hat{\mathcal{R}}_{T}(h_{TL})$') ax.set_xlabel('$UMF(\gamma)$') ax.legend(title='Solver') plt.tight_layout() plt.savefig('./Figures/Ch6/UMFxRisk.pdf', transparent=True) # + fig, ax = plt.subplots(figsize=(9, 5)) sns.regplot(umf_emd, 1 - np.array(emd_acc), ax=ax) # sns.regplot(umf_sink, 1 - np.array(sink_acc), ax=ax, label='Sinkhorn') ax.set_ylabel('$\hat{\mathcal{R}}_{T}(h_{TL})$') ax.set_xlabel('$UMF(\gamma)$') # ax.legend(title='Solver') plt.tight_layout() plt.savefig('./Figures/Presentation/UMFxRisk.pdf', transparent=True) # -
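# The `undesired_mass_flow_index` used throughout this notebook is imported from the
# local `domain_adaptation` module, whose code is not shown here. For reference, a
# plausible minimal sketch (my reading of what UMF measures, not the actual module code)
# is the fraction of transported mass that flows between source and target samples whose
# class labels disagree:

# +
def umf_sketch(coupling, ys, yt):
    # mask[i, j] is True when source sample i and target sample j have different labels
    mask = ys.reshape(-1, 1) != yt.reshape(1, -1)
    # share of the total transported mass that crosses class boundaries
    return float(np.sum(coupling * mask) / np.sum(coupling))
# -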
Ch5 - Undesired Mass Flow Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from __future__ import print_function
import getpass
import os
import sys
from subprocess import Popen, check_output
from sys import stderr
from time import sleep
import io
import matplotlib.pyplot as plt
import pandas as pd
import tempfile
import subprocess
import psutil


# +
def FetchCPUProcesses(mostImportant=10):
    """Return the current user's running processes, heaviest memory consumers first."""
    # Note: the original cell contained two interleaved copies of this loop, which made
    # it a syntax error; the duplicate has been removed and the `mostImportant` argument
    # is now actually used as the size of the returned table.
    procs = []
    for proc in psutil.process_iter():
        if proc.pid == os.getpid():
            continue
        try:
            if proc.status() != 'running':
                continue
            if getpass.getuser() == proc.username():
                procs.append({'pid': proc.pid,
                              'cpu_percent': proc.cpu_percent(),
                              'memory_percent': proc.memory_percent(),
                              'name': proc.name(),
                              'exe': proc.exe(),
                              'status': proc.status()})
        except psutil.AccessDenied:
            # some system processes cannot be inspected by a regular user
            continue
    process_log = pd.DataFrame(procs)
    return process_log.sort_values(['memory_percent'], ascending=False)[:mostImportant]
# -


def GetProcessAttributes(pids):
    processAttributes = []
    for pid in pids:
        proc = psutil.Process(pid)
        processAttributes.append({'pid': pid,
                                  'name': proc.name(),
                                  'cmdline': proc.cmdline(),
                                  'status': proc.status()})
    processAttributes = pd.DataFrame(processAttributes)
    return processAttributes


def MonitorNvidiaGPU():
    'Function that monitors Running Processes on Nvidia GPU'
    '''
    Returns a DataFrame (pid, process_name, cmdline, used_gpu_memory, utilization)
    '''
    getGPUProcesses = 'nvidia-smi pmon -c 1 -s mu'
    proc = subprocess.Popen(getGPUProcesses, shell=True, stdout=subprocess.PIPE)
    output = proc.stdout.read().decode('utf-8').split('\n')
    # Remove the line describing the units of each feature
    del output[1]
    # convert to csv format...
    output[0] = output[0].replace('# ', '')
    output = [line.strip() for line in output]
    output = [','.join(line.split()) for line in output]
    # ...and drop the command feature (will be added later)...
output = [','.join(line.split(',')[:8]) for line in output] # ...and convert to DataFrame procsGPU = pd.read_csv(io.StringIO('\n'.join(output)), header=0) procsGPUFeats = GetProcessAttributes(procsGPU.pid.values) return procsGPU.merge(procsGPUFeats, on='pid', how='inner') def full_outer_join(df_x, df_y, on=None, left_on=None, right_on=None): df = pd.merge(df_x, df_y, on='pid', how='outer', suffixes=('', '_y')) # list comprehension of the cols that end with '_y' to_drop = [x for x in df if x.endswith('_y')] df.drop(to_drop, axis=1, inplace=True) return df # %timeit MonitorNvidiaGPU() # ### Notes: # * ### Decide if we use the default Nvidia thread or our own (So Far: our own) # + # #!/usr/bin/python from subprocess import Popen, PIPE, STDOUT import pty import os cmd = 'nvidia-smi pmon -c 1 -s mu' proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) output = proc.stdout.read().decode('utf-8').split('\n') output # + procs = [] for proc in psutil.process_iter(): if proc.pid == os.getpid(): continue try: if getpass.getuser() == proc.username(): procs.append({'pid': proc.pid, 'cpu_percent': proc.cpu_percent(), 'memory_percent': proc.memory_percent(), 'name': proc.name(), 'exe': proc.exe(), 'user': proc.username(), 'status': proc.status() }) except psutil.AccessDenied: continue process_log = pd.DataFrame(procs) process_log
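# The `full_outer_join` helper above is never exercised in this notebook. As an added
# example, the cell below joins the per-user CPU view with the GPU view on `pid`, so each
# process shows up once with both sets of columns. It assumes an NVIDIA driver is
# installed and at least one process is currently using the GPU; otherwise
# `MonitorNvidiaGPU()` may return an empty or malformed table.

# +
cpu_view = FetchCPUProcesses()
gpu_view = MonitorNvidiaGPU()
combined = full_outer_join(cpu_view, gpu_view, on='pid')
combined.head(20)
# -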
notebooks/Monitoring-Nvidia-GPU.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/timgluz/colab_notebooks/blob/master/EEML2020_RL_Tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="ULdrhOaVbsdO" # #RL Tutorial # # <a href="https://colab.research.google.com/github/eemlcommunity/PracticalSessions2020/blob/master/rl/EEML2020_RL_Tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # # Contact us at <EMAIL> & <EMAIL> for any questions/comments :) # # Special thanks to <NAME> and <NAME>. # # # # # # # + [markdown] id="Dv-846KxIqPD" colab_type="text" # The tutorial covers a number of important reinforcement learning (RL) algorithms, including policy iteration, Q-Learning, and Neural Fitted Q. In the first part, we will guide you through the general interaction between RL agents and environments, where the agents ought to take actions in order to maximize returns (i.e. cumulative reward). Next, we will implement Policy Iteration, SARSA, and Q-Learning for a simple tabular environment. The core ideas in the latter will be scaled to more complex MDPs through the use of function approximation. Lastly, we will provide a short introduction to deep reinforcement learning and the DQN algorithm. # # + [markdown] id="ffeeXVm4AuZ6" colab_type="text" # # Overview # + [markdown] id="kT8rkxTUAZzL" colab_type="text" # The agent interacts with the environment in a loop corresponding to the following diagram. The environment defines a set of <font color='blue'>**actions**</font> that an agent can take. The agent takes an action informed by the <font color='red'>**observations**</font> it recieves, and will get a <font color='green'>**reward**</font> from the environment after each action. The goal in RL is to find an agent whose actions maximize the total accumulation of rewards obtained from the environment. # # # <center><img src="https://drive.google.com/uc?id=1nAj1yf0kmTb369dVkIgXaMSPEpTUslnC" width="500" /></center> # # # The tutorial is mainly focused on **value based methods**: agents are maintaining a value for all state-action pairs and use those estimates to choose actions that maximize that value (instead of maintaining a policy directly, like in policy gradient methods). # # We represent the action-value function (otherwise known as Q-function) associated with following/employing a policy $\pi$ in a given MDP as: # # $$ Q^{\pi}(\color{red}{s},\color{blue}{a}) = \mathbb{E}_{\tau \sim P^{\pi}} \left[ \sum_t \gamma^t \color{green}{R_t}| s_0=\color{red}s,a=\color{blue}{a_0} \right]$$ # # where $\tau = \{\color{red}{s_0}, \color{blue}{a_0}, \color{green}{r_0}, \color{red}{s_1}, \color{blue}{a_1}, \color{green}{r_1}, \cdots \}$ # # # Recall that efficient value estimations are based on the famous **_Bellman Optimallity Equation_**: # # $$ Q^\pi(\color{red}{s},\color{blue}{a}) = \color{green}{r}(\color{red}{s},\color{blue}{a}) + \gamma \sum_{\color{red}{s'}\in \color{red}{\mathcal{S}}} P(\color{red}{s'} |\color{red}{s},\color{blue}{a}) V^\pi(\color{red}{s'}) $$ # # where $V^\pi$ is the expected $Q^\pi$ value for a particular state, i.e. 
$V^\pi(\color{red}{s}) = \sum_{\color{blue}{a} \in \color{blue}{\mathcal{A}}} \pi(\color{blue}{a} |\color{red}{s}) Q^\pi(\color{red}{s},\color{blue}{a})$. # + [markdown] colab_type="text" id="xaJxoatMhJ71" # ## Installation # + [markdown] colab_type="text" id="ovuCuHCC78Zu" # ### Install required libraries # # 1. [Acme](https://github.com/deepmind/acme) is a library of reinforcement learning (RL) agents and agent building blocks. Acme strives to expose simple, efficient, and readable agents, that serve both as reference implementations of popular algorithms and as strong baselines, while still providing enough flexibility to do novel research. The design of Acme also attempts to provide multiple points of entry to the RL problem at differing levels of complexity. # # # 2. [Haiku](https://github.com/deepmind/dm-haiku) is a simple neural network library for JAX developed by some of the authors of Sonnet, a neural network library for TensorFlow. # # 3. [dm_env](https://github.com/deepmind/dm_env): DeepMind Environment API, which will be covered in more details in the [Environment subsection](https://colab.research.google.com/drive/1oKyyhOFAFSBTpVnmuOm9HXh5D5ekqhh5#scrollTo=I6KuVGSk4uc9) below. # + cellView="form" colab_type="code" id="KH3O0zcXUeun" colab={} #@title Installations { form-width: "30%" } # !pip install dm-acme # !pip install dm-acme[reverb] # !pip install dm-acme[jax] # !pip install dm-acme[tf] # !pip install dm-acme[envs] # !pip install dm-env # !pip install dm-haiku # !sudo apt-get install -y xvfb ffmpeg # !pip install imageio from IPython.display import clear_output clear_output() # + [markdown] colab_type="text" id="c-H2d6UZi7Sf" # ## Import Modules # + cellView="form" colab_type="code" id="HJ74Id-8MERq" colab={} #@title Imports { form-width: "30%" } import IPython import acme from acme import environment_loop from acme import datasets from acme import specs from acme import wrappers from acme.wrappers import gym_wrapper from acme.agents.jax import dqn from acme.adders import reverb as adders from acme.utils import counting from acme.utils import loggers import base64 import collections from collections import namedtuple import dm_env import enum import functools import gym import haiku as hk import io import imageio import itertools import jax from jax import tree_util from jax.experimental import optix import jax.numpy as jnp import matplotlib.pyplot as plt import numpy as np import pandas as pd import random import reverb import rlax import time import warnings warnings.filterwarnings('ignore') np.set_printoptions(precision=3, suppress=1) plt.style.use('seaborn-notebook') plt.style.use('seaborn-whitegrid') # + [markdown] id="HeGPIOMkUTEn" colab_type="text" # # RL Lab - Part 0: Environment & Agent # + [markdown] colab_type="text" id="I6KuVGSk4uc9" # ## Environment # # + [markdown] id="UhZwB__DPcyM" colab_type="text" # # We will focus on a simple grid world environment for this practical session. # # # <img src="https://drive.google.com/uc?id=1qBjh_PRdZ4GWTDqB9pmjLEOlUAsOfrZi" width="500" /> # # # # This environment consists of either walls and empty cells. The agent starts from an initial location and needs to navigate to reach a goal location. 
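# + [markdown]
# Before looking at the full implementation in the next cell, the sketch below
# illustrates the bare `dm_env` interaction pattern that `GridWorld` follows. It
# is only an illustration (not part of the original tutorial): it assumes `env`
# is a `dm_env.Environment` with four discrete actions and a finite
# `max_episode_length`, and it acts with a placeholder random policy.

# +
def sketch_random_rollout(env, num_actions=4):
  """Illustrative only: run one episode with uniformly random actions."""
  timestep = env.reset()                 # StepType.FIRST; reward/discount are None
  episode_return = 0.
  while not timestep.last():
    action = np.random.randint(num_actions)
    timestep = env.step(action)          # StepType.MID or LAST, carries reward
    episode_return += timestep.reward
  return episode_return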
# # + id="inIAhwLKuHKr" colab_type="code" cellView="form" colab={} #@title Gridworld Implementation { form-width: "30%" } class ObservationType(enum.IntEnum): STATE_INDEX = enum.auto() AGENT_ONEHOT = enum.auto() GRID = enum.auto() AGENT_GOAL_POS = enum.auto() class GridWorld(dm_env.Environment): def __init__(self, layout, start_state, goal_state=None, observation_type=ObservationType.STATE_INDEX, discount=0.9, penalty_for_walls=-5, reward_goal=10, max_episode_length=None, randomize_goals=False): """Build a grid environment. Simple gridworld defined by a map layout, a start and a goal state. Layout should be a NxN grid, containing: * 0: empty * -1: wall * Any other positive value: value indicates reward; episode will terminate Args: layout: NxN array of numbers, indicating the layout of the environment. start_state: Tuple (y, x) of starting location. goal_state: Optional tuple (y, x) of goal location. Will be randomly sampled once if None. observation_type: Enum observation type to use. One of: * ObservationType.STATE_INDEX: int32 index of agent occupied tile. * ObservationType.AGENT_ONEHOT: NxN float32 grid, with a 1 where the agent is and 0 elsewhere. * ObservationType.GRID: NxNx3 float32 grid of feature channels. First channel contains walls (1 if wall, 0 otherwise), second the agent position (1 if agent, 0 otherwise) and third goal position (1 if goal, 0 otherwise) * ObservationType.AGENT_GOAL_POS: float32 tuple with (agent_y, agent_x, goal_y, goal_x) discount: Discounting factor included in all Timesteps. penalty_for_walls: Reward added when hitting a wall (should be negative). reward_goal: Reward added when finding the goal (should be positive). max_episode_length: If set, will terminate an episode after this many steps. randomize_goals: If true, randomize goal at every episode. """ if observation_type not in ObservationType: raise ValueError('observation_type should be a ObservationType instace.') self._layout = np.array(layout) self._start_state = start_state self._state = self._start_state self._number_of_states = np.prod(np.shape(self._layout)) self._discount = discount self._penalty_for_walls = penalty_for_walls self._reward_goal = reward_goal self._observation_type = observation_type self._layout_dims = self._layout.shape self._max_episode_length = max_episode_length self._num_episode_steps = 0 self._randomize_goals = randomize_goals if goal_state is None: # Randomly sample goal_state if not provided goal_state = self._sample_goal() self.goal_state = goal_state def _sample_goal(self): """Randomly sample reachable non-starting state.""" # Sample a new goal n = 0 max_tries = 1e5 while n < max_tries: goal_state = tuple(np.random.randint(d) for d in self._layout_dims) if goal_state != self._state and self._layout[goal_state] == 0: # Reachable state found! 
return goal_state n += 1 raise ValueError('Failed to sample a goal state.') @property def number_of_states(self): return self._number_of_states @property def goal_state(self): return self._goal_state def set_state(self, x, y): self._state = (y, x) @goal_state.setter def goal_state(self, new_goal): if new_goal == self._state or self._layout[new_goal] < 0: raise ValueError('This is not a valid goal!') # Zero out any other goal self._layout[self._layout > 0] = 0 # Setup new goal location self._layout[new_goal] = self._reward_goal self._goal_state = new_goal def observation_spec(self): if self._observation_type is ObservationType.AGENT_ONEHOT: return specs.Array( shape=self._layout_dims, dtype=np.float32, name='observation_agent_onehot') elif self._observation_type is ObservationType.GRID: return specs.Array( shape=self._layout_dims + (3,), dtype=np.float32, name='observation_grid') elif self._observation_type is ObservationType.AGENT_GOAL_POS: return specs.Array( shape=(4,), dtype=np.float32, name='observation_agent_goal_pos') elif self._observation_type is ObservationType.STATE_INDEX: return specs.DiscreteArray( self._number_of_states, dtype=int, name='observation_state_index') def action_spec(self): return specs.DiscreteArray(4, dtype=int, name='action') def get_obs(self): if self._observation_type is ObservationType.AGENT_ONEHOT: obs = np.zeros(self._layout.shape, dtype=np.float32) # Place agent obs[self._state] = 1 return obs elif self._observation_type is ObservationType.GRID: obs = np.zeros(self._layout.shape + (3,), dtype=np.float32) obs[..., 0] = self._layout < 0 obs[self._state[0], self._state[1], 1] = 1 obs[self._goal_state[0], self._goal_state[1], 2] = 1 return obs elif self._observation_type is ObservationType.AGENT_GOAL_POS: return np.array(self._state + self._goal_state, dtype=np.float32) elif self._observation_type is ObservationType.STATE_INDEX: y, x = self._state return y * self._layout.shape[1] + x def reset(self): self._state = self._start_state self._num_episode_steps = 0 if self._randomize_goals: self.goal_state = self._sample_goal() return dm_env.TimeStep( step_type=dm_env.StepType.FIRST, reward=None, discount=None, observation=self.get_obs()) def step(self, action): y, x = self._state if action == 0: # up new_state = (y - 1, x) elif action == 1: # right new_state = (y, x + 1) elif action == 2: # down new_state = (y + 1, x) elif action == 3: # left new_state = (y, x - 1) else: raise ValueError( 'Invalid action: {} is not 0, 1, 2, or 3.'.format(action)) new_y, new_x = new_state step_type = dm_env.StepType.MID if self._layout[new_y, new_x] == -1: # wall reward = self._penalty_for_walls discount = self._discount new_state = (y, x) elif self._layout[new_y, new_x] == 0: # empty cell reward = 0. discount = self._discount else: # a goal reward = self._layout[new_y, new_x] discount = 0. 
new_state = self._start_state step_type = dm_env.StepType.LAST self._state = new_state self._num_episode_steps += 1 if (self._max_episode_length is not None and self._num_episode_steps >= self._max_episode_length): step_type = dm_env.StepType.LAST return dm_env.TimeStep( step_type=step_type, reward=np.float32(reward), discount=discount, observation=self.get_obs()) def plot_grid(self, add_start=True): plt.figure(figsize=(4, 4)) plt.imshow(self._layout <= -1, interpolation='nearest') ax = plt.gca() ax.grid(0) plt.xticks([]) plt.yticks([]) # Add start/goal if add_start: plt.text( self._start_state[1], self._start_state[0], r'$\mathbf{S}$', fontsize=16, ha='center', va='center') plt.text( self._goal_state[1], self._goal_state[0], r'$\mathbf{G}$', fontsize=16, ha='center', va='center') h, w = self._layout.shape for y in range(h - 1): plt.plot([-0.5, w - 0.5], [y + 0.5, y + 0.5], '-k', lw=2) for x in range(w - 1): plt.plot([x + 0.5, x + 0.5], [-0.5, h - 0.5], '-k', lw=2) def plot_state(self, return_rgb=False): self.plot_grid(add_start=False) # Add the agent location plt.text( self._state[1], self._state[0], u'😃', fontname='symbola', fontsize=18, ha='center', va='center', ) if return_rgb: fig = plt.gcf() plt.axis('tight') plt.subplots_adjust(0, 0, 1, 1, 0, 0) fig.canvas.draw() data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') w, h = fig.canvas.get_width_height() data = data.reshape((h, w, 3)) plt.close(fig) return data def plot_policy(self, policy): action_names = [ r'$\uparrow$', r'$\rightarrow$', r'$\downarrow$', r'$\leftarrow$' ] self.plot_grid() plt.title('Policy Visualization') h, w = self._layout.shape for y in range(h): for x in range(w): # if ((y, x) != self._start_state) and ((y, x) != self._goal_state): if (y, x) != self._goal_state: action_name = action_names[policy[y, x]] plt.text(x, y, action_name, ha='center', va='center') def plot_greedy_policy(self, q): greedy_actions = np.argmax(q, axis=2) self.plot_policy(greedy_actions) def build_gridworld_task(task, discount=0.9, penalty_for_walls=-5, observation_type=ObservationType.STATE_INDEX, max_episode_length=200): """Construct a particular Gridworld layout with start/goal states. Args: task: string name of the task to use. One of {'simple', 'obstacle', 'random_goal'}. discount: Discounting factor included in all Timesteps. penalty_for_walls: Reward added when hitting a wall (should be negative). observation_type: Enum observation type to use. One of: * ObservationType.STATE_INDEX: int32 index of agent occupied tile. * ObservationType.AGENT_ONEHOT: NxN float32 grid, with a 1 where the agent is and 0 elsewhere. * ObservationType.GRID: NxNx3 float32 grid of feature channels. First channel contains walls (1 if wall, 0 otherwise), second the agent position (1 if agent, 0 otherwise) and third goal position (1 if goal, 0 otherwise) * ObservationType.AGENT_GOAL_POS: float32 tuple with (agent_y, agent_x, goal_y, goal_x). max_episode_length: If set, will terminate an episode after this many steps. 
""" tasks_specifications = { 'simple': { 'layout': [ [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, 0, 0, 0, -1, -1, 0, 0, 0, -1], [-1, 0, 0, 0, -1, -1, 0, 0, 0, -1], [-1, 0, 0, 0, -1, -1, 0, 0, 0, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ], 'start_state': (2, 2), 'goal_state': (7, 2) }, 'obstacle': { 'layout': [ [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [-1, 0, 0, 0, 0, 0, -1, 0, 0, -1], [-1, 0, 0, 0, -1, 0, 0, 0, 0, -1], [-1, 0, 0, 0, -1, -1, 0, 0, 0, -1], [-1, 0, 0, 0, -1, -1, 0, 0, 0, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ], 'start_state': (2, 2), 'goal_state': (2, 8) }, 'random_goal': { 'layout': [ [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, 0, 0, 0, -1, -1, 0, 0, 0, -1], [-1, 0, 0, 0, -1, -1, 0, 0, 0, -1], [-1, 0, 0, 0, -1, -1, 0, 0, 0, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, 0, 0, 0, 0, 0, 0, 0, 0, -1], [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ], 'start_state': (2, 2), # 'randomize_goals': True }, } return GridWorld( discount=discount, penalty_for_walls=penalty_for_walls, observation_type=observation_type, max_episode_length=max_episode_length, **tasks_specifications[task]) def setup_environment(environment): # Make sure the environment outputs single-precision floats. environment = wrappers.SinglePrecisionWrapper(environment) # Grab the spec of the environment. environment_spec = specs.make_environment_spec(environment) return environment, environment_spec # + [markdown] id="ZizdE9SQS-cN" colab_type="text" # # We will use two distinct tabular GridWorlds: # * `simple` where the goal is at the bottom left of the grid, little navigation required. # * `obstacle` where the goal is behind an obstacle to avoid. # # You can visualize the grid worlds by running the cell below. # # Note that `S` indicates the start state and `G` indicates the goal. # # + id="7Xdnh3Odc63Q" colab_type="code" cellView="form" colab={} # @title Visualise gridworlds { form-width: "30%" } # Instantiate two tabular environments, a simple task, and one that involves # the avoidance of an obstacle. simple_grid = build_gridworld_task( task='simple', observation_type=ObservationType.GRID) obstacle_grid = build_gridworld_task( task='obstacle', observation_type=ObservationType.GRID) # Plot them. simple_grid.plot_grid() plt.title('Simple') obstacle_grid.plot_grid() plt.title('Obstacle'); # + [markdown] id="RTsiWgDSCL7C" colab_type="text" # # In this environment, the agent has four possible <font color='blue'>**Actions**</font>: `up`, `right`, `down`, and `left`. <font color='green'>**Reward**</font> is `-5` for bumping into a wall, `+10` for reaching the goal, and `0` otherwise. The episode ends when the agent reaches the goal, and otherwise continues. **Discount** on continuing steps, is $\gamma = 0.9$. # # Before we start building an agent to interact with this environment, let's first look at the types of objects the environment either returns (e.g. observations) or consumes (e.g. actions). The `environment_spec` will show you the form of the *observations*, *rewards* and *discounts* that the environment exposes and the form of the *actions* that can be taken. 
# # + id="rmKop4FECVV6" colab_type="code" colab={} environment, environment_spec = setup_environment(simple_grid) print('actions:\n', environment_spec.actions, '\n') print('observations:\n', environment_spec.observations, '\n') print('rewards:\n', environment_spec.rewards, '\n') print('discounts:\n', environment_spec.discounts, '\n') # + [markdown] id="0VVTmep2UK6U" colab_type="text" # # We first set the environment to its initial location by calling the `reset` method which returns the first observation. # # + id="rHden9m9FNPK" colab_type="code" colab={} environment.reset() environment.plot_state() # + [markdown] id="pXb7u9epFWnX" colab_type="text" # Now we want to take an action using the `step` method to interact with the environment which returns a `TimeStep` # namedtuple with fields: # # ```none # step_type, reward, discount, observation # ``` # # We can then visualise the updated state of the grid. # + id="LY1eopIWFe95" colab_type="code" colab={} timestep = environment.step(1) environment.plot_state() # + id="pSFDZPksEGpl" colab_type="code" cellView="form" colab={} #@title Run loop { form-width: "30%" } def run_loop(environment, agent, num_episodes=None, num_steps=None, logger_time_delta=1., label='training_loop', log_loss=False, ): """Perform the run loop. We are following the Acme run loop. Run the environment loop for `num_episodes` episodes. Each episode is itself a loop which interacts first with the environment to get an observation and then give that observation to the agent in order to retrieve an action. Upon termination of an episode a new episode will be started. If the number of episodes is not given then this will interact with the environment infinitely. Args: environment: dm_env used to generate trajectories. agent: acme.Actor for selecting actions in the run loop. num_steps: number of episodes to run the loop for. If `None` (default), runs without limit. num_episodes: number of episodes to run the loop for. If `None` (default), runs without limit. logger_time_delta: time interval (in seconds) between consecutive logging steps. label: optional label used at logging steps. """ logger = loggers.TerminalLogger(label=label, time_delta=logger_time_delta) iterator = range(num_episodes) if num_episodes else itertools.count() all_returns = [] num_total_steps = 0 for episode in iterator: # Reset any counts and start the environment. start_time = time.time() episode_steps = 0 episode_return = 0 episode_loss = 0 timestep = environment.reset() # Make the first observation. agent.observe_first(timestep) # Run an episode. while not timestep.last(): # Generate an action from the agent's policy and step the environment. action = agent.select_action(timestep.observation) timestep = environment.step(action) # Have the agent observe the timestep and let the agent update itself. agent.observe(action, next_timestep=timestep) agent.update() # Book-keeping. episode_steps += 1 num_total_steps += 1 episode_return += timestep.reward if log_loss: episode_loss += agent.last_loss if num_steps is not None and num_total_steps >= num_steps: break # Collect the results and combine with counts. steps_per_second = episode_steps / (time.time() - start_time) result = { 'episode': episode, 'episode_length': episode_steps, 'episode_return': episode_return, } if log_loss: result['loss_avg'] = episode_loss/episode_steps all_returns.append(episode_return) # Log the given results. 
logger.write(result) if num_steps is not None and num_total_steps >= num_steps: break return all_returns # + id="_gatpjQ8QA_H" colab_type="code" cellView="form" colab={} #@title Evaluation loop { form-width: "30%" } def evaluate(environment, agent, evaluation_episodes): frames = [] for episode in range(evaluation_episodes): timestep = environment.reset() episode_return = 0 steps = 0 while not timestep.last(): frames.append(environment.plot_state(return_rgb=True)) action = agent.select_action(timestep.observation) timestep = environment.step(action) steps += 1 episode_return += timestep.reward print( f'Episode {episode} ended with reward {episode_return} in {steps} steps' ) return frames def display_video(frames, filename='temp.mp4', frame_repeat=1): """Save and display video.""" # Write video with imageio.get_writer(filename, fps=60) as video: for frame in frames: for _ in range(frame_repeat): video.append_data(frame) # Read video and display the video video = open(filename, 'rb').read() b64_video = base64.b64encode(video) video_tag = ('<video width="320" height="240" controls alt="test" ' 'src="data:video/mp4;base64,{0}">').format(b64_video.decode()) return IPython.display.HTML(video_tag) # + [markdown] id="_0YgLdsi3kXw" colab_type="text" # ## Agent # # We will be implementing Tabular & Function Approximation agents. Tabular agents are purely in Python while for Function Approximation agents, we will use JAX. # + [markdown] id="4MAYbEvtJ1kT" colab_type="text" # ### Agent Implementation # # Each agent implements the following functions: # # # # > `__init__(self, number_of_actions, number_of_states, ...)` # # # The constructor will provide the agent the number of actions and number of # states. # # > `select_action(self, observation)`: # # This is the policy used by the actor to interact with the environment. # # > `observe_first(self, timestep)`: # # This function provides the agent with initial timestep in a given episode. Note # that this is not the result of an action choice by the agent, hence it will only # have `timestep.observation` set to a proper value. # # > `observe(self, action, next_timestep)`: # # This function provides the agent with the timestep that resulted from the given # action choice. The timestep provides a `reward`, a `discount`, and an # `observation`, all results of the previous action choice. # # Note: `timestep.step_type` will be either `MID` or `LAST` and should be used to # determine whether this is the last observation in the episode. # # + [markdown] id="7XD9bXC3UCHd" colab_type="text" # ### Random Agent # # We can just choose actions randomly to move around this environment. 
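# + [markdown]
# To make this interface concrete, here is a minimal sketch (not Acme's own
# `EnvironmentLoop`; the `run_loop` helper above does the same with logging and
# bookkeeping) of the order in which these methods are called over one episode.

# +
def sketch_run_episode(environment, agent):
  """Illustrative only: one episode of agent-environment interaction."""
  timestep = environment.reset()
  agent.observe_first(timestep)          # initial observation only, no reward yet
  while not timestep.last():
    action = agent.select_action(timestep.observation)
    timestep = environment.step(action)
    agent.observe(action, next_timestep=timestep)  # reward, discount, next obs
    agent.update()                       # learning step (a no-op for RandomAgent)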
# + id="0lU-ybzz4Ng7" colab_type="code" cellView="form" colab={} #@title Build a random Agent { form-width: "30%" } # Uniform random policy def random_policy(q): return np.random.randint(4) # (Do not worry about the details here, we will explain the Actor class below) class RandomAgent(acme.Actor): def select_action(self, observation): return random_policy(None) def observe_first(self, timestep): """The agent is being notified that environment was reset.""" pass def observe(self, action, next_timestep): """The agent is being notified of an environment step.""" pass def update(self): """Agent should update its parameters.""" pass # + id="oxjzoRO03jGH" colab_type="code" cellView="form" colab={} #@title Visualise agent's behaviour { form-width: "30%" } # This is how the random policy moves around frames = evaluate(environment, RandomAgent(), evaluation_episodes=1) display_video(frames, frame_repeat=5) # + [markdown] id="YPc0CrguF4GV" colab_type="text" # # RL Lab - Part 1: Tabular Agents # # The first set of execises are based on the simpler case where the number of states is small enough for our agents to maintain a table of values for each individual state that it will ever encounter. # # In particular, we will consider the case where the GridWorld has a fixed layout, and the goal is always at the same location, hence the state is fully determined by the location of the agent. As such, the <font color='red'>observation</font> from the environment is changed to be an integer corresponding to each one of approximately 50 locations on the grid. # # + id="zL8J6nVc2zlq" colab_type="code" colab={} # Environment grid = build_gridworld_task( task='simple', observation_type=ObservationType.STATE_INDEX, max_episode_length=200) environment, environment_spec = setup_environment(grid) # + [markdown] id="sAiZn23xICKh" colab_type="text" # # ## 1.0: Overview # # We will cover three basic RL tabular algorithms: # - Policy iteration # - SARSA Agent # - Q-learning Agent # # # # # # + [markdown] id="JhHsnLcFID1u" colab_type="text" # ## 1.1: Policy iteration # # The first RL learning algorithm we will explore is **policy iteration**, which is repeating (1) Policy Evaluation and (2) Greedy Improvement until convergence. # # <center><img src="https://drive.google.com/uc?id=1lP2dFEXCBgYW744S3Lr3zMzVfOEYowdJ" width="300" /></center> # # For this exercise, we'll show you how to implement the "first 2 arrows", we will not repeat these steps to convergence yet. # # ### 1. Policy Evaluation # # The purpose here is to evaluate a given policy $\pi_e$: # # Compute the value function associated with following/employing this policy in a given MDP. # # $$ Q^{\pi_e}(\color{red}{s},\color{blue}{a}) = \mathbb{E}_{\tau \sim P^{\pi_b}} \left[ \sum_t \gamma^t \color{green}{R_t}| s_0=\color{red}s,a=\color{blue}{a_0} \right]$$ # # where $\tau = \{\color{red}{s_0}, \color{blue}{a_0}, \color{green}{r_0}, \color{red}{s_1}, \color{blue}{a_1}, \color{green}{r_1}, \cdots \}$. # # # Algorithm: # # **Initialize** $Q(\color{red}{s}, \color{blue}{a})$ for all $\color{red}{s}$ ∈ $\mathcal{\color{red}S}$ and $\color{blue}a$ ∈ $\mathcal{\color{blue}A}(\color{red}s)$ # # **Loop forever**: # # 1. $\color{red}{s} \gets{}$current (nonterminal) state # # 2. $\color{blue}{a} \gets{} \text{behaviour_policy }\pi_b(\color{red}s)$ # # 3. Take action $\color{blue}{a}$; observe resulting reward $\color{green}{r}$, discount $\gamma$, and state, $\color{red}{s'}$ # # 4. 
Compute TD-error: $\delta = \color{green}R + \gamma Q(\color{red}{s'}, \underbrace{\pi_e(\color{red}{s'}}_{\color{blue}{a'}})) − Q(\color{red}s, \color{blue}a)$ # # 4. Update Q-value with a small $\alpha$ step: $Q(\color{red}s, \color{blue}a) \gets Q(\color{red}s, \color{blue}a) + \alpha \delta$ # # ### 2. Greedy Policy Improvement # # Once a good approximation to the Q-value of a policy is obtained, we can improve this policy by simply changing action selection towards those that are evaluated higher. # # $$ \pi_{greedy} (\color{blue}a|\color{red}s) = \arg\max_\color{blue}a Q^{\pi_e}(\color{red}s,\color{blue}a) $$ # + [markdown] id="cqrSos8dDPFb" colab_type="text" # ### Create a policy evaluation agent # # An ACME `Actor` is the part of our framework that directly interacts with an environment by generating actions. Here we borrow a figure from Acme to show how this interaction occurs: # # <center><img src="https://drive.google.com/uc?id=1T7FTpA9RgDYFkciDFZK4brNyURZN_ZGp" width="500" /></center> # # While you can always write your own actor, we also provide a number of useful premade versions. # # Tabular agents implement a function `q_values()` returning a matrix of Q values # of shape: (`number_of_states`, `number_of_actions`) # # In this section, we will implement a `PolicyEvalAgent` as an ACME actor: given an `evaluation_policy` and a `behaviour_policy`, it will use the `behaviour_policy` to choose actions, and it will use the corresponding trajectory data to evaluate the `evaluation_policy` (i.e. compute the Q-values as if you were following the `evaluation_policy`). # + id="iGtP3XRLF3qE" colab_type="code" cellView="form" colab={} #@title **[Coding Task]** Policy Evaluation Agent { form-width: "30%" } class PolicyEvalAgent(acme.Actor): def __init__( self, number_of_states, number_of_actions, evaluated_policy, behaviour_policy=random_policy, step_size=0.1): self._state = None self._number_of_states = number_of_states self._number_of_actions = number_of_actions self._step_size = step_size self._behaviour_policy = behaviour_policy self._evaluated_policy = evaluated_policy # ============ YOUR CODE HERE ============= # initialize your q-values (this is a table of state and action pairs # Note: this can be random, but the code was tested w/ zero-initialization pass @property def q_values(self): # ============ YOUR CODE HERE ============= pass def select_action(self, observation): # ============ YOUR CODE HERE ============= pass def observe_first(self, timestep): self._state = timestep.observation def observe(self, action, next_timestep): s = self._state a = action r = next_timestep.reward g = next_timestep.discount next_s = next_timestep.observation # ============ YOUR CODE HERE ============= # Compute TD-Error. # self._td_error = pass def update(self): # ============ YOUR CODE HERE ============= # Q-value table update. 
pass # + id="BXMd87q0JVdr" colab_type="code" cellView="form" colab={} #@title **[Solution]** Policy Evaluation Agent{ form-width: "30%" } class PolicyEvalAgent(acme.Actor): def __init__( self, number_of_states, number_of_actions, evaluated_policy, behaviour_policy=random_policy, step_size=0.1): self._state = None self._number_of_states = number_of_states self._number_of_actions = number_of_actions self._step_size = step_size self._behaviour_policy = behaviour_policy self._evaluated_policy = evaluated_policy self._q = np.zeros((number_of_states, number_of_actions)) self._action = None self._next_state = None @property def q_values(self): return self._q def select_action(self, observation): return self._behaviour_policy(self._q[observation]) def observe_first(self, timestep): self._state = timestep.observation def observe(self, action, next_timestep): s = self._state a = action r = next_timestep.reward g = next_timestep.discount next_s = next_timestep.observation # Compute TD-Error. self._action = a self._next_state = next_s next_a = self._evaluated_policy(self._q[next_s]) self._td_error = r + g * self._q[next_s, next_a] - self._q[s, a] def update(self): # Q-value table update. s = self._state a = self._action self._q[s, a] += self._step_size * self._td_error self._state = self._next_state # + id="FA8FRfY-Dsth" colab_type="code" cellView="form" colab={} #@title Helper functions for visualisation { form-width: "30%" } map_from_action_to_subplot = lambda a: (2, 6, 8, 4)[a] map_from_action_to_name = lambda a: ("up", "right", "down", "left")[a] def plot_values(values, colormap='pink', vmin=-1, vmax=10): plt.imshow(values, interpolation="nearest", cmap=colormap, vmin=vmin, vmax=vmax) plt.yticks([]) plt.xticks([]) plt.colorbar(ticks=[vmin, vmax]) def plot_state_value(action_values, epsilon=0.1): q = action_values fig = plt.figure(figsize=(4, 4)) vmin = np.min(action_values) vmax = np.max(action_values) v = (1 - epsilon) * np.max(q, axis=-1) + epsilon * np.mean(q, axis=-1) plot_values(v, colormap='summer', vmin=vmin, vmax=vmax) plt.title("$v(s)$") def plot_action_values(action_values, epsilon=0.1): q = action_values fig = plt.figure(figsize=(8, 8)) fig.subplots_adjust(wspace=0.3, hspace=0.3) vmin = np.min(action_values) vmax = np.max(action_values) dif = vmax - vmin for a in [0, 1, 2, 3]: plt.subplot(3, 3, map_from_action_to_subplot(a)) plot_values(q[..., a], vmin=vmin - 0.05*dif, vmax=vmax + 0.05*dif) action_name = map_from_action_to_name(a) plt.title(r"$q(s, \mathrm{" + action_name + r"})$") plt.subplot(3, 3, 5) v = (1 - epsilon) * np.max(q, axis=-1) + epsilon * np.mean(q, axis=-1) plot_values(v, colormap='summer', vmin=vmin, vmax=vmax) plt.title("$v(s)$") def smooth(x, window=10): return x[:window*(len(x)//window)].reshape(len(x)//window, window).mean(axis=1) def plot_stats(stats, window=10): plt.figure(figsize=(16,4)) plt.subplot(121) xline = range(0, len(stats.episode_lengths), window) plt.plot(xline, smooth(stats.episode_lengths, window=window)) plt.ylabel('Episode Length') plt.xlabel('Episode Count') plt.subplot(122) plt.plot(xline, smooth(stats.episode_rewards, window=window)) plt.ylabel('Episode Return') plt.xlabel('Episode Count') # + [markdown] id="KsuM-9sEKeT1" colab_type="text" # We will first see how this works on the `simple` GridWorld task. # # **Task 1**: Run the policy evaluation agent, evaluating the uniformly random policy on the `simple` task. # # Try different number of training steps, e.g. $\texttt{num_steps} = 1e3, 1e5$. 
# # Visualise the resulting value functions $Q(\color{red}s,\color{blue}a)$. # The plotting function is provided for you and it takes in a table of q-values. # + id="YMumNsJIKhn_" colab_type="code" cellView="form" colab={} num_steps = 1e3 #@param {type:"number"} # environment grid = build_gridworld_task(task='simple') environment, environment_spec = setup_environment(grid) # agent agent = PolicyEvalAgent( number_of_states=environment_spec.observations.num_values, number_of_actions=environment_spec.actions.num_values, evaluated_policy=random_policy, behaviour_policy=random_policy, step_size=0.1) # run experiment and get the value functions from agent returns = run_loop(environment=environment, agent=agent, num_steps=int(num_steps)) # get the q-values q = agent.q_values.reshape(grid._layout.shape + (4,)) # visualize value functions print('AFTER {} STEPS ...'.format(num_steps)) plot_action_values(q, epsilon=1.) # + [markdown] id="0EfsRbSUN0B1" colab_type="text" # ### Greedy Policy Improvement # # **Task 2**: Compute and Visualise the greedy policy based on the above evaluation, at the end of training. # # # $$ \pi_{greedy} (\color{blue}a|\color{red}s) = \arg\max_\color{blue}a Q^{\pi_e}(\color{red}s,\color{blue}a) $$ # # **Q:** What do you observe? How does it compare to the behaviour policy we started from? # + id="qQFPI_0c1YXo" colab_type="code" cellView="form" colab={} # @title **[Coding task]** Greedy policy def greedy(q_values): pass # + id="iTMx-QHU1f_j" colab_type="code" cellView="form" colab={} # @title **[Solution]** Greedy policy def greedy(q_values): return np.argmax(q_values) # + id="bCcnLnvLOBYZ" colab_type="code" cellView="form" colab={} # @title Visualize the policy on `simple` { form-width: "30%" } # Do here whatever works for you, but you should be able to see what the agent # would do at each step/state. pi = np.zeros(grid._layout_dims, dtype=np.int32) for i in range(grid._layout_dims[0]): for j in range(grid._layout_dims[1]): pi[i, j] = greedy(q[i, j]) grid.plot_policy(pi) # + [markdown] id="W3fDpu9eOUm0" colab_type="text" # **Task 3**: Now try on the harder `obstacle` task and visualise the resulting value functions and the greedy policy on top of these values at the end of training. # # **Q:** What do you observe? # - How does this policy compare with the optimal one? # - Try running the training process longer -- what do you observe? # + id="Y4dbI0DLOeqC" colab_type="code" cellView="form" colab={} num_steps = 1e5 #@param {type:"number"} # environment grid = build_gridworld_task(task='obstacle') environment, environment_spec = setup_environment(grid) # agent agent = PolicyEvalAgent( number_of_states=environment_spec.observations.num_values, number_of_actions=environment_spec.actions.num_values, evaluated_policy=random_policy, behaviour_policy=random_policy, step_size=0.1) # run experiment and get the value functions from agent returns = run_loop(environment=environment, agent=agent, num_steps=int(num_steps)) # get the q-values q = agent.q_values.reshape(grid._layout.shape + (4,)) # visualize value functions print('AFTER {} STEPS ...'.format(num_steps)) plot_action_values(q, epsilon=1.) # + id="D4KxWhW8PCD9" colab_type="code" cellView="form" colab={} # @title Visualise the greedy policy on `obstacle` { form-width: "30%" } grid.plot_greedy_policy(q) # + [markdown] id="EcrhrNnIr3kX" colab_type="text" # ## 1.2 On-policy control: SARSA Agent # In this section, we are focusing on control RL algorithms, which perform the evaluation and improvement of the policy synchronously. 
That is, the policy that is being evaluated improves as the agent is using it to interact with the environent. # # # The first algorithm we are going to be looking at is SARSA. This is an **on-policy algorithm** -- i.e: the data collection is done by leveraging the policy we're trying to optimize (and not just another fixed behaviour policy). # # As discussed during lectures, a greedy policy with respect to a given estimate of $Q^\pi$ fails to explore the environment as needed; we will use instead an $\epsilon$-greedy policy WRT $Q^\pi$. # # ### SARSA Algorithm # # # **Initialize** $Q(\color{red}{s}, \color{blue}{a})$ for all $\color{red}{s}$ ∈ $\mathcal{\color{red}S}$ and $\color{blue}a$ ∈ $\mathcal{\color{blue}A}(\color{red}s)$ # # **Loop forever**: # # 1. $\color{red}s \gets{}$current (nonterminal) state # # 2. $\color{blue}a \gets{} \text{epsilon_greedy}(Q(\color{red}s, \cdot))$ # # 3. Take action $\color{blue}a$; observe resultant reward $\color{green}r$, discount $\gamma$, and state, $\color{red}{s'}$ # # 4. $Q(\color{red}s, \color{blue}a) \gets Q(\color{red}s, \color{blue}a) + \alpha (\color{green}r + \gamma Q(\color{red}{s'}, \color{blue}{a'}) − Q(\color{red}s, \color{blue}a))$ # + id="xNfVHzosN2P0" colab_type="code" cellView="form" colab={} # @title **[Coding Task]** Epilson-greedy policy { form-width: "30%" } # Input(s): Q(s,:), epsilon # Output: Sampled action based on epsilon-Greedy(Q(s,:)) def epsilon_greedy(q_values, epsilon=0.1): pass #return the epsilon greedy action # + id="XWqlIWbwN7Mk" colab_type="code" cellView="form" colab={} # @title **[Solution]** Epilson-greedy policy { form-width: "30%" } def epsilon_greedy(q_values, epsilon=0.1): if epsilon < np.random.random(): return np.argmax(q_values) else: return np.random.randint(np.array(q_values).shape[-1]) # + id="7bmAV4Kcr7Zz" colab_type="code" cellView="form" colab={} #@title **[Coding Task]** SARSA Agent { form-width: "30%" } class SarsaAgent(acme.Actor): def __init__( self, number_of_states, number_of_actions, epsilon, step_size=0.1): self._q = np.zeros((number_of_states, number_of_actions)) self._step_size = step_size self._epsilon = epsilon self._state = None self._action = None self._next_state = None @property def q_values(self): return self._q def select_action(self, observation): return epsilon_greedy(self._q[observation], self._epsilon) def observe_first(self, timestep): self._state = timestep.observation def observe(self, action, next_timestep): s = self._state a = action r = next_timestep.reward g = next_timestep.discount next_s = next_timestep.observation # ============ YOUR CODE HERE ============= # Online Q-value update # self._td_error = pass def update(self): # ============ YOUR CODE HERE ============= # Q-value table update pass # + id="JtlH1tU7sCEm" colab_type="code" cellView="form" colab={} #@title **[Solution]** SARSA Agent { form-width: "30%" } class SarsaAgent(acme.Actor): def __init__( self, number_of_states, number_of_actions, epsilon, step_size=0.1): self._q = np.zeros((number_of_states, number_of_actions)) self._number_of_states = number_of_states self._number_of_actions = number_of_actions self._step_size = step_size self._epsilon = epsilon self._state = None self._action = None self._next_state = None @property def q_values(self): return self._q def select_action(self, observation): return epsilon_greedy(self._q[observation], self._epsilon) def observe_first(self, timestep): self._state = timestep.observation def observe(self, action, next_timestep): s = self._state a = action r = 
next_timestep.reward g = next_timestep.discount next_s = next_timestep.observation next_a = epsilon_greedy(self._q[next_s], self._epsilon) # Online Q-value update self._action = a self._next_state = next_s self._td_error = r + g * self._q[next_s, next_a] - self._q[s, a] def update(self): s = self._state a = self._action self._q[s, a] += self._step_size * self._td_error self._state = self._next_state # + [markdown] id="K8eBOcXZu1fM" colab_type="text" # ### **Task**: Run your SARSA agent on the `obstacle` environment # + id="xKYEB2d2uGaa" colab_type="code" cellView="form" colab={} num_steps = 1e5 #@param {type:"number"} # environment grid = build_gridworld_task(task='obstacle') environment, environment_spec = setup_environment(grid) # agent agent = SarsaAgent( number_of_states=environment_spec.observations.num_values, number_of_actions=environment_spec.actions.num_values, epsilon=0.1, step_size=0.1) # run experiment and get the value functions from agent returns = run_loop(environment=environment, agent=agent, num_steps=int(num_steps)) # get the q-values q = agent.q_values.reshape(grid._layout.shape + (4,)) # visualize value functions print('AFTER {} STEPS ...'.format(num_steps)) plot_action_values(q, epsilon=1.) # visualise the greedy policy grid.plot_greedy_policy(q) # + [markdown] id="pFGX_zGcvb8D" colab_type="text" # ## 1.3 Off-policy control: Q-learning Agent # # Reminder: Q-learning is a very powerful and general algorithm, that enables control (figuring out the optimal policy/value function) both on and off-policy. # # **Initialize** $Q(\color{red}{s}, \color{blue}{a})$ for all $\color{red}{s} \in \color{red}{\mathcal{S}}$ and $\color{blue}{a} \in \color{blue}{\mathcal{A}}(\color{red}{s})$ # # **Loop forever**: # # 1. $\color{red}{s} \gets{}$current (nonterminal) state # # 2. $\color{blue}{a} \gets{} \text{behaviour_policy}(\color{red}{s})$ # # 3. Take action $\color{blue}{a}$; observe resultant reward $\color{green}{R}$, discount $\gamma$, and state, $\color{red}{s'}$ # # 4. 
$Q(\color{red}{s}, \color{blue}{a}) \gets Q(\color{red}{s}, \color{blue}{a}) + \alpha (\color{green}{R} + \gamma \max_{\color{blue}{a'}} Q(\color{red}{s'}, \color{blue}{a'}) − Q(\color{red}{s}, \color{blue}{a}))$ # + id="I6s820jAwoVA" colab_type="code" cellView="form" colab={} #@title **[Coding Task]** Q-Learning Agent { form-width: "30%" } class QLearningAgent(acme.Actor): def __init__( self, number_of_states, number_of_actions, behaviour_policy, step_size=0.1): self._q = np.zeros((number_of_states, number_of_actions)) self._step_size = step_size self._behaviour_policy = behaviour_policy self._state = None self._action = None self._next_state = None @property def q_values(self): return self._q def select_action(self, observation): return self._behaviour_policy(self._q[observation]) def observe_first(self, timestep): self._state = timestep.observation def observe(self, action, next_timestep): s = self._state a = action r = next_timestep.reward g = next_timestep.discount next_s = next_timestep.observation # ============ YOUR CODE HERE ============= # Offline Q-value update # self._td_error = pass def update(self): # ============ YOUR CODE HERE ============= pass # + id="ak1T5PNV8Pbk" colab_type="code" cellView="form" colab={} #@title **[Solution]** Q-Learning Agent { form-width: "30%" } class QLearningAgent(acme.Actor): def __init__( self, number_of_states, number_of_actions, behaviour_policy, step_size=0.1): self._q = np.zeros((number_of_states, number_of_actions)) self._step_size = step_size self._behaviour_policy = behaviour_policy self._state = None self._action = None self._next_state = None @property def q_values(self): return self._q def select_action(self, observation): return self._behaviour_policy(self._q[observation]) def observe_first(self, timestep): self._state = timestep.observation def observe(self, action, next_timestep): s = self._state a = action r = next_timestep.reward g = next_timestep.discount next_s = next_timestep.observation # Offline Q-value update self._action = a self._next_state = next_s self._td_error = r + g * np.max(self._q[next_s]) - self._q[s, a] def update(self): s = self._state a = self._action self._q[s, a] += self._step_size * self._td_error self._state = self._next_state # + [markdown] id="2RqdV3rjwcAh" colab_type="text" # ### **Task 1**: Run your Q-learning agent on `obstacle` # # # + id="LL4PgT-jwi3-" colab_type="code" cellView="form" colab={} epsilon = 1 #@param {type:"number"} num_steps = 1e5 #@param {type:"number"} # environment grid = build_gridworld_task(task='obstacle') environment, environment_spec = setup_environment(grid) # behavior policy behavior_policy = lambda qval: epsilon_greedy(qval, epsilon=epsilon) # agent agent = QLearningAgent( number_of_states=environment_spec.observations.num_values, number_of_actions=environment_spec.actions.num_values, behaviour_policy=behavior_policy, step_size=0.1) # run experiment and get the value functions from agent returns = run_loop(environment=environment, agent=agent, num_steps=int(num_steps)) # get the q-values q = agent.q_values.reshape(grid._layout.shape + (4,)) # visualize value functions print('AFTER {} STEPS ...'.format(num_steps)) plot_action_values(q, epsilon=epsilon) # visualise the greedy policy grid.plot_greedy_policy(q) # + [markdown] id="cMk2ArG-weg_" colab_type="text" # ### **Task 2:** Experiment with different levels of 'greediness' # * The default was $\epsilon=1.$, what does this correspond to? # * Try also $\epsilon =0.1, 0.5$. What do you observe? 
Does the behaviour policy affect the training in any way? # + [markdown] id="Lqg1n48y81ei" colab_type="text" # ## 1.4 **[Homework]** Experience Replay # # Implement an agent that uses **Experience Replay** to learn action values, at each step: # * select actions randomly # * accumulate all observed transitions *(s, a, r, s')* in the environment in a *replay buffer*, # * apply an online Q-learning # * apply multiple Q-learning updates based on transitions sampled from the *replay buffer* (in addition to the online updates). # # # **Initialize** $Q(\color{red}s, \color{blue}a)$ for all $\color{red}{s} ∈ \mathcal{\color{red}S}$ and $\color{blue}a ∈ \mathcal{\color{blue}A}(\color{red}s)$ # # **Loop forever**: # # 1. $\color{red}{s} \gets{}$current (nonterminal) state # # 2. $\color{blue}{a} \gets{} \text{random_action}(\color{red}{s})$ # # 3. Take action $\color{blue}{a}$; observe resultant reward $\color{green}{r}$, discount $\gamma$, and state, $\color{red}{s'}$ # # 4. $Q(\color{red}{s}, \color{blue}{a}) \gets Q(\color{red}{s}, \color{blue}{a}) + \alpha (\color{green}{r} + \gamma Q(\color{red}{s'}, \color{blue}{a'}) − Q(\color{red}{s}, \color{blue}{a}))$ # # 5. $\text{ReplayBuffer.append_transition}(s, a, r, \gamma, s')$ # # 6. Loop repeat n times: # # 1. $\color{red}{s}, \color{blue}{a}, \color{green}{r}, \gamma, \color{red}{s'} \gets \text{ReplayBuffer}.\text{sample_transition}()$ # # 4. $Q(\color{red}{s}, \color{blue}{a}) \gets Q(\color{red}{s}, \color{blue}{a}) + \alpha (\color{green}{r} + \gamma \max_\color{blue}{a'} Q(\color{red}{s'}, \color{blue}{a'}) − Q(\color{red}{s}, \color{blue}{a}))$ # + id="ietFnV739JwD" colab_type="code" cellView="form" colab={} #@title **[Coding Task]** Q-learning AGENT with a simple replay buffer { form-width: "30%" } class ReplayQLearningAgent(acme.Actor): def __init__( self, number_of_states, number_of_actions, behaviour_policy, num_offline_updates=0, step_size=0.1): self._q = np.zeros((number_of_states, number_of_actions)) self._step_size = step_size self._behaviour_policy = behaviour_policy self._num_offline_updates = num_offline_updates self._state = None self._action = None self._next_state = None self._replay_buffer = [] @property def q_values(self): return self._q def select_action(self, observation): return self._behaviour_policy(self._q[observation]) def observe_first(self, timestep): self._state = timestep.observation def observe(self, action, next_timestep): s = self._state a = action r = next_timestep.reward g = next_timestep.discount next_s = next_timestep.observation # Offline Q-value update self._action = a self._next_state = next_s self._td_error = r + g * np.max(self._q[next_s]) - self._q[s, a] if self._num_offline_updates > 0: # ============ YOUR CODE HERE ============= # Update replay buffer. 
pass def update(self): s = self._state a = self._action self._q[s, a] += self._step_size * self._td_error self._state = self._next_state # Offline Q-value update # ============ YOUR CODE HERE ============= # + id="I6Lunsx1-kmf" colab_type="code" cellView="form" colab={} #@title **[Solution]** Q-learning AGENT with a simple replay buffer { form-width: "30%" } class ReplayQLearningAgent(acme.Actor): def __init__( self, number_of_states, number_of_actions, behaviour_policy, num_offline_updates=0, step_size=0.1): self._q = np.zeros((number_of_states, number_of_actions)) self._step_size = step_size self._behaviour_policy = behaviour_policy self._num_offline_updates = num_offline_updates self._state = None self._action = None self._next_state = None self._replay_buffer = [] @property def q_values(self): return self._q def select_action(self, observation): return self._behaviour_policy(self._q[observation]) def observe_first(self, timestep): self._state = timestep.observation def observe(self, action, next_timestep): s = self._state a = action r = next_timestep.reward g = next_timestep.discount next_s = next_timestep.observation # Offline Q-value update self._action = a self._next_state = next_s self._td_error = r + g * np.max(self._q[next_s]) - self._q[s, a] if self._num_offline_updates > 0: self._replay_buffer.append((s, a, r, g, next_s)) def update(self): s = self._state a = self._action self._q[s, a] += self._step_size * self._td_error self._state = self._next_state # Offline Q-value update if len(self._replay_buffer) > self._num_offline_updates: for i in range(self._num_offline_updates): idx = np.random.randint(0, len(self._replay_buffer)) s, a, r, g, next_s = self._replay_buffer[idx] td_error = r + g * np.max(self._q[next_s]) - self._q[s, a] self._q[s, a] += self._step_size * td_error # + [markdown] id="k3J6CE2M_AdF" colab_type="text" # ### **Task**: Compare Q-learning with/without experience replay # # Use a small number of training steps (e.g. `num_steps = 1e3`) and vary `num_offline_updates` between `0` and `30`. # + id="9yLCXKBH_F0j" colab_type="code" cellView="form" colab={} num_offline_updates = 0#@param {type:"integer"} num_steps = 1e3 grid = build_gridworld_task(task='obstacle') environment, environment_spec = setup_environment(grid) agent = ReplayQLearningAgent( number_of_states=environment_spec.observations.num_values, number_of_actions=environment_spec.actions.num_values, behaviour_policy=random_policy, num_offline_updates=num_offline_updates, step_size=0.1) # run experiment and get the value functions from agent returns = run_loop(environment=environment, agent=agent, num_steps=int(num_steps)) q = agent.q_values.reshape(grid._layout.shape + (4,)) plot_action_values(q) grid.plot_greedy_policy(q) # + [markdown] id="Rkn2ud_0Pn2o" colab_type="text" # # RL Lab - Part 2: Function Approximation # + [markdown] id="yxqnvCLoe3KU" colab_type="text" # <img src="https://drive.google.com/uc?id=1oqIQNM_tMPmP8l38C_3yp5uUego3S8kV" width="500" /> # # So far we only considered look-up tables. In all previous cases every state and action pair $(\color{red}{s}, \color{blue}{a})$, had an entry in our Q table. Again, this is possible in this environment as the number of states is a equal to the number of cells in the grid. But this is not scalable to situations where, say, the goal location changes or the obstacles are in different locations at every episode (consider how big the table should be in this situation?). 
# # As example (not covered in this tutorial) is ATARI from pixels, where the number of possible frames an agent can see is exponential in the number of pixels on the screen. # # <center><img width="200" alt="portfolio_view" src="https://miro.medium.com/max/1760/1*XyIpmXXAjbXerDzmGQL1yA.gif"></center> # # But what we **really** want is just being able to *compute* the Q-value, when fed with a particular $(\color{red}{s}, \color{blue}{a})$ pair. So if we had a way to get a function to do this work instead of keeping a big table, we'd get around this problem. # # To address this, we can use **Function Approximation** as a way to generalize Q-values over some representation of the very large state space, and **train** them to output the values they should. In this section, we will explore Q-Learning with function approximation, which, although theoretically proven to diverge for some degenerate MDPs, can yield impressive results in very large environments. In particular, we will look at [Neural Fitted Q (NFQ) Iteration](http://ml.informatik.uni-freiburg.de/former/_media/publications/rieecml05.pdf). # # # # + [markdown] id="OTAYVPnaJN0t" colab_type="text" # ## 2.1 NFQ agent # + [markdown] id="-omtUOQCS8VI" colab_type="text" # [Neural Fitted Q Iteration](http://ml.informatik.uni-freiburg.de/former/_media/publications/rieecml05.pdf) was one of the first papers to demonstrate how to leverage recent advances in Deep Learning to approximate the Q-value by a neural network $^1$. # # We represent $Q(\color{red}s, \color{blue}a)$ as a neural network $f()$. which given a vector $\color{red}s$, will output a vector of Q-values for all possible actions $\color{blue}a$.$^2$ # # When introducing function approximations, and neural networks in particular, we need to have a loss to optimize. But looking back at the tabular setting above, you can see that we already have some notion of error: the **TD error**. # # By training our neural network to output values such that the *TD error is minimized*, we will also satisfy the Bellman Optimality Equation, which is a good sufficient condition to enforce, so that we may obtain an optimal policy. # Thanks to automatic differentiation, we can just write the TD error as a loss (e.g. with a $L2$ loss, but others would work too), compute its gradient (which are now gradients with respect to individual parameters of the neural network) and slowly improve our Q-value approximation: # # $$Loss = \mathbb{E}\left[ \left( \color{green}{r} + \gamma \max_\color{blue}{a'} Q(\color{red}{s'}, \color{blue}{a'}) − Q(\color{red}{s}, \color{blue}{a}) \right)^2\right]$$ # # # NFQ builds on Q-learning, but if one were to update the Q-values online directly, the training can be unstable and very slow. # Instead, NFQ uses a Replay buffer, similar to what you just implemented above, to update the Q-value in a batched setting. # # When it was introduced, it also was entirely off-policy (i.e. one would use a random policy to collect data), and is prone to unstability when applied to more complex environments (e.g. when the input are pixels or the tasks are longer and complicated). # But it is a good stepping stone to the more complex agents used today. Here, we will look at a slightly different and modernised implementation of NFQ. 
# # <br /> # # --- # # <sub>*$^1$ if you read the NFQ paper, they use a "control" notation, where there is a "cost to minimize", instead of "rewards to maximize", so don't be surprised if signs/max/min do not correspond.* </sub> # # <sub>*$^2$ we could feed it $\color{blue}a$ as well and ask $f$ for a single scalar value, but given we have a fixed number of actions and we usually need to take an $argmax$ over them, it's easiest to just output them all in one pass.*</sub> # + [markdown] id="_NjO3wD-Sphk" colab_type="text" # <center><img src="https://drive.google.com/uc?id=1ivTQBHWkYi_J9vWwXFd2sSWg5f2TB5T-" width="400" /></center> # + id="KULsnljicr9t" colab_type="code" cellView="form" colab={} #@title **[Coding Task]** NFQ Agent { form-width: "30%" } Transitions = collections.namedtuple('Transitions', ['s_t', 'a_t', 'r_t', 'd_t', 's_tp1']) TrainingState = namedtuple('TrainingState', 'params, opt_state, step') class NeuralFittedQAgent(acme.Actor): def __init__(self, q_network, observation_spec, replay_capacity=100000, epsilon=0.1, batch_size=1, learning_rate=3e-4): self._observation_spec = observation_spec self.epsilon = epsilon self._batch_size = batch_size self._replay_buffer = ReplayBuffer(replay_capacity) self.last_loss = 0 # Setup Network and loss with Haiku self._rng = hk.PRNGSequence(1) self._q_network = hk.transform(q_network) # Initialize network # ============ YOUR CODE HERE ============= # init_params = # Setup optimizer self._optimizer = optix.adam(learning_rate) initial_optimizer_state = self._optimizer.init(initial_params) self._state = TrainingState( params=initial_params, opt_state=initial_optimizer_state, step=0) @functools.partial(jax.jit, static_argnums=(0,)) def _policy(self, params: hk.Params, rng_key: jnp.ndarray, observation: jnp.ndarray, epsilon: float): # You can use rlax.epsilon_greedy here # ============ YOUR CODE HERE ============= pass def select_action(self, observation): return self._policy(self._state.params, next(self._rng), observation, self.epsilon) def q_values(self, observation): return jnp.squeeze( self._q_network.apply(self._state.params, observation[None, ...]), axis=0) @functools.partial(jax.jit, static_argnums=(0,)) def _loss(self, params: hk.Params, transitions: Transitions): def _td_error(q_s, q_next_s, a, r, d): """TD error for a single transition.""" # ============ YOUR CODE HERE ============= pass batch_td_error = jax.vmap(_td_error) # Compute batched Q-values [Batch, actions] q_s = self._q_network.apply(params, transitions.s_t) q_next_s = self._q_network.apply(params, transitions.s_tp1) # Get batched td errors td_errors = batch_td_error(q_s, q_next_s, transitions.a_t, transitions.r_t, transitions.d_t) losses = 0.5 * td_errors**2. # [Batch] return jnp.mean(losses) @functools.partial(jax.jit, static_argnums=(0,)) def _train_step(self, state: TrainingState, transitions: Transitions): # Do one learning step on the batch of transitions # ============ YOUR CODE HERE ============= # Use jax.value_and_grad to compute gradients and values from _loss, # and optix.apply_updates to compute new parameters for the network. 
new_state = TrainingState( params=new_params, opt_state=new_opt_state, step=state.step + 1) return new_state, loss def update(self): if self._replay_buffer.is_ready(self._batch_size): # Collect a minibatch of random transitions transitions = Transitions(*self._replay_buffer.sample(self._batch_size)) # Compute loss and update parameters self._state, self.last_loss = self._train_step(self._state, transitions) def observe_first(self, timestep): self._replay_buffer.push(timestep, None) def observe(self, action, next_timestep): self._replay_buffer.push(next_timestep, action) class ReplayBuffer(object): """A simple Python replay buffer.""" def __init__(self, capacity): self._prev = None self._action = None self._latest = None self.buffer = collections.deque(maxlen=capacity) def push(self, timestep, action): self._prev = self._latest self._action = action self._latest = timestep if action is not None: self.buffer.append( (self._prev.observation, self._action, self._latest.reward, self._latest.discount, self._latest.observation)) def sample(self, batch_size): obs_tm1, a_tm1, r_t, discount_t, obs_t = zip( *random.sample(self.buffer, batch_size)) return (jnp.stack(obs_tm1), jnp.asarray(a_tm1), jnp.asarray(r_t), jnp.asarray(discount_t), jnp.stack(obs_t)) def is_ready(self, batch_size): return batch_size <= len(self.buffer) # + id="DWrRFI_qLmmt" colab_type="code" cellView="form" colab={} #@title **[Solution]** NFQ Agent { form-width: "30%" } Transitions = collections.namedtuple('Transitions', ['s_t', 'a_t', 'r_t', 'd_t', 's_tp1']) TrainingState = namedtuple('TrainingState', 'params, opt_state, step') class NeuralFittedQAgent(acme.Actor): def __init__(self, q_network, observation_spec, replay_capacity=100000, epsilon=0.1, batch_size=1, learning_rate=3e-4): self._observation_spec = observation_spec self.epsilon = epsilon self._batch_size = batch_size self._replay_buffer = ReplayBuffer(replay_capacity) self.last_loss = 0 # Setup Network and loss with Haiku self._rng = hk.PRNGSequence(1) self._q_network = hk.transform(q_network) # Initialize network dummy_observation = observation_spec.generate_value() initial_params = self._q_network.init( next(self._rng), dummy_observation[None, ...]) # Setup optimizer self._optimizer = optix.adam(learning_rate) initial_optimizer_state = self._optimizer.init(initial_params) self._state = TrainingState( params=initial_params, opt_state=initial_optimizer_state, step=0) @functools.partial(jax.jit, static_argnums=(0,)) def _policy(self, params: hk.Params, rng_key: jnp.ndarray, observation: jnp.ndarray, epsilon: float): q_values = self._q_network.apply(params, observation[None, ...]) actions = rlax.epsilon_greedy(epsilon).sample(rng_key, q_values) return jnp.squeeze(actions, axis=0) def select_action(self, observation): return self._policy(self._state.params, next(self._rng), observation, self.epsilon) def q_values(self, observation): return jnp.squeeze( self._q_network.apply(self._state.params, observation[None, ...]), axis=0) @functools.partial(jax.jit, static_argnums=(0,)) def _loss(self, params: hk.Params, transitions: Transitions): def _td_error(q_s, q_next_s, a, r, d): """TD error for a single transition.""" target_s = r + d * jnp.max(q_next_s) td_error = jax.lax.stop_gradient(target_s) - q_s[a] # Task: think of why we are not using td_error = target_s - q_s[a]? 
return td_error batch_td_error = jax.vmap(_td_error) # Compute batched Q-values [Batch, actions] q_s = self._q_network.apply(params, transitions.s_t) q_next_s = self._q_network.apply(params, transitions.s_tp1) # Get batched td errors td_errors = batch_td_error(q_s, q_next_s, transitions.a_t, transitions.r_t, transitions.d_t) losses = 0.5 * td_errors**2. # [Batch] return jnp.mean(losses) @functools.partial(jax.jit, static_argnums=(0,)) def _train_step(self, state: TrainingState, transitions: Transitions): # Do one learning step on the batch of transitions compute_loss_and_grad = jax.value_and_grad(self._loss) loss, dloss_dparams = compute_loss_and_grad(state.params, transitions) updates, new_opt_state = self._optimizer.update(dloss_dparams, state.opt_state) new_params = optix.apply_updates(state.params, updates) new_state = TrainingState( params=new_params, opt_state=new_opt_state, step=state.step + 1) return new_state, loss def update(self): if self._replay_buffer.is_ready(self._batch_size): # Collect a minibatch of random transitions transitions = Transitions(*self._replay_buffer.sample(self._batch_size)) # Compute loss and update parameters self._state, self.last_loss = self._train_step(self._state, transitions) def observe_first(self, timestep): self._replay_buffer.push(timestep, None) def observe(self, action, next_timestep): self._replay_buffer.push(next_timestep, action) class ReplayBuffer(object): """A simple Python replay buffer.""" def __init__(self, capacity): self._prev = None self._action = None self._latest = None self.buffer = collections.deque(maxlen=capacity) def push(self, timestep, action): self._prev = self._latest self._action = action self._latest = timestep if action is not None: self.buffer.append( (self._prev.observation, self._action, self._latest.reward, self._latest.discount, self._latest.observation)) def sample(self, batch_size): obs_tm1, a_tm1, r_t, discount_t, obs_t = zip( *random.sample(self.buffer, batch_size)) return (jnp.stack(obs_tm1), jnp.asarray(a_tm1), jnp.asarray(r_t), jnp.asarray(discount_t), jnp.stack(obs_t)) def is_ready(self, batch_size): return batch_size <= len(self.buffer) # + [markdown] id="MQoI1y88Mfsz" colab_type="text" # ### **Task: Train a NFQ agent** # # # + id="g7QmF3UGgYJa" colab_type="code" cellView="form" colab={} #@title Training the NFQ Agent. { form-width: "30%" } epsilon = 1. #@param {type:"number"} max_episode_length = 200 # Environment grid = build_gridworld_task( task='simple', observation_type=ObservationType.AGENT_GOAL_POS, max_episode_length=max_episode_length) environment, environment_spec = setup_environment(grid) # Define function approximation for the Q-values # i.e. Q_a(s) for a in num_actions. def q_network(observation: np.ndarray): """Outputs action values given an observation.""" model = hk.Sequential([ hk.Flatten(), # Flattens everything except the batch dimension hk.nets.MLP([50, 50, environment_spec.actions.num_values]) ]) return model(observation) # Build the trainable Q-learning agent agent = NeuralFittedQAgent( q_network, environment_spec.observations, epsilon=epsilon, replay_capacity=100000, batch_size=10, learning_rate=1e-3) returns = run_loop( environment=environment, agent=agent, num_episodes=300, logger_time_delta=1., log_loss=True) # + [markdown] id="YWbMwjdgmxGe" colab_type="text" # ### Evaluate the policy it learned # + id="bZM2TNJ0PB6F" colab_type="code" cellView="form" colab={} #@title Evaluating the agent. 
{ form-width: "30%" } # Change epsilon to be more greedy agent.epsilon = 0.05 # Look at a few episodes frames = evaluate(environment, agent, evaluation_episodes=5) display_video(frames, frame_repeat=10) # + id="vYmDVoZ4sDjJ" colab_type="code" cellView="form" colab={} #@title Visualise the learned Q values # Evaluate the policy for every state, similar to tabular agents above. environment.reset() pi = np.zeros(grid._layout_dims, dtype=np.int32) q = np.zeros(grid._layout_dims + (4,)) for y in range(grid._layout_dims[0]): for x in range(grid._layout_dims[1]): # Hack observation to see what the Q-network would output at that point. environment.set_state(x, y) obs = environment.get_obs() q[y, x] = np.asarray(agent.q_values(obs)) pi[y, x] = np.asarray(agent.select_action(obs)) plot_action_values(q) # + id="ek3MNCu0LGBE" colab_type="code" cellView="form" colab={} #@title Compare the greedy policy with the behaviour policy { form-width: "30%" } grid.plot_greedy_policy(q) plt.title('Greedy policy using the learnt Q-values') grid.plot_policy(pi) _ = plt.title("Policy using the agent's behaviour policy") # + [markdown] id="Clv_QlpgoY1J" colab_type="text" # # RL Lab - Part 3: Deep Reinforcement Learning # + [markdown] id="gjR8zkBdjIrB" colab_type="text" # # <!-- <center><img src="https://drive.google.com/uc?id=1ivTQBHWkYi_J9vWwXFd2sSWg5f2TB5T-" width="500" /></center> --> # # <center><img src="https://media.springernature.com/full/springer-static/image/art%3A10.1038%2Fnature14236/MediaObjects/41586_2015_Article_BFnature14236_Fig1_HTML.jpg" width="500" /></center> # # In this subsection, we will look at an advanced deep RL Agent based on the following publication, [Playing Atari with Deep Reinforcement Learning](https://deepmind.com/research/publications/playing-atari-deep-reinforcement-learning), which introduced the first deep learning model to successfully learn control policies directly from high-dimensional pixel inputs using RL. # # + [markdown] colab_type="text" id="BukOfOsmtSQn" # ## 3.1 Create an ACME DQN agent # + id="NbHdPc-nxO2j" colab_type="code" cellView="form" colab={} #@title Create the environment grid = build_gridworld_task( task='simple', observation_type=ObservationType.GRID, max_episode_length=200) environment, environment_spec = setup_environment(grid) # + colab_type="code" id="3Jcjk1w6oHVX" cellView="form" colab={} #@title Construct the agent and a training loop { form-width: "30%" } # Build agent networks def network(x): model = hk.Sequential([ hk.Conv2D(32, kernel_shape=[4,4], stride=[2,2], padding='VALID'), jax.nn.relu, hk.Conv2D(64, kernel_shape=[3,3], stride=[1,1], padding='VALID'), jax.nn.relu, hk.Flatten(), hk.nets.MLP([50, 50, environment_spec.actions.num_values]) ]) return model(x) # Avoid logging from Acme class DummyLogger(object): def write(self, data): pass # Use library agent implementation. agent = dqn.DQN( environment_spec=environment_spec, network=network, batch_size=10, samples_per_insert=2, epsilon=0.05, min_replay_size=10,) # + id="0dHDdPDr3QxI" colab_type="code" cellView="form" colab={} # @title Run a training loop { form-width: "30%" } # Run a `num_episodes` training episodes. # Rerun this cell until the agent has learned the given task. 
returns = run_loop(environment=environment, agent=agent, num_episodes=100, num_steps=100000, logger_time_delta=0.2 ) # + id="8ksVITeN5_Vq" colab_type="code" cellView="form" colab={} # @title Visualise the learned Q values { form-width: "30%" } # get agent parameters params = agent._learner.get_variables([])[0] # Evaluate the policy for every state, similar to tabular agents above. pi = np.zeros(grid._layout_dims, dtype=np.int32) q = np.zeros(grid._layout_dims + (4,)) for y in range(grid._layout_dims[0]): for x in range(grid._layout_dims[1]): # Hack observation to see what the Q-network would output at that point. environment.set_state(x, y) obs = environment.get_obs() q[y, x] = np.asarray(agent._learner._forward(params, np.expand_dims(obs, axis=0))) pi[y, x] = np.asarray(agent.select_action(obs)) plot_action_values(q) # + id="6PQaQej4LsU-" colab_type="code" cellView="form" colab={} #@title Compare the greedy policy with the agent's policy { form-width: "30%" } grid.plot_greedy_policy(q) plt.title('Greedy policy using the learnt Q-values') grid.plot_policy(pi) plt.title("Policy using the agent's policy") # + [markdown] id="acqPbd8zXH_K" colab_type="text" # ## 3.2 **[Advanced]** DQN Algorithm. # # The following coding exercise implements the loss function described in the DQN paper. This loss function is used by a learner class to compute gradients for the parameters $\theta_i$ of the Q-network $Q( \cdot; \theta_i)$: # # ```none # loss(params: hk.Params, target_params: hk.Params, sample: reverb.ReplaySample) # ``` # which, at iteration `i` computes the DQN loss $L_i$ on the parameters $\theta_i$, based on a the set of target parameters $\theta_{i-1}$ and a given batch of sampled trajectories `sample`. As described in the manuscript, the loss function is defined as: # # $$L_i (\theta_i) = \mathbb{E}_{\color{red}{s},\color{blue}{a} \sim \rho(\cdot)} \left[ \left( y_i - Q(\color{red}{s},\color{blue}{a} ;\theta_i) \right)^2\right]$$ # # where the target $y_i$ is computed using a bootstrap value computed from Q-value network with target parameters: # # $$ y_i = \mathbb{E}_{\color{red}{s'} \sim \mathcal{E}} \left[ \color{green}{r} + \gamma \max_{\color{blue}{a'} \in \color{blue}{\mathcal{A}}} Q(\color{red}{s'}, \color{blue}{a'} ; \theta^{\text{target}}_i) \; | \; \color{red}{s}, \color{blue}{a} \right] $$ # # The batch of data `sample` is prepackaged by the agent to match the sampling distributions $\rho$ and $\mathcal{E}$. To get the explicit data items, use the following: # # ```none # o_tm1, a_tm1, r_t, d_t, o_t = sample.data # ``` # # The function is expected to return # * `mean_loss` is the mean of the above loss over the batched data, # * (`keys`, `priorities`) will pair the `keys` corresponding to each batch item to the absolute TD-error used to compute the `mean_loss` above. The agent uses these to update priorities for samples in the replay buffer. # # # **Note**. A full implementation of a DQN agent is outside the scope of this tutorial, but we encoruage you to explore the code (in a cell below) to understand where the learner fits with other the services used by the agent. Moreover, if you feel ambitious, we prepared a separate exercise where you are expected to implement the learner itself (see *DQN Learner [Coding Task - Hard]*). 
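#
# To make the correspondence between the equations above and the code below concrete, here is a minimal, simplified sketch of the loss body. This is our illustration only: the actual learner below additionally applies reward clipping, a Huber loss, double Q-learning and importance weighting, and also returns the `(keys, priorities)` pair expected by the prioritized replay buffer.
#
# ```python
# o_tm1, a_tm1, r_t, d_t, o_t = sample.data
# q_tm1 = network.apply(params, o_tm1)         # Q(s, .; theta_i),       shape [B, A]
# q_t = network.apply(target_params, o_t)      # Q(s', .; theta_target), shape [B, A]
# y = r_t + discount * d_t * jnp.max(q_t, axis=-1)         # bootstrap target y_i
# td_error = y - q_tm1[jnp.arange(a_tm1.shape[0]), a_tm1]  # y_i - Q(s, a; theta_i)
# mean_loss = jnp.mean(0.5 * td_error ** 2)
# ```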
# # # # **[Optional]** # - use a Double-Q Learning Loss function instead of the original published loss (see [`rlax.double_q_learning`](https://github.com/deepmind/rlax/blob/870cba1ea8ad36725f4f3a790846298657b6fd4b/rlax/_src/value_learning.py#L233)) for more details. # - for more stable optimization, use the Huber Loss instead of $L_2$, as prescribed in the manuscript (see [`rlax.huber_loss`](https://github.com/deepmind/rlax/blob/870cba1ea8ad36725f4f3a790846298657b6fd4b/rlax/_src/clipping.py#L31)). # # + id="liZKoQbaj4Wq" colab_type="code" cellView="form" colab={} # @title **[Coding Task - Easy]** DQN Loss function { form-width: "30%" } TrainingState = namedtuple('TrainingState', 'params, target_params, opt_state, step') LearnerOutputs = namedtuple('LearnerOutputs', 'keys, priorities') class DQNLearner(acme.Learner): """DQN learner.""" _state: TrainingState def __init__(self, network, obs_spec, discount, importance_sampling_exponent, target_update_period, data_iterator, optimizer, rng, replay_client, max_abs_reward=1., huber_loss_parameter=1., ): """Initializes the learner.""" # Transform network into a pure function. network = hk.transform(network) def loss(params: hk.Params, target_params: hk.Params, sample: reverb.ReplaySample): o_tm1, a_tm1, r_t, d_t, o_t = sample.data keys, probs = sample.info[:2] # ============ YOUR CODE HERE ============= # return mean_loss, (keys, priorities) pass def sgd_step(state, samples): # Compute gradients on the given loss function and update the network # using the optimizer provided at init time. grad_fn = jax.grad(loss, has_aux=True) gradients, (keys, priorities) = grad_fn(state.params, state.target_params, samples) updates, new_opt_state = optimizer.update(gradients, state.opt_state) new_params = optix.apply_updates(state.params, updates) # Update the internal state for the learner with (1) network parameters, # (2) parameters of the target network, (3) the state of the optimizer, # (4) Numbers of SGD steps performed by the agent. new_state = TrainingState( params=new_params, target_params=state.target_params, opt_state=new_opt_state, step=state.step + 1) outputs = LearnerOutputs(keys=keys, priorities=priorities) return new_state, outputs # Internalise agent components (replay buffer, networks, optimizer). self._replay_client = replay_client self._iterator = data_iterator # Since sampling is base on a priority experience replay, we need to pass # the absolute td-loss values to the replay client to update priorities # accordingly. def update_priorities(outputs: LearnerOutputs): for key, priority in zip(outputs.keys, outputs.priorities): self._replay_client.mutate_priorities( table='priority_table', updates={key: priority}) self._update_priorities = update_priorities # Internalise the hyperparameters. self._target_update_period = target_update_period # Internalise logging/counting objects. self._counter = counting.Counter() self._logger = loggers.TerminalLogger('learner', time_delta=1.) # Initialise parameters and optimiser state. def initialization_fn(values): values = tree_util.tree_map(lambda x: jnp.zeros(x.shape, x.dtype), values) # Add batch dim. 
return tree_util.tree_map(lambda x: jnp.expand_dims(x, axis=0), values) initial_params = network.init(next(rng), initialization_fn(obs_spec)) initial_target_params = network.init(next(rng), initialization_fn(obs_spec)) initial_opt_state = optimizer.init(initial_params) self._state = TrainingState( params=initial_params, target_params=initial_target_params, opt_state=initial_opt_state, step=0) self._forward = jax.jit(network.apply) self._sgd_step = jax.jit(sgd_step) def step(self): samples = next(self._iterator) # Do a batch of SGD. self._state, outputs = self._sgd_step(self._state, samples) # Update our counts and record it. result = self._counter.increment(steps=1) # Periodically update target network parameters. if self._state.step % self._target_update_period == 0: self._state = self._state._replace(target_params=self._state.params) # Update priorities in replay. self._update_priorities(outputs) # Write to logs. self._logger.write(result) def get_variables(self): """Network variables after a number of SGD steps.""" return self._state.params # + id="jbULeKGwURlL" colab_type="code" cellView="form" colab={} # @title **[Coding Task - Hard]** DQN Learner { form-width: "30%" } TrainingState = namedtuple('TrainingState', 'params, target_params, opt_state, step') LearnerOutputs = namedtuple('LearnerOutputs', 'keys, priorities') class DQNLearner(acme.Learner): """DQN learner.""" _state: TrainingState def __init__(self, network, obs_spec, discount, importance_sampling_exponent, target_update_period, data_iterator, optimizer, rng, replay_client, max_abs_reward=1., huber_loss_parameter=1., ): """Initializes the learner.""" # ============ YOUR CODE HERE ============= # Use provided params to initialize any jax functions used in the `step` # function. # Internalise agent components (replay buffer, networks, optimizer). self._replay_client = replay_client self._iterator = data_iterator # Since sampling is base on a priority experience replay, we need to pass # the absolute td-loss values to the replay client to update priorities # accordingly. def update_priorities(outputs: LearnerOutputs): for key, priority in zip(outputs.keys, outputs.priorities): self._replay_client.mutate_priorities( table='priority_table', updates={key: priority}) self._update_priorities = update_priorities # Internalise the hyperparameters. self._target_update_period = target_update_period # Internalise logging/counting objects. self._counter = counting.Counter() self._logger = loggers.TerminalLogger('learner', time_delta=1.) # Initialise parameters and optimiser state. # Transform network into a pure function. network = hk.transform(network) def initialization_fn(values): values = tree_util.tree_map(lambda x: jnp.zeros(x.shape, x.dtype), values) # Add batch dim. return tree_util.tree_map(lambda x: jnp.expand_dims(x, axis=0), values) initial_params = network.init(next(rng), initialization_fn(obs_spec)) initial_target_params = network.init(next(rng), initialization_fn(obs_spec)) initial_opt_state = optimizer.init(initial_params) self._state = TrainingState( params=initial_params, target_params=initial_target_params, opt_state=initial_opt_state, step=0) def step(self): samples = next(self._iterator) # Do a batch of SGD and update self._state accordingly. # ============ YOUR CODE HERE ============= # Update our counts and record it. result = self._counter.increment(steps=1) # Periodically update target network parameters. # ============ YOUR CODE HERE ============= # Update priorities in replay. 
self._update_priorities(outputs) # Write to logs. self._logger.write(result) def get_variables(self): """Network variables after a number of SGD steps.""" return self._state.params # + colab_type="code" cellView="form" id="eZuigF_bD0DP" colab={} # @title **[Solution]** DQN Learner { form-width: "30%" } TrainingState = namedtuple('TrainingState', 'params, target_params, opt_state, step') LearnerOutputs = namedtuple('LearnerOutputs', 'keys, priorities') class DQNLearner(acme.Learner): """DQN learner.""" _state: TrainingState def __init__(self, network, obs_spec, discount, importance_sampling_exponent, target_update_period, data_iterator, optimizer, rng, replay_client, max_abs_reward=1., huber_loss_parameter=1., ): """Initializes the learner.""" # Transform network into a pure function. network = hk.transform(network) def loss(params: hk.Params, target_params: hk.Params, sample: reverb.ReplaySample): o_tm1, a_tm1, r_t, d_t, o_t = sample.data keys, probs = sample.info[:2] # Forward pass. q_tm1 = network.apply(params, o_tm1) q_t_value = network.apply(target_params, o_t) q_t_selector = network.apply(params, o_t) # Cast and clip rewards. d_t = (d_t * discount).astype(jnp.float32) r_t = jnp.clip(r_t, -max_abs_reward, max_abs_reward).astype(jnp.float32) # Compute double Q-learning n-step TD-error. batch_error = jax.vmap(rlax.double_q_learning) td_error = batch_error(q_tm1, a_tm1, r_t, d_t, q_t_value, q_t_selector) batch_loss = rlax.huber_loss(td_error, huber_loss_parameter) # Importance weighting. importance_weights = (1. / probs).astype(jnp.float32) importance_weights **= importance_sampling_exponent importance_weights /= jnp.max(importance_weights) # Reweight. mean_loss = jnp.mean(importance_weights * batch_loss) # [] priorities = jnp.abs(td_error).astype(jnp.float64) return mean_loss, (keys, priorities) def sgd_step(state, samples): # Compute gradients on the given loss function and update the network # using the optimizer provided at init time. grad_fn = jax.grad(loss, has_aux=True) gradients, (keys, priorities) = grad_fn(state.params, state.target_params, samples) updates, new_opt_state = optimizer.update(gradients, state.opt_state) new_params = optix.apply_updates(state.params, updates) # Update the internal state for the learner with (1) network parameters, # (2) parameters of the target network, (3) the state of the optimizer, # (4) Numbers of SGD steps performed by the agent. new_state = TrainingState( params=new_params, target_params=state.target_params, opt_state=new_opt_state, step=state.step + 1) outputs = LearnerOutputs(keys=keys, priorities=priorities) return new_state, outputs # Internalise agent components (replay buffer, networks, optimizer). self._replay_client = replay_client self._iterator = data_iterator # Since sampling is base on a priority experience replay, we need to pass # the absolute td-loss values to the replay client to update priorities # accordingly. def update_priorities(outputs: LearnerOutputs): for key, priority in zip(outputs.keys, outputs.priorities): self._replay_client.mutate_priorities( table='priority_table', updates={key: priority}) self._update_priorities = update_priorities # Internalise the hyperparameters. self._target_update_period = target_update_period # Internalise logging/counting objects. self._counter = counting.Counter() self._logger = loggers.TerminalLogger('learner', time_delta=1.) # Initialise parameters and optimiser state. 
def initialization_fn(values): values = tree_util.tree_map(lambda x: jnp.zeros(x.shape, x.dtype), values) # Add batch dim. return tree_util.tree_map(lambda x: jnp.expand_dims(x, axis=0), values) initial_params = network.init(next(rng), initialization_fn(obs_spec)) initial_target_params = network.init(next(rng), initialization_fn(obs_spec)) initial_opt_state = optimizer.init(initial_params) self._state = TrainingState( params=initial_params, target_params=initial_target_params, opt_state=initial_opt_state, step=0) self._forward = jax.jit(network.apply) self._sgd_step = jax.jit(sgd_step) def step(self): samples = next(self._iterator) # Do a batch of SGD. self._state, outputs = self._sgd_step(self._state, samples) # Update our counts and record it. result = self._counter.increment(steps=1) # Periodically update target network parameters. if self._state.step % self._target_update_period == 0: self._state = self._state._replace(target_params=self._state.params) # Update priorities in replay. self._update_priorities(outputs) # Write to logs. self._logger.write(result) def get_variables(self): """Network variables after a number of SGD steps.""" return self._state.params # + id="ywObWtqgaSXx" colab_type="code" cellView="form" colab={} # @title DQN Agent implementation (use for reference only) { form-width: "30%" } class DQN(acme.Actor): def __init__( self, environment_spec, network, batch_size=256, prefetch_size=4, target_update_period=100, samples_per_insert=32.0, min_replay_size=1000, max_replay_size=1000000, importance_sampling_exponent=0.2, priority_exponent=0.6, n_step=5, epsilon=0., learning_rate=1e-3, discount=0.99, ): # Create a replay server to add data to. This is initialized as a # table, and a Learner (defined separately) will be in charge of updating # sample priorities based on the corresponding learner loss. replay_table = reverb.Table( name='priority_table', sampler=reverb.selectors.Prioritized(priority_exponent), remover=reverb.selectors.Fifo(), max_size=max_replay_size, rate_limiter=reverb.rate_limiters.MinSize(1)) self._server = reverb.Server([replay_table], port=None) address = f'localhost:{self._server.port}' # Use ACME reverb adder as a tool to add transition data into the replay # buffer defined above. self._adder = adders.NStepTransitionAdder( client=reverb.Client(address), n_step=n_step, discount=discount) # ACME datasets provides an interface to easily sample from a replay server. dataset = datasets.make_reverb_dataset( client=reverb.TFClient(address), environment_spec=environment_spec, batch_size=batch_size, prefetch_size=prefetch_size, transition_adder=True) data_iterator = dataset.as_numpy_iterator() # Create a learner that updates the parameters (and initializes them). self._learner = DQNLearner( network=network, obs_spec=environment_spec.observations, rng=hk.PRNGSequence(1), optimizer=optix.adam(learning_rate), discount=discount, importance_sampling_exponent=importance_sampling_exponent, target_update_period=target_update_period, data_iterator=data_iterator, replay_client=reverb.Client(address), ) # Create a feed forward actor that obtains its variables from the DQNLearner # above. def policy(params, key, observation): action_values = hk.transform(network).apply(params, observation) return rlax.epsilon_greedy(epsilon).sample(key, action_values) self._policy = policy self._rng = hk.PRNGSequence(1) # We'll ignore the first min_observations when determining whether to take # a step and we'll do so by making sure num_observations >= 0. 
self._num_observations = -max(batch_size, min_replay_size) observations_per_step = float(batch_size) / samples_per_insert if observations_per_step >= 1.0: self._observations_per_update = int(observations_per_step) self._learning_steps_per_update = 1 else: self._observations_per_update = 1 self._learning_steps_per_update = int(1.0 / observations_per_step) def select_action(self, observation): observation = tree_util.tree_map(lambda x: jnp.expand_dims(x, axis=0), observation) key = next(self._rng) params = self._learner.get_variables() action = self._policy(params, key, observation) action = tree_util.tree_map(lambda x: np.array(x).squeeze(axis=0), action) return action def observe_first(self, timestep): self._adder.add_first(timestep) def observe(self, action, next_timestep): self._num_observations += 1 self._adder.add(action, next_timestep) def update(self): # Only allow updates after some minimum number of observations have been and # then at some period given by observations_per_update. if (self._num_observations >= 0 and self._num_observations % self._observations_per_update == 0): self._num_observations = 0 # Run a number of learner steps (usually gradient steps). for _ in range(self._learning_steps_per_update): self._learner.step() # + id="0iBoBqLvcy14" colab_type="code" cellView="form" colab={} # @title Run a training loop { form-width: "30%" } # Run a `num_episodes` training episodes. # Rerun this cell until the agent has learned the given task. grid = build_gridworld_task( task='simple', observation_type=ObservationType.GRID, max_episode_length=100, ) environment, environment_spec = setup_environment(grid) agent = DQN( environment_spec=environment_spec, network=network, batch_size=16, samples_per_insert=2, epsilon=0.1, min_replay_size=100) returns = run_loop(environment=environment, agent=agent, num_episodes=200, logger_time_delta=0.2) # + [markdown] id="iJi7LDrn0eO4" colab_type="text" # ### DQN agent on the Gym Cartpole environment # # Here we show that you can apply what you learned to other environments such as Cartpole in [Gym](https://gym.openai.com/). # # # <center><img src="https://user-images.githubusercontent.com/10624937/42135683-dde5c6f0-7d13-11e8-90b1-8770df3e40cf.gif" height="250" /></center> # # + id="DIERzZVk0xIh" colab_type="code" cellView="form" colab={} #@title Construct the agent and run the training loop { form-width: "30%" } # Try different parameters to see how learning is affected. env = gym_wrapper.GymWrapper(gym.make('CartPole-v0')) env = wrappers.SinglePrecisionWrapper(env) environment, environment_spec = setup_environment(env) # Build agent networks def network(x): model = hk.Sequential([ hk.Flatten(), hk.nets.MLP([100, environment_spec.actions.num_values]) ]) return model(x) agent = dqn.DQN( environment_spec=environment_spec, network=network, batch_size=64, epsilon=0.01, learning_rate=1e-4, min_replay_size=100) returns = run_loop(environment=environment, agent=agent, num_episodes=300, logger_time_delta=0.2) # + id="dDmLcICc98Z8" colab_type="code" cellView="form" colab={} #@title Visualise training curve { form-width: "30%" } # Compute rolling average over returns returns_avg = pd.Series(returns).rolling(10, center=True).mean() plt.figure(figsize=(8, 5)) plt.plot(range(len(returns)), returns_avg) plt.xlabel('Episodes') plt.ylabel('Total reward'); # + [markdown] id="rzqrYxAtH11S" colab_type="text" # # Want to learn more? 
# # # + [markdown] id="odBz1OO0JIXY" colab_type="text" # This Colab was inspired by the [EEML 2019 RL practical](https://github.com/eemlcommunity/PracticalSessions2019/blob/master/rl/RL_Tutorial.ipynb) and the [Acme tutorial](https://github.com/deepmind/acme/blob/master/examples/tutorial.ipynb). # # Books and lecture notes # * [Reinforcement Learning: an Introduction by Sutton & Barto](http://incompleteideas.net/book/RLbook2018.pdf) # * [Algorithms for Reinforcement Learning by <NAME>](https://sites.ualberta.ca/~szepesva/papers/RLAlgsInMDPs.pdf) # # Lectures and course # * [RL Course by <NAME>](https://www.youtube.com/playlist?list=PLzuuYNsE1EZAXYR4FJ75jcJseBmo4KQ9-) # * [Reinforcement Learning Course | UCL & DeepMind](https://www.youtube.com/playlist?list=PLqYmG7hTraZBKeNJ-JE_eyJHZ7XgBoAyb) # * [<NAME>skill Stanford RL Course](https://www.youtube.com/playlist?list=PLoROMvodv4rOSOPzutgyCTapiGlY2Nd8u) # * [RL Course on Coursera by <NAME> & <NAME>](https://www.coursera.org/specializations/reinforcement-learning) # # More practical: # * [Spinning Up in Deep RL by <NAME>](https://spinningup.openai.com/en/latest/) # * [Acme white paper](http://go/arxiv/2006.00979) # # # #
EEML2020_RL_Tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Mathematical Basis # # This section summarizes the basic knowledge of linear algebra, differentiation, and probability required to understand the contents in this book. To avoid long discussions of mathematical knowledge not required to understand this book, a few definitions in this section are slightly simplified. # # # ## Linear Algebra # # Below we summarize the concepts of vectors, matrices, operations, norms, eigenvectors, and eigenvalues. # # ### Vectors # # Vectors in this book refer to column vectors. An $n$-dimensional vector $\boldsymbol{x}$ can be written as # # $$ # \boldsymbol{x} = # \begin{bmatrix} # x_{1} \\ # x_{2} \\ # \vdots \\ # x_{n} # \end{bmatrix}, # $$ # # where $x_1, \ldots, x_n$ are elements of the vector. To express that $\boldsymbol{x}$ is an $n$-dimensional vector with elements from the set of real numbers, we write $\boldsymbol{x} \in \mathbb{R}^{n}$ or $\boldsymbol{x} \in \mathbb{R}^{n \times 1}$. # # # ### Matrices # # An expression for a matrix with $m$ rows and $n$ columns can be written as # # $$ # \boldsymbol{X} = # \begin{bmatrix} # x_{11} & x_{12} & \dots & x_{1n} \\ # x_{21} & x_{22} & \dots & x_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # x_{m1} & x_{m2} & \dots & x_{mn} # \end{bmatrix}, # $$ # # Here, $x_{ij}$ is the element in row $i$ and column $j$ in the matrix $\boldsymbol{X}$ ($1 \leq i \leq m, 1 \leq j \leq n$). To express that $\boldsymbol{X}$ is a matrix with $m$ rows and $n$ columns consisting of elements from the set of real numbers, we write $\boldsymbol{X} \in \mathbb{R}^{m \times n}$. It is not difficult to see that vectors are a special class of matrices. # # # ### Operations # # Assume the elements in the $n$-dimensional vector $\boldsymbol{a}$ are $a_1, \ldots, a_n$, and the elements in the $n$-dimensional vector $\boldsymbol{b}$ are $b_1, \ldots, b_n$. The dot product (internal product) of vectors $\boldsymbol{a}$ and $\boldsymbol{b}$ is a scalar: # # $$ # \boldsymbol{a} \cdot \boldsymbol{b} = a_1 b_1 + \ldots + a_n b_n. # $$ # # # Assume two matrices with $m$ rows and $n$ columns: # # $$ # \boldsymbol{A} = # \begin{bmatrix} # a_{11} & a_{12} & \dots & a_{1n} \\ # a_{21} & a_{22} & \dots & a_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m1} & a_{m2} & \dots & a_{mn} # \end{bmatrix},\quad # \boldsymbol{B} = # \begin{bmatrix} # b_{11} & b_{12} & \dots & b_{1n} \\ # b_{21} & b_{22} & \dots & b_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # b_{m1} & b_{m2} & \dots & b_{mn} # \end{bmatrix}. # $$ # # The transpose of a matrix $\boldsymbol{A}$ with $m$ rows and $n$ columns is a matrix with $n$ rows and $m$ columns whose rows are formed from the columns of the original matrix: # # $$ # \boldsymbol{A}^\top = # \begin{bmatrix} # a_{11} & a_{21} & \dots & a_{m1} \\ # a_{12} & a_{22} & \dots & a_{m2} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{1n} & a_{2n} & \dots & a_{mn} # \end{bmatrix}. # $$ # # # To add two matrices of the same shape, we add them element-wise: # # $$ # \boldsymbol{A} + \boldsymbol{B} = # \begin{bmatrix} # a_{11} + b_{11} & a_{12} + b_{12} & \dots & a_{1n} + b_{1n} \\ # a_{21} + b_{21} & a_{22} + b_{22} & \dots & a_{2n} + b_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m1} + b_{m1} & a_{m2} + b_{m2} & \dots & a_{mn} + b_{mn} # \end{bmatrix}. 
# $$ # # We use the symbol $\odot$ to indicate the element-wise multiplication of two matrices: # # $$ # \boldsymbol{A} \odot \boldsymbol{B} = # \begin{bmatrix} # a_{11} b_{11} & a_{12} b_{12} & \dots & a_{1n} b_{1n} \\ # a_{21} b_{21} & a_{22} b_{22} & \dots & a_{2n} b_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m1} b_{m1} & a_{m2} b_{m2} & \dots & a_{mn} b_{mn} # \end{bmatrix}. # $$ # # Define a scalar $k$. Multiplication of scalars and matrices is also an element-wise multiplication: # # # $$ # k\boldsymbol{A} = # \begin{bmatrix} # ka_{11} & ka_{12} & \dots & ka_{1n} \\ # ka_{21} & ka_{22} & \dots & ka_{2n} \\ # \vdots & \vdots & \ddots & \vdots \\ # ka_{m1} & ka_{m2} & \dots & ka_{mn} # \end{bmatrix}. # $$ # # Other operations such as scalar and matrix addition, and division by an element are similar to the multiplication operation in the above equation. Calculating the square root or taking logarithms of a matrix are performed by calculating the square root or logarithm, respectively, of each element of the matrix to obtain a matrix with the same shape as the original matrix. # # Matrix multiplication is different from element-wise matrix multiplication. Assume $\boldsymbol{A}$ is a matrix with $m$ rows and $p$ columns and $\boldsymbol{B}$ is a matrix with $p$ rows and $n$ columns. The product (matrix multiplication) of these two matrices is denoted # # $$ # \boldsymbol{A} \boldsymbol{B} = # \begin{bmatrix} # a_{11} & a_{12} & \dots & a_{1p} \\ # a_{21} & a_{22} & \dots & a_{2p} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{i1} & a_{i2} & \dots & a_{ip} \\ # \vdots & \vdots & \ddots & \vdots \\ # a_{m1} & a_{m2} & \dots & a_{mp} # \end{bmatrix} # \begin{bmatrix} # b_{11} & b_{12} & \dots & b_{1j} & \dots & b_{1n} \\ # b_{21} & b_{22} & \dots & b_{2j} & \dots & b_{2n} \\ # \vdots & \vdots & \ddots & \vdots & \ddots & \vdots \\ # b_{p1} & b_{p2} & \dots & b_{pj} & \dots & b_{pn} # \end{bmatrix}, # $$ # # is a matrix with $m$ rows and $n$ columns, with the element in row $i$ and column $j$ ($1 \leq i \leq m, 1 \leq j \leq n$) equal to # # $$a_{i1}b_{1j} + a_{i2}b_{2j} + \ldots + a_{ip}b_{pj} = \sum_{k=1}^p a_{ik}b_{kj}.$$ # # # ### Norms # # Assume the elements in the $n$-dimensional vector $\boldsymbol{x}$ are $x_1, \ldots, x_n$. The $L_p$ norm of the vector $\boldsymbol{x}$ is # # $$\|\boldsymbol{x}\|_p = \left(\sum_{i=1}^n \left|x_i \right|^p \right)^{1/p}.$$ # # For example, the $L_1$ norm of $\boldsymbol{x}$ is the sum of the absolute values ​​of the vector elements: # # $$\|\boldsymbol{x}\|_1 = \sum_{i=1}^n \left|x_i \right|.$$ # # While the $L_2$ norm of $\boldsymbol{x}$ is the square root of the sum of the squares of the vector elements: # # $$\|\boldsymbol{x}\|_2 = \sqrt{\sum_{i=1}^n x_i^2}.$$ # # We usually use $\|\boldsymbol{x}\|$ to refer to the $L_2$ norm of $\boldsymbol{x}$. # # Assume $\boldsymbol{X}$ is a matrix with $m$ rows and $n$ columns. The Frobenius norm of matrix $\boldsymbol{X}$ is the square root of the sum of the squares of the matrix elements: # # $$\|\boldsymbol{X}\|_F = \sqrt{\sum_{i=1}^m \sum_{j=1}^n x_{ij}^2},$$ # # Here, $x_{ij}$ is the element of matrix $\boldsymbol{X}$ in row $i$ and column $j$. # # # ### Eigenvectors and Eigenvalues # # # Let $\boldsymbol{A}$ be a matrix with $n$ rows and $n$ columns. 
If $\lambda$ is a scalar and $\boldsymbol{v}$ is a non-zero $n$-dimensional vector with # # $$\boldsymbol{A} \boldsymbol{v} = \lambda \boldsymbol{v},$$ # # then $\boldsymbol{v}$ is an called eigenvector vector of matrix $\boldsymbol{A}$, and $\lambda$ is called an eigenvalue of $\boldsymbol{A}$ corresponding to $\boldsymbol{v}$. # # # # ## Differentials # # Here we briefly introduce some basic concepts and calculations for differentials. # # # ### Derivatives and Differentials # # Assume the input and output of function $f: \mathbb{R} \rightarrow \mathbb{R}$ are both scalars. The derivative $f$ is defined as # # $$f'(x) = \lim_{h \rightarrow 0} \frac{f(x+h) - f(x)}{h},$$ # # when the limit exists (and $f$ is said to be differentiable). Given $y = f(x)$, where $x$ and $y$ are the arguments and dependent variables of function $f$, respectively, the following derivative and differential expressions are equivalent: # # $$f'(x) = y' = \frac{\text{d}y}{\text{d}x} = \frac{\text{d}f}{\text{d}x} = \frac{\text{d}}{\text{d}x} f(x) = \text{D}f(x) = \text{D}_x f(x),$$ # # Here, the symbols $\text{D}$ and $\text{d}/\text{d}x$ are also called differential operators. Common differential calculations are $\text{D}C = 0$ ($C$ is a constant), $\text{D}x^n = nx^{n-1}$ ($n$ is a constant), and $\text{D}e^x = e^x$, $\text{D}\ln(x) = 1/x$. # # If functions $f$ and $g$ are both differentiable and $C$ is a constant, then # # $$ # \begin{aligned} # \frac{\text{d}}{\text{d}x} [Cf(x)] &= C \frac{\text{d}}{\text{d}x} f(x),\\ # \frac{\text{d}}{\text{d}x} [f(x) + g(x)] &= \frac{\text{d}}{\text{d}x} f(x) + \frac{\text{d}}{\text{d}x} g(x),\\ # \frac{\text{d}}{\text{d}x} [f(x)g(x)] &= f(x) \frac{\text{d}}{\text{d}x} [g(x)] + g(x) \frac{\text{d}}{\text{d}x} [f(x)],\\ # \frac{\text{d}}{\text{d}x} \left[\frac{f(x)}{g(x)}\right] &= \frac{g(x) \frac{\text{d}}{\text{d}x} [f(x)] - f(x) \frac{\text{d}}{\text{d}x} [g(x)]}{[g(x)]^2}. # \end{aligned} # $$ # # # If functions $y=f(u)$ and $u=g(x)$ are both differentiable, then the Chain Rule states that # # $$\frac{\text{d}y}{\text{d}x} = \frac{\text{d}y}{\text{d}u} \frac{\text{d}u}{\text{d}x}.$$ # # # ### Taylor Expansion # # The Taylor expansion of function $f$ is given by the infinite sum # # $$f(x) = \sum_{n=0}^\infty \frac{f^{(n)}(a)}{n!} (x-a)^n,$$ # # when it exists. Here, $f^{(n)}$ is the $n$th derivative of $f$, and $n!$ is the factorial of $n$. For a sufficiently small number $\epsilon$, we can replace $x$ and $a$ with $x+\epsilon$ and $x$ respectively to obtain # # $$f(x + \epsilon) \approx f(x) + f'(x) \epsilon + \mathcal{O}(\epsilon^2).$$ # # Because $\epsilon$ is sufficiently small, the above formula can be simplified to # # $$f(x + \epsilon) \approx f(x) + f'(x) \epsilon.$$ # # # # ### Partial Derivatives # # Let $u = f(x_1, x_2, \ldots, x_n)$ be a function with $n$ arguments. The partial derivative of $u$ with respect to its $i$th parameter $x_i$ is # # $$ \frac{\partial u}{\partial x_i} = \lim_{h \rightarrow 0} \frac{f(x_1, \ldots, x_{i-1}, x_i+h, x_{i+1}, \ldots, x_n) - f(x_1, \ldots, x_i, \ldots, x_n)}{h}.$$ # # # The following partial derivative expressions are equivalent: # # $$\frac{\partial u}{\partial x_i} = \frac{\partial f}{\partial x_i} = f_{x_i} = f_i = D_i f = D_{x_i} f.$$ # # To calculate $\partial u/\partial x_i$, we simply treat $x_1, \ldots, x_{i-1}, x_{i+1}, \ldots, x_n$ as constants and calculate the derivative of $u$ with respect to $x_i$. 
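#
# For example, if $u = x_1^2 + 3 x_1 x_2$, then treating $x_2$ as a constant gives $\frac{\partial u}{\partial x_1} = 2x_1 + 3x_2$, while treating $x_1$ as a constant gives $\frac{\partial u}{\partial x_2} = 3x_1$.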
# # # # ### Gradients # # # Assume the input of function $f: \mathbb{R}^n \rightarrow \mathbb{R}$ is an $n$-dimensional vector $\boldsymbol{x} = [x_1, x_2, \ldots, x_n]^\top$ and the output is a scalar. The gradient of function $f(\boldsymbol{x})$ with respect to $\boldsymbol{x}$ is a vector of $n$ partial derivatives: # # $$\nabla_{\boldsymbol{x}} f(\boldsymbol{x}) = \bigg[\frac{\partial f(\boldsymbol{x})}{\partial x_1}, \frac{\partial f(\boldsymbol{x})}{\partial x_2}, \ldots, \frac{\partial f(\boldsymbol{x})}{\partial x_n}\bigg]^\top.$$ # # # To be concise, we sometimes use $\nabla f(\boldsymbol{x})$ to replace $\nabla_{\boldsymbol{x}} f(\boldsymbol{x})$. # # If $\boldsymbol{A}$ is a matrix with $m$ rows and $n$ columns, and $\boldsymbol{x}$ is an $n$-dimensional vector, the following identities hold: # # $$ # \begin{aligned} # \nabla_{\boldsymbol{x}} \boldsymbol{A} \boldsymbol{x} &= \boldsymbol{A}^\top, \\ # \nabla_{\boldsymbol{x}} \boldsymbol{x}^\top \boldsymbol{A} &= \boldsymbol{A}, \\ # \nabla_{\boldsymbol{x}} \boldsymbol{x}^\top \boldsymbol{A} \boldsymbol{x} &= (\boldsymbol{A} + \boldsymbol{A}^\top)\boldsymbol{x},\\ # \nabla_{\boldsymbol{x}} \|\boldsymbol{x} \|^2 &= \nabla_{\boldsymbol{x}} \boldsymbol{x}^\top \boldsymbol{x} = 2\boldsymbol{x}. # \end{aligned} # $$ # # Similarly, if $\boldsymbol{X}$ is a matrix, then # $$\nabla_{\boldsymbol{X}} \|\boldsymbol{X} \|_F^2 = 2\boldsymbol{X}.$$ # # # # # ### Hessian Matrices # # Assume the input of function $f: \mathbb{R}^n \rightarrow \mathbb{R}$ is an $n$-dimensional vector $\boldsymbol{x} = [x_1, x_2, \ldots, x_n]^\top$ and the output is a scalar. If all second-order partial derivatives of function $f$ exist and are continuous, then the Hessian matrix $\boldsymbol{H}$ of $f$ is a matrix with $n$ rows and $n$ columns given by # # $$ # \boldsymbol{H} = # \begin{bmatrix} # \frac{\partial^2 f}{\partial x_1^2} & \frac{\partial^2 f}{\partial x_1 \partial x_2} & \dots & \frac{\partial^2 f}{\partial x_1 \partial x_n} \\ # \frac{\partial^2 f}{\partial x_2 \partial x_1} & \frac{\partial^2 f}{\partial x_2^2} & \dots & \frac{\partial^2 f}{\partial x_2 \partial x_n} \\ # \vdots & \vdots & \ddots & \vdots \\ # \frac{\partial^2 f}{\partial x_n \partial x_1} & \frac{\partial^2 f}{\partial x_n \partial x_2} & \dots & \frac{\partial^2 f}{\partial x_n^2} # \end{bmatrix}, # $$ # # Here, the second-order partial derivative is evaluated as # # $$\frac{\partial^2 f}{\partial x_i \partial x_j} = \frac{\partial }{\partial x_i} \left(\frac{\partial f}{ \partial x_j}\right).$$ # # # # ## Probability # # Finally, we will briefly introduce conditional probability, expectation, and uniform distribution. # # ### Conditional Probability # # Denote the probability of event $A$ and event $B$ as $\mathbb{P}(A)$ and $\mathbb{P}(B)$, respectively. The probability of the simultaneous occurrence of the two events is denoted as $\mathbb{P}(A \cap B)$ or $\mathbb{P}(A, B)$. If $B$ has non-zero probability, the conditional probability of event $A$ given that $B$ has occurred is # # $$\mathbb{P}(A \mid B) = \frac{\mathbb{P}(A \cap B)}{\mathbb{P}(B)},$$ # # That is, # # $$\mathbb{P}(A \cap B) = \mathbb{P}(B) \mathbb{P}(A \mid B) = \mathbb{P}(A) \mathbb{P}(B \mid A).$$ # # If # # $$\mathbb{P}(A \cap B) = \mathbb{P}(A) \mathbb{P}(B),$$ # # then $A$ and $B$ are said to be independent of each other. # # # ### Expectation # # A random variable takes values that represent possible outcomes of an experiment.
The expectation (or average) of the random variable $X$ is denoted as # # $$\mathbb{E}(X) = \sum_{x} x \mathbb{P}(X = x).$$ # # # ### Uniform Distribution # # Assume random variable $X$ obeys a uniform distribution over $[a, b]$, i.e. $X \sim U( a, b)$. In this case, random variable $X$ has the same probability of being any number between $a$ and $b$. # # # ## Summary # # * This section summarizes the basic knowledge of linear algebra, differentiation, and probability required to understand the contents in this book. # # # ## Exercise # # * Find the gradient of function $f(\boldsymbol{x}) = 3x_1^2 + 5e^{x_2}$. # # ## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2397) # # ![](../img/qr_math.svg)
book-d2l-en/chapter_appendix/math.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # How to draw a *scatter* plot with folium # + import numpy as np import sys sys.path.insert(0, 'folium') sys.path.insert(0, 'branca') import branca import folium # - # We create a sample of data thanks to `numpy`. n = 100 lats = np.random.uniform(38, 53, n) lngs = np.random.uniform(-17, 23, n) sizes = np.random.uniform(2, 20, n) colors = np.random.uniform(0, 50, n) # We create a colormap thanks to `branca`. You can also create your own function or use `matplotlib`. cm = branca.colormap.LinearColormap(['green', 'yellow', 'red'], vmin=0, vmax=50) print(cm(25)) cm # We create a `FeatureGroup` with all the points. # + f = folium.map.FeatureGroup() for lat, lng, size, color in zip(lats, lngs, sizes, colors): print lat,lng,size,color f.add_child( folium.features.CircleMarker( [lat, lng], radius=size, color=None, fill_color=cm(color), fill_opacity=0.6) ) # - # And draw the map ! m = folium.Map(location=[46,4], tiles='https://api.mapbox.com/styles/v1/sbell/cj48p13bt265k2qvucuielhm7/tiles/256/{z}/{x}/{y}?access_token=<KEY>', attr='North Star - MapBox', zoom_start=6) m.add_child(f) # That's all folks !
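# Optionally (not part of the original tutorial), the colour scale can be shown on the map as a legend and the result exported to a standalone HTML file. This assumes the `m` and `cm` objects created above; the caption text and output file name are arbitrary choices.

cm.caption = 'Colour value'
m.add_child(cm)
m.save('scatter_plot.html')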
tutorials_and_examples/external_tutorials/test_scatter_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # + [markdown] colab_type="text" id="p9FfatPz6MU3" # # **Homework 1: Linear Regression** # + [markdown] colab_type="text" id="fsoaNwrZA0ui" # 本次目標:由前 9 個小時的 18 個 features (包含 PM2.5)預測的 10 個小時的 PM2.5。<!-- 可以參考 <link> 獲知更細項的作業說明。--> # # <!-- 首先,從 https://drive.google.com/open?id=1El0zvTkrSuqCTDcMpijXpADvJzZC2Jpa 將整個資料夾下載下來,並將下載下來的資料夾放到自己的 Google Drive(注意:上傳到自己 Google Drive 的是資料夾 hw1-regression,而非壓縮檔) --> # # # 若有任何問題,歡迎來信至助教信箱 <EMAIL> # + [markdown] colab_type="text" id="U7RiAkkjCc6l" # # **Load 'train.csv'** # train.csv 的資料為 12 個月中,每個月取 20 天,每天 24 小時的資料(每小時資料有 18 個 features)。 # - # cd /data/jupyter/root/MachineLearning/Lee/wk1/ # + colab={"base_uri": "https://localhost:8080/", "height": 715} colab_type="code" id="1AfNX-hB3kN8" outputId="00b28ee9-4c33-445b-8110-b56a4b3db333" import sys import pandas as pd import numpy as np # from google.colab import drive # # !gdown --id '1wNKAxQ29G15kgpBy_asjTcZRRgmsCZRm' --output data.zip # # !unzip data.zip # 这里有个瓜,第一行应该去掉header = 0 data = pd.read_csv('./data/train.csv', header = 0, encoding = 'big5') # data = pd.read_csv('./train.csv', encoding = 'big5') print(len(data)) data[:22] # + [markdown] colab_type="text" id="gqUdj00pDTpo" # # **Preprocessing** # 取需要的數值部分,將 'RAINFALL' 欄位全部補 0。 # 另外,如果要在 colab 重覆這段程式碼的執行,請從頭開始執行(把上面的都重新跑一次),以避免跑出不是自己要的結果(若自己寫程式不會遇到,但 colab 重複跑這段會一直往下取資料。意即第一次取原本資料的第三欄之後的資料,第二次取第一次取的資料掉三欄之後的資料,...)。 # + colab={"base_uri": "https://localhost:8080/", "height": 907} colab_type="code" id="AIGP7XUYD_Yb" outputId="0394ad73-dbfa-4d7f-c006-85df5b51b7fb" data = data.iloc[:, 3:] data[data == 'NR'] = 0 raw_data = data.to_numpy() raw_data[0 : 18, :]# 第一天的18个features # + [markdown] colab_type="text" id="V7PCrVwX6jBF" # # **Extract Features (1)** # ![圖片說明](https://drive.google.com/uc?id=1LyaqD4ojX07oe5oDzPO99l9ts5NRyArH) # ![圖片說明](https://drive.google.com/uc?id=1ZroBarcnlsr85gibeqEF-MtY13xJTG47) # # 將原始 4320 * 18 的資料依照每個月分重組成 12 個 18 (features) * 480 (hours) 的資料。 480 = 24 * 20(每个月取20天) # + colab={"base_uri": "https://localhost:8080/", "height": 67} colab_type="code" id="B5sxtUY2o_wa" outputId="007a4df4-53e6-4003-ef85-d7f30de9ee85" raw_data[0 : 18, :][0]# 这种记录会有480条,组成一个月的sample,但是sample[0],是第一个features的汇总 # + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="HBnrGYXu9dZQ" outputId="896413ac-158e-4752-9bbe-f373887d110a" month_data = {} for month in range(12): sample = np.empty([18, 480]) for day in range(20): sample[:, day * 24 : (day + 1) * 24] = raw_data[18 * (20 * month + day) : 18 * (20 * month + day + 1), :] month_data[month] = sample month_data[0][0][:24]# 第一月WS_HR汇总(顺序反了WS_HR排到最前面) # + [markdown] colab_type="text" id="WhVmtFEQ9D6t" # # **Extract Features (2)** # ![alt text](https://drive.google.com/uc?id=1wKoPuaRHoX682LMiBgIoOP4PDyNKsJLK) # ![alt text](https://drive.google.com/uc?id=1FRWWiXQ-Qh0i9tyx0LiugHYF_xDdkhLN) # # 每個月會有 480hrs,每 9 小時形成一個 data,每個月會有 471 個 data,故總資料數為 471 * 12 筆,而每筆 data 有 9 * 18 的 features (一小時 18 個 features * 9 小時)。 # # 對應的 target 則有 471 * 12 個(第 10 個小時的 PM2.5) # + colab={"base_uri": "https://localhost:8080/", "height": 353} colab_type="code" id="dcOrC4Fi-n3i" outputId="7e8f90da-2108-42d2-dd0f-df59774135b5" np.set_printoptions(suppress = True) x = np.empty([12 * 472, 18 * 8], dtype = float) y = np.empty([12 * 472, 1], dtype = float) for month in range(12): for day 
in range(20): for hour in range(24): if day == 19 and hour > 14: continue x[month * 472 + day * 24 + hour, :] = month_data[month][:,day * 24 + hour : day * 24 + hour + 8].reshape(1, -1) #vector dim:18*9 y[month * 472 + day * 24 + hour, 0] = month_data[month][9, day * 24 + hour + 8] #value print(x) print(y) # + colab={"base_uri": "https://localhost:8080/", "height": 319} colab_type="code" id="Bw67iZ7UqwH7" outputId="5a960bc7-fdbf-4e03-cce3-2892354d28fd" x[25]#第一笔的第一个features(8小时)AMB_TEMP 9 * 18 # + [markdown] colab_type="text" id="1wOii0TX8IwE" # # **Normalize (1)** # # + colab={} colab_type="code" id="ceMqFoNI8ftQ" mean_x = np.mean(x, axis = 0) #18 * 9 std_x = np.std(x, axis = 0) #18 * 9 for i in range(len(x)): #12 * 471 for j in range(len(x[0])): #18 * 9 if std_x[j] != 0: x[i][j] = (x[i][j] - mean_x[j]) / std_x[j] # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="2qt0Dg2v4fDG" outputId="b8521e6b-e164-4d32-a180-768fd1330568" len(x)# 5652 = 12 * 471 # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="lfQiIOPE4xKN" outputId="9ffcd6a7-ca2f-4bcf-cce9-2c97b255ea88" len(x[0])# 162 = 9 * 18 # + colab={} colab_type="code" id="nGF0kSneGgEL" tag = x[0] # + [markdown] colab_type="text" id="NzvXP5Jya64j" # #**Split Training Data Into "train_set" and "validation_set"** # 這部分是針對作業中 report 的第二題、第三題做的簡單示範,以生成比較中用來訓練的 train_set 和不會被放入訓練、只是用來驗證的 validation_set。 # + colab={"base_uri": "https://localhost:8080/", "height": 756} colab_type="code" id="feF4XXOQb5SC" outputId="c8706aff-a3c6-4a2c-a25f-d561e0c46ba3" import math x_train_set = x[: math.floor(len(x) * 0.8), :] y_train_set = y[: math.floor(len(y) * 0.8), :] x_validation = x[math.floor(len(x) * 0.8): , :] y_validation = y[math.floor(len(y) * 0.8): , :] print(x_train_set) print(y_train_set) print(x_validation) print(y_validation) print(len(x_train_set)) print(len(y_train_set)) print(len(x_validation)) print(len(y_validation)) # + [markdown] colab_type="text" id="Q-qAu0KR_ZRR" # # **Training** # ![alt text](https://drive.google.com/uc?id=1xIXvqZ4EGgmxrp7c9r0LOVbcvd4d9H4N) # ![alt text](https://drive.google.com/uc?id=1S42g06ON5oJlV2f9RukxawjbE4NpsaB6) # ![alt text](https://drive.google.com/uc?id=1BbXu-oPB9EZBHDQ12YCkYqtyAIil3bGj) # # (和上圖不同處: 下面的 code 採用 Root Mean Square Error) # # 因為常數項的存在,所以 dimension (dim) 需要多加一欄;eps 項是避免 adagrad 的分母為 0 而加的極小數值。 # # 每一個 dimension (dim) 會對應到各自的 gradient, weight (w),透過一次次的 iteration (iter_time) 學習。 # + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="LfPOtbWJ5Mxu" outputId="703fc647-2c73-43f4-9d14-20848baf08a7" dim = 18 * 8 + 1 w = np.zeros([dim, 1]) x_ = np.concatenate((np.ones([12 * 472, 1]), x), axis = 1).astype(float) print(len(x_[0])) print(len(w)) # + [markdown] colab_type="text" id="ECOONa1E605y" # 这里补了为1的一列,用于常数项使用,即$x_{np} * w_p = w_p$ # + colab={"base_uri": "https://localhost:8080/", "height": 336} colab_type="code" id="cCzDfxBFBFqp" outputId="4e6c0b52-dd41-4c88-c0bc-b077c8868021" learning_rate = 200 iter_time = 20000 adagrad = np.zeros([dim, 1]) eps = 0.0000000001 for t in range(iter_time): loss = np.sqrt(np.sum(np.power(np.dot(x_, w) - y, 2))/472/12)#rmse if(t%100==0): print(str(t) + ":" + str(loss)) gradient = 2 * np.dot(x_.transpose(), np.dot(x_, w) - y) #dim*1 adagrad += gradient ** 2 w = w - learning_rate * gradient / np.sqrt(adagrad + eps) np.save('weight.npy', w) w # + [markdown] colab_type="text" id="ZqNdWKsYBK28" # # **Testing** # ![alt 
text](https://drive.google.com/uc?id=1165ETzZyE6HStqKvgR0gKrJwgFLK6-CW) # # 載入 test data,並且以相似於訓練資料預先處理和特徵萃取的方式處理,使 test data 形成 240 個維度為 18 * 9 + 1 的資料。 # + colab={} colab_type="code" id="AALygqJFCWOA" # testdata = pd.read_csv('gdrive/My Drive/hw1-regression/test.csv', header = None, encoding = 'big5') testdata = pd.read_csv('data/test.csv', header = None, encoding = 'big5') test_data = testdata.iloc[:, 2:] testdata[:20] # + colab={} colab_type="code" id="CwxTzd8_AslU" test_data[:20] # + colab={} colab_type="code" id="UUlvYSUj8Ygk" test_data[test_data == 'NR'] = 0 test_data = test_data.to_numpy() test_x = np.empty([240, 18*8], dtype = float) test_y = np.empty([240, 1], dtype = float) for i in range(240): test_x[i, :] = test_data[18 * i: 18* (i + 1), :8].reshape(1, -1) test_y[i, :] = test_data[9 * (i+1), 8] test_x[0] test_y[0] # + colab={} colab_type="code" id="SFzXYGRS8VO2" mean_x = np.mean(test_x, axis = 0) #18 * 9 std_x = np.std(test_x, axis = 0) #18 * 9 for i in range(len(test_x)): for j in range(len(test_x[0])): if std_x[j] != 0: test_x[i][j] = (test_x[i][j] - mean_x[j]) / std_x[j] test_x[0] # + colab={} colab_type="code" id="jfz83743Gl6H" tag # + colab={} colab_type="code" id="idQ_DtStGVFY" len(test_x[0]) # + colab={} colab_type="code" id="QHoen7JeGU8I" test_x = np.concatenate((np.ones([240, 1]), test_x), axis = 1).astype(float) # + [markdown] colab_type="text" id="dJQks9JEHR6W" # # **Prediction** # 說明圖同上 # # ![alt text](https://drive.google.com/uc?id=1165ETzZyE6HStqKvgR0gKrJwgFLK6-CW) # # 有了 weight 和測試資料即可預測 target。 # + colab={} colab_type="code" id="5DbpH4iaJbiX" w = np.load('weight.npy') # + colab={} colab_type="code" id="j9FnQetyJbZf" w # + colab={} colab_type="code" id="jNyB229jHsEQ" ans_y = np.dot(test_x, w) print(ans_y[:10]) print(test_y[:10]) # + [markdown] colab_type="text" id="HKMKW7RzHwuO" # # **Save Prediction to CSV File** # # + colab={} colab_type="code" id="Dwfpqqy0H8en" import csv with open('submit.csv', mode='w', newline='') as submit_file: csv_writer = csv.writer(submit_file) header = ['id', 'value'] print(header) csv_writer.writerow(header) for i in range(240): row = ['id_' + str(i), ans_y[i][0]] csv_writer.writerow(row) print(row) # + [markdown] colab_type="text" id="Y54yWq9cIPR4" # 相關 reference 可以參考: # # Adagrad : # https://youtu.be/yKKNr-QKz2Q?list=PLJV_el3uVTsPy9oCRY30oBPNLCo89yu49&t=705 # # RMSprop : # https://www.youtube.com/watch?v=5Yt-obwvMHI # # Adam # https://www.youtube.com/watch?v=JXQT_vxqwIs # # # 以上 print 的部分主要是為了看一下資料和結果的呈現,拿掉也無妨。另外,在自己的 linux 系統,可以將檔案寫死的的部分換成 sys.argv 的使用 (可在 terminal 自行輸入檔案和檔案位置)。 # # 最後,可以藉由調整 learning rate、iter_time (iteration 次數)、取用 features 的多寡(取幾個小時,取哪些特徵欄位),甚至是不同的 model 來超越 baseline。 # # Report 的問題模板請參照 : https://docs.google.com/document/d/1s84RXs2AEgZr54WCK9IgZrfTF-6B1td-AlKR9oqYa4g/edit # + colab={} colab_type="code" id="UEgAOfVBi-Lw"
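# + [markdown]
# (Optional sketch, not part of the original assignment.) The `x_validation` / `y_validation` split created above is never actually scored. One way to evaluate the saved weights on it is shown below; note that `w` was fitted on the full `x`, so for a fair held-out estimate you would retrain on `x_train_set` first. Variable names follow the cells above.
# +
# Add the bias column, exactly as was done for training, then compute the RMSE on the validation split.
x_validation_b = np.concatenate((np.ones([x_validation.shape[0], 1]), x_validation), axis = 1).astype(float)
w = np.load('weight.npy')
val_rmse = np.sqrt(np.mean(np.power(np.dot(x_validation_b, w) - y_validation, 2)))
print('Validation RMSE:', val_rmse)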
Hw/H1_Regression/hw1_regression (1).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.0 # language: julia # name: julia-1.6 # --- # # Chapter-9 Standard Libraries # This notebook contains the sample source code explained in the book *Hands-On Julia Programming, <NAME>, 2021, bpb Publications. All Rights Reserved*. using Pkg pkg"activate ." pkg"instantiate" # ## 9.1 Introduction # # There are various functions and methods that are part of Julia already. A developer needs to use them directly rather than having to code for them. This chapter is a bird's eyeview of such methods. # ### Modules # # Namespaces where a method or a type reside. Using a method or type outside the module may require explicit qualification. # + module m1 function f() println("Defined as m1.f") end function g() println("Defined as m1.g") end function h() println("Defined as m1.h") end export f end module m2 function f() println("Defined as m2.f") end function g() println("Defined as m2.g") end export g end # - names(m1) names(m2) f() m1.f() m2.f() using .m1 f() g() using Main.m2 g() using .m1: g g() using .m1: h h() # ## 9.2 Standard Modules # # `Core`, `Base` and `Main` the bare minimum modules of a Julia shell. `m1` and `m2` here are submodules of `Main`. varinfo() x = 1 varinfo() # ## 9.3 System # # The submodule that interacts with the operating system. System command execution being one of them. run(`cmd /c dir`) Sys.isunix(), Sys.iswindows() # ## 9.4 Filesystem # # File organization and manipulation (not read and write). old_dir = pwd() cd("..") pwd() cd(old_dir) pwd() cd(".."); pwd() readdir() #Same as readdir(pwd()) for (root, dirs, files) in walkdir("Chapter-09") println("Directories in $root") for dir in dirs println(joinpath(root, dir)) # path to directories end println("Files in $root") for file in files println(joinpath(root, file)) # path to files end end cd(old_dir); pwd() fs = stat("Chapter-9 Standard Libraries.ipynb") # ## 9.5 Parallel Programming # # Execution of programs in tandem in various execution environments, yet be able to obtain meaningful and synchronized results. Performance and resource utilization are two major outcomes of this kind of programming. # ### Asynchronous Programming # # Breaking the code into small chunks of executable pieces that can be executed in parallel. Depending on the kind of processing permitted by the hardware and OS, these may run in one execution thread or multiple. # #### Tasks t = Task() do sleep(10) println("done") end schedule(t) t = Task() do sleep(10) println("done") end schedule(t); wait(t) t = @task begin sleep(5) println("done") end @sync begin sleep(5) println("done") end # #### Channels c = Channel(8) c = Channel{Int}(4) # + @async begin for i = 1:100 println("Adding $i to channel") put!(c, i) end end @async begin for i = 1:100 v = take!(c) println("Removing value: $v") end end # - close(c) # ### Multithreading # # Julia can be made to run on a multiple threads mode. In such a conditions the tasks can be parallelized on these threads. Synchronization objects can be used across the executing threads. Threads.nthreads() Threads.@spawn for i=1:100 sleep(1) println("Step: $i") end # ### Distributed Computing # # This is cluster computing infrastrcuture, where a cluster can be configured across Julia processes in the same machine or in another machine. 
These independent processes can execute independently yet be able to share data, pass messages and communicate over a shared channel. # ## 9.6 IO and Network # # Julia accesses input and output devices as streams. The network devices are also interfaced like sockets. IO is an important stream based interface to understand. # ### Default Stream # # `stdin`, `stdout` and `stderr` are the standard streams for input, output and error in most processes. `stdin` integration with Jupyter is limited. All those samples may best be reviewed in a REPL console. write(stdout, "Hello World") write(stdout, "\u2200 x \u2203 y") # ### Text I/O # ### Binary I/O # ### File I/O # # Files are also accessed as `IO` streams for both read and write operations. However, the `IO` streams are obtained by `open` call. Any open stream must be closed with a `close` function call. write("hello.txt", "Hello World") fd = open("hello.txt", "r"); data = read(fd); write(fd, "This is second line") close(fd) write(stdout, data) open("hello.txt", "r") do fd data = read(fd) write(stdout, data) end write("hello.txt", "Hello World"); data = read("hello.txt"); write(stdout, data); # Files are opened for `read` or `write`. A file opened for reading cannot be written into. open("hello.txt", "r") do fd while !eof(fd) b = read(fd, 1) write(stdout, b) end end rm("hello.txt") # ### Delimited Files # # Generalized command separated value (CSV) kind of files. Used extensively in spreadsheets. Also interfaced in data science applications as an easier way to share data. using DelimitedFiles x = collect(1:100); y = Float64.(collect(101:200)); open("values.csv", "w") do f println(f, "X,Y") writedlm(f, [x y], ',') end data, header = readdlm("values.csv", ',', header=true); size(data, 1) header rm("values.csv") # ### Network # # Just like files are opened with open to obtain the `IO` stream to interface, sockets are equivalent network communication end points to obtain connection streams. However, if you could use download method to provide a URL and download files. download("https://github.com/JuliaLang/julia/releases/download/v1.5.3/julia-1.5.3.tar.gz", "julia-1.5.3.tar.gz") using Sockets @async begin server = listen(3000) while true s = accept(server) @async while isopen(s) msg = readline(s, keep=true) write(s, "Server Response: $msg") end end end c = connect(3000) @async while isopen(c) println(stdout, readline(c, keep=true)) end for i = 1:5 println(c, "Client Msg id: $i") end close(c) # ### Memory I/O # # Strings cannot be manipulated. It may help to create a byte array for `IO` in the memory and use that for text manipulations. In the end the byte array can be converted to a String. io = IOBuffer() println(io, "This is Line 1") println(io, "This is Line 2") str = String(take!(io)) A = fill(10, (5, 20)) A[2, 3] = 20 open("mmap.bin", "w+") do fd write(fd, size(A, 1)) write(fd, size(A, 2)) write(fd, A) end # Memory mapping is reverse problem. A file can be mapped to memory and accessed selectively page by page. This way the whole file need not be loaded in the memory. using Mmap fd = open("mmap.bin", "r") nr = read(fd, Int) nc = read(fd, Int) A2 = Mmap.mmap(fd, Matrix{Int}, (nr, nc)) println("A2[1, 1]: ", A2[1, 1], " A2[2, 3]: ", A2[2, 3]) close(fd) A2 = nothing GC.gc() rm("mmap.bin") # ## 9.7 Constants # # There are many system and Julia related constants. Mathematical constants are also defined in the standard library. 
VERSION C_NULL Sys.BINDIR Sys.CPU_THREADS Sys.WORD_SIZE Sys.KERNEL Sys.MACHINE Sys.ARCH Sys.ENV MathConstants.pi MathConstants.e # ## 9.8 Notable Modules # # Miscellaneous modules of Julia library. # ### Dates # # User renderable date and time representations. In the system the date is just a continuously increasing counter from a datum. These functions make meaningful period associations and provide the correct date and time. using Dates now() DateTime(2020) t = DateTime(2020, 10, 31, 01, 02, 03) Date(2020) d = Date(t) DateTime(d) t > d DateTime(2020) < now() DateTime(2020) == Date(2020) p = Month(3) + Day(10) + Hour(15) typeof(p) now() + p Date(now())+ Day(300) now() - DateTime(2020) now() - Date(2020) Date(now()) - Date(2020) Year(now()) Month(now()) Day(now()) Week(now()) year(now()) tonext(today()) do d dayofweek(d) == Thursday && dayofweekofmonth(d) == 4 && month(d) == November end # ### Logging # # Long running processes or servers running in Julia need to provide consistent and standard log reporting. This module helps create such reports. @debug "This is a debug message $(sum(rand(100)))" @info "This message is just informational" @warn "This is a warning message" @error "This is an error message" A = ones(3, 4) @info "A is all ones" A # The `Logging` module provides interfaces to implement complete loggers. It also provides the ability to override the global logger with a different local one. using Logging open("file.log", "w") do f with_logger(SimpleLogger(f)) do @info "This is my simple info log" @error "This is an error written to file" @warn "This is a warning message" @info "Current logger is same as global logger" current_logger() == global_logger() end end write(stdout, read("file.log")); rm("file.log") @info "Current logger is same as global logger" current_logger() == global_logger() # ### Statistics # # This module provides simple functions for central tendencies like mean, median and standard deviations etc. Functions for correlations and covariances are also provided. However, the treatment of statistics in thi module are highly rudimentary. using Statistics mean(1:10) median(1:10) std(1:10) var(1:10) cor(1:10, rand(10)) cor(1:10, 11:20) cov(1:10, rand(10)) # ### Random Numbers # # Provides methods for random number generation and other permutation and combination functions. using Random rand(Int, 2) rand(2, 3) rand!(zeros(2, 3)) rand(1:4, (2, 3)) # ## 9.9 Conclusion # # ## Exercises
Chapter 09/Chapter-9 Standard Libraries.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## YAML mask # # We can also define a set of Design Of Experiments (DOEs) in a YAML file, together with their placement information # + attributes={"classes": [], "id": "", "n": "1"} import pathlib import pytest import gdsfactory as gf from gdsfactory.autoplacer.yaml_placer import place_from_yaml from gdsfactory.mask.merge_metadata import merge_metadata def test_mask(): """Write the DOE sweeps defined in does.yml, place them according to the same file and merge the metadata for the resulting mask GDS.""" cwd = pathlib.Path().cwd() does_path = cwd / "does.yml" doe_root_path = cwd / "build" / "cache_doe_directory" mask_path = cwd / "build" / "mask" gdspath = mask_path / "mask.gds" mask_path.mkdir(parents=True, exist_ok=True) gf.sweep.write_sweeps( str(does_path), doe_root_path=doe_root_path, ) top_level = place_from_yaml(does_path, root_does=doe_root_path) top_level.write(str(gdspath)) merge_metadata(gdspath) assert gdspath.exists() return gdspath gdspath_mask = test_mask() gf.show(gdspath_mask) # -
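# A small follow-up sketch using only the standard library: after test_mask() has run, you can list what ended up under the build folder to confirm that both the sweep cache and the final mask GDS were written. The directory names are the ones defined in test_mask above.

# +
build_dir = pathlib.Path().cwd() / "build"
for path in sorted(build_dir.rglob("*")):
    if path.is_file():
        print(path.relative_to(build_dir), "-", path.stat().st_size, "bytes")
# -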
docs/notebooks/12_YAML_mask.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from optparse import OptionParser import inspect import torch from torch.autograd import Variable def hook_pytorch(): torch.FloatTensor.old___add__ = torch.FloatTensor.__add__ def new___add__(self,other): print("__add__") return self.old___add__(other) torch.FloatTensor.__add__ = new___add__ torch.FloatTensor.old_add = torch.FloatTensor.add def new_add(self,other): print("add") return self.old_add(other) torch.FloatTensor.add = new_add hook_pytorch() # - x = torch.FloatTensor([1,2,3,4]) x.add(x) x + x x = Variable(torch.FloatTensor([1,2,3,4]),requires_grad=True) y = Variable(torch.FloatTensor([2,3,4,5]),requires_grad=True) z = x*y z z.backward(torch.ones(4))
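# The two hooks above follow the same pattern: keep a reference to the original method, install a printing wrapper, and delegate. A small helper can apply that pattern generically. This is only a sketch; it assumes, as the cells above already demonstrate for `__add__` and `add`, that methods of this torch.FloatTensor class can be reassigned.

# +
def trace_method(cls, name):
    """Replace cls.<name> with a wrapper that prints the method name before delegating to the original."""
    original = getattr(cls, name)
    def wrapper(self, *args, **kwargs):
        print(name)
        return original(self, *args, **kwargs)
    setattr(cls, name, wrapper)

# hook a further arithmetic method with a single call
trace_method(torch.FloatTensor, "__mul__")
# -

# After the final cell has called z.backward(torch.ones(4)), the accumulated gradients are available as x.grad and y.grad.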
notebooks/PyTorch Network Tensor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Exercise 1: # # - consedering the following code # # ```python # import datetime # we will use this for date objects # # class Person: # # def __init__(self, name, surname, birthdate, address, telephone, email): # self.name = name # self.surname = surname # self.birthdate = birthdate # # self.address = address # self.telephone = telephone # self.email = email # # def age(self): # today = datetime.date.today() # age = today.year - self.birthdate.year # # if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day): # age -= 1 # # return age # # person = Person( # "Jane", # "Doe", # datetime.date(1992, 3, 12), # year, month, day # "No. 12 Short Street, Greenville", # "555 456 0987", # "<EMAIL>" # ) # # print(person.name) # print(person.email) # print(person.age()) # ``` # Explain what the following variables refer to, and their scope: # 1. Person # 2. person # 3. surname # 4. self # 5. age (the function name) # 6. age (the variable used inside the function) # 7. self.email # 8. person.email # ### write your answer here # 1. ... # 2. ... # 3. ... # 4. ... # 5. ... # 6. ... # 7. ... # 8. ... # ## Exercise 2 # 1. Rewrite the `Person` class so that a person’s age is calculated for the first time when a new person instance is created, and recalculated (when it is requested) if the day has changed since the last time that it was calculated. # + import datetime # we will use this for date objects class Person: def __init__(self, name, surname, birthdate, address, telephone, email): self.name = name self.surname = surname self.birthdate = birthdate self.address = address self.telephone = telephone self.email = email today = datetime.date.today() age = today.year - self.birthdate.year if today < datetime.date(today.year, self.birthdate.month, self.birthdate.day): age -= 1 self.age = age person = Person( "Jane", "Doe", datetime.date(2000, 1, 6), # year, month, day "No. 12 Short Street, Greenville", "555 456 0987", "<EMAIL>" ) # - # ## Exercise 3 # 1. Explain the differences between the attributes `name`, `surname` and `profession`, and what values they can have in different instances of this class: # ```python # class Smith: # surname = "Smith" # profession = "smith" # # def __init__(self, name, profession=None): # self.name = name # if profession is not None: # self.profession = profession # ``` # ## Exercise 4: # 1. Create a class called `Numbers`, which has a single class attribute called `MULTIPLIER`, and a constructor which takes the parameters `x` and `y` (these should all be numbers). # 1. Write a method called `add` which returns the sum of the attributes `x` and `y`. # 2. Write a class method called `multiply`, which takes a single number parameter `a` and returns the product of `a` and `MULTIPLIER`. # 3. Write a static method called `subtract`, which takes two number parameters, `b` and `c`, and returns `b - c`. # 4. Write a method called `value` which returns a tuple containing the values of `x` and `y`. Make this method into a property, and write a setter and a deleter for manipulating the values of `x` and `y`. # ## Exercise 5: # 1. Create an instance of the `Person` class from example 2. Use the `dir` function on the instance. Then use the `dir` function on the class. # 1. What happens if you call the `__str__` method on the instance? 
Verify that you get the same result if you call the `str` function with the instance as a parameter. # 2. What is the type of the instance? # 3. What is the type of the class? # 4. Write a function which prints out the names and values of all the custom attributes of any object that is passed in as a parameter. # ## Exercise 6: # Write a class for creating completely generic objects: its `__init__` function should accept any number of keyword parameters, and set them on the object as attributes with the keys as names. Write a `__str__` method for the class – the string it returns should include the name of the class and the values of all the object’s custom instance attributes.
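# One possible solution sketch for Exercise 6 (the class name `Generic` is just a placeholder):

# +
class Generic:
    """A completely generic object: every keyword argument becomes an instance attribute."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __str__(self):
        attributes = ", ".join("{}={!r}".format(key, value) for key, value in vars(self).items())
        return "{}({})".format(type(self).__name__, attributes)

print(Generic(name="Jane", surname="Doe", age=33))
# -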
exercices/python_oop.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # e-periodica: accessing metadata and fulltexts # + [markdown] toc=true # <h1><span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#0-Introduction" data-toc-modified-id="0-Introduction-0">0 Introduction</a></span><ul class="toc-item"><li><span><a href="#0.0-Scope-and-content" data-toc-modified-id="0.0-Scope-and-content-0.1">0.0 Scope and content</a></span></li><li><span><a href="#0.1-E-periodica" data-toc-modified-id="0.1-E-periodica-0.2">0.1 E-periodica</a></span></li><li><span><a href="#0.2-OAI-PMH" data-toc-modified-id="0.2-OAI-PMH-0.3">0.2 OAI-PMH</a></span></li></ul></li><li><span><a href="#1-Metadata-access-with-Polymatheia" data-toc-modified-id="1-Metadata-access-with-Polymatheia-1">1 Metadata access with Polymatheia</a></span><ul class="toc-item"><li><span><a href="#1.0-Prerequisites" data-toc-modified-id="1.0-Prerequisites-1.1">1.0 Prerequisites</a></span></li><li><span><a href="#1.1-Start-with-the-OAI-interface-via-Polymatheia" data-toc-modified-id="1.1-Start-with-the-OAI-interface-via-Polymatheia-1.2">1.1 Start with the OAI interface via Polymatheia</a></span></li><li><span><a href="#1.2-Retrieve-metadata-records-via-Polymatheia" data-toc-modified-id="1.2-Retrieve-metadata-records-via-Polymatheia-1.3">1.2 Retrieve metadata records via Polymatheia</a></span></li><li><span><a href="#1.3-Save--and-recover-complex-metadata-structures" data-toc-modified-id="1.3-Save--and-recover-complex-metadata-structures-1.4">1.3 Save and recover complex metadata structures</a></span></li></ul></li><li><span><a href="#2-Direct-metadata-access-via-OAI-PMH" data-toc-modified-id="2-Direct-metadata-access-via-OAI-PMH-2">2 Direct metadata access via OAI-PMH</a></span><ul class="toc-item"><li><span><a href="#2.0-Prerequisites" data-toc-modified-id="2.0-Prerequisites-2.1">2.0 Prerequisites</a></span></li><li><span><a href="#2.1-Start-with-the-native-OAI-interface" data-toc-modified-id="2.1-Start-with-the-native-OAI-interface-2.2">2.1 Start with the native OAI interface</a></span></li><li><span><a href="#2.2--Download-metadata-records" data-toc-modified-id="2.2--Download-metadata-records-2.3">2.2 Download metadata records</a></span></li><li><span><a href="#2.3-Download-metadata-by-set" data-toc-modified-id="2.3-Download-metadata-by-set-2.4">2.3 Download metadata by set</a></span></li></ul></li><li><span><a href="#3-Download-fulltext-files-from-e-periodica-website" data-toc-modified-id="3-Download-fulltext-files-from-e-periodica-website-3">3 Download fulltext files from e-periodica website</a></span><ul class="toc-item"><li><span><a href="#3.0-Prerequisites" data-toc-modified-id="3.0-Prerequisites-3.1">3.0 Prerequisites</a></span></li><li><span><a href="#3.1-Download-fulltext-files-by-e-periodica-ID" data-toc-modified-id="3.1-Download-fulltext-files-by-e-periodica-ID-3.2">3.1 Download fulltext files by e-periodica ID</a></span></li><li><span><a href="#3.2-Download-fulltext-files-by-set" data-toc-modified-id="3.2-Download-fulltext-files-by-set-3.3">3.2 Download fulltext files by set</a></span></li></ul></li></ul></div> # - # ## 0 Introduction # ### 0.0 Scope and content # This Python [Jupyter notebook](https://jupyter.org/) aims to help you with **accessing metadata and fulltexts of the [e-periodica platform](https://www.e-periodica.ch/)**. 
It uses the OAI-PMH interface of the e-periodica service for retrieving metadata in different formats, and the e-rara website in addition for downloading fulltexts. # # The notebook consists of three parts: # 1. Metadata access with Polymatheia # 2. Direct metadata access via OAI-PMH # 3. Download fulltext files from e-periodica website. # # So, there are two ways to access e-periodica metadata. The **first chapter** introduces the Polymatheia library, which allows very convenient requests to the OAI interface by wrapping otherwise more elaborate functions. Working with Polymatheia is an **easy solution for quick access** without going deep into coding. # # The **second (and the third) chapter** shows how to access the OAI interface natively. Hence, more code will be needed and **some functions will be defined**. You can use the functions without deeper programming skills - nevertheless these might be helpful if you want to adapt those functions. # # You may start from the beginning and walk trough the whole notebook or jump to the section that suits you. Also, it's a good idea to play around with the code in the cells and see what happens. Have fun! # # Have any comments, questions and the like? Try kathi.woitas[at]ub.unibe.ch. # ### 0.1 E-periodica # [E-periodica](https://www.e-periodica.ch/?lang=en) is the online platform for journals from Switzerland. It holds more than 500 freely accessible journals from the 18th century through to the present, covering subjects from natural sciences through architecture, mathematics, history, geography, art and culture to the environment and social policies. You may consult e-periodica's [Terms of Use](https://www.e-periodica.ch/digbib/about3?lang=en) to check the licences of the e-periodica documents. # ### 0.2 OAI-PMH # The **Open Archives Initiative Protocol for Metadata Harvesting** (**OAI-PMH**) is a well-known interface for libraries, # archives etc. for delivering their metadata in various formats - librarian's specific like *[MODS](http://www.loc.gov/standards/mods/index.html)* and common ones like *[Dublin Core](https://www.dublincore.org/specifications/dublin-core/dces/)* alike. # Further information on OAI-PMH is available [here](http://www.openarchives.org/OAI/openarchivesprotocol.html). # # First of all, a few OAI-PMH related concepts should be introduced: # # **repository**: # A repository is a server-side application that exposes metadata via OAI-PMH. It can process the *six OAI-PMH request types* aka *OAI verbs*. So, the e-periodica OAI-PMH facility is a repository in this sense. # # **harvester**: OAI-PMH client applications are called harvesters. When you are approaching the OAI-PMH interface and requesting records, you do *harvesting*. # # **resource**: A resource is the object that the delivered metadata is "about". Of course in case of e-periodica OAI-PMH, the referred resources are the publications of the e-periodica platform. Note that resources themselves are always outside of the OAI-PMH. # # **record**: A record is the XML-encoded container for the metadata of a single resource (i.e. publication) item. It consists of a header and a metadata section. # # **header**: # The record header contains the unique identifier of the record, a datestamp and optionally the set specification. # # **metadata**: The record metadata contains the resource (i.e. publication) metadata in a defined metadata format. # # **set**: A structure for grouping records for selective harvesting. 
Sets often refer to collections of thematic scopes/subjects, to collections of different owners/institutions (in case of aggregated content) or to collections of certain publication types. # # Now let's look at some example requests of the e-periodica OAI interface with the **six OAI verbs**: # # - Identify ([specification](http://www.openarchives.org/OAI/openarchivesprotocol.html#Identify)): # https://www.e-periodica.ch/oai?verb=Identify # # - ListSets ([spec](http://www.openarchives.org/OAI/openarchivesprotocol.html#ListSets)): # https://www.e-periodica.ch/oai?verb=ListSets # # - ListMetadataFormats ([spec](http://www.openarchives.org/OAI/openarchivesprotocol.html#ListMetadataFormats)): # https://www.e-periodica.ch/oai?verb=ListMetadataFormats # - ListIdentifiers ([spec](http://www.openarchives.org/OAI/openarchivesprotocol.html#ListIdentifiers)): # https://www.e-periodica.ch/oai?verb=ListIdentifiers&metadataPrefix=oai_dc&set=ddc:360 # # - GetRecord ([spec](http://www.openarchives.org/OAI/openarchivesprotocol.html#GetRecord)): # https://www.e-periodica.ch/oai?verb=GetRecord&metadataPrefix=oai_dc&identifier=oai:agora.ch:acd-002:fdf8:f53e:61e4::18 # # - ListRecords ([spec](http://www.openarchives.org/OAI/openarchivesprotocol.html#ListRecords)): # https://www.e-periodica.ch/oai?verb=ListRecords&set=ddc:490&metadataPrefix=oai_dc # # These examples with the given *parameters* are somewhat easy to encode - and so is building similar request URLs. # But how to download the delivered data and to interact with it? That's the aim of this Notebook. So, here we go! # ## 1 Metadata access with Polymatheia # ### 1.0 Prerequisites # First, some basic Python libraries have to be imported. Just **click on the arrow icon** on the left side of the code cell - or first click into the cell and then select 'Crtl' + 'Enter' or 'Shift' + 'Enter'. When the code runs, a star symbol next to the cell appears and when it's done a number turns up. And most important, the provoked output is given beneath the code cell. import os # navigate and manipulate file directories import pandas as pd # pandas is the Python standard library to work with dataframes from IPython.display import IFrame # embed website views in Jupyter Notebook print("Successfully imported necessary libraries") # **Polymatheia** is a Python library to support working with digital library/archive metadata. It supports accessing metadata of different formats from OAI-PMH and also offers methods to handle the retrieved data. The metadata will be turned into a Python-style ['navigable dictionary'](https://polymatheia.readthedocs.io/en/latest/concepts.html), which allows convenient access to certain metadata fields. # Its aim is not necessarily to cover all ways of working with metadata, but to make it easy to undertake most types of tasks and analysis. See the [documentation](https://polymatheia.readthedocs.io/en/latest/) of the Polymatheia library. # # Using Polymatheia package **for the first time**, you will need to **install this code library**: Just remove the `#` from the second line of code, and then execute the cell like the one before. 
# de-comment !pip command for installing polymatheia # #!pip install polymatheia from polymatheia.data.reader import OAISetReader # list OAI sets from polymatheia.data.reader import OAIMetadataFormatReader # list available metadata formats from polymatheia.data.reader import OAIRecordReader # read one metadata record from OAI from polymatheia.data.writer import PandasDFWriter # easy transformation of flat data into a dataframe print("Successfully imported necessary libraries") # https://www.e-periodica.ch/oai/ will be the **base URL** for all OAI requests. To make live easier we put it into the variable `oai`. oai = 'https://www.e-periodica.ch/oai/' # ### 1.1 Start with the OAI interface via Polymatheia # First, it's good to know **which collections or *sets* are available**. To take a look at the sets from the native OAI interface let's take a look of https://www.e-periodica.ch/oai?verb=ListSets with the `IFrame` function. For every set, there is the `setName`, and a `setSpec`, which is a short cut for the set name and will be used as parameter with the OAI accesses. IFrame('https://www.e-periodica.ch/oai?verb=ListSets', width=970, height=300) # That's nice, but how to retrieve these contents as data? Polymatheia's 'OAISetReader' does this conveniently. Here's how it works. # + reader = OAISetReader(oai) # instantiate ('make') a OAISetReader named reader # 'Instantiation' is a standard procedure with Python, so it's a good idea to get familiar with it. print(type(reader)) # print the object type of 'reader' for information # - for x in reader: # for-loop which iterates through the reader-content and prints each entry print(x) # note that 'x' is an arbitrary term # We might put this together and then turn the retrieved data into a *Pandas dataframe* with the 'PandasDFWriter' command. A **dataframe** is a table-like data object, which is a nice breakdown and moreover an useful format for further investigation. *Pandas* is the standard library in Python for dataframe handling. # + reader = OAISetReader(oai) setspec = [] # make an empty list named 'setspec' for x in reader: setspec.append(x) # .append adds all the single reader-contents to the list 'setspec' print(setspec[0:3]) # print the first 3 items of the list (of key-value pairs), just to see print('---') # print a separating line df = PandasDFWriter().write(setspec) # write list 'setspec' into a Pandas dataframe named 'df' df # shows 'df' # - # If a great number of sets are given, you might **search for a certain collection by string**. This can be also helpful to **get to know the set short cut** `setSpec` used by the OAI interface for further investigation of a certain set. # Example: Searching for strings 'art' or 'Art' in the 'setName' column for i in df.index: # for-loop which iterates through 'df' contents if 'art' in df.setName[i] or 'Art' in df.setName[i]: # if-condition which looks for 'art' or 'Art' # in the 'setName' column print(df.loc[i]) # print 'df' row, if if-condition is True # It's also very useful to know in which **formats the metadata records** are available. The genuine interface does this by requesting the URL https://www.e-periodica.ch/oai?verb=ListMetadataFormats. Here, we use the 'OAIMetadataFormatReader' from Polymatheia. # # As you might see, you can directly select some information like `metadataPrefix` and `metadataNamespace` from the retrieved data by **using the dot-notation**. Dot-notation just adds the desired subordinated element after a dot. 
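# As a brief aside before moving on to the metadata formats: the substring search over the set table above can also be written as a single pandas expression instead of an explicit loop. This is only an alternative sketch; it assumes the dataframe df built from the sets is still in memory.

# +
# case-insensitive search for 'art' in the setName column of the set dataframe
df[df['setName'].str.contains('art', case=False)]
# -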
reader = OAIMetadataFormatReader(oai) for formats in reader: print(formats) print(formats.metadataPrefix) # dot-notation: chooses sub-element 'metadataPrefix' reader = OAIMetadataFormatReader(oai) [formats.metadataNamespace for formats in reader] # shorter notation for the for-loops above, which outputs a list # ### 1.2 Retrieve metadata records via Polymatheia # Retrieving available **metadata as a bunch** is simple with the 'OAIRecordReader' command. Just specify the following parameters in the 'OAIRecordReader' function: # # - `metadata_prefix`: mandatory # - `set_spec` (the short cut for the set you want to retrieve): not mandatory, but default will be *all = many* available records! # - `max_records` (the number of records): not mandatory, but default will be *all = many* available records! # # To compare this result with the native OAI interface you might check the top item of # https://www.e-periodica.ch/oai?verb=ListRecords&set=ddc:720&metadataPrefix=oai_dc. # reader = OAIRecordReader(oai, metadata_prefix='oai_dc', set_spec='ddc:720', max_records=1) [record for record in reader] # To access a certain metadata content, you can **follow down the *navigable dictionary* path** with dot-notation, like the following example. The `identifier` element in the record's `header` section denotes the record identifier and the `setSpec` element the set short cut, which was used for selection. reader = OAIRecordReader(oai, set_spec='ddc:720', metadata_prefix='oai_dc', max_records=1) for record in reader: print(record.header.identifier._text) # compare to the first lines of the output above print(record.header.setSpec._text) # For retrieving contents from the `metadata` section a certain insertion has to be done according to its qualifying string `'{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'`. To give some background information here: This string refers to the `metadataNamespace` element we've seen at retrieving the available metadata formats above. reader = OAIRecordReader(oai, set_spec='ddc:720', metadata_prefix='oai_dc', max_records=1) for record in reader: print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_title._text) print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_creator._text) print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_publisher._text) print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_date._text) # Not always metadata content is a simple flat value like the identifier above. **Some fields in structured metadata formats are lists** as they hold multiple values. A good example is the `metadata` field `dc_type` which holds the information about the different types a document falls into. reader = OAIRecordReader(oai, set_spec='ddc:720', metadata_prefix='oai_dc', max_records=1) for record in reader: print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_type) # The surrounding square brackets `[ ]` indicate a list (here of key-value pairs). To access each content of the list items of its own you might use *subsetting*, which calls the relevant item by its number in the list. 
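# If you would rather not keep track of whether a field arrived as a single entry or as a list, a tiny helper (purely illustrative, not part of Polymatheia) can normalise both cases; the next cells stick to plain subsetting.

# +
def as_list(value):
    """Wrap a single navigable-dictionary value in a list so that both cases can be iterated uniformly."""
    return value if isinstance(value, list) else [value]
# -

# Later in this notebook (section 1.3) the same single-value-versus-list issue is handled with an explicit isinstance check.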
reader = OAIRecordReader(oai, set_spec='ddc:720', metadata_prefix='oai_dc', max_records=1) for record in reader: print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_type[0]._text) print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_type[1]._text) # Another example with subsetting reader = OAIRecordReader(oai, set_spec='ddc:720', metadata_prefix='oai_dc', max_records=1) for record in reader: print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_identifier[0]._text) print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_identifier[1]._text) print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_identifier[2]._text) # With e-periodica **journal articles** are retrieved. Therefore, the Dublin Core element `dc_source` is highly significant: It represents the various **metadata information of the periodical** in which the article was published. Eight `dc_source` elements are delivered and their sequence bears a deeper meaning: # # 1. Title of the periodical # 2. ZDB ID - ID of the Zeitschriftendatenbank (see [example](https://zdb-katalog.de/title.xhtml?idn=011220082&view=full)) # 3. ISSN # 4. Volume # 5. Year # 6. Issue # 7. # 8. Start page. reader = OAIRecordReader(oai, set_spec='ddc:720', metadata_prefix='oai_dc', max_records=1) for record in reader: print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_source) # Because drilling down the *navigable dictionary* path can lead to long commands - which might not be very clear, either - there is a catchier way to do so with the `get` command applied on the records. Also, there is **no issue anymore with single values versus lists and qualifying strings**. Just putting the terms together as a list of `get` parameters! # # Note that in the case of more than one of the same element (like `dc_source`) a result list in squared brackets will be created. reader = OAIRecordReader(oai, metadata_prefix='oai_dc', set_spec='ddc:720', max_records=1) for record in reader: print(record.get(['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc', 'dc_creator', '_text'])) print('---') print(record.get(['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc', 'dc_source', '_text'])) # So it's really easy to access whatever metadata content you like. # This also works with the shorter form of for-loops. But mind that it delivers a nested - or 'doubled' - list, if there are the same elements several times, like `dc_source` here. # # + reader = OAIRecordReader(oai, set_spec='ddc:720', metadata_prefix='oai_dc', max_records=1) [record.get(['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc', 'dc_source', '_text']) \ for record in reader] # '\' indicates that command proceeds on the next line # - # Now let's **create a small dataframe with the creator, title and source** elements. # # There is a convenient way for this, relying on Python and Pandas standard procedures. So, to do this, first retrieve the single elements - as done before - and write them separately into lists (`sources`, `titles`, `creators`). Then, bind the lists into a dictionary (a genuine data type with Python), and finally turn this dictionary into a dataframe. Done! 
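# The cell below follows exactly that recipe. As an equivalent sketch, the same table can also be built in a single pass over the reader by collecting one dictionary per record; the set and field names are the same as in the cell that follows.

# +
rows = []
reader = OAIRecordReader(oai, set_spec='ddc:290', metadata_prefix='oai_dc', max_records=10)
dc_path = ['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc']
for record in reader:
    rows.append({
        'dc_creator': record.get(dc_path + ['dc_creator', '_text']),
        'dc_title': record.get(dc_path + ['dc_title', '_text']),
        'dc_source': record.get(dc_path + ['dc_source', '_text']),
    })
pd.DataFrame(rows)
# -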
# + reader = OAIRecordReader(oai, set_spec='ddc:290', metadata_prefix='oai_dc', max_records=10) # Make lists from Dublin Core elements sources = [record.get(['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc', 'dc_source', '_text']) \ for record in reader] titles = [record.get(['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc', 'dc_title', '_text']) \ for record in reader] creators = [record.get(['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc', 'dc_creator', '_text']) \ for record in reader] # Create a dictionary from the lists and turn the dictionary into a dataframe dic = {'dc_creator': creators, 'dc_title': titles, 'dc_source': sources} df = pd.DataFrame(dic) df.style # - # We might go only a small step further to get some dedicated content from the various `dc_source` elements like the title of the journal and the publication year. The code only needs slight adjustment. # + reader = OAIRecordReader(oai, set_spec='ddc:350', metadata_prefix='oai_dc', max_records=10) # Make lists from Dublin Core elements titles = [record.get(['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc', 'dc_title', '_text']) \ for record in reader] creators = [record.get(['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc', 'dc_creator', '_text']) \ for record in reader] # Create empty lists 'periodicals' and 'years' to fill periodicals = [] years = [] # Fill these lists with the respective dc_source element for record in reader: periodicals.append(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_source[0]._text) years.append(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_source[4]._text) # Create a dictionary from the lists and turn the dictionary into a dataframe dic = {'dc_creator': creators, 'dc_title': titles, 'periodical': periodicals, 'year': years} df = pd.DataFrame(dic) df.style # - # ### 1.3 Save and recover complex metadata structures # Before any data will be downloaded, let's build a folder `data` in our working directory to save any data. print(os.getcwd()) # print current working directory # In case you might change your directory you can easily do this with `os.chdir` or `os.chdir(os.pardir)`. While `os.chdir()` changes the working directory to a subdirectory, `os.chdir(os.pardir)` will change it to the parent directory. Just uncomment (and maybe multiply) the commands you need. #os.chdir(os.pardir) # change to parent directory #os.chdir(...) # change to subdirectory os.makedirs('data', exist_ok=True) # make new folder 'data' - if there isn't already one os.chdir('data') # change to 'data' folder # To **download a whole bunch of metadata items** in nested formats like *MODS*, the 'JSONWriter' from Polymatheia is very helpful. # It creates a complex folder structure and JSON files to reproduce the structured metadata. And with 'JSONReader' one can easily recover the metadata set. from polymatheia.data.writer import JSONWriter # also available: CSVReader (for flat data), XMLReader and Writer from polymatheia.data.reader import JSONReader # 'JSONWriter' takes two parameters: # - The first is the name of the directory into which the data should be stored. # - The second is the dot-notated path (via its `header.identifier`) used to access the item's metadata. 
# # For more clarity, these are the contents of `header.identifier` for the first six records in the *DDC 690* set we will refer to: reader = OAIRecordReader(oai, set_spec='ddc:690', metadata_prefix='oai_dc', max_records=6) for record in reader: print(record.header.identifier._text) # Download and save the first six records from Dublin Core format # 'poly_metadata' = directory to store into reader = OAIRecordReader(oai, set_spec='ddc:690', metadata_prefix='oai_dc', max_records=6) writer = JSONWriter('poly_metadata', 'header.identifier._text') writer.write(reader) # Recover the six records from local disk reader = JSONReader('poly_metadata') [record for record in reader] # The stored data **can be used just the same way** as the direct accessed one, like for instance for the `dc_title` element. Note that the order of the first six records is shuffled now. reader = JSONReader('poly_metadata') for record in reader: print(record.header.identifier._text) print(record.get(['metadata', '{http://www.openarchives.org/OAI/2.0/oai_dc/}dc', 'dc_title', '_text'])) print('---') # Of course, there is also the way to read out certain metadata fields **via basic dot-notation**. But this will take a bit more of code to cope with the list vs. single value issue. reader = JSONReader('poly_metadata') for record in reader: print(record.header.identifier._text) if isinstance(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_creator, list): le = len(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_creator) for i in range(le): print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_creator[i]._text) else: print(record.metadata['{http://www.openarchives.org/OAI/2.0/oai_dc/}dc'].dc_creator._text) print('---') # ## 2 Direct metadata access via OAI-PMH # Unfortunately, the Polymatheia library doesn't offer methods for *all* OAI verbs. For instance, there is no `ListIdentifiers` method (which delivers only the identifiers of a given set) and no `GetRecord` for retrieving the metadata of a certain item using its e-periodica ID. # # That's where especially the common libraries **requests** and **BeautifulSoup** come into play, and more manually coding is needed. # # ### 2.0 Prerequisites # Load the necessary libraries import requests # request URLs from bs4 import BeautifulSoup as soup # webscrape and parse HTML and XML import lxml # XML parser supported by bs4 # call with soup(markup, 'lxml-xml' OR 'xml') import os # navigate and manipulate file directories import time # work with time stamps import pandas as pd # pandas is the Python standard library to work with dataframes from IPython.display import IFrame # embed website views in jupyter notebook import math # work with mathematical functions import re # work with regular expressions print("Succesfully imported necessary libraries") # https://www.e-periodica.ch/oai/ will be the **base URL** for all OAI requests. To make life easier we put it into the variable `oai`. oai = 'https://www.e-periodica.ch/oai/' # ### 2.1 Start with the native OAI interface # The very **core of all operations on the OAI interface** will be a small function called `load_xml()`. It simply requests the base URL with the various parameters and decodes the answer to XML. Therefore, it can be used with all OAI verbs and their respective parameters. def load_xml(params): ''' Accesses the OAI interface according to given parameters and scrapes its content. Parameters: All available native OAI verbs and parameter/value pairs. 
''' base_url = oai response = requests.get(base_url, params=params) output_soup = soup(response.content, "lxml") return output_soup # You may use it to read out the basic `Identify` response of the OAI interface. # # Note, that the parameters to be used by the `load_xml` function are the same as in the respective URL `https://www.e-periodica.ch/oai?verb=Identify`. That is, `verb` as the parameter key, and `Identify` as the parameter value. Therefore, we need a **parameter key-value pair**, which will be indicated by enclosing them in curly braces. xml_soup = load_xml({'verb': 'Identify'}) xml_soup # You can easily check with the `IFrame` method underneath. IFrame('https://www.e-periodica.ch/oai?verb=Identify', width=970, height=330) # ### 2.2 Download metadata records # The same can be done with the `GetRecord` OAI verb, here `metadataPrefix`and `identifier` are mandatory parameters, naturally. Since the e-periodica identifier is not simply an integer, you ought to put it in quotation marks every time. # + # Example for accessing a single metadata record # https://www.e-periodica.ch/oai?verb=GetRecord&metadataPrefix=oai_dc&identifier=oai:agora.ch:fde-001:fc00:db20:35b:7399::5 xml_soup = load_xml({'verb': 'GetRecord', 'metadataPrefix': 'oai_dc', \ 'identifier': 'oai:agora.ch:fde-001:fc00:db20:35b:7399::5'}) xml_soup # - # Again before downloading, first make a designated folder for the retrieved metadata. print(os.getcwd()) # print current working directory # In case you might change your directory you can easily do this with `os.chdir` or `os.chdir(os.pardir)`. While `os.chdir()` changes the working directory to a subdirectory, `os.chdir(os.pardir)` will change it to the parent directory. Just uncomment (and maybe multiply) the commands you need. #os.chdir(os.pardir) # change to parent directory #os.chdir(...) # change to subdirectory '...' os.makedirs('metadata', exist_ok=True) # make folder 'metadata' - if it is not already there os.chdir('metadata') # change to folder 'metadata' # You might want to **download the metadata record directly** by its e-periodica ID. The `download_record()` function does this for you easily. def download_record(ID, filename): ''' Downloads a certain metadata record from OAI to a single XML file. Throws a notice if metadata file already exists and leaves the existing one. Parameters: ID = E-periodica ID of the desired record. filename = File name to choose for the downloaded record. ''' path = os.getcwd() output_soup = load_xml({'verb': 'GetRecord', 'metadataPrefix': 'oai_dc', 'identifier': 'oai:agora.ch:' + str(ID)}) outfile = path + '/{}.xml'.format(filename) try: with open(outfile, mode='x', encoding='utf-8') as f: f.write(output_soup.decode()) print("Metadata file {}.xml saved".format(filename)) except FileExistsError: print("Metadata file {}.xml exists already".format(filename)) finally: pass # + # Example for downloading a single metadata record # https://www.e-periodica.ch/oai?verb=GetRecord&metadataPrefix=oai_dc&identifier=oai:agora.ch:adi-001:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b download_record('fde-001:fc00:db20:35b:7399::5', 'freidenker_programm') # - # ### 2.3 Download metadata by set # Scraping the OAI interface output directly yields a problem with bigger data. The output is **split into segments of 100 records, which are presented on single webpages**. Looking at a sample request with `ListIdentifier` method, you will find the `resumptionToken` element, which holds the resumption token. 
The [resumption token](http://www.openarchives.org/OAI/openarchivesprotocol.html#FlowControl) is required to access the next segment webpage, which again includes a resumtpiotn token to the next page and so on. # Scroll to the end of the page for the resumption token IFrame('https://www.e-periodica.ch/oai?verb=ListIdentifiers&set=ddc:340&metadataPrefix=oai_dc', \ width=970, height=300) # Because of this, access metadata in bulk directly from the OAI interface is a bit more complex. With `retrieve_set_metadata()` we create a function to **retrieve metadata records of a set** and save the XML files into a created folder. As the e-periodica IDs aren't suitable for file names, the downloaded files will be serially numbered. # # **WARNING:** Mind, that entire sets of e-periodica are large! You might rather limit the number of records to download. therefore, a default value of 50 records will be defined in the function. def retrieve_set_metadata(Set, foldername, max_records=50): ''' Downloads metadata records of a given set from OAI to XML files in a certain folder structure. Therefore it * creates a folder to hold the records * requests e-periodica OAI-PMH interface according to a set * retrieves the set's e-periodica IDs * downloads Dublin Core metadata records according to IDs * writes them into single serially numbered XML files in the folder. Parameters: Set = The desired OAI set. foldername = name of the folder in which the records will be stored. max_records = (Maximum) Number of records to retrieve. Default value is 50. ''' start = time.perf_counter() number = 0 # Set parameters to the interface base_url = oai recordsearch_term = {'verb': 'GetRecord', 'metadataPrefix': 'oai_dc'} listsearch_term = {'verb': 'ListIdentifiers', 'metadataPrefix': 'oai_dc', 'set': Set} # Make a folder for the files named according to parameter 'foldername' path = os.getcwd() + '/' + foldername try: os.makedirs(path, exist_ok = True) print("Path {} is already available or created successfully".format(path)) except OSError as error: print("Path {} can not be created".format(path)) # Basic functions def load_xml(params): ''' Accesses the OAI interface according to given parameters and scrapes its content. Parameters: All available native OAI verbs and parameter/value pairs. ''' response = requests.get(base_url, params=params) output_soup = soup(response.content, "lxml") return output_soup def download_record(ID): ''' Downloads a certain metadata record from OAI to a single XML file. Throws a notice if metadata file already exists and leaves the existing one. Parameter: ID = E-periodica ID of the desired record. 
''' output_soup = load_xml({'verb': 'GetRecord', 'metadataPrefix': 'oai_dc', 'identifier': ID}) outfile = path + '/{}.xml'.format(number) try: with open(outfile, mode='x', encoding='utf-8') as f: f.write(output_soup.decode()) except FileExistsError: print("Metadata file {}.xml exists already".format(number)) finally: pass # Start with the first access to OAI interface - get the item IDs of a set xml_soup = load_xml(listsearch_term) while xml_soup.find('resumptiontoken') and number <= max_records: if number == 0: # First access for item IDs - first page xml_soup_new = load_xml(listsearch_term) else: # Following accesses for item IDs xml_soup_new = load_xml({'verb': 'ListIdentifiers', 'resumptionToken': resumption_token}) # Scraping out the e-periodica IDs ids = [] for ID in [(i.contents[0]) for i in xml_soup_new.find_all('identifier')]: ids.append(ID) # Download the metadata records according to retrieved e-periodica IDs print('Retrieving metadata for e-periodica IDs') for ID in ids: number += 1 if number <= max_records: download_record(ID) else: pass ids = [] # Actualize the resumtpion token to retrieve the the next page try: new_token = xml_soup.find('resumptiontoken').get_text() resumption_token = new_token print('New resumption token:', resumption_token) except AttributeError: print('Reached end of IDs/results list') # notice when last page is accessed finally: pass with os.scandir(path) as entries: count = 0 for entry in entries: count += 1 print("{} metadata files in {}".format(count, path)) finish = time.perf_counter() print("Finished in {} second(s)".format(round(finish - start, 2))) # Just choose the appropriate set short cut, the desired folder name and the number of records retrieve_set_metadata('ddc:370', 'DDC_370', 10) # ## 3 Download fulltext files from e-periodica website # ### 3.0 Prerequisites # + # Load the necessary libraries import requests # request URLs import urllib.request # open URLs, e.g. PDF files on URLs # #!pip install pdfplumber import pdfplumber # read - available - text out from PDFs from bs4 import BeautifulSoup as soup # webscrape and parse HTML and XML import lxml # XML parser supported by bs4 # call with soup(markup, 'lxml-xml' OR 'xml') import os # navigate and manipulate file directories import time # work with time stamps import pandas as pd # pandas is the Python standard library to work with dataframes from IPython.display import IFrame # embed website views in jupyter notebook import math # work with mathematical functions import re # work with regular expressions print("Successfully imported necessary libraries") # - # https://www.e-periodica.ch/oai/ will be the **base URL** for all OAI requests. To make life easier we put it into the variable `oai`. oai = 'https://www.e-periodica.ch/oai/' # ### 3.1 Download fulltext files by e-periodica ID # Downloading e-periodica fulltetxts can be done from the e-periodica website. Fulltext is currently only available via PDF format. IFrame('https://www.e-periodica.ch/cntmng?type=pdf&pid=act-001:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', width=970, height=600) # At first, next to the `metadata` folder a new directory `fulltexts`will be created. print(os.getcwd()) # print current working directory # In case you might change your directory you can easily do this with `os.chdir` or `os.chdir(os.pardir)`. While `os.chdir()` changes the working directory to a subdirectory, `os.chdir(os.pardir)` will change it to the parent directory. 
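# If you prefer not to hop between directories at all, the same effect can be achieved by building the target path explicitly and passing it around; a sketch (the notebook itself continues with os.chdir below):

# +
# build the fulltexts directory next to the metadata folder without changing the working directory
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
fulltext_dir = os.path.join(parent_dir, 'fulltexts')
os.makedirs(fulltext_dir, exist_ok=True)
print(fulltext_dir)
# -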
os.chdir(os.pardir) # change to parent directory os.makedirs('fulltexts', exist_ok=True) # make new folder 'fulltexts' os.chdir('fulltexts') # change to 'fulltexts' folder # A single fulltext file can be retrieved by a given e-periodica ID with the following function `download_fulltext()`. Note that **for fulltexts a different base URL** - in combination with the given e-periodica ID - has to be used: `https://www.e-periodica.ch/cntmng?type=pdf&pid=`. Since e-periodica IDs don't make a suitable fulltext filename, you have to choose one manually. def download_fulltext(ID, filename): ''' Downloads the PDF file of a certain e-periodica document by its ID. Builds with e-periodica ID the fulltext URL, and saves the PDF file on local disk. Parameters: ID = E-periodica ID of the desired fulltext/PDF file. filename = The file name to choose for the retrieved PDF file. ''' baseurl_fulltext = "https://www.e-periodica.ch/cntmng?type=pdf&pid=" pdf_url = baseurl_fulltext + str(ID) response = urllib.request.urlopen(pdf_url) outfile = '{}.pdf'.format(filename) try: with open(outfile, 'wb') as f: f.write(response.read()) print("Fulltext file {} saved".format(outfile)) except FileExistsError: print("Fulltext file {} exists already".format(outfile)) except: print("Saving fulltext file {} failed".format(outfile)) finally: pass # Retrieving example PDF with e-periodica ID download_fulltext('act-001:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', 'tropenkaufleute') download_fulltext('fde-001:fc00:db20:35b:7399::5', 'freidenker_programm') # Now, we might **check the PDF files we've just downloaded**. Of course you can open the files with your default PDF viewer. But furthermore, you can take a somewhat deeper look on them. With the small, but mighty Python library *pdfplumber* we can **read out some information form the files**, for instance, the technical metadata. # # For the full capabilities of *pdfplumber* you might visit https://github.com/jsvine/pdfplumber. with open('tropenkaufleute.pdf', 'rb') as f: pdf = pdfplumber.open(f) print(pdf.metadata) # By defining a small function called `tech_metadata` there's an easy way to get a better formatted output. def tech_metadata(pdf_path): ''' Reads the technical metadata of a PDF formatted file and prints a summary. Parameters: pdf_path = The path of the PDF file to be read. ''' with open(pdf_path, 'rb') as f: pdf = pdfplumber.open(f) md = pdf.metadata num_pages = len(pdf.pages) txt = f""" Information about {pdf_path}: Author: {md['Author']} Title: {md['Title']} Number of pages: {num_pages} Creator: {md['Creator']} Producer: {md['Producer']} """ print(txt) tech_metadata('tropenkaufleute.pdf') tech_metadata('freidenker_programm.pdf') # But *pdfplumber* can also **read out the raw text of the pages** stored in the PDF. Let's try to take a look at the very first page of the *freidenker_programm.pdf* file with a small code snippet. This **first page** is indeed a cover sheet generated by e-periodica to get an overview about the document's **bibliographic metadata** and **terms of use** (in German). with open('freidenker_programm.pdf', 'rb') as f: pdf = pdfplumber.open(f) # creating a reader object first_page = pdf.pages[0] # creating a page object from the first PDF page = cover sheet print(first_page.extract_text()) # extracting text form the page object # Of course, you can read out all the pages at once and **get the whole raw text**, which is saved in the PDF file. Furthermore it is very easy to skip the cover sheet in doing so. 
We might define a small function named `read_pdf()` for printing out the whole raw text of the article. def read_pdf(pdf_path): ''' Extracts the raw text of a PDF formatted file and prints it. Omits the first page of the PDF file, which is a cover sheet and not part of the article's genuine text. Parameters: pdf_path = The path of the PDF file to be read. ''' with open(pdf_path, 'rb') as f: pdf = pdfplumber.open(f) for i in range(1, len(pdf.pages)): # start with the second page to skip the first one = cover sheet page = pdf.pages[i] # creating a page object text = page.extract_text() # extracting text form the page object print(text) read_pdf('tropenkaufleute.pdf') # Similarly, with `pdf_to_txt()` we can define a function to write the extracted raw text from PDF into a TXT file immediately. def pdf_to_txt(pdf_path): ''' Extracts the raw text of a PDF formatted file and writes it into a TXT file of the same name (with '.txt' file extension respectively). Omits the first page of the PDF file, which is a cover sheet and not part of the article's genuine text. Parameters: pdf_path = The path of the PDF file to be read. ''' fulltext = '' with open(pdf_path, 'rb') as f: pdf = pdfplumber.open(f) for i in range(1, len(pdf.pages)): # start with the second page to skip the first one = cover sheet page = pdf.pages[i] # creating a page object page_text = page.extract_text() # extracting text form the page object fulltext += page_text # bind page texts together to whole text match = re.search('(\S+).pdf', pdf_path) filename = match.group(1) outfile = filename + '.txt' try: with open(outfile, 'w', encoding='utf-8') as f: f.write(fulltext) print("Fulltext file {}.txt saved".format(filename)) except: print("Saving fulltext file {}.txt failed".format(filename)) finally: pass pdf_to_txt('tropenkaufleute.pdf') # check the content of the generated TXT file with open('tropenkaufleute.txt', 'r', encoding='utf-8') as f: fulltext = f.read() print(fulltext) # So everything is fine? Unfortunately not. # # First, you might notice that the **headlines and footers of the pages as their footnotes are included** sequentially in the text output. So keep in mind, that in many cases, you won't get the spotless clean article fulltext to read by a human. But if you are looking for a text mining resource, this outcome will quite do the job. # # Secondly, you might have **more complex PDF files**, for instance older ones with a **column layout**. Here, *pdfplumber* will get to its end. read_pdf('freidenker_programm.pdf') # To handle such challenging PDF files you will come back on a **stronger tool**. *Apache Tika* is a well-known open source toolkit which extracts metadata and text from wide range of file formats. *Apache Tika* is Java-based, and using it as shown beneath will install Java, the Tika REST server and a Python wrapper to speak with it. # # You might see the Tika documentation at https://pypi.org/project/tika/ and the documentation of the Tika wrapper: https://pypi.org/project/tika/. # # The Tika parser also distinguishes between the file's metadata and its content. Just try the following two short code snippets. # + # #!pip install tika from tika import parser text = parser.from_file('freidenker_programm.pdf') print(text['metadata']) # - with open('freidenker_programm.pdf', 'rb') as f: text = parser.from_file(f) print(text['content']) # Looks fine! But there's one more problem: The extracted text **includes also the text of the added cover sheet**. 
To deal with this, we first look after the string which separates the cover sheet text from the article's one. As you can see above, this is the Digital Object Identifier (DOI) link of the document (http://doi.org/10.5169/seals-405882). This DOI link appears two times on the cover sheet overall. So we have to choose the third section of the document split by the DOI link to address the article's raw text only. # # So a new function `read_pdf_tika()` for reading out the article's text with the Tika parser can be defined. def read_pdf_tika(pdf_path): ''' Extracts the raw text of a PDF formatted file with Apache Tika and prints it. Parameters: pdf_path = The path of the PDF file to be read. ''' with open(pdf_path, 'rb') as f: pdf = pdfplumber.open(f) first_page = pdf.pages[0] text_first_page = first_page.extract_text() match = re.search('http://doi.org/10.5169/seals-(\S+)', text_first_page) # look for the DOI links with open(pdf_path, 'rb') as f: raw = parser.from_file(f) print(raw['content'].split(match.group())[2]) # split the document by the DOI links # and choose the third document split read_pdf_tika('freidenker_programm.pdf') def pdf_to_txt_tika(pdf_path): ''' Extracts the raw text of a PDF formatted file with Apache Tika and writes it into a TXT file of the same name (with '.txt' file extension respectively). Parameters: pdf_path = The path of the PDF file to be read. ''' with open(pdf_path, 'rb') as f: pdf = pdfplumber.open(f) first_page = pdf.pages[0] text_first_page = first_page.extract_text() match = re.search('http://doi.org/10.5169/seals-(\S+)', text_first_page) # look for the DOI links with open(pdf_path, 'rb') as f: text = parser.from_file(f) fulltext = text['content'].split(match.group())[2] # split the document by the DOI links # and choose the third document spli match_name = re.search('(\S+).pdf', pdf_path) filename = match_name.group(1) outfile = filename + '.txt' try: with open(outfile, 'w', encoding='utf-8') as f: f.write(fulltext) print("Fulltext file {}.txt saved".format(filename)) except: print("Saving fulltext file {}.txt failed".format(filename)) finally: pass pdf_to_txt_tika('freidenker_programm.pdf') # Check the content of the generated TXT file with open('freidenker_programm.txt', 'r', encoding='utf-8') as f: fulltext = f.read() print(fulltext) # That looks really good! But finally a short caveat has to be stated, again. # # Mind, that Tika (and presumably most PDF readers) **will parse whole pages by default**. If there is more than one article on a page, you'll get more than the one article you want in raw text. That's also the case above. Furthermore the **constraints regarding headlines, footers and footnotes** noted before also apply with various PDF readers. # # So, as mentioned before: The outcome will make quite a good text mining resource, but might be confusing here and there for human readers. # ### 3.2 Download fulltext files by set # Finally, let's build a function `retrieve_set_fulltexts` to **retrieve fulltexts of a certain e-periodica set**. # # **WARNING**: As with the metadata records, fulltext sets of e-periodica are large, so it's a good idea to limit the number of fulltexts to download. The default number of fulltexts in the function will be 20. Of course, you can also change that easily. # In case you might change your directory you can easily do this with `os.chdir` or `os.chdir(os.pardir)`. While `os.chdir()` changes the working directory to a subdirectory, `os.chdir(os.pardir)` will change it to the parent directory. 
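# Bulk downloads occasionally run into transient network errors. Before defining the set-wise download function below, here is a small optional helper (purely illustrative, it is not used by the function that follows) that retries a failing request a few times and pauses between attempts:

# +
def download_with_retries(url, attempts=3, wait=2):
    """Try to fetch a URL up to `attempts` times, pausing `wait` seconds between tries; returns the raw bytes or None."""
    for attempt in range(1, attempts + 1):
        try:
            return urllib.request.urlopen(url).read()
        except Exception as error:
            print("Attempt {} failed: {}".format(attempt, error))
            time.sleep(wait)
    return None
# -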
print(os.getcwd()) def retrieve_set_fulltexts(Set, foldername, max_fulltexts=20): ''' Downloads PDF fulltexts of a given DDC set from e-periodica website to files in a certain folder. Therefore it * creates the folder according to the parameter foldername * requests e-periodica OAI-PMH interface according to a OAI set * retrieves the set's e-periodica IDs * downloads PDF fulltexts according to IDs from e-periodica website Parameters: Set = The desired OAI set. foldername = name of the folder in which the fulltexts will be stored. max_fulltext = (Maximum) Number of fulltexts to retrieve. Default value is 20. ''' start = time.perf_counter() number = 0 # Set parameters to the interface base_url = oai baseurl_fulltext = "https://www.e-periodica.ch/cntmng?type=pdf&pid=" listsearch_term = {'verb': 'ListIdentifiers', 'metadataPrefix': 'oai_dc', 'set': Set} # Make a folder <foldername> to store files in it directory = foldername parent_dir = os.getcwd() path = os.path.join(parent_dir, directory) try: os.makedirs(path, exist_ok = True) print('Path {} is already available or created successfully'.format(path)) except OSError as error: print('Path {} could not be created'.format(path)) # Basic functions def load_xml(params): ''' Accesses the OAI interface according to given parameters and scrapes its content. ''' response = requests.get(base_url, params=params) output_soup = soup(response.content, "lxml") return output_soup def download_fulltext(ID): ''' Downloads the PDF file of a certain e-periodica document by its ID. Builds with e-periodica ID the fulltext URL, and saves the PDF file on local disk. Parameter: ID = E-periodica ID of the desired fulltext/PDF file. ''' pdf_url = baseurl_fulltext + str(ID) response = urllib.request.urlopen(pdf_url) outfile = path + '/{}.pdf'.format(number) try: with open(outfile, 'wb') as f: f.write(response.read()) #print("Fulltext file {}.pdf saved".format(number)) except FileExistsError: print("Fulltext file {}.pdf exists already".format(number)) except: print("Saving fulltext file {}.pdf failed".format(number)) finally: pass # Start with the first access to OAI interface xml_soup = load_xml(listsearch_term) while xml_soup.find('resumptiontoken') and number <= max_fulltexts: if number == 0: # First access for item IDs - first page xml_soup_new = load_xml(listsearch_term) else: # Following accesses for item IDs xml_soup_new = load_xml({'verb': 'ListIdentifiers', 'resumptionToken': resumption_token}) # Scraping out the e-periodica IDs ids = [] for ID in [(i.contents[0]) for i in xml_soup_new.find_all('identifier')]: match = re.search('oai:agora.ch:(\w{3}-\d{3}:\d{4}:\d+::\d+)', ID) # extract the string following 'oai:agora.ch:' if match: ids.append(match.group(1)) # second parenthesized subgroup of group() = number # Download the fulltext files according to retrieved e-periodica IDs print('Retrieving PDF fulltexts for e-periodica IDs') for ID in ids: number += 1 if number <= max_fulltexts: download_fulltext(ID) else: pass ids = [] # Actualize the resumption token to retrieve the the next page try: new_token = xml_soup_new.find('resumptiontoken').get_text() resumption_token = new_token print('New resumption token:', resumption_token) except AttributeError: print('Reached end of IDs/results list') # notice when last results page is accessed finally: pass count = 0 with os.scandir(path) as entries: for entry in entries: count += 1 print("{} fulltext files in {}".format(count, path)) finish = time.perf_counter() print("Finished in {} second(s)".format(round(finish - 
start, 2))) retrieve_set_fulltexts('ddc:450', 'DDC_450', 10) # You can easily **check the technical metadata of the downloaded PDF files** with a small loop over the included files using the `os.listdir()` command on the new *DDC_450* folder. # + path = os.path.join(os.getcwd(), 'DDC_450') for entry in os.listdir(path): if entry.endswith('.pdf'): tech_metadata(path + '/' + entry) # - # Finally, you may **process all the PDF files batch-wise** in a given folder and **write TXT files from their raw text**. There's even a shorter notation for the general processing loop above. Both ways, using the lightweight *pdfplumber* library, and the *Apache Tika* wrapper for Python are shown beneath with a sample outcome. # + # Using pdfplumber library path = os.path.join(os.getcwd(), 'DDC_450') [pdf_to_txt(path + '/' + entry) for entry in os.listdir(path) if entry.endswith('.pdf')] # - # Check the content of a generated TXT file with open('DDC_450/9.txt', 'r', encoding='utf-8') as f: fulltext = f.read() print(fulltext) # + # Using Apache Tika parser path = os.path.join(os.getcwd(), 'DDC_450') [pdf_to_txt_tika(path + '/' + entry) for entry in os.listdir(path) if entry.endswith('.pdf')] # - # check the content of a generated TXT file with open('DDC_450/9.txt', 'r', encoding='utf-8') as f: fulltext = f.read() print(fulltext)
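# If a single corrupt or password-protected PDF should not abort the whole batch, the per-file conversion can be guarded. This is only a sketch building on the `pdf_to_txt()` function defined above; the same pattern works for `pdf_to_txt_tika()`.

# +
path = os.path.join(os.getcwd(), 'DDC_450')

for entry in os.listdir(path):
    if entry.endswith('.pdf'):
        try:
            pdf_to_txt(path + '/' + entry)    # convert one PDF into a TXT file
        except Exception as error:
            # report the failing file and continue with the rest of the folder
            print("Skipping {}: {}".format(entry, error))
# -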
e_periodica_metadata_fulltext.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: literature # language: python # name: literature # --- # # Cleaning Data # import glob import pandas as pd dfs = [] for filename in glob.glob("../data/Auction_*.json"): dfs.append(pd.read_json(filename)) df = pd.concat(dfs, ignore_index=True) df.provenance.unique() len(df.title.unique()), len(df.unique_key.unique()) provenance_size = ( df.groupby(["unique_key", "provenance"]) .size() .reset_index() .groupby("provenance") .size() ) provenance_size df = df[~(df["date"] < 1974)] df = df[~(df["date"] > 2018)] df.to_json("../data/auction_November_2018.json") df.author = df.author.str.lower() # Duplicate articles # ------------------ table = ( df.groupby(["title", "unique_key"]).size().reset_index().groupby("title").count() ) duplicates = table[table["unique_key"] > 1] duplicates_title = df[df["title"].isin(duplicates.index)]["title"].unique() duplicates_in_arxiv = df[ (df["title"].isin(duplicates.index)) & (df["provenance"] == "arXiv") ]["title"].unique() diff = list(set(duplicates_title) - set(duplicates_in_arxiv)) df_without_arxiv = df[~(df["provenance"] == "arXiv")] df_without_arxiv = df_without_arxiv.drop_duplicates(subset="title") # df_without_arxiv.to_json('../data/pd_November_2018_without_arxiv.json') # **Drop duplicates.** articles_to_drop = df[ (df["title"].isin(duplicates.index)) & (df["provenance"] == "arXiv") ]["unique_key"].unique() df = df[~df["unique_key"].isin(articles_to_drop)] len(df["title"].unique()), len(df["unique_key"].unique()) df = df[~(df["date"] < 1974)] # **Export clean json.** df.to_json("../data/auction_November_2018_clean.json") df.to_csv("../data/price_of_anarchy_articles_meta_data.csv")
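# A quick check on the cleaned frame (only a sketch): list any titles that still map to more than one `unique_key`, i.e. remaining duplicates that do not involve an arXiv version.

remaining = df.groupby("title")["unique_key"].nunique()
remaining[remaining > 1]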
src/nbs/0.3. Cleaning Data Auction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import matplotlib.pyplot as plt import numpy as np # # Unsupervised Learning Part 2 -- Clustering # Clustering is the task of gathering samples into groups of similar # samples according to some predefined similarity or distance (dissimilarity) # measure, such as the Euclidean distance. # # <img width="60%" src='figures/clustering.png'/> # In this section we will explore a basic clustering task on some synthetic and real-world datasets. # # Here are some common applications of clustering algorithms: # # - Compression for data reduction # - Summarizing data as a reprocessing step for recommender systems # - Similarly: # - grouping related web news (e.g. Google News) and web search results # - grouping related stock quotes for investment portfolio management # - building customer profiles for market analysis # - Building a code book of prototype samples for unsupervised feature extraction # Let's start by creating a simple, 2-dimensional, synthetic dataset: # + from sklearn.datasets import make_blobs X, y = make_blobs(random_state=42) X.shape # - plt.figure(figsize=(8, 8)) plt.scatter(X[:, 0], X[:, 1]) # In the scatter plot above, we can see three separate groups of data points and we would like to recover them using clustering -- think of "discovering" the class labels that we already take for granted in a classification task. # # Even if the groups are obvious in the data, it is hard to find them when the data lives in a high-dimensional space, which we can't visualize in a single histogram or scatterplot. # Now we will use one of the simplest clustering algorithms, K-means. # This is an iterative algorithm which searches for three cluster # centers such that the distance from each point to its cluster is # minimized. The standard implementation of K-means uses the Euclidean distance, which is why we want to make sure that all our variables are measured on the same scale if we are working with real-world datastets. In the previous notebook, we talked about one technique to achieve this, namely, standardization. # # <br/> # <div class="alert alert-success"> # <b>Question</b>: # <ul> # <li> # what would you expect the output to look like? # </li> # </ul> # </div> # + from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=3, random_state=42) # - # We can get the cluster labels either by calling fit and then accessing the # ``labels_`` attribute of the K means estimator, or by calling ``fit_predict``. # Either way, the result contains the ID of the cluster that each point is assigned to. labels = kmeans.fit_predict(X) labels np.all(y == labels) # Let's visualize the assignments that have been found plt.figure(figsize=(8, 8)) plt.scatter(X[:, 0], X[:, 1], c=labels) # Compared to the true labels: plt.figure(figsize=(8, 8)) plt.scatter(X[:, 0], X[:, 1], c=y) # Here, we are probably satisfied with the clustering results. But in general we might want to have a more quantitative evaluation. How about comparing our cluster labels with the ground truth we got when generating the blobs? 
# + from sklearn.metrics import confusion_matrix, accuracy_score print('Accuracy score:', accuracy_score(y, labels)) print(confusion_matrix(y, labels)) # - np.mean(y == labels) # <div class="alert alert-success"> # <b>EXERCISE</b>: # <ul> # <li> # After looking at the "True" label array y, and the scatterplot and `labels` above, can you figure out why our computed accuracy is 0.0, not 1.0, and can you fix it? # </li> # </ul> # </div> # Even though we recovered the partitioning of the data into clusters perfectly, the cluster IDs we assigned were arbitrary, # and we can not hope to recover them. Therefore, we must use a different scoring metric, such as ``adjusted_rand_score``, which is invariant to permutations of the labels: # + from sklearn.metrics import adjusted_rand_score adjusted_rand_score(y, labels) # - # One of the "short-comings" of K-means is that we have to specify the number of clusters, which we often don't know *apriori*. For example, let's have a look what happens if we set the number of clusters to 2 in our synthetic 3-blob dataset: kmeans = KMeans(n_clusters=2, random_state=42) labels = kmeans.fit_predict(X) plt.figure(figsize=(8, 8)) plt.scatter(X[:, 0], X[:, 1], c=labels) kmeans.cluster_centers_ # #### The Elbow Method # # The Elbow method is a "rule-of-thumb" approach to finding the optimal number of clusters. Here, we look at the cluster dispersion for different values of k: # + distortions = [] for i in range(1, 11): km = KMeans(n_clusters=i, random_state=0) km.fit(X) distortions.append(km.inertia_) plt.plot(range(1, 11), distortions, marker='o') plt.xlabel('Number of clusters') plt.ylabel('Distortion') plt.show() # - # Then, we pick the value that resembles the "pit of an elbow." As we can see, this would be k=3 in this case, which makes sense given our visual expection of the dataset previously. # **Clustering comes with assumptions**: A clustering algorithm finds clusters by making assumptions with samples should be grouped together. Each algorithm makes different assumptions and the quality and interpretability of your results will depend on whether the assumptions are satisfied for your goal. For K-means clustering, the model is that all clusters have equal, spherical variance. # # **In general, there is no guarantee that structure found by a clustering algorithm has anything to do with what you were interested in**. 
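# Coming back to the accuracy exercise above: since the cluster IDs are arbitrary, one way to make `accuracy_score` meaningful is to first remap each cluster ID to its best-matching true label. This is only a sketch of one possible fix (solving a linear assignment problem on the confusion matrix), not the notebook's official solution:

# +
from scipy.optimize import linear_sum_assignment
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, confusion_matrix

labels3 = KMeans(n_clusters=3, random_state=42).fit_predict(X)

cm = confusion_matrix(y, labels3)                      # rows: true labels, columns: cluster IDs
true_ind, cluster_ind = linear_sum_assignment(-cm)     # assignment maximizing the matched counts
mapping = {cluster: true for true, cluster in zip(true_ind, cluster_ind)}

remapped = np.array([mapping[c] for c in labels3])
print('Accuracy after remapping:', accuracy_score(y, remapped))
# -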
# # We can easily create a dataset that has non-isotropic clusters, on which kmeans will fail: # + plt.figure(figsize=(12, 12)) n_samples = 1500 random_state = 170 X, y = make_blobs(n_samples=n_samples, random_state=random_state) # Incorrect number of clusters y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X) plt.subplot(221) plt.scatter(X[:, 0], X[:, 1], c=y_pred) plt.title("Incorrect Number of Blobs") # Anisotropicly distributed data transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X_aniso = np.dot(X, transformation) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso) plt.subplot(222) plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred) plt.title("Anisotropicly Distributed Blobs") # Different variance X_varied, y_varied = make_blobs(n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied) plt.subplot(223) plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred) plt.title("Unequal Variance") # Unevenly sized blobs X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10])) y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered) plt.subplot(224) plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred) plt.title("Unevenly Sized Blobs") # - # ## Some Notable Clustering Routines # The following are two well-known clustering algorithms. # # - `sklearn.cluster.KMeans`: <br/> # The simplest, yet effective clustering algorithm. Needs to be provided with the # number of clusters in advance, and assumes that the data is normalized as input # (but use a PCA model as preprocessor). # - `sklearn.cluster.MeanShift`: <br/> # Can find better looking clusters than KMeans but is not scalable to high number of samples. # - `sklearn.cluster.DBSCAN`: <br/> # Can detect irregularly shaped clusters based on density, i.e. sparse regions in # the input space are likely to become inter-cluster boundaries. Can also detect # outliers (samples that are not part of a cluster). # - `sklearn.cluster.AffinityPropagation`: <br/> # Clustering algorithm based on message passing between data points. # - `sklearn.cluster.SpectralClustering`: <br/> # KMeans applied to a projection of the normalized graph Laplacian: finds # normalized graph cuts if the affinity matrix is interpreted as an adjacency matrix of a graph. # - `sklearn.cluster.Ward`: <br/> # Ward implements hierarchical clustering based on the Ward algorithm, # a variance-minimizing approach. At each step, it minimizes the sum of # squared differences within all clusters (inertia criterion). # # Of these, Ward, SpectralClustering, DBSCAN and Affinity propagation can also work with precomputed similarity matrices. # <img src="figures/cluster_comparison.png" width="900"> # <div class="alert alert-success"> # <b>EXERCISE: digits clustering</b>: # <ul> # <li> # Perform K-means clustering on the digits data, searching for ten clusters. # Visualize the cluster centers as images (i.e. reshape each to 8x8 and use # ``plt.imshow``) Do the clusters seem to be correlated with particular digits? What is the ``adjusted_rand_score``? # </li> # <li> # Visualize the projected digits as in the last notebook, but this time use the # cluster labels as the color. What do you notice? # </li> # </ul> # </div> from sklearn.datasets import load_digits digits = load_digits() # ... # + # # %load solutions/08B_digits_clustering.py
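# -

# One possible approach to the digits exercise (a sketch only; the actual solution file is loaded above and may differ, and the 2D projection here simply uses PCA for illustration):

# +
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.metrics import adjusted_rand_score

digits = load_digits()
km = KMeans(n_clusters=10, random_state=42)
clusters = km.fit_predict(digits.data)

# visualize the ten cluster centers as 8x8 images
fig, axes = plt.subplots(2, 5, figsize=(8, 4))
for ax, center in zip(axes.ravel(), km.cluster_centers_):
    ax.imshow(center.reshape(8, 8), cmap='gray')
    ax.set_xticks(())
    ax.set_yticks(())

print('ARI:', adjusted_rand_score(digits.target, clusters))

# project the digits to 2D and color the points by cluster label
digits_pca = PCA(n_components=2).fit_transform(digits.data)
plt.figure(figsize=(8, 8))
plt.scatter(digits_pca[:, 0], digits_pca[:, 1], c=clusters)
# -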
scikit-learn/08.Unsupervised_Learning-Clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python # language: python # name: conda-env-python-py # --- # <a href="https://cognitiveclass.ai/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01"> # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png" width="200" align="center"> # </a> # # <h1> HTTP and Requests</h1> # # Estimated time needed: **15** minutes # # ## Objectives # # After completing this lab you will be able to: # # * Understand HTTP # * Handle HTTP Requests # # <h2>Table of Contents</h2> # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <ul> # <li> # <a href="https://#index">Overview of HTTP </a> # <ul> # <li><a href="https://#HTTP">Uniform Resource Locator:URL</a></li> # <li><a href="https://slice/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01">Request</a></li> # <li><a href="https://stride/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01">Response</a></li> # </ul> # </li> # <li> # <a href="https://#RP">Requests in Python </a> # <ul> # <li><a href="https://#get">Get Request with URL Parameters</a></li> # <li><a href="https://#post">Post Requests </a></li> # # </ul> # # </div> # # <hr> # # <h2 id="">Overview of HTTP </h2> # # When you, the **client**, use a web page your browser sends an **HTTP** request to the **server** where the page is hosted. The server tries to find the desired **resource** by default "<code>index.html</code>". If your request is successful, the server will send the object to the client in an **HTTP response**. This includes information like the type of the **resource**, the length of the **resource**, and other information. # # <p> # The figure below represents the process. The circle on the left represents the client, the circle on the right represents the Web server. The table under the Web server represents a list of resources stored in the web server. In this case an <code>HTML</code> file, <code>png</code> image, and <code>txt</code> file . # </p> # <p> # The <b>HTTP</b> protocol allows you to send and receive information through the web including webpages, images, and other web resources. In this lab, we will provide an overview of the Requests library for interacting with the <code>HTTP</code> protocol. # </p # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/images/reqest_basics.png" width="750" align="center"> # # </div> # # <h2 id="URL">Uniform Resource Locator: URL</h2> # # Uniform resource locator (URL) is the most popular way to find resources on the web. We can break the URL into three parts. 
# # <ul> # <li><b>scheme</b> this is this protocol, for this lab it will always be <code>http://</code> </li> # <li><b> Internet address or Base URL </b> this will be used to find the location here are some examples: <code>www.ibm.com</code> and <code> www.gitlab.com </code> </li> # <li><b>route</b> location on the web server for example: <code>/images/IDSNlogo.png</code> </li> # </ul> # # You may also hear the term Uniform Resource Identifier (URI), URL are actually a subset of URIs. Another popular term is endpoint, this is the URL of an operation provided by a Web server. # # <h2 id="RE">Request </h2> # # The process can be broken into the <b>request</b> and <b>response </b> process. The request using the get method is partially illustrated below. In the start line we have the <code>GET</code> method, this is an <code>HTTP</code> method. Also the location of the resource <code>/index.html</code> and the <code>HTTP</code> version. The Request header passes additional information with an <code>HTTP</code> request: # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/images/reqest_messege.png" width="400" align="center"> # </div> # # When an <code>HTTP</code> request is made, an <code>HTTP</code> method is sent, this tells the server what action to perform. A list of several <code>HTTP</code> methods is shown below. We will go over more examples later. # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/images/http_methods.png" width="400" align="center"> # </div> # # <h2 id="RES">Response</h2> # # The figure below represents the response; the response start line contains the version number <code>HTTP/1.0</code>, a status code (200) meaning success, followed by a descriptive phrase (OK). The response header contains useful information. Finally, we have the response body containing the requested file, an <code> HTML </code> document. It should be noted that some requests have headers. # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/images/response_message.png" width="400" align="center"> # </div> # # Some status code examples are shown in the table below, the prefix indicates the class. These are shown in yellow, with actual status codes shown in white. Check out the following <a href="https://developer.mozilla.org/en-US/docs/Web/HTTP/Status?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01">link </a> for more descriptions. # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/images/status_code.png" width="300" align="center"> # </div> # # <h2 id="RP">Requests in Python</h2> # # Requests is a Python Library that allows you to send <code>HTTP/1.1</code> requests easily. 
We can import the library as follows: # import requests # We will also use the following libraries: # import os from PIL import Image from IPython.display import IFrame # You can make a <code>GET</code> request via the method <code>get</code> to [www.ibm.com](http://www.ibm.com/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01): # url='https://www.ibm.com/' r=requests.get(url) # We have the response object <code>r</code>, this has information about the request, like the status of the request. We can view the status code using the attribute <code>status_code</code>. # r.status_code # You can view the request headers: # print(r.request.headers) # You can view the request body, in the following line, as there is no body for a get request we get a <code>None</code>: # print("request body:", r.request.body) # You can view the <code>HTTP</code> response header using the attribute <code>headers</code>. This returns a python dictionary of <code>HTTP</code> response headers. # header=r.headers print(r.headers) # We can obtain the date the request was sent using the key <code>Date</code> # header['date'] # <code>Content-Type</code> indicates the type of data: # header['Content-Type'] # You can also check the <code>encoding</code>: # r.encoding # As the <code>Content-Type</code> is <code>text/html</code> we can use the attribute <code>text</code> to display the <code>HTML</code> in the body. We can review the first 100 characters: # r.text[0:100] # You can load other types of data for non-text requests, like images. Consider the URL of the following image: # # Use single quotation marks for defining string url='https://gitlab.com/ibm/skills-network/courses/placeholder101/-/raw/master/labs/module%201/images/IDSNlogo.png' # We can make a get request: # r=requests.get(url) # We can look at the response header: # print(r.headers) # We can see the <code>'Content-Type'</code> # r.headers['Content-Type'] # An image is a response object that contains the image as a <a href="https://docs.python.org/3/glossary.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01#term-bytes-like-object">bytes-like object</a>. As a result, we must save it using a file object. First, we specify the file path and # name # path=os.path.join(os.getcwd(),'image.png') path # We save the file, in order to access the body of the response we use the attribute <code>content</code> then save it using the <code>open</code> function and write <code>method</code>: # with open(path,'wb') as f: f.write(r.content) # We can view the image: # Image.open(path) # <h3>Question 1: write <a href="https://www.gnu.org/software/wget/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01"><code> wget </code></a></h3> # # In the previous section, we used the <code>wget</code> function to retrieve content from the web server as shown below. Write the python code to perform the same task. The code should be the same as the one used to download the image, but the file name should be <code>'Example1.txt'</code>. 
# # <code>!wget -O /resources/data/Example1.txt <https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/Example1.txt></code> # url='https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/Example1.txt' path=os.path.join(os.getcwd(),'example1.txt') r=requests.get(url) with open(path,'wb') as f: f.write(r.content) # <details><summary>Click here for the solution</summary> # # ```python # url='https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/Example1.txt' # path=os.path.join(os.getcwd(),'example1.txt') # r=requests.get(url) # with open(path,'wb') as f: # f.write(r.content) # # ``` # # </details> # # <h2 id="URL_P">Get Request with URL Parameters </h2> # # You can use the <b>GET</b> method to modify the results of your query, for example retrieving data from an API. We send a <b>GET</b> request to the server. Like before we have the <b>Base URL</b>, in the <b>Route</b> we append <code>/get</code>, this indicates we would like to preform a <code>GET</code> request. This is demonstrated in the following table: # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/images/base_URL_Route.png" width="400" align="center"> # </div> # # The Base URL is for <code>[http://httpbin.org/](http://httpbin.org/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01)</code> is a simple HTTP Request & Response Service. The <code>URL</code> in Python is given by: # url_get='http://httpbin.org/get' # A <a href="https://en.wikipedia.org/wiki/Query_string?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01">query string</a> is a part of a uniform resource locator (URL), this sends other information to the web server. The start of the query is a <code>?</code>, followed by a series of parameter and value pairs, as shown in the table below. The first parameter name is <code>name</code> and the value is <code>Joseph</code>. The second parameter name is <code>ID</code> and the Value is <code>123</code>. Each pair, parameter, and value is separated by an equals sign, <code>=</code>. # The series of pairs is separated by the ampersand <code>&</code>. # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/images/query_string.png" width="500" align="center"> # </div> # # To create a Query string, add a dictionary. The keys are the parameter names and the values are the value of the Query string. 
# payload={"name":"Joseph","ID":"123"} # Then passing the dictionary <code>payload</code> to the <code>params</code> parameter of the <code> get()</code> function: # r=requests.get(url_get,params=payload) # We can print out the <code>URL</code> and see the name and values # r.url # There is no request body # print("request body:", r.request.body) # We can print out the status code # print(r.status_code) # We can view the response as text: # print(r.text) # We can look at the <code>'Content-Type'</code>. # r.headers['Content-Type'] # As the content <code>'Content-Type'</code> is in the <code>JSON</code> format we can use the method <code>json()</code>, it returns a Python <code>dict</code>: # r.json() # The key <code>args</code> has the name and values: # r.json()['args'] # <h2 id="POST">Post Requests </h2> # # Like a <code>GET</code> request, a <code>POST</code> is used to send data to a server, but the <code>POST</code> request sends the data in a request body. In order to send the Post Request in Python, in the <code>URL</code> we change the route to <code>POST</code>: # url_post='http://httpbin.org/post' # This endpoint will expect data as a file or as a form. A form is convenient way to configure an HTTP request to send data to a server. # # To make a <code>POST</code> request we use the <code>post()</code> function, the variable <code>payload</code> is passed to the parameter <code> data </code>: # r_post=requests.post(url_post,data=payload) # Comparing the URL from the response object of the <code>GET</code> and <code>POST</code> request we see the <code>POST</code> request has no name or value pairs. # print("POST request URL:",r_post.url ) print("GET request URL:",r.url) # We can compare the <code>POST</code> and <code>GET</code> request body, we see only the <code>POST</code> request has a body: # print("POST request body:",r_post.request.body) print("GET request body:",r.request.body) # We can view the form as well: # r_post.json()['form'] # There is a lot more you can do. Check out <a href="https://requests.readthedocs.io/en/master/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01">Requests </a> for more. # # <hr> # # ## Authors # # <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01" target="_blank"><NAME></a> <br>A Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. 
Joseph has been working for IBM since he completed his PhD.</p> # # ### Other Contributors # # <a href="https://www.linkedin.com/in/jiahui-mavis-zhou-a4537814a?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01"><NAME></a> # # ## Change Log # # | Date (YYYY-MM-DD) | Version | Changed By | Change Description | # | ----------------- | ------- | ---------- | ---------------------------- | # | 2021-12-20 | 2.1 | Malika | Updated the links | # | 2020-09-02 | 2.0 | Simran | Template updates to the file | # | | | | | # | | | | | # # ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/> #
PY0101EN-5.3_Requests_HTTP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Forecasting with HCrystalBall forecaster # This tutorial shows, how to use [HCrystalBall](https://github.com/heidelbergcement/hcrystalball)'s compliant estimator within `sktime` eco-system for the time-series forecasting. # # There is an existing [documentation](https://hcrystalball.readthedocs.io/en/latest/) containing full set of features (also with [tutorial](https://hcrystalball.readthedocs.io/en/latest/examples/02_tutorial.html)), so the main goal of this notebook is highlighting differences and additions to `sktime` possibilites. # ## Setup # `hcrystalball` is a soft dependency, that you would need to install with `conda install -c conda-forge hcrystalball` or `pip install hcrystalball` # ## Usage in sktime # + from warnings import simplefilter import numpy as np simplefilter("ignore", FutureWarning) # - from sktime.datasets import load_airline from sktime.forecasting.model_selection import temporal_train_test_split from sktime.performance_metrics.forecasting import smape_loss from sktime.utils.plotting import plot_series y = load_airline() # our HCrystalball wrapper currently only supports pd.DatetimeIndex types y.index = y.index.to_timestamp("M") y_train, y_test = temporal_train_test_split(y, test_size=36) print(y_train.shape[0], y_test.shape[0]) # + from hcrystalball.ensemble import SimpleEnsemble from hcrystalball.wrappers import ( ExponentialSmoothingWrapper, SarimaxWrapper, get_sklearn_wrapper, ) from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import ElasticNet from sktime.forecasting.hcrystalball import HCrystalBallForecaster # - models = { "sarimax": SarimaxWrapper(init_with_autoarima=True, autoarima_dict={"m": 12}), "smoothing": ExponentialSmoothingWrapper( trend="add", seasonal="multiplicative", seasonal_periods=12 ), "sklearn_linear": get_sklearn_wrapper(ElasticNet), "sklearn_forest": get_sklearn_wrapper(RandomForestRegressor, n_estimators=100), "avg_ensemble": SimpleEnsemble( base_learners=[ get_sklearn_wrapper(ElasticNet, name="sklearn_linear"), ExponentialSmoothingWrapper( trend="add", seasonal="multiplicative", seasonal_periods=12, name="smoothing", ), ], ensemble_func="mean", ), } # + # from hcrystalball.feature_extraction import HolidayTransformer, SeasonalityTransformer # from hcrystalball.wrappers import ProphetWrapper # other_models = { # # needs fbprophet with its dependencies installed # "prophet": ProphetWrapper(), # # works only with daily data frequency # "sklearn_forest_holiday": Pipeline( # [ # ("seasonality", SeasonalityTransformer(freq="M")), # ( # "holiday", # HolidayTransformer( # country_code="DE", days_before=2, days_after=1, bridge_days=True # ), # )("model", get_sklearn_wrapper(RandomForestRegressor)), # ] # ), # } # - fh = np.arange(len(y_test)) + 1 for model_name, model in models.items(): forecaster = HCrystalBallForecaster(model) # if provided in the data, exogenous variables are supported over X_train, X_pred forecaster.fit(y_train) y_pred = forecaster.predict(fh) plot_series( y_train, y_test, y_pred, labels=[ "y_train", "y_test", f"y_pred_{model_name}[smape_loss:{smape_loss(y_pred, y_test):.3}]", ], ) # *Note: As we see, the `RandomForestRegressor` does not cope well with trended data (cannot predict value from before unseen range). 
Adding a detrender and other features might help here.*

# ## Main Functionality
#
# ### HolidayTransformer
# - creates a **holiday** feature per **day**, given a country ISO code (optionally also a region, e.g. DE-NW); the data needs to have **daily frequency**
# - allows simple modelling of **before- and after-holiday effects** (use-case: "Our company does not sell much in the **Christmas time period**")
# - allows usage of **multiple holiday codes** (use-case: "Our company is located at the border, so sales are influenced by holidays in both **Germany and France**")
# - [see examples](https://hcrystalball.readthedocs.io/en/latest/examples/tutorial/wrappers/04_seasonalities_and_holidays.html)
#
# ### SeasonalityTransformer
# - creates features for day of the week and similar
# - the seasonalities can be automatically inferred from the data frequency
#
# ### ProphetWrapper
# - wrapper for the [fbprophet](https://facebook.github.io/prophet/) library
# - [learn more on the usage](https://hcrystalball.readthedocs.io/en/latest/examples/tutorial/wrappers/06_advanced_prophet.html)
#
# >_Prophet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well._
#
# >_<NAME>, <NAME>. 2017. [Forecasting at scale](https://doi.org/10.7287/peerj.preprints.3190v2). PeerJ Preprints 5:e3190v2 https://doi.org/10.7287/peerj.preprints.3190v2_
#
# ### StackingEnsemble
# - own implementation of a stacked ensemble
# - the meta-model can use features for weekday and horizon in addition to the base models' predictions
# - (use-case: **exponential smoothing is better at modelling short horizons, while sklearn models are better for long ones; the meta-model can pick up that information**)
# - [learn more](https://hcrystalball.readthedocs.io/en/latest/examples/tutorial/wrappers/08_ensembles.html#Stacking-Ensembles)
#
# ### SklearnWrapper
# - own implementation that turns sklearn-compatible regressors into time-series forecasters
# - uses lags depending on the forecasting horizon, which therefore needs to be known already at fitting time
# - [learn more](https://hcrystalball.readthedocs.io/en/latest/examples/tutorial/wrappers/02_ar_modelling_in_sklearn.html)
#
# ### TBATSWrapper
# - wrapper for the [tbats](https://github.com/intive-DataScience/tbats) library
# - a Python implementation of methods for complex seasonal patterns
#
# >_<NAME>., <NAME>., & <NAME>. (2011), Forecasting time series with complex seasonal patterns using exponential smoothing, Journal of the American Statistical Association, 106(496), 1513-1527._

# ## Performance

# Depending on the dataset (frequency, additional features, trend, seasonality, ...), some techniques will be applicable and others not. Take a look at the [HCrystalBall docs](https://hcrystalball.readthedocs.io/en/latest/examples/02_tutorial.html) to see performance on **daily data** with **exogenous variables** and **holiday** information (the domain HCrystalBall was mainly developed against).
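# As noted in the sklearn forecasting example above, the plain `RandomForestRegressor` wrapper struggles with trended data. A minimal sketch of one way to address this is to wrap the forecaster in sktime's `TransformedTargetForecaster` with a `Detrender`; the exact import paths may differ between sktime versions, so treat this as an illustration rather than the tutorial's official recipe.

# +
from sktime.forecasting.compose import TransformedTargetForecaster
from sktime.forecasting.trend import PolynomialTrendForecaster
from sktime.transformations.series.detrend import Detrender

detrended_forecaster = TransformedTargetForecaster(
    steps=[
        # remove a linear trend before fitting and add it back after predicting
        ("detrend", Detrender(forecaster=PolynomialTrendForecaster(degree=1))),
        ("forecast", HCrystalBallForecaster(get_sklearn_wrapper(RandomForestRegressor))),
    ]
)
detrended_forecaster.fit(y_train)
y_pred_detrended = detrended_forecaster.predict(fh)
plot_series(
    y_train,
    y_test,
    y_pred_detrended,
    labels=["y_train", "y_test", "y_pred_detrended"],
)
# -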
examples/forecasting_with_hcrystalball.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Import our libraries import pandas as pd import numpy as np import yfinance as yf import matplotlib.pyplot as plt from matplotlib.pyplot import figure from bs4 import BeautifulSoup import requests import datetime as dt import itertools import csv import yfinance as yf # %matplotlib inline # + ############################################################## ### OUTLINE FOR ANALYST RANKING LEADERBOARD ### ### History of Recommendations for stock(s) ### ### Leaderboard Ranking based on: Accuracy, Influence, Clout, etc ### ### Industry and Sector Forecasts ### ############################################################## # + ############################################################## ### OUTLINE FOR NEWS & OPINION POLLS ### ############################################################## ### NEWS ### ### Relevant News Stream With Watchlist Mentions ### ### News and Social Media Sentiment Metrics ### ### News Aggregation Stream ### ### Market News: IPOs, Defaults, M&A, Movements ### ############################################################## ### OPINION POLLS ### ### Bull/Bear Polls ### ### VIX & VIX Sentiment ### ### Margin Debt Indicator ### ### Short Interest and Short Indicators ### ### Unusual Options Activity ### # + ############################################################## ### OUTLINE FOR STOCK INFORMATION ### ### Stock Metrics ### ### Information ### ### Error Handling ### ### Charts ### ### Analysts Activity and Ranking ### # + ############################################################## ### OUTLINE FOR GLOBAL MOVEMENTS ### # Here we want to gather and display data from indicies around the world indicies = {'^GSPC': 'SP500', '^IXIC': 'Nasdaq,', '^VIX': 'VIX', '^FTSE': 'FTSE 100 (UK)', '^FCHI': 'CAC40 (FRANCE)', '^N225': 'Nikkei (Japan)', '^MXX': 'IPC MX (Mexico)'} tickers = indicies.keys() ticknames = indicies.values() def plotindicies(ticker): tick = yf.Ticker(ticker) index = tick.history(period='30d').Close indexdf = pd.DataFrame(index) plot = indexdf.plot(title = '%s 30 day view' % ticker) return plot # building our dataframe def globalmarkets(tickers): indicies1 = pd.DataFrame(columns = ticknames) for tick in tickers: indicies = yf.Ticker(tick) indicies = indicies.history(period='5y').Close indicies = pd.DataFrame(indicies) #indicies.columns = [tick] indicies1.append(indicies) # # # indicies = indicies.append(indicies) #closedf = pd.DataFrame(indicies) return indicies plotindicies('MSFT') for tick in tickers: plotindicies(tick) ### Indicies Price Changes ### ### Indicies Metrics ### ### Sector Metrics ### ### Sector Price Changes ### # + ############################################################## ### OUTLINE FOR GLOBAL NEWS & SENTIMENT ### ### Global Foreign News Translator ### ### Foreign Social Media Sentiment Monitor ### ### Monetary Policy and Fiscal Policy News Monitor ### ### Government Movements Monitor ### ### Top Central Banks Movements and Bal. 
Sheet Changes ### # - # + def analystrecom(ticker): stock = yf.Ticker(ticker) # Get recommendations from 2020 - Present data1 = stock.recommendations['2020'::] data2 = pd.DataFrame(data1['To Grade']) # Calculate total buys, holds, sells buys = data2.where((data2=='Buy') | (data2=='Overweight') | (data2=='Outperform') | (data2=='Strong Buy') | (data2=='Positive')) holds = data2.where((data2=='Neutral') | (data2=='Hold') | (data2=='Market Perform') | (data2=='Equal-Weight') | (data2=='Sector Weight') | (data2=='Sector Perform')) sells = data2.where((data2=='Sell') | (data2=='Underweight') | (data2=='Underperform')) # Convert totals to numbers num_buys = buys.count() num_holds = holds.count() num_sells = sells.count() # Create a dataframe from them # Recoms = pd.DataFrame(data = [num_buys, num_sells, num_holds], columns=['Buys','Holds', 'Sells']) # Plotting #fig = plt.figure() #ax = fig.add_axes([0,0,1,1]) #values = [num_buys, num_sells, num_holds] #recoms = ['Buys', 'Holds', 'Sells'] #ax.bar(values, recoms) #plt.show() Recoms = {'Buys': num_buys,'Sells': num_sells, 'Holds': num_holds} Recommendations = pd.DataFrame(Recoms) #Recommendations.plot.bar(rot=0) Recommendations.insert(loc=0,column='ticker',value=ticker) Recommendations.reset_index(drop=True, inplace=True) return(Recommendations) #opendf = pd.DataFrame() #for i in tickers: # opendf.append(Recommendations) # print(opendf) # continue #opendf = pd.DataFrame(columns=['ticker','Buys','Sells','Holds']) #for i in tickers: #opendf = opendf.append(analystrecom(i)) #AnalystRecom = pd.DataFrame(opendf.append(analystrecom(i))) #print(AnalystRecom) #analystrecom('aapl') def recoms(tickers): seriesofdf = tickers.apply(analystrecom).values masterdf = pd.concat(seriesofdf) masterdf.reset_index(drop=True, inplace=True) return masterdf # - analystrecom('aapl') ### Function to show recomendations by year def analystrecom(ticker): stock = yf.Ticker(ticker) # Get recommendations from 2020 - Present data1 = stock.recommendations[::] data2 = pd.DataFrame(data1['To Grade']) # Calculate total buys, holds, sells buys = data2.where((data2=='Buy') | (data2=='Overweight') | (data2=='Outperform') | (data2=='Strong Buy') | (data2=='Positive')) holds = data2.where((data2=='Neutral') | (data2=='Hold') | (data2=='Market Perform') | (data2=='Equal-Weight') | (data2=='Sector Weight') | (data2=='Sector Perform')) sells = data2.where((data2=='Sell') | (data2=='Underweight') | (data2=='Underperform')) # Group by year yts = data2.asfreq('Y') # Convert totals to numbers num_buys = buys.count() num_holds = holds.count() num_sells = sells.count() # Create a dataframe from them Recoms = pd.DataFrame(data = [num_buys, num_sells, num_holds], columns=['Buys','Holds', 'Sells']) # # Plotting # #fig = plt.figure() # #ax = fig.add_axes([0,0,1,1]) # #values = [num_buys, num_sells, num_holds] # #recoms = ['Buys', 'Holds', 'Sells'] # #ax.bar(values, recoms) # #plt.show() # Recoms = {'Buys': num_buys,'Sells': num_sells, 'Holds': num_holds} # Recommendations = pd.DataFrame(Recoms) # #Recommendations.plot.bar(rot=0) # Recommendations.insert(loc=0,column='ticker',value=ticker) # Recommendations.reset_index(drop=True, inplace=True) return yts.index analystrecom('aapl') pip install yahoofinancials from yahoofinancials import YahooFinancials # + # income statement data ticker = 'MSFT' financials = YahooFinancials(ticker) income_stmt = financials.get_financial_stmts('annual', 'income') income_stmt # + # Dividend history stocks = ['AAPL', 'MSFT', 'JPM', 'INTC'] start = '2000-1-1' end = 
'2015-1-1' financials = YahooFinancials(stocks) dividend_hist = financials.get_daily_dividend_data(start, end) dividend_hist # + # Turning dividend history into dataframe MSFT = dividend_hist['MSFT'] MSFTdiv_hist = pd.DataFrame.from_dict(MSFT) MSFTdiv_hist.drop(columns=['date']) # + # Retrives stock history with dividends highlighted tick = yf.Ticker(ticker) stock_hist = tick.history(period='10Y') adj_stock_hist = stock_hist[['Close', 'Volume', 'Dividends']] close = adj_stock_hist['Close'] volume = adj_stock_hist['Volume'] dividends = adj_stock_hist['Dividends'] # Plotting the data # size of plot (needs work) plt.figure(figsize=(20,8)) # content of plot plt.plot(close) # set plot title, x-label, y-label plt.title(('{} 10 year history').format(ticker)) plt.xlabel('Time') plt.ylabel('Price') plt.show() # Volume plot plt.figure(figsize=(20,3.3)) plt.plot(volume/1000000) plt.title(('Volume of {}').format(ticker)) plt.ylabel('Shares traded in millions') plt.xlabel('Time') # + # plot the volume data fig = plt.figure(figsize=(20,3.3)) plt.plot(volume) # - reqyahoo = requests.get('https://www.marketbeat.com/dividends/ex-dividend-date-list/') reqyahoo.status_code # Create a soup for BS4 to work: coverpage = reqyahoo.content yahoosoup = BeautifulSoup(coverpage) # + coverpage_stocklist = yahoosoup.find_all('td', class_='data-sort-value') stock_list = [] # + for i in np.arange(0, len(coverpage_stocklist)): stocks = coverpage_stocklist[i].get_text() stock_list.append(stocks) stock_list coverpage_stocklist # + def future_div_stocks(): # Establish a connection to our site reqyahoo = requests.get('https://www.marketbeat.com/dividends/ex-dividend-date-list/') print(reqyahoo.status_code) if reqyahoo.status_code: print('Connection sucessful...gathering data...') else: print('Connection was not established... 
Terminating process') # Gather our data coverpage = reqyahoo.content yahoosoup = BeautifulSoup(coverpage) # Defined data scrapers coverpage_stocklist = yahoosoup.find_all('div', class_='ticker-area') getco_names = yahoosoup.find_all('div', class_='title-area') getex_div_date = yahoosoup.find_all('td', class_='data-sort-value') # These need work getco_names = yahoosoup.find_all('div', class_='title-area') getex_div_date = yahoosoup.find_all('td', class_='data-sort-value') getpayout_freqs = yahoosoup.find_all('div', class_='ticker-area') getdiv_amount = yahoosoup.find_all('div', class_='ticker-area') getyeild = yahoosoup.find_all('div', class_='ticker-area') getex_div_date = yahoosoup.find_all('td', class_='data-sort-value') getrecord_date = yahoosoup.find_all('div', class_='ticker-area') getpayable_date = yahoosoup.find_all('div', class_='ticker-area') # empty lists and dataframe to store data future_dividends = pd.DataFrame() stock_list = [] co_name = [] payout_freq = [] div_amount = [] yeild = [] ex_div_date = [] record_date = [] payable_date = [] # For loop to gather our stock list for i in np.arange(0, len(coverpage_stocklist)): stocks = coverpage_stocklist[i].get_text() stock_list.append(stocks) for i in np.arange(0, len(getco_names)): co_names = getco_names[i].get_text() co_name.append(co_names) for i in np.arange(0, len(getex_div_date)): ex_dates = getex_div_date[i].get_text() ex_div_date.append(ex_dates) return stock_list, co_name, ex_div_date # + #future_div_stocks() # - table = yahoosoup.find('table') first_td = table.find_all('td') # + def future_div_stocks(): # Establish a connection to our site reqyahoo = requests.get('https://eresearch.fidelity.com/eresearch/conferenceCalls.jhtml?tab=dividends') print(reqyahoo.status_code) if reqyahoo.status_code: print('Connection sucessful...gathering data...') else: print('Connection was not established... 
Terminating process') # Gather our data coverpage = reqyahoo.content yahoosoup = BeautifulSoup(coverpage) # Defined data scrapers coverpage_stocklist = yahoosoup.find_all('td', class_='lft-rt-border center blue-links') getco_names = yahoosoup.find_all('th', class_='blue-links') getex_div_date = yahoosoup.find_all('td', class_='data-sort-value') getdiv_amount = yahoosoup.find_all('td', class_='right') # These need work getco_names = yahoosoup.find_all('div', class_='title-area') getex_div_date = yahoosoup.find_all('td', class_='data-sort-value') getpayout_freqs = yahoosoup.find_all('div', class_='ticker-area') getdiv_amount = yahoosoup.find_all('div', class_='ticker-area') getyeild = yahoosoup.find_all('div', class_='ticker-area') getex_div_date = yahoosoup.find_all('td', class_='data-sort-value') getrecord_date = yahoosoup.find_all('div', class_='ticker-area') getpayable_date = yahoosoup.find_all('div', class_='ticker-area') # empty lists and dataframe to store data future_dividends = pd.DataFrame() stock_list = [] co_name = [] payout_freq = [] div_amount = [] yeild = [] ex_div_date = [] record_date = [] payable_date = [] # For loop to gather our stock list for i in np.arange(0, len(coverpage_stocklist)): stocks = coverpage_stocklist[i].get_text().strip() stock_list.append(stocks) #for i in np.arange(0, len(getdiv_amount)): dividends = getdiv_amount[i].get_text().strip() div_amount.append(dividends) for i in np.arange(0, len(getex_div_date)): ex_dates = getex_div_date[i].get_text() ex_div_date.append(ex_dates) return stock_list, div_amount, ex_div_date # - future_div_stocks() # + ### DIVIDEND INFORMATION CALENDAR APP ### # Date and Time handling date = '4/16/2021' date_today = dt.date.today() format_today = date_today.strftime('%m/%d/%Y') csvformat_today = date_today.strftime('%m-%d-%Y') # URL and BS4 handling url = ('https://eresearch.fidelity.com/eresearch/conferenceCalls.jhtml?tab=dividends&begindate={}'.format(format_today)) geturl = requests.get(url) print(geturl.status_code) coverpage = geturl.content yahoosoup = BeautifulSoup(coverpage) # Ticker information ticker_list = [] def generate_report(): # Ticker information html_tickerinfo = yahoosoup.find_all('td', class_='lft-rt-border center blue-links') try: for symbol in html_tickerinfo: # print(html_tickerinfo) ticker_list.append(symbol.a.contents[0]) tickers_df = pd.DataFrame(ticker_list, columns=['Ticker']) except: for symbol in html_tickerinfo: # print(html_tickerinfo) print((symbol.a)) ticker_list.append(symbol.a) tickers_df = pd.DataFrame(ticker_list, columns=['Ticker']) ticker_col = pd.Series(tickers_df['Ticker']) #print(ticker_col) # Ex-dividend date information ex_div_dates_df = pd.DataFrame() len_tickers = len(tickers_df) ex_date_list = list(itertools.repeat(format_today, len_tickers)) ex_div_dates_df.insert(0, 'Ex-Date', ex_date_list) #ex_div_dates.append() # Dividend Amount information dividend_list = [] html_divs = yahoosoup.find_all('td', class_='right') for div in html_divs: dividend_list.append(div.contents[0]) dividend_df = pd.DataFrame(dividend_list, columns=['Dividends']) # Record Date information # Pay Date information pay_days = [] html_pay_dates = yahoosoup.find_all('td', class_='lft-rt-border') #for dates in html_pay_dates: #print(dates.td.con) # Join and merge all information to one dataframe frames = [tickers_df, dividend_df, ex_div_dates_df] masterframe = pd.concat(frames, axis=1) master_df = pd.DataFrame(masterframe) #return masterframe # Write the master dataframe to csv file # 
masterframe.to_csv(('CSVfiles/{}_Dividend_list').format(format_today)) masterframe.to_csv('CSVfiles/test{}.csv'.format(csvformat_today)) return master_df # Additional Financial information module #ticks = pd.DataFrame(ticker_col) #print(ticks.values) generate_report() # + ### DIVIDEND INFORMATION CALENDAR APP ### TEST ENV # Date and Time handling date = '4/16/2021' date_today = dt.date.today() format_today = date_today.strftime('%m/%d/%Y') # URL and BS4 handling url = ('https://eresearch.fidelity.com/eresearch/conferenceCalls.jhtml?tab=dividends&begindate={}'.format(format_today)) geturl = requests.get(url) print(geturl.status_code) coverpage = geturl.content yahoosoup = BeautifulSoup(coverpage) # Ticker information ticker_list = [] def generate_report(): # Ticker information html_tickerinfo = yahoosoup.find_all('td', class_='lft-rt-border center blue-links') try: for symbol in html_tickerinfo: # print(html_tickerinfo) ticker_list.append(symbol.a.contents[0]) tickers_df = pd.DataFrame(ticker_list, columns=['Ticker']) except: for symbol in html_tickerinfo: # print(html_tickerinfo) ticker_list.append(symbol.a) tickers_df = pd.DataFrame(ticker_list, columns=['Ticker']) # Ex-dividend date information ex_div_dates_df = pd.DataFrame() len_tickers = len(tickers_df) ex_date_list = list(itertools.repeat(format_today, len_tickers)) ex_div_dates_df.insert(0, 'Ex-Date', ex_date_list) print(len_tickers) #ex_div_dates.append() # Dividend Amount information dividend_list = [] html_divs = yahoosoup.find_all('td', class_='right') for div in html_divs: dividend_list.append(div.contents[0]) dividend_df = pd.DataFrame(dividend_list, columns=['Dividends']) # Record Date information # Pay Date information pay_days = [] html_pay_dates = yahoosoup.find_all('td', class_='lft-rt-border') #for dates in html_pay_dates: #print(dates.td.con) # Join and merge all information to one dataframe frames = [tickers_df, dividend_df, ex_div_dates_df] masterframe = pd.concat(frames, axis=1) return masterframe generate_report() # + masterdf = pd.DataFrame() date = '4/13/2021' url = ('https://eresearch.fidelity.com/eresearch/conferenceCalls.jhtml?tab=dividends&begindate={}'.format(date)) reqyahoo = requests.get(url) coverpage = reqyahoo.content yahoosoup = BeautifulSoup(coverpage) getdiv_amount = yahoosoup.find_all('td', class_='right') get_ticker = yahoosoup.find_all('td', class_='lft-rt-border center blue-links') url # - # Gets tickers for day ticker_list = [] def get_tickers(): html_tickerinfo = yahoosoup.find_all('td', class_='lft-rt-border center blue-links') for symbol in html_tickerinfo: ticker_list.append(symbol.a.contents[0]) return ticker_list get_tickers() # + # Gets ex-div date for tickers for day exdiv_list = [] def get_exdates(): html_exdiv_info = yahoosoup.find_all('td', class_='lft-rt-border') for date in html_exdiv_info: exdiv_list.append(date.contents[0]) return exdiv_list get_exdates() # - splits = extracting.split() listd = [] import re for n in np.arange(splits): dividend_fromstr = [float(s) for s in re.findall(r'-?\d+\.?\d*', splits[n])] listd.append(dividend_fromstr) # + ### WEEKLY EARNINGS AND REPORTING INFORMATION ### # List of companies reporting earnings this week # List of companies reporting earnings NEXT week # Options data showing market maker predictions and ranges of possibilty ### WEEKLY SEC FILING NEWS ### # Unusual & Unique Filings # Insider trades # Notes & Supplemental information # + import datetime from yahoo_earnings_calendar import YahooEarningsCalendar date_from = 
datetime.datetime.strptime( 'March 5 2020', '%b %d %Y %I:%M%p') date_to = datetime.datetime.strptime( 'March 30 2020 1:00PM', '%b %d %Y %I:%M%p') yec = YahooEarningsCalendar() #print(yec.earnings_on(date_from)) march_earnings = yec.earnings_between(date_from, date_to) # + import yahoo_fin.stock_info as si # Get Earnings history for ticker ticker = 'NVDA' # Download Earnings history stock_earnings = si.get_earnings_history(ticker) # Turn earnings list into dataframe earnings_frame = pd.DataFrame.from_dict(stock_earnings) # Get next earnings dates next_earnings = si.get_next_earnings_date(ticker) # Get eanrings data from specific date earnings_next_wk = si.get_earnings_for_date('04/20/2021') # put in dataframe earningsframes = pd.DataFrame(earnings_next_wk) # Get earnings with date range earnings_this_week = si.get_earnings_in_date_range('04/25/2021', '04/30/2021') # put in dataframe earnings_thiswk = pd.DataFrame(earnings_this_week) # + import yahoo_fin.stock_info as si earnings_this_week = si.get_earnings_in_date_range('04/25/2021', '04/30/2021') earnings_thiswk = pd.DataFrame(earnings_this_week) print(earnings_thiswk) # + # Weekly Earnings Calendar App # Add mkt cap colum to our df earnings_thiswk['Market Cap'] = 0 # Loop over tickers for ticker in earnings_thiswk['ticker']: tickers = yf.Ticker(i) print(tickers.info) # + ### Earnings this week def earnings_this_week(): earnings_this_week = si.get_earnings_in_date_range('05/10/2021', '05/16/2021') earnings_thiswk = pd.DataFrame(earnings_this_week) earnings_thiswk.to_csv('CSVfiles/earnings_for_thisweek.csv') return earnings_thiswk earnings_this_week() # + # handling and manipulating dates, weeks, months # Today today = dt.date.today() # one day one_day = dt.timedelta(days=1) # one week one_week = dt.timedelta(days=7) yesterday = today - one_day one_week_ahead = today + one_week print(yesterday) print(last_wk) # - today # + ### CALCULATING PROBABILITY OF DROP IN STOCK PRICE AFTER DIVIDEND def get_stock_history(stock): ticker = yf.Ticker(stock) div_history = ticker.dividends actions = ticker.actions print(actions) price_history = ticker.history() price_history.where(['Dividends'] > 0.0) return price_history get_stock_history('aapl') # - .1 + .2
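# Going back to the dividend-probability helper above: the line `price_history.where(['Dividends'] > 0.0)` compares a plain Python list with a float instead of indexing the DataFrame, so it does not perform the intended filtering. A minimal sketch of selecting only the ex-dividend days (assuming the default `yfinance` history output with a `Dividends` column):

def get_dividend_days(stock):
    # daily price history including a 'Dividends' column
    price_history = yf.Ticker(stock).history(period='10y')
    # keep only the rows on which a dividend was paid (ex-dividend dates)
    dividend_days = price_history[price_history['Dividends'] > 0]
    return dividend_days

get_dividend_days('AAPL')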
Work in Progress/Outlines.ipynb