Coin game of two corners (Greedy Approach) - GeeksforGeeks | 07 Apr, 2021
Consider a two-player coin game in which the players take turns. There is a row with an even number of coins, and on each turn a player picks a coin from either corner (end) of the row. The player who collects coins with the greater total value wins the game. Develop a strategy for the player making the first turn such that he/she never loses the game.
Note that the strategy of always picking the larger of the two corners may not work. In the following example, the first player loses the game by using that strategy.
Example:
18 20 15 30 10 14
First Player picks 18, now row of coins is
20 15 30 10 14
Second player picks 20, now row of coins is
15 30 10 14
First Player picks 15, now row of coins is
30 10 14
Second player picks 30, now row of coins is
10 14
First Player picks 14, now row of coins is
10
Second player picks 10, game over.
The total value collected by the second player (20 +
30 + 10 = 60) is more than that collected by the first
player (18 + 15 + 14 = 47). So the second player wins.
Note that this problem is different from Optimal Strategy for a Game | DP-31. There the target is to get the maximum value; here the target is simply not to lose. A greedy strategy works here. The idea is to compute the sum of the values of the coins at even positions and the sum at odd positions, and compare the two. If the sum of the even-positioned coins is higher, the player who moves first can always make sure that the other player is never able to pick an even-positioned coin. Similarly, if the sum of the odd-positioned coins is higher, he/she can make sure that the other player is never able to pick an odd-positioned coin.
Example:
18 20 15 30 10 14
Sum of odd-positioned coins = 18 + 15 + 10 = 43
Sum of even-positioned coins = 20 + 30 + 14 = 64.
Since the sum of the even-positioned coins is more, the first
player decides to collect all even-positioned coins. He first
picks 14; now the other player can only pick a coin
(10 or 18). Whichever is picked by the other player,
the first player again gets an opportunity to pick
an even-positioned coin and so blocks all even-positioned coins.
// C++ program to find the coins to be picked to make sure
// that we never lose.
#include <iostream>
using namespace std;

// Prints the coins (even- or odd-positioned) that the first
// player should collect from an array of coins of size n.
// Note that n must be even.
void printCoins(int arr[], int n)
{
    // Find sum of odd positioned coins
    int oddSum = 0;
    for (int i = 0; i < n; i += 2)
        oddSum += arr[i];

    // Find sum of even positioned coins
    int evenSum = 0;
    for (int i = 1; i < n; i += 2)
        evenSum += arr[i];

    // Print even or odd coins depending upon
    // which sum is greater.
    int start = ((oddSum > evenSum) ? 0 : 1);
    for (int i = start; i < n; i += 2)
        cout << arr[i] << " ";
}

// Driver program to test above function
int main()
{
    int arr1[] = { 8, 15, 3, 7 };
    int n = sizeof(arr1) / sizeof(arr1[0]);
    printCoins(arr1, n);
    cout << endl;

    int arr2[] = { 2, 2, 2, 2 };
    n = sizeof(arr2) / sizeof(arr2[0]);
    printCoins(arr2, n);
    cout << endl;

    int arr3[] = { 20, 30, 2, 2, 2, 10 };
    n = sizeof(arr3) / sizeof(arr3[0]);
    printCoins(arr3, n);

    return 0;
}
// Java program to find the coins to be
// picked to make sure that we never lose.
class GFG
{
    // Prints the coins (even- or odd-positioned)
    // that the first player should collect from
    // an array of coins of size n.
    // Note that n must be even.
    static void printCoins(int arr[], int n)
    {
        // Find sum of odd positioned coins
        int oddSum = 0;
        for (int i = 0; i < n; i += 2)
            oddSum += arr[i];

        // Find sum of even positioned coins
        int evenSum = 0;
        for (int i = 1; i < n; i += 2)
            evenSum += arr[i];

        // Print even or odd coins depending
        // upon which sum is greater.
        int start = ((oddSum > evenSum) ? 0 : 1);
        for (int i = start; i < n; i += 2)
            System.out.print(arr[i] + " ");
    }

    // Driver Code
    public static void main(String[] args)
    {
        int arr1[] = { 8, 15, 3, 7 };
        int n = arr1.length;
        printCoins(arr1, n);
        System.out.println();

        int arr2[] = { 2, 2, 2, 2 };
        n = arr2.length;
        printCoins(arr2, n);
        System.out.println();

        int arr3[] = { 20, 30, 2, 2, 2, 10 };
        n = arr3.length;
        printCoins(arr3, n);
    }
}

// This code is contributed by ChitraNayal
# Python3 program to find the coins
# to be picked to make sure that
# we never lose

# Prints the coins (even- or odd-positioned)
# that the first player should collect from
# an array of coins of size n.
# Note that n must be even.
def printCoins(arr, n):

    # Find sum of odd positioned coins
    oddSum = 0
    for i in range(0, n, 2):
        oddSum += arr[i]

    # Find sum of even positioned coins
    evenSum = 0
    for i in range(1, n, 2):
        evenSum += arr[i]

    # Print even or odd coins depending upon
    # which sum is greater.
    if oddSum > evenSum:
        start = 0
    else:
        start = 1

    for i in range(start, n, 2):
        print(arr[i], end = " ")

# Driver code
if __name__ == "__main__":

    arr1 = [8, 15, 3, 7]
    n = len(arr1)
    printCoins(arr1, n)
    print()

    arr2 = [2, 2, 2, 2]
    n = len(arr2)
    printCoins(arr2, n)
    print()

    arr3 = [20, 30, 2, 2, 2, 10]
    n = len(arr3)
    printCoins(arr3, n)

# This code is contributed by ANKITRAI1
// C# program to find the coins to be
// picked to make sure that we never lose.
using System;

class GFG
{
    // Prints the coins (even- or odd-positioned)
    // that the first player should collect from
    // an array of coins of size n.
    // Note that n must be even.
    static void printCoins(int[] arr, int n)
    {
        // Find sum of odd positioned coins
        int oddSum = 0;
        for (int i = 0; i < n; i += 2)
            oddSum += arr[i];

        // Find sum of even positioned coins
        int evenSum = 0;
        for (int i = 1; i < n; i += 2)
            evenSum += arr[i];

        // Print even or odd coins depending
        // upon which sum is greater.
        int start = ((oddSum > evenSum) ? 0 : 1);
        for (int i = start; i < n; i += 2)
            Console.Write(arr[i] + " ");
    }

    // Driver Code
    public static void Main()
    {
        int[] arr1 = { 8, 15, 3, 7 };
        int n = arr1.Length;
        printCoins(arr1, n);
        Console.Write("\n");

        int[] arr2 = { 2, 2, 2, 2 };
        n = arr2.Length;
        printCoins(arr2, n);
        Console.Write("\n");

        int[] arr3 = { 20, 30, 2, 2, 2, 10 };
        n = arr3.Length;
        printCoins(arr3, n);
    }
}

// This code is contributed by ChitraNayal
<?php
// PHP program to find the coins to be
// picked to make sure that we never lose.

// Prints the coins (even- or odd-positioned)
// that the first player should collect from
// an array of coins of size n.
// Note that n must be even.
function printCoins(&$arr, $n)
{
    // Find sum of odd positioned coins
    $oddSum = 0;
    for ($i = 0; $i < $n; $i += 2)
        $oddSum += $arr[$i];

    // Find sum of even positioned coins
    $evenSum = 0;
    for ($i = 1; $i < $n; $i += 2)
        $evenSum += $arr[$i];

    // Print even or odd coins depending
    // upon which sum is greater.
    $start = (($oddSum > $evenSum) ? 0 : 1);
    for ($i = $start; $i < $n; $i += 2)
        echo $arr[$i] . " ";
}

// Driver Code
$arr1 = array( 8, 15, 3, 7 );
$n = sizeof($arr1);
printCoins($arr1, $n);
echo "\n";

$arr2 = array( 2, 2, 2, 2 );
$n = sizeof($arr2);
printCoins($arr2, $n);
echo "\n";

$arr3 = array( 20, 30, 2, 2, 2, 10 );
$n = sizeof($arr3);
printCoins($arr3, $n);

// This code is contributed by ChitraNayal
?>
<script>

// Javascript program to find the coins to
// be picked to make sure that we never
// lose.

// Prints the coins (even- or odd-positioned)
// that the first player should collect from
// an array of coins of size n.
// Note that n must be even.
function printCoins(arr, n)
{
    // Find sum of odd positioned coins
    var oddSum = 0;
    for(var i = 0; i < n; i += 2)
        oddSum += arr[i];

    // Find sum of even positioned coins
    var evenSum = 0;
    for(var i = 1; i < n; i += 2)
        evenSum += arr[i];

    // Print even or odd coins depending upon
    // which sum is greater.
    var start = ((oddSum > evenSum) ? 0 : 1);
    for(var i = start; i < n; i += 2)
        document.write(arr[i] + " ");
}

// Driver code
var arr1 = [ 8, 15, 3, 7 ];
var n = arr1.length;
printCoins(arr1, n);
document.write("<br>");

var arr2 = [ 2, 2, 2, 2 ];
n = arr2.length;
printCoins(arr2, n);
document.write("<br>");

var arr3 = [ 20, 30, 2, 2, 2, 10 ];
n = arr3.length;
printCoins(arr3, n);

// This code is contributed by noob2000

</script>
15 7
2 2
30 2 10
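To see the parity strategy in action, the following Python3 sketch can help. It is not part of the original article; the helper name parity_strategy_total and the greedy opponent policy are illustrative assumptions. The first player commits to the even- or odd-positioned coins and, on every turn, takes the corner whose original index has the chosen parity; the opponent here greedily takes the larger corner, but any opponent policy could be plugged in and the first player would still collect the same coins.

# Minimal sketch (not from the original article): simulate the parity
# strategy for the first player against a greedy opponent.
def parity_strategy_total(coins):
    n = len(coins)
    odd_sum = sum(coins[0:n:2])    # coins at indices 0, 2, 4, ... (1st, 3rd, ... coins)
    even_sum = sum(coins[1:n:2])   # coins at indices 1, 3, 5, ... (2nd, 4th, ... coins)
    target = 0 if odd_sum > even_sum else 1   # index parity the first player will collect

    lo, hi = 0, n - 1
    first, second = 0, 0
    while lo <= hi:
        # First player's turn: exactly one corner has the chosen index parity.
        if lo % 2 == target:
            first += coins[lo]; lo += 1
        else:
            first += coins[hi]; hi -= 1
        if lo > hi:
            break
        # Opponent's turn: greedily take the larger corner.
        if coins[lo] >= coins[hi]:
            second += coins[lo]; lo += 1
        else:
            second += coins[hi]; hi -= 1
    return first, second

print(parity_strategy_total([18, 20, 15, 30, 10, 14]))   # (64, 43): the first player never loses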
Self-Balancing-Binary-Search-Trees (Comparisons) - GeeksforGeeks | 19 Jul, 2021
Self-Balancing Binary Search Trees are height-balanced binary search trees that automatically keep their height as small as possible when insertion and deletion operations are performed on the tree. The height is typically maintained in the order of log n, so that all operations take O(log n) time on average.
Examples: Red-Black Tree, AVL Tree.
Language Implementations: set and map in the C++ STL, TreeSet and TreeMap in Java. Most of these library implementations use a Red-Black Tree. The Python standard library does not provide a self-balancing BST; in Python, we can use the bisect module to keep a set of sorted data. We can also use PyPI modules like rbtree (an implementation of a red-black tree) and pyavl (an implementation of an AVL tree).
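As a small illustration of the bisect approach mentioned above (this snippet is not from the original article), a Python list can be kept sorted on insertion and then searched with binary search in O(log n). Note that, unlike a self-balancing BST, each insertion is O(n) because elements must be shifted.

# Minimal sketch: keeping sorted data with the standard bisect module.
import bisect

sorted_keys = []
for key in [20, 5, 15, 30, 10]:
    bisect.insort(sorted_keys, key)        # insert while keeping the list sorted (O(n) shift)

print(sorted_keys)                         # [5, 10, 15, 20, 30]

# Membership / successor queries via binary search, O(log n):
i = bisect.bisect_left(sorted_keys, 15)
print(i < len(sorted_keys) and sorted_keys[i] == 15)   # True -> 15 is present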
How do self-balancing trees maintain height? A typical operation performed by these trees is rotation. The following are the two basic operations that can be used to re-balance a BST without violating the BST property (keys(left) < key(root) < keys(right)): 1) Left Rotation and 2) Right Rotation. A minimal code sketch of both rotations follows the diagram below.
T1, T2 and T3 are subtrees of the tree
rooted with y (on the left side) or x (on
the right side)
y x
/ \ Right Rotation / \
x T3 - - - - - - - > T1 y
/ \ < - - - - - - - / \
T1 T2 Left Rotation T2 T3
Keys in both of the above trees follow the
following order
keys(T1) < key(x) < keys(T2) < key(y) < keys(T3)
So BST property is not violated anywhere.
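The following is a minimal Python3 sketch of these two rotations. It is not part of the original article; the Node class and the function names left_rotate/right_rotate are illustrative. Each rotation is a constant-time pointer rearrangement that preserves the key order shown above; a balancing scheme such as AVL or Red-Black decides when to apply it.

# Minimal sketch of the two rotations on a plain BST node.
class Node:
    def __init__(self, key, left=None, right=None):
        self.key, self.left, self.right = key, left, right

def right_rotate(y):
    # y has a left child x; subtree T2 = x.right moves under y (see diagram above).
    x = y.left
    y.left = x.right
    x.right = y
    return x          # x becomes the new subtree root

def left_rotate(x):
    # Mirror image: x has a right child y; subtree T2 = y.left moves under x.
    y = x.right
    x.right = y.left
    y.left = x
    return y          # y becomes the new subtree root

# Example: rotate the left-leaning chain 30 <- 20 <- 10 to the right.
root = Node(30, left=Node(20, left=Node(10)))
root = right_rotate(root)
print(root.key, root.left.key, root.right.key)   # 20 10 30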
We have already discussed the AVL Tree, Red-Black Tree and Splay Tree. In this article, we compare the efficiency of these trees.
java.util.zip.ZipOutputStream.putNextEntry() Method Example | The java.util.zip.ZipOutputStream.putNextEntry(ZipEntry e) method begins writing a new ZIP file entry and positions the stream to the start of the entry data. It closes the current entry if one is still active. The default compression method is used if no compression method was specified for the entry, and the current time is used if the entry has no set modification time.
Following is the declaration for java.util.zip.ZipOutputStream.putNextEntry(ZipEntry e) method.
public void putNextEntry(ZipEntry e)
throws IOException
e − the ZIP entry to be written.
ZipException − if a ZIP file error has occurred.
IOException − if an I/O error has occurred.
Create a file Hello.txt in the D:\test directory with the following content.
This is an example.
The following example shows the usage of java.util.zip.ZipOutputStream.putNextEntry(ZipEntry e) method.
package com.tutorialspoint;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Date;
import java.util.zip.Adler32;
import java.util.zip.CheckedOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;

public class ZipOutputStreamDemo {
   private static String SOURCE_FILE = "D:\\test\\Hello.txt";
   private static String TARGET_FILE = "D:\\test\\Hello.zip";

   public static void main(String[] args) {
      try {
         createZipFile();
         readZipFile();
      } catch(IOException ioe) {
         System.out.println("IOException : " + ioe);
      }
   }

   private static void createZipFile() throws IOException {
      FileOutputStream fout = new FileOutputStream(TARGET_FILE);
      CheckedOutputStream checksum = new CheckedOutputStream(fout, new Adler32());
      ZipOutputStream zout = new ZipOutputStream(checksum);

      FileInputStream fin = new FileInputStream(SOURCE_FILE);

      // Begin a new ZIP entry and copy the source file into it.
      ZipEntry zipEntry = new ZipEntry(SOURCE_FILE);
      zout.putNextEntry(zipEntry);
      int length;
      byte[] buffer = new byte[1024];
      while((length = fin.read(buffer)) > 0) {
         zout.write(buffer, 0, length);
      }

      zout.closeEntry();
      zout.finish();
      fin.close();
      zout.close();
   }

   private static void readZipFile() throws IOException {
      ZipInputStream zin = new ZipInputStream(new FileInputStream(TARGET_FILE));

      ZipEntry entry;
      while((entry = zin.getNextEntry()) != null) {
         System.out.printf("File: %s Modified on %TD %n",
            entry.getName(), new Date(entry.getTime()));
         extractFile(entry, zin);
         System.out.printf("Zip file %s extracted successfully.", SOURCE_FILE);
         zin.closeEntry();
      }
      zin.close();
   }

   private static void extractFile(final ZipEntry entry, ZipInputStream is)
      throws IOException {
      FileOutputStream fos = null;
      try {
         fos = new FileOutputStream(entry.getName());
         while(is.available() != 0) {
            fos.write(is.read());
         }
      } finally {
         // Close the extracted file even on error so the handle is released.
         if (fos != null) {
            fos.close();
         }
      }
   }
}
Let us compile and run the above program, this will produce the following result −
Zip File: D:\test\Hello.zip, Contains 1 file(s).
File: D:\test\Hello.txt Size 1026 Modified on 05/22/17
Zip file D:\test\Hello.txt extracted successfully.
Mathematics | Introduction and types of Relations - GeeksforGeeks | 28 Jun, 2021
A relation (or binary relation) R from set A to B is a subset of A × B, which can be defined as aRb ↔ (a,b) ∈ R ↔ R(a,b). A binary relation R on a single set A is defined as a subset of A × A. For two distinct sets A and B with cardinalities m and n, the maximum cardinality of a relation R from A to B is mn.
Domain and Range: if there are two sets A and B and a relation R from A to B, then the domain is defined as the set {a | (a,b) ∈ R for some b in B} and the range is defined as the set {b | (a,b) ∈ R for some a in A}.
Empty Relation: A relation R on a set A is called empty if the set A is the empty set.
Full Relation: A binary relation R from a set A to a set B is called full if R = A × B.
Reflexive Relation: A relation R on a set A is called reflexive if (a,a) ∈ R holds for every element a ∈ A, i.e. if set A = {a,b} then R = {(a,a), (b,b)} is a reflexive relation.
Irreflexive Relation: A relation R on a set A is called irreflexive if (a,a) ∉ R for every element a ∈ A, i.e. if set A = {a,b} then R = {(a,b), (b,a)} is an irreflexive relation.
Symmetric Relation: A relation R on a set A is called symmetric if (b,a) ∈ R holds whenever (a,b) ∈ R, i.e. the relation R = {(4,5),(5,4),(6,5),(5,6)} on set A = {4,5,6} is symmetric.
AntiSymmetric Relation: A relation R on a set A is called antisymmetric if (a,b) ∈ R and (b,a) ∈ R imply a = b, i.e. the relation R = {(a,b) | a ≤ b} is anti-symmetric since a ≤ b and b ≤ a imply a = b.
Transitive Relation: A relation R on a set A is called transitive if (a,b) ∈ R and (b,c) ∈ R imply (a,c) ∈ R for all a,b,c ∈ A, i.e. the relation R = {(1,2),(2,3),(1,3)} on set A = {1,2,3} is transitive.
Equivalence Relation: A relation is an equivalence relation if it is reflexive, symmetric, and transitive, i.e. the relation R = {(1,1),(2,2),(3,3),(1,2),(2,1),(2,3),(3,2),(1,3),(3,1)} on set A = {1,2,3} is an equivalence relation as it is reflexive, symmetric, and transitive.
Asymmetric Relation: An asymmetric relation is the opposite of a symmetric relation. A relation R on a set A is called asymmetric if (b,a) ∉ R whenever (a,b) ∈ R.
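The definitions above can be checked mechanically. The following Python3 sketch is not part of the original article and its function names are illustrative; it tests a relation, given as a set of ordered pairs, for the main properties.

# Minimal sketch: check properties of a relation R on a set A,
# with R given as a set of (a, b) pairs.
def is_reflexive(A, R):     return all((a, a) in R for a in A)
def is_irreflexive(A, R):   return all((a, a) not in R for a in A)
def is_symmetric(R):        return all((b, a) in R for (a, b) in R)
def is_antisymmetric(R):    return all(a == b for (a, b) in R if (b, a) in R)
def is_asymmetric(R):       return all((b, a) not in R for (a, b) in R)
def is_transitive(R):
    return all((a, d) in R
               for (a, b) in R
               for (c, d) in R if b == c)

A = {1, 2, 3}
# The equivalence relation from the example above.
R = {(1, 1), (2, 2), (3, 3), (1, 2), (2, 1), (2, 3), (3, 2), (1, 3), (3, 1)}
print(is_reflexive(A, R), is_symmetric(R), is_transitive(R))   # True True True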
Important Points:
1. Symmetric and anti-symmetric relations are not opposites, because a relation R can have both properties or neither.
2. A relation is asymmetric if and only if it is both anti-symmetric and irreflexive.
3. The number of different relations from a set with n elements to a set with m elements is 2^(mn).
Ex:
if R = {r1, r2, r3, ..., rn} and S = {s1, s2, s3, ..., sm}
then the Cartesian product of R and S is:
R X S = {(r1, s1), (r1, s2), (r1, s3), ........., (r1, sm),
         (r2, s1), (r2, s2), (r2, s3), ........., (r2, sm),
         .................
         (rn, s1), (rn, s2), (rn, s3), ........., (rn, sm)}
This set of ordered pairs contains mn pairs.
Each of these pairs can be present in a relation or absent from it.
So the total number of possible relations = 2^(mn)
4. Number of Reflexive Relations on a set with n elements: 2^(n(n-1)).
A relation has ordered pairs (a,b). Now a can be chosen in n ways and likewise b, so the set of possible ordered pairs contains n^2 pairs. For a reflexive relation, (a,a) must be present for every a, and there are n such pairs, so the number of freely chosen ordered pairs is n^2 - n. So the total number of reflexive relations is 2^(n(n-1)).
5. Number of Symmetric Relations on a set with n elements: 2^(n(n+1)/2).
A relation has ordered pairs (a,b). For a symmetric relation, if (a,b) is present in R, then (b,a) must also be present in R. In matrix form, if a_12 is present in the relation, then a_21 is also present, while each diagonal entry (a,a) can be included or not independently. So out of the n^2 positions there are only n(n+1)/2 independent choices (the n diagonal pairs plus n(n-1)/2 unordered off-diagonal pairs), and the total number of symmetric relations is 2^(n(n+1)/2).
6. Number of Anti-Symmetric Relations on a set with n elements: 2^n · 3^(n(n-1)/2).
A relation has ordered pairs (a,b). For an anti-symmetric relation, if (a,b) and (b,a) are both present in R, then a = b (that means only an element related to itself may appear together with its reverse). For the pairs (a,a), the total number of such pairs is n and each may be present or absent, giving 2^n possibilities.
For each pair of distinct elements a and b, either neither (a,b) nor (b,a) is present, or exactly one of them is present, so there are three possibilities per pair. The number of such unordered pairs is n(n-1)/2 (selecting two distinct elements from n without repetition); equivalently, it is the total number of ordered pairs minus the symmetric positions, n^2 - n(n+1)/2 = n(n-1)/2. These pairs therefore contribute 3^(n(n-1)/2), and the total number of anti-symmetric relations is 2^n · 3^(n(n-1)/2).
7. Number of Asymmetric Relations on a set with n elements: 3^(n(n-1)/2).
In asymmetric relations, an element a cannot be in relation with itself (i.e. there is no aRa for any a ∈ A). Apart from that, the counting is the same as for anti-symmetric relations (three choices for each pair (a,b), (b,a) with a ≠ b). Therefore there are 3^(n(n-1)/2) possible asymmetric relations.
8. Number of Irreflexive Relations on a set with n elements: 2^(n(n-1)).
A relation has ordered pairs (a,b). For an irreflexive relation, no (a,a) may appear for any element a; in this sense it is the opposite of a reflexive relation. Since the n pairs (a,a) must all be absent, the number of freely chosen ordered pairs is n^2 - n, so the total number of irreflexive relations is 2^(n(n-1)).
9. Number of Reflexive and Symmetric Relations on a set with n elements: 2^(n(n-1)/2).
A relation has ordered pairs (a,b). Reflexive and symmetric means every (a,a) is included in R, while each unordered pair {(a,b),(b,a)} with a ≠ b can be included or excluded as a unit. So the total number of reflexive and symmetric relations is 2^(n(n-1)/2).
This article is contributed by Nitika Bansal.
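The counting formulas above can be sanity-checked by brute force on a very small set. The following Python3 sketch is not from the original article; it enumerates all 2^(n^2) = 16 relations on a 2-element set and counts how many are reflexive, irreflexive, symmetric, anti-symmetric and asymmetric. The counts match 2^(n(n-1)) = 4, 2^(n(n-1)) = 4, 2^(n(n+1)/2) = 8, 2^n · 3^(n(n-1)/2) = 12 and 3^(n(n-1)/2) = 3 respectively.

# Minimal sketch: brute-force check of the counting formulas for n = 2.
from itertools import combinations

A = [1, 2]
pairs = [(a, b) for a in A for b in A]          # n^2 = 4 possible ordered pairs

counts = {"reflexive": 0, "irreflexive": 0, "symmetric": 0,
          "antisymmetric": 0, "asymmetric": 0}

# Enumerate all 2^(n^2) = 16 relations as subsets of A x A.
for k in range(len(pairs) + 1):
    for subset in combinations(pairs, k):
        R = set(subset)
        if all((a, a) in R for a in A):                    counts["reflexive"] += 1
        if all((a, a) not in R for a in A):                counts["irreflexive"] += 1
        if all((b, a) in R for (a, b) in R):               counts["symmetric"] += 1
        if all(a == b for (a, b) in R if (b, a) in R):     counts["antisymmetric"] += 1
        if all((b, a) not in R for (a, b) in R):           counts["asymmetric"] += 1

print(counts)   # {'reflexive': 4, 'irreflexive': 4, 'symmetric': 8,
                #  'antisymmetric': 12, 'asymmetric': 3}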
},
{
"code": null,
"e": 45733,
"s": 45661,
"text": "7. Number of Asymmetric Relations on a set with n elements : 3n(n-1)/2."
},
{
"code": null,
"e": 46004,
"s": 45733,
"text": "In Asymmetric Relations, element a can not be in relation with itself. (i.e. there is no aRa ∀ a∈A relation.) And Then it is same as Anti-Symmetric Relations.(i.e. you have three choice for pairs (a,b) (b,a)). Therefore there are 3n(n-1)/2 Asymmetric Relations possible."
},
{
"code": null,
"e": 46065,
"s": 46004,
"text": "8. Irreflexive Relations on a set with n elements : 2n(n-1)."
},
{
"code": null,
"e": 46447,
"s": 46065,
"text": "A relation has ordered pairs (a,b). For Irreflexive relation, no (a,a) holds for every element a in R. It is also opposite of reflexive relation.Now for a Irreflexive relation, (a,a) must not be present in these ordered pairs means total n pairs of (a,a) is not present in R, So number of ordered pairs will be n2-n pairs.So total number of reflexive relations is equal to 2n(n-1)."
},
{
"code": null,
"e": 46522,
"s": 46447,
"text": "9. Reflexive and symmetric Relations on a set with n elements : 2n(n-1)/2."
},
{
"code": null,
"e": 46889,
"s": 46522,
"text": "A relation has ordered pairs (a,b). Reflexive and symmetric Relations means (a,a) is included in R and (a,b)(b,a) pairs can be included or not. (In Symmetric relation for pair (a,b)(b,a) (considered as a pair). whether it is included in relation or not) So total number of Reflexive and symmetric Relations is 2n(n-1)/2 .This article is contributed by Nitika Bansal."
},
{
"code": null,
"e": 47013,
"s": 46889,
"text": "Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above"
},
{
"code": null,
"e": 47066,
"s": 47013,
"text": "Related Articles:Relations and their representations"
},
{
"code": null,
"e": 47078,
"s": 47066,
"text": "VaibhavRai3"
},
{
"code": null,
"e": 47102,
"s": 47078,
"text": "Engineering Mathematics"
},
{
"code": null,
"e": 47110,
"s": 47102,
"text": "GATE CS"
},
{
"code": null,
"e": 47208,
"s": 47110,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 47230,
"s": 47208,
"text": "Inequalities in LaTeX"
},
{
"code": null,
"e": 47251,
"s": 47230,
"text": "Activation Functions"
},
{
"code": null,
"e": 47274,
"s": 47251,
"text": "Arrow Symbols in LaTeX"
},
{
"code": null,
"e": 47324,
"s": 47274,
"text": "Newton's Divided Difference Interpolation Formula"
},
{
"code": null,
"e": 47347,
"s": 47324,
"text": "Set Notations in LaTeX"
},
{
"code": null,
"e": 47367,
"s": 47347,
"text": "Layers of OSI Model"
},
{
"code": null,
"e": 47391,
"s": 47367,
"text": "ACID Properties in DBMS"
},
{
"code": null,
"e": 47404,
"s": 47391,
"text": "TCP/IP Model"
},
{
"code": null,
"e": 47431,
"s": 47404,
"text": "Types of Operating Systems"
}
] |
All Features of ES2020 with Examples - GeeksforGeeks | 08 Jan, 2021
ES2020 is the 2020 edition of ECMAScript, the standard that JavaScript is based on. The most important changes introduced in ES2020 are the following:
BigInt is a new numeric primitive in JavaScript. A BigInt can store integers of arbitrary precision, including numbers greater than Number.MAX_SAFE_INTEGER (2^53 - 1). Before this feature, a programmer needed to use third-party libraries, which leads to more load time, more compile time and more parsing time.
Filename: index.js
Javascript
let max = Number.MAX_SAFE_INTEGER; console.log(max);// 9007199254740991 console.log(++max);// 9007199254740992 console.log(++max);// 9007199254740992 console.log(++max);// 9007199254740992 let big = 9007199254740991n; console.log(big)// 9007199254740991n console.log(++big);// 9007199254740992n console.log(++big);// 9007199254740993n console.log(++big);// 9007199254740994n console.log(typeof big);// bigint
Output:
9007199254740991
9007199254740992
9007199254740992
9007199254740992
9007199254740991n
9007199254740992n
9007199254740993n
9007199254740994n
bigint
Note: To create a BigInt, you have to add ‘n’ at the end of any integer and it will become a BigInt. It doesn’t matter whether the integer is less than or greater than MAX_SAFE_INTEGER.
This feature makes regex matching (or simple searching without a regex) easier when we have to deal with more than one match inside a string. The matchAll() method takes a single argument, either a regular expression or the value we want to search for inside the string, and returns an iterator over all matches.
Filename: index.js
Javascript
let gfg = "Geeks for Geeks"; for(iter of gfg.matchAll('e')) { console.log(iter[0])} // Using regexconst string = 'Magic hex numbers: DEADBEEF CAFE';const regex = /\b\p{ASCII_Hex_Digit}+\b/gu; for (const match of string.matchAll(regex)) { console.log(match[0]);}
Output:
e
e
e
e
DEADBEEF
CAFE
ECMAScript 2015 already introduced static import functionality. Before dynamic import, we could only import a script at the top of the code, but with dynamic import we can import a script anywhere and conditionally (e.g. you can use if/else to decide whether the script should be loaded or not).
Javascript
<script> /* Some sample code.... Now ask user for array and if user says yes then call it from another file */ let userDecision = confirm( "Do you want an array and print it"); if (userDecision == true) { let arr = import("./gfg.js").then((res) => { // [1, 2, 3, 4, 5, 6] console.log(res.default) }).catch((er) => { console.log(er) }) } else { console.log("Nothing to do") }</script>
Filename: gfg.js
Javascript
const x = () => { return [1,2, 3, 4, 5, 6]} export default x()
This is an amazing feature. There are many JavaScript environments such as the browser, Node.js and service workers, and each of these environments has its own global object: in the browser the global object is window, in Node.js it is global, and in a service worker it is self.
Filename: index.js
Javascript
let findGlobal = () => { if (typeof self !== 'undefined') { return self }; if (typeof window !== 'undefined') { return window }; if (typeof global !== 'undefined') { return global; }}; // Function callconsole.log(findGlobal());
Output:
Object [global] {
global: [Circular],
clearInterval: [Function: clearInterval],
....
....
....
setImmediate: [Function: setImmediate] {
[Symbol(nodejs.util.promisify.custom)]: [Function]
}
}
This method is lengthy and not very reliable, which is why ECMAScript introduced the globalThis keyword. Now we can get the global this value in a single line as shown below:
Filename: index.js
Javascript
console.log(globalThis);
Output:
Object [global] {
global: [Circular],
clearInterval: [Function: clearInterval],
....
....
....
setImmediate: [Function: setImmediate] {
[Symbol(nodejs.util.promisify.custom)]: [Function]
}
}
Sometimes with JavaScript promises we just want to know when all of the promises have settled, regardless of whether each one was fulfilled or rejected; we only require that none of them is still pending. To achieve this, ECMA2020 provides the Promise.allSettled() method.
Javascript
<script> const myProms = [ Promise.resolve("Hello"), Promise.resolve(1234), Promise.resolve("Some other Promise") ] Promise.allSettled(myProms).then((res) => { console.log(res) }).catch( (err) => { console.log(err) })</script>
Output:
This is a very useful feature introduced by ECMAScript. Using optional chaining you can access deeply nested properties without having to check that every intermediate property exists. Suppose we have a nested object and we are not sure about the property names at some of the levels. If we just use the dot (.) operator and one of the intermediate properties is missing, we get a TypeError when we try to read a property of undefined; by using the optional chaining operator (?.) the expression short-circuits and we get undefined instead of the error.
Filename: index.js
Javascript
let gfg = { user : { firstUser : { name: { firstName: "Geeks", lastName : "forGeeks" } } }} console.log(gfg.user.firstUser.name.firstName)// Geeks console.log(gfg.user.ANY_RANNDOM_STRING?.name.firstName)// undefined console.log(gfg.user.ANY_RANNDOM_STRING.name.firstName)// Uncaught TypeError: Cannot read property // 'name' of undefined
Output:
Geeks
undefined
Uncaught TypeError: Cannot read property 'name' of undefined....
JavaScript-Misc
JavaScript
Web Technologies
| [
{
"code": null,
"e": 26545,
"s": 26517,
"text": "\n08 Jan, 2021"
},
{
"code": null,
"e": 26674,
"s": 26545,
"text": "ES2020 is a continuation of ES’s (ECMAscript) JavaScript improvement. In the year 2020 ES’s important changes are the following:"
},
{
"code": null,
"e": 27008,
"s": 26674,
"text": "BigInt is a new numeric primitive in JavaScript. In BigInt type You can store integers that are arbitrary precision. BigInt can store numbers that are greater than max_safe_integer (253-1). Before this feature, a programmer needs to use some third party libraries which leads to more load time, more compile-time, more parsing time. "
},
{
"code": null,
"e": 27027,
"s": 27008,
"text": "Filename: index.js"
},
{
"code": null,
"e": 27038,
"s": 27027,
"text": "Javascript"
},
{
"code": "let max = Number.MAX_SAFE_INTEGER; console.log(max);// 9007199254740991 console.log(++max);// 9007199254740992 console.log(++max);// 9007199254740992 console.log(++max);// 9007199254740992 let big = 9007199254740991n; console.log(big)// 9007199254740991n console.log(++big);// 9007199254740992n console.log(++big);// 9007199254740993n console.log(++big);// 9007199254740994n console.log(typeof big);// bigint",
"e": 27467,
"s": 27038,
"text": null
},
{
"code": null,
"e": 27475,
"s": 27467,
"text": "Output:"
},
{
"code": null,
"e": 27625,
"s": 27475,
"text": "9007199254740991\n9007199254740992 \n9007199254740992 \n9007199254740992 \n9007199254740991n\n9007199254740992n\n9007199254740993n\n9007199254740994n\nbigint"
},
{
"code": null,
"e": 27800,
"s": 27625,
"text": "Note: To create a BigInt, you have to add ‘n’ at the end of any Integer and it will become a BigInt. Doesn’t matter the integer is less than or greater than MAX_SAFE_INTEGER."
},
{
"code": null,
"e": 28081,
"s": 27800,
"text": "This feature is made for making the regex (or simple searching without regex) matches easier where we have to deal with more than one match inside a String. The matchAll method takes an argument which is a regular expression or the value which we want to search inside the String."
},
{
"code": null,
"e": 28100,
"s": 28081,
"text": "Filename: index.js"
},
{
"code": null,
"e": 28111,
"s": 28100,
"text": "Javascript"
},
{
"code": "let gfg = \"Geeks for Geeks\"; for(iter of gfg.matchAll('e')) { console.log(iter[0])} // Using regexconst string = 'Magic hex numbers: DEADBEEF CAFE';const regex = /\\b\\p{ASCII_Hex_Digit}+\\b/gu; for (const match of string.matchAll(regex)) { console.log(match[0]);}",
"e": 28378,
"s": 28111,
"text": null
},
{
"code": null,
"e": 28387,
"s": 28378,
"text": "Output: "
},
{
"code": null,
"e": 28409,
"s": 28387,
"text": "e\ne\ne\ne\nDEADBEEF\nCAFE"
},
{
"code": null,
"e": 28715,
"s": 28409,
"text": "Earlier ECMA2015 already implemented the import functionality. Before Dynamic import, we only import a script on the top of the code but using dynamic import, we can import a script anywhere and according to your conditionals (e.g. you can use if/else to make sure if the script should be loaded or not). "
},
{
"code": null,
"e": 28726,
"s": 28715,
"text": "Javascript"
},
{
"code": "<script> /* Some sample code.... Now ask user for array and if user says yes the call it from another file */ let userDecision = confirm( \"Do you want an array and print it\"); if (userDecision == true) { let arr = import(\"./gfg.js\").then((res) => { // [1, 2, 3, 4, 5, 6] console.log(res.default) }).catch((er) => { console.log(er) }) } else { cnosole.log(\"Nothing to do\") }</script>",
"e": 29180,
"s": 28726,
"text": null
},
{
"code": null,
"e": 29197,
"s": 29180,
"text": "Filename: gfg.js"
},
{
"code": null,
"e": 29208,
"s": 29197,
"text": "Javascript"
},
{
"code": "const x = () => { return [1,2, 3, 4, 5, 6]} export default x()",
"e": 29275,
"s": 29208,
"text": null
},
{
"code": null,
"e": 29597,
"s": 29275,
"text": "This is an amazing feature. There are many JavaScript environments like node environment, browser environment, service worker environment and all of these environments has their own global this for example in the browser the global this is the window, for node environment it is Global and for service worker it is Self. "
},
{
"code": null,
"e": 29616,
"s": 29597,
"text": "Filename: index.js"
},
{
"code": null,
"e": 29627,
"s": 29616,
"text": "Javascript"
},
{
"code": "let findGlobal = () => { if (typeof self !== 'undefined') { return self }; if (typeof window !== 'undefined') { return window }; if (typeof global !== 'undefined') { return global; }}; // Function callconsole.log(findGlobal());",
"e": 29878,
"s": 29627,
"text": null
},
{
"code": null,
"e": 29887,
"s": 29878,
"text": "Output: "
},
{
"code": null,
"e": 30096,
"s": 29887,
"text": "Object [global] {\n global: [Circular],\n clearInterval: [Function: clearInterval],\n ....\n ....\n ....\n setImmediate: [Function: setImmediate] {\n [Symbol(nodejs.util.promisify.custom)]: [Function]\n }\n}"
},
{
"code": null,
"e": 30264,
"s": 30096,
"text": "This method is lengthy and not so accurate that’s why ECMA introduced the globalThis keyword and now we can find the global this value in a single line as shown below:"
},
{
"code": null,
"e": 30283,
"s": 30264,
"text": "Filename: index.js"
},
{
"code": null,
"e": 30294,
"s": 30283,
"text": "Javascript"
},
{
"code": "console.log(globalThis);",
"e": 30319,
"s": 30294,
"text": null
},
{
"code": null,
"e": 30328,
"s": 30319,
"text": "Output: "
},
{
"code": null,
"e": 30537,
"s": 30328,
"text": "Object [global] {\n global: [Circular],\n clearInterval: [Function: clearInterval],\n ....\n ....\n ....\n setImmediate: [Function: setImmediate] {\n [Symbol(nodejs.util.promisify.custom)]: [Function]\n }\n}"
},
{
"code": null,
"e": 30824,
"s": 30537,
"text": "Sometimes in JavaScript promises, we just want the outcome of all the promises, if it is settled or not. We don’t need the outcome of the promise whether it is resolved or rejected but the promise should not be pending state. To achieve this we have allSettled( ) function in ECMA2020."
},
{
"code": null,
"e": 30835,
"s": 30824,
"text": "Javascript"
},
{
"code": "<script> const myProms = [ Promise.resolve(\"Hello\"), Promise.resolve(1234), Promise.resolve(\"Some other Promise\") ] Promise.allSettled(myProms).then((res) => { console.log(res) }).catch( (err) => { console.log(err) })</script>",
"e": 31098,
"s": 30835,
"text": null
},
{
"code": null,
"e": 31107,
"s": 31098,
"text": "Output: "
},
{
"code": null,
"e": 31686,
"s": 31107,
"text": "This is a very useful feature that the ECMA introduced. Using optional chaining you can go deeper and deeper without knowing the property name of a nested object. Suppose we have a nested object, and we want to go deeper, and we are not sure what is the name of the property names at many steps then we can use the optional chaining operator (?.) if we just use the dot (.) to access the property names and accidentally we entered any wrong property name then we will get an error but by using optional chaining operator (?.) we will get undefined instead of the error message. "
},
{
"code": null,
"e": 31705,
"s": 31686,
"text": "Filename: index.js"
},
{
"code": null,
"e": 31716,
"s": 31705,
"text": "Javascript"
},
{
"code": "let gfg = { user : { firstUser : { name: { firstName: \"Geeks\", lastName : \"forGeeks\" } } }} console.log(gfg.user.firstUser.name.firstName)// Geeks console.log(gfg.user.ANY_RANNDOM_STRING?.name.firstName)// undefined console.log(gfg.user.ANY_RANNDOM_STRING.name.firstName)// Uncaught TypeError: Cannot read property // 'name' of undefined",
"e": 32129,
"s": 31716,
"text": null
},
{
"code": null,
"e": 32137,
"s": 32129,
"text": "Output:"
},
{
"code": null,
"e": 32218,
"s": 32137,
"text": "Geeks\nundefined\nUncaught TypeError: Cannot read property 'name' of undefined...."
},
{
"code": null,
"e": 32234,
"s": 32218,
"text": "JavaScript-Misc"
},
{
"code": null,
"e": 32245,
"s": 32234,
"text": "JavaScript"
},
{
"code": null,
"e": 32262,
"s": 32245,
"text": "Web Technologies"
},
{
"code": null,
"e": 32360,
"s": 32262,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 32400,
"s": 32360,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 32461,
"s": 32400,
"text": "Difference between var, let and const keywords in JavaScript"
},
{
"code": null,
"e": 32502,
"s": 32461,
"text": "Difference Between PUT and PATCH Request"
},
{
"code": null,
"e": 32524,
"s": 32502,
"text": "JavaScript | Promises"
},
{
"code": null,
"e": 32578,
"s": 32524,
"text": "How to get character array from string in JavaScript?"
},
{
"code": null,
"e": 32618,
"s": 32578,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 32651,
"s": 32618,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 32694,
"s": 32651,
"text": "How to fetch data from an API in ReactJS ?"
},
{
"code": null,
"e": 32744,
"s": 32694,
"text": "How to insert spaces/tabs in text using HTML/CSS?"
}
] |
Python | Imputation using the KNNimputer() - GeeksforGeeks | 05 Sep, 2020
KNNImputer is a scikit-learn class used to fill in (impute) the missing values in a dataset. It is a more useful method because it works on the basic approach of the KNN algorithm rather than the naive approach of filling all the values with the mean or the median. In this approach, we specify the number of nearest neighbours to consider, which is also known as the K parameter, and each missing value is then predicted from the mean of the values of those neighbours.
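To make the notion of "nearest" neighbours concrete: scikit-learn measures the distance between rows that contain missing values with a NaN-aware Euclidean metric. The short sketch below (the numbers are only illustrative and are not part of the original example) shows these pairwise distances directly:
import numpy as np
from sklearn.metrics.pairwise import nan_euclidean_distances

# Three rows with some missing entries; coordinates that are NaN in
# either row are ignored (and the distance is rescaled accordingly)
X = np.array([[80.0, 60.0, np.nan],
              [90.0, 65.0, 57.0],
              [np.nan, 56.0, 80.0]])

# Pairwise NaN-aware Euclidean distances between the rows
print(nan_euclidean_distances(X))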
It is implemented by the KNNImputer() class, which accepts the following arguments:
n_neighbors: the number of neighbouring data points (rows) to use for each missing value.
metric: the distance metric to be used for searching the neighbours. Values – {nan_euclidean, callable}, by default – nan_euclidean.
weights: determines how the neighbouring values are weighted. Values – {uniform, distance, callable}, by default – uniform.
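As a quick, hedged illustration of these arguments (the small matrix below is made up for the example), the imputer can be configured so that closer neighbours contribute more to the imputed value:
import numpy as np
from sklearn.impute import KNNImputer

X = [[1.0, 2.0, np.nan],
     [3.0, 4.0, 3.0],
     [np.nan, 6.0, 5.0],
     [8.0, 8.0, 7.0]]

# Closer neighbours get a larger say via weights="distance"
imputer = KNNImputer(n_neighbors=2, weights="distance",
                     metric="nan_euclidean")
print(imputer.fit_transform(X))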
Code: Python code to illustrate the KNNImputer class
# import necessary librariesimport numpy as npimport pandas as pd # import the KNNimputer classfrom sklearn.impute import KNNImputer # create dataset for marks of a studentdict = {'Maths':[80, 90, np.nan, 95], 'Chemistry': [60, 65, 56, np.nan], 'Physics':[np.nan, 57, 80, 78], 'Biology' : [78,83,67,np.nan]} # creating a data frame from the list Before_imputation = pd.DataFrame(dict)#print dataset before imputaionprint("Data Before performing imputation\n",Before_imputation) # create an object for KNNImputerimputer = KNNImputer(n_neighbors=2)After_imputation = imputer.fit_transform(Before_imputation)# print dataset after performing the operationprint("\n\nAfter performing imputation\n",After_imputation)
Output:
Data Before performing imputation
Maths Chemistry Physics Biology
0 80.0 60.0 NaN 78.0
1 90.0 65.0 57.0 83.0
2 NaN 56.0 80.0 67.0
3 95.0 NaN 78.0 NaN
After performing imputation
[[80. 60. 68.5 78. ]
[90. 65. 57. 83. ]
[87.5 56. 80. 67. ]
[95. 58. 78. 72.5]]
Note: After transforming, the data becomes a NumPy array.
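Since the transformer returns a NumPy array, a common follow-up step (a sketch reusing the same dictionary of marks as above) is to wrap the result back into a DataFrame so that the column names are preserved:
import numpy as np
import pandas as pd
from sklearn.impute import KNNImputer

marks = {'Maths': [80, 90, np.nan, 95],
         'Chemistry': [60, 65, 56, np.nan],
         'Physics': [np.nan, 57, 80, 78],
         'Biology': [78, 83, 67, np.nan]}
df = pd.DataFrame(marks)

imputer = KNNImputer(n_neighbors=2)
imputed = imputer.fit_transform(df)

# Rebuild a DataFrame with the original column names
df_imputed = pd.DataFrame(imputed, columns=df.columns)
print(df_imputed)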
Machine Learning
Python
Machine Learning
| [
{
"code": null,
"e": 25659,
"s": 25631,
"text": "\n05 Sep, 2020"
},
{
"code": null,
"e": 26103,
"s": 25659,
"text": "KNNimputer is a scikit-learn class used to fill out or predict the missing values in a dataset. It is a more useful method which works on the basic approach of the KNN algorithm rather than the naive approach of filling all the values with mean or the median. In this approach, we specify a distance from the missing values which is also known as the K parameter. The missing value will be predicted in reference to the mean of the neighbours."
},
{
"code": null,
"e": 26188,
"s": 26103,
"text": "It is implemented by the KNNimputer() method which contains the following arguments:"
},
{
"code": null,
"e": 26512,
"s": 26188,
"text": "n_neighbors: number of data points to include closer to the missing value.metric: the distance metric to be used for searching.values – {nan_euclidean. callable} by default – nan_euclideanweights: to determine on what basis should the neighboring values be treatedvalues -{uniform , distance, callable} by default- uniform."
},
{
"code": null,
"e": 26561,
"s": 26512,
"text": "Code: Python code to illustrate KNNimputor class"
},
{
"code": "# import necessary librariesimport numpy as npimport pandas as pd # import the KNNimputer classfrom sklearn.impute import KNNImputer # create dataset for marks of a studentdict = {'Maths':[80, 90, np.nan, 95], 'Chemistry': [60, 65, 56, np.nan], 'Physics':[np.nan, 57, 80, 78], 'Biology' : [78,83,67,np.nan]} # creating a data frame from the list Before_imputation = pd.DataFrame(dict)#print dataset before imputaionprint(\"Data Before performing imputation\\n\",Before_imputation) # create an object for KNNImputerimputer = KNNImputer(n_neighbors=2)After_imputation = imputer.fit_transform(Before_imputation)# print dataset after performing the operationprint(\"\\n\\nAfter performing imputation\\n\",After_imputation)",
"e": 27300,
"s": 26561,
"text": null
},
{
"code": null,
"e": 27308,
"s": 27300,
"text": "Output:"
},
{
"code": null,
"e": 27658,
"s": 27308,
"text": "Data Before performing imputation\n Maths Chemistry Physics Biology\n0 80.0 60.0 NaN 78.0\n1 90.0 65.0 57.0 83.0\n2 NaN 56.0 80.0 67.0\n3 95.0 NaN 78.0 NaN\n\n\nAfter performing imputation\n [[80. 60. 68.5 78. ]\n [90. 65. 57. 83. ]\n [87.5 56. 80. 67. ]\n [95. 58. 78. 72.5]]\n"
},
{
"code": null,
"e": 27715,
"s": 27658,
"text": "Note: After transforming the data becomes a numpy array."
},
{
"code": null,
"e": 27732,
"s": 27715,
"text": "Machine Learning"
},
{
"code": null,
"e": 27739,
"s": 27732,
"text": "Python"
},
{
"code": null,
"e": 27756,
"s": 27739,
"text": "Machine Learning"
},
{
"code": null,
"e": 27854,
"s": 27756,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 27877,
"s": 27854,
"text": "ML | Linear Regression"
},
{
"code": null,
"e": 27891,
"s": 27877,
"text": "Decision Tree"
},
{
"code": null,
"e": 27914,
"s": 27891,
"text": "Reinforcement learning"
},
{
"code": null,
"e": 27954,
"s": 27914,
"text": "Decision Tree Introduction with example"
},
{
"code": null,
"e": 27992,
"s": 27954,
"text": "Python | Decision tree implementation"
},
{
"code": null,
"e": 28020,
"s": 27992,
"text": "Read JSON file using Python"
},
{
"code": null,
"e": 28070,
"s": 28020,
"text": "Adding new column to existing DataFrame in Pandas"
},
{
"code": null,
"e": 28092,
"s": 28070,
"text": "Python map() function"
},
{
"code": null,
"e": 28136,
"s": 28092,
"text": "How to get column names in Pandas dataframe"
}
] |
Wine data set: A Classification Problem | by Ali Faghihnejad | Towards Data Science | The wine data set consists of 13 different parameters of wine such as alcohol and ash content which was measured for 178 wine samples. These wines were grown in the same region in Italy but derived from three different cultivars; therefore there are three different classes of wine. The goal here is to find a model that can predict the class of wine given the 13 measured parameters and find out the major differences among the three different classes. This is a classification problem and here I will describe four models and asses the accuracy of each model. Furthermore, I will use principal component analysis to identify and explore the differences among the three classes.
Multinomial Logistic Regression
As there are three classes of wine, we have to use multinomial logistic regression instead of logistic regression which is used when there are two classes. In order to do that, I use multinom function from the nnet package.
> dim(wine)[1] 178 14> attach(wine)> test=sample(178,45)> library(nnet)> LogReg=multinom(class~.,data=wine[-test,])> summary(LogReg)> Pre=predict(LogReg,wine[test,])> table(Pre,wine[test,]$class)
As we can see from Table 1, there are 5 misclassifications out of 45 observations; so the accuracy of the multinomial logistic regression model is 89%.
We can repeat the above process multiple times to get a more accurate estimate of the performance of Multinomial Logistic Regression model by following these commands:
> Accuracy=rep(0,50)> for (i in 1:50) {+ test=sample(178,45)+ LogReg=multinom(class~.,data=wine[-test,])+ Pre=predict(LogReg,wine[test,])+ Accuracy[i]=mean(Pre==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.944
Linear Discriminant Analysis (LDA)
LDA is useful when we have more than two classes and when the number of observations is small. LDA is also more stable when the distribution of the predictors is approximately normal in each class.
> library(MASS)> lda.fit=lda(class~.,data=wine[-test,])> lda.fit
The last command will generate more details about the model as shown in Table 2.
We then evaluate the performance of the model on test data:
> lda.pred=predict(lda.fit,wine[test,])> table(lda.pred$class,wine[test,]$class)
We can see from Table 3 that LDA has accuracy of 100% in predicting classes of test data.
We can also visualize classification of training data by LDA using below command and result is shown in Figure 1:
> plot(lda.fit)
As there are three classes in data set, only two linear discriminants are needed to classify each observation. Figure 1 shows plot of training data on LD1 and LD2 space and the corresponding class for each data point. LD1 and LD2 values are computed based on coefficients of LDA model.
We can repeat the above process multiple times to get a more accurate estimate of the performance of LDA model by following these commands:
> for (i in 1:50) {+ test=sample(178,45)+ lda.fit=lda(class~.,data=wine[-test,])+ lda.pred=predict(lda.fit,wine[test,])+ Accuracy[i]=mean(lda.pred$class==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.9844444
Quadratic Discriminant Analysis (QDA)
Another classifier is QDA model which has a similar syntax as LDA in R. We can run the process many times to get a more accurate estimate of the performance of the QDA model as follows:
> qda.fit=qda(class~.,data=wine[-test,])> qda.pred=predict(qda.fit,wine[test,])> table(qda.pred$class,wine[test,]$class)> for (i in 1:50) {+ test=sample(178,45)+ qda.fit=qda(class~.,data=wine[-test,])+ qda.pred=predict(qda.fit,wine[test,])+ Accuracy[i]=mean(qda.pred$class==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.9866667
K-Nearest Neighbors (KNN)
KNN is a non-parametric approach in which an observation is classified based on the class of its K-nearest neighbors. It’s a useful model when the decision boundary is non-linear but it will not tell us about which predictors are important.
> library(class)> knn.pred=knn(wine[-test,2:14],wine[test,2:14],wine[-test,]$class,k=1)> table(knn.pred,wine[test,]$class)> mean(knn.pred==wine[test,]$class)[1] 0.7777778
We can repeat the above process multiple times to get a more accurate estimate of the performance of KNN model by following these commands:
> for (i in 1:50){+ test=sample(178,45)+ knn.pred=knn(wine[-test,2:14],wine[test,2:14],wine[-test,]$class,k=1)+ Accuracy[i]=mean(knn.pred==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.7435556
We can repeat the same process for k=2 to 5 and results are shown in the middle column of Table 4.
We can see from the middle column of Table 4 that results of the KNN model are not very impressive. That is because KNN model uses Euclidean distance to measure the distance between two points and if features have different scales it can impact the model. As each of the 13 features have different scales, it is important to normalize data so that all features have the same range of values. We can re-run the KNN model after scaling data as follows:
> for (i in 1:50){+ test=sample(178,45)+ knn.pred=knn(scale(wine[-test,2:14]),scale(wine[test,2:14]),wine[-test,]$class,k=1)+ Accuracy[i]=mean(knn.pred==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.9382222
Results of the KNN model are summarized in Table 4, and we can see that scaling the data greatly improves the performance of the model.
The accuracy of different classification models on the wine data set is summarized in Table 5. LDA and QDA have the highest accuracy followed by KNN (k=5) model.
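For readers who prefer Python, a rough equivalent of this model comparison can be sketched with scikit-learn, which ships the same wine data set via load_wine. This is only an illustrative sketch, and the cross-validated accuracies will not match the R numbers above exactly:
from sklearn.datasets import load_wine
from sklearn.discriminant_analysis import (LinearDiscriminantAnalysis,
                                            QuadraticDiscriminantAnalysis)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score

X, y = load_wine(return_X_y=True)

models = {
    "LDA": LinearDiscriminantAnalysis(),
    "QDA": QuadraticDiscriminantAnalysis(),
    "KNN (k=5, scaled)": make_pipeline(StandardScaler(),
                                       KNeighborsClassifier(n_neighbors=5)),
}

# 5-fold cross-validated accuracy for each model
for name, model in models.items():
    scores = cross_val_score(model, X, y, cv=5)
    print(name, round(scores.mean(), 3))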
Principal Component Analysis (PCA)
The models that were described above can predict the class of wine based on 13 measured predictors. However, we are also interested to know what the major differences among each of these three classes are and what predictors are important. In order to do that, we can perform PCA which is a useful tool for exploratory data analysis.
> pr.out=prcomp(wine[,-1],scale=TRUE)> pr.out$rotation> biplot(pr.out,scale=0)> plot(pr.out$x[,1:2],col=wine$class)
The first two PC scores and the corresponding loading vectors are shown in Figure 2.
Figure 2 and Figure 3 show that data points are segregated into three different groups which corresponds to the three classes of wine. Class 1 and Class 3 have relatively the same PC2 scores but their PC1 score differs a lot. On the other hand, Class 2 has PC1 scores between that of Class 1 and Class 3 and its PC2 score is lower than the other two classes. We can further examine the difference between each class by looking into PC loading vectors (arrows in Figure 2). For example, the direction of ‘alcalinity of ash’ is towards high values of PC1 and Class 3 data points. Therefore, we can expect that Class 3 have high values of ‘alcalinity of ash’ followed by Class 2 and Class 1. We can do similar investigation for all the other 13 predictors. The results of such investigation are summarized in Tables 6 and 7 which show the major differences among the three classes of wine. These findings are in line with the results of LDA shown in Table 2.
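The same exploratory step can be reproduced in Python as well (again only a sketch, assuming the scikit-learn copy of the wine data): standardize the features, fit a two-component PCA, and inspect the explained variance together with the loading of each feature on PC1 and PC2:
import pandas as pd
from sklearn.datasets import load_wine
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

data = load_wine()
X_scaled = StandardScaler().fit_transform(data.data)

pca = PCA(n_components=2)
scores = pca.fit_transform(X_scaled)
print("Explained variance ratio:", pca.explained_variance_ratio_)

# Loadings: how strongly each original feature contributes to PC1/PC2
loadings = pd.DataFrame(pca.components_.T,
                        index=data.feature_names,
                        columns=["PC1", "PC2"])
print(loadings.sort_values("PC1"))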
Four classification methods were used to evaluate the accuracy of each model in predicting classes of wine. QDA and LDA have the highest accuracy followed by KNN and multinomial logistic regression. It is important to normalize data before applying KNN model to get accurate classification. Principal component analysis was used to identify the major differences between the three classes of wine.
UCI Machine Learning Repository: Wine Data Set. “ Creative Commons Attribution 4.0 International (CC BY 4.0) license”.
An Introduction to Statistical Learning (statlearning.com) | [
{
"code": null,
"e": 852,
"s": 172,
"text": "The wine data set consists of 13 different parameters of wine such as alcohol and ash content which was measured for 178 wine samples. These wines were grown in the same region in Italy but derived from three different cultivars; therefore there are three different classes of wine. The goal here is to find a model that can predict the class of wine given the 13 measured parameters and find out the major differences among the three different classes. This is a classification problem and here I will describe four models and asses the accuracy of each model. Furthermore, I will use principal component analysis to identify and explore the differences among the three classes."
},
{
"code": null,
"e": 884,
"s": 852,
"text": "Multinomial Logistic Regression"
},
{
"code": null,
"e": 1108,
"s": 884,
"text": "As there are three classes of wine, we have to use multinomial logistic regression instead of logistic regression which is used when there are two classes. In order to do that, I use multinom function from the nnet package."
},
{
"code": null,
"e": 1304,
"s": 1108,
"text": "> dim(wine)[1] 178 14> attach(wine)> test=sample(178,45)> library(nnet)> LogReg=multinom(class~.,data=wine[-test,])> summary(LogReg)> Pre=predict(LogReg,wine[test,])> table(Pre,wine[test,]$class)"
},
{
"code": null,
"e": 1456,
"s": 1304,
"text": "As we can see from Table 1, there are 5 misclassifications out of 45 observations; so the accuracy of the multinomial logistic regression model is 89%."
},
{
"code": null,
"e": 1624,
"s": 1456,
"text": "We can repeat the above process multiple times to get a more accurate estimate of the performance of Multinomial Logistic Regression model by following these commands:"
},
{
"code": null,
"e": 1834,
"s": 1624,
"text": "> Accuracy=rep(0,50)> for (i in 1:50) {+ test=sample(178,45)+ LogReg=multinom(class~.,data=wine[-test,])+ Pre=predict(LogReg,wine[test,])+ Accuracy[i]=mean(Pre==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.944"
},
{
"code": null,
"e": 1869,
"s": 1834,
"text": "Linear Discriminant Analysis (LDA)"
},
{
"code": null,
"e": 2050,
"s": 1869,
"text": "LDA is useful when we have more than two classes and when the number of observations is small. LDA is also more stable when distribution of the predictors are normal in each class."
},
{
"code": null,
"e": 2115,
"s": 2050,
"text": "> library(MASS)> lda.fit=lda(class~.,data=wine[-test,])> lda.fit"
},
{
"code": null,
"e": 2196,
"s": 2115,
"text": "The last command will generate more details about the model as shown in Table 2."
},
{
"code": null,
"e": 2256,
"s": 2196,
"text": "We then evaluate the performance of the model on test data:"
},
{
"code": null,
"e": 2337,
"s": 2256,
"text": "> lda.pred=predict(lda.fit,wine[test,])> table(lda.pred$class,wine[test,]$class)"
},
{
"code": null,
"e": 2427,
"s": 2337,
"text": "We can see from Table 3 that LDA has accuracy of 100% in predicting classes of test data."
},
{
"code": null,
"e": 2541,
"s": 2427,
"text": "We can also visualize classification of training data by LDA using below command and result is shown in Figure 1:"
},
{
"code": null,
"e": 2557,
"s": 2541,
"text": "> plot(lda.fit)"
},
{
"code": null,
"e": 2843,
"s": 2557,
"text": "As there are three classes in data set, only two linear discriminants are needed to classify each observation. Figure 1 shows plot of training data on LD1 and LD2 space and the corresponding class for each data point. LD1 and LD2 values are computed based on coefficients of LDA model."
},
{
"code": null,
"e": 2983,
"s": 2843,
"text": "We can repeat the above process multiple times to get a more accurate estimate of the performance of LDA model by following these commands:"
},
{
"code": null,
"e": 3190,
"s": 2983,
"text": "> for (i in 1:50) {+ test=sample(178,45)+ lda.fit=lda(class~.,data=wine[-test,])+ lda.pred=predict(lda.fit,wine[test,])+ Accuracy[i]=mean(lda.pred$class==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.9844444"
},
{
"code": null,
"e": 3228,
"s": 3190,
"text": "Quadratic Discriminant Analysis (QDA)"
},
{
"code": null,
"e": 3414,
"s": 3228,
"text": "Another classifier is QDA model which has a similar syntax as LDA in R. We can run the process many times to get a more accurate estimate of the performance of the QDA model as follows:"
},
{
"code": null,
"e": 3741,
"s": 3414,
"text": "> qda.fit=qda(class~.,data=wine[-test,])> qda.pred=predict(qda.fit,wine[test,])> table(qda.pred$class,wine[test,]$class)> for (i in 1:50) {+ test=sample(178,45)+ qda.fit=qda(class~.,data=wine[-test,])+ qda.pred=predict(qda.fit,wine[test,])+ Accuracy[i]=mean(qda.pred$class==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.9866667"
},
{
"code": null,
"e": 3767,
"s": 3741,
"text": "K-Nearest Neighbors (KNN)"
},
{
"code": null,
"e": 4008,
"s": 3767,
"text": "KNN is a non-parametric approach in which an observation is classified based on the class of its K-nearest neighbors. It’s a useful model when the decision boundary is non-linear but it will not tell us about which predictors are important."
},
{
"code": null,
"e": 4179,
"s": 4008,
"text": "> library(class)> knn.pred=knn(wine[-test,2:14],wine[test,2:14],wine[-test,]$class,k=1)> table(knn.pred,wine[test,]$class)> mean(knn.pred==wine[test,]$class)[1] 0.7777778"
},
{
"code": null,
"e": 4319,
"s": 4179,
"text": "We can repeat the above process multiple times to get a more accurate estimate of the performance of KNN model by following these commands:"
},
{
"code": null,
"e": 4511,
"s": 4319,
"text": "> for (i in 1:50){+ test=sample(178,45)+ knn.pred=knn(wine[-test,2:14],wine[test,2:14],wine[-test,]$class,k=1)+ Accuracy[i]=mean(knn.pred==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.7435556"
},
{
"code": null,
"e": 4610,
"s": 4511,
"text": "We can repeat the same process for k=2 to 5 and results are shown in the middle column of Table 4."
},
{
"code": null,
"e": 5061,
"s": 4610,
"text": "We can see from the middle column of Table 4 that results of the KNN model are not very impressive. That is because KNN model uses Euclidean distance to measure the distance between two points and if features have different scales it can impact the model. As each of the 13 features have different scales, it is important to normalize data so that all features have the same range of values. We can re-run the KNN model after scaling data as follows:"
},
{
"code": null,
"e": 5267,
"s": 5061,
"text": "> for (i in 1:50){+ test=sample(178,45)+ knn.pred=knn(scale(wine[-test,2:14]),scale(wine[test,2:14]),wine[-test,]$class,k=1)+ Accuracy[i]=mean(knn.pred==wine[test,]$class)+ }> sum(Accuracy)/50[1] 0.9382222"
},
{
"code": null,
"e": 5393,
"s": 5267,
"text": "Results of KNN model are summarized in Table 4 and we can see that scaling data greatly improve the performance of the model."
},
{
"code": null,
"e": 5555,
"s": 5393,
"text": "The accuracy of different classification models on the wine data set is summarized in Table 5. LDA and QDA have the highest accuracy followed by KNN (k=5) model."
},
{
"code": null,
"e": 5590,
"s": 5555,
"text": "Principal Component Analysis (PCA)"
},
{
"code": null,
"e": 5924,
"s": 5590,
"text": "The models that were described above can predict the class of wine based on 13 measured predictors. However, we are also interested to know what the major differences among each of these three classes are and what predictors are important. In order to do that, we can perform PCA which is a useful tool for exploratory data analysis."
},
{
"code": null,
"e": 6040,
"s": 5924,
"text": "> pr.out=prcomp(wine[,-1],scale=TRUE)> pr.out$rotation> biplot(pr.out,scale=0)> plot(pr.out$x[,1:2],col=wine$class)"
},
{
"code": null,
"e": 6125,
"s": 6040,
"text": "The first two PC scores and the corresponding loading vectors are shown in Figure 2."
},
{
"code": null,
"e": 7081,
"s": 6125,
"text": "Figure 2 and Figure 3 show that data points are segregated into three different groups which corresponds to the three classes of wine. Class 1 and Class 3 have relatively the same PC2 scores but their PC1 score differs a lot. On the other hand, Class 2 has PC1 scores between that of Class 1 and Class 3 and its PC2 score is lower than the other two classes. We can further examine the difference between each class by looking into PC loading vectors (arrows in Figure 2). For example, the direction of ‘alcalinity of ash’ is towards high values of PC1 and Class 3 data points. Therefore, we can expect that Class 3 have high values of ‘alcalinity of ash’ followed by Class 2 and Class 1. We can do similar investigation for all the other 13 predictors. The results of such investigation are summarized in Tables 6 and 7 which show the major differences among the three classes of wine. These findings are in line with the results of LDA shown in Table 2."
},
{
"code": null,
"e": 7479,
"s": 7081,
"text": "Four classification methods were used to evaluate the accuracy of each model in predicting classes of wine. QDA and LDA have the highest accuracy followed by KNN and multinomial logistic regression. It is important to normalize data before applying KNN model to get accurate classification. Principal component analysis was used to identify the major differences between the three classes of wine."
},
{
"code": null,
"e": 7598,
"s": 7479,
"text": "UCI Machine Learning Repository: Wine Data Set. “ Creative Commons Attribution 4.0 International (CC BY 4.0) license”."
}
] |
NumPy - Copies & Views | While executing the functions, some of them return a copy of the input array, while some return the view. When the contents are physically stored in another location, it is called Copy. If on the other hand, a different view of the same memory content is provided, we call it as View.
Simple assignments do not make a copy of the array object. Instead, the new name uses the same id() as the original array to access it. The id() function returns a unique identifier of a Python object, similar to a pointer in C.
Furthermore, any change made through either name gets reflected in the other. For example, changing the shape of one will change the shape of the other too.
import numpy as np
a = np.arange(6)
print 'Our array is:'
print a
print 'Applying id() function:'
print id(a)
print 'a is assigned to b:'
b = a
print b
print 'b has same id():'
print id(b)
print 'Change shape of b:'
b.shape = 3,2
print b
print 'Shape of a also gets changed:'
print a
It will produce the following output −
Our array is:
[0 1 2 3 4 5]
Applying id() function:
139747815479536
a is assigned to b:
[0 1 2 3 4 5]
b has same id():
139747815479536
Change shape of b:
[[0 1]
[2 3]
[4 5]]
Shape of a also gets changed:
[[0 1]
[2 3]
[4 5]]
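Because b is simply another name bound to the same object, changes to the data are shared as well. A small additional sketch (not part of the original example) makes this explicit:
import numpy as np

a = np.arange(6)
b = a             # simple assignment: no copy is made

print(b is a)     # True - both names refer to the same object

b[0] = 99         # modifying through b ...
print(a)          # ... is visible through a: [99  1  2  3  4  5]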
NumPy has the ndarray.view() method, which returns a new array object that looks at the same data as the original array. Unlike the earlier case, a change in the dimensions of the new array doesn’t change the dimensions of the original.
import numpy as np
# To begin with, a is 3X2 array
a = np.arange(6).reshape(3,2)
print 'Array a:'
print a
print 'Create view of a:'
b = a.view()
print b
print 'id() for both the arrays are different:'
print 'id() of a:'
print id(a)
print 'id() of b:'
print id(b)
# Change the shape of b. It does not change the shape of a
b.shape = 2,3
print 'Shape of b:'
print b
print 'Shape of a:'
print a
It will produce the following output −
Array a:
[[0 1]
[2 3]
[4 5]]
Create view of a:
[[0 1]
[2 3]
[4 5]]
id() for both the arrays are different:
id() of a:
140424307227264
id() of b:
140424151696288
Shape of b:
[[0 1 2]
[3 4 5]]
Shape of a:
[[0 1]
[2 3]
[4 5]]
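Although a view has its own shape, it still shares the underlying data buffer, so element assignments made through the view are visible in the original array. A brief sketch (not from the original example) to verify this:
import numpy as np

a = np.arange(6).reshape(3, 2)
b = a.view()

print(b.base is a)             # True - b is a view onto a's data
print(np.shares_memory(a, b))  # True - the buffer is shared

b[0, 0] = 100                  # writing through the view ...
print(a[0, 0])                 # ... changes the original: 100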
Slice of an array creates a view.
import numpy as np
a = np.array([[10,10], [2,3], [4,5]])
print 'Our array is:'
print a
print 'Create a slice:'
s = a[:, :2]
print s
It will produce the following output −
Our array is:
[[10 10]
[ 2 3]
[ 4 5]]
Create a slice:
[[10 10]
[ 2 3]
[ 4 5]]
The ndarray.copy() function creates a deep copy. It is a complete copy of the array and its data, and doesn’t share with the original array.
import numpy as np
a = np.array([[10,10], [2,3], [4,5]])
print 'Array a is:'
print a
print 'Create a deep copy of a:'
b = a.copy()
print 'Array b is:'
print b
#b does not share any memory of a
print 'Can we write b is a'
print b is a
print 'Change the contents of b:'
b[0,0] = 100
print 'Modified array b:'
print b
print 'a remains unchanged:'
print a
It will produce the following output −
Array a is:
[[10 10]
[ 2 3]
[ 4 5]]
Create a deep copy of a:
Array b is:
[[10 10]
[ 2 3]
[ 4 5]]
Can we write b is a
False
Change the contents of b:
Modified array b:
[[100 10]
[ 2 3]
[ 4 5]]
a remains unchanged:
[[10 10]
[ 2 3]
[ 4 5]]
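Conversely, a deep copy owns its own buffer, which can be checked in the same way (again a small sketch added for illustration):
import numpy as np

a = np.array([[10, 10], [2, 3], [4, 5]])
b = a.copy()

print(b.base is None)          # True - the copy owns its own data
print(np.shares_memory(a, b))  # False - nothing is shared with a

b[0, 0] = 100                  # changing the copy ...
print(a[0, 0])                 # ... leaves the original untouched: 10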
| [
{
"code": null,
"e": 2528,
"s": 2243,
"text": "While executing the functions, some of them return a copy of the input array, while some return the view. When the contents are physically stored in another location, it is called Copy. If on the other hand, a different view of the same memory content is provided, we call it as View."
},
{
"code": null,
"e": 2739,
"s": 2528,
"text": "Simple assignments do not make the copy of array object. Instead, it uses the same id() of the original array to access it. The id() returns a universal identifier of Python object, similar to the pointer in C."
},
{
"code": null,
"e": 2882,
"s": 2739,
"text": "Furthermore, any changes in either gets reflected in the other. For example, the changing shape of one will change the shape of the other too."
},
{
"code": null,
"e": 3192,
"s": 2882,
"text": "import numpy as np \na = np.arange(6) \n\nprint 'Our array is:' \nprint a \n\nprint 'Applying id() function:' \nprint id(a) \n\nprint 'a is assigned to b:' \nb = a \nprint b \n\nprint 'b has same id():' \nprint id(b) \n\nprint 'Change shape of b:' \nb.shape = 3,2 \nprint b \n\nprint 'Shape of a also gets changed:' \nprint a"
},
{
"code": null,
"e": 3231,
"s": 3192,
"text": "It will produce the following output −"
},
{
"code": null,
"e": 3464,
"s": 3231,
"text": "Our array is:\n[0 1 2 3 4 5]\n\nApplying id() function:\n139747815479536\n\na is assigned to b:\n[0 1 2 3 4 5]\nb has same id():\n139747815479536\n\nChange shape of b:\n[[0 1]\n [2 3]\n [4 5]]\n\nShape of a also gets changed:\n[[0 1]\n [2 3]\n [4 5]]\n"
},
{
"code": null,
"e": 3681,
"s": 3464,
"text": "NumPy has ndarray.view() method which is a new array object that looks at the same data of the original array. Unlike the earlier case, change in dimensions of the new array doesn’t change dimensions of the original."
},
{
"code": null,
"e": 4101,
"s": 3681,
"text": "import numpy as np \n# To begin with, a is 3X2 array \na = np.arange(6).reshape(3,2) \n\nprint 'Array a:' \nprint a \n\nprint 'Create view of a:' \nb = a.view() \nprint b \n\nprint 'id() for both the arrays are different:' \nprint 'id() of a:'\nprint id(a) \nprint 'id() of b:' \nprint id(b) \n\n# Change the shape of b. It does not change the shape of a \nb.shape = 2,3 \n\nprint 'Shape of b:' \nprint b \n\nprint 'Shape of a:' \nprint a"
},
{
"code": null,
"e": 4140,
"s": 4101,
"text": "It will produce the following output −"
},
{
"code": null,
"e": 4375,
"s": 4140,
"text": "Array a:\n[[0 1]\n [2 3]\n [4 5]]\n\nCreate view of a:\n[[0 1]\n [2 3]\n [4 5]]\n\nid() for both the arrays are different:\nid() of a:\n140424307227264\nid() of b:\n140424151696288\n\nShape of b:\n[[0 1 2]\n [3 4 5]]\n\nShape of a:\n[[0 1]\n [2 3]\n [4 5]]\n"
},
{
"code": null,
"e": 4409,
"s": 4375,
"text": "Slice of an array creates a view."
},
{
"code": null,
"e": 4551,
"s": 4409,
"text": "import numpy as np \na = np.array([[10,10], [2,3], [4,5]]) \n\nprint 'Our array is:' \nprint a \n\nprint 'Create a slice:' \ns = a[:, :2] \nprint s "
},
{
"code": null,
"e": 4590,
"s": 4551,
"text": "It will produce the following output −"
},
{
"code": null,
"e": 4674,
"s": 4590,
"text": "Our array is:\n[[10 10]\n [ 2 3]\n [ 4 5]]\n\nCreate a slice:\n[[10 10]\n [ 2 3]\n [ 4 5]]\n"
},
{
"code": null,
"e": 4815,
"s": 4674,
"text": "The ndarray.copy() function creates a deep copy. It is a complete copy of the array and its data, and doesn’t share with the original array."
},
{
"code": null,
"e": 5192,
"s": 4815,
"text": "import numpy as np \na = np.array([[10,10], [2,3], [4,5]]) \n\nprint 'Array a is:' \nprint a \n\nprint 'Create a deep copy of a:' \nb = a.copy() \nprint 'Array b is:' \nprint b \n\n#b does not share any memory of a \nprint 'Can we write b is a' \nprint b is a \n\nprint 'Change the contents of b:' \nb[0,0] = 100 \n\nprint 'Modified array b:' \nprint b \n\nprint 'a remains unchanged:' \nprint a"
},
{
"code": null,
"e": 5231,
"s": 5192,
"text": "It will produce the following output −"
},
{
"code": null,
"e": 5480,
"s": 5231,
"text": "Array a is:\n[[10 10]\n [ 2 3]\n [ 4 5]]\n\nCreate a deep copy of a:\nArray b is:\n[[10 10]\n [ 2 3]\n [ 4 5]]\nCan we write b is a\nFalse\n\nChange the contents of b:\nModified array b:\n[[100 10]\n [ 2 3]\n [ 4 5]]\n\na remains unchanged:\n[[10 10]\n [ 2 3]\n [ 4 5]]\n"
},
{
"code": null,
"e": 5513,
"s": 5480,
"text": "\n 63 Lectures \n 6 hours \n"
},
{
"code": null,
"e": 5530,
"s": 5513,
"text": " Abhilash Nelson"
},
{
"code": null,
"e": 5563,
"s": 5530,
"text": "\n 19 Lectures \n 8 hours \n"
},
{
"code": null,
"e": 5598,
"s": 5563,
"text": " DATAhill Solutions Srinivas Reddy"
},
{
"code": null,
"e": 5631,
"s": 5598,
"text": "\n 12 Lectures \n 3 hours \n"
},
{
"code": null,
"e": 5666,
"s": 5631,
"text": " DATAhill Solutions Srinivas Reddy"
},
{
"code": null,
"e": 5701,
"s": 5666,
"text": "\n 10 Lectures \n 2.5 hours \n"
},
{
"code": null,
"e": 5713,
"s": 5701,
"text": " Akbar Khan"
},
{
"code": null,
"e": 5746,
"s": 5713,
"text": "\n 20 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 5761,
"s": 5746,
"text": " Pruthviraja L"
},
{
"code": null,
"e": 5794,
"s": 5761,
"text": "\n 63 Lectures \n 6 hours \n"
},
{
"code": null,
"e": 5801,
"s": 5794,
"text": " Anmol"
},
{
"code": null,
"e": 5808,
"s": 5801,
"text": " Print"
},
{
"code": null,
"e": 5819,
"s": 5808,
"text": " Add Notes"
}
] |
Stateful model serving: how we accelerate inference using ONNX Runtime | by Lester Solbakken | Towards Data Science | By Lester Solbakken from Verizon Media and Pranav Sharma from Microsoft.
There’s a difference between stateless and stateful machine-learned model serving.
Stateless model serving is what one usually thinks about when using a machine-learned model in production. For instance, a web application handling live traffic can call out to a model server from somewhere in the serving stack. The output of this model service depends purely on the input. This is fine for many tasks, such as classification, text generation, object detection, and translation, where the model is evaluated once per query.
There are, however, some applications where the input is combined with stored or persisted data to generate a result. We call this stateful model evaluation. Applications such as search and recommendation need to evaluate models with a potentially large number of items for each query. A model server can quickly become a scalability bottleneck in these cases, regardless of how efficient the model inference is.
In other words, stateless model serving requires sending all necessary input data to the model. In stateful model serving, the model should be computed where the data is stored.
At Vespa.ai, we are concerned with efficient stateful model evaluation. Vespa.ai is an open-source platform for building applications that do real-time data processing over large data sets. Designed to be highly performant and web-scalable, it is used for such diverse tasks as search, personalization, recommendation, ads, auto-complete, image and similarity search, comment ranking, and even for finding love.
It has become increasingly important for us to be able to evaluate complex machine-learned models efficiently. Delivering low latency, fast inference and low serving cost is challenging while at the same time providing support for the various model training frameworks.
We eventually chose to leverage ONNX Runtime (ORT) for this task. ONNX Runtime is an accelerator for model inference. It has vastly increased Vespa.ai’s capacity for evaluating large models, both in performance and in the model types we support. ONNX Runtime’s capabilities within hardware acceleration and model optimizations, such as quantization, have enabled efficient evaluation of large NLP models like BERT and other Transformer models in Vespa.ai.
In this post, we’ll share our journey on why and how we eventually chose ONNX Runtime and share some of our experiences with it.
Vespa.ai has a rich history. Its lineage comes from a search engine born in 1997. Initially powering the web search at alltheweb.com, it was flexible enough to be used in various more specialized products, or verticals, such as document search, mobile search, yellow pages, and banking. This flexibility in being a vertical search platform eventually gave rise to its name, Vespa.
The technology was acquired by Yahoo in 2003. There, Vespa cemented itself as a core piece of technology that powers hundreds of applications, including many of Yahoo’s most essential services. We open-sourced Vespa in 2017 and today it serves hundreds of thousands of queries per second worldwide at any given time, with billions of content items for hundreds of millions of users.
Although Yahoo was eventually acquired by Verizon, it is interesting to note that our team has stayed remarkably stable over the years. Indeed, a few of the engineers that started working on that initial engine over 20 years ago are still here. Our team counts about 30 developers, and we are situated in Trondheim in Norway.
Building upon experience gained over many years, Vespa.ai has evolved substantially to become what it is today. It now stands as a battle-proven general engine for real-time computation over large data sets. It has many features that make it suitable for web-scale applications. It stores and indexes data with instant writes so that queries, selection, and processing over the data can be performed efficiently at serving time. It’s elastic and fault-tolerant, so nodes can be added, removed, or replaced live without losing data. It’s easy to configure, operate, and add custom logic. Importantly, it contains built-in capabilities for advanced computation, including machine learned models.
Vespa.ai is a distributed application consisting of stateless nodes and a set of stateful content nodes containing the data. A Vespa.ai application is fully defined in an application package. This is a single unit containing everything needed to set up an application, including all configuration, custom components, schemas, and machine-learned models. When the application package is deployed, the admin layer takes care of configuring all the services across all the system’s nodes. This includes distributing all models to all content nodes.
Application packages contain one or more document schemas. The schema primarily consists of:
The data fields for each document and how they should be stored and indexed.
The ranking profiles which define how each document should be scored during query handling.
The ranking profiles contain ranking expressions, which are mathematical expressions combining ranking features. Some features retrieve data from sources such as the query, stored data, or constants. Others compute or aggregate data in various ways. Ranking profiles support multi-phased evaluation, so a cheap model can be evaluated in the first phase and a more expensive model for the second. Both sparse and dense tensors are supported for more advanced computation.
After the application is deployed, it is ready to handle data writes and queries. Data feeds are first processed on the stateless layer before content is distributed (with redundancy) to the content nodes. Similarly, queries go through the stateless layer before being fanned out to the content nodes where data-dependent computation is handled. They return their results back to the stateless layer, where the globally best results are determined, and a response is ultimately returned.
A guiding principle in Vespa.ai is to move computation to the data rather than the other way around. Machine-learned models are automatically deployed to all content nodes and evaluated there for each query. This alleviates the cost of query-time data transportation. Also, as Vespa.ai takes care of distributing data to the content nodes and redistributing elastically, one can scale up computationally by adding more content nodes, thus distributing computation as well.
In summary, Vespa.ai offers ease of deployment, flexibility in combining many types of models and computations out of the box without any plugins or extensions, efficient evaluation without moving data around and a less complex system to maintain. This makes Vespa.ai an attractive platform.
In the last few years, it has become increasingly important for Vespa.ai to support various types of machine learned models from different frameworks. This led to us introducing initial support for ONNX models in 2018.
The Open Neural Network Exchange (ONNX) is an open standard for distributing machine learned models between different systems. The goal of ONNX is interoperability between model training frameworks and inference engines, avoiding any vendor lock-in. For instance, HuggingFace’s Transformer library includes export to ONNX, PyTorch has native ONNX export, and TensorFlow models can be converted to ONNX. From our perspective, supporting ONNX is obviously interesting as it would maximize the range of models we could support.
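As a quick illustration of that interoperability — the model, input shape, and file name below are placeholders and not taken from the original post — exporting a PyTorch model to ONNX can look roughly like this:
import torch
import torchvision

# Any torch.nn.Module with a traceable forward pass can be exported;
# resnet18 is used here purely as a stand-in example.
model = torchvision.models.resnet18().eval()
dummy_input = torch.randn(1, 3, 224, 224)

# torch.onnx.export traces the model with the dummy input and writes the
# resulting graph to a .onnx file that any ONNX-compatible engine can load.
torch.onnx.export(
    model,
    dummy_input,
    "model.onnx",
    input_names=["input"],
    output_names=["output"],
    opset_version=13,
)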
To support ONNX in Vespa.ai, we introduced a special onnx ranking feature. When used in a ranking expression this would instruct the framework to evaluate the ONNX model. This is one of the unique features of Vespa.ai, as one has the flexibility to combine results from various features and string models together. For instance, one could use a small, fast model in an early phase, and a more complex and computationally expensive model that only runs on the most promising candidates. For example:
document my_document {
    field text_embedding type tensor(x[769]) {
        indexing: attribute | index
        attribute {
            distance-metric: euclidean
        }
    }
    field text_tokens type tensor(d0[256]) {
        indexing: summary | attribute
    }
}

onnx-model my_model {
    file: files/my_model.onnx
    input input_0: ...
    input input_1: ...
    output output_0: ...
}

rank-profile my_profile {
    first-phase {
        expression: closeness(field, text_embedding)
    }
    second-phase {
        rerank-count: 10
        expression: onnx(my_model)
    }
}
This is an example of configuring Vespa.ai to calculate the Euclidean distance between a query vector and the stored text_embedding vector in the first stage. This is usually used together with an approximate nearest neighbor search. The top 10 candidates are sent to the ONNX model in the second stage. Note that this is per content node, so with 10 content nodes, the model effectively runs on 100 candidates.
The model is set up in the onnx-model section. The file refers to an ONNX model somewhere in the application package. Inputs to the model, while not actually shown here for brevity, can come from various sources such as constants, the query, a document, or some combination expressed through a user-defined function. While the outputs of models are tensors, the resulting value of a first or second phase expression needs to be a single scalar, as documents are sorted according to this score before being returned.
Our initial implementation of the onnx ranking feature was to import the ONNX model and convert the entire graph into native Vespa.ai expressions. This was feasible because of the flexibility of the various tensor operations Vespa.ai supports. For instance, a single neural network layer could be converted into a combination of tensor joins, reductions, and maps.
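Conceptually — and only as an illustration, not the actual Vespa.ai expression — such a converted layer computes something like the following NumPy sketch:
import numpy as np

def dense_layer(input_tensor, weights, bias):
    # An element-wise multiply (join) followed by a sum reduction over the
    # shared dimension, a bias add, and a sigmoid activation (map) -- the
    # same primitive operations Vespa.ai's tensor expressions provide.
    return 1.0 / (1.0 + np.exp(-(input_tensor @ weights + bias)))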
Here, weights and bias would be stored as constant tensors, whereas the input tensor could be retrieved either from the query, a document field, or some combination of both.
Initially, this worked fine. We implemented the various ONNX operators using the available tensor operations. However, we only supported a subset of the 150+ ONNX operators at first, as we considered that only certain types of models were viable for use in Vespa.ai due to its low-latency requirement. For instance, the ranking expression language does not support iterations, making it more challenging to implement operators used in convolutional or recurrent neural networks. Instead, we opted to continuously add operator support as new model types were used in Vespa.ai.
The advantage of this was that the various optimizations we introduced to our tensor evaluation engine to efficiently evaluate the models benefitted all other applications using tensors as well.
Unfortunately, we ran into problems as we started developing support for Transformer models. Our first attempt at supporting a 12-layer BERT-base model failed. This was a model converted from TensorFlow to ONNX. The evaluation result was incorrect, with relatively poor performance.
We spent significant efforts on this. Quite a few operators had to be rewritten due to, sometimes very subtle, edge cases. We introduced a dozen or so performance optimizations, to avoid doing silly stuff such as calculating the same expressions multiple times and allocating memory unnecessarily. Ultimately, we were able to increase performance by more than two orders of magnitude.
During this development we turned to ONNX Runtime for reference. ONNX Runtime is very easy to use:
import onnxruntime as ort

session = ort.InferenceSession("model.onnx")
session.run(output_names=[...], input_feed={...})
This was invaluable, providing us with a reference for correctness and a performance target.
At one point, we started toying with the idea of actually using ONNX Runtime directly for the model inference instead of converting it to Vespa.ai expressions. The Vespa.ai content node is written in C++, so this entailed integrating with the C++ interface of ONNX Runtime. It should be mentioned that adding dependencies to Vespa.ai is not something we often do, as we prefer to avoid dependencies and thus own the entire stack.
Within a couple of weeks, we had a proof of concept up and running which showed a lot of promise. So we decided to go ahead and start using ONNX Runtime to evaluate all ONNX models in Vespa.ai.
This proved to be a game-changer for us. It vastly increases the capabilities of evaluating large deep-learning models in Vespa.ai in terms of model types we support and evaluation performance. We can leverage ONNX Runtime’s use of MLAS, a compute library containing processor-optimized kernels. ONNX Runtime also contains model-specific optimizations for BERT models (such as multi-head attention node fusion) and makes it easy to evaluate precision-reduced models by quantization for even more efficient inference.
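As a small sketch of what that looks like in practice (the file names here are illustrative), ONNX Runtime’s quantization tooling can rewrite a model’s weights to 8-bit integers in a single call:
from onnxruntime.quantization import quantize_dynamic, QuantType

# Dynamic quantization stores the weights of supported operators as int8
# and inserts the corresponding (de)quantization steps into the graph.
quantize_dynamic(
    model_input="model.onnx",
    model_output="model-int8.onnx",
    weight_type=QuantType.QInt8,
)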
Consider the following:
onnx-model my_model {
    file: files/my_model.onnx
    input input_0: query(my_query_input)
    input input_1: attribute(my_doc_field)
}

rank-profile my_profile {
    first-phase {
        expression: sum( onnx(my_model) )
    }
}
Here we have a single ONNX model that has two inputs. During application deployment, Vespa.ai distributes this ONNX model to all content nodes. There, the ranking expressions are parsed, and the feature executors that implement the ranking features are set up in preparation for handling traffic. Here we have 4 features:
query(...) which retrieves a tensor from the query.
attribute(...) which retrieves a tensor from a field stored in the document.
onnx(...) which evaluates the ONNX model.
sum(...) which reduces and sums the argument tensor to a single scalar value.
These features are wired together during initialization, so the outputs of query and attribute are used as inputs to onnx, and the output of the onnx feature is the input to the sum feature. The onnx feature basically sets up ONNX Runtime to evaluate the model.
Vespa.ai’s scoring framework is written in C++, so we use the C/C++ API provided by ONNX Runtime. While the integration with ONNX Runtime worked smoothly out of the box, there are two areas worth mentioning here: multi-threading and input/output tensor allocations.
Multi-threading
During setup, we initialize an ONNX Runtime session for each onnx feature and thread:
#include <onnxruntime/onnxruntime_cxx_api.h>

Ort::Env shared_env;

Ort::SessionOptions options;
options.SetIntraOpNumThreads(1);
options.SetInterOpNumThreads(1);
options.SetGraphOptimizationLevel(ORT_ENABLE_ALL);

Ort::Session session = Ort::Session(shared_env, "local_file_path", options);
The session includes options for thread management. ONNX Runtime supports 2 modes of execution: sequential and parallel. This controls whether the operators in a graph run sequentially or in parallel. Parallel execution of operators is scheduled on an inter-op thread pool. The execution of an individual operator is parallelized using an intra-op thread pool. A heavily home-optimized variant of an Eigen thread pool is used for inter-op parallelism, while OpenMP is used for intra-op.
Vespa.ai handles several queries in parallel. In addition, Vespa.ai can be configured to use several threads per query. Because of this, Vespa.ai needs to tightly manage thread usage. Using additional threads inside an ONNX Runtime session causes system-level throughput to become unpredictable, with large deviations in performance. Since Vespa.ai has thread control outside of ONNX Runtime, we need to instruct ONNX Runtime to only use a single thread. By ensuring that the total number of threads does not exceed the number of physical cores in a machine, we can improve cache utilization. Vespa also supports processor pinning.
As we instruct ONNX Runtime to, in effect, run sequentially, inference times increase but total throughput also increases. For instance, we measured a 50% improvement in throughput on a BERT ranking application. We have not yet exposed ONNX Runtime’s thread management settings for cases where users would like to tune this themselves. This is an option we might consider in the future. In that case, each session having its own set of thread pools would be inefficient. However, ONNX Runtime provides an option to share thread pools between sessions. This is achieved using the CreateEnvWithGlobalThreadPools C API to set up the shared_env object, which in Vespa.ai is shared between all feature executors.
When we started using ONNX Runtime, its C++ distribution was bundled with OpenMP. This was problematic for us as the intra-operation thread setting was overridden by OpenMP, so we ended up compiling our own ONNX Runtime without OpenMP enabled. However, starting from version 1.6, ONNX Runtime ships a version without OpenMP.
Input and output tensors
As much as possible, memory allocation and ownership of input and output tensors happen within Vespa.ai. Consider the following types:
std::vector<const char *> input_names;
std::vector<const char *> output_names;
std::vector<Ort::Value> input_values;
std::vector<Ort::Value> output_values;
The input values come from other ranking features using Vespa.ai’s tensor framework. The values in the input vector are wrappers for Vespa.ai tensors. So ONNX Runtime accepts the memory layout from Vespa.ai without copying to internal buffers. The values in the output vector are pre-allocated ONNX Runtime vectors which are wrapped when used subsequently in other ranking features.
We use these directly when evaluating the model:
Ort::RunOptions run_opts(nullptr);
session.Run(run_opts,
            input_names.data(), input_values.data(), input_values.size(),
            output_names.data(), output_values.data(), output_values.size());
This zero-copying of tensors is obviously desirable from a performance perspective. This works for outputs as tensors in Vespa.ai currently are fixed size, meaning that the dimensions are known during application deployment. So even though models accept inputs with dynamic sizes, from a Vespa.ai perspective, they must currently be fixed. Supporting dynamic sizes is future work.
One limitation here is that Vespa.ai currently only supports double and float value types in tensors. If possible, Vespa.ai takes care of converting to the type that ONNX Runtime expects. For instance, inputs to Transformer models are token sequences usually of type int64. Since Vespa.ai does not currently support int types, they must be represented as for instance float32. Converting from float32 to int64 can be lossy, but we haven’t met any inaccuracies yet. Models that accept strings as input are not yet supported in Vespa.ai.
Integrating with ONNX Runtime was relatively painless. Even though we initially had to compile our own ONNX Runtime distribution due to OpenMP, we had no significant issues with dependencies. We are looking forward to the next release so we don’t have to do this.
While ONNX Runtime’s C/C++ API documentation is currently relatively scarce, we found it to be sufficient. The ONNX Runtime API is concrete, clean, and works as one would expect. We didn’t have any issues here at all, really.
All in all, our experience with ONNX Runtime has been great. It has shown itself to be a fantastic fit for Vespa.ai, delivering superior performance while being easy to work with. One example here is the BERT model optimizations in ONNX Runtime. Also of particular note is the evaluation of quantized models, which would have been laborious to implement in Vespa.ai given the current tensor types we support.
Going forward, there are other features of ONNX Runtime we would like to experiment with. One of these is GPU support. While we are currently unsure if this is beneficial from a ranking point of view, there are other cases where this is interesting. One example is calculating vector representations of documents during data writing from a Transformer model.
Vespa.ai’s use case so far has mostly been focused on natural language understanding using Transformers such as BERT. Exporting a HuggingFace model to ONNX is easy and using it in Vespa is straightforward. ONNX Runtime was essential for us when implementing an open-domain question-answering application. Also, quantization had the effect of drastically increasing performance of this application, where a key takeaway there was that a larger model with weights with reduced precision outperformed smaller models with normal precision.
We’re excited to see what our users will use this for in the future.
Can I define more than one public class in a Java package? | No, while defining multiple classes in a single Java file, you need to make sure that only one class among them is public. If you have more than one public class in a single file, a compile-time error will be generated.
In the following example, we have two classes, Student and AccessData. Both of them are defined in the same file and both are declared public.
import java.util.Scanner;
public class Student {
private String name;
private int age;
Student(){
this.name = "Rama";
this.age = 29;
}
Student(String name, int age){
this.name = name;
this.age = age;
}
public void display() {
System.out.println("name: "+this.name);
System.out.println("age: "+this.age);
}
}
public class AccessData{
public static void main(String args[]) {
//Reading values from user
Scanner sc = new Scanner(System.in);
System.out.println("Enter the name of the student: ");
String name = sc.nextLine();
System.out.println("Enter the age of the student: ");
int age = sc.nextInt();
Student obj1 = new Student(name, age);
obj1.display();
Student obj2 = new Student();
obj2.display();
}
}
On compiling, the above program generates the following compile-time error.
AccessData.java:2: error: class Student is public, should be declared in a file named Student.java
public class Student {
^
1 error
To resolve this, you either need to move one of the classes into a separate file, or:
Remove the public declaration before the class that doesn’t contain a public static void main(String[] args) method.
Name the file with the class name that contains main method.
In this case, remove the public before the Student class. Name the file as “AccessData.java”.
GPT-3 Explained. Understanding Transformer-Based... | by Rohan Jagtap | Towards Data Science | In this article, we’ll be discussing the renowned GPT-3 model proposed in the paper “Language Models are Few-Shot Learners” by OpenAI. It is the successor of GPT-2, which has a very similar architecture to that of GPT-3.
If you’re unaware of GPT-2, consider giving my article on GPT-2 a read, as most of GPT-3 is based on it and would help in understanding the model better.
Going back to GPT-2, it is essentially an autoregressive model based on the Transformer architecture (Vaswani et al.). But the novelty of GPT-2 lies in its pre-training approach.
The pre-training leverages multi-task learning at a dataset-level. It basically means that the input tells the model to perform a specific NLP task.
For example, a translation example can be of the format, “translate to french, <english text>, <french text>” in the original document itself. Or a reading comprehension task sample could be of the format, “answer the given question using, <document>, <question>, <answer>.”
— GPT-2 Blog
They call this zero-shot task-transfer or meta-learning, or in-context learning. This way, the model need not be fine-tuned on downstream NLP tasks, which is a step towards the unification of models and general intelligence.
GPT-3 is based on the same principle of in-context learning, but with some improvements in the model and the overall approach. The paper also addresses the issues with this approach and tries to achieve state-of-the-art results. We will see this in the upcoming sections.
As said earlier, GPT-3 is also based on the idea of in-context learning. This is shown in the above figure. Obviously, there is no direct “inner loop” of training on the sequences. For example, a sequence can be:
5 + 8 = 13, 7 + 2 = 9, 1 + 0 = 1, and so on...
And the model will be trained on the autoregressive objective, i.e., given the previous tokens, maximize the probability of the next. So in this way, it can capture patterns from the input directly.
The thought is simple; humans do not train on large datasets for each and every task. Sometimes,
“please tell me if this sentence describes something happy or something sad”
is enough to give us a context of what is expected. Or for someone naive,
“this sounds happy, this sounds sad, how do you think this sounds?”
is sufficient to make them understand the task at a reasonable competency.
Although the concept is promising, previous approaches like GPT-2 have far more inferior results than the state-of-the-art, which is mostly based on the pretraining-finetuning approaches.
Recent progress in Transformer-based models shows that scaling the model higher has substantially improved the fine-tuning results. And it really makes sense to scale the model higher in meta-learning objectives since the model captures many skills and tasks simultaneously and within the same model capacity (Note that in a fine-tuning setting, a new model is trained for each downstream task).
Hence, the authors trained a 175 BILLION parameter model!
It has at least 10x more parameters than the previous biggest model.
The architecture is pretty much the same as GPT-2, just scaled up by a huge factor. It includes custom weights initialization, pre-normalization, and byte-pair encoding. I have covered this in my article on GPT-2. Consider giving it a read if you’re interested.
Apart from this, some ideas are taken from Sparse Transformer, which adds a few modifications to the attention computation to reduce the complexity and support longer sequences. Basically, in full (dense) attention, every token attends to every other token in the sequence, which results in O(n²) space, i.e., it scales quadratically with sequence length. To overcome this, Sparse Transformer suggests that every token attend only a subset Aᵢ of positions in the sequence, where each Aᵢ contains only earlier positions (preserving the autoregressive property) and grows much more slowly than n — on the order of √n — bringing the overall attention cost down to roughly O(n√n).
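To make this concrete, the following NumPy sketch (illustrative only, not taken from the original article) builds one such factorized attention mask, where each position attends a short local window plus every stride-th earlier position, so each row of the mask covers on the order of √n positions instead of n:
import numpy as np

def sparse_attention_mask(n, stride):
    # mask[i, j] is True if query position i is allowed to attend key j.
    mask = np.zeros((n, n), dtype=bool)
    for i in range(n):
        mask[i, max(0, i - stride + 1):i + 1] = True    # local window
        mask[i, stride - 1:i + 1:stride] = True          # strided positions
    return mask

n, stride = 16, 4           # stride chosen around sqrt(n)
mask = sparse_attention_mask(n, stride)
print(mask.sum(axis=1))     # each row attends only a handful of positions, not n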
Next comes making efficient choices for the subsets (A). Further discussion on this model is out of this article’s scope. You can refer to the Sparse Transformer paper if you’re interested.
GPT-3 alternates between dense and sparse attention patterns. However, it is not clear how exactly this alternating is done, but presumably, it’s either between layers or between residual blocks.
Moreover, the authors have trained GPT-3 in 8 different sizes to study the dependence of model performance on model size. Following are the configurations of these 8 models:
Additionally, the model is shared in the depth as well as in the width dimension across GPUs to minimize data transfer between the nodes.
This is the part we are interested in the most. GPT-3 is evaluated on more than two dozen datasets. For each of these tasks, it is evaluated for 3 settings — zero-shot, one-shot, and few-shot. We will see what these are, and we’ll compare them with the fine-tuning approach in this section.
In this approach, we first pre-train a model (mostly on an autoregressive or a cloze objective), which helps the model capture general patterns in the language; then, we retrain it separately for specific downstream NLP tasks. In the figure above, the model is fine-tuned for a translation task.
The main disadvantage of this approach is the need for large datasets for individual tasks. Plus, fine-tuning on one dataset may not provide good generalization over others for the same task. Although fine-tuning results are strong, it proves to be an unfair comparison with human performance.
In this setting, after pre-training the model (with in-context learning), we directly provide the model with the input for the task without any special training for that task. Just tell the model “what to do” along with the input. This is the most challenging setting, and in some cases, it may be “unfairly hard.” For example, for inputs like “make a table of world records for the 200m dash,” the output format is ambiguous.
Nevertheless, for at least some settings zero-shot is closest to how humans perform tasks.
— GPT-3 Paper
For example, in the above translation example, the input is enough clarification for a human to understand what is expected. Also, this is pretty much what GPT-2 offers.
In this setting, we provide 1) “what to do,” 2) exactly one example (one-shot) of the task, and then 3) the input. The example is meant for conditioning only, i.e., it is meant to provide some context on the task. You can see this as providing some kind of analogy to the model. We saw the “unfairly difficult” example in the zero-shot setting. In tasks like those, the input becomes more plausible to answer if at least one demonstration of the task is provided.
Finally, in the few-shot setting, the input includes 1) “what to do,” 2) a few examples (few-shot), and then 3) the input. This setting provides better conditioning over the input for the model to predict the output. Typically, K examples are added to the input where K is between 10 and 100. The model supports a context length of 2048, so approximately, at max, K = 100 examples can fit in the context window. The few-shot setting greatly reduces the amount of data required than fine-tuning. But there is no denying that at least some amount of task-specific data is required. The main disadvantage of this setting is that so far, the results obtained in this setting were way worse than the state-of-the-art. However, GPT-3 has managed to achieve results very similar to the state-of-the-art on many tasks.
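As an illustration (the task and example strings below are placeholders in the style of the paper’s translation demonstrations), a few-shot prompt is simply the task description, K demonstrations, and the unfinished query concatenated into the context window:
prompt = (
    "Translate English to French:\n"      # 1) what to do
    "sea otter => loutre de mer\n"        # 2) demonstrations (the "shots")
    "peppermint => menthe poivrée\n"
    "cheese =>"                           # 3) the input to complete
)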
Note that, unlike fine-tuning, the model is never trained on the examples in all these settings. The examples are just meant for conditioning, i.e., to provide some context on the input. They are used directly in the input at inference.
Hence, Language Models are Few-Shot Learners!
Like most language models, GPT-3, too, is trained on the CommonCrawl dataset. 41 shards of monthly CommonCrawl covering 2016 to 2019, with 45TB of data, were collected. However, unfiltered or lightly filtered data from CommonCrawl tend to have lower quality than filtered datasets. So the authors have taken 3 steps to filter it:
The authors took some high-quality corpora and based on similarity with these corpora, they have filtered CommonCrawl.
Fuzzy de-duplication is used to remove redundancies within and across datasets. This also ensures integrity, i.e., the model doesn’t train on validation data.
And finally, known high-quality reference data are added to increase the diversity of the dataset.
Leaves us with 570GB data ~400B byte-pair tokens.
A major concern about large language models trained on a huge amount of Internet data is that there are chances that the model may have already seen the test data for downstream tasks during pre-training. To mitigate this data contamination, the authors have made attempts to find and remove such overlapping data.
However, a bug in filtering caused some data to overlap in the pre-training, and training the model again was not feasible due to the training cost.
Despite the celebrated pros, the GPT-3 does have a few limitations:
Although the quality of text generation is great, there are still times when the model generates repetitive text. While generating long documents, it may lose coherence, contradict itself, and sometimes even completely lose the context. The authors have also noticed that GPT-3 has trouble with “common sense physics” questions; specifically, with questions like “If I put cheese into the fridge, will it melt?”.
We know that for some tasks like ‘fill in the blanks,’ bidirectional models have outperformed autoregressive models. So, it can be said that a large autoencoding model can outperform GPT-3 at such tasks. This also opens a research opportunity for training bidirectional models at scale or training them on zero-shot or few-shot settings.
Since the model is trained generically, it does not have any task-specific bias. The model weighs all the tokens equally. Therefore, some real-world applications like virtual assistants may benefit from the fine-tuning approaches as they provide a more goal-oriented outcome rather than just predictions.
A model of the scale of GPT-3 would obviously struggle at inference. It is both expensive and inconvenient to perform inference on. This raises questions about the practical applicability of the model. This also opens research branches for the development of distilled versions of the model.
This one is a limitation for most of the deep learning models — the decisions taken by a model are based on the data it is trained on. So ultimately, there is a strong chance for the model to have stereotyped and prejudiced opinions. This is a topic of concern from a societal perspective. For details on this, you can refer to the ‘Broader Impacts’ Section in the GPT-3 Paper.
In this section, I’ll paste a few pieces of text generated by GPT-3:
A generated news article that humans had the greatest difficulty distinguishing from a human-written article:
Title: United Methodists Agree to Historic Split
Subtitle: Those who oppose gay marriage will form their own denomination
Article: After two days of intense debate, the United Methodist Church has agreed to a historic split - one that is expected to end in the creation of a new denomination, one that will be "theologically and socially conservative," according to The Washington Post. The majority of delegates attending the church's annual General Conference in May voted to strengthen a ban on the ordination of LGBTQ clergy and to write new rules that will "discipline" clergy who officiate at same-sex weddings. But those who opposed these measures have a new plan: They say they will form a separate denomination by 2020, calling their church the Christian Methodist denomination. The Post notes that the denomination, which claims 12.5 million members, was in the early 20th century the "largest Protestant denomination in the U.S.," but that it has been shrinking in recent decades. The new split will be the second in the church's history. The first occurred in 1968, when roughly 10 percent of the denomination left to form the Evangelical United Brethren Church. The Post notes that the proposed split "comes at a critical time for the church, which has been losing members for years," which has been "pushed toward the brink of a schism over the role of LGBTQ people in the church." Gay marriage is not the only issue that has divided the church. In 2016, the denomination was split over ordination of transgender clergy, with the North Pacific regional conference voting to ban them from serving as clergy, and the South Pacific regional conference voting to allow them.
Text completion examples:
To do a "farduddle" means to jump up and down really fast. An example of a sentence that uses the word farduddle is:
One day when I was playing tag with my little sister, she got really excited and she started doing these crazy farduddles.
A "yalubalu" is a type of vegetable that looks like a big pumpkin. An example of a sentence that uses the word yalubalu is:
I was on a trip to Africa and I tried this yalubalu vegetable that was grown in a garden there. It was delicious.
Generated Poem:
In this article, we’ve discussed the famous GPT-3 model and saw how it works. The model has produced astounding results that have even fooled humans in their own game. However, there is still a lot of room for improvement, which is a motivation for further research.
The match Function in Python | This function attempts to match the RE pattern at the beginning of a string, with optional flags.
Here is the syntax for this function −
re.match(pattern, string, flags=0)
Here is the description of the parameters −

pattern − This is the regular expression to be matched.

string − This is the string, which would be searched to match the pattern at its beginning.

flags − You can specify different flags using bitwise OR (|), such as re.I for case-insensitive matching and re.M for multi-line matching.
The re.match function returns a match object on success, None on failure. We use the group(num) or groups() function of the match object to get the matched expression.
#!/usr/bin/python3
import re

line = "Cats are smarter than dogs"

# re.M and re.I are optional flags (multi-line and ignore-case)
matchObj = re.match( r'(.*) are (.*?) .*', line, re.M|re.I)

if matchObj:
   print("matchObj.group() : ", matchObj.group())
   print("matchObj.group(1) : ", matchObj.group(1))
   print("matchObj.group(2) : ", matchObj.group(2))
else:
   print("No match!!")
When the above code is executed, it produces the following result −
matchObj.group() : Cats are smarter than dogs
matchObj.group(1) : Cats
matchObj.group(2) :  smarter
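One detail worth keeping in mind (a small sketch, assuming Python 3, not part of the original example): re.match only succeeds when the pattern matches at the beginning of the string, whereas re.search scans the whole string.

import re

line = "Cats are smarter than dogs"

print(re.match(r'dogs', line))   # None - 'dogs' does not occur at the start of the string
print(re.search(r'dogs', line))  # a match object, because search scans the whole string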
Swap two variables in one line using Python? | In this section, we are going to swap two variables in one line using Python. The standard way to swap two variables in Python is very simple and easy −
>>> a = 20;b=30
>>> a
20
>>> b
30
>>> #Swap two variable in one line
>>> a, b = b, a
>>> a
30
>>> b
20
The above code generates the swapped values of a and b.
Python evaluates expressions from left to right. However, while evaluating an assignment, the right-hand side is evaluated before the left-hand side.
That means the following for the expression a, b = b, a
The right-hand side ‘b, a’ is evaluated, that is to say, a tuple of two elements is created in memory. The two elements are the objects designated by the identifiers b and a, which already existed before the instruction is encountered during the execution of the program.
At this point the tuple has been created but not yet assigned to anything; that is not an issue, as Python internally knows where it is.
Then the left-hand side is evaluated, that is, the tuple stored in memory is assigned to the left-hand side. As the left-hand side is composed of the two identifiers a and b, the tuple is unpacked so that the first identifier a is assigned the first element of the tuple (i.e. b) and the second identifier b is assigned the second element of the tuple (i.e. a).
In short, in the expression “a, b = b, a”, the first value on the right is assigned to the first name on the left and the second to the second at the same time, which therefore swaps the values of a and b.
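To make the two evaluation steps concrete, here is a small sketch that performs the same operation explicitly (the intermediate name t is used only for illustration; the one-line form never creates a visible variable):

a, b = 20, 30

t = (b, a)   # step 1: the right-hand side is evaluated first, creating the tuple (30, 20)
a, b = t     # step 2: the tuple is unpacked into the left-hand side names

print(a, b)  # 30 20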
Formatted Strings Using Template Strings in JavaScript | Following is the code for formatted strings using template strings in JavaScript −
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Document</title>
<style>
body {
font-family: "Segoe UI", Tahoma, Geneva, Verdana, sans-serif;
}
.result,
.sample {
font-size: 20px;
font-weight: 500;
color: blueviolet;
}
.sample {
color: red;
}
</style>
</head>
<body>
<h1>Formatted Strings Using Template Strings JavaScript</h1>
<div class="sample">
`The person name is ${personObj.name}. His age and rollno are
${personObj.age} and ${personObj.rollno} respectively`
</div>
<div class="result"></div>
<br />
<button class="Btn">CLICK HERE</button>
<h3>Click on the above button to see the template string formatted with the
personObj values</h3>
<script>
let BtnEle = document.querySelector(".Btn");
let resEle = document.querySelector(".result");
let personObj = { name: "Rohan Sharma", age: 12, rollno: 22 };
let arr = [22, 55, 11, 19, 55];
BtnEle.addEventListener("click", (event) => {
resEle.innerHTML = `The person name is ${personObj.name}. His age and rollno are
${personObj.age} and ${personObj.rollno} respectively`;
});
</script>
</body>
</html>
On clicking the ‘CLICK HERE’ button −
How to open a file in read and write mode with Python? | To open a file in read/write mode without truncating its contents, specify 'r+' as the mode ('w+' also opens the file for both reading and writing, but it truncates it first). For example,
f = open('my_file.txt', 'r+')   # 'r+' opens for reading and writing without truncating
file_content = f.read()         # read the existing contents
f.write('Hello World')          # written at the current position (end of file after the read)
f.close()
The above code opens my_file.txt in read/write mode, stores the existing file content in the file_content variable, and then writes "Hello World" after it. If 'w+' were used instead, the file would be truncated on open, so the read would return an empty string.
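As a small additional sketch (same behaviour, just using a context manager so the file is closed automatically even if an error occurs):

with open('my_file.txt', 'r+') as f:
    file_content = f.read()   # existing contents
    f.write('Hello World')    # written at the current position (end of file after the read)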
Big Data Analytics - Quick Guide | The volume of data that one has to deal with has exploded to unimaginable levels in the past decade, and at the same time, the price of data storage has systematically reduced. Private companies and research institutions capture terabytes of data about their users’ interactions, business, social media, and also sensors from devices such as mobile phones and automobiles. The challenge of this era is to make sense of this sea of data. This is where big data analytics comes into the picture.
Big Data Analytics largely involves collecting data from different sources, munging it so that it becomes available to be consumed by analysts, and finally delivering data products useful to the organization’s business.
The process of converting large amounts of unstructured raw data, retrieved from different sources to a data product useful for organizations forms the core of Big Data Analytics.
In order to provide a framework to organize the work needed by an organization and deliver clear insights from Big Data, it’s useful to think of it as a cycle with different stages. It is by no means linear, meaning all the stages are related with each other. This cycle has superficial similarities with the more traditional data mining cycle as described in CRISP methodology.
The CRISP-DM methodology that stands for Cross Industry Standard Process for Data Mining, is a cycle that describes commonly used approaches that data mining experts use to tackle problems in traditional BI data mining. It is still being used in traditional BI data mining teams.
Take a look at the following illustration. It shows the major stages of the cycle as described by the CRISP-DM methodology and how they are interrelated.
CRISP-DM was conceived in 1996 and the next year, it got underway as a European Union project under the ESPRIT funding initiative. The project was led by five companies: SPSS, Teradata, Daimler AG, NCR Corporation, and OHRA (an insurance company). The project was finally incorporated into SPSS. The methodology is extremely detail-oriented in how a data mining project should be specified.
Let us now learn a little more on each of the stages involved in the CRISP-DM life cycle −
Business Understanding − This initial phase focuses on understanding the project objectives and requirements from a business perspective, and then converting this knowledge into a data mining problem definition. A preliminary plan is designed to achieve the objectives. A decision model, especially one built using the Decision Model and Notation standard can be used.
Data Understanding − The data understanding phase starts with an initial data collection and proceeds with activities in order to get familiar with the data, to identify data quality problems, to discover first insights into the data, or to detect interesting subsets to form hypotheses for hidden information.
Data Preparation − The data preparation phase covers all activities to construct the final dataset (data that will be fed into the modeling tool(s)) from the initial raw data. Data preparation tasks are likely to be performed multiple times, and not in any prescribed order. Tasks include table, record, and attribute selection as well as transformation and cleaning of data for modeling tools.
Modeling − In this phase, various modeling techniques are selected and applied and their parameters are calibrated to optimal values. Typically, there are several techniques for the same data mining problem type. Some techniques have specific requirements on the form of data. Therefore, it is often required to step back to the data preparation phase.
Evaluation − At this stage in the project, you have built a model (or models) that appears to have high quality, from a data analysis perspective. Before proceeding to final deployment of the model, it is important to evaluate the model thoroughly and review the steps executed to construct the model, to be certain it properly achieves the business objectives.
A key objective is to determine if there is some important business issue that has not been sufficiently considered. At the end of this phase, a decision on the use of the data mining results should be reached.
Deployment − Creation of the model is generally not the end of the project. Even if the purpose of the model is to increase knowledge of the data, the knowledge gained will need to be organized and presented in a way that is useful to the customer.
Depending on the requirements, the deployment phase can be as simple as generating a report or as complex as implementing a repeatable data scoring (e.g. segment allocation) or data mining process.
In many cases, it will be the customer, not the data analyst, who will carry out the deployment steps. Even if the analyst deploys the model, it is important for the customer to understand upfront the actions which will need to be carried out in order to actually make use of the created models.
SEMMA is another methodology developed by SAS for data mining modeling. It stands for Sample, Explore, Modify, Model, and Assess. Here is a brief description of its stages −
Sample − The process starts with data sampling, e.g., selecting the dataset for modeling. The dataset should be large enough to contain sufficient information to retrieve, yet small enough to be used efficiently. This phase also deals with data partitioning.
Explore − This phase covers the understanding of the data by discovering anticipated and unanticipated relationships between the variables, and also abnormalities, with the help of data visualization.
Modify − The Modify phase contains methods to select, create and transform variables in preparation for data modeling.
Model − In the Model phase, the focus is on applying various modeling (data mining) techniques on the prepared variables in order to create models that possibly provide the desired outcome.
Assess − The evaluation of the modeling results shows the reliability and usefulness of the created models.
The main difference between CRISP-DM and SEMMA is that SEMMA focuses on the modeling aspect, whereas CRISP-DM gives more importance to stages of the cycle prior to modeling, such as understanding the business problem to be solved and understanding and preprocessing the data to be used as input to, for example, machine learning algorithms.
In today’s big data context, the previous approaches are either incomplete or suboptimal. For example, the SEMMA methodology disregards completely data collection and preprocessing of different data sources. These stages normally constitute most of the work in a successful big data project.
A big data analytics cycle can be described by the following stages −
Business Problem Definition
Research
Human Resources Assessment
Data Acquisition
Data Munging
Data Storage
Exploratory Data Analysis
Data Preparation for Modeling and Assessment
Modeling
Implementation
In this section, we will throw some light on each of these stages of big data life cycle.
This is a point common to the traditional BI and big data analytics life cycles. Normally it is a non-trivial stage of a big data project to define the problem and evaluate correctly how much potential gain it may have for an organization. It seems obvious to mention this, but the expected gains and costs of the project have to be evaluated.
Analyze what other companies have done in the same situation. This involves looking for solutions that are reasonable for your company, even though it involves adapting other solutions to the resources and requirements that your company has. In this stage, a methodology for the future stages should be defined.
Once the problem is defined, it’s reasonable to continue analyzing if the current staff is able to complete the project successfully. Traditional BI teams might not be capable to deliver an optimal solution to all the stages, so it should be considered before starting the project if there is a need to outsource a part of the project or hire more people.
This section is key in a big data life cycle; it defines which type of profiles would be needed to deliver the resultant data product. Data gathering is a non-trivial step of the process; it normally involves gathering unstructured data from different sources. To give an example, it could involve writing a crawler to retrieve reviews from a website. This involves dealing with text, perhaps in different languages normally requiring a significant amount of time to be completed.
Once the data is retrieved, for example, from the web, it needs to be stored in an easy-to-use format. To continue with the reviews example, let’s assume the data is retrieved from different sites where each has a different display of the data.
Suppose one data source gives reviews in terms of rating in stars, therefore it is possible to read this as a mapping for the response variable y ∈ {1, 2, 3, 4, 5}. Another data source gives reviews using two arrows system, one for up voting and the other for down voting. This would imply a response variable of the form y ∈ {positive, negative}.
In order to combine both the data sources, a decision has to be made in order to make these two response representations equivalent. This can involve converting the first data source response representation to the second form, considering one star as negative and five stars as positive. This process often requires a large time allocation to be delivered with good quality.
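A minimal sketch of one possible mapping in Python (the cut-offs below are an assumption made for illustration, not something prescribed here):

def stars_to_label(stars):
    # Collapse the 1-5 star scale into the binary positive/negative scheme
    if stars <= 2:
        return "negative"
    if stars >= 4:
        return "positive"
    return None  # 3-star reviews are ambiguous; this sketch simply drops them

star_reviews = [5, 1, 3, 4]                          # first source: star ratings
vote_reviews = ["positive", "negative", "positive"]  # second source: up/down votes

combined = [label for label in map(stars_to_label, star_reviews) if label is not None]
combined += vote_reviews
print(combined)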
Once the data is processed, it sometimes needs to be stored in a database. Big data technologies offer plenty of alternatives regarding this point. The most common alternative is using the Hadoop File System for storage, which provides users a limited version of SQL, known as HIVE Query Language. This allows most analytics tasks to be done in similar ways as would be done in traditional BI data warehouses, from the user perspective. Other storage options to be considered are MongoDB, Redis, and SPARK.
This stage of the cycle is related to the human resources knowledge in terms of their abilities to implement different architectures. Modified versions of traditional data warehouses are still being used in large scale applications. For example, Teradata and IBM offer SQL databases that can handle terabytes of data; open source solutions such as PostgreSQL and MySQL are still being used for large scale applications.
Even though there are differences in how the different storages work in the background, from the client side, most solutions provide a SQL API. Hence having a good understanding of SQL is still a key skill to have for big data analytics.
A priori, this stage seems to be the most important topic; in practice, this is not true. It is not even an essential stage. It is possible to implement a big data solution that would be working with real-time data, so in this case, we only need to gather data to develop the model and then implement it in real time. So there would not be a need to formally store the data at all.
Once the data has been cleaned and stored in a way that insights can be retrieved from it, the data exploration phase is mandatory. The objective of this stage is to understand the data, this is normally done with statistical techniques and also plotting the data. This is a good stage to evaluate whether the problem definition makes sense or is feasible.
This stage involves reshaping the cleaned data retrieved previously and using statistical preprocessing for missing values imputation, outlier detection, normalization, feature extraction and feature selection.
The prior stage should have produced several datasets for training and testing, for example, a predictive model. This stage involves trying different models and looking forward to solving the business problem at hand. In practice, it is normally desired that the model would give some insight into the business. Finally, the best model or combination of models is selected evaluating its performance on a left-out dataset.
In this stage, the data product developed is implemented in the data pipeline of the company. This involves setting up a validation scheme while the data product is working, in order to track its performance. For example, in the case of implementing a predictive model, this stage would involve applying the model to new data and once the response is available, evaluate the model.
In terms of methodology, big data analytics differs significantly from the traditional statistical approach of experimental design. Analytics starts with data. Normally we model the data in a way to explain a response. The objectives of this approach is to predict the response behavior or understand how the input variables relate to a response. Normally in statistical experimental designs, an experiment is developed and data is retrieved as a result. This allows to generate data in a way that can be used by a statistical model, where certain assumptions hold such as independence, normality, and randomization.
In big data analytics, we are presented with the data. We cannot design an experiment that fulfills our favorite statistical model. In large-scale applications of analytics, a large amount of work (normally 80% of the effort) is needed just for cleaning the data, so it can be used by a machine learning model.
We don’t have a unique methodology to follow in real large-scale applications. Normally once the business problem is defined, a research stage is needed to design the methodology to be used. However general guidelines are relevant to be mentioned and apply to almost all problems.
One of the most important tasks in big data analytics is statistical modeling, meaning supervised and unsupervised classification or regression problems. Once the data is cleaned and preprocessed, available for modeling, care should be taken in evaluating different models with reasonable loss metrics and then once the model is implemented, further evaluation and results should be reported. A common pitfall in predictive modeling is to just implement the model and never measure its performance.
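As a reminder of what that evaluation step can look like in practice, here is a generic, hedged sketch in Python with scikit-learn (placeholder data; any model and loss metric could be substituted):

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_absolute_error

# Placeholder data standing in for a prepared modeling dataset
X = np.random.rand(200, 5)
y = X @ np.array([1.0, -2.0, 0.5, 0.0, 3.0]) + np.random.normal(scale=0.1, size=200)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

model = Ridge().fit(X_train, y_train)
print(mean_absolute_error(y_test, model.predict(X_test)))  # always report performance on held-out data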
As mentioned in the big data life cycle, the data products that result from developing a big data product are in most of the cases some of the following −
Machine learning implementation − This could be a classification algorithm, a regression model or a segmentation model.
Recommender system − The objective is to develop a system that recommends choices based on user behavior. Netflix is the characteristic example of this data product, where based on the ratings of users, other movies are recommended.
Dashboard − Business normally needs tools to visualize aggregated data. A dashboard is a graphical mechanism to make this data accessible.
Ad-Hoc analysis − Normally business areas have questions, hypotheses or myths that can be answered doing ad-hoc analysis with data.
In large organizations, in order to successfully develop a big data project, it is needed to have management backing up the project. This normally involves finding a way to show the business advantages of the project. We don’t have a unique solution to the problem of finding sponsors for a project, but a few guidelines are given below −
Check who and where are the sponsors of other projects similar to the one that interests you.
Having personal contacts in key management positions helps, so any contact can be triggered if the project is promising.
Who would benefit from your project? Who would be your client once the project is on track?
Develop a simple, clear, and exciting proposal and share it with the key players in your organization.
The best way to find sponsors for a project is to understand the problem and what would be the resulting data product once it has been implemented. This understanding will give an edge in convincing the management of the importance of the big data project.
A data analyst has a reporting-oriented profile, having experience in extracting and analyzing data from traditional data warehouses using SQL. Their tasks are normally either on the side of data storage or in reporting general business results. Data warehousing is by no means simple, it is just different from what a data scientist does.
Many organizations struggle hard to find competent data scientists in the market. It is however a good idea to select prospective data analysts and teach them the relevant skills to become a data scientist. This is by no means a trivial task and would normally involve the person doing a master’s degree in a quantitative field, but it is definitely a viable option. The basic skills a competent data analyst must have are listed below −
Business understanding
SQL programming
Report design and implementation
Dashboard development
The role of a data scientist is normally associated with tasks such as predictive modeling, developing segmentation algorithms, recommender systems, A/B testing frameworks and often working with raw unstructured data.
The nature of their work demands a deep understanding of mathematics, applied statistics and programming. There are a few skills common between a data analyst and a data scientist, for example, the ability to query databases. Both analyze data, but the decision of a data scientist can have a greater impact in an organization.
Here is a set of skills a data scientist normally needs to have −
Programming in a statistical package such as: R, Python, SAS, SPSS, or Julia
Able to clean, extract, and explore data from different sources
Research, design, and implementation of statistical models
Deep statistical, mathematical, and computer science knowledge
In big data analytics, people normally confuse the role of a data scientist with that of a data architect. In reality, the difference is quite simple. A data architect defines the tools and the architecture the data would be stored at, whereas a data scientist uses this architecture. Of course, a data scientist should be able to set up new tools if needed for ad-hoc projects, but the infrastructure definition and design should not be a part of his task.
Through this tutorial, we will develop a project. Each subsequent chapter in this tutorial deals with a part of the larger project in the mini-project section. This is thought to be an applied tutorial section that will provide exposure to a real-world problem. In this case, we would start with the problem definition of the project.
The objective of this project would be to develop a machine learning model to predict the hourly salary of people using their curriculum vitae (CV) text as input.
Using the framework defined above, it is simple to define the problem. We can define X = {x1, x2, ..., xn} as the CVs of users, where each feature can be, in the simplest way possible, the number of times a given word appears. The response is real valued: we are trying to predict the hourly salary of individuals in dollars.
These two considerations are enough to conclude that the problem presented can be solved with a supervised regression algorithm.
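Purely as an illustration of this formulation, here is a rough scikit-learn sketch with made-up CVs and salaries (the real project would use the gathered dataset and proper validation; the texts and numbers below are invented):

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import Ridge
from sklearn.pipeline import make_pipeline

# Hypothetical toy data: CV texts and hourly salaries in dollars
cvs = [
    "python developer with 5 years of experience in web applications",
    "junior data analyst, sql and excel reporting",
    "senior machine learning engineer, deep learning and spark",
]
salaries = [45.0, 22.0, 70.0]

# Each x_i is the vector of word counts of a CV (bag of words); y is the hourly salary
model = make_pipeline(CountVectorizer(), Ridge(alpha=1.0))
model.fit(cvs, salaries)

print(model.predict(["data engineer with python and sql experience"]))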
Problem Definition is probably one of the most complex and heavily neglected stages in the big data analytics pipeline. In order to define the problem a data product would solve, experience is mandatory. Most data scientist aspirants have little or no experience in this stage.
Most big data problems can be categorized in the following ways −
Supervised classification
Supervised regression
Unsupervised learning
Learning to rank
Let us now learn more about these four concepts.
Given a matrix of features X = {x1, x2, ..., xn} we develop a model M to predict different classes defined as y = {c1, c2, ..., cn}. For example: Given transactional data of customers in an insurance company, it is possible to develop a model that will predict if a client would churn or not. The latter is a binary classification problem, where there are two classes or target variables: churn and not churn.
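A minimal sketch of such a binary churn classifier (the features and values below are invented, purely for illustration):

from sklearn.linear_model import LogisticRegression

# Hypothetical transactional features per client:
# [monthly_spend, months_as_customer, support_calls_last_quarter]
X = [[50.0, 24, 0],
     [12.0, 3, 5],
     [80.0, 36, 1],
     [9.0, 2, 7]]
y = ["not churn", "churn", "not churn", "churn"]

clf = LogisticRegression().fit(X, y)
print(clf.predict([[15.0, 4, 6]]))  # predicted class for a new client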
Other problems involve predicting more than one class; we could be interested in doing digit recognition, therefore the response vector would be defined as y = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}. A state-of-the-art model would be a convolutional neural network and the matrix of features would be defined as the pixels of the image.
In this case, the problem definition is rather similar to the previous example; the difference lies in the response. In a regression problem, the response y ∈ R, which means the response is real valued. For example, we can develop a model to predict the hourly salary of individuals given the corpus of their CV.
Management is often thirsty for new insights. Segmentation models can provide this insight in order for the marketing department to develop products for different segments. A good approach for developing a segmentation model, rather than thinking of algorithms, is to select features that are relevant to the segmentation that is desired.
For example, in a telecommunications company, it is interesting to segment clients by their cellphone usage. This would involve disregarding features that have nothing to do with the segmentation objective and including only those that do. In this case, this would be selecting features as the number of SMS used in a month, the number of inbound and outbound minutes, etc.
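For example, a small clustering sketch on such usage features (the numbers are invented and the choice of two clusters is arbitrary):

import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

# Hypothetical monthly usage per client: [sms_sent, inbound_minutes, outbound_minutes]
usage = np.array([
    [200, 50, 60],
    [180, 40, 80],
    [10, 800, 900],
    [15, 750, 820],
])

# Standardize first so that no single feature dominates the distance computation
scaled = StandardScaler().fit_transform(usage)
segments = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(scaled)
print(segments)  # one cluster label per client, e.g. [0 0 1 1]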
This problem can be considered as a regression problem, but it has particular characteristics and deserves a separate treatment. The problem is the following: given a collection of documents, we seek to find the most relevant ordering given a query. In order to develop a supervised learning algorithm, it is needed to label how relevant an ordering is, given a query.
It is relevant to note that in order to develop a supervised learning algorithm, the training data needs to be labeled. This means that in order to train a model that will, for example, recognize digits from an image, we need to label a significant amount of examples by hand. There are web services that can speed up this process and are commonly used for this task, such as Amazon Mechanical Turk. It is proven that learning algorithms improve their performance when provided with more data, so labeling a decent amount of examples is practically mandatory in supervised learning.
Data collection plays the most important role in the Big Data cycle. The Internet provides almost unlimited sources of data for a variety of topics. The importance of this area depends on the type of business, but traditional industries can acquire a diverse source of external data and combine those with their transactional data.
For example, let’s assume we would like to build a system that recommends restaurants. The first step would be to gather data, in this case, reviews of restaurants from different websites, and store them in a database. As we are interested in raw text and would use that for analytics, it is not that relevant where the data for developing the model would be stored. This may sound contradictory to the main big data technologies, but in order to implement a big data application, we simply need to make it work in real time.
Once the problem is defined, the following stage is to collect the data. The following mini-project idea is to work on collecting data from the web and structuring it to be used in a machine learning model. We will collect some tweets from the twitter rest API using the R programming language.
First of all create a twitter account, and then follow the instructions in the twitteR package vignette to create a twitter developer account. This is a summary of those instructions −
Go to https://twitter.com/apps/new and log in.
After filling in the basic info, go to the "Settings" tab and select "Read, Write and Access direct messages".
Make sure to click on the save button after doing this
In the "Details" tab, take note of your consumer key and consumer secret
In your R session, you’ll be using the API key and API secret values
In your R session, you’ll be using the API key and API secret values
Finally run the following script. This will install the twitteR package from its repository on github.
install.packages(c("devtools", "rjson", "bit64", "httr"))
# Make sure to restart your R session at this point
library(devtools)
install_github("geoffjentry/twitteR")
We are interested in getting data where the string "big mac" is included and finding out which topics stand out about this. In order to do this, the first step is collecting the data from Twitter. Below is our R script to collect the required data from Twitter. This code is also available in the bda/part1/collect_data/collect_data_twitter.R file.
rm(list = ls(all = TRUE)); gc() # Clears the global environment
library(twitteR)
Sys.setlocale(category = "LC_ALL", locale = "C")
### Replace the xxx’s with the values you got from the previous instructions
# consumer_key = "xxxxxxxxxxxxxxxxxxxx"
# consumer_secret = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# access_token = "xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# access_token_secret= "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# Connect to twitter rest API
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_token_secret)
# Get tweets related to big mac
tweets <- searchTwitter('big mac', n = 200, lang = 'en')
df <- twListToDF(tweets)
# Take a look at the data
head(df)
# Check which device is most used
sources <- sapply(tweets, function(x) x$getStatusSource())
sources <- gsub("</a>", "", sources)
sources <- strsplit(sources, ">")
sources <- sapply(sources, function(x) ifelse(length(x) > 1, x[2], x[1]))
source_table = table(sources)
source_table = source_table[source_table > 1]
freq = source_table[order(source_table, decreasing = T)]
as.data.frame(freq)
# Frequency
# Twitter for iPhone 71
# Twitter for Android 29
# Twitter Web Client 25
# recognia 20
Once the data is collected, we normally have diverse data sources with different characteristics. The most immediate step would be to make these data sources homogeneous and continue to develop our data product. However, it depends on the type of data. We should ask ourselves if it is practical to homogenize the data.
Maybe the data sources are completely different, and the information loss will be large if the sources are homogenized. In this case, we can think of alternatives. Can one data source help me build a regression model and the other one a classification model? Is it possible to work with the heterogeneity to our advantage rather than just lose information? Making these decisions is what makes analytics interesting and challenging.
In the case of reviews, each data source may be in a different language. Again, we have two choices −
Homogenization − It involves translating different languages to the language where we have more data. The quality of translation services is acceptable, but if we would like to translate massive amounts of data with an API, the cost would be significant. There are software tools available for this task, but that would be costly too.
Heterogenization − Would it be possible to develop a solution for each language? As it is simple to detect the language of a corpus, we could develop a recommender for each language (a minimal language-detection sketch follows below). This would involve more work in terms of tuning each recommender according to the number of languages available, but it is definitely a viable option if we have only a few languages available.
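As a rough illustration of the heterogenization route, the following sketch guesses the language of each review and then splits the corpus so a separate model can be trained per language. The reviews vector and the choice of the textcat package are assumptions made for illustration; any language-identification tool would serve the same purpose.
# install.packages('textcat')   # n-gram based language identification
library(textcat)
# Hypothetical vector of raw reviews in several languages
reviews = c("The burger was great", "La hamburguesa estaba muy buena",
   "Le burger était excellent")
# Guess the language of each review
langs = textcat(reviews)
print(langs)
# e.g. "english" "spanish" "french"
# Split the corpus so that a recommender can be trained per language
reviews_by_lang = split(reviews, langs)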
In the present case, we need to first clean the unstructured data and then convert it to a data matrix in order to apply topic modeling to it. In general, when getting data from twitter, there are several characters we are not interested in using, at least in the first stage of the data cleansing process.
For example, after getting the tweets we get these strange characters: "<ed><U+00A0><U+00BD><ed><U+00B8><U+008B>". These are probably emoticons, so in order to clean the data, we will just remove them using the following script. This code is also available in bda/part1/collect_data/cleaning_data.R file.
rm(list = ls(all = TRUE)); gc() # Clears the global environment
source('collect_data_twitter.R')
# Some tweets
head(df$text)
[1] "I’m not a big fan of turkey but baked Mac &
cheese <ed><U+00A0><U+00BD><ed><U+00B8><U+008B>"
[2] "@Jayoh30 Like no special sauce on a big mac. HOW"
### We are interested in the text - Let’s clean it!
# We first convert the encoding of the text from latin1 to ASCII
df$text <- sapply(df$text,function(row) iconv(row, "latin1", "ASCII", sub = ""))
# Create a function to clean tweets
clean.text <- function(tx) {
tx <- gsub("htt.{1,20}", " ", tx, ignore.case = TRUE)
tx = gsub("[^#[:^punct:]]|@|RT", " ", tx, perl = TRUE, ignore.case = TRUE)
tx = gsub("[[:digit:]]", " ", tx, ignore.case = TRUE)
tx = gsub(" {1,}", " ", tx, ignore.case = TRUE)
tx = gsub("^\\s+|\\s+$", " ", tx, ignore.case = TRUE)
return(tx)
}
clean_tweets <- lapply(df$text, clean.text)
# Cleaned tweets
head(clean_tweets)
[1] " WeNeedFeminlsm MAC s new make up line features men woc and big girls "
[1] " TravelsPhoto What Happens To Your Body One Hour After A Big Mac "
The final step of the data cleansing mini project is to have clean text that we can convert to a matrix and apply an algorithm to. From the text stored in the clean_tweets vector, we can easily convert it to a bag-of-words matrix and apply an unsupervised learning algorithm.
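As a minimal sketch of that last step, the following code converts the cleaned tweets into a document-term (bag-of-words) matrix with the tm package. The package choice and the control options are assumptions made for illustration; any text-mining toolkit that produces a bag-of-words matrix would do.
# install.packages('tm')
library(tm)
# clean_tweets comes from the cleaning script above; it is a list of strings
corpus <- VCorpus(VectorSource(unlist(clean_tweets)))
# Build the document-term matrix, removing stop words and very short terms
dtm <- DocumentTermMatrix(corpus, control = list(
   tolower = TRUE,
   stopwords = TRUE,
   wordLengths = c(3, Inf)))
dim(dtm)   # number of tweets x number of terms
# The dtm object can now be fed to an unsupervised algorithm, for example
# a topic model such as LDA from the topicmodels package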
Reporting is very important in big data analytics. Every organization must have a regular provision of information to support its decision making process. This task is normally handled by data analysts with SQL and ETL (extract, transform, and load) experience.
The team in charge of this task has the responsibility of spreading the information produced in the big data analytics department to different areas of the organization.
The following example demonstrates what summarization of data means. Navigate to the folder bda/part1/summarize_data and inside the folder, open the summarize_data.Rproj file by double clicking it. Then, open the summarize_data.R script and take a look at the code, and follow the explanations presented.
# Install the following packages by running the following code in R.
pkgs = c('data.table', 'ggplot2', 'nycflights13', 'reshape2')
install.packages(pkgs)
The ggplot2 package is great for data visualization. The data.table package is a great option to do fast and memory efficient summarization in R. A recent benchmark shows it is even faster than pandas, the Python library used for similar tasks.
Take a look at the data using the following code. This code is also available in bda/part1/summarize_data/summarize_data.Rproj file.
library(nycflights13)
library(ggplot2)
library(data.table)
library(reshape2)
# Convert the flights data.frame to a data.table object and call it DT
DT <- as.data.table(flights)
# The data has 336776 rows and 16 columns
dim(DT)
# Take a look at the first rows
head(DT)
# year month day dep_time dep_delay arr_time arr_delay carrier
# 1: 2013 1 1 517 2 830 11 UA
# 2: 2013 1 1 533 4 850 20 UA
# 3: 2013 1 1 542 2 923 33 AA
# 4: 2013 1 1 544 -1 1004 -18 B6
# 5: 2013 1 1 554 -6 812 -25 DL
# 6: 2013 1 1 554 -4 740 12 UA
# tailnum flight origin dest air_time distance hour minute
# 1: N14228 1545 EWR IAH 227 1400 5 17
# 2: N24211 1714 LGA IAH 227 1416 5 33
# 3: N619AA 1141 JFK MIA 160 1089 5 42
# 4: N804JB 725 JFK BQN 183 1576 5 44
# 5: N668DN 461 LGA ATL 116 762 5 54
# 6: N39463 1696 EWR ORD 150 719 5 54
The following code has an example of data summarization.
### Data Summarization
# Compute the mean arrival delay
DT[, list(mean_arrival_delay = mean(arr_delay, na.rm = TRUE))]
# mean_arrival_delay
# 1: 6.895377
# Now, we compute the same value but for each carrier
mean1 = DT[, list(mean_arrival_delay = mean(arr_delay, na.rm = TRUE)),
by = carrier]
print(mean1)
# carrier mean_arrival_delay
# 1: UA 3.5580111
# 2: AA 0.3642909
# 3: B6 9.4579733
# 4: DL 1.6443409
# 5: EV 15.7964311
# 6: MQ 10.7747334
# 7: US 2.1295951
# 8: WN 9.6491199
# 9: VX 1.7644644
# 10: FL 20.1159055
# 11: AS -9.9308886
# 12: 9E 7.3796692
# 13: F9 21.9207048
# 14: HA -6.9152047
# 15: YV 15.5569853
# 16: OO 11.9310345
# Now let's compute two means in the same line of code
mean2 = DT[, list(mean_departure_delay = mean(dep_delay, na.rm = TRUE),
mean_arrival_delay = mean(arr_delay, na.rm = TRUE)),
by = carrier]
print(mean2)
# carrier mean_departure_delay mean_arrival_delay
# 1: UA 12.106073 3.5580111
# 2: AA 8.586016 0.3642909
# 3: B6 13.022522 9.4579733
# 4: DL 9.264505 1.6443409
# 5: EV 19.955390 15.7964311
# 6: MQ 10.552041 10.7747334
# 7: US 3.782418 2.1295951
# 8: WN 17.711744 9.6491199
# 9: VX 12.869421 1.7644644
# 10: FL 18.726075 20.1159055
# 11: AS 5.804775 -9.9308886
# 12: 9E 16.725769 7.3796692
# 13: F9 20.215543 21.9207048
# 14: HA 4.900585 -6.9152047
# 15: YV 18.996330 15.5569853
# 16: OO 12.586207 11.9310345
### Create a new variable called gain
# this is the difference between arrival delay and departure delay
DT[, gain:= arr_delay - dep_delay]
# Compute the median gain per carrier
median_gain = DT[, median(gain, na.rm = TRUE), by = carrier]
print(median_gain)
Exploratory data analysis is a concept developed by John Tukey (1977) that consists of a new perspective on statistics. Tukey’s idea was that in traditional statistics, the data was not being explored graphically, it was just being used to test hypotheses. The first attempt to develop a tool was done at Stanford; the project was called prim9. The tool was able to visualize data in nine dimensions, therefore it was able to provide a multivariate perspective of the data.
In recent times, exploratory data analysis is a must and has been included in the big data analytics life cycle. The ability to find insight and communicate it effectively in an organization is fueled by strong EDA capabilities.
Based on Tukey’s ideas, Bell Labs developed the S programming language in order to provide an interactive interface for doing statistics. The idea of S was to provide extensive graphical capabilities with an easy-to-use language. In today’s world, in the context of Big Data, R, which is based on the S programming language, is the most popular software for analytics.
The following is an example of exploratory data analysis. This code is also available in the part1/eda/exploratory_data_analysis.R file.
library(nycflights13)
library(ggplot2)
library(data.table)
library(reshape2)
# Using the code from the previous section
# This computes the mean arrival and departure delays by carrier.
DT <- as.data.table(flights)
mean2 = DT[, list(mean_departure_delay = mean(dep_delay, na.rm = TRUE),
mean_arrival_delay = mean(arr_delay, na.rm = TRUE)),
by = carrier]
# In order to plot data in R using ggplot, it is normally needed to reshape the data
# We want to have the data in long format for plotting with ggplot
dt = melt(mean2, id.vars = 'carrier')
# Take a look at the first rows
print(head(dt))
# Take a look at the help for ?geom_point and geom_line to find similar examples
# Here we take the carrier code as the x axis
# the value from the dt data.table goes in the y axis
# The variable column represents the color
p = ggplot(dt, aes(x = carrier, y = value, color = variable, group = variable)) +
geom_point() + # Plots points
geom_line() + # Plots lines
theme_bw() + # Uses a white background
labs(title = 'Mean arrival and departure delay by carrier',
   x = 'Carrier', y = 'Mean delay')
print(p)
# Save the plot to disk
ggsave('mean_delay_by_carrier.png', p,
width = 10.4, height = 5.07)
The code should produce an image such as the following −
In order to understand data, it is often useful to visualize it. Normally in Big Data applications, the interest lies in finding insight rather than just making beautiful plots. The following are examples of different approaches to understanding data using plots.
To start analyzing the flights data, we can start by checking if there are correlations between numeric variables. This code is also available in bda/part1/data_visualization/data_visualization.R file.
# Install the package corrplot by running
install.packages('corrplot')
# then load the library
library(corrplot)
# Load the following libraries
library(nycflights13)
library(ggplot2)
library(data.table)
library(reshape2)
# We will continue working with the flights data
DT <- as.data.table(flights)
head(DT) # take a look
# We select the numeric variables after inspecting the first rows.
numeric_variables = c('dep_time', 'dep_delay',
'arr_time', 'arr_delay', 'air_time', 'distance')
# Select numeric variables from the DT data.table
dt_num = DT[, numeric_variables, with = FALSE]
# Compute the correlation matrix of dt_num
cor_mat = cor(dt_num, use = "complete.obs")
print(cor_mat)
### Here is the correlation matrix
# dep_time dep_delay arr_time arr_delay air_time distance
# dep_time 1.00000000 0.25961272 0.66250900 0.23230573 -0.01461948 -0.01413373
# dep_delay 0.25961272 1.00000000 0.02942101 0.91480276 -0.02240508 -0.02168090
# arr_time 0.66250900 0.02942101 1.00000000 0.02448214 0.05429603 0.04718917
# arr_delay 0.23230573 0.91480276 0.02448214 1.00000000 -0.03529709 -0.06186776
# air_time -0.01461948 -0.02240508 0.05429603 -0.03529709 1.00000000 0.99064965
# distance -0.01413373 -0.02168090 0.04718917 -0.06186776 0.99064965 1.00000000
# We can display it visually to get a better understanding of the data
corrplot.mixed(cor_mat, lower = "circle", upper = "ellipse")
# save it to disk
png('corrplot.png')
print(corrplot.mixed(cor_mat, lower = "circle", upper = "ellipse"))
dev.off()
This code generates the following correlation matrix visualization −
We can see in the plot that there is a strong correlation between some of the variables in the dataset. For example, arrival delay and departure delay seem to be highly correlated. We can see this because the ellipse shows an almost linear relationship between both variables; however, it is not simple to infer causation from this result.
We can’t say that because two variables are correlated, one has an effect on the other. We also find in the plot a strong correlation between air time and distance, which is fairly reasonable to expect, as with more distance the flight time should grow.
We can also do univariate analysis of the data. A simple and effective way to visualize distributions are box-plots. The following code demonstrates how to produce box-plots and trellis charts using the ggplot2 library. This code is also available in bda/part1/data_visualization/boxplots.R file.
source('data_visualization.R')
### Analyzing Distributions using box-plots
# The following shows the distance as a function of the carrier
p = ggplot(DT, aes(x = carrier, y = distance, fill = carrier)) + # Carrier in the x axis, distance in the y axis
   geom_boxplot() + # Use the box-plot geom
   theme_bw() + # Leave a white background - more in line with Tufte's principles than the default
   guides(fill = FALSE) + # Remove legend
   labs(title = 'Distance as a function of carrier', # Add labels
      x = 'Carrier', y = 'Distance')
p
# Save to disk
png('boxplot_carrier.png')
print(p)
dev.off()
# Let's add now another variable, the month of each flight
# We will be using facet_wrap for this
p = ggplot(DT, aes(carrier, distance, fill = carrier)) +
   geom_boxplot() +
   theme_bw() +
   guides(fill = FALSE) +
   facet_wrap(~month) + # This creates the trellis plot with the by month variable
   labs(title = 'Distance as a function of carrier by month',
      x = 'Carrier', y = 'Distance')
p
# The plot shows there aren't clear differences between distance in different months
# Save to disk
png('boxplot_carrier_by_month.png')
print(p)
dev.off()
This section is devoted to introducing the user to the R programming language. R can be downloaded from the CRAN website. For Windows users, it is useful to install Rtools and the RStudio IDE.
The general concept behind R is to serve as an interface to other software developed in compiled languages such as C, C++, and Fortran and to give the user an interactive tool to analyze data.
Navigate to the folder of the book zip file bda/part2/R_introduction and open the R_introduction.Rproj file. This will open an RStudio session. Then open the 01_vectors.R file. Run the script line by line and follow the comments in the code. Another useful way to learn is to type the code yourself; this will help you get used to R syntax. In R, comments are written with the # symbol.
In order to display the results of running R code in the book, after code is evaluated, the results R returns are commented. This way, you can copy and paste the code from the book and try sections of it directly in R.
# Create a vector of numbers
numbers = c(1, 2, 3, 4, 5)
print(numbers)
# [1] 1 2 3 4 5
# Create a vector of letters
ltrs = c('a', 'b', 'c', 'd', 'e')
# [1] "a" "b" "c" "d" "e"
# Concatenate both
mixed_vec = c(numbers, ltrs)
print(mixed_vec)
# [1] "1" "2" "3" "4" "5" "a" "b" "c" "d" "e"
Let’s analyze what happened in the previous code. We can see it is possible to create vectors with numbers and with letters. We did not need to tell R what data type we wanted beforehand. Finally, we were able to create a vector with both numbers and letters. The vector mixed_vec has coerced the numbers to character; we can see this by noticing that the values are printed inside quotes.
The following code shows the data type of different vectors as returned by the function class. It is common to use the class function to "interrogate" an object, asking it what its class is.
### Evaluate the data types using class
### One dimensional objects
# Integer vector
num = 1:10
class(num)
# [1] "integer"
# Numeric vector, it has a float, 10.5
num = c(1:10, 10.5)
class(num)
# [1] "numeric"
# Character vector
ltrs = letters[1:10]
class(ltrs)
# [1] "character"
# Factor vector
fac = as.factor(ltrs)
class(fac)
# [1] "factor"
R supports two-dimensional objects also. In the following code, there are examples of the two most popular data structures used in R: the matrix and data.frame.
# Matrix
M = matrix(1:12, ncol = 4)
# [,1] [,2] [,3] [,4]
# [1,] 1 4 7 10
# [2,] 2 5 8 11
# [3,] 3 6 9 12
lM = matrix(letters[1:12], ncol = 4)
# [,1] [,2] [,3] [,4]
# [1,] "a" "d" "g" "j"
# [2,] "b" "e" "h" "k"
# [3,] "c" "f" "i" "l"
# Coerces the numbers to character
# cbind concatenates two matrices (or vectors) in one matrix
cbind(M, lM)
# [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]
# [1,] "1" "4" "7" "10" "a" "d" "g" "j"
# [2,] "2" "5" "8" "11" "b" "e" "h" "k"
# [3,] "3" "6" "9" "12" "c" "f" "i" "l"
class(M)
# [1] "matrix"
class(lM)
# [1] "matrix"
# data.frame
# One of the main objects of R, handles different data types in the same object.
# It is possible to have numeric, character and factor vectors in the same data.frame
df = data.frame(n = 1:5, l = letters[1:5])
df
# n l
# 1 1 a
# 2 2 b
# 3 3 c
# 4 4 d
# 5 5 e
As demonstrated in the previous example, it is possible to use different data types in the same object. In general, this is how data is presented in databases and APIs: part of the data is text or character vectors and other parts are numeric. It is the analyst’s job to determine which statistical data type to assign and then use the correct R data type for it. In statistics we normally consider variables to be of the following types −
Numeric
Nominal or categorical
Ordinal
In R, a vector can be of the following classes −
Numeric - Integer
Factor
Ordered Factor
R provides a data type for each statistical type of variable. The ordered factor is, however, rarely used; it can be created with the function factor, or ordered.
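A quick sketch of the mapping between statistical types and R classes follows; the values used are made up purely for illustration.
# Numeric variable
height = c(1.71, 1.80, 1.65)
class(height)   # "numeric"
# Nominal (categorical) variable
city = factor(c("NYC", "Lima", "NYC"))
class(city)   # "factor"
# Ordinal variable - the levels argument defines the order
size = ordered(c("small", "large", "medium"),
   levels = c("small", "medium", "large"))
class(size)   # "ordered" "factor"
# The ordering allows comparisons between levels
size > "small"
# [1] FALSE TRUE TRUE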
The following section treats the concept of indexing. This is a quite common operation, and deals with the problem of selecting sections of an object and making transformations to them.
# Let's create a data.frame
df = data.frame(numbers = 1:26, letters)
head(df)
# numbers letters
# 1 1 a
# 2 2 b
# 3 3 c
# 4 4 d
# 5 5 e
# 6 6 f
# str gives the structure of a data.frame, it’s a good summary to inspect an object
str(df)
# 'data.frame': 26 obs. of 2 variables:
# $ numbers: int 1 2 3 4 5 6 7 8 9 10 ...
# $ letters: Factor w/ 26 levels "a","b","c","d",..: 1 2 3 4 5 6 7 8 9 10 ...
# The latter shows the letters character vector was coerced as a factor.
# This can be explained by the stringsAsFactors = TRUE argument in data.frame
# read ?data.frame for more information
class(df)
# [1] "data.frame"
### Indexing
# Get the first row
df[1, ]
# numbers letters
# 1 1 a
# Used for programming normally - returns the output as a list
df[1, , drop = TRUE]
# $numbers
# [1] 1
#
# $letters
# [1] a
# Levels: a b c d e f g h i j k l m n o p q r s t u v w x y z
# Get several rows of the data.frame
df[5:7, ]
# numbers letters
# 5 5 e
# 6 6 f
# 7 7 g
### Add one column that mixes the numeric column with the factor column
df$mixed = paste(df$numbers, df$letters, sep = '')
str(df)
# 'data.frame': 26 obs. of 3 variables:
# $ numbers: int 1 2 3 4 5 6 7 8 9 10 ...
# $ letters: Factor w/ 26 levels "a","b","c","d",..: 1 2 3 4 5 6 7 8 9 10 ...
# $ mixed : chr "1a" "2b" "3c" "4d" ...
### Get columns
# Get the first column
df[, 1]
# It returns a one dimensional vector with that column
# Get two columns
df2 = df[, 1:2]
head(df2)
# numbers letters
# 1 1 a
# 2 2 b
# 3 3 c
# 4 4 d
# 5 5 e
# 6 6 f
# Get the first and third columns
df3 = df[, c(1, 3)]
df3[1:3, ]
# numbers mixed
# 1 1 1a
# 2 2 2b
# 3 3 3c
### Index columns from their names
names(df)
# [1] "numbers" "letters" "mixed"
# This is the best practice in programming, as many times indices change
# but variable names don't
# We create a variable with the names we want to subset
keep_vars = c("numbers", "mixed")
df4 = df[, keep_vars]
head(df4)
# numbers mixed
# 1 1 1a
# 2 2 2b
# 3 3 3c
# 4 4 4d
# 5 5 5e
# 6 6 6f
### subset rows and columns
# Keep the first five rows
df5 = df[1:5, keep_vars]
df5
# numbers mixed
# 1 1 1a
# 2 2 2b
# 3 3 3c
# 4 4 4d
# 5 5 5e
# subset rows using a logical condition
df6 = df[df$numbers < 10, keep_vars]
df6
# numbers mixed
# 1 1 1a
# 2 2 2b
# 3 3 3c
# 4 4 4d
# 5 5 5e
# 6 6 6f
# 7 7 7g
# 8 8 8h
# 9 9 9i
SQL stands for structured query language. It is one of the most widely used languages for extracting data from databases in traditional data warehouses and big data technologies. In order to demonstrate the basics of SQL, we will be working with examples. In order to focus on the language itself, we will be using SQL inside R. In terms of writing SQL code, this is exactly as it would be done in a database.
The core of SQL consists of three statements: SELECT, FROM and WHERE. The following examples make use of the most common use cases of SQL. Navigate to the folder bda/part2/SQL_introduction and open the SQL_introduction.Rproj file. Then open the 01_select.R script. In order to write SQL code in R we need to install the sqldf package as demonstrated in the following code.
# Install the sqldf package
install.packages('sqldf')
# load the library
library('sqldf')
library(nycflights13)
# We will be working with the flights dataset in order to introduce SQL
# Let’s take a look at the table
str(flights)
# Classes 'tbl_df', 'tbl' and 'data.frame': 336776 obs. of 16 variables:
# $ year : int 2013 2013 2013 2013 2013 2013 2013 2013 2013 2013 ...
# $ month : int 1 1 1 1 1 1 1 1 1 1 ...
# $ day : int 1 1 1 1 1 1 1 1 1 1 ...
# $ dep_time : int 517 533 542 544 554 554 555 557 557 558 ...
# $ dep_delay: num 2 4 2 -1 -6 -4 -5 -3 -3 -2 ...
# $ arr_time : int 830 850 923 1004 812 740 913 709 838 753 ...
# $ arr_delay: num 11 20 33 -18 -25 12 19 -14 -8 8 ...
# $ carrier : chr "UA" "UA" "AA" "B6" ...
# $ tailnum : chr "N14228" "N24211" "N619AA" "N804JB" ...
# $ flight : int 1545 1714 1141 725 461 1696 507 5708 79 301 ...
# $ origin : chr "EWR" "LGA" "JFK" "JFK" ...
# $ dest : chr "IAH" "IAH" "MIA" "BQN" ...
# $ air_time : num 227 227 160 183 116 150 158 53 140 138 ...
# $ distance : num 1400 1416 1089 1576 762 ...
# $ hour : num 5 5 5 5 5 5 5 5 5 5 ...
# $ minute : num 17 33 42 44 54 54 55 57 57 58 ...
The select statement is used to retrieve columns from tables and do calculations on them. The simplest SELECT statement is demonstrated in ej1. We can also create new variables as shown in ej2.
### SELECT statement
ej1 = sqldf("
SELECT
dep_time
,dep_delay
,arr_time
,carrier
,tailnum
FROM
flights
")
head(ej1)
# dep_time dep_delay arr_time carrier tailnum
# 1 517 2 830 UA N14228
# 2 533 4 850 UA N24211
# 3 542 2 923 AA N619AA
# 4 544 -1 1004 B6 N804JB
# 5 554 -6 812 DL N668DN
# 6 554 -4 740 UA N39463
# In R we can use SQL with the sqldf function. It works exactly the same as
# in a database
# The data.frame (in this case flights) represents the table we are querying
# and goes in the FROM statement
# We can also compute new variables in the select statement using the syntax:
# old_variables as new_variable
ej2 = sqldf("
SELECT
arr_delay - dep_delay as gain,
carrier
FROM
flights
")
ej2[1:5, ]
# gain carrier
# 1 9 UA
# 2 16 UA
# 3 31 AA
# 4 -17 B6
# 5 -19 DL
One of the most commonly used features of SQL is the GROUP BY statement. It allows us to compute a numeric value for different groups of another variable. Open the script 02_group_by.R.
### GROUP BY
# Computing the average
ej3 = sqldf("
SELECT
avg(arr_delay) as mean_arr_delay,
avg(dep_delay) as mean_dep_delay,
carrier
FROM
flights
GROUP BY
carrier
")
# mean_arr_delay mean_dep_delay carrier
# 1 7.3796692 16.725769 9E
# 2 0.3642909 8.586016 AA
# 3 -9.9308886 5.804775 AS
# 4 9.4579733 13.022522 B6
# 5 1.6443409 9.264505 DL
# 6 15.7964311 19.955390 EV
# 7 21.9207048 20.215543 F9
# 8 20.1159055 18.726075 FL
# 9 -6.9152047 4.900585 HA
# 10 10.7747334 10.552041 MQ
# 11 11.9310345 12.586207 OO
# 12 3.5580111 12.106073 UA
# 13 2.1295951 3.782418 US
# 14 1.7644644 12.869421 VX
# 15 9.6491199 17.711744 WN
# 16 15.5569853 18.996330 YV
# Other aggregations
ej4 = sqldf("
SELECT
avg(arr_delay) as mean_arr_delay,
min(dep_delay) as min_dep_delay,
max(dep_delay) as max_dep_delay,
carrier
FROM
flights
GROUP BY
carrier
")
# We can compute the minimum, mean, and maximum values of a numeric variable
ej4
# mean_arr_delay min_dep_delay max_dep_delay carrier
# 1 7.3796692 -24 747 9E
# 2 0.3642909 -24 1014 AA
# 3 -9.9308886 -21 225 AS
# 4 9.4579733 -43 502 B6
# 5 1.6443409 -33 960 DL
# 6 15.7964311 -32 548 EV
# 7 21.9207048 -27 853 F9
# 8 20.1159055 -22 602 FL
# 9 -6.9152047 -16 1301 HA
# 10 10.7747334 -26 1137 MQ
# 11 11.9310345 -14 154 OO
# 12 3.5580111 -20 483 UA
# 13 2.1295951 -19 500 US
# 14 1.7644644 -20 653 VX
# 15 9.6491199 -13 471 WN
# 16 15.5569853 -16 387 YV
### We could be also interested in knowing how many observations each carrier has
ej5 = sqldf("
SELECT
carrier, count(*) as count
FROM
flights
GROUP BY
carrier
")
ej5
# carrier count
# 1 9E 18460
# 2 AA 32729
# 3 AS 714
# 4 B6 54635
# 5 DL 48110
# 6 EV 54173
# 7 F9 685
# 8 FL 3260
# 9 HA 342
# 10 MQ 26397
# 11 OO 32
# 12 UA 58665
# 13 US 20536
# 14 VX 5162
# 15 WN 12275
# 16 YV 601
Among the most useful features of SQL are joins. A join means that we want to combine table A and table B in one table using one column to match the values of both tables. There are different types of joins; in practical terms, the most useful ones to get started with are the inner join and the left outer join.
# Let’s create two tables: A and B to demonstrate joins.
A = data.frame(c1 = 1:4, c2 = letters[1:4])
B = data.frame(c1 = c(2,4,5,6), c2 = letters[c(2:5)])
A
# c1 c2
# 1 a
# 2 b
# 3 c
# 4 d
B
# c1 c2
# 2 b
# 4 c
# 5 d
# 6 e
### INNER JOIN
# This means to match the observations of the column we would join the tables by.
inner = sqldf("
SELECT
A.c1, B.c2
FROM
A INNER JOIN B
ON A.c1 = B.c1
")
# Only the rows that match c1 in both A and B are returned
inner
# c1 c2
# 2 b
# 4 c
### LEFT OUTER JOIN
# The left outer join, sometimes just called left join, will first return
# all the values of the join column from the A table
left = sqldf("
SELECT
A.c1, B.c2
FROM
A LEFT OUTER JOIN B
ON A.c1 = B.c1
")
# All the rows of A are returned; rows with no match in B get NA
left
# c1 c2
# 1 <NA>
# 2 b
# 3 <NA>
# 4 c
The first approach to analyzing data is to visually analyze it. The objectives of doing this are normally finding relations between variables and univariate descriptions of the variables. We can divide these strategies as −
Univariate analysis
Multivariate analysis
Univariate is a statistical term. In practice, it means we want to analyze a variable independently from the rest of the data. The plots that allow us to do this efficiently are −
Box-plots are normally used to compare distributions. They are a great way to visually inspect if there are differences between distributions. We can see if there are differences between the price of diamonds for different cuts.
# We will be using the ggplot2 library for plotting
library(ggplot2)
data("diamonds")
# We will be using the diamonds dataset to analyze distributions of numeric variables
head(diamonds)
# carat cut color clarity depth table price x y z
# 1 0.23 Ideal E SI2 61.5 55 326 3.95 3.98 2.43
# 2 0.21 Premium E SI1 59.8 61 326 3.89 3.84 2.31
# 3 0.23 Good E VS1 56.9 65 327 4.05 4.07 2.31
# 4 0.29 Premium I VS2 62.4 58 334 4.20 4.23 2.63
# 5 0.31 Good J SI2 63.3 58 335 4.34 4.35 2.75
# 6 0.24 Very Good J VVS2 62.8 57 336 3.94 3.96 2.48
### Box-Plots
p = ggplot(diamonds, aes(x = cut, y = price, fill = cut)) +
geom_boxplot() +
theme_bw()
print(p)
We can see in the plot that there are differences in the distribution of diamond prices for different types of cut.
source('01_box_plots.R')
# We can plot histograms for each level of the cut factor variable
# using facet_grid
p = ggplot(diamonds, aes(x = price, fill = cut)) +
geom_histogram() +
facet_grid(cut ~ .) +
theme_bw()
p
# The previous plot doesn't allow us to visualize the data correctly
# because of the differences in scale
# we can turn this off using the scales argument of facet_grid
p = ggplot(diamonds, aes(x = price, fill = cut)) +
geom_histogram() +
facet_grid(cut ~ ., scales = 'free') +
theme_bw()
p
png('02_histogram_diamonds_cut.png')
print(p)
dev.off()
The output of the above code will be as follows −
Multivariate graphical methods in exploratory data analysis have the objective of finding relationships among different variables. There are two ways to accomplish this that are commonly used: plotting a correlation matrix of numeric variables or simply plotting the raw data as a matrix of scatter plots.
In order to demonstrate this, we will use the diamonds dataset. To follow the code, open the script bda/part2/charts/03_multivariate_analysis.R.
library(ggplot2)
data(diamonds)
# Correlation matrix plots
keep_vars = c('carat', 'depth', 'price', 'table')
df = diamonds[, keep_vars]
# compute the correlation matrix
M_cor = cor(df)
# carat depth price table
# carat 1.00000000 0.02822431 0.9215913 0.1816175
# depth 0.02822431 1.00000000 -0.0106474 -0.2957785
# price 0.92159130 -0.01064740 1.0000000 0.1271339
# table 0.18161755 -0.29577852 0.1271339 1.0000000
# plots
heatmap(M_cor)
The code will produce the following output −
This is a summary; it tells us that there is a strong correlation between price and carat, and not much among the other variables.
A correlation matrix can be useful when we have a large number of variables in which case plotting the raw data would not be practical. As mentioned, it is possible to show the raw data also −
library(GGally)
ggpairs(df)
We can see in the plot that the results displayed in the heatmap are confirmed; there is a 0.922 correlation between the price and carat variables.
It is possible to visualize this relationship in the price-carat scatterplot located in the (3, 1) index of the scatterplot matrix.
There are a variety of tools that allow a data scientist to analyze data effectively. Normally the engineering aspect of data analysis focuses on databases, while data scientists focus on tools that can implement data products. The following section discusses the advantages of different tools, with a focus on the statistical packages data scientists use most often in practice.
R is an open source programming language with a focus on statistical analysis. It is competitive with commercial tools such as SAS and SPSS in terms of statistical capabilities. It can be thought of as an interface to other programming languages such as C, C++ or Fortran.
Another advantage of R is the large number of open source libraries that are available. In CRAN there are more than 6000 packages that can be downloaded for free, and on GitHub there is a wide variety of R packages available.
In terms of performance, R is slow for intensive operations, but given the large number of libraries available, the slow sections of the code are usually written in compiled languages. If you intend to do operations that require writing deep for loops, R wouldn’t be your best alternative. For data analysis purposes, there are nice libraries such as data.table, glmnet, ranger, xgboost, ggplot2 and caret that allow R to be used as an interface to faster programming languages.
Python is a general purpose programming language and it contains a significant number of libraries devoted to data analysis such as pandas, scikit-learn, theano, numpy and scipy.
Most of what’s available in R can also be done in Python but we have found that R is simpler to use. In case you are working with large datasets, normally Python is a better choice than R. Python can be used quite effectively to clean and process data line by line. This is possible from R but it’s not as efficient as Python for scripting tasks.
For machine learning, scikit-learn is a nice environment that has available a large amount of algorithms that can handle medium sized datasets without a problem. Compared to R’s equivalent library (caret), scikit-learn has a cleaner and more consistent API.
Julia is a high-level, high-performance dynamic programming language for technical computing. Its syntax is quite similar to R or Python, so if you are already working with R or Python it should be quite simple to write the same code in Julia. The language is quite new and has grown significantly in the last years, so it is definitely an option at the moment.
We would recommend Julia for prototyping algorithms that are computationally intensive such as neural networks. It is a great tool for research. In terms of implementing a model in production probably Python has better alternatives. However, this is becoming less of a problem as there are web services that do the engineering of implementing models in R, Python and Julia.
SAS is a commercial language that is still being used for business intelligence. It has a base language that allows the user to program a wide variety of applications. It contains quite a few commercial products that give non-experts users the ability to use complex tools such as a neural network library without the need of programming.
Beyond the obvious disadvantage of commercial tools, SAS doesn’t scale well to large datasets. Even medium-sized datasets can cause problems with SAS and make the server crash. SAS is recommended only if you are working with small datasets and the users aren’t expert data scientists. For advanced users, R and Python provide a more productive environment.
SPSS is currently a product of IBM for statistical analysis. It is mostly used to analyze survey data, and for users that are not able to program, it is a decent alternative. It is probably as simple to use as SAS, but in terms of implementing a model, it is simpler because it provides SQL code to score a model. This code is normally not efficient, but it’s a start, whereas SAS sells the product that scores models for each database separately. For small data and an inexperienced team, SPSS is as good an option as SAS.
The software is however rather limited, and experienced users will be orders of magnitude more productive using R or Python.
There are other tools available such as Matlab or its open source version (Octave). These tools are mostly used for research. In terms of capabilities R or Python can do all that’s available in Matlab or Octave. It only makes sense to buy a license of the product if you are interested in the support they provide.
When analyzing data, it is possible to have a statistical approach. The basic tools that are needed to perform basic analysis are −
Correlation analysis
Analysis of Variance
Hypothesis Testing
When working with large datasets, this does not pose a problem, as these methods aren’t computationally intensive, with the exception of correlation analysis. In that case, it is always possible to take a sample, and the results should be robust.
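A minimal sketch of that sampling idea follows, using the diamonds dataset; the sample size of 10,000 rows is an arbitrary choice made for illustration.
library(ggplot2)
data("diamonds")
# Take a random sample of 10,000 rows before computing the correlation
set.seed(42)
idx = sample(nrow(diamonds), 10000)
sample_df = diamonds[idx, c('carat', 'depth', 'price', 'table')]
# The correlation matrix of the sample should be very close to the one
# computed on the full dataset
cor(sample_df)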
Correlation analysis seeks to find linear relationships between numeric variables. This can be of use in different circumstances. One common use is exploratory data analysis; in section 16.0.2 of the book there is a basic example of this approach. First of all, the correlation metric used in the mentioned example is based on the Pearson coefficient. There is, however, another interesting correlation metric that is not affected by outliers. This metric is called the Spearman correlation.
The Spearman correlation metric is more robust to the presence of outliers than the Pearson method and gives better estimates of the linear relations between numeric variables when the data is not normally distributed.
library(ggplot2)
# Select variables that are interesting to compare Pearson and Spearman
# correlation methods.
x = diamonds[, c('x', 'y', 'z', 'price')]
# From the histograms we can expect differences in the correlations of both
# metrics. In this case, as the variables are clearly not normally distributed,
# the Spearman correlation is a better estimate of the linear relation among
# numeric variables.
par(mfrow = c(2,2))
colnm = names(x)
for(i in 1:4) {
hist(x[[i]], col = 'deepskyblue3', main = sprintf('Histogram of %s', colnm[i]))
}
par(mfrow = c(1,1))
From the histograms in the following figure, we can expect differences in the correlations of both metrics. In this case, as the variables are clearly not normally distributed, the spearman correlation is a better estimate of the linear relation among numeric variables.
In order to compute the correlation in R, open the file bda/part2/statistical_methods/correlation/correlation.R that has this code section.
## Correlation Matrix - Pearson and spearman
cor_pearson <- cor(x, method = 'pearson')
cor_spearman <- cor(x, method = 'spearman')
### Pearson Correlation
print(cor_pearson)
# x y z price
# x 1.0000000 0.9747015 0.9707718 0.8844352
# y 0.9747015 1.0000000 0.9520057 0.8654209
# z 0.9707718 0.9520057 1.0000000 0.8612494
# price 0.8844352 0.8654209 0.8612494 1.0000000
### Spearman Correlation
print(cor_spearman)
# x y z price
# x 1.0000000 0.9978949 0.9873553 0.9631961
# y 0.9978949 1.0000000 0.9870675 0.9627188
# z 0.9873553 0.9870675 1.0000000 0.9572323
# price 0.9631961 0.9627188 0.9572323 1.0000000
The chi-squared test allows us to test if two random variables are independent. This means that the probability distribution of each variable doesn’t influence the other. In order to evaluate the test in R we need first to create a contingency table, and then pass the table to the chisq.test R function.
For example, let’s check if there is an association between the variables: cut and color from the diamonds dataset. The test is formally defined as −
H0: The variables cut and color are independent
H1: The variables cut and color are not independent
We would assume there is a relationship between these two variables by their name, but the test can give an objective "rule" saying how significant this result is or not.
In the following code snippet, we found that the p-value of the test is 2.2e-16; this is almost zero in practical terms. Then, after running the test with a Monte Carlo simulation, we found that the p-value is 0.0004998, which is still well below the 0.05 threshold. This result means that we reject the null hypothesis (H0), so we believe the variables cut and color are not independent.
library(ggplot2)
# Use the table function to compute the contingency table
tbl = table(diamonds$cut, diamonds$color)
tbl
# D E F G H I J
# Fair 163 224 312 314 303 175 119
# Good 662 933 909 871 702 522 307
# Very Good 1513 2400 2164 2299 1824 1204 678
# Premium 1603 2337 2331 2924 2360 1428 808
# Ideal 2834 3903 3826 4884 3115 2093 896
# In order to run the test we just use the chisq.test function.
chisq.test(tbl)
# Pearson’s Chi-squared test
# data: tbl
# X-squared = 310.32, df = 24, p-value < 2.2e-16
# It is also possible to compute the p-values using a Monte Carlo simulation
# We need to add the simulate.p.value = TRUE flag and the number of
# simulations (B)
chisq.test(tbl, simulate.p.value = TRUE, B = 2000)
# Pearson’s Chi-squared test with simulated p-value (based on 2000 replicates)
# data: tbl
# X-squared = 310.32, df = NA, p-value = 0.0004998
The idea of the t-test is to evaluate if there are differences in the distribution of a numeric variable between different groups of a nominal variable. In order to demonstrate this, we will select the Fair and Ideal levels of the factor variable cut, and then compare the values of a numeric variable between those two groups.
data = diamonds[diamonds$cut %in% c('Fair', 'Ideal'), ]
data$cut = droplevels.factor(data$cut) # Drop levels that aren't used from the cut variable
df1 = data[, c('cut', 'price')]
# We can see the price means are different for each group
tapply(df1$price, df1$cut, mean)
# Fair Ideal
# 4358.758 3457.542
The t-tests are implemented in R with the t.test function. The formula interface to t.test is the simplest way to use it; the idea is that a numeric variable is explained by a group variable.
For example: t.test(numeric_variable ~ group_variable, data = data). In the previous example, the numeric_variable is price and the group_variable is cut.
From a statistical perspective, we are testing if there are differences in the distributions of the numeric variable among two groups. Formally the hypothesis test is described with a null (H0) hypothesis and an alternative hypothesis (H1).
H0: There are no differences in the distributions of the price variable among the Fair and Ideal groups
H1: There are differences in the distributions of the price variable among the Fair and Ideal groups
This test can be implemented in R with the following code −
t.test(price ~ cut, data = data)
# Welch Two Sample t-test
#
# data: price by cut
# t = 9.7484, df = 1894.8, p-value < 2.2e-16
# alternative hypothesis: true difference in means is not equal to 0
# 95 percent confidence interval:
# 719.9065 1082.5251
# sample estimates:
# mean in group Fair mean in group Ideal
# 4358.758 3457.542
# Another way to validate the previous results is to just plot the
# distributions using a box-plot
plot(price ~ cut, data = data, ylim = c(0,12000),
col = 'deepskyblue3')
We can analyze the test result by checking if the p-value is lower than 0.05. If this is the case, we reject the null hypothesis and keep the alternative hypothesis. This means we have found differences of price among the two levels of the cut factor. By the names of the levels we would have expected this result, but we wouldn’t have expected that the mean price in the Fair group would be higher than in the Ideal group. We can see this by comparing the means of each factor level.
The plot command produces a graph that shows the relationship between the price and cut variable. It is a box-plot; we have covered this plot in section 16.0.1 but it basically shows the distribution of the price variable for the two levels of cut we are analyzing.
Analysis of Variance (ANOVA) is a statistical model used to analyze the differences among group distributions by comparing the means and variances of each group; the model was developed by Ronald Fisher. ANOVA provides a statistical test of whether or not the means of several groups are equal, and therefore generalizes the t-test to more than two groups.
ANOVAs are useful for comparing three or more groups for statistical significance because doing multiple two-sample t-tests would result in an increased chance of committing a statistical type I error.
In terms of providing a mathematical explanation, the following is needed to understand the test.
x_ij = x̄ + (x̄_i − x̄) + (x_ij − x̄_i)
This leads to the following model −
x_ij = μ + α_i + ε_ij
where μ is the grand mean and α_i is the i-th group mean. The error term ε_ij is assumed to be iid from a normal distribution. The null hypothesis of the test is that −
α_1 = α_2 = ... = α_k
In terms of computing the test statistic, we need to compute two values −
Sum of squares for between group difference −
SSD_B = Σ_i^k Σ_j^n (x̄_i − x̄)²
Sums of squares within groups
SSD_W = Σ_i^k Σ_j^n (x_ij − x̄_i)²
where SSD_B has k − 1 degrees of freedom and SSD_W has N − k degrees of freedom. Then we can define the mean squared differences for each metric.
MS_B = SSD_B / (k − 1)
MS_W = SSD_W / (N − k)
Finally, the test statistic in ANOVA is defined as the ratio of the above two quantities
F = MS_B / MS_W
which follows an F-distribution with k − 1 and N − k degrees of freedom. If the null hypothesis is true, F would likely be close to 1. Otherwise, the between group mean square MS_B is likely to be large, which results in a large F value.
Basically, ANOVA examines the two sources of the total variance and sees which part contributes more. This is why it is called analysis of variance although the intention is to compare group means.
In terms of computing the statistic, it is actually rather simple to do in R. The following example will demonstrate how it is done and plot the results.
library(ggplot2)
# We will be using the mtcars dataset
head(mtcars)
# mpg cyl disp hp drat wt qsec vs am gear carb
# Mazda RX4 21.0 6 160 110 3.90 2.620 16.46 0 1 4 4
# Mazda RX4 Wag 21.0 6 160 110 3.90 2.875 17.02 0 1 4 4
# Datsun 710 22.8 4 108 93 3.85 2.320 18.61 1 1 4 1
# Hornet 4 Drive 21.4 6 258 110 3.08 3.215 19.44 1 0 3 1
# Hornet Sportabout 18.7 8 360 175 3.15 3.440 17.02 0 0 3 2
# Valiant 18.1 6 225 105 2.76 3.460 20.22 1 0 3 1
# Let's see if there are differences between the groups of cyl in the mpg variable.
data = mtcars[, c('mpg', 'cyl')]
fit = lm(mpg ~ cyl, data = mtcars)
anova(fit)
# Analysis of Variance Table
# Response: mpg
# Df Sum Sq Mean Sq F value Pr(>F)
# cyl 1 817.71 817.71 79.561 6.113e-10 ***
# Residuals 30 308.33 10.28
# Signif. codes: 0 *** 0.001 ** 0.01 * 0.05 .
# Plot the distribution
plot(mpg ~ as.factor(cyl), data = mtcars, col = 'deepskyblue3')
The code will produce the following output −
The p-value we get in the example is significantly smaller than 0.05, so R returns the symbol '***' to denote this. It means we reject the null hypothesis and that we find differences between the mpg means among the different groups of the cyl variable.
Machine learning is a subfield of computer science that deals with tasks such as pattern recognition, computer vision, speech recognition, text analytics and has a strong link with statistics and mathematical optimization. Applications include the development of search engines, spam filtering, Optical Character Recognition (OCR) among others. The boundaries between data mining, pattern recognition and the field of statistical learning are not clear and basically all refer to similar problems.
Machine learning can be divided into two types of tasks −
Supervised Learning
Unsupervised Learning
Supervised learning refers to a type of problem where there is an input data defined as a matrix X and we are interested in predicting a response y. Here X = {x1, x2, ..., xn} has n predictors, and the response takes one of two values, y = {c1, c2}.
An example application would be to predict the probability of a web user clicking on an ad using demographic features as predictors. This is often called predicting the click-through rate (CTR). Then y = {click, doesn’t click} and the predictors could be the IP address used, the day the user entered the site, the user’s city, and country, among other features that could be available.
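As a rough sketch of such a supervised problem, the following code fits a logistic regression on a made-up click dataset; the variable names and the simulated data are assumptions made for illustration, not a real CTR dataset.
set.seed(1)
# Simulate a tiny click dataset: one row per ad impression
n = 1000
ads = data.frame(
   hour = sample(0:23, n, replace = TRUE),
   country = factor(sample(c('US', 'MX', 'BR'), n, replace = TRUE)),
   clicked = rbinom(n, 1, 0.1))   # roughly a 10% click-through rate
# Fit a logistic regression: P(click) as a function of the predictors
ctr_model = glm(clicked ~ hour + country, data = ads, family = binomial)
# Predicted click probabilities for the impressions
head(predict(ctr_model, type = 'response'))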
Unsupervised learning deals with the problem of finding groups that are similar within each other without having a class to learn from. There are several approaches to the task of finding groups such that the instances within each group are similar to each other and different from those in other groups.
An example application of unsupervised learning is customer segmentation. For example, in the telecommunications industry a common task is to segment users according to the usage they give to the phone. This would allow the marketing department to target each group with a different product.
Naive Bayes is a probabilistic technique for constructing classifiers. The characteristic assumption of the naive Bayes classifier is to consider that the value of a particular feature is independent of the value of any other feature, given the class variable.
Despite the oversimplified assumptions mentioned previously, naive Bayes classifiers have good results in complex real-world situations. An advantage of naive Bayes is that it only requires a small amount of training data to estimate the parameters necessary for classification and that the classifier can be trained incrementally.
Naive Bayes is a conditional probability model: given a problem instance to be classified, represented by a vector x = (x1, ..., xn) representing some n features (independent variables), it assigns to this instance probabilities for each of K possible outcomes or classes.
p(C_k | x_1, ..., x_n)
The problem with the above formulation is that if the number of features n is large or if a feature can take on a large number of values, then basing such a model on probability tables is infeasible. We therefore reformulate the model to make it simpler. Using Bayes’ theorem, the conditional probability can be decomposed as −
p(C_k | x) = p(C_k) p(x | C_k) / p(x)
This means that under the above independence assumptions, the conditional distribution over the class variable C is −
p(C_k | x_1, ..., x_n) = (1/Z) p(C_k) ∏_{i=1}^{n} p(x_i | C_k)
where the evidence Z = p(x) is a scaling factor dependent only on x_1, ..., x_n, that is, a constant if the values of the feature variables are known. One common rule is to pick the hypothesis that is most probable; this is known as the maximum a posteriori or MAP decision rule. The corresponding classifier, a Bayes classifier, is the function that assigns a class label ŷ = C_k for some k as follows −
ŷ = argmax_k p(C_k) ∏_{i=1}^{n} p(x_i | C_k)
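To make the MAP rule concrete, here is a tiny hand computation with made-up priors and conditional probabilities for two classes and two binary features; all the numbers are purely illustrative assumptions.
# Made-up priors and per-feature likelihoods for the classes spam / ham
prior = c(spam = 0.4, ham = 0.6)
p_word1 = c(spam = 0.8, ham = 0.1)   # P(feature1 = 1 | class)
p_word2 = c(spam = 0.3, ham = 0.4)   # P(feature2 = 1 | class)
# Observed instance: both features present
score = prior * p_word1 * p_word2   # proportional to the posterior p(C_k | x)
score
# spam   ham
# 0.096 0.024
# MAP decision: pick the class with the highest score
names(which.max(score))
# [1] "spam"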
Implementing the algorithm in R is a straightforward process. The following example demonstrates how to train a Naive Bayes classifier and use it for prediction in a spam filtering problem.
The following script is available in the bda/part3/naive_bayes/naive_bayes.R file.
# Install these packages
pkgs = c("klaR", "caret", "ElemStatLearn")
install.packages(pkgs)
library('ElemStatLearn')
library("klaR")
library("caret")
# Split the data in training and testing
inx = sample(nrow(spam), round(nrow(spam) * 0.9))
train = spam[inx,]
test = spam[-inx,]
# Define a matrix with features, X_train
# And a vector with class labels, y_train
X_train = train[,-58]
y_train = train$spam
X_test = test[,-58]
y_test = test$spam
# Train the model
nb_model = train(X_train, y_train, method = 'nb',
trControl = trainControl(method = 'cv', number = 3))
# Compute
preds = predict(nb_model$finalModel, X_test)$class
tbl = table(y_test, yhat = preds)
sum(diag(tbl)) / sum(tbl)
# 0.7217391
As we can see from the result, the accuracy of the Naive Bayes model is 72%. This means the model correctly classifies 72% of the instances.
k-means clustering aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean, serving as a prototype of the cluster. This results in a partitioning of the data space into Voronoi cells.
Given a set of observations (x1, x2, ..., xn), where each observation is a d-dimensional real vector, k-means clustering aims to partition the n observations into k groups G = {G1, G2, ..., Gk} so as to minimize the within-cluster sum of squares (WCSS) defined as follows −
argmin_G Σ_{i=1}^{k} Σ_{x ∈ G_i} ‖x − μ_i‖²
The latter formula shows the objective function that is minimized in order to find the optimal prototypes in k-means clustering. The intuition behind the formula is that we would like to find groups that are different from each other, and each member of a group should be similar to the other members of its cluster.
The following example demonstrates how to run the k-means clustering algorithm in R.
library(ggplot2)
# Prepare Data
data = mtcars
# We need to scale the data to have zero mean and unit variance
data <- scale(data)
# Determine number of clusters
wss <- (nrow(data)-1)*sum(apply(data,2,var))
for (i in 2:dim(data)[2]) {
wss[i] <- sum(kmeans(data, centers = i)$withinss)
}
# Plot the clusters
plot(1:dim(data)[2], wss, type = "b", xlab = "Number of Clusters",
ylab = "Within groups sum of squares")
In order to find a good value for K, we can plot the within groups sum of squares for different values of K. This metric normally decreases as more groups are added; we would like to find the point where the decrease in the within groups sum of squares starts to slow down. In the plot, this value is best represented by K = 6.
Now that the value of K has been defined, we need to run the algorithm with that value.
# K-Means Cluster Analysis
fit <- kmeans(data, 5) # 5 cluster solution
# get cluster means
aggregate(data,by = list(fit$cluster),FUN = mean)
# append cluster assignment
data <- data.frame(data, fit$cluster)
Let I = i1, i2, ..., in be a set of n binary attributes called items. Let D = t1, t2, ..., tm be a set of transactions called the database. Each transaction in D has a unique transaction ID and contains a subset of the items in I. A rule is defined as an implication of the form X ⇒ Y where X, Y ⊆ I and X ∩ Y = ∅.
The sets of items (for short item-sets) X and Y are called antecedent (left-hand-side or LHS) and consequent (right-hand-side or RHS) of the rule.
To illustrate the concepts, we use a small example from the supermarket domain. The set of items is I = {milk, bread, butter, beer} and a small database containing the items is shown in the following table.
An example rule for the supermarket could be {milk, bread} ⇒ {butter} meaning that if milk and bread is bought, customers also buy butter. To select interesting rules from the set of all possible rules, constraints on various measures of significance and interest can be used. The best-known constraints are minimum thresholds on support and confidence.
The support supp(X) of an item-set X is defined as the proportion of transactions in the data set which contain the item-set. In the example database in Table 1, the item-set {milk, bread} has a support of 2/5 = 0.4 since it occurs in 40% of all transactions (2 out of 5 transactions). Finding frequent item-sets can be seen as a simplification of the unsupervised learning problem.
The confidence of a rule is defined conf(X ⇒ Y ) = supp(X ∪ Y )/supp(X). For example, the rule {milk, bread} ⇒ {butter} has a confidence of 0.2/0.4 = 0.5 in the database in Table 1, which means that for 50% of the transactions containing milk and bread the rule is correct. Confidence can be interpreted as an estimate of the probability P(Y|X), the probability of finding the RHS of the rule in transactions under the condition that these transactions also contain the LHS.
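The following sketch computes these two measures in R for a small transaction database; since the table itself is not reproduced here, the five transactions below are a hypothetical reconstruction that is consistent with the support and confidence values quoted above.
# A hypothetical database of 5 transactions over I = {milk, bread, butter, beer}
transactions = list(
   c('milk', 'bread'),
   c('bread', 'butter'),
   c('beer'),
   c('milk', 'bread', 'butter'),
   c('bread'))
# Support of an item-set: proportion of transactions that contain all its items
supp = function(itemset) {
   mean(sapply(transactions, function(t) all(itemset %in% t)))
}
supp(c('milk', 'bread'))             # 0.4
supp(c('milk', 'bread', 'butter'))   # 0.2
# Confidence of the rule {milk, bread} => {butter}
supp(c('milk', 'bread', 'butter')) / supp(c('milk', 'bread'))   # 0.5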
In the script located in bda/part3/apriori.R the code to implement the apriori algorithm can be found.
# Load the library for doing association rules
# install.packages('arules')
library(arules)
# Data preprocessing
data("AdultUCI")
AdultUCI[1:2,]
AdultUCI[["fnlwgt"]] <- NULL
AdultUCI[["education-num"]] <- NULL
AdultUCI[[ "age"]] <- ordered(cut(AdultUCI[[ "age"]], c(15,25,45,65,100)),
labels = c("Young", "Middle-aged", "Senior", "Old"))
AdultUCI[[ "hours-per-week"]] <- ordered(cut(AdultUCI[[ "hours-per-week"]],
c(0,25,40,60,168)), labels = c("Part-time", "Full-time", "Over-time", "Workaholic"))
AdultUCI[[ "capital-gain"]] <- ordered(cut(AdultUCI[[ "capital-gain"]],
c(-Inf,0,median(AdultUCI[[ "capital-gain"]][AdultUCI[[ "capital-gain"]]>0]),Inf)),
labels = c("None", "Low", "High"))
AdultUCI[[ "capital-loss"]] <- ordered(cut(AdultUCI[[ "capital-loss"]],
c(-Inf,0, median(AdultUCI[[ "capital-loss"]][AdultUCI[[ "capital-loss"]]>0]),Inf)),
labels = c("none", "low", "high"))
In order to generate rules using the apriori algorithm, we need to create a transaction matrix. The following code shows how to do this in R.
# Convert the data into a transactions format
Adult <- as(AdultUCI, "transactions")
Adult
# transactions in sparse format with
# 48842 transactions (rows) and
# 115 items (columns)
summary(Adult)
# Plot frequent item-sets
itemFrequencyPlot(Adult, support = 0.1, cex.names = 0.8)
# generate rules
min_support = 0.01
confidence = 0.6
rules <- apriori(Adult, parameter = list(support = min_support, confidence = confidence))
rules
inspect(rules[100:110, ])
# lhs rhs support confidence lift
# {occupation = Farming-fishing} => {sex = Male} 0.02856148 0.9362416 1.4005486
# {occupation = Farming-fishing} => {race = White} 0.02831579 0.9281879 1.0855456
# {occupation = Farming-fishing} => {native-country = United-States} 0.02671881 0.8758389 0.9759474
A Decision Tree is an algorithm used for supervised learning problems such as classification or regression. A decision tree or a classification tree is a tree in which each internal (nonleaf) node is labeled with an input feature. The arcs coming from a node labeled with a feature are labeled with each of the possible values of the feature. Each leaf of the tree is labeled with a class or a probability distribution over the classes.
A tree can be "learned" by splitting the source set into subsets based on an attribute value test. This process is repeated on each derived subset in a recursive manner called recursive partitioning. The recursion is completed when the subset at a node has all the same value of the target variable, or when splitting no longer adds value to the predictions. This process of top-down induction of decision trees is an example of a greedy algorithm, and it is the most common strategy for learning decision trees.
Decision trees used in data mining are of two main types −
Classification tree − when the response is a nominal variable, for example if an email is spam or not.
Regression tree − when the predicted outcome can be considered a real number (e.g. the salary of a worker).
Decision trees are a simple method, and as such they have some problems. One of these issues is the high variance of the models that decision trees produce. In order to alleviate this problem, ensemble methods of decision trees were developed. Two groups of ensemble methods are currently used extensively −
Bagging decision trees − Bagging builds multiple decision trees by repeatedly resampling the training data with replacement and voting the trees for a consensus prediction. Combined with random feature selection, this approach is known as the random forest.
Boosting decision trees − Gradient boosting combines weak learners (in this case, decision trees) into a single strong learner in an iterative fashion. It fits a weak tree to the data and iteratively keeps fitting weak learners in order to correct the errors of the previous model. A minimal sketch of both approaches is given below.
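The following sketch is not part of the original text; it merely illustrates the two ensemble approaches on the built-in iris data, assuming the randomForest and gbm packages are installed.
# install.packages(c('randomForest', 'gbm'))    # assumed to be available
library(randomForest)
library(gbm)
# Bagging-style ensemble: a random forest of 500 trees
rf_fit <- randomForest(Species ~ ., data = iris, ntree = 500)
print(rf_fit)    # includes the out-of-bag error estimate
# Boosting: gradient boosted trees on a binary target
iris_bin <- transform(iris, is_virginica = as.numeric(Species == "virginica"))
iris_bin$Species <- NULL
gbm_fit <- gbm(is_virginica ~ ., data = iris_bin, distribution = "bernoulli",
   n.trees = 1000, interaction.depth = 2, shrinkage = 0.01)
summary(gbm_fit)    # relative influence of each feature
The rest of this section works through a single conditional inference tree on the diamonds data, using the party package.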
# Install the party package
# install.packages('party')
library(party)
library(ggplot2)
head(diamonds)
# We will predict the cut of diamonds using the features available in the diamonds dataset.
ct = ctree(cut ~ ., data = diamonds)
# plot(ct, main="Conditional Inference Tree")
# Example output
# Response: cut
# Inputs: carat, color, clarity, depth, table, price, x, y, z
# Number of observations: 53940
#
# 1) table <= 57; criterion = 1, statistic = 10131.878
# 2) depth <= 63; criterion = 1, statistic = 8377.279
# 3) table <= 56.4; criterion = 1, statistic = 226.423
# 4) z <= 2.64; criterion = 1, statistic = 70.393
# 5) clarity <= VS1; criterion = 0.989, statistic = 10.48
# 6) color <= E; criterion = 0.997, statistic = 12.829
# 7)* weights = 82
# 6) color > E
#Table of prediction errors
table(predict(ct), diamonds$cut)
# Fair Good Very Good Premium Ideal
# Fair 1388 171 17 0 14
# Good 102 2912 499 26 27
# Very Good 54 998 3334 249 355
# Premium 44 711 5054 11915 1167
# Ideal 22 114 3178 1601 19988
# Estimated class probabilities
probs = predict(ct, newdata = diamonds, type = "prob")
probs = do.call(rbind, probs)
head(probs)
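As a quick, optional check that is not in the original script, the overall accuracy of the tree can be computed from the same predictions used for the table above.
# Overall accuracy: proportion of diamonds on the diagonal of the confusion table
sum(diag(table(predict(ct), diamonds$cut))) / nrow(diamonds)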
Logistic regression is a classification model in which the response variable is categorical. It is an algorithm that comes from statistics and is used for supervised classification problems. In logistic regression, we seek the vector β of parameters of the following model that minimizes a cost function.
\mathrm{logit}(p_i) = \ln\left(\frac{p_i}{1 - p_i}\right) = \beta_0 + \beta_1 x_{1,i} + \cdots + \beta_k x_{k,i}
The following code demonstrates how to fit a logistic regression model in R. We will use the spam dataset here to demonstrate logistic regression, the same one that was used for Naive Bayes.
From the prediction results in terms of accuracy, we find that the regression model achieves a 92.5% accuracy on the test set, compared to the 72% achieved by the Naive Bayes classifier.
library(ElemStatLearn)
head(spam)
# Split dataset in training and testing
inx = sample(nrow(spam), round(nrow(spam) * 0.8))
train = spam[inx,]
test = spam[-inx,]
# Fit regression model
fit = glm(spam ~ ., data = train, family = binomial())
summary(fit)
# Call:
# glm(formula = spam ~ ., family = binomial(), data = train)
#
# Deviance Residuals:
# Min 1Q Median 3Q Max
# -4.5172 -0.2039 0.0000 0.1111 5.4944
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) -1.511e+00 1.546e-01 -9.772 < 2e-16 ***
# A.1 -4.546e-01 2.560e-01 -1.776 0.075720 .
# A.2 -1.630e-01 7.731e-02 -2.108 0.035043 *
# A.3 1.487e-01 1.261e-01 1.179 0.238591
# A.4 2.055e+00 1.467e+00 1.401 0.161153
# A.5 6.165e-01 1.191e-01 5.177 2.25e-07 ***
# A.6 7.156e-01 2.768e-01 2.585 0.009747 **
# A.7 2.606e+00 3.917e-01 6.652 2.88e-11 ***
# A.8 6.750e-01 2.284e-01 2.955 0.003127 **
# A.9 1.197e+00 3.362e-01 3.559 0.000373 ***
# Signif. codes: 0 *** 0.001 ** 0.01 * 0.05 . 0.1 1
### Make predictions
preds = predict(fit, test, type = 'response')
preds = ifelse(preds > 0.5, 1, 0)
tbl = table(target = test$spam, preds)
tbl
# preds
# target 0 1
# email 535 23
# spam 46 316
sum(diag(tbl)) / sum(tbl)
# 0.925
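Beyond accuracy, precision and recall for the spam class can be read off the same confusion table. This short follow-up is not part of the original script.
# Precision and recall for the spam class, from the confusion table above
precision <- tbl["spam", "1"] / sum(tbl[, "1"])
recall <- tbl["spam", "1"] / sum(tbl["spam", ])
c(precision = precision, recall = recall)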
A time series is a sequence of observations of categorical or numeric variables indexed by a date or timestamp. A clear example of time series data is the time series of a stock price. The basic structure of time series data is a sequence of timestamped values, for example observations recorded every hour.
Normally, the first step in time series analysis is to plot the series; this is typically done with a line chart.
The most common application of time series analysis is forecasting future values of a numeric variable using the temporal structure of the data. This means that the available observations are used to predict future values.
The temporal ordering of the data implies that traditional regression methods are not useful. In order to build robust forecasts, we need models that take the temporal ordering of the data into account.
The most widely used model for Time Series Analysis is called Autoregressive Moving Average (ARMA). The model consists of two parts, an autoregressive (AR) part and a moving average (MA) part. The model is usually then referred to as the ARMA(p, q) model where p is the order of the autoregressive part and q is the order of the moving average part.
The AR(p) is read as an autoregressive model of order p. Mathematically it is written as −
X_t = c + \sum_{i=1}^{p} \varphi_i X_{t-i} + \varepsilon_t
where {φ1, ..., φp} are parameters to be estimated, c is a constant, and the random variable εt represents the white noise. Some constraints are necessary on the values of the parameters so that the model remains stationary.
The notation MA(q) refers to the moving average model of order q −
X_t = \mu + \varepsilon_t + \sum_{i=1}^{q} \theta_i \varepsilon_{t-i}
where θ1, ..., θq are the parameters of the model, μ is the expectation of Xt, and εt, εt−1, ... are white noise error terms.
The ARMA(p, q) model combines p autoregressive terms and q moving-average terms. Mathematically the model is expressed with the following formula −
X_t = c + \varepsilon_t + \sum_{i=1}^{p} \varphi_i X_{t-i} + \sum_{i=1}^{q} \theta_i \varepsilon_{t-i}
We can see that the ARMA(p, q) model is a combination of AR(p) and MA(q) models.
To give some intuition of the model, consider that the AR part of the equation seeks to estimate parameters for the past observations X_{t−i} in order to predict the value of the variable at time t; it is, in the end, a weighted average of the past values. The MA part uses the same approach, but with the errors of previous observations, ε_{t−i}. So the result of the model is a weighted average of past values and past errors.
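To build further intuition, one can simulate an ARMA process and look at the resulting series; the snippet below is an illustration only (it is not part of the original text) and uses the base stats function arima.sim.
# Simulate 200 observations of an ARMA(1,1) process with phi = 0.7 and theta = 0.4
set.seed(123)
sim <- arima.sim(model = list(ar = 0.7, ma = 0.4), n = 200)
plot.ts(sim)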
The following code snippet demonstrates how to implement an ARMA(p, q) in R.
# install.packages("forecast")
library("forecast")
# Read the data
data = scan('fancy.dat')
ts_data <- ts(data, frequency = 12, start = c(1987,1))
ts_data
plot.ts(ts_data)
Plotting the data is normally the first step to find out if there is a temporal structure in the data. We can see from the plot that there are strong spikes at the end of each year.
The following code fits an ARMA model to the data. It tries several combinations of models and selects the one with the lowest error according to an information criterion (auto.arima uses the AICc by default).
# Fit the ARMA model
fit = auto.arima(ts_data)
summary(fit)
# Series: ts_data
# ARIMA(1,1,1)(0,1,1)[12]
# Coefficients:
# ar1 ma1 sma1
# 0.2401 -0.9013 0.7499
# s.e. 0.1427 0.0709 0.1790
#
# sigma^2 estimated as 15464184: log likelihood = -693.69
# AIC = 1395.38 AICc = 1395.98 BIC = 1404.43
# Training set error measures:
# ME RMSE MAE MPE MAPE MASE ACF1
# Training set 328.301 3615.374 2171.002 -2.481166 15.97302 0.4905797 -0.02521172
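A natural next step, not shown in the original script, is to use the fitted model to produce forecasts; the forecast package loaded above provides this directly.
# Forecast the next 12 months with the fitted model and plot the result
fc <- forecast(fit, h = 12)
plot(fc)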
In this chapter, we will be using the data scraped in part 1 of the book. The data has text that describes profiles of freelancers and the hourly rate they charge in USD. The idea of the following section is to fit a model that, given the skills of a freelancer, is able to predict his or her hourly rate.
The following code shows how to convert the raw text, which in this case describes the skills of each user, into a bag-of-words matrix. For this we use an R library called tm. This means that for each word in the corpus we create a variable with the number of occurrences of that word.
library(tm)
library(data.table)
source('text_analytics/text_analytics_functions.R')
data = fread('text_analytics/data/profiles.txt')
rate = as.numeric(data$rate)
keep = !is.na(rate)
rate = rate[keep]
### Make bag of words of title and body
X_all = bag_words(data$user_skills[keep])
X_all = removeSparseTerms(X_all, 0.999)
X_all
# <<DocumentTermMatrix (documents: 389, terms: 1422)>>
# Non-/sparse entries: 4057/549101
# Sparsity : 99%
# Maximal term length: 80
# Weighting : term frequency - inverse document frequency (normalized) (tf-idf)
### Make a sparse matrix with all the data
X_all <- as_sparseMatrix(X_all)
Now that we have the text represented as a sparse matrix we can fit a model that will give a sparse solution. A good alternative for this case is using the LASSO (least absolute shrinkage and selection operator). This is a regression model that is able to select the most relevant features to predict the target.
train_inx = 1:200
X_train = X_all[train_inx, ]
y_train = rate[train_inx]
X_test = X_all[-train_inx, ]
y_test = rate[-train_inx]
# Train a regression model
library(glmnet)
fit <- cv.glmnet(x = X_train, y = y_train,
family = 'gaussian', alpha = 1,
nfolds = 3, type.measure = 'mae')
plot(fit)
# Make predictions
predictions = predict(fit, newx = X_test)
predictions = as.vector(predictions[,1])
head(predictions)
# 36.23598 36.43046 51.69786 26.06811 35.13185 37.66367
# We can compute the mean absolute error for the test data
mean(abs(y_test - predictions))
# 15.02175
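As an optional follow-up (not part of the original code), the skill words that the LASSO actually kept can be inspected through the nonzero coefficients of the fitted model.
# Nonzero coefficients at the lambda that minimizes the cross-validated error
coefs <- as.matrix(coef(fit, s = "lambda.min"))
selected <- rownames(coefs)[coefs[, 1] != 0]
head(selected)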
Now we have a model that, given a set of skills, is able to predict the hourly rate of a freelancer. If more data is collected, the performance of the model will improve, but the code to implement the pipeline would remain the same.
Online learning is a subfield of machine learning that allows supervised learning models to scale to massive datasets. The basic idea is that we don’t need to read all the data into memory to fit a model; we only need to read one instance at a time.
In this case, we will show how to implement an online learning algorithm using logistic regression. As in most of supervised learning algorithms, there is a cost function that is minimized. In logistic regression, the cost function is defined as −
J(\theta) = -\frac{1}{m} \left[ \sum_{i=1}^{m} y^{(i)} \log\left(h_\theta(x^{(i)})\right) + \left(1 - y^{(i)}\right) \log\left(1 - h_\theta(x^{(i)})\right) \right]
where J(θ) represents the cost function and hθ(x) represents the hypothesis. In the case of logistic regression it is defined with the following formula −
h_\theta(x) = \frac{1}{1 + e^{-\theta^T x}}
Now that we have defined the cost function we need to find an algorithm to minimize it. The simplest algorithm for achieving this is called stochastic gradient descent. The update rule of the algorithm for the weights of the logistic regression model is defined as −
\theta_j := \theta_j - \alpha \left( h_\theta(x) - y \right) x_j
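To make the update rule concrete, here is a toy sketch of stochastic gradient descent for logistic regression in plain R, processing one instance at a time as an online learner would. It is an illustration only, not taken from the book's code, and the data it uses is simulated.
# Sigmoid (hypothesis) function
sigmoid <- function(z) 1 / (1 + exp(-z))
# One or more passes of stochastic gradient descent over the instances
sgd_logistic <- function(X, y, alpha = 0.1, passes = 1) {
   theta <- rep(0, ncol(X))
   for (p in seq_len(passes)) {
      for (i in seq_len(nrow(X))) {
         h <- sigmoid(sum(theta * X[i, ]))
         theta <- theta - alpha * (h - y[i]) * X[i, ]
      }
   }
   theta
}
# Toy usage on simulated data with an intercept column
set.seed(1)
X <- cbind(1, matrix(rnorm(200), ncol = 2))    # 100 instances, intercept + 2 features
y <- as.numeric(X %*% c(-0.5, 2, -1) + rnorm(100) > 0)
sgd_logistic(X, y, alpha = 0.1, passes = 5)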
There are several implementations of this algorithm, but the one implemented in the vowpal wabbit library is by far the most developed one. The library allows training of large scale regression models and uses small amounts of RAM. In its creators' own words, it is described as: "The Vowpal Wabbit (VW) project is a fast out-of-core learning system sponsored by Microsoft Research and (previously) Yahoo! Research".
We will be working with the titanic dataset from a kaggle competition. The original data can be found in the bda/part3/vw folder. Here, we have two files −
Training data (train_titanic.csv), and
unlabeled data used to make new predictions (test_titanic.csv).
In order to convert the csv format to the vowpal wabbit input format use the csv_to_vowpal_wabbit.py python script. You will obviously need to have python installed for this. Navigate to the bda/part3/vw folder, open the terminal and execute the following command −
python csv_to_vowpal_wabbit.py
Note that for this section, if you are using Windows you will need to install a Unix-style command line; you can get one from the cygwin website.
Open a terminal in the folder bda/part3/vw and execute the following command −
vw train_titanic.vw -f model.vw --binary --passes 20 -c -q ff --sgd --l1 0.00000001 --l2 0.0000001 --learning_rate 0.5 --loss_function logistic
Let us break down what each argument of the vw call means.
-f model.vw − means that we are saving the model in the model.vw file for making predictions later
--binary − Reports loss as binary classification with -1,1 labels
--passes 20 − The data is used 20 times to learn the weights
-c − create a cache file
-q ff − Use quadratic features in the f namespace
--sgd − use regular/classic/simple stochastic gradient descent update, i.e., nonadaptive, non-normalized, and non-invariant.
--l1 --l2 − L1 and L2 norm regularization
--learning_rate 0.5 − The learning rate α as defined in the update rule formula
The following output shows the results of running the regression model from the command line. In the results, we get the average log-loss and a small report of the algorithm's performance.
-loss_function logistic
creating quadratic features for pairs: ff
using l1 regularization = 1e-08
using l2 regularization = 1e-07
final_regressor = model.vw
Num weight bits = 18
learning rate = 0.5
initial_t = 1
power_t = 0.5
decay_learning_rate = 1
using cache_file = train_titanic.vw.cache
ignoring text input in favor of cache input
num sources = 1
average since example example current current current
loss last counter weight label predict features
0.000000 0.000000 1 1.0 -1.0000 -1.0000 57
0.500000 1.000000 2 2.0 1.0000 -1.0000 57
0.250000 0.000000 4 4.0 1.0000 1.0000 57
0.375000 0.500000 8 8.0 -1.0000 -1.0000 73
0.625000 0.875000 16 16.0 -1.0000 1.0000 73
0.468750 0.312500 32 32.0 -1.0000 -1.0000 57
0.468750 0.468750 64 64.0 -1.0000 1.0000 43
0.375000 0.281250 128 128.0 1.0000 -1.0000 43
0.351562 0.328125 256 256.0 1.0000 -1.0000 43
0.359375 0.367188 512 512.0 -1.0000 1.0000 57
0.274336 0.274336 1024 1024.0 -1.0000 -1.0000 57 h
0.281938 0.289474 2048 2048.0 -1.0000 -1.0000 43 h
0.246696 0.211454 4096 4096.0 -1.0000 -1.0000 43 h
0.218922 0.191209 8192 8192.0 1.0000 1.0000 43 h
finished run
number of examples per pass = 802
passes used = 11
weighted example sum = 8822
weighted label sum = -2288
average loss = 0.179775 h
best constant = -0.530826
best constant’s loss = 0.659128
total feature number = 427878
Now we can use the model.vw we trained to generate predictions with new data.
vw -d test_titanic.vw -t -i model.vw -p predictions.txt
The predictions generated by the previous command do not fall in the [0, 1] range. In order to normalize them, we use a sigmoid transformation.
# Read the predictions
library(data.table)   # provides fread
preds = fread('vw/predictions.txt')
# Define the sigmoid function
sigmoid = function(x) {
1 / (1 + exp(-x))
}
probs = sigmoid(preds[[1]])
# Generate class labels
preds = ifelse(probs > 0.5, 1, 0)
head(preds)
# [1] 0 1 0 0 1 0
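A possible final step, assumed here rather than taken from the original text, is to write the class labels to a csv file, for example to prepare a kaggle-style submission.
# Write the predicted labels to disk (hypothetical output path)
write.csv(data.frame(prediction = preds), 'vw/submission.csv', row.names = FALSE)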
{
"code": null,
"e": 3039,
"s": 2554,
"text": "The volume of data that one has to deal has exploded to unimaginable levels in the past decade, and at the same time, the price of data storage has systematically reduced. Private companies and research institutions capture terabytes of data about their users’ interactions, business, social media, and also sensors from devices such as mobile phones and automobiles. The challenge of this era is to make sense of this sea of data. This is where big data analytics comes into picture."
},
{
"code": null,
"e": 3257,
"s": 3039,
"text": "Big Data Analytics largely involves collecting data from different sources, munge it in a way that it becomes available to be consumed by analysts and finally deliver data products useful to the organization business."
},
{
"code": null,
"e": 3437,
"s": 3257,
"text": "The process of converting large amounts of unstructured raw data, retrieved from different sources to a data product useful for organizations forms the core of Big Data Analytics."
},
{
"code": null,
"e": 3816,
"s": 3437,
"text": "In order to provide a framework to organize the work needed by an organization and deliver clear insights from Big Data, it’s useful to think of it as a cycle with different stages. It is by no means linear, meaning all the stages are related with each other. This cycle has superficial similarities with the more traditional data mining cycle as described in CRISP methodology."
},
{
"code": null,
"e": 4096,
"s": 3816,
"text": "The CRISP-DM methodology that stands for Cross Industry Standard Process for Data Mining, is a cycle that describes commonly used approaches that data mining experts use to tackle problems in traditional BI data mining. It is still being used in traditional BI data mining teams."
},
{
"code": null,
"e": 4250,
"s": 4096,
"text": "Take a look at the following illustration. It shows the major stages of the cycle as described by the CRISP-DM methodology and how they are interrelated."
},
{
"code": null,
"e": 4643,
"s": 4250,
"text": "CRISP-DM was conceived in 1996 and the next year, it got underway as a European Union project under the ESPRIT funding initiative. The project was led by five companies: SPSS, Teradata, Daimler AG, NCR Corporation, and OHRA (an insurance company). The project was finally incorporated into SPSS. The methodology is extremely detailed oriented in how a data mining project should be specified."
},
{
"code": null,
"e": 4734,
"s": 4643,
"text": "Let us now learn a little more on each of the stages involved in the CRISP-DM life cycle −"
},
{
"code": null,
"e": 5103,
"s": 4734,
"text": "Business Understanding − This initial phase focuses on understanding the project objectives and requirements from a business perspective, and then converting this knowledge into a data mining problem definition. A preliminary plan is designed to achieve the objectives. A decision model, especially one built using the Decision Model and Notation standard can be used."
},
{
"code": null,
"e": 5472,
"s": 5103,
"text": "Business Understanding − This initial phase focuses on understanding the project objectives and requirements from a business perspective, and then converting this knowledge into a data mining problem definition. A preliminary plan is designed to achieve the objectives. A decision model, especially one built using the Decision Model and Notation standard can be used."
},
{
"code": null,
"e": 5783,
"s": 5472,
"text": "Data Understanding − The data understanding phase starts with an initial data collection and proceeds with activities in order to get familiar with the data, to identify data quality problems, to discover first insights into the data, or to detect interesting subsets to form hypotheses for hidden information."
},
{
"code": null,
"e": 6094,
"s": 5783,
"text": "Data Understanding − The data understanding phase starts with an initial data collection and proceeds with activities in order to get familiar with the data, to identify data quality problems, to discover first insights into the data, or to detect interesting subsets to form hypotheses for hidden information."
},
{
"code": null,
"e": 6489,
"s": 6094,
"text": "Data Preparation − The data preparation phase covers all activities to construct the final dataset (data that will be fed into the modeling tool(s)) from the initial raw data. Data preparation tasks are likely to be performed multiple times, and not in any prescribed order. Tasks include table, record, and attribute selection as well as transformation and cleaning of data for modeling tools."
},
{
"code": null,
"e": 6884,
"s": 6489,
"text": "Data Preparation − The data preparation phase covers all activities to construct the final dataset (data that will be fed into the modeling tool(s)) from the initial raw data. Data preparation tasks are likely to be performed multiple times, and not in any prescribed order. Tasks include table, record, and attribute selection as well as transformation and cleaning of data for modeling tools."
},
{
"code": null,
"e": 7237,
"s": 6884,
"text": "Modeling − In this phase, various modeling techniques are selected and applied and their parameters are calibrated to optimal values. Typically, there are several techniques for the same data mining problem type. Some techniques have specific requirements on the form of data. Therefore, it is often required to step back to the data preparation phase."
},
{
"code": null,
"e": 7590,
"s": 7237,
"text": "Modeling − In this phase, various modeling techniques are selected and applied and their parameters are calibrated to optimal values. Typically, there are several techniques for the same data mining problem type. Some techniques have specific requirements on the form of data. Therefore, it is often required to step back to the data preparation phase."
},
{
"code": null,
"e": 8164,
"s": 7590,
"text": "Evaluation − At this stage in the project, you have built a model (or models) that appears to have high quality, from a data analysis perspective. Before proceeding to final deployment of the model, it is important to evaluate the model thoroughly and review the steps executed to construct the model, to be certain it properly achieves the business objectives.\nA key objective is to determine if there is some important business issue that has not been sufficiently considered. At the end of this phase, a decision on the use of the data mining results should be reached.\n"
},
{
"code": null,
"e": 8526,
"s": 8164,
"text": "Evaluation − At this stage in the project, you have built a model (or models) that appears to have high quality, from a data analysis perspective. Before proceeding to final deployment of the model, it is important to evaluate the model thoroughly and review the steps executed to construct the model, to be certain it properly achieves the business objectives."
},
{
"code": null,
"e": 8737,
"s": 8526,
"text": "A key objective is to determine if there is some important business issue that has not been sufficiently considered. At the end of this phase, a decision on the use of the data mining results should be reached."
},
{
"code": null,
"e": 9185,
"s": 8737,
"text": "Deployment − Creation of the model is generally not the end of the project. Even if the purpose of the model is to increase knowledge of the data, the knowledge gained will need to be organized and presented in a way that is useful to the customer.\nDepending on the requirements, the deployment phase can be as simple as generating a report or as complex as implementing a repeatable data scoring (e.g. segment allocation) or data mining process.\n"
},
{
"code": null,
"e": 9434,
"s": 9185,
"text": "Deployment − Creation of the model is generally not the end of the project. Even if the purpose of the model is to increase knowledge of the data, the knowledge gained will need to be organized and presented in a way that is useful to the customer."
},
{
"code": null,
"e": 9632,
"s": 9434,
"text": "Depending on the requirements, the deployment phase can be as simple as generating a report or as complex as implementing a repeatable data scoring (e.g. segment allocation) or data mining process."
},
{
"code": null,
"e": 9928,
"s": 9632,
"text": "In many cases, it will be the customer, not the data analyst, who will carry out the deployment steps. Even if the analyst deploys the model, it is important for the customer to understand upfront the actions which will need to be carried out in order to actually make use of the created models."
},
{
"code": null,
"e": 10101,
"s": 9928,
"text": "SEMMA is another methodology developed by SAS for data mining modeling. It stands for Sample, Explore, Modify, Model, and Asses. Here is a brief description of its stages −"
},
{
"code": null,
"e": 10360,
"s": 10101,
"text": "Sample − The process starts with data sampling, e.g., selecting the dataset for modeling. The dataset should be large enough to contain sufficient information to retrieve, yet small enough to be used efficiently. This phase also deals with data partitioning."
},
{
"code": null,
"e": 10619,
"s": 10360,
"text": "Sample − The process starts with data sampling, e.g., selecting the dataset for modeling. The dataset should be large enough to contain sufficient information to retrieve, yet small enough to be used efficiently. This phase also deals with data partitioning."
},
{
"code": null,
"e": 10820,
"s": 10619,
"text": "Explore − This phase covers the understanding of the data by discovering anticipated and unanticipated relationships between the variables, and also abnormalities, with the help of data visualization."
},
{
"code": null,
"e": 11021,
"s": 10820,
"text": "Explore − This phase covers the understanding of the data by discovering anticipated and unanticipated relationships between the variables, and also abnormalities, with the help of data visualization."
},
{
"code": null,
"e": 11140,
"s": 11021,
"text": "Modify − The Modify phase contains methods to select, create and transform variables in preparation for data modeling."
},
{
"code": null,
"e": 11259,
"s": 11140,
"text": "Modify − The Modify phase contains methods to select, create and transform variables in preparation for data modeling."
},
{
"code": null,
"e": 11449,
"s": 11259,
"text": "Model − In the Model phase, the focus is on applying various modeling (data mining) techniques on the prepared variables in order to create models that possibly provide the desired outcome."
},
{
"code": null,
"e": 11639,
"s": 11449,
"text": "Model − In the Model phase, the focus is on applying various modeling (data mining) techniques on the prepared variables in order to create models that possibly provide the desired outcome."
},
{
"code": null,
"e": 11747,
"s": 11639,
"text": "Assess − The evaluation of the modeling results shows the reliability and usefulness of the created models."
},
{
"code": null,
"e": 11855,
"s": 11747,
"text": "Assess − The evaluation of the modeling results shows the reliability and usefulness of the created models."
},
{
"code": null,
"e": 12189,
"s": 11855,
"text": "The main difference between CRISM–DM and SEMMA is that SEMMA focuses on the modeling aspect, whereas CRISP-DM gives more importance to stages of the cycle prior to modeling such as understanding the business problem to be solved, understanding and preprocessing the data to be used as input, for example, machine learning algorithms."
},
{
"code": null,
"e": 12481,
"s": 12189,
"text": "In today’s big data context, the previous approaches are either incomplete or suboptimal. For example, the SEMMA methodology disregards completely data collection and preprocessing of different data sources. These stages normally constitute most of the work in a successful big data project."
},
{
"code": null,
"e": 12550,
"s": 12481,
"text": "A big data analytics cycle can be described by the following stage −"
},
{
"code": null,
"e": 12578,
"s": 12550,
"text": "Business Problem Definition"
},
{
"code": null,
"e": 12587,
"s": 12578,
"text": "Research"
},
{
"code": null,
"e": 12614,
"s": 12587,
"text": "Human Resources Assessment"
},
{
"code": null,
"e": 12631,
"s": 12614,
"text": "Data Acquisition"
},
{
"code": null,
"e": 12644,
"s": 12631,
"text": "Data Munging"
},
{
"code": null,
"e": 12657,
"s": 12644,
"text": "Data Storage"
},
{
"code": null,
"e": 12683,
"s": 12657,
"text": "Exploratory Data Analysis"
},
{
"code": null,
"e": 12728,
"s": 12683,
"text": "Data Preparation for Modeling and Assessment"
},
{
"code": null,
"e": 12737,
"s": 12728,
"text": "Modeling"
},
{
"code": null,
"e": 12752,
"s": 12737,
"text": "Implementation"
},
{
"code": null,
"e": 12842,
"s": 12752,
"text": "In this section, we will throw some light on each of these stages of big data life cycle."
},
{
"code": null,
"e": 13192,
"s": 12842,
"text": "This is a point common in traditional BI and big data analytics life cycle. Normally it is a non-trivial stage of a big data project to define the problem and evaluate correctly how much potential gain it may have for an organization. It seems obvious to mention this, but it has to be evaluated what are the expected gains and costs of the project."
},
{
"code": null,
"e": 13504,
"s": 13192,
"text": "Analyze what other companies have done in the same situation. This involves looking for solutions that are reasonable for your company, even though it involves adapting other solutions to the resources and requirements that your company has. In this stage, a methodology for the future stages should be defined."
},
{
"code": null,
"e": 13860,
"s": 13504,
"text": "Once the problem is defined, it’s reasonable to continue analyzing if the current staff is able to complete the project successfully. Traditional BI teams might not be capable to deliver an optimal solution to all the stages, so it should be considered before starting the project if there is a need to outsource a part of the project or hire more people."
},
{
"code": null,
"e": 14341,
"s": 13860,
"text": "This section is key in a big data life cycle; it defines which type of profiles would be needed to deliver the resultant data product. Data gathering is a non-trivial step of the process; it normally involves gathering unstructured data from different sources. To give an example, it could involve writing a crawler to retrieve reviews from a website. This involves dealing with text, perhaps in different languages normally requiring a significant amount of time to be completed."
},
{
"code": null,
"e": 14586,
"s": 14341,
"text": "Once the data is retrieved, for example, from the web, it needs to be stored in an easyto-use format. To continue with the reviews examples, let’s assume the data is retrieved from different sites where each has a different display of the data."
},
{
"code": null,
"e": 14934,
"s": 14586,
"text": "Suppose one data source gives reviews in terms of rating in stars, therefore it is possible to read this as a mapping for the response variable y ∈ {1, 2, 3, 4, 5}. Another data source gives reviews using two arrows system, one for up voting and the other for down voting. This would imply a response variable of the form y ∈ {positive, negative}."
},
{
"code": null,
"e": 15309,
"s": 14934,
"text": "In order to combine both the data sources, a decision has to be made in order to make these two response representations equivalent. This can involve converting the first data source response representation to the second form, considering one star as negative and five stars as positive. This process often requires a large time allocation to be delivered with good quality."
},
{
"code": null,
"e": 15813,
"s": 15309,
"text": "Once the data is processed, it sometimes needs to be stored in a database. Big data technologies offer plenty of alternatives regarding this point. The most common alternative is using the Hadoop File System for storage that provides users a limited version of SQL, known as HIVE Query Language. This allows most analytics task to be done in similar ways as would be done in traditional BI data warehouses, from the user perspective. Other storage options to be considered are MongoDB, Redis, and SPARK."
},
{
"code": null,
"e": 16233,
"s": 15813,
"text": "This stage of the cycle is related to the human resources knowledge in terms of their abilities to implement different architectures. Modified versions of traditional data warehouses are still being used in large scale applications. For example, teradata and IBM offer SQL databases that can handle terabytes of data; open source solutions such as postgreSQL and MySQL are still being used for large scale applications."
},
{
"code": null,
"e": 16471,
"s": 16233,
"text": "Even though there are differences in how the different storages work in the background, from the client side, most solutions provide a SQL API. Hence having a good understanding of SQL is still a key skill to have for big data analytics."
},
{
"code": null,
"e": 16852,
"s": 16471,
"text": "This stage a priori seems to be the most important topic, in practice, this is not true. It is not even an essential stage. It is possible to implement a big data solution that would be working with real-time data, so in this case, we only need to gather data to develop the model and then implement it in real time. So there would not be a need to formally store the data at all."
},
{
"code": null,
"e": 17209,
"s": 16852,
"text": "Once the data has been cleaned and stored in a way that insights can be retrieved from it, the data exploration phase is mandatory. The objective of this stage is to understand the data, this is normally done with statistical techniques and also plotting the data. This is a good stage to evaluate whether the problem definition makes sense or is feasible."
},
{
"code": null,
"e": 17420,
"s": 17209,
"text": "This stage involves reshaping the cleaned data retrieved previously and using statistical preprocessing for missing values imputation, outlier detection, normalization, feature extraction and feature selection."
},
{
"code": null,
"e": 17843,
"s": 17420,
"text": "The prior stage should have produced several datasets for training and testing, for example, a predictive model. This stage involves trying different models and looking forward to solving the business problem at hand. In practice, it is normally desired that the model would give some insight into the business. Finally, the best model or combination of models is selected evaluating its performance on a left-out dataset."
},
{
"code": null,
"e": 18225,
"s": 17843,
"text": "In this stage, the data product developed is implemented in the data pipeline of the company. This involves setting up a validation scheme while the data product is working, in order to track its performance. For example, in the case of implementing a predictive model, this stage would involve applying the model to new data and once the response is available, evaluate the model."
},
{
"code": null,
"e": 18842,
"s": 18225,
"text": "In terms of methodology, big data analytics differs significantly from the traditional statistical approach of experimental design. Analytics starts with data. Normally we model the data in a way to explain a response. The objectives of this approach is to predict the response behavior or understand how the input variables relate to a response. Normally in statistical experimental designs, an experiment is developed and data is retrieved as a result. This allows to generate data in a way that can be used by a statistical model, where certain assumptions hold such as independence, normality, and randomization."
},
{
"code": null,
"e": 19153,
"s": 18842,
"text": "In big data analytics, we are presented with the data. We cannot design an experiment that fulfills our favorite statistical model. In large-scale applications of analytics, a large amount of work (normally 80% of the effort) is needed just for cleaning the data, so it can be used by a machine learning model."
},
{
"code": null,
"e": 19434,
"s": 19153,
"text": "We don’t have a unique methodology to follow in real large-scale applications. Normally once the business problem is defined, a research stage is needed to design the methodology to be used. However general guidelines are relevant to be mentioned and apply to almost all problems."
},
{
"code": null,
"e": 19933,
"s": 19434,
"text": "One of the most important tasks in big data analytics is statistical modeling, meaning supervised and unsupervised classification or regression problems. Once the data is cleaned and preprocessed, available for modeling, care should be taken in evaluating different models with reasonable loss metrics and then once the model is implemented, further evaluation and results should be reported. A common pitfall in predictive modeling is to just implement the model and never measure its performance."
},
{
"code": null,
"e": 20088,
"s": 19933,
"text": "As mentioned in the big data life cycle, the data products that result from developing a big data product are in most of the cases some of the following −"
},
{
"code": null,
"e": 20208,
"s": 20088,
"text": "Machine learning implementation − This could be a classification algorithm, a regression model or a segmentation model."
},
{
"code": null,
"e": 20328,
"s": 20208,
"text": "Machine learning implementation − This could be a classification algorithm, a regression model or a segmentation model."
},
{
"code": null,
"e": 20561,
"s": 20328,
"text": "Recommender system − The objective is to develop a system that recommends choices based on user behavior. Netflix is the characteristic example of this data product, where based on the ratings of users, other movies are recommended."
},
{
"code": null,
"e": 20794,
"s": 20561,
"text": "Recommender system − The objective is to develop a system that recommends choices based on user behavior. Netflix is the characteristic example of this data product, where based on the ratings of users, other movies are recommended."
},
{
"code": null,
"e": 20933,
"s": 20794,
"text": "Dashboard − Business normally needs tools to visualize aggregated data. A dashboard is a graphical mechanism to make this data accessible."
},
{
"code": null,
"e": 21072,
"s": 20933,
"text": "Dashboard − Business normally needs tools to visualize aggregated data. A dashboard is a graphical mechanism to make this data accessible."
},
{
"code": null,
"e": 21204,
"s": 21072,
"text": "Ad-Hoc analysis − Normally business areas have questions, hypotheses or myths that can be answered doing ad-hoc analysis with data."
},
{
"code": null,
"e": 21336,
"s": 21204,
"text": "Ad-Hoc analysis − Normally business areas have questions, hypotheses or myths that can be answered doing ad-hoc analysis with data."
},
{
"code": null,
"e": 21675,
"s": 21336,
"text": "In large organizations, in order to successfully develop a big data project, it is needed to have management backing up the project. This normally involves finding a way to show the business advantages of the project. We don’t have a unique solution to the problem of finding sponsors for a project, but a few guidelines are given below −"
},
{
"code": null,
"e": 21769,
"s": 21675,
"text": "Check who and where are the sponsors of other projects similar to the one that interests you."
},
{
"code": null,
"e": 21863,
"s": 21769,
"text": "Check who and where are the sponsors of other projects similar to the one that interests you."
},
{
"code": null,
"e": 21984,
"s": 21863,
"text": "Having personal contacts in key management positions helps, so any contact can be triggered if the project is promising."
},
{
"code": null,
"e": 22105,
"s": 21984,
"text": "Having personal contacts in key management positions helps, so any contact can be triggered if the project is promising."
},
{
"code": null,
"e": 22197,
"s": 22105,
"text": "Who would benefit from your project? Who would be your client once the project is on track?"
},
{
"code": null,
"e": 22289,
"s": 22197,
"text": "Who would benefit from your project? Who would be your client once the project is on track?"
},
{
"code": null,
"e": 22391,
"s": 22289,
"text": "Develop a simple, clear, and exiting proposal and share it with the key players in your organization."
},
{
"code": null,
"e": 22493,
"s": 22391,
"text": "Develop a simple, clear, and exiting proposal and share it with the key players in your organization."
},
{
"code": null,
"e": 22750,
"s": 22493,
"text": "The best way to find sponsors for a project is to understand the problem and what would be the resulting data product once it has been implemented. This understanding will give an edge in convincing the management of the importance of the big data project."
},
{
"code": null,
"e": 23086,
"s": 22750,
"text": "A data analyst has reporting-oriented profile, having experience in extracting and analyzing data from traditional data warehouses using SQL. Their tasks are normally either on the side of data storage or in reporting general business results. Data warehousing is by no means simple, it is just different to what a data scientist does."
},
{
"code": null,
"e": 23522,
"s": 23086,
"text": "Many organizations struggle hard to find competent data scientists in the market. It is however a good idea to select prospective data analysts and teach them the relevant skills to become a data scientist. This is by no means a trivial task and would normally involve the person doing a master degree in a quantitative field, but it is definitely a viable option. The basic skills a competent data analyst must have are listed below −"
},
{
"code": null,
"e": 23545,
"s": 23522,
"text": "Business understanding"
},
{
"code": null,
"e": 23561,
"s": 23545,
"text": "SQL programming"
},
{
"code": null,
"e": 23594,
"s": 23561,
"text": "Report design and implementation"
},
{
"code": null,
"e": 23616,
"s": 23594,
"text": "Dashboard development"
},
{
"code": null,
"e": 23834,
"s": 23616,
"text": "The role of a data scientist is normally associated with tasks such as predictive modeling, developing segmentation algorithms, recommender systems, A/B testing frameworks and often working with raw unstructured data."
},
{
"code": null,
"e": 24162,
"s": 23834,
"text": "The nature of their work demands a deep understanding of mathematics, applied statistics and programming. There are a few skills common between a data analyst and a data scientist, for example, the ability to query databases. Both analyze data, but the decision of a data scientist can have a greater impact in an organization."
},
{
"code": null,
"e": 24227,
"s": 24162,
"text": "Here is a set of skills a data scientist normally need to have −"
},
{
"code": null,
"e": 24304,
"s": 24227,
"text": "Programming in a statistical package such as: R, Python, SAS, SPSS, or Julia"
},
{
"code": null,
"e": 24368,
"s": 24304,
"text": "Able to clean, extract, and explore data from different sources"
},
{
"code": null,
"e": 24427,
"s": 24368,
"text": "Research, design, and implementation of statistical models"
},
{
"code": null,
"e": 24490,
"s": 24427,
"text": "Deep statistical, mathematical, and computer science knowledge"
},
{
"code": null,
"e": 24948,
"s": 24490,
"text": "In big data analytics, people normally confuse the role of a data scientist with that of a data architect. In reality, the difference is quite simple. A data architect defines the tools and the architecture the data would be stored at, whereas a data scientist uses this architecture. Of course, a data scientist should be able to set up new tools if needed for ad-hoc projects, but the infrastructure definition and design should not be a part of his task."
},
{
"code": null,
"e": 25283,
"s": 24948,
"text": "Through this tutorial, we will develop a project. Each subsequent chapter in this tutorial deals with a part of the larger project in the mini-project section. This is thought to be an applied tutorial section that will provide exposure to a real-world problem. In this case, we would start with the problem definition of the project."
},
{
"code": null,
"e": 25446,
"s": 25283,
"text": "The objective of this project would be to develop a machine learning model to predict the hourly salary of people using their curriculum vitae (CV) text as input."
},
{
"code": null,
"e": 25775,
"s": 25446,
"text": "Using the framework defined above, it is simple to define the problem. We can define X = {x1, x2, ..., xn} as the CV’s of users, where each feature can be, in the simplest way possible, the amount of times this word appears. Then the response is real valued, we are trying to predict the hourly salary of individuals in dollars."
},
{
"code": null,
"e": 25904,
"s": 25775,
"text": "These two considerations are enough to conclude that the problem presented can be solved with a supervised regression algorithm."
},
{
"code": null,
"e": 26182,
"s": 25904,
"text": "Problem Definition is probably one of the most complex and heavily neglected stages in the big data analytics pipeline. In order to define the problem a data product would solve, experience is mandatory. Most data scientist aspirants have little or no experience in this stage."
},
{
"code": null,
"e": 26248,
"s": 26182,
"text": "Most big data problems can be categorized in the following ways −"
},
{
"code": null,
"e": 26274,
"s": 26248,
"text": "Supervised classification"
},
{
"code": null,
"e": 26296,
"s": 26274,
"text": "Supervised regression"
},
{
"code": null,
"e": 26318,
"s": 26296,
"text": "Unsupervised learning"
},
{
"code": null,
"e": 26335,
"s": 26318,
"text": "Learning to rank"
},
{
"code": null,
"e": 26384,
"s": 26335,
"text": "Let us now learn more about these four concepts."
},
{
"code": null,
"e": 26794,
"s": 26384,
"text": "Given a matrix of features X = {x1, x2, ..., xn} we develop a model M to predict different classes defined as y = {c1, c2, ..., cn}. For example: Given transactional data of customers in an insurance company, it is possible to develop a model that will predict if a client would churn or not. The latter is a binary classification problem, where there are two classes or target variables: churn and not churn."
},
{
"code": null,
"e": 27122,
"s": 26794,
"text": "Other problems involve predicting more than one class, we could be interested in doing digit recognition, therefore the response vector would be defined as: y = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, a-state-of-the-art model would be convolutional neural network and the matrix of features would be defined as the pixels of the image."
},
{
"code": null,
"e": 27436,
"s": 27122,
"text": "In this case, the problem definition is rather similar to the previous example; the difference relies on the response. In a regression problem, the response y ∈ R, this means the response is real valued. For example, we can develop a model to predict the hourly salary of individuals given the corpus of their CV."
},
{
"code": null,
"e": 27775,
"s": 27436,
"text": "Management is often thirsty for new insights. Segmentation models can provide this insight in order for the marketing department to develop products for different segments. A good approach for developing a segmentation model, rather than thinking of algorithms, is to select features that are relevant to the segmentation that is desired."
},
{
"code": null,
"e": 28149,
"s": 27775,
"text": "For example, in a telecommunications company, it is interesting to segment clients by their cellphone usage. This would involve disregarding features that have nothing to do with the segmentation objective and including only those that do. In this case, this would be selecting features as the number of SMS used in a month, the number of inbound and outbound minutes, etc."
},
{
"code": null,
"e": 28508,
"s": 28149,
"text": "This problem can be considered as a regression problem, but it has particular characteristics and deserves a separate treatment. The problem involves given a collection of documents we seek to find the most relevant ordering given a query. In order to develop a supervised learning algorithm, it is needed to label how relevant an ordering is, given a query."
},
{
"code": null,
"e": 29092,
"s": 28508,
"text": "It is relevant to note that in order to develop a supervised learning algorithm, it is needed to label the training data. This means that in order to train a model that will, for example, recognize digits from an image, we need to label a significant amount of examples by hand. There are web services that can speed up this process and are commonly used for this task such as amazon mechanical turk. It is proven that learning algorithms improve their performance when provided with more data, so labeling a decent amount of examples is practically mandatory in supervised learning."
},
{
"code": null,
"e": 29424,
"s": 29092,
"text": "Data collection plays the most important role in the Big Data cycle. The Internet provides almost unlimited sources of data for a variety of topics. The importance of this area depends on the type of business, but traditional industries can acquire a diverse source of external data and combine those with their transactional data."
},
{
"code": null,
"e": 29952,
"s": 29424,
"text": "For example, let’s assume we would like to build a system that recommends restaurants. The first step would be to gather data, in this case, reviews of restaurants from different websites and store them in a database. As we are interested in raw text, and would use that for analytics, it is not that relevant where the data for developing the model would be stored. This may sound contradictory with the big data main technologies, but in order to implement a big data application, we simply need to make it work in real time."
},
{
"code": null,
"e": 30246,
"s": 29952,
"text": "Once the problem is defined, the following stage is to collect the data. The following miniproject idea is to work on collecting data from the web and structuring it to be used in a machine learning model. We will collect some tweets from the twitter rest API using the R programming language."
},
{
"code": null,
"e": 30431,
"s": 30246,
"text": "First of all create a twitter account, and then follow the instructions in the twitteR package vignette to create a twitter developer account. This is a summary of those instructions −"
},
{
"code": null,
"e": 30478,
"s": 30431,
"text": "Go to https://twitter.com/apps/new and log in."
},
{
"code": null,
"e": 30525,
"s": 30478,
"text": "Go to https://twitter.com/apps/new and log in."
},
{
"code": null,
"e": 30636,
"s": 30525,
"text": "After filling in the basic info, go to the \"Settings\" tab and select \"Read, Write and Access direct messages\"."
},
{
"code": null,
"e": 30747,
"s": 30636,
"text": "After filling in the basic info, go to the \"Settings\" tab and select \"Read, Write and Access direct messages\"."
},
{
"code": null,
"e": 30802,
"s": 30747,
"text": "Make sure to click on the save button after doing this"
},
{
"code": null,
"e": 30857,
"s": 30802,
"text": "Make sure to click on the save button after doing this"
},
{
"code": null,
"e": 30930,
"s": 30857,
"text": "In the \"Details\" tab, take note of your consumer key and consumer secret"
},
{
"code": null,
"e": 31003,
"s": 30930,
"text": "In the \"Details\" tab, take note of your consumer key and consumer secret"
},
{
"code": null,
"e": 31072,
"s": 31003,
"text": "In your R session, you’ll be using the API key and API secret values"
},
{
"code": null,
"e": 31141,
"s": 31072,
"text": "In your R session, you’ll be using the API key and API secret values"
},
{
"code": null,
"e": 31244,
"s": 31141,
"text": "Finally run the following script. This will install the twitteR package from its repository on github."
},
{
"code": null,
"e": 31347,
"s": 31244,
"text": "Finally run the following script. This will install the twitteR package from its repository on github."
},
{
"code": null,
"e": 31520,
"s": 31347,
"text": "install.packages(c(\"devtools\", \"rjson\", \"bit64\", \"httr\")) \n\n# Make sure to restart your R session at this point \nlibrary(devtools) \ninstall_github(\"geoffjentry/twitteR\") \n"
},
{
"code": null,
"e": 31861,
"s": 31520,
"text": "We are interested in getting data where the string \"big mac\" is included and finding out which topics stand out about this. In order to do this, the first step is collecting the data from twitter. Below is our R script to collect required data from twitter. This code is also available in bda/part1/collect_data/collect_data_twitter.R file."
},
{
"code": null,
"e": 33145,
"s": 31861,
"text": "rm(list = ls(all = TRUE)); gc() # Clears the global environment\nlibrary(twitteR)\nSys.setlocale(category = \"LC_ALL\", locale = \"C\")\n\n### Replace the xxx’s with the values you got from the previous instructions\n\n# consumer_key = \"xxxxxxxxxxxxxxxxxxxx\"\n# consumer_secret = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\n# access_token = \"xxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\n# access_token_secret= \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\n# Connect to twitter rest API\nsetup_twitter_oauth(consumer_key, consumer_secret, access_token, access_token_secret)\n\n# Get tweets related to big mac\ntweets <- searchTwitter(’big mac’, n = 200, lang = ’en’)\ndf <- twListToDF(tweets)\n\n# Take a look at the data\nhead(df)\n\n# Check which device is most used\nsources <- sapply(tweets, function(x) x$getStatusSource())\nsources <- gsub(\"</a>\", \"\", sources)\nsources <- strsplit(sources, \">\")\nsources <- sapply(sources, function(x) ifelse(length(x) > 1, x[2], x[1]))\nsource_table = table(sources)\nsource_table = source_table[source_table > 1]\nfreq = source_table[order(source_table, decreasing = T)]\nas.data.frame(freq)\n\n# Frequency\n# Twitter for iPhone 71\n# Twitter for Android 29\n# Twitter Web Client 25\n# recognia 20"
},
{
"code": null,
"e": 33465,
"s": 33145,
"text": "Once the data is collected, we normally have diverse data sources with different characteristics. The most immediate step would be to make these data sources homogeneous and continue to develop our data product. However, it depends on the type of data. We should ask ourselves if it is practical to homogenize the data."
},
{
"code": null,
"e": 33903,
"s": 33465,
"text": "Maybe the data sources are completely different, and the information loss will be large if the sources would be homogenized. In this case, we can think of alternatives. Can one data source help me build a regression model and the other one a classification model? Is it possible to work with the heterogeneity on our advantage rather than just lose information? Taking these decisions are what make analytics interesting and challenging."
},
{
"code": null,
"e": 34012,
"s": 33903,
"text": "In the case of reviews, it is possible to have a language for each data source. Again, we have two choices −"
},
{
"code": null,
"e": 34348,
"s": 34012,
"text": "Homogenization − It involves translating different languages to the language where we have more data. The quality of translations services is acceptable, but if we would like to translate massive amounts of data with an API, the cost would be significant. There are software tools available for this task, but that would be costly too."
},
{
"code": null,
"e": 35051,
"s": 34684,
"text": "Heterogenization − Would it be possible to develop a solution for each language? As it is simple to detect the language of a corpus, we could develop a recommender for each language. This would involve more work in terms of tuning each recommender according to the amount of languages available but is definitely a viable option if we have a few languages available."
},
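{
    "code": null,
    "e": null,
    "s": null,
    "text": "The language-detection step mentioned above is not part of the original scripts; the following is a minimal sketch of it. The textcat package and the example review strings are assumptions made purely for illustration."
},
{
    "code": null,
    "e": null,
    "s": null,
    "text": "# Hypothetical sketch: detect the language of each review before routing it\n# to a per-language recommender (assumes install.packages('textcat') was run)\nlibrary(textcat)\n\nreviews = c('The burger was great and the service was fast',\n            'La hamburguesa estaba muy buena y el servicio fue rapido',\n            'Le burger etait excellent et le service rapide')\n\nlangs = textcat(reviews)\nprint(langs)\n# e.g. \"english\" \"spanish\" \"french\"\n\n# Split the corpus by detected language; each piece would feed its own model\nsplit(reviews, langs)\n"
},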
{
"code": null,
"e": 35726,
"s": 35418,
"text": "In the present case we need to first clean the unstructured data and then convert it to a data matrix in order to apply topics modelling on it. In general, when getting data from twitter, there are several characters we are not interested in using, at least in the first stage of the data cleansing process."
},
{
"code": null,
"e": 36031,
"s": 35726,
"text": "For example, after getting the tweets we get these strange characters: \"<ed><U+00A0><U+00BD><ed><U+00B8><U+008B>\". These are probably emoticons, so in order to clean the data, we will just remove them using the following script. This code is also available in bda/part1/collect_data/cleaning_data.R file."
},
{
"code": null,
"e": 37118,
"s": 36031,
"text": "rm(list = ls(all = TRUE)); gc() # Clears the global environment\nsource('collect_data_twitter.R')\n# Some tweets\nhead(df$text)\n\n[1] \"I’m not a big fan of turkey but baked Mac &\ncheese <ed><U+00A0><U+00BD><ed><U+00B8><U+008B>\"\n[2] \"@Jayoh30 Like no special sauce on a big mac. HOW\"\n### We are interested in the text - Let’s clean it!\n\n# We first convert the encoding of the text from latin1 to ASCII\ndf$text <- sapply(df$text,function(row) iconv(row, \"latin1\", \"ASCII\", sub = \"\"))\n\n# Create a function to clean tweets\nclean.text <- function(tx) {\n tx <- gsub(\"htt.{1,20}\", \" \", tx, ignore.case = TRUE)\n tx = gsub(\"[^#[:^punct:]]|@|RT\", \" \", tx, perl = TRUE, ignore.case = TRUE)\n tx = gsub(\"[[:digit:]]\", \" \", tx, ignore.case = TRUE)\n tx = gsub(\" {1,}\", \" \", tx, ignore.case = TRUE)\n tx = gsub(\"^\\\\s+|\\\\s+$\", \" \", tx, ignore.case = TRUE)\n return(tx)\n} \n\nclean_tweets <- lapply(df$text, clean.text)\n\n# Cleaned tweets\nhead(clean_tweets)\n[1] \" WeNeedFeminlsm MAC s new make up line features men woc and big girls \"\n[1] \" TravelsPhoto What Happens To Your Body One Hour After A Big Mac \""
},
{
"code": null,
"e": 37390,
"s": 37118,
"text": "The final step of the data cleansing mini project is to have cleaned text we can convert to a matrix and apply an algorithm to. From the text stored in the clean_tweets vector we can easily convert it to a bag of words matrix and apply an unsupervised learning algorithm."
},
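{
    "code": null,
    "e": null,
    "s": null,
    "text": "The conversion itself is not shown in the original script; the following is a minimal sketch of that step, assuming the tm and topicmodels packages are installed and that the clean_tweets vector from the previous code is available. It builds a bag of words (document-term) matrix and fits a small LDA topic model on it."
},
{
    "code": null,
    "e": null,
    "s": null,
    "text": "# Minimal sketch (assumes install.packages(c('tm', 'topicmodels')) was run\n# and that clean_tweets comes from the cleaning script above)\nlibrary(tm)\nlibrary(topicmodels)\n\ncorpus = VCorpus(VectorSource(unlist(clean_tweets)))\ndtm = DocumentTermMatrix(corpus,\n   control = list(removePunctuation = TRUE, stopwords = TRUE, tolower = TRUE))\n\n# Drop empty documents (tweets that lost all their terms during cleaning)\ndtm = dtm[rowSums(as.matrix(dtm)) > 0, ]\n\n# Fit a simple topic model with 5 topics and inspect the top terms per topic\nlda_fit = LDA(dtm, k = 5, control = list(seed = 1234))\nterms(lda_fit, 10)\n"
},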
{
"code": null,
"e": 37651,
"s": 37390,
"text": "Reporting is very important in big data analytics. Every organization must have a regular provision of information to support its decision making process. This task is normally handled by data analysts with SQL and ETL (extract, transfer, and load) experience."
},
{
"code": null,
"e": 37821,
"s": 37651,
"text": "The team in charge of this task has the responsibility of spreading the information produced in the big data analytics department to different areas of the organization."
},
{
"code": null,
"e": 38126,
"s": 37821,
"text": "The following example demonstrates what summarization of data means. Navigate to the folder bda/part1/summarize_data and inside the folder, open the summarize_data.Rproj file by double clicking it. Then, open the summarize_data.R script and take a look at the code, and follow the explanations presented."
},
{
"code": null,
"e": 38283,
"s": 38126,
"text": "# Install the following packages by running the following code in R. \npkgs = c('data.table', 'ggplot2', 'nycflights13', 'reshape2') \ninstall.packages(pkgs)\n"
},
{
"code": null,
"e": 38528,
"s": 38283,
"text": "The ggplot2 package is great for data visualization. The data.table package is a great option to do fast and memory efficient summarization in R. A recent benchmark shows it is even faster than pandas, the python library used for similar tasks."
},
{
"code": null,
"e": 38661,
"s": 38528,
"text": "Take a look at the data using the following code. This code is also available in bda/part1/summarize_data/summarize_data.Rproj file."
},
{
"code": null,
"e": 39990,
"s": 38661,
"text": "library(nycflights13) \nlibrary(ggplot2) \nlibrary(data.table) \nlibrary(reshape2) \n\n# Convert the flights data.frame to a data.table object and call it DT \nDT <- as.data.table(flights) \n\n# The data has 336776 rows and 16 columns \ndim(DT) \n\n# Take a look at the first rows \nhead(DT) \n\n# year month day dep_time dep_delay arr_time arr_delay carrier \n# 1: 2013 1 1 517 2 830 11 UA \n# 2: 2013 1 1 533 4 850 20 UA \n# 3: 2013 1 1 542 2 923 33 AA \n# 4: 2013 1 1 544 -1 1004 -18 B6 \n# 5: 2013 1 1 554 -6 812 -25 DL \n# 6: 2013 1 1 554 -4 740 12 UA \n\n# tailnum flight origin dest air_time distance hour minute \n# 1: N14228 1545 EWR IAH 227 1400 5 17 \n# 2: N24211 1714 LGA IAH 227 1416 5 33 \n# 3: N619AA 1141 JFK MIA 160 1089 5 42 \n# 4: N804JB 725 JFK BQN 183 1576 5 44 \n# 5: N668DN 461 LGA ATL 116 762 5 54 \n# 6: N39463 1696 EWR ORD 150 719 5 54\n"
},
{
"code": null,
"e": 40047,
"s": 39990,
"text": "The following code has an example of data summarization."
},
{
"code": null,
"e": 42358,
"s": 40047,
"text": "### Data Summarization\n# Compute the mean arrival delay \nDT[, list(mean_arrival_delay = mean(arr_delay, na.rm = TRUE))] \n# mean_arrival_delay \n# 1: 6.895377 \n# Now, we compute the same value but for each carrier \nmean1 = DT[, list(mean_arrival_delay = mean(arr_delay, na.rm = TRUE)), \n by = carrier] \nprint(mean1) \n# carrier mean_arrival_delay \n# 1: UA 3.5580111 \n# 2: AA 0.3642909 \n# 3: B6 9.4579733 \n# 4: DL 1.6443409 \n# 5: EV 15.7964311 \n# 6: MQ 10.7747334 \n# 7: US 2.1295951 \n# 8: WN 9.6491199 \n# 9: VX 1.7644644 \n# 10: FL 20.1159055 \n# 11: AS -9.9308886 \n# 12: 9E 7.3796692\n# 13: F9 21.9207048 \n# 14: HA -6.9152047 \n# 15: YV 15.5569853 \n# 16: OO 11.9310345\n\n# Now let’s compute to means in the same line of code \nmean2 = DT[, list(mean_departure_delay = mean(dep_delay, na.rm = TRUE), \n mean_arrival_delay = mean(arr_delay, na.rm = TRUE)), \n by = carrier] \nprint(mean2) \n\n# carrier mean_departure_delay mean_arrival_delay \n# 1: UA 12.106073 3.5580111 \n# 2: AA 8.586016 0.3642909 \n# 3: B6 13.022522 9.4579733 \n# 4: DL 9.264505 1.6443409 \n# 5: EV 19.955390 15.7964311 \n# 6: MQ 10.552041 10.7747334 \n# 7: US 3.782418 2.1295951 \n# 8: WN 17.711744 9.6491199 \n# 9: VX 12.869421 1.7644644 \n# 10: FL 18.726075 20.1159055 \n# 11: AS 5.804775 -9.9308886 \n# 12: 9E 16.725769 7.3796692 \n# 13: F9 20.215543 21.9207048 \n# 14: HA 4.900585 -6.9152047 \n# 15: YV 18.996330 15.5569853 \n# 16: OO 12.586207 11.9310345\n\n### Create a new variable called gain \n# this is the difference between arrival delay and departure delay \nDT[, gain:= arr_delay - dep_delay] \n\n# Compute the median gain per carrier \nmedian_gain = DT[, median(gain, na.rm = TRUE), by = carrier] \nprint(median_gain)\n"
},
{
"code": null,
"e": 42834,
"s": 42358,
"text": "Exploratory data analysis is a concept developed by John Tuckey (1977) that consists on a new perspective of statistics. Tuckey’s idea was that in traditional statistics, the data was not being explored graphically, is was just being used to test hypotheses. The first attempt to develop a tool was done in Stanford, the project was called prim9. The tool was able to visualize data in nine dimensions, therefore it was able to provide a multivariate perspective of the data."
},
{
"code": null,
"e": 43075,
"s": 42834,
"text": "In recent days, exploratory data analysis is a must and has been included in the big data analytics life cycle. The ability to find insight and be able to communicate it effectively in an organization is fueled with strong EDA capabilities."
},
{
"code": null,
"e": 43442,
"s": 43075,
"text": "Based on Tuckey’s ideas, Bell Labs developed the S programming language in order to provide an interactive interface for doing statistics. The idea of S was to provide extensive graphical capabilities with an easy-to-use language. In today’s world, in the context of Big Data, R that is based on the S programming language is the most popular software for analytics."
},
{
"code": null,
"e": 43515,
"s": 43442,
"text": "The following program demonstrates the use of exploratory data analysis."
},
{
"code": null,
"e": 43648,
"s": 43515,
"text": "The following is an example of exploratory data analysis. This code is also available in part1/eda/exploratory_data_analysis.R file."
},
{
"code": null,
"e": 44910,
"s": 43648,
"text": "library(nycflights13) \nlibrary(ggplot2) \nlibrary(data.table) \nlibrary(reshape2) \n\n# Using the code from the previous section \n# This computes the mean arrival and departure delays by carrier. \nDT <- as.data.table(flights) \nmean2 = DT[, list(mean_departure_delay = mean(dep_delay, na.rm = TRUE), \n mean_arrival_delay = mean(arr_delay, na.rm = TRUE)), \n by = carrier] \n\n# In order to plot data in R usign ggplot, it is normally needed to reshape the data \n# We want to have the data in long format for plotting with ggplot \ndt = melt(mean2, id.vars = ’carrier’) \n\n# Take a look at the first rows \nprint(head(dt)) \n\n# Take a look at the help for ?geom_point and geom_line to find similar examples \n# Here we take the carrier code as the x axis \n# the value from the dt data.table goes in the y axis \n\n# The variable column represents the color \np = ggplot(dt, aes(x = carrier, y = value, color = variable, group = variable)) +\n geom_point() + # Plots points \n geom_line() + # Plots lines \n theme_bw() + # Uses a white background \n labs(list(title = 'Mean arrival and departure delay by carrier', \n x = 'Carrier', y = 'Mean delay')) \nprint(p) \n\n# Save the plot to disk \nggsave('mean_delay_by_carrier.png', p, \n width = 10.4, height = 5.07)\n"
},
{
"code": null,
"e": 44967,
"s": 44910,
"text": "The code should produce an image such as the following −"
},
{
"code": null,
"e": 45233,
"s": 44967,
"text": "In order to understand data, it is often useful to visualize it. Normally in Big Data applications, the interest relies in finding insight rather than just making beautiful plots. The following are examples of different approaches to understanding data using plots."
},
{
"code": null,
"e": 45435,
"s": 45233,
"text": "To start analyzing the flights data, we can start by checking if there are correlations between numeric variables. This code is also available in bda/part1/data_visualization/data_visualization.R file."
},
{
"code": null,
"e": 47038,
"s": 45435,
"text": "# Install the package corrplot by running\ninstall.packages('corrplot') \n\n# then load the library \nlibrary(corrplot) \n\n# Load the following libraries \nlibrary(nycflights13) \nlibrary(ggplot2) \nlibrary(data.table) \nlibrary(reshape2) \n\n# We will continue working with the flights data \nDT <- as.data.table(flights) \nhead(DT) # take a look \n\n# We select the numeric variables after inspecting the first rows. \nnumeric_variables = c('dep_time', 'dep_delay', \n 'arr_time', 'arr_delay', 'air_time', 'distance')\n\n# Select numeric variables from the DT data.table \ndt_num = DT[, numeric_variables, with = FALSE] \n\n# Compute the correlation matrix of dt_num \ncor_mat = cor(dt_num, use = \"complete.obs\") \n\nprint(cor_mat) \n### Here is the correlation matrix \n# dep_time dep_delay arr_time arr_delay air_time distance \n# dep_time 1.00000000 0.25961272 0.66250900 0.23230573 -0.01461948 -0.01413373 \n# dep_delay 0.25961272 1.00000000 0.02942101 0.91480276 -0.02240508 -0.02168090 \n# arr_time 0.66250900 0.02942101 1.00000000 0.02448214 0.05429603 0.04718917 \n# arr_delay 0.23230573 0.91480276 0.02448214 1.00000000 -0.03529709 -0.06186776 \n# air_time -0.01461948 -0.02240508 0.05429603 -0.03529709 1.00000000 0.99064965 \n# distance -0.01413373 -0.02168090 0.04718917 -0.06186776 0.99064965 1.00000000 \n\n# We can display it visually to get a better understanding of the data \ncorrplot.mixed(cor_mat, lower = \"circle\", upper = \"ellipse\") \n\n# save it to disk \npng('corrplot.png') \nprint(corrplot.mixed(cor_mat, lower = \"circle\", upper = \"ellipse\")) \ndev.off()\n"
},
{
"code": null,
"e": 47107,
"s": 47038,
"text": "This code generates the following correlation matrix visualization −"
},
{
"code": null,
"e": 47446,
"s": 47107,
"text": "We can see in the plot that there is a strong correlation between some of the variables in the dataset. For example, arrival delay and departure delay seem to be highly correlated. We can see this because the ellipse shows an almost lineal relationship between both variables, however, it is not simple to find causation from this result."
},
{
"code": null,
"e": 47700,
"s": 47446,
"text": "We can’t say that as two variables are correlated, that one has an effect on the other. Also we find in the plot a strong correlation between air time and distance, which is fairly reasonable to expect as with more distance, the flight time should grow."
},
{
"code": null,
"e": 47997,
"s": 47700,
"text": "We can also do univariate analysis of the data. A simple and effective way to visualize distributions are box-plots. The following code demonstrates how to produce box-plots and trellis charts using the ggplot2 library. This code is also available in bda/part1/data_visualization/boxplots.R file."
},
{
"code": null,
"e": 49235,
"s": 47997,
"text": "source('data_visualization.R') \n### Analyzing Distributions using box-plots \n# The following shows the distance as a function of the carrier \n\np = ggplot(DT, aes(x = carrier, y = distance, fill = carrier)) + # Define the carrier \n in the x axis and distance in the y axis \n geom_box-plot() + # Use the box-plot geom \n theme_bw() + # Leave a white background - More in line with tufte's \n principles than the default \n guides(fill = FALSE) + # Remove legend \n labs(list(title = 'Distance as a function of carrier', # Add labels \n x = 'Carrier', y = 'Distance')) \np \n# Save to disk \npng(‘boxplot_carrier.png’) \nprint(p) \ndev.off() \n\n# Let's add now another variable, the month of each flight \n# We will be using facet_wrap for this \np = ggplot(DT, aes(carrier, distance, fill = carrier)) + \n geom_box-plot() + \n theme_bw() + \n guides(fill = FALSE) + \n facet_wrap(~month) + # This creates the trellis plot with the by month variable\n labs(list(title = 'Distance as a function of carrier by month', \n x = 'Carrier', y = 'Distance')) \np \n# The plot shows there aren't clear differences between distance in different months \n\n# Save to disk \npng('boxplot_carrier_by_month.png') \nprint(p) \ndev.off()\n"
},
{
"code": null,
"e": 49427,
"s": 49235,
"text": "This section is devoted to introduce the users to the R programming language. R can be downloaded from the cran website. For Windows users, it is useful to install rtools and the rstudio IDE."
},
{
"code": null,
"e": 49620,
"s": 49427,
"text": "The general concept behind R is to serve as an interface to other software developed in compiled languages such as C, C++, and Fortran and to give the user an interactive tool to analyze data."
},
{
"code": null,
"e": 50014,
"s": 49620,
"text": "Navigate to the folder of the book zip file bda/part2/R_introduction and open the R_introduction.Rproj file. This will open an RStudio session. Then open the 01_vectors.R file. Run the script line by line and follow the comments in the code. Another useful option in order to learn is to just type the code, this will help you get used to R syntax. In R comments are written with the # symbol."
},
{
"code": null,
"e": 50227,
"s": 50014,
"text": "In order to display the results of running R code in the book, after code is evaluated, the results R returns are commented. This way, you can copy paste the code in the book and try directly sections of it in R."
},
{
"code": null,
"e": 50530,
"s": 50227,
"text": "# Create a vector of numbers \nnumbers = c(1, 2, 3, 4, 5) \nprint(numbers) \n\n# [1] 1 2 3 4 5 \n# Create a vector of letters \nltrs = c('a', 'b', 'c', 'd', 'e') \n# [1] \"a\" \"b\" \"c\" \"d\" \"e\" \n\n# Concatenate both \nmixed_vec = c(numbers, ltrs) \nprint(mixed_vec) \n# [1] \"1\" \"2\" \"3\" \"4\" \"5\" \"a\" \"b\" \"c\" \"d\" \"e\"\n"
},
{
"code": null,
"e": 50930,
"s": 50530,
"text": "Let’s analyze what happened in the previous code. We can see it is possible to create vectors with numbers and with letters. We did not need to tell R what type of data type we wanted beforehand. Finally, we were able to create a vector with both numbers and letters. The vector mixed_vec has coerced the numbers to character, we can see this by visualizing how the values are printed inside quotes."
},
{
"code": null,
"e": 51122,
"s": 50930,
"text": "The following code shows the data type of different vectors as returned by the function class. It is common to use the class function to \"interrogate\" an object, asking him what his class is."
},
{
"code": null,
"e": 51489,
"s": 51122,
"text": "### Evaluate the data types using class\n\n### One dimensional objects \n# Integer vector \nnum = 1:10 \nclass(num) \n# [1] \"integer\" \n\n# Numeric vector, it has a float, 10.5 \nnum = c(1:10, 10.5) \nclass(num) \n# [1] \"numeric\" \n\n# Character vector \nltrs = letters[1:10] \nclass(ltrs) \n# [1] \"character\" \n\n# Factor vector \nfac = as.factor(ltrs) \nclass(fac) \n# [1] \"factor\"\n"
},
{
"code": null,
"e": 51650,
"s": 51489,
"text": "R supports two-dimensional objects also. In the following code, there are examples of the two most popular data structures used in R: the matrix and data.frame."
},
{
"code": null,
"e": 52599,
"s": 51650,
"text": "# Matrix\nM = matrix(1:12, ncol = 4) \n# [,1] [,2] [,3] [,4] \n# [1,] 1 4 7 10 \n# [2,] 2 5 8 11 \n# [3,] 3 6 9 12 \nlM = matrix(letters[1:12], ncol = 4) \n# [,1] [,2] [,3] [,4] \n# [1,] \"a\" \"d\" \"g\" \"j\" \n# [2,] \"b\" \"e\" \"h\" \"k\" \n# [3,] \"c\" \"f\" \"i\" \"l\" \n\n# Coerces the numbers to character \n# cbind concatenates two matrices (or vectors) in one matrix \ncbind(M, lM) \n# [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] \n# [1,] \"1\" \"4\" \"7\" \"10\" \"a\" \"d\" \"g\" \"j\" \n# [2,] \"2\" \"5\" \"8\" \"11\" \"b\" \"e\" \"h\" \"k\" \n# [3,] \"3\" \"6\" \"9\" \"12\" \"c\" \"f\" \"i\" \"l\" \n\nclass(M) \n# [1] \"matrix\" \nclass(lM) \n# [1] \"matrix\" \n\n# data.frame \n# One of the main objects of R, handles different data types in the same object. \n# It is possible to have numeric, character and factor vectors in the same data.frame \n\ndf = data.frame(n = 1:5, l = letters[1:5]) \ndf \n# n l \n# 1 1 a \n# 2 2 b \n# 3 3 c \n# 4 4 d \n# 5 5 e \n"
},
{
"code": null,
"e": 53021,
"s": 52599,
"text": "As demonstrated in the previous example, it is possible to use different data types in the same object. In general, this is how data is presented in databases, APIs part of the data is text or character vectors and other numeric. In is the analyst job to determine which statistical data type to assign and then use the correct R data type for it. In statistics we normally consider variables are of the following types −"
},
{
"code": null,
"e": 53029,
"s": 53021,
"text": "Numeric"
},
{
"code": null,
"e": 53052,
"s": 53029,
"text": "Nominal or categorical"
},
{
"code": null,
"e": 53060,
"s": 53052,
"text": "Ordinal"
},
{
"code": null,
"e": 53109,
"s": 53060,
"text": "In R, a vector can be of the following classes −"
},
{
"code": null,
"e": 53127,
"s": 53109,
"text": "Numeric - Integer"
},
{
"code": null,
"e": 53134,
"s": 53127,
"text": "Factor"
},
{
"code": null,
"e": 53149,
"s": 53134,
"text": "Ordered Factor"
},
{
"code": null,
"e": 53309,
"s": 53149,
"text": "R provides a data type for each statistical type of variable. The ordered factor is however rarely used, but can be created by the function factor, or ordered."
},
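{
    "code": null,
    "e": null,
    "s": null,
    "text": "A small sketch of how an ordered factor can be created and compared (the level names here are purely illustrative):"
},
{
    "code": null,
    "e": null,
    "s": null,
    "text": "# Ordered factor example\nsizes = c('small', 'large', 'medium', 'medium')\nord = factor(sizes, levels = c('small', 'medium', 'large'), ordered = TRUE)\n\nclass(ord)\n# [1] \"ordered\" \"factor\"\n\n# Because the factor is ordered, comparisons between levels are allowed\nord[1] < ord[2]\n# [1] TRUE\n"
},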
{
"code": null,
"e": 53495,
"s": 53309,
"text": "The following section treats the concept of indexing. This is a quite common operation, and deals with the problem of selecting sections of an object and making transformations to them."
},
{
"code": null,
"e": 56419,
"s": 53495,
"text": "# Let's create a data.frame\ndf = data.frame(numbers = 1:26, letters) \nhead(df) \n# numbers letters \n# 1 1 a \n# 2 2 b \n# 3 3 c \n# 4 4 d \n# 5 5 e \n# 6 6 f \n\n# str gives the structure of a data.frame, it’s a good summary to inspect an object \nstr(df) \n# 'data.frame': 26 obs. of 2 variables: \n# $ numbers: int 1 2 3 4 5 6 7 8 9 10 ... \n# $ letters: Factor w/ 26 levels \"a\",\"b\",\"c\",\"d\",..: 1 2 3 4 5 6 7 8 9 10 ... \n\n# The latter shows the letters character vector was coerced as a factor. \n# This can be explained by the stringsAsFactors = TRUE argumnet in data.frame \n# read ?data.frame for more information \n\nclass(df) \n# [1] \"data.frame\" \n\n### Indexing\n# Get the first row \ndf[1, ] \n# numbers letters \n# 1 1 a \n\n# Used for programming normally - returns the output as a list \ndf[1, , drop = TRUE] \n# $numbers \n# [1] 1 \n# \n# $letters \n# [1] a \n# Levels: a b c d e f g h i j k l m n o p q r s t u v w x y z \n\n# Get several rows of the data.frame \ndf[5:7, ] \n# numbers letters \n# 5 5 e \n# 6 6 f \n# 7 7 g \n\n### Add one column that mixes the numeric column with the factor column \ndf$mixed = paste(df$numbers, df$letters, sep = ’’) \n\nstr(df) \n# 'data.frame': 26 obs. of 3 variables: \n# $ numbers: int 1 2 3 4 5 6 7 8 9 10 ...\n# $ letters: Factor w/ 26 levels \"a\",\"b\",\"c\",\"d\",..: 1 2 3 4 5 6 7 8 9 10 ... \n# $ mixed : chr \"1a\" \"2b\" \"3c\" \"4d\" ... \n\n### Get columns \n# Get the first column \ndf[, 1] \n# It returns a one dimensional vector with that column \n\n# Get two columns \ndf2 = df[, 1:2] \nhead(df2) \n\n# numbers letters \n# 1 1 a \n# 2 2 b \n# 3 3 c \n# 4 4 d \n# 5 5 e \n# 6 6 f \n\n# Get the first and third columns \ndf3 = df[, c(1, 3)] \ndf3[1:3, ] \n\n# numbers mixed \n# 1 1 1a\n# 2 2 2b \n# 3 3 3c \n\n### Index columns from their names \nnames(df) \n# [1] \"numbers\" \"letters\" \"mixed\" \n# This is the best practice in programming, as many times indeces change, but \nvariable names don’t \n# We create a variable with the names we want to subset \nkeep_vars = c(\"numbers\", \"mixed\") \ndf4 = df[, keep_vars] \n\nhead(df4) \n# numbers mixed \n# 1 1 1a \n# 2 2 2b \n# 3 3 3c \n# 4 4 4d \n# 5 5 5e \n# 6 6 6f \n\n### subset rows and columns \n# Keep the first five rows \ndf5 = df[1:5, keep_vars] \ndf5 \n\n# numbers mixed \n# 1 1 1a \n# 2 2 2b\n# 3 3 3c \n# 4 4 4d \n# 5 5 5e \n\n# subset rows using a logical condition \ndf6 = df[df$numbers < 10, keep_vars] \ndf6 \n\n# numbers mixed \n# 1 1 1a \n# 2 2 2b \n# 3 3 3c \n# 4 4 4d \n# 5 5 5e \n# 6 6 6f \n# 7 7 7g \n# 8 8 8h \n# 9 9 9i \n"
},
{
"code": null,
"e": 56824,
"s": 56419,
"text": "SQL stands for structured query language. It is one of the most widely used languages for extracting data from databases in traditional data warehouses and big data technologies. In order to demonstrate the basics of SQL we will be working with examples. In order to focus on the language itself, we will be using SQL inside R. In terms of writing SQL code this is exactly as would be done in a database."
},
{
"code": null,
"e": 57189,
"s": 56824,
"text": "The core of SQL are three statements: SELECT, FROM and WHERE. The following examples make use of the most common use cases of SQL. Navigate to the folder bda/part2/SQL_introduction and open the SQL_introduction.Rproj file. Then open the 01_select.R script. In order to write SQL code in R we need to install the sqldf package as demonstrated in the following code."
},
{
"code": null,
"e": 58398,
"s": 57189,
"text": "# Install the sqldf package\ninstall.packages('sqldf') \n\n# load the library \nlibrary('sqldf') \nlibrary(nycflights13) \n\n# We will be working with the fligths dataset in order to introduce SQL \n\n# Let’s take a look at the table \nstr(flights) \n# Classes 'tbl_d', 'tbl' and 'data.frame': 336776 obs. of 16 variables: \n\n# $ year : int 2013 2013 2013 2013 2013 2013 2013 2013 2013 2013 ... \n# $ month : int 1 1 1 1 1 1 1 1 1 1 ... \n# $ day : int 1 1 1 1 1 1 1 1 1 1 ... \n# $ dep_time : int 517 533 542 544 554 554 555 557 557 558 ... \n# $ dep_delay: num 2 4 2 -1 -6 -4 -5 -3 -3 -2 ... \n# $ arr_time : int 830 850 923 1004 812 740 913 709 838 753 ... \n# $ arr_delay: num 11 20 33 -18 -25 12 19 -14 -8 8 ...\n# $ carrier : chr \"UA\" \"UA\" \"AA\" \"B6\" ... \n\n# $ tailnum : chr \"N14228\" \"N24211\" \"N619AA\" \"N804JB\" ... \n# $ flight : int 1545 1714 1141 725 461 1696 507 5708 79 301 ... \n# $ origin : chr \"EWR\" \"LGA\" \"JFK\" \"JFK\" ... \n# $ dest : chr \"IAH\" \"IAH\" \"MIA\" \"BQN\" ... \n# $ air_time : num 227 227 160 183 116 150 158 53 140 138 ... \n# $ distance : num 1400 1416 1089 1576 762 ... \n# $ hour : num 5 5 5 5 5 5 5 5 5 5 ... \n# $ minute : num 17 33 42 44 54 54 55 57 57 58 ...\n"
},
{
"code": null,
"e": 58592,
"s": 58398,
"text": "The select statement is used to retrieve columns from tables and do calculations on them. The simplest SELECT statement is demonstrated in ej1. We can also create new variables as shown in ej2."
},
{
"code": null,
"e": 59665,
"s": 58592,
"text": "### SELECT statement\nej1 = sqldf(\" \n SELECT \n dep_time \n ,dep_delay \n ,arr_time \n ,carrier \n ,tailnum \n FROM \n flights\n\") \n\nhead(ej1) \n# dep_time dep_delay arr_time carrier tailnum \n# 1 517 2 830 UA N14228 \n# 2 533 4 850 UA N24211 \n# 3 542 2 923 AA N619AA \n# 4 544 -1 1004 B6 N804JB \n# 5 554 -6 812 DL N668DN \n# 6 554 -4 740 UA N39463 \n\n# In R we can use SQL with the sqldf function. It works exactly the same as in \na database \n\n# The data.frame (in this case flights) represents the table we are querying \nand goes in the FROM statement \n# We can also compute new variables in the select statement using the syntax: \n\n# old_variables as new_variable \nej2 = sqldf(\" \n SELECT \n arr_delay - dep_delay as gain, \n carrier \n FROM \n flights\n\") \n\nej2[1:5, ] \n# gain carrier \n# 1 9 UA \n# 2 16 UA \n# 3 31 AA \n# 4 -17 B6 \n# 5 -19 DL\n"
},
{
"code": null,
"e": 59848,
"s": 59665,
"text": "One of the most common used features of SQL is the group by statement. This allows to compute a numeric value for different groups of another variable. Open the script 02_group_by.R."
},
{
"code": null,
"e": 62707,
"s": 59848,
"text": "### GROUP BY \n\n# Computing the average \nej3 = sqldf(\" \n SELECT \n avg(arr_delay) as mean_arr_delay, \n avg(dep_delay) as mean_dep_delay, \n carrier \n FROM \n flights \n GROUP BY \n carrier \n\") \n\n# mean_arr_delay mean_dep_delay carrier \n# 1 7.3796692 16.725769 9E \n# 2 0.3642909 8.586016 AA \n# 3 -9.9308886 5.804775 AS \n# 4 9.4579733 13.022522 B6 \n# 5 1.6443409 9.264505 DL \n# 6 15.7964311 19.955390 EV \n# 7 21.9207048 20.215543 F9 \n# 8 20.1159055 18.726075 FL \n# 9 -6.9152047 4.900585 HA \n# 10 10.7747334 10.552041 MQ\n# 11 11.9310345 12.586207 OO \n# 12 3.5580111 12.106073 UA \n# 13 2.1295951 3.782418 US \n# 14 1.7644644 12.869421 VX \n# 15 9.6491199 17.711744 WN \n# 16 15.5569853 18.996330 YV \n\n# Other aggregations \nej4 = sqldf(\" \n SELECT \n avg(arr_delay) as mean_arr_delay, \n min(dep_delay) as min_dep_delay, \n max(dep_delay) as max_dep_delay, \n carrier \n FROM \n flights \n GROUP BY \n carrier \n\") \n\n# We can compute the minimun, mean, and maximum values of a numeric value \nej4 \n# mean_arr_delay min_dep_delay max_dep_delay carrier \n# 1 7.3796692 -24 747 9E \n# 2 0.3642909 -24 1014 AA \n# 3 -9.9308886 -21 225 AS \n# 4 9.4579733 -43 502 B6\n# 5 1.6443409 -33 960 DL \n# 6 15.7964311 -32 548 EV \n# 7 21.9207048 -27 853 F9 \n# 8 20.1159055 -22 602 FL \n# 9 -6.9152047 -16 1301 HA \n# 10 10.7747334 -26 1137 MQ \n# 11 11.9310345 -14 154 OO \n# 12 3.5580111 -20 483 UA \n# 13 2.1295951 -19 500 US \n# 14 1.7644644 -20 653 VX \n# 15 9.6491199 -13 471 WN \n# 16 15.5569853 -16 387 YV \n\n### We could be also interested in knowing how many observations each carrier has \nej5 = sqldf(\" \n SELECT \n carrier, count(*) as count \n FROM \n flights \n GROUP BY \n carrier \n\") \n\nej5 \n# carrier count \n# 1 9E 18460\n# 2 AA 32729 \n# 3 AS 714 \n# 4 B6 54635 \n# 5 DL 48110 \n# 6 EV 54173 \n# 7 F9 685 \n# 8 FL 3260 \n# 9 HA 342 \n# 10 MQ 26397 \n# 11 OO 32 \n# 12 UA 58665 \n# 13 US 20536 \n# 14 VX 5162 \n# 15 WN 12275 \n# 16 YV 601 \n"
},
{
"code": null,
"e": 63011,
"s": 62707,
"text": "The most useful feature of SQL are joins. A join means that we want to combine table A and table B in one table using one column to match the values of both tables. There are different types of joins, in practical terms, to get started these will be the most useful ones: inner join and left outer join."
},
{
"code": null,
"e": 63942,
"s": 63011,
"text": "# Let’s create two tables: A and B to demonstrate joins.\nA = data.frame(c1 = 1:4, c2 = letters[1:4]) \nB = data.frame(c1 = c(2,4,5,6), c2 = letters[c(2:5)]) \n\nA \n# c1 c2 \n# 1 a \n# 2 b \n# 3 c \n# 4 d \n\nB \n# c1 c2 \n# 2 b \n# 4 c \n# 5 d \n# 6 e \n\n### INNER JOIN \n# This means to match the observations of the column we would join the tables by. \ninner = sqldf(\" \n SELECT \n A.c1, B.c2 \n FROM \n A INNER JOIN B \n ON A.c1 = B.c1 \n\") \n\n# Only the rows that match c1 in both A and B are returned \ninner \n# c1 c2 \n# 2 b \n# 4 c \n\n### LEFT OUTER JOIN\n# the left outer join, sometimes just called left join will return the \n# first all the values of the column used from the A table \nleft = sqldf(\" \n SELECT \n A.c1, B.c2 \n FROM \n A LEFT OUTER JOIN B \n ON A.c1 = B.c1 \n\") \n\n# Only the rows that match c1 in both A and B are returned \nleft \n# c1 c2 \n# 1 <NA> \n# 2 b \n# 3 <NA> \n# 4 c \n"
},
{
"code": null,
"e": 64166,
"s": 63942,
"text": "The first approach to analyzing data is to visually analyze it. The objectives at doing this are normally finding relations between variables and univariate descriptions of the variables. We can divide these strategies as −"
},
{
"code": null,
"e": 64186,
"s": 64166,
"text": "Univariate analysis"
},
{
"code": null,
"e": 64208,
"s": 64186,
"text": "Multivariate analysis"
},
{
"code": null,
"e": 64385,
"s": 64208,
"text": "Univariate is a statistical term. In practice, it means we want to analyze a variable independently from the rest of the data. The plots that allow to do this efficiently are −"
},
{
"code": null,
"e": 64610,
"s": 64385,
"text": "Box-Plots are normally used to compare distributions. It is a great way to visually inspect if there are differences between distributions. We can see if there are differences between the price of diamonds for different cut."
},
{
"code": null,
"e": 65486,
"s": 64610,
"text": "# We will be using the ggplot2 library for plotting\nlibrary(ggplot2) \ndata(\"diamonds\") \n\n# We will be using the diamonds dataset to analyze distributions of numeric variables \nhead(diamonds) \n\n# carat cut color clarity depth table price x y z \n# 1 0.23 Ideal E SI2 61.5 55 326 3.95 3.98 2.43 \n# 2 0.21 Premium E SI1 59.8 61 326 3.89 3.84 2.31 \n# 3 0.23 Good E VS1 56.9 65 327 4.05 4.07 2.31 \n# 4 0.29 Premium I VS2 62.4 58 334 4.20 4.23 2.63 \n# 5 0.31 Good J SI2 63.3 58 335 4.34 4.35 2.75 \n# 6 0.24 Very Good J VVS2 62.8 57 336 3.94 3.96 2.48 \n\n### Box-Plots\np = ggplot(diamonds, aes(x = cut, y = price, fill = cut)) + \n geom_box-plot() + \n theme_bw() \nprint(p)\n"
},
{
"code": null,
"e": 65596,
"s": 65486,
"text": "We can see in the plot there are differences in the distribution of diamonds price in different types of cut."
},
{
"code": null,
"e": 66197,
"s": 65596,
"text": "source('01_box_plots.R')\n\n# We can plot histograms for each level of the cut factor variable using \nfacet_grid \np = ggplot(diamonds, aes(x = price, fill = cut)) + \n geom_histogram() + \n facet_grid(cut ~ .) + \n theme_bw() \n\np \n# the previous plot doesn’t allow to visuallize correctly the data because of \nthe differences in scale \n# we can turn this off using the scales argument of facet_grid \n\np = ggplot(diamonds, aes(x = price, fill = cut)) + \n geom_histogram() + \n facet_grid(cut ~ ., scales = 'free') + \n theme_bw() \np \n\npng('02_histogram_diamonds_cut.png') \nprint(p) \ndev.off()\n"
},
{
"code": null,
"e": 66247,
"s": 66197,
"text": "The output of the above code will be as follows −"
},
{
"code": null,
"e": 66553,
"s": 66247,
"text": "Multivariate graphical methods in exploratory data analysis have the objective of finding relationships among different variables. There are two ways to accomplish this that are commonly used: plotting a correlation matrix of numeric variables or simply plotting the raw data as a matrix of scatter plots."
},
{
"code": null,
"e": 66698,
"s": 66553,
"text": "In order to demonstrate this, we will use the diamonds dataset. To follow the code, open the script bda/part2/charts/03_multivariate_analysis.R."
},
{
"code": null,
"e": 67189,
"s": 66698,
"text": "library(ggplot2)\ndata(diamonds) \n\n# Correlation matrix plots \nkeep_vars = c('carat', 'depth', 'price', 'table') \ndf = diamonds[, keep_vars] \n# compute the correlation matrix \nM_cor = cor(df) \n\n# carat depth price table \n# carat 1.00000000 0.02822431 0.9215913 0.1816175 \n# depth 0.02822431 1.00000000 -0.0106474 -0.2957785 \n# price 0.92159130 -0.01064740 1.0000000 0.1271339 \n# table 0.18161755 -0.29577852 0.1271339 1.0000000 \n\n# plots \nheat-map(M_cor)\n"
},
{
"code": null,
"e": 67234,
"s": 67189,
"text": "The code will produce the following output −"
},
{
"code": null,
"e": 67365,
"s": 67234,
"text": "This is a summary, it tells us that there is a strong correlation between price and caret, and not much among the other variables."
},
{
"code": null,
"e": 67558,
"s": 67365,
"text": "A correlation matrix can be useful when we have a large number of variables in which case plotting the raw data would not be practical. As mentioned, it is possible to show the raw data also −"
},
{
"code": null,
"e": 67588,
"s": 67558,
"text": "library(GGally)\nggpairs(df) \n"
},
{
"code": null,
"e": 67737,
"s": 67588,
"text": "We can see in the plot that the results displayed in the heat-map are confirmed, there is a 0.922 correlation between the price and carat variables."
},
{
"code": null,
"e": 67869,
"s": 67737,
"text": "It is possible to visualize this relationship in the price-carat scatterplot located in the (3, 1) index of the scatterplot matrix."
},
{
"code": null,
"e": 68236,
"s": 67869,
"text": "There are a variety of tools that allow a data scientist to analyze data effectively. Normally the engineering aspect of data analysis focuses on databases, data scientist focus in tools that can implement data products. The following section discusses the advantages of different tools with a focus on statistical packages data scientist use in practice most often."
},
{
"code": null,
"e": 68502,
"s": 68236,
"text": "R is an open source programming language with a focus on statistical analysis. It is competitive with commercial tools such as SAS, SPSS in terms of statistical capabilities. It is thought to be an interface to other programming languages such as C, C++ or Fortran."
},
{
"code": null,
"e": 68729,
"s": 68502,
"text": "Another advantage of R is the large number of open source libraries that are available. In CRAN there are more than 6000 packages that can be downloaded for free and in Github there is a wide a variety of R packages available."
},
{
"code": null,
"e": 69203,
"s": 68729,
"text": "In terms of performance, R is slow for intensive operations, given the large amount of libraries available the slow sections of the code are written in compiled languages. But if you are intending to do operations that require writing deep for loops, then R wouldn’t be your best alternative. For data analysis purpose, there are nice libraries such as data.table, glmnet, ranger, xgboost, ggplot2, caret that allow to use R as an interface to faster programming languages."
},
{
"code": null,
"e": 69382,
"s": 69203,
"text": "Python is a general purpose programming language and it contains a significant number of libraries devoted to data analysis such as pandas, scikit-learn, theano, numpy and scipy."
},
{
"code": null,
"e": 69729,
"s": 69382,
"text": "Most of what’s available in R can also be done in Python but we have found that R is simpler to use. In case you are working with large datasets, normally Python is a better choice than R. Python can be used quite effectively to clean and process data line by line. This is possible from R but it’s not as efficient as Python for scripting tasks."
},
{
"code": null,
"e": 69987,
"s": 69729,
"text": "For machine learning, scikit-learn is a nice environment that has available a large amount of algorithms that can handle medium sized datasets without a problem. Compared to R’s equivalent library (caret), scikit-learn has a cleaner and more consistent API."
},
{
"code": null,
"e": 70349,
"s": 69987,
"text": "Julia is a high-level, high-performance dynamic programming language for technical computing. Its syntax is quite similar to R or Python, so if you are already working with R or Python it should be quite simple to write the same code in Julia. The language is quite new and has grown significantly in the last years, so it is definitely an option at the moment."
},
{
"code": null,
"e": 70723,
"s": 70349,
"text": "We would recommend Julia for prototyping algorithms that are computationally intensive such as neural networks. It is a great tool for research. In terms of implementing a model in production probably Python has better alternatives. However, this is becoming less of a problem as there are web services that do the engineering of implementing models in R, Python and Julia."
},
{
"code": null,
"e": 71062,
"s": 70723,
"text": "SAS is a commercial language that is still being used for business intelligence. It has a base language that allows the user to program a wide variety of applications. It contains quite a few commercial products that give non-experts users the ability to use complex tools such as a neural network library without the need of programming."
},
{
"code": null,
"e": 71424,
"s": 71062,
"text": "Beyond the obvious disadvantage of commercial tools, SAS doesn’t scale well to large datasets. Even medium sized dataset will have problems with SAS and make the server crash. Only if you are working with small datasets and the users aren’t expert data scientist, SAS is to be recommended. For advanced users, R and Python provide a more productive environment."
},
{
"code": null,
"e": 71947,
"s": 71424,
"text": "SPSS, is currently a product of IBM for statistical analysis. It is mostly used to analyze survey data and for users that are not able to program, it is a decent alternative. It is probably as simple to use as SAS, but in terms of implementing a model, it is simpler as it provides a SQL code to score a model. This code is normally not efficient, but it’s a start whereas SAS sells the product that scores models for each database separately. For small data and an unexperienced team, SPSS is an option as good as SAS is."
},
{
"code": null,
"e": 72072,
"s": 71947,
"text": "The software is however rather limited, and experienced users will be orders of magnitude more productive using R or Python."
},
{
"code": null,
"e": 72387,
"s": 72072,
"text": "There are other tools available such as Matlab or its open source version (Octave). These tools are mostly used for research. In terms of capabilities R or Python can do all that’s available in Matlab or Octave. It only makes sense to buy a license of the product if you are interested in the support they provide."
},
{
"code": null,
"e": 72519,
"s": 72387,
"text": "When analyzing data, it is possible to have a statistical approach. The basic tools that are needed to perform basic analysis are −"
},
{
"code": null,
"e": 72540,
"s": 72519,
"text": "Correlation analysis"
},
{
"code": null,
"e": 72561,
"s": 72540,
"text": "Analysis of Variance"
},
{
"code": null,
"e": 72580,
"s": 72561,
"text": "Hypothesis Testing"
},
{
"code": null,
"e": 72824,
"s": 72580,
"text": "When working with large datasets, it doesn’t involve a problem as these methods aren’t computationally intensive with the exception of Correlation Analysis. In this case, it is always possible to take a sample and the results should be robust."
},
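{
    "code": null,
    "e": null,
    "s": null,
    "text": "As an illustration of this idea, the following sketch estimates a correlation matrix on a random sample of rows instead of on the full dataset. The sample size of 5000 is arbitrary, and the diamonds data is used only because it ships with ggplot2."
},
{
    "code": null,
    "e": null,
    "s": null,
    "text": "library(ggplot2)\ndata(diamonds)\n\n# Estimate the correlations on a random sample of the rows\nset.seed(123)\nidx = sample(nrow(diamonds), 5000)\ncor(diamonds[idx, c('carat', 'depth', 'price', 'table')])\n"
},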
{
"code": null,
"e": 73318,
"s": 72824,
"text": "Correlation Analysis seeks to find linear relationships between numeric variables. This can be of use in different circumstances. One common use is exploratory data analysis, in section 16.0.2 of the book there is a basic example of this approach. First of all, the correlation metric used in the mentioned example is based on the Pearson coefficient. There is however, another interesting metric of correlation that is not affected by outliers. This metric is called the spearman correlation."
},
{
"code": null,
"e": 73532,
"s": 73318,
"text": "The spearman correlation metric is more robust to the presence of outliers than the Pearson method and gives better estimates of linear relations between numeric variable when the data is not normally distributed."
},
{
"code": null,
"e": 74112,
"s": 73532,
"text": "library(ggplot2)\n\n# Select variables that are interesting to compare pearson and spearman \ncorrelation methods. \nx = diamonds[, c('x', 'y', 'z', 'price')] \n\n# From the histograms we can expect differences in the correlations of both \nmetrics. \n# In this case as the variables are clearly not normally distributed, the \nspearman correlation \n\n# is a better estimate of the linear relation among numeric variables. \npar(mfrow = c(2,2)) \ncolnm = names(x) \nfor(i in 1:4) { \n hist(x[[i]], col = 'deepskyblue3', main = sprintf('Histogram of %s', colnm[i])) \n} \npar(mfrow = c(1,1)) "
},
{
"code": null,
"e": 74383,
"s": 74112,
"text": "From the histograms in the following figure, we can expect differences in the correlations of both metrics. In this case, as the variables are clearly not normally distributed, the spearman correlation is a better estimate of the linear relation among numeric variables."
},
{
"code": null,
"e": 74523,
"s": 74383,
"text": "In order to compute the correlation in R, open the file bda/part2/statistical_methods/correlation/correlation.R that has this code section."
},
{
"code": null,
"e": 75279,
"s": 74523,
"text": "## Correlation Matrix - Pearson and spearman\ncor_pearson <- cor(x, method = 'pearson') \ncor_spearman <- cor(x, method = 'spearman') \n\n### Pearson Correlation \nprint(cor_pearson) \n# x y z price \n# x 1.0000000 0.9747015 0.9707718 0.8844352 \n# y 0.9747015 1.0000000 0.9520057 0.8654209 \n# z 0.9707718 0.9520057 1.0000000 0.8612494 \n# price 0.8844352 0.8654209 0.8612494 1.0000000 \n\n### Spearman Correlation \nprint(cor_spearman) \n# x y z price \n# x 1.0000000 0.9978949 0.9873553 0.9631961 \n# y 0.9978949 1.0000000 0.9870675 0.9627188 \n# z 0.9873553 0.9870675 1.0000000 0.9572323 \n# price 0.9631961 0.9627188 0.9572323 1.0000000 \n"
},
{
"code": null,
"e": 75584,
"s": 75279,
"text": "The chi-squared test allows us to test if two random variables are independent. This means that the probability distribution of each variable doesn’t influence the other. In order to evaluate the test in R we need first to create a contingency table, and then pass the table to the chisq.test R function."
},
{
"code": null,
"e": 75734,
"s": 75584,
"text": "For example, let’s check if there is an association between the variables: cut and color from the diamonds dataset. The test is formally defined as −"
},
{
"code": null,
"e": 75783,
"s": 75734,
"text": "H0: The variable cut and diamond are independent"
},
{
"code": null,
"e": 75836,
"s": 75783,
"text": "H1: The variable cut and diamond are not independent"
},
{
"code": null,
"e": 76007,
"s": 75836,
"text": "We would assume there is a relationship between these two variables by their name, but the test can give an objective \"rule\" saying how significant this result is or not."
},
{
"code": null,
"e": 76401,
"s": 76007,
"text": "In the following code snippet, we found that the p-value of the test is 2.2e-16, this is almost zero in practical terms. Then after running the test doing a Monte Carlo simulation, we found that the p-value is 0.0004998 which is still quite lower than the threshold 0.05. This result means that we reject the null hypothesis (H0), so we believe the variables cut and color are not independent."
},
{
"code": null,
"e": 77359,
"s": 76401,
"text": "library(ggplot2)\n\n# Use the table function to compute the contingency table \ntbl = table(diamonds$cut, diamonds$color) \ntbl \n\n# D E F G H I J \n# Fair 163 224 312 314 303 175 119 \n# Good 662 933 909 871 702 522 307 \n# Very Good 1513 2400 2164 2299 1824 1204 678 \n# Premium 1603 2337 2331 2924 2360 1428 808 \n# Ideal 2834 3903 3826 4884 3115 2093 896 \n\n# In order to run the test we just use the chisq.test function. \nchisq.test(tbl) \n\n# Pearson’s Chi-squared test \n# data: tbl \n# X-squared = 310.32, df = 24, p-value < 2.2e-16\n# It is also possible to compute the p-values using a monte-carlo simulation \n# It's needed to add the simulate.p.value = TRUE flag and the amount of \nsimulations \nchisq.test(tbl, simulate.p.value = TRUE, B = 2000) \n\n# Pearson’s Chi-squared test with simulated p-value (based on 2000 replicates) \n# data: tbl \n# X-squared = 310.32, df = NA, p-value = 0.0004998\n"
},
{
"code": null,
"e": 77690,
"s": 77359,
"text": "The idea of t-test is to evaluate if there are differences in a numeric variable # distribution between different groups of a nominal variable. In order to demonstrate this, I will select the levels of the Fair and Ideal levels of the factor variable cut, then we will compare the values a numeric variable among those two groups."
},
{
"code": null,
"e": 78008,
"s": 77690,
"text": "data = diamonds[diamonds$cut %in% c('Fair', 'Ideal'), ]\n\ndata$cut = droplevels.factor(data$cut) # Drop levels that aren’t used from the \ncut variable \ndf1 = data[, c('cut', 'price')] \n\n# We can see the price means are different for each group \ntapply(df1$price, df1$cut, mean) \n# Fair Ideal \n# 4358.758 3457.542\n"
},
{
"code": null,
"e": 78200,
"s": 78008,
"text": "The t-tests are implemented in R with the t.test function. The formula interface to t.test is the simplest way to use it, the idea is that a numeric variable is explained by a group variable."
},
{
"code": null,
"e": 78355,
"s": 78200,
"text": "For example: t.test(numeric_variable ~ group_variable, data = data). In the previous example, the numeric_variable is price and the group_variable is cut."
},
{
"code": null,
"e": 78596,
"s": 78355,
"text": "From a statistical perspective, we are testing if there are differences in the distributions of the numeric variable among two groups. Formally the hypothesis test is described with a null (H0) hypothesis and an alternative hypothesis (H1)."
},
{
"code": null,
"e": 78700,
"s": 78596,
"text": "H0: There are no differences in the distributions of the price variable among the Fair and Ideal groups"
},
{
"code": null,
"e": 78904,
"s": 78804,
"text": "H1 There are differences in the distributions of the price variable among the Fair and Ideal groups"
},
{
"code": null,
"e": 79068,
"s": 79004,
"text": "The following can be implemented in R with the following code −"
},
{
"code": null,
"e": 79613,
"s": 79068,
"text": "t.test(price ~ cut, data = data)\n\n# Welch Two Sample t-test \n# \n# data: price by cut \n# t = 9.7484, df = 1894.8, p-value < 2.2e-16 \n# alternative hypothesis: true difference in means is not equal to 0 \n# 95 percent confidence interval: \n# 719.9065 1082.5251 \n# sample estimates: \n# mean in group Fair mean in group Ideal \n# 4358.758 3457.542 \n\n# Another way to validate the previous results is to just plot the \ndistributions using a box-plot \nplot(price ~ cut, data = data, ylim = c(0,12000), \n col = 'deepskyblue3') \n"
},
{
"code": null,
"e": 80061,
"s": 79613,
"text": "We can analyze the test result by checking if the p-value is lower than 0.05. If this is the case, we keep the alternative hypothesis. This means we have found differences of price among the two levels of the cut factor. By the names of the levels we would have expected this result, but we wouldn’t have expected that the mean price in the Fail group would be higher than in the Ideal group. We can see this by comparing the means of each factor."
},
{
"code": null,
"e": 80327,
"s": 80061,
"text": "The plot command produces a graph that shows the relationship between the price and cut variable. It is a box-plot; we have covered this plot in section 16.0.1 but it basically shows the distribution of the price variable for the two levels of cut we are analyzing."
},
{
"code": null,
"e": 80681,
"s": 80327,
"text": "Analysis of Variance (ANOVA) is a statistical model used to analyze the differences among group distribution by comparing the mean and variance of each group, the model was developed by Ronald Fisher. ANOVA provides a statistical test of whether or not the means of several groups are equal, and therefore generalizes the t-test to more than two groups."
},
{
"code": null,
"e": 80883,
"s": 80681,
"text": "ANOVAs are useful for comparing three or more groups for statistical significance because doing multiple two-sample t-tests would result in an increased chance of committing a statistical type I error."
},
{
"code": null,
"e": 80981,
"s": 80883,
"text": "In terms of providing a mathematical explanation, the following is needed to understand the test."
},
{
"code": null,
"e": 81012,
"s": 80981,
"text": "xij = x + (xi − x) + (xij − x)"
},
{
"code": null,
"e": 81048,
"s": 81012,
"text": "This leads to the following model −"
},
{
"code": null,
"e": 81067,
"s": 81048,
"text": "xij = μ + αi + ∈ij"
},
{
"code": null,
"e": 81233,
"s": 81067,
"text": "where μ is the grand mean and αi is the ith group mean. The error term ∈ij is assumed to be iid from a normal distribution. The null hypothesis of the test is that −"
},
{
"code": null,
"e": 81252,
"s": 81233,
"text": "α1 = α2 = ... = αk"
},
{
"code": null,
"e": 81326,
"s": 81252,
"text": "In terms of computing the test statistic, we need to compute two values −"
},
{
"code": null,
"e": 81372,
"s": 81326,
"text": "Sum of squares for between group difference −"
},
{
"code": null,
"e": 81397,
"s": 81372,
"text": "SSDB=∑ik∑jn(xi ̄ ̄−x ̄)2"
},
{
"code": null,
"e": 81427,
"s": 81397,
"text": "Sums of squares within groups"
},
{
"code": null,
"e": 81456,
"s": 81427,
"text": "SSDW=∑ik∑jn(xij ̄ ̄−xi ̄ ̄)2"
},
{
"code": null,
"e": 81604,
"s": 81456,
"text": "where SSDB has a degree of freedom of k−1 and SSDW has a degree of freedom of N−k. Then we can define the mean squared differences for each metric."
},
{
"code": null,
"e": 81625,
"s": 81604,
"text": "MSB = SSDB / (k - 1)"
},
{
"code": null,
"e": 81646,
"s": 81625,
"text": "MSw = SSDw / (N - k)"
},
{
"code": null,
"e": 81735,
"s": 81646,
"text": "Finally, the test statistic in ANOVA is defined as the ratio of the above two quantities"
},
{
"code": null,
"e": 81749,
"s": 81735,
"text": "F = MSB / MSw"
},
{
"code": null,
"e": 81977,
"s": 81749,
"text": "which follows a F-distribution with k−1 and N−k degrees of freedom. If null hypothesis is true, F would likely be close to 1. Otherwise, the between group mean square MSB is likely to be large, which results in a large F value."
},
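{
    "code": null,
    "e": null,
    "s": null,
    "text": "As a check on the formulas above, the following sketch computes SSD_B, SSD_W and the F statistic by hand on the mtcars data, using cyl as the grouping variable purely for illustration:"
},
{
    "code": null,
    "e": null,
    "s": null,
    "text": "# By-hand ANOVA computation following the definitions above\ny = mtcars$mpg\ng = as.factor(mtcars$cyl)\nk = nlevels(g)\nN = length(y)\n\ngroup_means = tapply(y, g, mean)\ngrand_mean = mean(y)\n\n# Between-group and within-group sums of squares\nSSD_B = sum(table(g) * (group_means - grand_mean)^2)\nSSD_W = sum((y - group_means[g])^2)\n\nMS_B = SSD_B / (k - 1)\nMS_W = SSD_W / (N - k)\n\nF_stat = MS_B / MS_W\np_value = pf(F_stat, k - 1, N - k, lower.tail = FALSE)\nc(F = F_stat, p = p_value)\n"
},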
{
"code": null,
"e": 82175,
"s": 81977,
"text": "Basically, ANOVA examines the two sources of the total variance and sees which part contributes more. This is why it is called analysis of variance although the intention is to compare group means."
},
{
"code": null,
"e": 82329,
"s": 82175,
"text": "In terms of computing the statistic, it is actually rather simple to do in R. The following example will demonstrate how it is done and plot the results."
},
{
"code": null,
"e": 83400,
"s": 82329,
"text": "library(ggplot2)\n# We will be using the mtcars dataset \n\nhead(mtcars) \n# mpg cyl disp hp drat wt qsec vs am gear carb \n# Mazda RX4 21.0 6 160 110 3.90 2.620 16.46 0 1 4 4 \n# Mazda RX4 Wag 21.0 6 160 110 3.90 2.875 17.02 0 1 4 4 \n# Datsun 710 22.8 4 108 93 3.85 2.320 18.61 1 1 4 1 \n# Hornet 4 Drive 21.4 6 258 110 3.08 3.215 19.44 1 0 3 1 \n# Hornet Sportabout 18.7 8 360 175 3.15 3.440 17.02 0 0 3 2 \n# Valiant 18.1 6 225 105 2.76 3.460 20.22 1 0 3 1 \n\n# Let's see if there are differences between the groups of cyl in the mpg variable. \ndata = mtcars[, c('mpg', 'cyl')] \nfit = lm(mpg ~ cyl, data = mtcars) \nanova(fit) \n\n# Analysis of Variance Table \n# Response: mpg \n# Df Sum Sq Mean Sq F value Pr(>F) \n# cyl 1 817.71 817.71 79.561 6.113e-10 *** \n# Residuals 30 308.33 10.28 \n# Signif. codes: 0 *** 0.001 ** 0.01 * 0.05 . \n# Plot the distribution \nplot(mpg ~ as.factor(cyl), data = mtcars, col = 'deepskyblue3')\n"
},
{
"code": null,
"e": 83445,
"s": 83400,
"text": "The code will produce the following output −"
},
{
"code": null,
"e": 83699,
"s": 83445,
"text": "The p-value we get in the example is significantly smaller than 0.05, so R returns the symbol '***' to denote this. It means we reject the null hypothesis and that we find differences between the mpg means among the different groups of the cyl variable."
},
{
"code": null,
"e": 84197,
"s": 83699,
"text": "Machine learning is a subfield of computer science that deals with tasks such as pattern recognition, computer vision, speech recognition, text analytics and has a strong link with statistics and mathematical optimization. Applications include the development of search engines, spam filtering, Optical Character Recognition (OCR) among others. The boundaries between data mining, pattern recognition and the field of statistical learning are not clear and basically all refer to similar problems."
},
{
"code": null,
"e": 84252,
"s": 84197,
"text": "Machine learning can be divided in two types of task −"
},
{
"code": null,
"e": 84272,
"s": 84252,
"text": "Supervised Learning"
},
{
"code": null,
"e": 84294,
"s": 84272,
"text": "Unsupervised Learning"
},
{
"code": null,
"e": 84521,
"s": 84294,
"text": "Supervised learning refers to a type of problem where there is an input data defined as a matrix X and we are interested in predicting a response y. Where X = {x1, x2, ..., xn} has n predictors and has two values y = {c1, c2}."
},
{
"code": null,
"e": 84897,
"s": 84521,
"text": "An example application would be to predict the probability of a web user to click on ads using demographic features as predictors. This is often called to predict the click through rate (CTR). Then y = {click, doesn’t − click} and the predictors could be the used IP address, the day he entered the site, the user’s city, country among other features that could be available."
},
{
"code": null,
"e": 85208,
"s": 84897,
"text": "Unsupervised learning deals with the problem of finding groups that are similar within each other without having a class to learn from. There are several approaches to the task of learning a mapping from predictors to finding groups that share similar instances in each group and are different with each other."
},
{
"code": null,
"e": 85500,
"s": 85208,
"text": "An example application of unsupervised learning is customer segmentation. For example, in the telecommunications industry a common task is to segment users according to the usage they give to the phone. This would allow the marketing department to target each group with a different product."
},
{
"code": null,
"e": 85761,
"s": 85500,
"text": "Naive Bayes is a probabilistic technique for constructing classifiers. The characteristic assumption of the naive Bayes classifier is to consider that the value of a particular feature is independent of the value of any other feature, given the class variable."
},
{
"code": null,
"e": 86093,
"s": 85761,
"text": "Despite the oversimplified assumptions mentioned previously, naive Bayes classifiers have good results in complex real-world situations. An advantage of naive Bayes is that it only requires a small amount of training data to estimate the parameters necessary for classification and that the classifier can be trained incrementally."
},
{
"code": null,
"e": 86366,
"s": 86093,
"text": "Naive Bayes is a conditional probability model: given a problem instance to be classified, represented by a vector x = (x1, ..., xn) representing some n features (independent variables), it assigns to this instance probabilities for each of K possible outcomes or classes."
},
{
"code": null,
"e": 86384,
"s": 86366,
"text": "p(Ck|x1,.....,xn)"
},
{
"code": null,
"e": 86711,
"s": 86384,
"text": "The problem with the above formulation is that if the number of features n is large or if a feature can take on a large number of values, then basing such a model on probability tables is infeasible. We therefore reformulate the model to make it simpler. Using Bayes theorem, the conditional probability can be decomposed as −"
},
{
"code": null,
"e": 86736,
"s": 86711,
"text": "p(Ck|x)=p(Ck)p(x|Ck)p(x)"
},
{
"code": null,
"e": 86854,
"s": 86736,
"text": "This means that under the above independence assumptions, the conditional distribution over the class variable C is −"
},
{
"code": null,
"e": 86893,
"s": 86854,
"text": "p(Ck|x1,.....,xn)=1Zp(Ck)∏i=1np(xi|Ck)"
},
{
"code": null,
"e": 87293,
"s": 86893,
"text": "where the evidence Z = p(x) is a scaling factor dependent only on x1, ..., xn, that is a constant if the values of the feature variables are known. One common rule is to pick the hypothesis that is most probable; this is known as the maximum a posteriori or MAP decision rule. The corresponding classifier, a Bayes classifier, is the function that assigns a class label y^=Ck for some k as follows −"
},
{
"code": null,
"e": 87321,
"s": 87293,
"text": "y^=argmaxp(Ck)∏i=1np(xi|Ck)"
},
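{
"code": null,
"e": null,
"s": null,
"text": "To make the MAP rule above concrete, here is a small added sketch (it is not part of the original text; the class priors and per-class likelihoods are invented numbers) that scores two classes for a single observation with two binary features and picks the class with the largest unnormalized posterior."
},
{
"code": null,
"e": null,
"s": null,
"text": "# Invented class priors and per-class likelihoods p(x_i = 1 | C_k) \nprior <- c(spam = 0.4, ham = 0.6) \nlikelihood <- list(spam = c(0.8, 0.3), ham = c(0.2, 0.5)) \nx <- c(1, 0)   # observed binary features \n\n# Unnormalized posterior: p(C_k) * prod_i p(x_i | C_k) \nscore <- sapply(names(prior), function(k) { \n   p <- likelihood[[k]] \n   prior[[k]] * prod(ifelse(x == 1, p, 1 - p)) \n}) \nscore \n\n# MAP decision: pick the class with the highest score \nnames(which.max(score))\n"
},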
{
"code": null,
"e": 87508,
"s": 87321,
"text": "Implementing the algorithm in R is a straightforward process. The following example demonstrates how train a Naive Bayes classifier and use it for prediction in a spam filtering problem."
},
{
"code": null,
"e": 87591,
"s": 87508,
"text": "The following script is available in the bda/part3/naive_bayes/naive_bayes.R file."
},
{
"code": null,
"e": 88326,
"s": 87591,
"text": "# Install these packages \npkgs = c(\"klaR\", \"caret\", \"ElemStatLearn\") \ninstall.packages(pkgs) \nlibrary('ElemStatLearn') \nlibrary(\"klaR\") \nlibrary(\"caret\") \n\n# Split the data in training and testing \ninx = sample(nrow(spam), round(nrow(spam) * 0.9)) \ntrain = spam[inx,] \ntest = spam[-inx,] \n\n# Define a matrix with features, X_train \n# And a vector with class labels, y_train \nX_train = train[,-58] \ny_train = train$spam \nX_test = test[,-58] \ny_test = test$spam \n# Train the model \nnb_model = train(X_train, y_train, method = 'nb', \n trControl = trainControl(method = 'cv', number = 3)) \n\n# Compute \npreds = predict(nb_model$finalModel, X_test)$class \ntbl = table(y_test, yhat = preds) \nsum(diag(tbl)) / sum(tbl) \n# 0.7217391 \n"
},
{
"code": null,
"e": 88467,
"s": 88326,
"text": "As we can see from the result, the accuracy of the Naive Bayes model is 72%. This means the model correctly classifies 72% of the instances."
},
{
"code": null,
"e": 88715,
"s": 88467,
"text": "k-means clustering aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean, serving as a prototype of the cluster. This results in a partitioning of the data space into Voronoi cells."
},
{
"code": null,
"e": 88990,
"s": 88715,
"text": "Given a set of observations (x1, x2, ..., xn), where each observation is a d-dimensional real vector, k-means clustering aims to partition the n observations into k groups G = {G1, G2, ..., Gk} so as to minimize the within-cluster sum of squares (WCSS) defined as follows −"
},
{
"code": null,
"e": 89014,
"s": 88990,
"text": "argmin∑i=1k∑x∈Si∥x−μi∥2"
},
{
"code": null,
"e": 89331,
"s": 89014,
"text": "The later formula shows the objective function that is minimized in order to find the optimal prototypes in k-means clustering. The intuition of the formula is that we would like to find groups that are different with each other and each member of each group should be similar with the other members of each cluster."
},
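{
"code": null,
"e": null,
"s": null,
"text": "The following short sketch is an addition for illustration (it is not part of the original example): it runs kmeans() on a few scaled columns of mtcars and checks that the tot.withinss value reported by R equals the within-cluster sum of squares computed directly from the formula above."
},
{
"code": null,
"e": null,
"s": null,
"text": "set.seed(1) \nX <- scale(mtcars[, c('mpg', 'wt', 'hp')]) \nfit <- kmeans(X, centers = 3, nstart = 20) \n\n# Recompute the WCSS by hand: squared distances of each point to its centroid \nwcss <- sum(sapply(1:3, function(i) { \n   members <- X[fit$cluster == i, , drop = FALSE] \n   sum(sweep(members, 2, fit$centers[i, ])^2) \n})) \n\nwcss \nfit$tot.withinss   # should match the value computed above\n"
},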
{
"code": null,
"e": 89416,
"s": 89331,
"text": "The following example demonstrates how to run the k-means clustering algorithm in R."
},
{
"code": null,
"e": 89851,
"s": 89416,
"text": "library(ggplot2)\n# Prepare Data \ndata = mtcars \n\n# We need to scale the data to have zero mean and unit variance \ndata <- scale(data) \n\n# Determine number of clusters \nwss <- (nrow(data)-1)*sum(apply(data,2,var)) \nfor (i in 2:dim(data)[2]) { \n wss[i] <- sum(kmeans(data, centers = i)$withinss) \n} \n\n# Plot the clusters \nplot(1:dim(data)[2], wss, type = \"b\", xlab = \"Number of Clusters\", \n ylab = \"Within groups sum of squares\")"
},
{
"code": null,
"e": 90182,
"s": 89851,
"text": "In order to find a good value for K, we can plot the within groups sum of squares for different values of K. This metric normally decreases as more groups are added, we would like to find a point where the decrease in the within groups sum of squares starts decreasing slowly. In the plot, this value is best represented by K = 6."
},
{
"code": null,
"e": 90275,
"s": 90182,
"text": "Now that the value of K has been defined, it is needed to run the algorithm with that value."
},
{
"code": null,
"e": 90491,
"s": 90275,
"text": "# K-Means Cluster Analysis\nfit <- kmeans(data, 5) # 5 cluster solution \n\n# get cluster means \naggregate(data,by = list(fit$cluster),FUN = mean) \n\n# append cluster assignment \ndata <- data.frame(data, fit$cluster) \n"
},
{
"code": null,
"e": 90806,
"s": 90491,
"text": "Let I = i1, i2, ..., in be a set of n binary attributes called items. Let D = t1, t2, ..., tm be a set of transactions called the database. Each transaction in D has a unique transaction ID and contains a subset of the items in I. A rule is defined as an implication of the form X ⇒ Y where X, Y ⊆ I and X ∩ Y = ∅."
},
{
"code": null,
"e": 90953,
"s": 90806,
"text": "The sets of items (for short item-sets) X and Y are called antecedent (left-hand-side or LHS) and consequent (right-hand-side or RHS) of the rule."
},
{
"code": null,
"e": 91160,
"s": 90953,
"text": "To illustrate the concepts, we use a small example from the supermarket domain. The set of items is I = {milk, bread, butter, beer} and a small database containing the items is shown in the following table."
},
{
"code": null,
"e": 91514,
"s": 91160,
"text": "An example rule for the supermarket could be {milk, bread} ⇒ {butter} meaning that if milk and bread is bought, customers also buy butter. To select interesting rules from the set of all possible rules, constraints on various measures of significance and interest can be used. The best-known constraints are minimum thresholds on support and confidence."
},
{
"code": null,
"e": 91897,
"s": 91514,
"text": "The support supp(X) of an item-set X is defined as the proportion of transactions in the data set which contain the item-set. In the example database in Table 1, the item-set {milk, bread} has a support of 2/5 = 0.4 since it occurs in 40% of all transactions (2 out of 5 transactions). Finding frequent item-sets can be seen as a simplification of the unsupervised learning problem."
},
{
"code": null,
"e": 92372,
"s": 91897,
"text": "The confidence of a rule is defined conf(X ⇒ Y ) = supp(X ∪ Y )/supp(X). For example, the rule {milk, bread} ⇒ {butter} has a confidence of 0.2/0.4 = 0.5 in the database in Table 1, which means that for 50% of the transactions containing milk and bread the rule is correct. Confidence can be interpreted as an estimate of the probability P(Y|X), the probability of finding the RHS of the rule in transactions under the condition that these transactions also contain the LHS."
},
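{
"code": null,
"e": null,
"s": null,
"text": "As a quick added illustration (not part of the original chapter; the five transactions below are invented so that the numbers match the ones quoted above), support and confidence can be computed directly from a list of transactions in base R."
},
{
"code": null,
"e": null,
"s": null,
"text": "# Five invented transactions over the items milk, bread, butter and beer \ntransactions <- list( \n   c('milk', 'bread'), \n   c('milk', 'bread', 'butter'), \n   c('bread', 'butter'), \n   c('milk'), \n   c('beer') \n) \n\ncontains <- function(itemset) { \n   sapply(transactions, function(t) all(itemset %in% t)) \n} \nsupp <- function(itemset) mean(contains(itemset)) \n\nsupp(c('milk', 'bread'))             # 0.4 \nsupp(c('milk', 'bread', 'butter'))   # 0.2 \n\n# conf(X => Y) = supp(X union Y) / supp(X) \nsupp(c('milk', 'bread', 'butter')) / supp(c('milk', 'bread'))   # 0.5\n"
},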
{
"code": null,
"e": 92475,
"s": 92372,
"text": "In the script located in bda/part3/apriori.R the code to implement the apriori algorithm can be found."
},
{
"code": null,
"e": 93393,
"s": 92475,
"text": "# Load the library for doing association rules\n# install.packages(’arules’) \nlibrary(arules) \n\n# Data preprocessing \ndata(\"AdultUCI\") \nAdultUCI[1:2,] \nAdultUCI[[\"fnlwgt\"]] <- NULL \nAdultUCI[[\"education-num\"]] <- NULL \n\nAdultUCI[[ \"age\"]] <- ordered(cut(AdultUCI[[ \"age\"]], c(15,25,45,65,100)), \n labels = c(\"Young\", \"Middle-aged\", \"Senior\", \"Old\")) \nAdultUCI[[ \"hours-per-week\"]] <- ordered(cut(AdultUCI[[ \"hours-per-week\"]], \n c(0,25,40,60,168)), labels = c(\"Part-time\", \"Full-time\", \"Over-time\", \"Workaholic\")) \nAdultUCI[[ \"capital-gain\"]] <- ordered(cut(AdultUCI[[ \"capital-gain\"]], \n c(-Inf,0,median(AdultUCI[[ \"capital-gain\"]][AdultUCI[[ \"capitalgain\"]]>0]),Inf)), \n labels = c(\"None\", \"Low\", \"High\")) \nAdultUCI[[ \"capital-loss\"]] <- ordered(cut(AdultUCI[[ \"capital-loss\"]], \n c(-Inf,0, median(AdultUCI[[ \"capital-loss\"]][AdultUCI[[ \"capitalloss\"]]>0]),Inf)), \n labels = c(\"none\", \"low\", \"high\"))\n"
},
{
"code": null,
"e": 93535,
"s": 93393,
"text": "In order to generate rules using the apriori algorithm, we need to create a transaction matrix. The following code shows how to do this in R."
},
{
"code": null,
"e": 94424,
"s": 93535,
"text": "# Convert the data into a transactions format\nAdult <- as(AdultUCI, \"transactions\") \nAdult \n# transactions in sparse format with \n# 48842 transactions (rows) and \n# 115 items (columns) \n\nsummary(Adult) \n# Plot frequent item-sets \nitemFrequencyPlot(Adult, support = 0.1, cex.names = 0.8) \n\n# generate rules \nmin_support = 0.01 \nconfidence = 0.6 \nrules <- apriori(Adult, parameter = list(support = min_support, confidence = confidence))\n\nrules \ninspect(rules[100:110, ]) \n# lhs rhs support confidence lift\n# {occupation = Farming-fishing} => {sex = Male} 0.02856148 0.9362416 1.4005486\n# {occupation = Farming-fishing} => {race = White} 0.02831579 0.9281879 1.0855456\n# {occupation = Farming-fishing} => {native-country 0.02671881 0.8758389 0.9759474\n = United-States} \n"
},
{
"code": null,
"e": 94861,
"s": 94424,
"text": "A Decision Tree is an algorithm used for supervised learning problems such as classification or regression. A decision tree or a classification tree is a tree in which each internal (nonleaf) node is labeled with an input feature. The arcs coming from a node labeled with a feature are labeled with each of the possible values of the feature. Each leaf of the tree is labeled with a class or a probability distribution over the classes."
},
{
"code": null,
"e": 95374,
"s": 94861,
"text": "A tree can be \"learned\" by splitting the source set into subsets based on an attribute value test. This process is repeated on each derived subset in a recursive manner called recursive partitioning. The recursion is completed when the subset at a node has all the same value of the target variable, or when splitting no longer adds value to the predictions. This process of top-down induction of decision trees is an example of a greedy algorithm, and it is the most common strategy for learning decision trees."
},
{
"code": null,
"e": 95433,
"s": 95374,
"text": "Decision trees used in data mining are of two main types −"
},
{
"code": null,
"e": 95536,
"s": 95433,
"text": "Classification tree − when the response is a nominal variable, for example if an email is spam or not."
},
{
"code": null,
"e": 95639,
"s": 95536,
"text": "Classification tree − when the response is a nominal variable, for example if an email is spam or not."
},
{
"code": null,
"e": 95747,
"s": 95639,
"text": "Regression tree − when the predicted outcome can be considered a real number (e.g. the salary of a worker)."
},
{
"code": null,
"e": 95855,
"s": 95747,
"text": "Regression tree − when the predicted outcome can be considered a real number (e.g. the salary of a worker)."
},
{
"code": null,
"e": 96172,
"s": 95855,
"text": "Decision trees are a simple method, and as such has some problems. One of this issues is the high variance in the resulting models that decision trees produce. In order to alleviate this problem, ensemble methods of decision trees were developed. There are two groups of ensemble methods currently used extensively −"
},
{
"code": null,
"e": 96403,
"s": 96172,
"text": "Bagging decision trees − These trees are used to build multiple decision trees by repeatedly resampling training data with replacement, and voting the trees for a consensus prediction. This algorithm has been called random forest."
},
{
"code": null,
"e": 96634,
"s": 96403,
"text": "Bagging decision trees − These trees are used to build multiple decision trees by repeatedly resampling training data with replacement, and voting the trees for a consensus prediction. This algorithm has been called random forest."
},
{
"code": null,
"e": 96915,
"s": 96634,
"text": "Boosting decision trees − Gradient boosting combines weak learners; in this case, decision trees into a single strong learner, in an iterative fashion. It fits a weak tree to the data and iteratively keeps fitting weak learners in order to correct the error of the previous model."
},
{
"code": null,
"e": 97196,
"s": 96915,
"text": "Boosting decision trees − Gradient boosting combines weak learners; in this case, decision trees into a single strong learner, in an iterative fashion. It fits a weak tree to the data and iteratively keeps fitting weak learners in order to correct the error of the previous model."
},
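{
"code": null,
"e": null,
"s": null,
"text": "As an added illustration of the bagging idea described above (this sketch is not part of the original chapter and assumes the randomForest package is installed), a random forest can be fit in a single call; it builds many trees on bootstrap resamples of the data and aggregates their votes."
},
{
"code": null,
"e": null,
"s": null,
"text": "# install.packages('randomForest') \nlibrary(randomForest) \nset.seed(1) \n\n# Bagging-style ensemble on the iris data: 200 trees on bootstrap resamples \nrf <- randomForest(Species ~ ., data = iris, ntree = 200) \nrf$confusion   # out-of-bag confusion matrix\n"
},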
{
"code": null,
"e": 98542,
"s": 97196,
"text": "# Install the party package\n# install.packages('party') \nlibrary(party) \nlibrary(ggplot2) \n\nhead(diamonds) \n# We will predict the cut of diamonds using the features available in the \ndiamonds dataset. \nct = ctree(cut ~ ., data = diamonds) \n\n# plot(ct, main=\"Conditional Inference Tree\") \n# Example output \n# Response: cut \n# Inputs: carat, color, clarity, depth, table, price, x, y, z \n\n# Number of observations: 53940 \n# \n# 1) table <= 57; criterion = 1, statistic = 10131.878 \n# 2) depth <= 63; criterion = 1, statistic = 8377.279 \n# 3) table <= 56.4; criterion = 1, statistic = 226.423 \n# 4) z <= 2.64; criterion = 1, statistic = 70.393 \n# 5) clarity <= VS1; criterion = 0.989, statistic = 10.48 \n# 6) color <= E; criterion = 0.997, statistic = 12.829 \n# 7)* weights = 82 \n# 6) color > E \n\n#Table of prediction errors \ntable(predict(ct), diamonds$cut) \n# Fair Good Very Good Premium Ideal \n# Fair 1388 171 17 0 14 \n# Good 102 2912 499 26 27 \n# Very Good 54 998 3334 249 355 \n# Premium 44 711 5054 11915 1167 \n# Ideal 22 114 3178 1601 19988 \n# Estimated class probabilities \nprobs = predict(ct, newdata = diamonds, type = \"prob\") \nprobs = do.call(rbind, probs) \nhead(probs)\n"
},
{
"code": null,
"e": 98858,
"s": 98542,
"text": "Logistic regression is a classification model in which the response variable is categorical. It is an algorithm that comes from statistics and is used for supervised classification problems. In logistic regression we seek to find the vector β of parameters in the following equation that minimize the cost function."
},
{
"code": null,
"e": 98900,
"s": 98858,
"text": "logit(pi)=ln(pi1−pi)=β0+β1x1,i+...+βkxk,i"
},
{
"code": null,
"e": 99087,
"s": 98900,
"text": "The following code demonstrates how to fit a logistic regression model in R. We will use here the spam dataset to demonstrate logistic regression, the same that was used for Naive Bayes."
},
{
"code": null,
"e": 99275,
"s": 99087,
"text": "From the predictions results in terms of accuracy, we find that the regression model achieves a 92.5% accuracy in the test set, compared to the 72% achieved by the Naive Bayes classifier."
},
{
"code": null,
"e": 100694,
"s": 99275,
"text": "library(ElemStatLearn)\nhead(spam) \n\n# Split dataset in training and testing \ninx = sample(nrow(spam), round(nrow(spam) * 0.8)) \ntrain = spam[inx,] \ntest = spam[-inx,] \n\n# Fit regression model \nfit = glm(spam ~ ., data = train, family = binomial()) \nsummary(fit) \n\n# Call: \n# glm(formula = spam ~ ., family = binomial(), data = train) \n# \n\n# Deviance Residuals: \n# Min 1Q Median 3Q Max \n# -4.5172 -0.2039 0.0000 0.1111 5.4944\n# Coefficients: \n# Estimate Std. Error z value Pr(>|z|) \n# (Intercept) -1.511e+00 1.546e-01 -9.772 < 2e-16 *** \n# A.1 -4.546e-01 2.560e-01 -1.776 0.075720 . \n# A.2 -1.630e-01 7.731e-02 -2.108 0.035043 * \n# A.3 1.487e-01 1.261e-01 1.179 0.238591 \n# A.4 2.055e+00 1.467e+00 1.401 0.161153 \n# A.5 6.165e-01 1.191e-01 5.177 2.25e-07 *** \n# A.6 7.156e-01 2.768e-01 2.585 0.009747 ** \n# A.7 2.606e+00 3.917e-01 6.652 2.88e-11 *** \n# A.8 6.750e-01 2.284e-01 2.955 0.003127 ** \n# A.9 1.197e+00 3.362e-01 3.559 0.000373 *** \n# Signif. codes: 0 *** 0.001 ** 0.01 * 0.05 . 0.1 1 \n\n### Make predictions \npreds = predict(fit, test, type = ’response’) \npreds = ifelse(preds > 0.5, 1, 0) \ntbl = table(target = test$spam, preds) \ntbl \n\n# preds \n# target 0 1 \n# email 535 23 \n# spam 46 316 \nsum(diag(tbl)) / sum(tbl) \n# 0.925\n"
},
{
"code": null,
"e": 101009,
"s": 100694,
"text": "Time series is a sequence of observations of categorical or numeric variables indexed by a date, or timestamp. A clear example of time series data is the time series of a stock price. In the following table, we can see the basic structure of time series data. In this case the observations are recorded every hour."
},
{
"code": null,
"e": 101122,
"s": 101009,
"text": "Normally, the first step in time series analysis is to plot the series, this is normally done with a line chart."
},
{
"code": null,
"e": 101347,
"s": 101122,
"text": "The most common application of time series analysis is forecasting future values of a numeric value using the temporal structure of the data. This means, the available observations are used to predict values from the future."
},
{
"code": null,
"e": 101550,
"s": 101347,
"text": "The temporal ordering of the data, implies that traditional regression methods are not useful. In order to build robust forecast, we need models that take into account the temporal ordering of the data."
},
{
"code": null,
"e": 101900,
"s": 101550,
"text": "The most widely used model for Time Series Analysis is called Autoregressive Moving Average (ARMA). The model consists of two parts, an autoregressive (AR) part and a moving average (MA) part. The model is usually then referred to as the ARMA(p, q) model where p is the order of the autoregressive part and q is the order of the moving average part."
},
{
"code": null,
"e": 101991,
"s": 101900,
"text": "The AR(p) is read as an autoregressive model of order p. Mathematically it is written as −"
},
{
"code": null,
"e": 102011,
"s": 101991,
"text": "Xt=c+∑i=1PφiXt−i+εt"
},
{
"code": null,
"e": 102236,
"s": 102011,
"text": "where {φ1, ..., φp} are parameters to be estimated, c is a constant, and the random variable εt represents the white noise. Some constraints are necessary on the values of the parameters so that the model remains stationary."
},
{
"code": null,
"e": 102303,
"s": 102236,
"text": "The notation MA(q) refers to the moving average model of order q −"
},
{
"code": null,
"e": 102323,
"s": 102303,
"text": "Xt=μ+εt+∑i=1qθiεt−i"
},
{
"code": null,
"e": 102460,
"s": 102323,
"text": "where the θ1, ..., θq are the parameters of the model, μ is the expectation of Xt, and the εt, εt − 1, ... are, white noise error terms."
},
{
"code": null,
"e": 102608,
"s": 102460,
"text": "The ARMA(p, q) model combines p autoregressive terms and q moving-average terms. Mathematically the model is expressed with the following formula −"
},
{
"code": null,
"e": 102640,
"s": 102608,
"text": "Xt=c+εt+∑i=1PφiXt−1+∑i=1qθiεt−i"
},
{
"code": null,
"e": 102721,
"s": 102640,
"text": "We can see that the ARMA(p, q) model is a combination of AR(p) and MA(q) models."
},
{
"code": null,
"e": 103117,
"s": 102721,
"text": "To give some intuition of the model consider that the AR part of the equation seeks to estimate parameters for Xt − i observations of in order to predict the value of the variable in Xt. It is in the end a weighted average of the past values. The MA section uses the same approach but with the error of previous observations, εt − i. So in the end, the result of the model is a weighted average."
},
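{
"code": null,
"e": null,
"s": null,
"text": "Before fitting a model to real data it can help to look at the two components in isolation. The following added sketch (with arbitrarily chosen coefficients) simulates an AR(1), an MA(1) and an ARMA(1,1) series with arima.sim and plots them."
},
{
"code": null,
"e": null,
"s": null,
"text": "set.seed(123) \nn <- 200 \n\n# Simulate the three processes with arbitrary example coefficients \nar1  <- arima.sim(model = list(ar = 0.7), n = n) \nma1  <- arima.sim(model = list(ma = 0.5), n = n) \narma <- arima.sim(model = list(ar = 0.7, ma = 0.5), n = n) \n\npar(mfrow = c(3, 1)) \nplot.ts(ar1,  main = 'AR(1), phi = 0.7') \nplot.ts(ma1,  main = 'MA(1), theta = 0.5') \nplot.ts(arma, main = 'ARMA(1,1)')\n"
},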
{
"code": null,
"e": 103194,
"s": 103117,
"text": "The following code snippet demonstrates how to implement an ARMA(p, q) in R."
},
{
"code": null,
"e": 103375,
"s": 103194,
"text": "# install.packages(\"forecast\")\nlibrary(\"forecast\") \n\n# Read the data \ndata = scan('fancy.dat') \nts_data <- ts(data, frequency = 12, start = c(1987,1)) \nts_data \nplot.ts(ts_data)\n"
},
{
"code": null,
"e": 103557,
"s": 103375,
"text": "Plotting the data is normally the first step to find out if there is a temporal structure in the data. We can see from the plot that there are strong spikes at the end of each year."
},
{
"code": null,
"e": 103688,
"s": 103557,
"text": "The following code fits an ARMA model to the data. It runs several combinations of models and selects the one that has less error."
},
{
"code": null,
"e": 104249,
"s": 103688,
"text": "# Fit the ARMA model\nfit = auto.arima(ts_data) \nsummary(fit) \n\n# Series: ts_data \n# ARIMA(1,1,1)(0,1,1)[12] \n# Coefficients: \n# ar1 ma1 sma1 \n# 0.2401 -0.9013 0.7499 \n# s.e. 0.1427 0.0709 0.1790 \n\n# \n# sigma^2 estimated as 15464184: log likelihood = -693.69 \n# AIC = 1395.38 AICc = 1395.98 BIC = 1404.43 \n\n# Training set error measures: \n# ME RMSE MAE MPE MAPE MASE ACF1 \n# Training set 328.301 3615.374 2171.002 -2.481166 15.97302 0.4905797 -0.02521172\n"
},
{
"code": null,
"e": 104564,
"s": 104249,
"text": "In this chapter, we will be using the data scraped in the part 1 of the book. The data has text that describes profiles of freelancers, and the hourly rate they are charging in USD. The idea of the following section is to fit a model that given the skills of a freelancer, we are able to predict its hourly salary."
},
{
"code": null,
"e": 104834,
"s": 104564,
"text": "The following code shows how to convert the raw text that in this case has skills of a user in a bag of words matrix. For this we use an R library called tm. This means that for each word in the corpus we create variable with the amount of occurrences of each variable."
},
{
"code": null,
"e": 105494,
"s": 104834,
"text": "library(tm)\nlibrary(data.table) \n\nsource('text_analytics/text_analytics_functions.R') \ndata = fread('text_analytics/data/profiles.txt') \nrate = as.numeric(data$rate) \nkeep = !is.na(rate) \nrate = rate[keep] \n\n### Make bag of words of title and body \nX_all = bag_words(data$user_skills[keep]) \nX_all = removeSparseTerms(X_all, 0.999) \nX_all \n\n# <<DocumentTermMatrix (documents: 389, terms: 1422)>> \n# Non-/sparse entries: 4057/549101 \n# Sparsity : 99% \n# Maximal term length: 80 \n# Weighting : term frequency - inverse document frequency (normalized) (tf-idf) \n\n### Make a sparse matrix with all the data \nX_all <- as_sparseMatrix(X_all)\n"
},
{
"code": null,
"e": 105807,
"s": 105494,
"text": "Now that we have the text represented as a sparse matrix we can fit a model that will give a sparse solution. A good alternative for this case is using the LASSO (least absolute shrinkage and selection operator). This is a regression model that is able to select the most relevant features to predict the target."
},
{
"code": null,
"e": 106408,
"s": 105807,
"text": "train_inx = 1:200\nX_train = X_all[train_inx, ] \ny_train = rate[train_inx] \nX_test = X_all[-train_inx, ] \ny_test = rate[-train_inx] \n\n# Train a regression model \nlibrary(glmnet) \nfit <- cv.glmnet(x = X_train, y = y_train, \n family = 'gaussian', alpha = 1, \n nfolds = 3, type.measure = 'mae') \nplot(fit) \n\n# Make predictions \npredictions = predict(fit, newx = X_test) \npredictions = as.vector(predictions[,1]) \nhead(predictions) \n\n# 36.23598 36.43046 51.69786 26.06811 35.13185 37.66367 \n# We can compute the mean absolute error for the test data \nmean(abs(y_test - predictions)) \n# 15.02175\n"
},
{
"code": null,
"e": 106638,
"s": 106408,
"text": "Now we have a model that given a set of skills is able to predict the hourly salary of a freelancer. If more data is collected, the performance of the model will improve, but the code to implement this pipeline would be the same."
},
{
"code": null,
"e": 106887,
"s": 106638,
"text": "Online learning is a subfield of machine learning that allows to scale supervised learning models to massive datasets. The basic idea is that we don’t need to read all the data in memory to fit a model, we only need to read each instance at a time."
},
{
"code": null,
"e": 107135,
"s": 106887,
"text": "In this case, we will show how to implement an online learning algorithm using logistic regression. As in most of supervised learning algorithms, there is a cost function that is minimized. In logistic regression, the cost function is defined as −"
},
{
"code": null,
"e": 107192,
"s": 107135,
"text": "J(θ)=−1m[∑i=1my(i)log(hθ(x(i)))+(1−y(i))log(1−hθ(x(i)))]"
},
{
"code": null,
"e": 107347,
"s": 107192,
"text": "where J(θ) represents the cost function and hθ(x) represents the hypothesis. In the case of logistic regression it is defined with the following formula −"
},
{
"code": null,
"e": 107361,
"s": 107347,
"text": "hθ(x)=11+eθTx"
},
{
"code": null,
"e": 107628,
"s": 107361,
"text": "Now that we have defined the cost function we need to find an algorithm to minimize it. The simplest algorithm for achieving this is called stochastic gradient descent. The update rule of the algorithm for the weights of the logistic regression model is defined as −"
},
{
"code": null,
"e": 107647,
"s": 107628,
"text": "θj:=θj−α(hθ(x)−y)x"
},
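{
"code": null,
"e": null,
"s": null,
"text": "To make the update rule concrete, the following added sketch performs plain stochastic gradient descent for logistic regression in R on randomly generated data (a toy set, not the titanic data used below), applying the update once per training instance."
},
{
"code": null,
"e": null,
"s": null,
"text": "set.seed(42) \nn <- 1000; p <- 3 \nX <- cbind(1, matrix(rnorm(n * p), n, p))   # first column is the intercept \ntrue_theta <- c(-0.5, 1, -2, 0.5) \ny <- rbinom(n, 1, 1 / (1 + exp(-(X %*% true_theta)))) \n\nsigmoid <- function(z) 1 / (1 + exp(-z)) \ntheta <- rep(0, p + 1) \nalpha <- 0.1 \n\n# One pass over the data, one instance at a time \nfor (i in 1:n) { \n   h <- sigmoid(sum(X[i, ] * theta)) \n   theta <- theta - alpha * (h - y[i]) * X[i, ] \n} \ntheta   # should move towards true_theta\n"
},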
{
"code": null,
"e": 108071,
"s": 107647,
"text": "There are several implementations of the following algorithm, but the one implemented in the vowpal wabbit library is by far the most developed one. The library allows training of large scale regression models and uses small amounts of RAM. In the creators own words it is described as: \"The Vowpal Wabbit (VW) project is a fast out-of-core learning system sponsored by Microsoft Research and (previously) Yahoo! Research\"."
},
{
"code": null,
"e": 108227,
"s": 108071,
"text": "We will be working with the titanic dataset from a kaggle competition. The original data can be found in the bda/part3/vw folder. Here, we have two files −"
},
{
"code": null,
"e": 108274,
"s": 108227,
"text": "We have training data (train_titanic.csv), and"
},
{
"code": null,
"e": 108342,
"s": 108274,
"text": "unlabeled data in order to make new predictions (test_titanic.csv)."
},
{
"code": null,
"e": 108608,
"s": 108342,
"text": "In order to convert the csv format to the vowpal wabbit input format use the csv_to_vowpal_wabbit.py python script. You will obviously need to have python installed for this. Navigate to the bda/part3/vw folder, open the terminal and execute the following command −"
},
{
"code": null,
"e": 108640,
"s": 108608,
"text": "python csv_to_vowpal_wabbit.py\n"
},
{
"code": null,
"e": 108774,
"s": 108640,
"text": "Note that for this section, if you are using windows you will need to install a Unix command line, enter the cygwin website for that."
},
{
"code": null,
"e": 108864,
"s": 108774,
"text": "Open the terminal and also in the folder bda/part3/vw and execute the following command −"
},
{
"code": null,
"e": 109010,
"s": 108864,
"text": "vw train_titanic.vw -f model.vw --binary --passes 20 -c -q ff --sgd --l1 \n0.00000001 --l2 0.0000001 --learning_rate 0.5 --loss_function logistic\n"
},
{
"code": null,
"e": 109069,
"s": 109010,
"text": "Let us break down what each argument of the vw call means."
},
{
"code": null,
"e": 109168,
"s": 109069,
"text": "-f model.vw − means that we are saving the model in the model.vw file for making predictions later"
},
{
"code": null,
"e": 109267,
"s": 109168,
"text": "-f model.vw − means that we are saving the model in the model.vw file for making predictions later"
},
{
"code": null,
"e": 109333,
"s": 109267,
"text": "--binary − Reports loss as binary classification with -1,1 labels"
},
{
"code": null,
"e": 109399,
"s": 109333,
"text": "--binary − Reports loss as binary classification with -1,1 labels"
},
{
"code": null,
"e": 109460,
"s": 109399,
"text": "--passes 20 − The data is used 20 times to learn the weights"
},
{
"code": null,
"e": 109521,
"s": 109460,
"text": "--passes 20 − The data is used 20 times to learn the weights"
},
{
"code": null,
"e": 109546,
"s": 109521,
"text": "-c − create a cache file"
},
{
"code": null,
"e": 109571,
"s": 109546,
"text": "-c − create a cache file"
},
{
"code": null,
"e": 109621,
"s": 109571,
"text": "-q ff − Use quadratic features in the f namespace"
},
{
"code": null,
"e": 109671,
"s": 109621,
"text": "-q ff − Use quadratic features in the f namespace"
},
{
"code": null,
"e": 109796,
"s": 109671,
"text": "--sgd − use regular/classic/simple stochastic gradient descent update, i.e., nonadaptive, non-normalized, and non-invariant."
},
{
"code": null,
"e": 109921,
"s": 109796,
"text": "--sgd − use regular/classic/simple stochastic gradient descent update, i.e., nonadaptive, non-normalized, and non-invariant."
},
{
"code": null,
"e": 109963,
"s": 109921,
"text": "--l1 --l2 − L1 and L2 norm regularization"
},
{
"code": null,
"e": 110005,
"s": 109963,
"text": "--l1 --l2 − L1 and L2 norm regularization"
},
{
"code": null,
"e": 110084,
"s": 110005,
"text": "--learning_rate 0.5 − The learning rate αas defined in the update rule formula"
},
{
"code": null,
"e": 110163,
"s": 110084,
"text": "--learning_rate 0.5 − The learning rate αas defined in the update rule formula"
},
{
"code": null,
"e": 110346,
"s": 110163,
"text": "The following code shows the results of running the regression model in the command line. In the results, we get the average log-loss and a small report of the algorithm performance."
},
{
"code": null,
"e": 112101,
"s": 110346,
"text": "-loss_function logistic\ncreating quadratic features for pairs: ff \nusing l1 regularization = 1e-08 \nusing l2 regularization = 1e-07 \n\nfinal_regressor = model.vw \nNum weight bits = 18 \nlearning rate = 0.5 \ninitial_t = 1 \npower_t = 0.5 \ndecay_learning_rate = 1 \nusing cache_file = train_titanic.vw.cache \nignoring text input in favor of cache input \nnum sources = 1 \n\naverage since example example current current current \nloss last counter weight label predict features \n0.000000 0.000000 1 1.0 -1.0000 -1.0000 57 \n0.500000 1.000000 2 2.0 1.0000 -1.0000 57 \n0.250000 0.000000 4 4.0 1.0000 1.0000 57 \n0.375000 0.500000 8 8.0 -1.0000 -1.0000 73 \n0.625000 0.875000 16 16.0 -1.0000 1.0000 73 \n0.468750 0.312500 32 32.0 -1.0000 -1.0000 57 \n0.468750 0.468750 64 64.0 -1.0000 1.0000 43 \n0.375000 0.281250 128 128.0 1.0000 -1.0000 43 \n0.351562 0.328125 256 256.0 1.0000 -1.0000 43 \n0.359375 0.367188 512 512.0 -1.0000 1.0000 57 \n0.274336 0.274336 1024 1024.0 -1.0000 -1.0000 57 h \n0.281938 0.289474 2048 2048.0 -1.0000 -1.0000 43 h \n0.246696 0.211454 4096 4096.0 -1.0000 -1.0000 43 h \n0.218922 0.191209 8192 8192.0 1.0000 1.0000 43 h \n\nfinished run \nnumber of examples per pass = 802 \npasses used = 11 \nweighted example sum = 8822 \nweighted label sum = -2288 \naverage loss = 0.179775 h \nbest constant = -0.530826 \nbest constant’s loss = 0.659128 \ntotal feature number = 427878\n"
},
{
"code": null,
"e": 112179,
"s": 112101,
"text": "Now we can use the model.vw we trained to generate predictions with new data."
},
{
"code": null,
"e": 112237,
"s": 112179,
"text": "vw -d test_titanic.vw -t -i model.vw -p predictions.txt \n"
},
{
"code": null,
"e": 112393,
"s": 112237,
"text": "The predictions generated in the previous command are not normalized to fit between the [0, 1] range. In order to do this, we use a sigmoid transformation."
},
{
"code": null,
"e": 112659,
"s": 112393,
"text": "# Read the predictions\npreds = fread('vw/predictions.txt') \n\n# Define the sigmoid function \nsigmoid = function(x) { \n 1 / (1 + exp(-x)) \n} \nprobs = sigmoid(preds[[1]]) \n\n# Generate class labels \npreds = ifelse(probs > 0.5, 1, 0) \nhead(preds) \n# [1] 0 1 0 0 1 0 "
},
{
"code": null,
"e": 112692,
"s": 112659,
"text": "\n 65 Lectures \n 6 hours \n"
},
{
"code": null,
"e": 112711,
"s": 112692,
"text": " Arnab Chakraborty"
},
{
"code": null,
"e": 112746,
"s": 112711,
"text": "\n 18 Lectures \n 1.5 hours \n"
},
{
"code": null,
"e": 112786,
"s": 112746,
"text": " Pranjal Srivastava, Harshit Srivastava"
},
{
"code": null,
"e": 112819,
"s": 112786,
"text": "\n 23 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 112830,
"s": 112819,
"text": " John Shea"
},
{
"code": null,
"e": 112865,
"s": 112830,
"text": "\n 18 Lectures \n 1.5 hours \n"
},
{
"code": null,
"e": 112885,
"s": 112865,
"text": " Pranjal Srivastava"
},
{
"code": null,
"e": 112920,
"s": 112885,
"text": "\n 46 Lectures \n 3.5 hours \n"
},
{
"code": null,
"e": 112940,
"s": 112920,
"text": " Pranjal Srivastava"
},
{
"code": null,
"e": 112975,
"s": 112940,
"text": "\n 37 Lectures \n 3.5 hours \n"
},
{
"code": null,
"e": 113015,
"s": 112975,
"text": " Pranjal Srivastava, Harshit Srivastava"
},
{
"code": null,
"e": 113022,
"s": 113015,
"text": " Print"
},
{
"code": null,
"e": 113033,
"s": 113022,
"text": " Add Notes"
}
] |
TypeScript - Number toPrecision() | This method returns a string representing the number object to the specified precision.
number.toPrecision( [ precision ] )
precision − An integer specifying the number of significant digits.
Returns a string representing a Number object in fixed-point or exponential notation rounded to precision significant digits.
var num = new Number(7.123456);
console.log(num.toPrecision());
console.log(num.toPrecision(1));
console.log(num.toPrecision(2));
On compiling, it will generate the same code in JavaScript.
The code will produce the following output −
7.123456
7
7.1
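Note that when the requested precision is smaller than the number of digits in the integer part of the value, the result is returned in exponential notation; for example, a call such as (1234.5678).toPrecision(2) evaluates to the string "1.2e+3".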
Bookmark this page | [
{
"code": null,
"e": 2136,
"s": 2048,
"text": "This method returns a string representing the number object to the specified precision."
},
{
"code": null,
"e": 2173,
"s": 2136,
"text": "number.toPrecision( [ precision ] )\n"
},
{
"code": null,
"e": 2241,
"s": 2173,
"text": "precision − An integer specifying the number of significant digits."
},
{
"code": null,
"e": 2367,
"s": 2241,
"text": "Returns a string representing a Number object in fixed-point or exponential notation rounded to precision significant digits."
},
{
"code": null,
"e": 2501,
"s": 2367,
"text": "var num = new Number(7.123456); \nconsole.log(num.toPrecision()); \nconsole.log(num.toPrecision(1)); \nconsole.log(num.toPrecision(2));\n"
},
{
"code": null,
"e": 2561,
"s": 2501,
"text": "On compiling, it will generate the same code in JavaScript."
},
{
"code": null,
"e": 2606,
"s": 2561,
"text": "The code will produce the following output −"
},
{
"code": null,
"e": 2624,
"s": 2606,
"text": "7.123456 \n7 \n7.1\n"
},
{
"code": null,
"e": 2657,
"s": 2624,
"text": "\n 45 Lectures \n 4 hours \n"
},
{
"code": null,
"e": 2671,
"s": 2657,
"text": " Antonio Papa"
},
{
"code": null,
"e": 2704,
"s": 2671,
"text": "\n 41 Lectures \n 7 hours \n"
},
{
"code": null,
"e": 2718,
"s": 2704,
"text": " Haider Malik"
},
{
"code": null,
"e": 2753,
"s": 2718,
"text": "\n 60 Lectures \n 2.5 hours \n"
},
{
"code": null,
"e": 2773,
"s": 2753,
"text": " Skillbakerystudios"
},
{
"code": null,
"e": 2806,
"s": 2773,
"text": "\n 77 Lectures \n 8 hours \n"
},
{
"code": null,
"e": 2820,
"s": 2806,
"text": " Sean Bradley"
},
{
"code": null,
"e": 2855,
"s": 2820,
"text": "\n 77 Lectures \n 3.5 hours \n"
},
{
"code": null,
"e": 2871,
"s": 2855,
"text": " TELCOMA Global"
},
{
"code": null,
"e": 2904,
"s": 2871,
"text": "\n 19 Lectures \n 3 hours \n"
},
{
"code": null,
"e": 2924,
"s": 2904,
"text": " Christopher Frewin"
},
{
"code": null,
"e": 2931,
"s": 2924,
"text": " Print"
},
{
"code": null,
"e": 2942,
"s": 2931,
"text": " Add Notes"
}
] |
Java Program to Find Sum of the Series 1/1! + 2/2! + 3/3! + ......1/N! - GeeksforGeeks | 13 Jan, 2022
The factorial of a number is the product of that number with all positive integers less than it, down to 1. A natural question is whether the series converges as N grows. Since i/i! = 1/(i−1)!, the infinite series ∑ i/i! equals e ≈ 2.71828, so the partial sums computed by the program approach that value.
Illustration:
n! = n * (n-1) × (n-2) × (n-3) × (n-4) × ...... × 4 × 3 × 2 × 1
Input : n = 5
Processing : 1/1! + 2/2! + 3/3! + 4/4! + 5/5!
1 + 2/2 + 3/6 + 4/24 + 5/120
Output : 2.708333333333333
Approach :
Enter the number of terms N.
Create a function to calculate the sum of series, say, calculateSum.
In calculateSum function(), create a variable sum which stores the total sum of the series.
Run a loop N times.
Call factorial function() to calculate the factorial of a given number.
Return sum.
Implementation:
Java
// Java Program to Find Sum of the Series// 1/1! + 2/2! + 3/3! + ......1/N! // Importing java generic librariesimport java.io.*; class GFG { // Function(recursive) to calculate factorial public static double factorial(int i) { // Step1: Base case if (i == 1) { return 1; } // Step2&3: Recursion execution & call statements return i * factorial(i - 1); } // Function to calculate sum of series public static double calculateSum(int N) { // Store total_sum in sum double sum = 0; // Iteration by running a loop N times for (int i = 1; i <= N; i++) { sum = sum + ((double)i / factorial(i)); } // Return calculated final sum return sum; } // Main driver method public static void main(String[] args) { /* No of terms in series taken inn order to show output */ int N = 5; // Print sum of series by // calling function calculating sum of series System.out.println("The sum of series upto " + N + " terms is : " + calculateSum(N)); }}
The sum of series upto 5 terms is : 2.708333333333333
Time Complexity: O(n^2), since factorial(i) is recomputed recursively for every term; maintaining a running factorial would reduce it to O(n).
How to Iterate HashMap in Java? | [
{
"code": null,
"e": 23948,
"s": 23920,
"text": "\n13 Jan, 2022"
},
{
"code": null,
"e": 24320,
"s": 23948,
"text": "Factorial of a number simply returns out the multiplication of that number with all numbers lesser than the number up to 1 over which factorial is applied. Now the question arises whether the series is convergent or divergent. According to concepts of Infinite series and factorial in mathematics, the series may not converge but it can contain a convergent subsequence."
},
{
"code": null,
"e": 24334,
"s": 24320,
"text": "Illustration:"
},
{
"code": null,
"e": 24398,
"s": 24334,
"text": "n! = n * (n-1) × (n-2) × (n-3) × (n-4) × ...... × 4 × 3 × 2 × 1"
},
{
"code": null,
"e": 24553,
"s": 24398,
"text": "Input : n = 5\nProcessing : 1/1! + 2/2! + 3/3! + 4/4! + 5/5!\n 1 + 2/2 + 3/6 + 4/24 + 5/120\nOutput : 2.708333333333333 "
},
{
"code": null,
"e": 24565,
"s": 24553,
"text": "Approach : "
},
{
"code": null,
"e": 24594,
"s": 24565,
"text": "Enter the number of terms N."
},
{
"code": null,
"e": 24663,
"s": 24594,
"text": "Create a function to calculate the sum of series, say, calculateSum."
},
{
"code": null,
"e": 24755,
"s": 24663,
"text": "In calculateSum function(), create a variable sum which stores the total sum of the series."
},
{
"code": null,
"e": 24775,
"s": 24755,
"text": "Run a loop N times."
},
{
"code": null,
"e": 24847,
"s": 24775,
"text": "Call factorial function() to calculate the factorial of a given number."
},
{
"code": null,
"e": 24859,
"s": 24847,
"text": "Return sum."
},
{
"code": null,
"e": 24875,
"s": 24859,
"text": "Implementation:"
},
{
"code": null,
"e": 24880,
"s": 24875,
"text": "Java"
},
{
"code": "// Java Program to Find Sum of the Series// 1/1! + 2/2! + 3/3! + ......1/N! // Importing java generic librariesimport java.io.*; class GFG { // Function(recursive) to calculate factorial public static double factorial(int i) { // Step1: Base case if (i == 1) { return 1; } // Step2&3: Recursion execution & call statements return i * factorial(i - 1); } // Function to calculate sum of series public static double calculateSum(int N) { // Store total_sum in sum double sum = 0; // Iteration by running a loop N times for (int i = 1; i <= N; i++) { sum = sum + ((double)i / factorial(i)); } // Return calculated final sum return sum; } // Main driver method public static void main(String[] args) { /* No of terms in series taken inn order to show output */ int N = 5; // Print sum of series by // calling function calculating sum of series System.out.println(\"The sum of series upto \" + N + \" terms is : \" + calculateSum(N)); }}",
"e": 26056,
"s": 24880,
"text": null
},
{
"code": null,
"e": 26110,
"s": 26056,
"text": "The sum of series upto 5 terms is : 2.708333333333333"
},
{
"code": null,
"e": 26132,
"s": 26110,
"text": "Time Complexity: O(n)"
},
{
"code": null,
"e": 26142,
"s": 26132,
"text": "kalrap615"
},
{
"code": null,
"e": 26150,
"s": 26142,
"text": "clintra"
},
{
"code": null,
"e": 26157,
"s": 26150,
"text": "Picked"
},
{
"code": null,
"e": 26162,
"s": 26157,
"text": "Java"
},
{
"code": null,
"e": 26176,
"s": 26162,
"text": "Java Programs"
},
{
"code": null,
"e": 26181,
"s": 26176,
"text": "Java"
},
{
"code": null,
"e": 26279,
"s": 26181,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 26288,
"s": 26279,
"text": "Comments"
},
{
"code": null,
"e": 26301,
"s": 26288,
"text": "Old Comments"
},
{
"code": null,
"e": 26347,
"s": 26301,
"text": "Different ways of Reading a text file in Java"
},
{
"code": null,
"e": 26362,
"s": 26347,
"text": "Stream In Java"
},
{
"code": null,
"e": 26383,
"s": 26362,
"text": "Constructors in Java"
},
{
"code": null,
"e": 26400,
"s": 26383,
"text": "Generics in Java"
},
{
"code": null,
"e": 26419,
"s": 26400,
"text": "Exceptions in Java"
},
{
"code": null,
"e": 26463,
"s": 26419,
"text": "Convert a String to Character array in Java"
},
{
"code": null,
"e": 26489,
"s": 26463,
"text": "Java Programming Examples"
},
{
"code": null,
"e": 26523,
"s": 26489,
"text": "Convert Double to Integer in Java"
},
{
"code": null,
"e": 26570,
"s": 26523,
"text": "Implementing a Linked List in Java using Class"
}
] |
Find the number of islands | Practice | GeeksforGeeks | Given a grid of size n*m (n is number of rows and m is number of columns grid has) consisting of '0's(Water) and '1's(Land). Find the number of islands.
Note: An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically or diagonally i.e., in all 8 directions.
Example 1:
Input:
grid = {{0,1},{1,0},{1,1},{1,0}}
Output:
1
Explanation:
The grid is-
0 1
1 0
1 1
1 0
All lands are connected.
Example 2:
Input:
grid = {{0,1,1,1,0,0,0},{0,0,1,1,0,1,0}}
Output:
2
Explanation:
The grid is-
0 1 1 1 0 0 0
0 0 1 1 0 1 0
There are two islands one is colored in blue
and other in orange.
Your Task:
You don't need to read or print anything. Your task is to complete the function numIslands() which takes grid as input parameter and returns the total number of islands.
Expected Time Complexity: O(n*m)
Expected Space Complexity: O(n*m)
Constraints:
1 ≤ n, m ≤ 500
0
thakuraditya6211 day ago
Java Easy To Understand :)
// Function to find the number of islands.
public int numIslands(char[][] grid) {
// Code here
boolean [][]visited = new boolean[grid.length][grid[0].length];
int count = 0;
for(int i = 0; i < grid.length; i++){
for(int j = 0; j < grid[i].length; j++){
if(grid[i][j] == '1' && visited[i][j] == false){
dfs(grid, i, j , visited);
count++;
}
}
}
return count;
}
public void dfs(char[][] grid, int i, int j, boolean[][] visited){
// base case like out of
if(i < 0 || j < 0 || i >= grid.length || j >= grid[0].length || grid[i][j] == '0' || visited[i][j] == true){
return ;
}
visited[i][j] = true;
// vertically and horizontally
dfs(grid, i-1, j, visited);
dfs(grid, i, j+1, visited);
dfs(grid, i, j-1, visited);
dfs(grid, i+1, j, visited);
// diagonally
dfs(grid, i+1, j+1, visited);
dfs(grid, i-1, j-1, visited);
dfs(grid, i-1, j+1, visited);
dfs(grid, i+1, j-1, visited);
}
+1
kharsh8221 day ago
Java Easy Solution
// { Driver Code Starts
import java.util.*;
import java.lang.*;
import java.io.*;
class GFG {
public static void main(String[] args) throws IOException {
BufferedReader br =
new BufferedReader(new InputStreamReader(System.in));
int T = Integer.parseInt(br.readLine().trim());
while (T-- > 0) {
String[] s = br.readLine().trim().split(" ");
int n = Integer.parseInt(s[0]);
int m = Integer.parseInt(s[1]);
char[][] grid = new char[n][m];
for (int i = 0; i < n; i++) {
String[] S = br.readLine().trim().split(" ");
for (int j = 0; j < m; j++) {
grid[i][j] = S[j].charAt(0);
}
}
Solution obj = new Solution();
int ans = obj.numIslands(grid);
System.out.println(ans);
}
}
}// } Driver Code Ends
class Solution {
// Function to find the number of islands.
public int numIslands(char[][] grid) {
// Code here
boolean [][] visited=new boolean[grid.length][grid[0].length];
int count=0;
for(int i =0;i<grid.length;i++){
for(int j =0;j<grid[i].length;j++){
if(grid[i][j]=='1' && visited[i][j]==false){
helper(grid,visited,i,j);
count++;
}
}
}
return count ;
}
public void helper(char[][] grid,boolean [][] visited,int r,int c){
if(r<0 || c<0 || r>=grid.length || c>=grid[0].length || grid[r][c]=='0' || visited[r][c]==true ){
return ;
}
visited[r][c]=true;
// Vertical and Horizontal Direction
helper(grid,visited,r+1,c);
helper(grid,visited,r-1,c);
helper(grid,visited,r,c+1);
helper(grid,visited,r,c-1);
// Diagonally
helper(grid,visited,r+1,c+1);
helper(grid,visited,r-1,c-1);
helper(grid,visited,r-1,c+1);
helper(grid,visited,r+1,c-1);
}
}
0
gourangp293 days ago
class Solution { public: // Function to find the number of islands. void dfs(int i,int j, vector<vector<int>>& visited, vector<vector<char>>& grid, int& n, int& m) { if(i<0 || i>=n || j<0 || j>=m || grid[i][j] == '0' || visited[i][j] == 1) return; visited[i][j] = 1; int dx[8] = {-1,-1,-1,0,0,1,1,1}; int dy[8] = {-1,0,1,-1,1,-1,0,1}; for(int k=0; k<8; k++) { dfs(i+dx[k],j+dy[k],visited,grid,n,m); } } int numIslands(vector<vector<char>>& grid) { // Code here int n = grid.size(); int m = grid[0].size(); int ans=0; vector<vector<int>> visited(n, vector<int> (m,0)); for(int i=0; i<n; i++) { for(int j=0; j<m; j++) { if(visited[i][j] == 0 && grid[i][j] == '1') { ans++; dfs(i,j,visited,grid,n,m); } } } return ans; }};
0
jordiamposta1 week ago
// C++
//---
class Solution{
public:
int numIslands(vector<vector<char>>& grid) {
if (grid.empty())
return 0;
int isle_counter = 0;
auto visited_cells = std::vector<std::vector<bool>>(
grid.size(), std::vector<bool>(grid[0].size(), false));
const std::vector<std::pair<int, int>> look_positions_ {
{1, -1}, {1, 0}, {1, 1},
{-1, -1}, {-1, 0}, {-1, 1},
{0, -1}, {0, 1},
};
auto _grow = [&](int row, int col, auto && _grow) -> void {
for (auto &position: look_positions_) {
if (
(position.first + row < 0 || position.first + row >= grid.size()) ||
(position.second + col < 0 || position.second + col >= grid[0].size()) ||
visited_cells[position.first + row][position.second + col]
){
continue;
}
auto land = grid[position.first + row][position.second + col];
visited_cells[position.first + row][position.second + col] = true;
if (land == '1') {
_grow(position.first + row, position.second + col, _grow);
}
}
};
for(int i=0; i < grid.size(); i++)
{
for (int j = 0; j < grid[i].size(); j++) {
if (grid[i][j]=='1' && (!visited_cells[i][j])) {
visited_cells[i][j] = true;
_grow(i, j, _grow);
isle_counter++;
}
}
}
return isle_counter;
}
};
0
amishasahu3282 weeks ago
void mark_current_island(vector<vector<char>>& grid, int r, int c, int row, int col)
{
// Boundary conditions
if(r < 0 || r >= row || c < 0 || c >= col || grid[r][c] != '1')
return;
// mark current island as visited
grid[r][c] = '2';
// mark all its neighbour visisted
mark_current_island(grid, r-1, c, row, col); // up
mark_current_island(grid, r+1, c, row, col); // down
mark_current_island(grid, r, c-1, row, col); // left
mark_current_island(grid, r, c+1, row, col); // right
mark_current_island(grid, r-1, c-1, row, col); // left up diagonal
mark_current_island(grid, r+1, c-1, row, col); // left down diagonal
mark_current_island(grid, r+1, c+1, row, col); // right down diagonal
mark_current_island(grid, r-1, c+1, row, col); // right up diagonal
}
int numIslands(vector<vector<char>>& grid) {
// Code here
int row = grid.size();
int col = grid[0].size();
int numberOfIsland = 0;
for(int i = 0; i < row; i++)
{
for(int j = 0; j < col; j++)
{
if(grid[i][j] == '1')
{
mark_current_island(grid, i, j , row, col);
numberOfIsland++;
}
}
}
return numberOfIsland;
}
0
lilith2 weeks ago
JAVA Soln
class Solution {
// Function to find the number of islands.
public int numIslands(char[][] grid) {
// Code here
int res = 0;
boolean[][] visited = new boolean[grid.length][grid[0].length];
for(int i = 0; i < grid.length; i++)
for(int j = 0; j < grid[0].length; j++)
if(grid[i][j] == '1' && visited[i][j] != true) {
findIsLands(grid, i , j, visited);
res++;
}
return res;
}
public void findIsLands(char[][] grid, int i, int j, boolean[][] visited) {
if(i < 0 || i >= grid.length || j < 0 || j >= grid[0].length || grid[i][j] == '0' || visited[i][j])
return;
visited[i][j] = true;
findIsLands(grid, i - 1, j, visited);
findIsLands(grid, i + 1, j, visited);
findIsLands(grid, i, j - 1, visited);
findIsLands(grid, i, j + 1, visited);
findIsLands(grid, i + 1, j - 1, visited);
findIsLands(grid, i - 1, j + 1, visited);
findIsLands(grid, i - 1, j - 1, visited);
findIsLands(grid, i + 1, j + 1, visited);
}
}
0
lilith
This comment was deleted.
-1
joyrockok3 weeks ago
class Solution { // Function to find the number of islands.int dirRow[] = {-1, -1, -1, 0, 0, 1, 1, 1}; int dirCol[] = {-1, 0, 1, -1, 1, -1, 0, 1};int n, m;public boolean isOk(char[][] grid, int i, int j, boolean visited[][]) { if(i>=0 && i < n && j >=0 && j < m && visited[i][j] == false && (grid[i][j]-'0') == 1) { return true; } return false;}public void DFS(char[][] grid, int row, int col, boolean visited[][]) { visited[row][col] = true; for(int k=0; k<8; k++) { if(isOk(grid, row+dirRow[k], col+dirCol[k], visited)) { DFS(grid, row+dirRow[k], col+dirCol[k], visited); } }} public int numIslands(char[][] grid) { // Code here int count=0; n = grid.length; m = grid[0].length; boolean [][]visited = new boolean[n][m]; for(int i=0; i<n; i++) { for(int j=0; j<m; j++) { if(isOk(grid, i, j, visited) == true) { DFS(grid, i, j, visited); count++; } } } return count; }}
0
patildhiren443 weeks ago
JAVA - 0.2
void isValid(int i, int j, boolean[][]visit, char[][]arr, int n, int m){
if(i<0 || j<0 || i>=n || j>=m){
return ;
}
if(arr[i][j]=='0'){
return;
}
if(visit[i][j]==false){
visit[i][j]=true;
isValid(i+1, j, visit, arr, n, m);
isValid(i, j+1, visit, arr, n, m);
isValid(i-1, j, visit, arr, n, m);
isValid(i, j-1, visit, arr, n, m);
isValid(i+1, j+1, visit, arr, n, m);
isValid(i+1, j-1, visit, arr, n, m);
isValid(i-1, j-1, visit, arr, n, m);
isValid(i-1, j+1, visit, arr, n, m);
}
}
public int numIslands(char[][] grid) {
// Code here
int n=grid.length, m=grid[0].length;
boolean[][]visit = new boolean[n][m];
int cou=0;
for(int i=0; i<n; i++){
for(int j=0; j<m; j++){
if(grid[i][j]=='1' && visit[i][j]==false){
isValid(i, j, visit, grid, n, m);
cou++;
}
}
}
return cou;
}
+1
milindprajapatmst193 weeks ago
int dx[] = { 0, 1, 1, 1, 0, -1, -1, -1 };
int dy[] = { 1, 1, 0, -1, -1, -1, 0, 1 };
class Solution {
public:
void dfs(int x, int y, int n, int m, vector<vector<char>>& grid) {
grid[x][y] = '0';
for (int k = 0; k < 8; k++) {
int a = x + dx[k], b = y + dy[k];
if (0 <= a && a < n && 0 <= b && b < m && grid[a][b] == '1')
dfs(a, b, n, m, grid);
}
}
int numIslands(vector<vector<char>>& grid) {
int n = grid.size(), m = grid[0].size(), result = 0;
for (int x = 0; x < n; x++) {
for (int y = 0; y < m; y++) {
if (grid[x][y] == '0')
continue;
result++;
dfs(x, y, n, m, grid);
}
}
return result;
}
};
You can view the solutions submitted by other users from the submission tab. | [
{
"code": null,
"e": 543,
"s": 238,
"text": "Given a grid of size n*m (n is number of rows and m is number of columns grid has) consisting of '0's(Water) and '1's(Land). Find the number of islands.\nNote: An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically or diagonally i.e., in all 8 directions.\n "
},
{
"code": null,
"e": 554,
"s": 543,
"text": "Example 1:"
},
{
"code": null,
"e": 672,
"s": 554,
"text": "Input:\ngrid = {{0,1},{1,0},{1,1},{1,0}}\nOutput:\n1\nExplanation:\nThe grid is-\n0 1\n1 0\n1 1\n1 0\nAll lands are connected.\n"
},
{
"code": null,
"e": 683,
"s": 672,
"text": "Example 2:"
},
{
"code": null,
"e": 863,
"s": 683,
"text": "Input:\ngrid = {{0,1,1,1,0,0,0},{0,0,1,1,0,1,0}}\nOutput:\n2\nExpanation:\nThe grid is-\n0 1 1 1 0 0 0\n0 0 1 1 0 1 0 \nThere are two islands one is colored in blue \nand other in orange.\n"
},
{
"code": null,
"e": 1048,
"s": 865,
"text": "Your Task:\nYou don't need to read or print anything. Your task is to complete the function numIslands() which takes grid as input parameter and returns the total number of islands.\n "
},
{
"code": null,
"e": 1117,
"s": 1048,
"text": "Expected Time Compelxity: O(n*m)\nExpected Space Compelxity: O(n*m)\n "
},
{
"code": null,
"e": 1145,
"s": 1117,
"text": "Constraints:\n1 ≤ n, m ≤ 500"
},
{
"code": null,
"e": 1147,
"s": 1145,
"text": "0"
},
{
"code": null,
"e": 1172,
"s": 1147,
"text": "thakuraditya6211 day ago"
},
{
"code": null,
"e": 1199,
"s": 1172,
"text": "Java Easy To Understand :)"
},
{
"code": null,
"e": 2374,
"s": 1199,
"text": "// Function to find the number of islands.\n public int numIslands(char[][] grid) {\n // Code here\n boolean [][]visited = new boolean[grid.length][grid[0].length];\n int count = 0;\n for(int i = 0; i < grid.length; i++){\n for(int j = 0; j < grid[i].length; j++){\n if(grid[i][j] == '1' && visited[i][j] == false){\n dfs(grid, i, j , visited);\n count++;\n }\n }\n }\n return count;\n }\n \n public void dfs(char[][] grid, int i, int j, boolean[][] visited){\n // base case like out of \n if(i < 0 || j < 0 || i >= grid.length || j >= grid[0].length || grid[i][j] == '0' || visited[i][j] == true){\n return ;\n }\n visited[i][j] = true;\n \n // vertically and horizontally\n dfs(grid, i-1, j, visited);\n dfs(grid, i, j+1, visited);\n dfs(grid, i, j-1, visited);\n dfs(grid, i+1, j, visited);\n \n // diagonally\n dfs(grid, i+1, j+1, visited);\n dfs(grid, i-1, j-1, visited);\n dfs(grid, i-1, j+1, visited);\n dfs(grid, i+1, j-1, visited);\n\n }"
},
{
"code": null,
"e": 2377,
"s": 2374,
"text": "+1"
},
{
"code": null,
"e": 2396,
"s": 2377,
"text": "kharsh8221 day ago"
},
{
"code": null,
"e": 2415,
"s": 2396,
"text": "Java Easy Solution"
},
{
"code": null,
"e": 4525,
"s": 2415,
"text": "// { Driver Code Starts\nimport java.util.*;\nimport java.lang.*;\nimport java.io.*;\nclass GFG {\n public static void main(String[] args) throws IOException {\n BufferedReader br =\n new BufferedReader(new InputStreamReader(System.in));\n int T = Integer.parseInt(br.readLine().trim());\n while (T-- > 0) {\n String[] s = br.readLine().trim().split(\" \");\n int n = Integer.parseInt(s[0]);\n int m = Integer.parseInt(s[1]);\n char[][] grid = new char[n][m];\n for (int i = 0; i < n; i++) {\n String[] S = br.readLine().trim().split(\" \");\n for (int j = 0; j < m; j++) {\n grid[i][j] = S[j].charAt(0);\n }\n }\n Solution obj = new Solution();\n int ans = obj.numIslands(grid);\n System.out.println(ans);\n }\n }\n}// } Driver Code Ends\n\n\nclass Solution {\n // Function to find the number of islands.\n \n public int numIslands(char[][] grid) {\n // Code here\n boolean [][] visited=new boolean[grid.length][grid[0].length];\n int count=0;\n for(int i =0;i<grid.length;i++){\n for(int j =0;j<grid[i].length;j++){\n if(grid[i][j]=='1' && visited[i][j]==false){\n helper(grid,visited,i,j);\n count++;\n }\n }\n }\n \n return count ;\n }\n \n public void helper(char[][] grid,boolean [][] visited,int r,int c){\n \n if(r<0 || c<0 || r>=grid.length || c>=grid[0].length || grid[r][c]=='0' || visited[r][c]==true ){\n return ;\n }\n \n visited[r][c]=true;\n \n // Vertical and Horizontal Direction \n helper(grid,visited,r+1,c);\n helper(grid,visited,r-1,c);\n helper(grid,visited,r,c+1);\n helper(grid,visited,r,c-1);\n \n // Diagonally \n helper(grid,visited,r+1,c+1);\n helper(grid,visited,r-1,c-1);\n helper(grid,visited,r-1,c+1);\n helper(grid,visited,r+1,c-1);\n \n }\n}"
},
{
"code": null,
"e": 4527,
"s": 4525,
"text": "0"
},
{
"code": null,
"e": 4548,
"s": 4527,
"text": "gourangp293 days ago"
},
{
"code": null,
"e": 5494,
"s": 4548,
"text": "class Solution { public: // Function to find the number of islands. void dfs(int i,int j, vector<vector<int>>& visited, vector<vector<char>>& grid, int& n, int& m) { if(i<0 || i>=n || j<0 || j>=m || grid[i][j] == '0' || visited[i][j] == 1) return; visited[i][j] = 1; int dx[8] = {-1,-1,-1,0,0,1,1,1}; int dy[8] = {-1,0,1,-1,1,-1,0,1}; for(int k=0; k<8; k++) { dfs(i+dx[k],j+dy[k],visited,grid,n,m); } } int numIslands(vector<vector<char>>& grid) { // Code here int n = grid.size(); int m = grid[0].size(); int ans=0; vector<vector<int>> visited(n, vector<int> (m,0)); for(int i=0; i<n; i++) { for(int j=0; j<m; j++) { if(visited[i][j] == 0 && grid[i][j] == '1') { ans++; dfs(i,j,visited,grid,n,m); } } } return ans; }};"
},
{
"code": null,
"e": 5496,
"s": 5494,
"text": "0"
},
{
"code": null,
"e": 5519,
"s": 5496,
"text": "jordiamposta1 week ago"
},
{
"code": null,
"e": 7328,
"s": 5519,
"text": "// C++\n//---\nclass Solution{\npublic:\n\n int numIslands(vector<vector<char>>& grid) {\n if (grid.empty())\n return 0;\n\n int isle_counter = 0;\n\n auto visited_cells = std::vector<std::vector<bool>>(\n grid.size(), std::vector<bool>(grid[0].size(), false));\n\n const std::vector<std::pair<int, int>> look_positions_ {\n {1, -1}, {1, 0}, {1, 1},\n {-1, -1}, {-1, 0}, {-1, 1},\n {0, -1}, {0, 1},\n };\n\n auto _grow = [&](int row, int col, auto && _grow) -> void {\n for (auto &position: look_positions_) {\n if (\n (position.first + row < 0 || position.first + row >= grid.size()) ||\n (position.second + col < 0 || position.second + col >= grid[0].size()) ||\n visited_cells[position.first + row][position.second + col]\n ){\n continue;\n }\n\n auto land = grid[position.first + row][position.second + col];\n visited_cells[position.first + row][position.second + col] = true;\n\n if (land == '1') {\n _grow(position.first + row, position.second + col, _grow);\n }\n }\n };\n\n for(int i=0; i < grid.size(); i++)\n {\n for (int j = 0; j < grid[i].size(); j++) {\n if (grid[i][j]=='1' && (!visited_cells[i][j])) {\n visited_cells[i][j] = true;\n _grow(i, j, _grow);\n isle_counter++;\n }\n }\n }\n\n return isle_counter;\n }\n};"
},
{
"code": null,
"e": 7330,
"s": 7328,
"text": "0"
},
{
"code": null,
"e": 7355,
"s": 7330,
"text": "amishasahu3282 weeks ago"
},
{
"code": null,
"e": 8777,
"s": 7355,
"text": "void mark_current_island(vector<vector<char>>& grid, int r, int c, int row, int col)\n {\n // Boundary conditions\n if(r < 0 || r >= row || c < 0 || c >= col || grid[r][c] != '1')\n return;\n \n // mark current island as visited\n grid[r][c] = '2';\n \n // mark all its neighbour visisted\n mark_current_island(grid, r-1, c, row, col); // up\n mark_current_island(grid, r+1, c, row, col); // down\n mark_current_island(grid, r, c-1, row, col); // left\n mark_current_island(grid, r, c+1, row, col); // right\n mark_current_island(grid, r-1, c-1, row, col); // left up diagonal\n mark_current_island(grid, r+1, c-1, row, col); // left down diagonal\n mark_current_island(grid, r+1, c+1, row, col); // right down diagonal\n mark_current_island(grid, r-1, c+1, row, col); // right up diagonal\n \n }\n int numIslands(vector<vector<char>>& grid) {\n // Code here\n int row = grid.size();\n int col = grid[0].size();\n \n int numberOfIsland = 0;\n for(int i = 0; i < row; i++)\n {\n for(int j = 0; j < col; j++)\n {\n if(grid[i][j] == '1')\n {\n mark_current_island(grid, i, j , row, col);\n numberOfIsland++;\n }\n }\n }\n return numberOfIsland;\n }"
},
{
"code": null,
"e": 8779,
"s": 8777,
"text": "0"
},
{
"code": null,
"e": 8797,
"s": 8779,
"text": "lilith2 weeks ago"
},
{
"code": null,
"e": 8807,
"s": 8797,
"text": "JAVA Soln"
},
{
"code": null,
"e": 10004,
"s": 8807,
"text": "class Solution {\n // Function to find the number of islands.\n public int numIslands(char[][] grid) {\n // Code here\n int res = 0;\n boolean[][] visited = new boolean[grid.length][grid[0].length];\n \n for(int i = 0; i < grid.length; i++)\n for(int j = 0; j < grid[0].length; j++) \n if(grid[i][j] == '1' && visited[i][j] != true) {\n findIsLands(grid, i , j, visited);\n res++;\n }\n \n return res;\n }\n \n public void findIsLands(char[][] grid, int i, int j, boolean[][] visited) {\n if(i < 0 || i >= grid.length || j < 0 || j >= grid[0].length || grid[i][j] == '0' || visited[i][j])\n return;\n \n visited[i][j] = true;\n \n findIsLands(grid, i - 1, j, visited);\n findIsLands(grid, i + 1, j, visited);\n findIsLands(grid, i, j - 1, visited);\n findIsLands(grid, i, j + 1, visited);\n findIsLands(grid, i + 1, j - 1, visited);\n findIsLands(grid, i - 1, j + 1, visited);\n findIsLands(grid, i - 1, j - 1, visited);\n findIsLands(grid, i + 1, j + 1, visited);\n }\n}"
},
{
"code": null,
"e": 10006,
"s": 10004,
"text": "0"
},
{
"code": null,
"e": 10013,
"s": 10006,
"text": "lilith"
},
{
"code": null,
"e": 10039,
"s": 10013,
"text": "This comment was deleted."
},
{
"code": null,
"e": 10042,
"s": 10039,
"text": "-1"
},
{
"code": null,
"e": 10063,
"s": 10042,
"text": "joyrockok3 weeks ago"
},
{
"code": null,
"e": 11028,
"s": 10063,
"text": "class Solution { // Function to find the number of islands.int dirRow[] = {-1, -1, -1, 0, 0, 1, 1, 1}; int dirCol[] = {-1, 0, 1, -1, 1, -1, 0, 1};int n, m;public boolean isOk(char[][] grid, int i, int j, boolean visited[][]) { if(i>=0 && i < n && j >=0 && j < m && visited[i][j] == false && (grid[i][j]-'0') == 1) { return true; } return false;}public void DFS(char[][] grid, int row, int col, boolean visited[][]) { visited[row][col] = true; for(int k=0; k<8; k++) { if(isOk(grid, row+dirRow[k], col+dirCol[k], visited)) { DFS(grid, row+dirRow[k], col+dirCol[k], visited); } }} public int numIslands(char[][] grid) { // Code here int count=0; n = grid.length; m = grid[0].length; boolean [][]visited = new boolean[n][m]; for(int i=0; i<n; i++) { for(int j=0; j<m; j++) { if(isOk(grid, i, j, visited) == true) { DFS(grid, i, j, visited); count++; } } } return count; }}"
},
{
"code": null,
"e": 11030,
"s": 11028,
"text": "0"
},
{
"code": null,
"e": 11055,
"s": 11030,
"text": "patildhiren443 weeks ago"
},
{
"code": null,
"e": 11066,
"s": 11055,
"text": "JAVA - 0.2"
},
{
"code": null,
"e": 12240,
"s": 11068,
"text": "void isValid(int i, int j, boolean[][]visit, char[][]arr, int n, int m){\n if(i<0 || j<0 || i>=n || j>=m){\n return ;\n }\n \n if(arr[i][j]=='0'){\n return;\n }\n \n if(visit[i][j]==false){\n visit[i][j]=true;\n \n isValid(i+1, j, visit, arr, n, m);\n isValid(i, j+1, visit, arr, n, m);\n isValid(i-1, j, visit, arr, n, m);\n isValid(i, j-1, visit, arr, n, m);\n \n isValid(i+1, j+1, visit, arr, n, m);\n isValid(i+1, j-1, visit, arr, n, m);\n isValid(i-1, j-1, visit, arr, n, m);\n isValid(i-1, j+1, visit, arr, n, m);\n }\n }\n \n public int numIslands(char[][] grid) {\n // Code here\n int n=grid.length, m=grid[0].length;\n boolean[][]visit = new boolean[n][m];\n \n int cou=0;\n for(int i=0; i<n; i++){\n for(int j=0; j<m; j++){\n if(grid[i][j]=='1' && visit[i][j]==false){\n isValid(i, j, visit, grid, n, m);\n cou++;\n }\n }\n }\n return cou;\n }"
},
{
"code": null,
"e": 12245,
"s": 12242,
"text": "+1"
},
{
"code": null,
"e": 12276,
"s": 12245,
"text": "milindprajapatmst193 weeks ago"
},
{
"code": null,
"e": 13080,
"s": 12276,
"text": "int dx[] = { 0, 1, 1, 1, 0, -1, -1, -1 };\nint dy[] = { 1, 1, 0, -1, -1, -1, 0, 1 };\nclass Solution {\n public:\n void dfs(int x, int y, int n, int m, vector<vector<char>>& grid) {\n grid[x][y] = '0';\n for (int k = 0; k < 8; k++) {\n int a = x + dx[k], b = y + dy[k];\n if (0 <= a && a < n && 0 <= b && b < m && grid[a][b] == '1')\n dfs(a, b, n, m, grid);\n }\n }\n int numIslands(vector<vector<char>>& grid) {\n int n = grid.size(), m = grid[0].size(), result = 0;\n for (int x = 0; x < n; x++) {\n for (int y = 0; y < m; y++) {\n if (grid[x][y] == '0')\n continue;\n result++;\n dfs(x, y, n, m, grid);\n }\n }\n return result;\n }\n};"
},
{
"code": null,
"e": 13226,
"s": 13080,
"text": "We strongly recommend solving this problem on your own before viewing its editorial. Do you still\n want to view the editorial?"
},
{
"code": null,
"e": 13262,
"s": 13226,
"text": " Login to access your submissions. "
},
{
"code": null,
"e": 13272,
"s": 13262,
"text": "\nProblem\n"
},
{
"code": null,
"e": 13282,
"s": 13272,
"text": "\nContest\n"
},
{
"code": null,
"e": 13345,
"s": 13282,
"text": "Reset the IDE using the second button on the top right corner."
},
{
"code": null,
"e": 13493,
"s": 13345,
"text": "Avoid using static/global variables in your code as your code is tested against multiple test cases and these tend to retain their previous values."
},
{
"code": null,
"e": 13701,
"s": 13493,
"text": "Passing the Sample/Custom Test cases does not guarantee the correctness of code. On submission, your code is tested against multiple test cases consisting of all possible corner cases and stress constraints."
},
{
"code": null,
"e": 13807,
"s": 13701,
"text": "You can access the hints to get an idea about what is expected of you as well as the final solution code."
}
] |
How to create a JSON object in JavaScript? Explain with an example. | Following is the code to create a JSON object in JavaScript.
Live Demo
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Document</title>
<style>
body {
font-family: "Segoe UI", Tahoma, Geneva, Verdana, sans-serif;
}
.result {
font-size: 20px;
font-weight: 500;
}
</style>
</head>
<body>
<h1>JSON object in JavaScript</h1>
<div class="sample"></div>
<div style="color: green;" class="result">JSON OBJECT <br></div>
<button class="Btn">CLICK HERE</button>
<h3>
Click on the above button to create a JSON object and display it
</h3>
<script>
let resEle = document.querySelector(".result");
let obj = {
firstName:'Rohan',
lastName: 'Sharma',
Age:14,
}
document.querySelector(".Btn").addEventListener("click", () => {
for(i in obj){
resEle.innerHTML += 'key = '+i+' value = ' + obj[i] + '<br>';
}
});
</script>
</body>
</html>
The above code will produce the following output −
On clicking the ‘CLICK HERE’ button − | [
{
"code": null,
"e": 1123,
"s": 1062,
"text": "Following is the code to create a JSON object in JavaScript."
},
{
"code": null,
"e": 1134,
"s": 1123,
"text": " Live Demo"
},
{
"code": null,
"e": 2065,
"s": 1134,
"text": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n<meta charset=\"UTF-8\" />\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\" />\n<title>Document</title>\n<style>\n body {\n font-family: \"Segoe UI\", Tahoma, Geneva, Verdana, sans-serif;\n }\n .result {\n font-size: 20px;\n font-weight: 500;\n }\n</style>\n</head>\n<body>\n<h1>JSON object in JavaScript</h1>\n<div class=\"sample\"></div>\n<div style=\"color: green;\" class=\"result\">JSON OBJECT <br></div>\n<button class=\"Btn\">CLICK HERE</button>\n<h3>\nClick on the above button to create a JSON object and display it\n</h3>\n<script>\n let resEle = document.querySelector(\".result\");\n let obj = {\n firstName:'Rohan',\n lastName: 'Sharma',\n Age:14,\n }\n document.querySelector(\".Btn\").addEventListener(\"click\", () => {\n for(i in obj){\n resEle.innerHTML += 'key = '+i+' value = ' + obj[i] + '<br>';\n }\n });\n</script>\n</body>\n</html>"
},
{
"code": null,
"e": 2116,
"s": 2065,
"text": "The above code will produce the following output −"
},
{
"code": null,
"e": 2154,
"s": 2116,
"text": "On clicking the ‘CLICK HERE’ button −"
}
] |
HTML5 Canvas - Drawing Paths | We require the following methods to draw paths on the canvas −
beginPath()
This method resets the current path.
moveTo(x, y)
This method creates a new subpath with the given point.
closePath()
This method marks the current subpath as closed, and starts a new subpath with a point the same as the start and end of the newly closed subpath.
fill()
This method fills the subpaths with the current fill style.
stroke()
This method strokes the subpaths with the current stroke style.
arc(x, y, radius, startAngle, endAngle, anticlockwise)
Adds points to the subpath such that the arc described by the circumference of the circle described by the arguments, starting at the given start angle and ending at the given end angle, going in the given direction, is added to the path, connected to the previous point by a straight line.
Following is a simple example which makes use of the above-mentioned methods to draw a shape.
<!DOCTYPE HTML>
<html>
<head>
<style>
#test {
width: 100px;
height:100px;
margin: 0px auto;
}
</style>
<script type = "text/javascript">
function drawShape() {
// get the canvas element using the DOM
var canvas = document.getElementById('mycanvas');
// Make sure we don't execute when canvas isn't supported
if (canvas.getContext) {
// use getContext to use the canvas for drawing
var ctx = canvas.getContext('2d');
// Draw shapes
ctx.beginPath();
ctx.arc(75,75,50,0,Math.PI*2,true); // Outer circle
ctx.moveTo(110,75);
ctx.arc(75,75,35,0,Math.PI,false); // Mouth
ctx.moveTo(65,65);
ctx.arc(60,65,5,0,Math.PI*2,true); // Left eye
ctx.moveTo(95,65);
ctx.arc(90,65,5,0,Math.PI*2,true); // Right eye
ctx.stroke();
} else {
alert('You need Safari or Firefox 1.5+ to see this demo.');
}
}
</script>
</head>
<body id = "test" onload = "drawShape();">
<canvas id = "mycanvas"></canvas>
</body>
</html>
It will produce the following output −
{
"code": null,
"e": 2671,
"s": 2608,
"text": "We require the following methods to draw paths on the canvas −"
},
{
"code": null,
"e": 2683,
"s": 2671,
"text": "beginPath()"
},
{
"code": null,
"e": 2720,
"s": 2683,
"text": "This method resets the current path."
},
{
"code": null,
"e": 2733,
"s": 2720,
"text": "moveTo(x, y)"
},
{
"code": null,
"e": 2789,
"s": 2733,
"text": "This method creates a new subpath with the given point."
},
{
"code": null,
"e": 2801,
"s": 2789,
"text": "closePath()"
},
{
"code": null,
"e": 2947,
"s": 2801,
"text": "This method marks the current subpath as closed, and starts a new subpath with a point the same as the start and end of the newly closed subpath."
},
{
"code": null,
"e": 2954,
"s": 2947,
"text": "fill()"
},
{
"code": null,
"e": 3014,
"s": 2954,
"text": "This method fills the subpaths with the current fill style."
},
{
"code": null,
"e": 3023,
"s": 3014,
"text": "stroke()"
},
{
"code": null,
"e": 3087,
"s": 3023,
"text": "This method strokes the subpaths with the current stroke style."
},
{
"code": null,
"e": 3142,
"s": 3087,
"text": "arc(x, y, radius, startAngle, endAngle, anticlockwise)"
},
{
"code": null,
"e": 3433,
"s": 3142,
"text": "Adds points to the subpath such that the arc described by the circumference of the circle described by the arguments, starting at the given start angle and ending at the given end angle, going in the given direction, is added to the path, connected to the previous point by a straight line."
},
{
"code": null,
"e": 3523,
"s": 3433,
"text": "Following is a simple example which makes use of above mentioned methods to draw a shape."
},
{
"code": null,
"e": 4899,
"s": 3523,
"text": "<!DOCTYPE HTML>\n\n<html>\n <head>\n \n <style>\n #test {\n width: 100px;\n height:100px;\n margin: 0px auto;\n }\n </style>\n \n <script type = \"text/javascript\">\n function drawShape() {\n \n // get the canvas element using the DOM\n var canvas = document.getElementById('mycanvas');\n\n // Make sure we don't execute when canvas isn't supported\n if (canvas.getContext) {\n \n // use getContext to use the canvas for drawing\n var ctx = canvas.getContext('2d');\n\n // Draw shapes\n ctx.beginPath();\n ctx.arc(75,75,50,0,Math.PI*2,true); // Outer circle\n \n ctx.moveTo(110,75);\n ctx.arc(75,75,35,0,Math.PI,false); // Mouth\n \n ctx.moveTo(65,65);\n ctx.arc(60,65,5,0,Math.PI*2,true); // Left eye\n \n ctx.moveTo(95,65);\n ctx.arc(90,65,5,0,Math.PI*2,true); // Right eye\n ctx.stroke();\n } else {\n alert('You need Safari or Firefox 1.5+ to see this demo.');\n }\n }\n </script>\n </head>\n\t\n <body id = \"test\" onload = \"drawShape();\">\n <canvas id = \"mycanvas\"></canvas>\n </body>\n\t\n</html>"
},
{
"code": null,
"e": 4938,
"s": 4899,
"text": "It will produce the following output −"
},
{
"code": null,
"e": 4971,
"s": 4938,
"text": "\n 19 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 4985,
"s": 4971,
"text": " Anadi Sharma"
},
{
"code": null,
"e": 5020,
"s": 4985,
"text": "\n 16 Lectures \n 1.5 hours \n"
},
{
"code": null,
"e": 5034,
"s": 5020,
"text": " Anadi Sharma"
},
{
"code": null,
"e": 5069,
"s": 5034,
"text": "\n 18 Lectures \n 1.5 hours \n"
},
{
"code": null,
"e": 5086,
"s": 5069,
"text": " Frahaan Hussain"
},
{
"code": null,
"e": 5121,
"s": 5086,
"text": "\n 57 Lectures \n 5.5 hours \n"
},
{
"code": null,
"e": 5152,
"s": 5121,
"text": " DigiFisk (Programming Is Fun)"
},
{
"code": null,
"e": 5185,
"s": 5152,
"text": "\n 54 Lectures \n 6 hours \n"
},
{
"code": null,
"e": 5216,
"s": 5185,
"text": " DigiFisk (Programming Is Fun)"
},
{
"code": null,
"e": 5251,
"s": 5216,
"text": "\n 45 Lectures \n 5.5 hours \n"
},
{
"code": null,
"e": 5282,
"s": 5251,
"text": " DigiFisk (Programming Is Fun)"
},
{
"code": null,
"e": 5289,
"s": 5282,
"text": " Print"
},
{
"code": null,
"e": 5300,
"s": 5289,
"text": " Add Notes"
}
] |
Minimum time required to schedule K processes | 13 Jul, 2021
Given a positive integer K and an array arr[] consisting of N positive integers, such that arr[i] is the number of processes ith processor can schedule in 1 second. The task is to minimize the total time required to schedule K processes such that after scheduling by the ith processor, arr[i] is reduced to floor(arr[i]/2).
Examples:
Input: N = 5, arr[] = {3, 1, 7, 2, 4}, K = 15
Output: 4
Explanation: The order of scheduling is as follows:
The 3rd process is scheduled first. The array arr[] modifies to {3, 1, 3, 2, 4}, as arr[2] = floor(arr[2] / 2) = floor(7 / 2) = 3.
The 5th process is scheduled next. The array arr[] modifies to {3, 1, 3, 2, 2}.
The 1st process is scheduled next. The array arr[] modifies to {1, 1, 3, 2, 2}.
The 2nd process is scheduled next. The array arr[] modifies to {1, 0, 3, 2, 2}.
The total number of processes scheduled = 7 + 4 + 3 + 1 = 15 (= K) and the total time required is 4 seconds.
Input: N = 4, arr[] = {1, 5, 8, 6}, K = 10
Output: 2
Naive Approach: The simplest approach to solve the given problem is to maintain the list in sorted order and repeatedly choose the processor with the highest ability: reduce the value of K by that ability, delete that processor from the list, and add half of that ability back into the sorted list. Repeat the above process until at least K processes are scheduled, and print the number of steps (seconds) required; a rough sketch of this strategy is shown below.
Time Complexity: O(N*log N)
Auxiliary Space: O(N)
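As an illustration only, here is a minimal Java sketch of the naive strategy described above (it is not part of the original article, and the class and method names are chosen just for this example). It uses a max-heap (a PriorityQueue with reverse ordering) in place of repeatedly re-sorting the list, which is equivalent for the purpose of always picking the highest remaining ability.
import java.util.Collections;
import java.util.PriorityQueue;

// Illustrative sketch (not from the original article): naive strategy
// using a max-heap instead of repeatedly re-sorting the list.
public class NaiveScheduler {

    // Returns the minimum number of seconds needed to schedule at least K
    // processes, or -1 if it is impossible.
    static int minTimeNaive(int[] arr, int K) {

        // Max-heap of the processors' current abilities
        PriorityQueue<Integer> pq
            = new PriorityQueue<>(Collections.reverseOrder());
        for (int a : arr) {
            pq.add(a);
        }

        int time = 0;
        while (K > 0 && !pq.isEmpty()) {

            // Pick the processor with the highest remaining ability
            int best = pq.poll();

            // No processor can schedule anything any more
            if (best == 0) {
                return -1;
            }

            // It schedules 'best' processes in this second,
            // then its ability is halved
            K -= best;
            pq.add(best / 2);
            time++;
        }
        return (K <= 0) ? time : -1;
    }

    public static void main(String[] args) {
        int[] arr = { 3, 1, 7, 2, 4 };

        // Prints 4, matching the first example above
        System.out.println(minTimeNaive(arr, 15));
    }
}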
Efficient Approach: The above approach can also be optimized by using the concept of Hashing. Follow the below steps to solve the problem:
Initialize an auxiliary array tmp[] of the size of the maximum element present in the given array.
Initialize a variable, say count to store the minimum time to schedule all processes respectively.
Traverse the given array tmp[] from the end and perform the following steps:
If the current element tmp[i] is greater than 0 and i * tmp[i] is smaller than K, then perform the following steps:
Decrease the value of K by i * tmp[i].
Increase tmp[i/2] by tmp[i], as the ability of the processor will decrease by half.
Increase the value of count by tmp[i].
If the value of K becomes smaller than or equal to 0, then print the value of count as the result.
If the current element tmp[i] is greater than 0 and i * tmp[i] is at least K, then perform the following steps:
If K is divisible by the current index i, then increment the value of count by K / i.
Otherwise, increment the value of count by K / i + 1.
After completing the above steps, print -1 if it is not possible to schedule all processes. Otherwise, print the count as the minimum time required.
Below is the implementation of the above approach:
C++
Java
Python3
C#
Javascript
// C++ program for the above approach#include <bits/stdc++.h>using namespace std; // Function to find minimum required// time to schedule all processint minTime(int A[], int n, int K){ // Stores max element from A[] int max_ability = A[0]; // Find the maximum element for(int i = 1; i < n; i++) { max_ability = max(max_ability, A[i]); } // Stores frequency of each element int tmp[max_ability + 1] = {0}; // Stores minimum time required // to schedule all process int count = 0; // Count frequencies of elements for(int i = 0; i < n; i++) { tmp[A[i]]++; } // Find the minimum time for(int i = max_ability; i >= 0; i--) { if (tmp[i] != 0) { if (tmp[i] * i < K) { // Decrease the value // of K K -= (i * tmp[i]); // Increment tmp[i/2] tmp[i / 2] += tmp[i]; // Increment the count count += tmp[i]; // Return count, if all // process are scheduled if (K <= 0) { return count; } } else { // Increment count if (K % i != 0) { count += (K / i) + 1; } else { count += (K / i); } // Return the count return count; } } } // If it is not possible to // schedule all process return -1;} // Driver codeint main(){ int arr[] = { 3, 1, 7, 2, 4 }; int N = 5; int K = 15; cout << minTime(arr, N, K); return 0;} // This code is contributed by mohit kumar 29
// Java program for the above approach import java.util.*;import java.lang.*; class GFG { // Function to find minimum required // time to schedule all process static int minTime(int[] A, int n, int K) { // Stores max element from A[] int max_ability = A[0]; // Find the maximum element for (int i = 1; i < n; i++) { max_ability = Math.max( max_ability, A[i]); } // Stores frequency of each element int tmp[] = new int[max_ability + 1]; // Stores minimum time required // to schedule all process int count = 0; // Count frequencies of elements for (int i = 0; i < n; i++) { tmp[A[i]]++; } // Find the minimum time for (int i = max_ability; i >= 0; i--) { if (tmp[i] != 0) { if (tmp[i] * i < K) { // Decrease the value // of K K -= (i * tmp[i]); // Increment tmp[i/2] tmp[i / 2] += tmp[i]; // Increment the count count += tmp[i]; // Return count, if all // process are scheduled if (K <= 0) { return count; } } else { // Increment count if (K % i != 0) { count += (K / i) + 1; } else { count += (K / i); } // Return the count return count; } } } // If it is not possible to // schedule all process return -1; } // Driver Code public static void main(String[] args) { int arr[] = { 3, 1, 7, 2, 4 }; int N = arr.length; int K = 15; System.out.println( minTime(arr, N, K)); }}
# Python3 program for the above approach # Function to find minimum required# time to schedule all processdef minTime(A, n, K): # Stores max element from A[] max_ability = A[0] # Find the maximum element for i in range(1, n): max_ability = max(max_ability, A[i]) # Stores frequency of each element tmp = [0 for i in range(max_ability + 1)] # Stores minimum time required # to schedule all process count = 0 # Count frequencies of elements for i in range(n): tmp[A[i]] += 1 # Find the minimum time i = max_ability while(i >= 0): if (tmp[i] != 0): if (tmp[i] * i < K): # Decrease the value # of K K -= (i * tmp[i]) # Increment tmp[i/2] tmp[i // 2] += tmp[i] # Increment the count count += tmp[i] # Return count, if all # process are scheduled if (K <= 0): return count else: # Increment count if (K % i != 0): count += (K // i) + 1 else: count += (K // i) # Return the count return count i -= 1 # If it is not possible to # schedule all process return -1 # Driver codeif __name__ == '__main__': arr = [ 3, 1, 7, 2, 4 ] N = 5 K = 15 print(minTime(arr, N, K)) # This code is contributed by SURENDRA_GANGWAR
// C# program for the above approachusing System; class GFG{ // Function to find minimum required// time to schedule all processstatic int minTime(int[] A, int n, int K){ // Stores max element from A[] int max_ability = A[0]; // Find the maximum element for(int i = 1; i < n; i++) { max_ability = Math.Max( max_ability, A[i]); } // Stores frequency of each element int []tmp = new int[max_ability + 1]; // Stores minimum time required // to schedule all process int count = 0; // Count frequencies of elements for(int i = 0; i < n; i++) { tmp[A[i]]++; } // Find the minimum time for(int i = max_ability; i >= 0; i--) { if (tmp[i] != 0) { if (tmp[i] * i < K) { // Decrease the value // of K K -= (i * tmp[i]); // Increment tmp[i/2] tmp[i / 2] += tmp[i]; // Increment the count count += tmp[i]; // Return count, if all // process are scheduled if (K <= 0) { return count; } } else { // Increment count if (K % i != 0) { count += (K / i) + 1; } else { count += (K / i); } // Return the count return count; } } } // If it is not possible to // schedule all process return -1;} // Driver Codepublic static void Main(string[] args){ int []arr = { 3, 1, 7, 2, 4 }; int N = arr.Length; int K = 15; Console.WriteLine(minTime(arr, N, K));}} // This code is contributed by ukasp
<script> // JavaScript Program to implement// the above approach // Function to find minimum required // time to schedule all process function minTime(A, n, K) { // Stores max element from A[] let max_ability = A[0]; // Find the maximum element for (let i = 1; i < n; i++) { max_ability = Math.max( max_ability, A[i]); } // Stores frequency of each element let tmp = Array.from({length: max_ability + 1}, (_, i) => 0); // Stores minimum time required // to schedule all process let count = 0; // Count frequencies of elements for (let i = 0; i < n; i++) { tmp[A[i]]++; } // Find the minimum time for (let i = max_ability; i >= 0; i--) { if (tmp[i] != 0) { if (tmp[i] * i < K) { // Decrease the value // of K K -= (i * tmp[i]); // Increment tmp[i/2] tmp[(i / 2)] += tmp[i]; // Increment the count count += tmp[i]; // Return count, if all // process are scheduled if (K <= 0) { return count; } } else { // Increment count if (K % i != 0) { count += Math.floor(K / i) + 1; } else { count += Math.floor(K / i); } // Return the count return count; } } } // If it is not possible to // schedule all process return -1; } // Driver Code let arr = [ 3, 1, 7, 2, 4 ]; let N = arr.length; let K = 15; document.write( minTime(arr, N, K)); </script>
4
Time Complexity: O(M), where M is the maximum element in the array.
Auxiliary Space: O(M)
Alternative Approach (Using STL): The given problem can be solved by using the Greedy Approach with the help of a max-heap. Follow the steps below to solve the problem:
Initialize a priority queue, say PQ, and insert all the elements of the given array into PQ.
Initialize a variable, say ans, as 0 to store the minimum number of seconds required.
Iterate a loop until the priority queue PQ is not empty and the value of K > 0:
Pop the top element of the priority queue, decrease K by the popped value, and increment ans by 1.
Divide the popped element by 2 and insert it back into the priority queue PQ.
After completing the above steps, print the value of ans as the result.
Below is the implementation of the above approach:
C++14
Python3
Javascript
// C++ program for the above approach#include <bits/stdc++.h>using namespace std; // Function to execute k processes that can be gained in// minimum amount of timevoid executeProcesses(int A[], int N, int K){ // Stores all the array elements priority_queue<int> pq; // Push all the elements to the // priority queue for (int i = 0; i < N; i++) { pq.push(A[i]); } // Stores the required result int ans = 0; // Loop while the queue is not // empty and K is positive while (!pq.empty() && K > 0) { // Store the top element // from the pq int top = pq.top(); // Pop it from the pq pq.pop(); // Add it to the answer ans ++; // Divide it by 2 and push it // back to the pq K = K - top; top = top / 2; pq.push(top); } // Print the answer cout << ans;} // Driver Codeint main(){ int A[] = { 3, 1, 7, 4, 2 }; int K = 15; int N = sizeof(A) / sizeof(A[0]); executeProcesses(A, N, K); return 0;}
# Python3 program for the above approach # Function to execute k processes that# can be gained in minimum amount of timedef executeProcesses(A, N, K): # Stores all the array elements pq = [] # Push all the elements to the # priority queue for i in range(N): pq.append(A[i]) # Stores the required result ans = 0 pq.sort() # Loop while the queue is not # empty and K is positive while (len(pq) > 0 and K > 0): # Store the top element # from the pq top = pq.pop() # Add it to the answer ans += 1 # Divide it by 2 and push it # back to the pq K -= top top //=2 pq.append(top) pq.sort() # Print the answer print(ans) # Driver CodeA = [ 3, 1, 7, 4, 2 ]K = 15N=len(A)executeProcesses(A, N, K) # This code is contributed by patel2127
<script> // Javascript program for the above approach // Function to execute k processes that// can be gained in minimum amount of timefunction executeProcesses(A, N, K){ // Stores all the array elements let pq = []; // Push all the elements to the // priority queue for(let i = 0; i < N; i++) { pq.push(A[i]); } // Stores the required result let ans = 0; pq.sort(function(a, b){return a - b;}); // Loop while the queue is not // empty and K is positive while (pq.length > 0 && K > 0) { // Store the top element // from the pq let top = pq.pop(); // Add it to the answer ans++; // Divide it by 2 and push it // back to the pq K -= top; top = Math.floor(top / 2); pq.push(top); pq.sort(function(a, b){return a - b;}); } // Print the answer document.write(ans)} // Driver Codelet A = [ 3, 1, 7, 4, 2 ];let K = 15;let N = A.length; executeProcesses(A, N, K); // This code is contributed by avanitrachhadiya2155 </script>
4
Time Complexity: O((N + K)*log N)
Auxiliary Space: O(N)
{
"code": null,
"e": 52,
"s": 24,
"text": "\n13 Jul, 2021"
},
{
"code": null,
"e": 376,
"s": 52,
"text": "Given a positive integer K and an array arr[] consisting of N positive integers, such that arr[i] is the number of processes ith processor can schedule in 1 second. The task is to minimize the total time required to schedule K processes such that after scheduling by the ith processor, arr[i] is reduced to floor(arr[i]/2)."
},
{
"code": null,
"e": 386,
"s": 376,
"text": "Examples:"
},
{
"code": null,
"e": 499,
"s": 386,
"text": "Input: N = 5, arr[] = {3, 1, 7, 2, 4}, K = 15Output: 4Explanation:The order of scheduled process are as follows:"
},
{
"code": null,
"e": 867,
"s": 499,
"text": "The 3rd process is scheduled first. The array arr[] modifies to {3, 1, 3, 2, 4}, as arr[2] = floor(arr[2] / 2) = floor(7 / 2) = 3.The 5th process is scheduled next. The array arr[] modifies to {3, 1, 3, 2, 2}.The 1st process is scheduled next. The array arr[] modifies to {1, 1, 3, 2, 2}.The 2nd process is scheduled next. The array arr[] modifies to {3, 0, 3, 2, 4}."
},
{
"code": null,
"e": 998,
"s": 867,
"text": "The 3rd process is scheduled first. The array arr[] modifies to {3, 1, 3, 2, 4}, as arr[2] = floor(arr[2] / 2) = floor(7 / 2) = 3."
},
{
"code": null,
"e": 1078,
"s": 998,
"text": "The 5th process is scheduled next. The array arr[] modifies to {3, 1, 3, 2, 2}."
},
{
"code": null,
"e": 1158,
"s": 1078,
"text": "The 1st process is scheduled next. The array arr[] modifies to {1, 1, 3, 2, 2}."
},
{
"code": null,
"e": 1238,
"s": 1158,
"text": "The 2nd process is scheduled next. The array arr[] modifies to {3, 0, 3, 2, 4}."
},
{
"code": null,
"e": 1355,
"s": 1238,
"text": "The total processes scheduled by all the process = 7 + 4 + 3 + 1 = 15(= K) and the total time required is 4 seconds."
},
{
"code": null,
"e": 1407,
"s": 1355,
"text": "Input: N = 4, arr[] = {1, 5, 8, 6}, K = 10Output: 2"
},
{
"code": null,
"e": 1827,
"s": 1407,
"text": "Naive Approach: The simplest approach to solve the given problem is to sort the given list in ascending order and choose the processor with the highest ability and reduce the value of K by that value and delete that processor from the list and add half of that in the sorted list again. Repeat the above process until at least K processes are scheduled and print the time required after scheduling at least K processes."
},
{
"code": null,
"e": 1876,
"s": 1827,
"text": "Time Complexity: O(N*log N)Auxiliary Space: O(N)"
},
{
"code": null,
"e": 2015,
"s": 1876,
"text": "Efficient Approach: The above approach can also be optimized by using the concept of Hashing. Follow the below steps to solve the problem:"
},
{
"code": null,
"e": 2114,
"s": 2015,
"text": "Initialize an auxiliary array tmp[] of the size of the maximum element present in the given array."
},
{
"code": null,
"e": 2213,
"s": 2114,
"text": "Initialize a variable, say count to store the minimum time to schedule all processes respectively."
},
{
"code": null,
"e": 2917,
"s": 2213,
"text": "Traverse the given array tmp[] from the end and perform the following steps:If the current element in tmp[] is greater than 0 and i * tmp[i] is smaller than K.Decrease the value of K by the value i * tmp[i].Increase tmp[i/2] by tmp[i] as the ability of the processor will decrease by half.Increase the value of count by the value tmp[i].If the value of K is already smaller than or equal to 0, then print the value of count as the result.If the current element in the array tmp[] is at least 0 and the value of i * tmp[i] is at least K, then perform the following steps:If K is divisible by the current index, then increment the value of count by K / i.Otherwise, increment the value of count by K/i +1."
},
{
"code": null,
"e": 3280,
"s": 2917,
"text": "If the current element in tmp[] is greater than 0 and i * tmp[i] is smaller than K.Decrease the value of K by the value i * tmp[i].Increase tmp[i/2] by tmp[i] as the ability of the processor will decrease by half.Increase the value of count by the value tmp[i].If the value of K is already smaller than or equal to 0, then print the value of count as the result."
},
{
"code": null,
"e": 3329,
"s": 3280,
"text": "Decrease the value of K by the value i * tmp[i]."
},
{
"code": null,
"e": 3412,
"s": 3329,
"text": "Increase tmp[i/2] by tmp[i] as the ability of the processor will decrease by half."
},
{
"code": null,
"e": 3461,
"s": 3412,
"text": "Increase the value of count by the value tmp[i]."
},
{
"code": null,
"e": 3563,
"s": 3461,
"text": "If the value of K is already smaller than or equal to 0, then print the value of count as the result."
},
{
"code": null,
"e": 3829,
"s": 3563,
"text": "If the current element in the array tmp[] is at least 0 and the value of i * tmp[i] is at least K, then perform the following steps:If K is divisible by the current index, then increment the value of count by K / i.Otherwise, increment the value of count by K/i +1."
},
{
"code": null,
"e": 3913,
"s": 3829,
"text": "If K is divisible by the current index, then increment the value of count by K / i."
},
{
"code": null,
"e": 3964,
"s": 3913,
"text": "Otherwise, increment the value of count by K/i +1."
},
{
"code": null,
"e": 4113,
"s": 3964,
"text": "After completing the above steps, print -1 if it is not possible to schedule all processes. Otherwise, print the count as the minimum time required."
},
{
"code": null,
"e": 4164,
"s": 4113,
"text": "Below is the implementation of the above approach:"
},
{
"code": null,
"e": 4168,
"s": 4164,
"text": "C++"
},
{
"code": null,
"e": 4173,
"s": 4168,
"text": "Java"
},
{
"code": null,
"e": 4181,
"s": 4173,
"text": "Python3"
},
{
"code": null,
"e": 4184,
"s": 4181,
"text": "C#"
},
{
"code": null,
"e": 4195,
"s": 4184,
"text": "Javascript"
},
{
"code": "// C++ program for the above approach#include <bits/stdc++.h>using namespace std; // Function to find minimum required// time to schedule all processint minTime(int A[], int n, int K){ // Stores max element from A[] int max_ability = A[0]; // Find the maximum element for(int i = 1; i < n; i++) { max_ability = max(max_ability, A[i]); } // Stores frequency of each element int tmp[max_ability + 1] = {0}; // Stores minimum time required // to schedule all process int count = 0; // Count frequencies of elements for(int i = 0; i < n; i++) { tmp[A[i]]++; } // Find the minimum time for(int i = max_ability; i >= 0; i--) { if (tmp[i] != 0) { if (tmp[i] * i < K) { // Decrease the value // of K K -= (i * tmp[i]); // Increment tmp[i/2] tmp[i / 2] += tmp[i]; // Increment the count count += tmp[i]; // Return count, if all // process are scheduled if (K <= 0) { return count; } } else { // Increment count if (K % i != 0) { count += (K / i) + 1; } else { count += (K / i); } // Return the count return count; } } } // If it is not possible to // schedule all process return -1;} // Driver codeint main(){ int arr[] = { 3, 1, 7, 2, 4 }; int N = 5; int K = 15; cout << minTime(arr, N, K); return 0;} // This code is contributed by mohit kumar 29",
"e": 6049,
"s": 4195,
"text": null
},
{
"code": "// Java program for the above approach import java.util.*;import java.lang.*; class GFG { // Function to find minimum required // time to schedule all process static int minTime(int[] A, int n, int K) { // Stores max element from A[] int max_ability = A[0]; // Find the maximum element for (int i = 1; i < n; i++) { max_ability = Math.max( max_ability, A[i]); } // Stores frequency of each element int tmp[] = new int[max_ability + 1]; // Stores minimum time required // to schedule all process int count = 0; // Count frequencies of elements for (int i = 0; i < n; i++) { tmp[A[i]]++; } // Find the minimum time for (int i = max_ability; i >= 0; i--) { if (tmp[i] != 0) { if (tmp[i] * i < K) { // Decrease the value // of K K -= (i * tmp[i]); // Increment tmp[i/2] tmp[i / 2] += tmp[i]; // Increment the count count += tmp[i]; // Return count, if all // process are scheduled if (K <= 0) { return count; } } else { // Increment count if (K % i != 0) { count += (K / i) + 1; } else { count += (K / i); } // Return the count return count; } } } // If it is not possible to // schedule all process return -1; } // Driver Code public static void main(String[] args) { int arr[] = { 3, 1, 7, 2, 4 }; int N = arr.length; int K = 15; System.out.println( minTime(arr, N, K)); }}",
"e": 8079,
"s": 6049,
"text": null
},
{
"code": "# Python3 program for the above approach # Function to find minimum required# time to schedule all processdef minTime(A, n, K): # Stores max element from A[] max_ability = A[0] # Find the maximum element for i in range(1, n): max_ability = max(max_ability, A[i]) # Stores frequency of each element tmp = [0 for i in range(max_ability + 1)] # Stores minimum time required # to schedule all process count = 0 # Count frequencies of elements for i in range(n): tmp[A[i]] += 1 # Find the minimum time i = max_ability while(i >= 0): if (tmp[i] != 0): if (tmp[i] * i < K): # Decrease the value # of K K -= (i * tmp[i]) # Increment tmp[i/2] tmp[i // 2] += tmp[i] # Increment the count count += tmp[i] # Return count, if all # process are scheduled if (K <= 0): return count else: # Increment count if (K % i != 0): count += (K // i) + 1 else: count += (K // i) # Return the count return count i -= 1 # If it is not possible to # schedule all process return -1 # Driver codeif __name__ == '__main__': arr = [ 3, 1, 7, 2, 4 ] N = 5 K = 15 print(minTime(arr, N, K)) # This code is contributed by SURENDRA_GANGWAR",
"e": 9641,
"s": 8079,
"text": null
},
{
"code": "// C# program for the above approachusing System; class GFG{ // Function to find minimum required// time to schedule all processstatic int minTime(int[] A, int n, int K){ // Stores max element from A[] int max_ability = A[0]; // Find the maximum element for(int i = 1; i < n; i++) { max_ability = Math.Max( max_ability, A[i]); } // Stores frequency of each element int []tmp = new int[max_ability + 1]; // Stores minimum time required // to schedule all process int count = 0; // Count frequencies of elements for(int i = 0; i < n; i++) { tmp[A[i]]++; } // Find the minimum time for(int i = max_ability; i >= 0; i--) { if (tmp[i] != 0) { if (tmp[i] * i < K) { // Decrease the value // of K K -= (i * tmp[i]); // Increment tmp[i/2] tmp[i / 2] += tmp[i]; // Increment the count count += tmp[i]; // Return count, if all // process are scheduled if (K <= 0) { return count; } } else { // Increment count if (K % i != 0) { count += (K / i) + 1; } else { count += (K / i); } // Return the count return count; } } } // If it is not possible to // schedule all process return -1;} // Driver Codepublic static void Main(string[] args){ int []arr = { 3, 1, 7, 2, 4 }; int N = arr.Length; int K = 15; Console.WriteLine(minTime(arr, N, K));}} // This code is contributed by ukasp",
"e": 11530,
"s": 9641,
"text": null
},
{
"code": "<script> // JavaScript Program to implement// the above approach // Function to find minimum required // time to schedule all process function minTime(A, n, K) { // Stores max element from A[] let max_ability = A[0]; // Find the maximum element for (let i = 1; i < n; i++) { max_ability = Math.max( max_ability, A[i]); } // Stores frequency of each element let tmp = Array.from({length: max_ability + 1}, (_, i) => 0); // Stores minimum time required // to schedule all process let count = 0; // Count frequencies of elements for (let i = 0; i < n; i++) { tmp[A[i]]++; } // Find the minimum time for (let i = max_ability; i >= 0; i--) { if (tmp[i] != 0) { if (tmp[i] * i < K) { // Decrease the value // of K K -= (i * tmp[i]); // Increment tmp[i/2] tmp[(i / 2)] += tmp[i]; // Increment the count count += tmp[i]; // Return count, if all // process are scheduled if (K <= 0) { return count; } } else { // Increment count if (K % i != 0) { count += Math.floor(K / i) + 1; } else { count += Math.floor(K / i); } // Return the count return count; } } } // If it is not possible to // schedule all process return -1; } // Driver Code let arr = [ 3, 1, 7, 2, 4 ]; let N = arr.length; let K = 15; document.write( minTime(arr, N, K)); </script>",
"e": 13512,
"s": 11530,
"text": null
},
{
"code": null,
"e": 13514,
"s": 13512,
"text": "4"
},
{
"code": null,
"e": 13603,
"s": 13514,
"text": "Time Complexity: O(M), where M is the maximum element in the array.Auxiliary Space: O(M)"
},
{
"code": null,
"e": 13769,
"s": 13603,
"text": "Alternative Approach(Using STL): The given problem can be solved by using the Greedy Approach with the help of max-heap. Follow the steps below to solve the problem:"
},
{
"code": null,
"e": 13862,
"s": 13769,
"text": "Initialize a priority queue, say PQ, and insert all the elements of the given array into PQ."
},
{
"code": null,
"e": 13945,
"s": 13862,
"text": "Initialize a variable, say ans as 0 to store the resultant maximum diamond gained."
},
{
"code": null,
"e": 14216,
"s": 13945,
"text": "Iterate a loop until the priority queue PQ is not empty and the value of K > 0:Pop the top element of the priority queue and add the popped element to the variable ans.Divide the popped element by 2 and insert it into the priority queue PQ.Decrement the value of K by 1."
},
{
"code": null,
"e": 14306,
"s": 14216,
"text": "Pop the top element of the priority queue and add the popped element to the variable ans."
},
{
"code": null,
"e": 14379,
"s": 14306,
"text": "Divide the popped element by 2 and insert it into the priority queue PQ."
},
{
"code": null,
"e": 14410,
"s": 14379,
"text": "Decrement the value of K by 1."
},
{
"code": null,
"e": 14482,
"s": 14410,
"text": "After completing the above steps, print the value of ans as the result."
},
{
"code": null,
"e": 14533,
"s": 14482,
"text": "Below is the implementation of the above approach:"
},
{
"code": null,
"e": 14539,
"s": 14533,
"text": "C++14"
},
{
"code": null,
"e": 14547,
"s": 14539,
"text": "Python3"
},
{
"code": null,
"e": 14558,
"s": 14547,
"text": "Javascript"
},
{
"code": "// C++ program for the above approach#include <bits/stdc++.h>using namespace std; // Function to execute k processes that can be gained in// minimum amount of timevoid executeProcesses(int A[], int N, int K){ // Stores all the array elements priority_queue<int> pq; // Push all the elements to the // priority queue for (int i = 0; i < N; i++) { pq.push(A[i]); } // Stores the required result int ans = 0; // Loop while the queue is not // empty and K is positive while (!pq.empty() && K > 0) { // Store the top element // from the pq int top = pq.top(); // Pop it from the pq pq.pop(); // Add it to the answer ans ++; // Divide it by 2 and push it // back to the pq K = K - top; top = top / 2; pq.push(top); } // Print the answer cout << ans;} // Driver Codeint main(){ int A[] = { 3, 1, 7, 4, 2 }; int K = 15; int N = sizeof(A) / sizeof(A[0]); executeProcesses(A, N, K); return 0;}",
"e": 15599,
"s": 14558,
"text": null
},
{
"code": "# Python3 program for the above approach # Function to execute k processes that# can be gained in minimum amount of timedef executeProcesses(A, N, K): # Stores all the array elements pq = [] # Push all the elements to the # priority queue for i in range(N): pq.append(A[i]) # Stores the required result ans = 0 pq.sort() # Loop while the queue is not # empty and K is positive while (len(pq) > 0 and K > 0): # Store the top element # from the pq top = pq.pop() # Add it to the answer ans += 1 # Divide it by 2 and push it # back to the pq K -= top top //=2 pq.append(top) pq.sort() # Print the answer print(ans) # Driver CodeA = [ 3, 1, 7, 4, 2 ]K = 15N=len(A)executeProcesses(A, N, K) # This code is contributed by patel2127",
"e": 16511,
"s": 15599,
"text": null
},
{
"code": "<script> // Javascript program for the above approach // Function to execute k processes that// can be gained in minimum amount of timefunction executeProcesses(A, N, K){ // Stores all the array elements let pq = []; // Push all the elements to the // priority queue for(let i = 0; i < N; i++) { pq.push(A[i]); } // Stores the required result let ans = 0; pq.sort(function(a, b){return a - b;}); // Loop while the queue is not // empty and K is positive while (pq.length > 0 && K > 0) { // Store the top element // from the pq let top = pq.pop(); // Add it to the answer ans++; // Divide it by 2 and push it // back to the pq K -= top; top = Math.floor(top / 2); pq.push(top); pq.sort(function(a, b){return a - b;}); } // Print the answer document.write(ans)} // Driver Codelet A = [ 3, 1, 7, 4, 2 ];let K = 15;let N = A.length; executeProcesses(A, N, K); // This code is contributed by avanitrachhadiya2155 </script>",
"e": 17614,
"s": 16511,
"text": null
},
{
"code": null,
"e": 17616,
"s": 17614,
"text": "4"
},
{
"code": null,
"e": 17650,
"s": 17616,
"text": "Time Complexity: O((N + K)*log N)"
},
{
"code": null,
"e": 17672,
"s": 17650,
"text": "Auxiliary Space: O(N)"
},
{
"code": null,
"e": 17687,
"s": 17672,
"text": "mohit kumar 29"
},
{
"code": null,
"e": 17693,
"s": 17687,
"text": "ukasp"
},
{
"code": null,
"e": 17703,
"s": 17693,
"text": "sanjoy_62"
},
{
"code": null,
"e": 17720,
"s": 17703,
"text": "SURENDRA_GANGWAR"
},
{
"code": null,
"e": 17727,
"s": 17720,
"text": "Ask149"
},
{
"code": null,
"e": 17748,
"s": 17727,
"text": "avanitrachhadiya2155"
},
{
"code": null,
"e": 17758,
"s": 17748,
"text": "patel2127"
},
{
"code": null,
"e": 17775,
"s": 17758,
"text": "arorakashish0911"
},
{
"code": null,
"e": 17782,
"s": 17775,
"text": "Amazon"
},
{
"code": null,
"e": 17798,
"s": 17782,
"text": "Amazon-Question"
},
{
"code": null,
"e": 17820,
"s": 17798,
"text": "interview-preparation"
},
{
"code": null,
"e": 17827,
"s": 17820,
"text": "Arrays"
},
{
"code": null,
"e": 17832,
"s": 17827,
"text": "Hash"
},
{
"code": null,
"e": 17837,
"s": 17832,
"text": "Heap"
},
{
"code": null,
"e": 17850,
"s": 17837,
"text": "Mathematical"
},
{
"code": null,
"e": 17857,
"s": 17850,
"text": "Amazon"
},
{
"code": null,
"e": 17864,
"s": 17857,
"text": "Arrays"
},
{
"code": null,
"e": 17869,
"s": 17864,
"text": "Hash"
},
{
"code": null,
"e": 17882,
"s": 17869,
"text": "Mathematical"
},
{
"code": null,
"e": 17887,
"s": 17882,
"text": "Heap"
}
] |
Stack indexOf() method in Java with Example | 24 Dec, 2018
The Java.util.Stack.indexOf(Object element) method is used to check and find the occurrence of a particular element in the Stack. If the element is present then the index of the first occurrence of the element is returned otherwise -1 is returned if the Stack does not contain the element.
Syntax:
Stack.indexOf(Object element)
Parameters: This method accepts a mandatory parameter element of the type of Stack. It specifies the element whose occurrence is needed to be checked in the Stack.
Return Value: This method returns the index or position of the first occurrence of the element in the Stack. Else it returns -1 if the element is not present in the Stack. The returned value is of integer type.
Below programs illustrate the Java.util.Stack.indexOf() method:
Program 1:
// Java code to illustrate indexOf()import java.util.*; public class StackDemo { public static void main(String args[]) { // Creating an empty Stack Stack<String> stack = new Stack<String>(); // Use add() method to add elements in the Stack stack.add("Geeks"); stack.add("for"); stack.add("Geeks"); stack.add("10"); stack.add("20"); // Displaying the Stack System.out.println("Stack: " + stack); // The first position of an element // is returned System.out.println("The first occurrence" + " of Geeks is at index:" + stack.indexOf("Geeks")); System.out.println("The first occurrence" + " of 10 is at index: " + stack.indexOf("10")); }}
Stack: [Geeks, for, Geeks, 10, 20]
The first occurrence of Geeks is at index:0
The first occurrence of 10 is at index: 3
Program 2:
// Java code to illustrate indexOf()import java.util.*; public class StackDemo { public static void main(String args[]) { // Creating an empty Stack Stack<Integer> stack = new Stack<Integer>(); // Use add() method to add elements in the Stack stack.add(1); stack.add(2); stack.add(3); stack.add(10); stack.add(20); // Displaying the Stack System.out.println("Stack: " + stack); // The first position of an element // is returned System.out.println("The first occurrence" + " of 2 is at index: " + stack.indexOf(2)); System.out.println("The first occurrence" + " of 20 is at index: " + stack.indexOf(20)); }}
Stack: [1, 2, 3, 10, 20]
The first occurrence of 2 is at index: 1
The first occurrence of 20 is at index: 4
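Neither of the above programs shows the case where the element is absent. As a small additional illustration (not part of the original article; the class name below is invented for this sketch), the following program shows the -1 return value for a missing element, and that the reported index is the position of the first occurrence counted from the bottom of the stack:
import java.util.Stack;

// Illustrative sketch (not part of the original article).
public class StackIndexOfMissing {

    public static void main(String[] args) {
        Stack<String> stack = new Stack<String>();
        stack.add("Geeks");
        stack.add("for");
        stack.add("Geeks");

        // "Java" was never pushed, so indexOf() returns -1
        System.out.println(stack.indexOf("Java"));

        // The search starts from the bottom of the stack, so the first
        // occurrence of "Geeks" (index 0) is reported, not the later one
        System.out.println(stack.indexOf("Geeks"));
    }
}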
{
"code": null,
"e": 28,
"s": 0,
"text": "\n24 Dec, 2018"
},
{
"code": null,
"e": 318,
"s": 28,
"text": "The Java.util.Stack.indexOf(Object element) method is used to check and find the occurrence of a particular element in the Stack. If the element is present then the index of the first occurrence of the element is returned otherwise -1 is returned if the Stack does not contain the element."
},
{
"code": null,
"e": 326,
"s": 318,
"text": "Syntax:"
},
{
"code": null,
"e": 356,
"s": 326,
"text": "Stack.indexOf(Object element)"
},
{
"code": null,
"e": 520,
"s": 356,
"text": "Parameters: This method accepts a mandatory parameter element of the type of Stack. It specifies the element whose occurrence is needed to be checked in the Stack."
},
{
"code": null,
"e": 731,
"s": 520,
"text": "Return Value: This method returns the index or position of the first occurrence of the element in the Stack. Else it returns -1 if the element is not present in the Stack. The returned value is of integer type."
},
{
"code": null,
"e": 795,
"s": 731,
"text": "Below programs illustrate the Java.util.Stack.indexOf() method:"
},
{
"code": null,
"e": 806,
"s": 795,
"text": "Program 1:"
},
{
"code": "// Java code to illustrate indexOf()import java.util.*; public class StackDemo { public static void main(String args[]) { // Creating an empty Stack Stack<String> stack = new Stack<String>(); // Use add() method to add elements in the Stack stack.add(\"Geeks\"); stack.add(\"for\"); stack.add(\"Geeks\"); stack.add(\"10\"); stack.add(\"20\"); // Displaying the Stack System.out.println(\"Stack: \" + stack); // The first position of an element // is returned System.out.println(\"The first occurrence\" + \" of Geeks is at index:\" + stack.indexOf(\"Geeks\")); System.out.println(\"The first occurrence\" + \" of 10 is at index: \" + stack.indexOf(\"10\")); }}",
"e": 1663,
"s": 806,
"text": null
},
{
"code": null,
"e": 1785,
"s": 1663,
"text": "Stack: [Geeks, for, Geeks, 10, 20]\nThe first occurrence of Geeks is at index:0\nThe first occurrence of 10 is at index: 3\n"
},
{
"code": null,
"e": 1796,
"s": 1785,
"text": "Program 2:"
},
{
"code": "// Java code to illustrate indexOf()import java.util.*; public class StackDemo { public static void main(String args[]) { // Creating an empty Stack Stack<Integer> stack = new Stack<Integer>(); // Use add() method to add elements in the Stack stack.add(1); stack.add(2); stack.add(3); stack.add(10); stack.add(20); // Displaying the Stack System.out.println(\"Stack: \" + stack); // The first position of an element // is returned System.out.println(\"The first occurrence\" + \" of Geeks is at index:\" + stack.indexOf(2)); System.out.println(\"The first occurrence\" + \" of 10 is at index: \" + stack.indexOf(20)); }}",
"e": 2627,
"s": 1796,
"text": null
},
{
"code": null,
"e": 2739,
"s": 2627,
"text": "Stack: [1, 2, 3, 10, 20]\nThe first occurrence of Geeks is at index:1\nThe first occurrence of 10 is at index: 4\n"
},
{
"code": null,
"e": 2759,
"s": 2739,
"text": "Java - util package"
},
{
"code": null,
"e": 2776,
"s": 2759,
"text": "Java-Collections"
},
{
"code": null,
"e": 2791,
"s": 2776,
"text": "Java-Functions"
},
{
"code": null,
"e": 2802,
"s": 2791,
"text": "Java-Stack"
},
{
"code": null,
"e": 2807,
"s": 2802,
"text": "Java"
},
{
"code": null,
"e": 2812,
"s": 2807,
"text": "Java"
},
{
"code": null,
"e": 2829,
"s": 2812,
"text": "Java-Collections"
},
{
"code": null,
"e": 2927,
"s": 2829,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2942,
"s": 2927,
"text": "Arrays in Java"
},
{
"code": null,
"e": 2986,
"s": 2942,
"text": "Split() String method in Java with examples"
},
{
"code": null,
"e": 3022,
"s": 2986,
"text": "Arrays.sort() in Java with examples"
},
{
"code": null,
"e": 3047,
"s": 3022,
"text": "Reverse a string in Java"
},
{
"code": null,
"e": 3098,
"s": 3047,
"text": "Object Oriented Programming (OOPs) Concept in Java"
},
{
"code": null,
"e": 3120,
"s": 3098,
"text": "For-each loop in Java"
},
{
"code": null,
"e": 3151,
"s": 3120,
"text": "How to iterate any Map in Java"
},
{
"code": null,
"e": 3170,
"s": 3151,
"text": "Interfaces in Java"
},
{
"code": null,
"e": 3200,
"s": 3170,
"text": "HashMap in Java with Examples"
}
] |
Java – Lambda Expression Variable Capturing with Examples | 28 Jul, 2021
Variables defined by the enclosing scope of a lambda expression are accessible within the lambda expression. For example, a lambda expression can use an instance or static variable defined by its enclosing class. A lambda expression also has access to this (both explicitly and implicitly), which refers to the invoking instance of the lambda expression’s enclosing class. Thus, a lambda expression can obtain or set the value of an instance or static variable and call a method defined by its enclosing class.
Using a local variable of the enclosing scope inside a lambda expression, however, is a different situation, as stated below.
However, when a lambda expression uses a local variable from its enclosing scope, a special situation is created that is referred to as a variable capture. In this case, a lambda expression may only use local variables that are effectively final. An effectively final variable is one whose value does not change after it is first assigned. There is no need to explicitly declare such a variable as final, although doing so would not be an error.
It is important to understand that a local variable of the enclosing scope cannot be modified by the lambda expression. Doing so would remove its effectively final status, thus rendering it illegal for capture.
There are certain key points to be remembered, which are as follows:
Any local variable, formal parameter, or exception parameter used but not declared in a lambda expression must either be declared final or be effectively final, or a compile-time error occurs where the use is attempted.
Any local variable used but not declared in a lambda body must be definitely assigned before the lambda body, or a compile-time error occurs.
Similar rules on variable use apply in the body of an inner class. The restriction to effectively final variables prohibits access to dynamically-changing local variables, whose capture would likely introduce concurrency problems. Compared to the final restriction, it reduces the clerical burden on programmers.
The restriction to effectively final variables includes standard loop variables, but not enhanced-for loop variables, which are treated as distinct for each iteration of the loop.
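A small sketch of the last point above (the class and variable names here are illustrative, not from the original text): the enhanced-for variable is a fresh, effectively final binding on every iteration and may be captured, while an ordinary loop counter is reassigned and may not:

Java

// Illustrating which loop variables may be captured by a lambda
import java.util.*;
import java.util.function.Supplier;

class LoopCaptureDemo {
    public static void main(String[] args)
    {
        List<Supplier<String>> suppliers = new ArrayList<>();

        // OK: the enhanced-for variable is a fresh, effectively final
        // binding on every iteration, so the lambda may capture it
        for (String word : Arrays.asList("Geeks", "for", "Geeks")) {
            suppliers.add(() -> word);
        }

        // Compile-time error if uncommented: i is reassigned by i++,
        // so it is not effectively final and cannot be captured
        // for (int i = 0; i < 3; i++) {
        //     suppliers.add(() -> "value " + i);
        // }

        suppliers.forEach(s -> System.out.println(s.get()));
    }
}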
The following program illustrates the difference between effectively final and mutable local variables:
Example 1
Java
// Java Program Illustrating Difference between
// Effectively final and Mutable Local Variables

// Importing required classes
import java.io.*;

// An example of capturing a local variable from the
// enclosing scope

// Interface
interface MyFunction {

    // Method inside the interface
    int func(int n);
}

// Main class
class GFG {

    // Main driver method
    public static void main(String[] args)
    {
        // Custom local variable that can be captured
        int number = 10;

        MyFunction myLambda = (n) ->
        {
            // This use of number is OK. It does not modify
            // number
            int value = number + n;

            // However, the following is illegal because it
            // attempts to modify the value of number
            // number++;

            return value;
        };

        // The following line would also cause an error,
        // because it would remove the effectively final
        // status from number.
        // number = 9;

        System.out.println("GFG!");
    }
}
GFG!
Output explanation:
As the comments indicate, number is effectively final and can, therefore, be used inside myLambda. However, if number were to be modified, either inside the lambda or outside of it, number would lose its effectively final status. This would cause an error, and the program would not compile.
Example 2
Java
// Java Program Illustrating Difference between
// Effectively final and Mutable Local Variables

// Importing input output classes
import java.io.*;

// Interface
interface MyInterface {

    // Method inside the interface
    void myFunction();
}

// Main class
class GFG {

    // Custom initialization
    int data = 170;

    // Main driver method
    public static void main(String[] args)
    {
        // Creating object of this class
        // inside the main() method
        GFG gfg = new GFG();

        // Creating object of interface
        // inside the main() method
        MyInterface intFace = () ->
        {
            System.out.println("Data : " + gfg.data);

            gfg.data += 500;

            System.out.println("Data : " + gfg.data);
        };

        intFace.myFunction();

        gfg.data += 200;

        System.out.println("Data : " + gfg.data);
    }
}
Data : 170
Data : 670
Data : 870
Note: It is important to emphasize that a lambda expression can use and modify an instance variable from its invoking class. It just can’t use a local variable of its enclosing scope unless that variable is effectively final.
Get parent of current directory using Python | 13 Jul, 2021
In Python, the OS module is used to interact with the operating system. It comes under Python’s standard utility modules. This module provides a portable way of using operating system dependent functionality. The os and os.path modules include many functions to interact with the file system. The OS module provides various ways of getting the parent directory. Some of the ways are:
Using os.path.abspath()
Using os.path.dirname()
Using os.path.relpath() and os.path.dirname()
os.path.abspath() can be used to get the parent directory. This method is used to get the normalized version of the path. This function also needs the help of os.path.join() and os.pardir(). os.path.join() method in Python join one or more path components intelligently. This method concatenates various path components with exactly one directory separator (‘/’) following each non-empty part except the last path component. If the last path component to be joined is empty then a directory separator (‘/’) is put at the end.
Syntax: os.path.abspath(path)
Parameters: path: A path-like object representing a file system path.
Return Type: Returns a string that is a normalized version of the path.
Example:
Python3
# Python program to get parent
# directory

import os

# get current directory
path = os.getcwd()
print("Current Directory", path)

# prints parent directory
print(os.path.abspath(os.path.join(path, os.pardir)))
Output:
os.path.dirname() method in Python is used to get the directory name from the specified path.
Syntax: os.path.dirname(path)
Parameter: path: A path-like object representing a file system path.
Return Type: This method returns a string value which represents the directory name from the specified path.
Example:
Python3
# Python program to get parent
# directory

import os

# get current directory
path = os.getcwd()
print("Current Directory", path)
print()

# parent directory
parent = os.path.dirname(path)
print("Parent directory", parent)
Output:
In the above examples, getting the parent directory was limited to one level, i.e. we were only able to get the parent of the current directory up to one level. Suppose we want to find the parent of the parent directory; then the above code fails. This can be achieved by using os.path.relpath() and os.path.dirname() together. The os.path.relpath() method in Python is used to get a relative filepath to the given path, either from the current working directory or from a given directory.
Syntax: os.path.relpath(path, start = os.curdir)
Parameter:
path: A path-like object representing the file system path.
start (optional): A path-like object representing the file system path. The relative path for the given path will be computed with respect to the directory indicated by start. The default value of this parameter is os.curdir, which is a constant string used by the operating system to refer to the current directory. A path-like object is either a string or bytes object representing a path.
Return Type: This method returns a string value which represents the relative file path to the given path from the start directory.
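As a quick illustration of what os.path.relpath() returns on its own (the paths below are illustrative, not from the article):

Python3

import os.path

# path of interest, relative to the chosen start directory
print(os.path.relpath('/home/user/projects/gfg', start='/home/user'))
# prints: projects/gfg  (on a POSIX-style system)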
Example: To get the parent directory according to the levels specified by the user, we will create a function getParent() which takes path and levels as arguments. Inside the function, a for loop iterates levels+1 times and os.path.dirname() is called on each iteration. This gives us the starting point from which os.path.relpath() will compute the relative file path. Below is the implementation.
Python3
# Python program to get the
# parent directory

import os.path

# function to get parent
def getParent(path, levels = 1):
    common = path

    # Using for loop for getting
    # starting point required for
    # os.path.relpath()
    for i in range(levels + 1):

        # Starting point
        common = os.path.dirname(common)

    # Parent directory upto specified
    # level
    return os.path.relpath(path, common)


path = 'D:/Pycharm projects / GeeksforGeeks / Nikhil / gfg.txt'
print(getParent(path, 2))
Output:
ReactJS | State in React | 08 Oct, 2021
In the previous article on ReactJS | Components, we got to know that React Components can be broadly classified into Functional and Class Components. It is also seen that Functional Components are faster and much simpler than Class Components. The primary difference between the two is the availability of the State.
What is State?
The State of a React Component class can be defined as an object of a set of observable properties that control the behavior of the component. In other words, the State of a component is an object that holds some information that may change over the lifetime of the component. For example, let us think of the clock that we created in this article, we were calling the render() method every second explicitly, but React provides a better way to achieve the same result and that is by using State, storing the value of time as a member of the component’s state. We will look into this more elaborately later in the article.
Difference between Props and State.
We have already learned about Props, and we got to know that Props are also objects that hold information to control the behavior of that particular component. This sounds similar to State indeed, but Props and State are nowhere near the same. Let us differentiate the two.
Props are immutable i.e. once set the props cannot be changed, while State is an observable object that is to be used to hold data that may change over time and to control the behavior after each change.
States can be used in Class Components, Functional components with the use of React Hooks (useState and other methods) while Props don’t have this limitation.
While Props are set by the parent component, State is generally updated by event handlers. For example, let us consider toggling the theme of the GeeksforGeeks {IDE} page. It can be implemented using State, where the probable values of the State can be either light or dark, and upon selection, the IDE changes its color.
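To make that concrete, a minimal sketch (the component and handler names are illustrative assumptions, not from the original article) of a theme toggle in which an event handler updates the State:

javascript

class ThemeToggle extends React.Component {
    constructor(props) {
        super(props);

        // initial state: light theme
        this.state = { darkTheme: false };
        this.handleToggle = this.handleToggle.bind(this);
    }

    // event handler that updates the State
    handleToggle() {
        this.setState((prevState) => ({ darkTheme: !prevState.darkTheme }));
    }

    render() {
        return (
            <button onClick={this.handleToggle}>
                {this.state.darkTheme ? "Switch to light" : "Switch to dark"}
            </button>
        );
    }
}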
Now we have learned the basics of State and are able to differentiate it from Props. We have also seen a few places where we can use State; now all that is left is to know about the basic conventions of using the React State before implementing one for ourselves.
Conventions of Using State in React:
State of a component should prevail throughout its lifetime; thus, we must first have some initial state. To do so, we should define the State in the constructor of the component’s class. To define the state of any class, we can use the sample format below.
javascript
class MyClass extends React.Component {
    constructor(props) {
        super(props);

        // the initial state of the component
        this.state = { attribute : "value" };
    }
}
State should never be updated explicitly. React uses an observable object as the state that observes what changes are made to the state and helps the component behave accordingly. For example, if we update the state of any component like the following the webpage will not re-render itself because React State will not be able to detect the changes made.
this.state.attribute = "new-value";
Thus, React provides its own method setState(). setState() method takes a single parameter and expects an object which should contain the set of values to be updated. Once the update is done the method implicitly calls the render() method to repaint the page. Hence, the correct method of updating the value of a state will be similar to the code below.
this.setState({attribute: "new-value"});
The only time we are allowed to define the state explicitly is in the constructor to provide the initial state.
React is highly efficient and thus uses asynchronous state updates i.e. React may update multiple setState() updates in a single go. Thus using the value of the current state may not always generate the desired result. For example, let us take a case where we must keep a count (Likes of a Post). Many developers may miswrite the code as below.
this.setState({counter: this.state.count + this.props.diff});
Now due to asynchronous processing, this.state.count may produce an undesirable result. A more appropriate approach would be to use the following.
this.setState((prevState, props) => ({
counter: prevState.count + props.diff
}));
In the above code, we are using the ES6 arrow function format to take the previous state and props of the component as parameters and update the counter. The same can be written using the traditional function syntax as follows.
this.setState(function(prevState, props){
return {counter: prevState.count + props.diff};
});
State updates are independent. The state object of a component may contain multiple attributes and React allows to use setState() function to update only a subset of those attributes as well as using multiple setState() methods to update each attribute value independently. For example, let us take the following component state into account.
this.state = {
    darkTheme: false,
    searchTerm: ''
};
The above definition has two attributes we can use a single setState() method to update both together, or we can use separate setState() methods to update the attributes independently. React internally merges setState() methods or updates only those attributes which are needed.
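As a hedged sketch (the handler names are assumptions) of such independent updates inside a component holding the state above, each handler touches only one attribute and React merges the rest:

javascript

// updates only darkTheme; searchTerm is left untouched
handleThemeToggle() {
    this.setState((prevState) => ({ darkTheme: !prevState.darkTheme }));
}

// updates only searchTerm; darkTheme is left untouched
handleSearchChange(event) {
    this.setState({ searchTerm: event.target.value });
}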
After going through the article we should have a clear concept of State in React, but other than the constructor and render methods can we add user-defined functions as well? Yes, we can also create user-defined functions inside a class but how to call them? React provides a few special methods that are called at some proper context that solves this problem. We will see these special functions in the next article on the Lifecycle of a component.
For the implementation of the state go to the given link below:
Implementing state & Lifecycle.
Kotlin Regex Patterns | 07 Feb, 2022
Regular expressions are used to search for text and for more advanced text manipulation. Regular expressions are a fundamental part of almost every programming language, and Kotlin is no exception. In Kotlin, support for regular expressions is provided through the Regex class. An object of this class represents a regular expression that can be used for string matching purposes. We can easily find uses of regular expressions in different kinds of software, from the simplest to incredibly complex applications. In Kotlin, we build regular expressions with Regex, for example:
Regex("pen")
"pen".toRegex()
Regex.fromLiteral("pen")
A pattern defines the text we need to search for or manipulate. It consists of text literals and metacharacters. Metacharacters are special characters that control the evaluation of the regular expression. For example, with \s we search for white spaces.In Kotlin, some of the regex patterns are given in the table below.
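A few commonly used patterns (a representative subset of standard regex syntax accepted by Regex, listed here for reference rather than as an exhaustive table):
. – any single character
\d – a digit, \D – a non-digit
\s – a whitespace character, \S – a non-whitespace character
\w – a word character, \W – a non-word character
^ and $ – the beginning and the end of the input
[abc] – any one of a, b or c; [^abc] – any character except a, b and c
x*, x+, x? – zero or more, one or more, zero or one occurrences of x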
Note: First we have to create a pattern; then we can use one of the functions to apply the pattern to a text string. The functions include find(), findAll(), replace(), and split().
The find() function returns the first match of a regular expression in the input, starting at the specified start index. In Kotlin, the default start index is 0. Kotlin program using the find() method –
Kotlin
fun main(args : Array<String>) {

    val company = "GeeksforGeeks : A computer science portal for students"

    val pattern = "science".toRegex()

    val found = pattern.find(company)

    val m = found?.value
    val index = found?.range

    println("$m found at indexes: $index")
}
Output:
science found at indexes: 27..33
The findAll() function returns a sequence of all the occurrences of a regular expression within the given input string. Kotlin program using the findAll() method –
Kotlin
fun main(args : Array<String>) {

    val company = "GeeksforGeeks"
    val pattern = "Geeks".toRegex()

    val patt = pattern.findAll(company)

    patt.forEach { f ->
        val m = f.value
        val index = f.range
        println("$m indexes are: $index")
    }
}
Output:
Geeks indexes are: 0..4
Geeks indexes are: 8..12
The dot (.) metacharacter matches any single character in the text. Kotlin program –
Kotlin
fun main(args : Array<String>) {

    val names = listOf("GeeksforGeeks", "GeekyAnts", "McGeek")

    val pattern = "..Geek".toRegex()

    names.forEach { name ->
        if (pattern.containsMatchIn(name)) {
            println("$name matches")
        }
    }
}
Output:
GeeksforGeeks matches
McGeek matches
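The replace() and split() functions mentioned in the note above work on the same Regex objects. A minimal sketch (the strings and the pattern here are illustrative, not from the original article):

Kotlin

fun main() {

    // one or more whitespace characters
    val spaces = "\\s+".toRegex()

    // replace(): substitute every match with the replacement string
    println(spaces.replace("Geeks    for   Geeks", " "))   // Geeks for Geeks

    // split(): break the string around every match of the pattern
    println("2021-08-04".split("-".toRegex()))              // [2021, 08, 04]
}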
How to Create a Range of Dates in R | 23 Aug, 2021
The R programming language makes it extremely easy to generate a range of integers as well as individual characters. It is also possible to store Date objects in R in different formats and increment them sequentially to produce a range, using base packages as well as external ones.
This article discusses various ways by which a range of dates can be defined.
The date objects are stored as the number of days calculated starting January 1, 1970, where negative numbers are used to refer to earlier dates. Date objects support basic arithmetic directly, wherein integers are added to or subtracted from the Dates. The Date object can also specify different formats to contain the dates.
The as.Date() method takes as input a character date object and converts it to a Date object.
Syntax:
as.Date(character date object)
The seq() method in R can be used to generate regular, sequentially arranged sequences that are either increasing or decreasing. The “by” parameter may contain strings or integers to increment the sequence by.
Syntax:
seq(from, to, by, length.out)
Parameter:
from – Beginning of the sequence
to – End of the sequence
by – The steps to increment the sequence by
length.out – The total length of the sequence
Thus by combining these two methods we can easily get the job done. seq() will increment each entry by 1.
Example: Creating a range of dates
R
# defining start date
date <- as.Date("2021/08/04")

# defining length of range
len <- 9

# generating range of dates
seq(date, by = "day", length.out = len)
Output
[1] “2021-08-04” “2021-08-05” “2021-08-06” “2021-08-07” “2021-08-08”
[6] “2021-08-09” “2021-08-10” “2021-08-11” “2021-08-12”
Example: Code snippet that defines a start and an end date and generates the range between them, incrementing each entry by one day.
R
# defining start date
start_date <- as.Date("2021/08/04")

# defining end date
end_date <- as.Date("2021/08/11")

# generating range of dates
range <- seq(start_date, end_date, "days")
print(range)
Output
[1] “2021-08-04” “2021-08-05” “2021-08-06” “2021-08-07” “2021-08-08”
[6] “2021-08-09” “2021-08-10” “2021-08-11”
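The by argument is not limited to days; as a small sketch (the dates are chosen only for illustration), weekly and monthly steps work the same way:

R

# weekly steps between two dates
seq(as.Date("2021/08/04"), as.Date("2021/09/01"), by = "week")
# [1] "2021-08-04" "2021-08-11" "2021-08-18" "2021-08-25" "2021-09-01"

# four monthly steps starting from a date
seq(as.Date("2021/08/04"), by = "month", length.out = 4)
# [1] "2021-08-04" "2021-09-04" "2021-10-04" "2021-11-04"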
Lubridate package in R is used to work with date and time objects. It makes it easier to parse and manipulate the objects and needs to be installed and loaded into the working space by the following command :
install.packages("lubridate")
The ymd() method can be used to convert a character date to a date format consisting of year-month-date using the lubridate package. This is followed by the application of seq() method of base R.
Example: Creating a range of dates
R
library("lubridate") # defining start datestart_date <- ymd("2021/08/04") # defining end dateend_date <- ymd("2021/08/11") # generating range of datesrange <- seq(start_date, end_date,"days")print(range)
Output
[1] “2021-08-04” “2021-08-05” “2021-08-06” “2021-08-07” “2021-08-08”
[6] “2021-08-09” “2021-08-10” “2021-08-11”
variable === undefined vs. typeof variable === “undefined” in JavaScript | 28 Oct, 2021
Undefined comes into the picture when a variable has been declared but not assigned any value. Undefined is not a keyword. A function can also return undefined when it doesn’t explicitly return a value. There are two ways to determine whether a variable is undefined: by value and by type.
javascript
var geeks;
alert(geeks === undefined)
Here the variable geeks has been declared but never assigned, so it exists but holds no value. You are comparing the geeks variable with the global value undefined, so the alert shows true. There are two ways to perform this check:
Check by value (strict equality operator): This tells you whether the variable has been assigned a value; if it has not, comparing it with undefined evaluates to true.
Check the type (typeof operator): This tells you the type of the variable; if no value has been assigned, typeof returns “undefined”.
Note: The strict equality operator (===) doesn’t check whether the variable is null or not. The typeof operator does not throw an error if the variable has not been declared. Example: Here we declare two variables; one is left undefined and the other is assigned “null”. Note that null is not undefined: console.log will show “null”, and typeof will display “object”. The program below illustrates the approach more clearly. Program:
javascript
var firstName;
var lastName = null;

// Print: undefined
console.log(firstName);
// Print: null
console.log(lastName);

// Print: undefined
console.log(typeof firstName);
// Print: object
console.log(typeof lastName);

// Print: false
console.log(null === undefined)

if (firstName === undefined) {
    console.log('FirstName is undefined');
} else if (firstName === null) {
    console.log('FirstName is null');
}
Output:
undefined
null
undefined
object
false
variable === undefined VS typeof variable === “undefined”
ruhelaa48
JavaScript-Misc
Picked
JavaScript
Difference between var, let and const keywords in JavaScript
Differences between Functional Components and Class Components in React
Remove elements from a JavaScript Array
Difference Between PUT and PATCH Request
How to Open URL in New Tab using JavaScript ?
Roadmap to Learn JavaScript For Beginners
How to get character array from string in JavaScript?
How do you run JavaScript script through the Terminal?
JavaScript | console.log() with Examples
Node.js | fs.writeFileSync() Method | [
{
"code": null,
"e": 53,
"s": 25,
"text": "\n28 Oct, 2021"
},
{
"code": null,
"e": 340,
"s": 53,
"text": "Undefined comes into a picture when any variable is defined already but not has been assigned any value. Undefined is not a keyword. A function can also be undefined when it doesn’t have the value returned. There are two ways to determine if a variable is not defined, Value and type. "
},
{
"code": null,
"e": 351,
"s": 340,
"text": "javascript"
},
{
"code": "var geeks;alert ( geeks === undefined)",
"e": 390,
"s": 351,
"text": null
},
{
"code": null,
"e": 589,
"s": 390,
"text": "It’s so clear that you assigned a variable that was not defined but the variable exists. Here you are comparing geeks variable with the global variable “undefined” which is undefined also. Syntax: "
},
{
"code": null,
"e": 911,
"s": 589,
"text": "Check by Value (Strict equality Operator): Here you will get that variable is assigned a value or not if the variable is not assigned a value it will display undefined.Check the type (Typeof operator): Here you will get what type of variable was that if there is no variable was assigned then it will display “undefined”."
},
{
"code": null,
"e": 1080,
"s": 911,
"text": "Check by Value (Strict equality Operator): Here you will get that variable is assigned a value or not if the variable is not assigned a value it will display undefined."
},
{
"code": null,
"e": 1234,
"s": 1080,
"text": "Check the type (Typeof operator): Here you will get what type of variable was that if there is no variable was assigned then it will display “undefined”."
},
{
"code": null,
"e": 1687,
"s": 1234,
"text": "Note The strict equality operator (===) doesn’t check whether the variable is null or not.The type of operator does not throw an error if the variable has not been declared.Example: Here we assign two variable one is undefined and the other one is defined “null”, here null is not undefined when you put console.log it will show “null” if you check typeof then it will display object, below program will illustrate the approach more clearly. Program: "
},
{
"code": null,
"e": 1698,
"s": 1687,
"text": "javascript"
},
{
"code": "var firstName;var lastName = null; // Print: undefined console.log(firstName);// Print: nullconsole.log(lastName); // Print: undefinedconsole.log(typeof firstName);// Print: objectconsole.log(typeof lastName); // Print: falseconsole.log(null === undefined) if(firstName === undefined) { console.log('LastName is undefined'); } else if(firstName === null){ console.log('FirstName is null'); }",
"e": 2144,
"s": 1698,
"text": null
},
{
"code": null,
"e": 2154,
"s": 2144,
"text": "Output: "
},
{
"code": null,
"e": 2194,
"s": 2154,
"text": "undefined\nnull\n\nundefined\nobject\n\nfalse"
},
{
"code": null,
"e": 2254,
"s": 2194,
"text": "variable === undefined VS typeof variable === “undefined” "
},
{
"code": null,
"e": 2266,
"s": 2256,
"text": "ruhelaa48"
},
{
"code": null,
"e": 2282,
"s": 2266,
"text": "JavaScript-Misc"
},
{
"code": null,
"e": 2289,
"s": 2282,
"text": "Picked"
},
{
"code": null,
"e": 2300,
"s": 2289,
"text": "JavaScript"
},
{
"code": null,
"e": 2398,
"s": 2300,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2459,
"s": 2398,
"text": "Difference between var, let and const keywords in JavaScript"
},
{
"code": null,
"e": 2531,
"s": 2459,
"text": "Differences between Functional Components and Class Components in React"
},
{
"code": null,
"e": 2571,
"s": 2531,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 2612,
"s": 2571,
"text": "Difference Between PUT and PATCH Request"
},
{
"code": null,
"e": 2658,
"s": 2612,
"text": "How to Open URL in New Tab using JavaScript ?"
},
{
"code": null,
"e": 2700,
"s": 2658,
"text": "Roadmap to Learn JavaScript For Beginners"
},
{
"code": null,
"e": 2754,
"s": 2700,
"text": "How to get character array from string in JavaScript?"
},
{
"code": null,
"e": 2809,
"s": 2754,
"text": "How do you run JavaScript script through the Terminal?"
},
{
"code": null,
"e": 2850,
"s": 2809,
"text": "JavaScript | console.log() with Examples"
}
] |
Recommendation System in Python | 06 Sep, 2021
There are a lot of applications where websites collect data from their users and use that data to predict the likes and dislikes of their users. This allows them to recommend content that their users will like. Recommender systems are a way of suggesting similar items and ideas that align with a user's specific way of thinking.
Recommender Systems are of different types:
Collaborative Filtering: Collaborative Filtering recommends items based on similarity measures between users and/or items. The basic assumption behind the algorithm is that users with similar interests have common preferences.
Content-Based Recommendation: It is supervised machine learning used to induce a classifier to discriminate between interesting and uninteresting items for the user.
Content-Based Recommendation System: Content-based systems recommend items to the customer that are similar to items the customer previously rated highly. They use the features and properties of the items; from these properties, they can calculate the similarity between items.
In a content-based recommendation system, we first need to create a profile for each item, which represents the properties of that item. From these item profiles, a user profile is inferred for a particular user. We use these user profiles to recommend items to the users from the catalog.
Content-Based Recommendation System
Item profile:
In a content-based recommendation system, we need to build a profile for each item, which contains the important properties of each item. For Example, If the movie is an item, then its actors, director, release year, and genre are its important properties, and for the document, the important property is the type of content and set of important words in it.
Let’s have a look at how to create an item profile. First, we need to apply TF-IDF weighting: the TF (term frequency) of a word reflects how often it appears in a document, and the IDF (inverse document frequency) of a word measures how significant that term is in the whole corpus. These can be calculated by the following formulas:
The term frequency is:
TF(i, j) = f(i, j) / max_k f(k, j)
where f(i, j) is the frequency of term (feature) i in document (item) j.
The inverse document frequency is:
IDF(i) = log(N / n_i)
where n_i is the number of documents that mention term i, and N is the total number of documents.
Therefore, the total formula is:
Score(i, j) = TF(i, j) * IDF(i)
Here, the doc profile is the set of words with the highest TF-IDF scores, together with their scores.
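As an illustrative sketch of building item profiles this way (the toy corpus, term lists, and top_k cutoff below are assumptions for demonstration, not part of the original article):

Python3

import math
from collections import Counter

# Hypothetical toy corpus: each item (e.g. a movie synopsis) is a list of terms.
documents = {
    "item1": ["action", "hero", "battle", "action"],
    "item2": ["romance", "hero", "drama"],
    "item3": ["action", "space", "battle"],
}

N = len(documents)                # total number of documents
doc_freq = Counter()              # n_i: number of documents mentioning term i
for terms in documents.values():
    doc_freq.update(set(terms))

def tf_idf_profile(terms, top_k=3):
    counts = Counter(terms)
    max_f = max(counts.values())
    scores = {}
    for term, f in counts.items():
        tf = f / max_f                          # TF(i, j) = f(i, j) / max_k f(k, j)
        idf = math.log(N / doc_freq[term])      # IDF(i) = log(N / n_i)
        scores[term] = tf * idf                 # Score(i, j) = TF * IDF
    # doc profile: the top_k terms with the highest TF-IDF scores
    return dict(sorted(scores.items(), key=lambda kv: -kv[1])[:top_k])

item_profiles = {item: tf_idf_profile(terms) for item, terms in documents.items()}
print(item_profiles)

Each item profile keeps only its highest-scoring terms, which is what the recommender later compares against the user profile.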
User profile:
The user profile is a vector that describes the user's preferences. During the creation of a user's profile, we use a utility matrix that describes the relationship between users and items. From this information, the best estimate of which items the user likes is some aggregation of the profiles of those items.
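A minimal sketch of that aggregation, using hypothetical item feature vectors and a single toy row of the utility matrix (an illustration, not the article's prescribed method):

Python3

import numpy as np

# Hypothetical item feature vectors (rows: items, columns: features)
# and one user's ratings for those items (a row of the utility matrix).
item_features = np.array([
    [1.0, 0.0, 0.5],   # item1
    [0.0, 1.0, 0.2],   # item2
    [0.8, 0.0, 0.9],   # item3
])
user_ratings = np.array([5.0, 1.0, 4.0])

# User profile = rating-weighted average of the profiles of the rated items.
user_profile = (user_ratings @ item_features) / user_ratings.sum()
print(user_profile)

# Recommend by cosine similarity between the user profile and each item profile.
sims = item_features @ user_profile / (
    np.linalg.norm(item_features, axis=1) * np.linalg.norm(user_profile)
)
print(sims)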
Advantages and Disadvantages:
Advantages:
No need for data on other users when applying to similar users.
Able to recommend to users with unique tastes.
Able to recommend new & popular items
Explanations for recommended items.
Disadvantages:
Finding the appropriate feature is hard.
Doesn’t recommend items outside the user profile.
Collaborative Filtering: Collaborative filtering is based on the idea that similar people (based on the data) generally tend to like similar things. It predicts which item a user will like based on the item preferences of other similar users.
Collaborative filtering uses a user-item matrix to generate recommendations. This matrix contains the values that indicate a user’s preference towards a given item. These values can represent either explicit feedback (direct user ratings) or implicit feedback (indirect user behavior such as listening, purchasing, watching).
Explicit Feedback: Data that users provide directly when they choose to do so. Often, users choose not to provide this data, so it is scarce and sometimes costly to obtain. For example, ratings from the user.
Implicit Feedback: In implicit feedback, we track user behavior to predict their preference.
Example:
Consider a user x. We need to find another user whose ratings are similar to x's ratings, and then we estimate x's ratings based on that user.
Let’s create a matrix representing different users and movies:
Consider two users x and y with rating vectors rx and ry. We need a similarity measure to calculate sim(x, y). There are many methods to calculate similarity, such as Jaccard similarity, cosine similarity, and Pearson similarity. Here, we use centered cosine similarity (Pearson similarity), where we normalize the ratings by subtracting each user's mean rating before taking the cosine: sim(x, y) = cos(rx, ry) = (rx · ry) / (||rx|| * ||ry||), with rx and ry being the mean-centered rating vectors.
Here, we can calculate similarity: For ex: sim(A,B) = cos(rA, rB) = 0.09 ; sim(A,C) = -0.56. sim(A,B) > sim(A,C).
Rating Predictions
Let rx be the vector of user x's ratings. Let N be the set of k similar users who also rated item i. Then we can calculate the prediction for user x on item i with the following formula: r(x, i) = Σ(y in N) s(x, y) * r(y, i) / Σ(y in N) s(x, y), where s(x, y) = sim(x, y).
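To make the similarity and prediction formulas concrete, here is a small NumPy sketch on made-up ratings, where 0 marks an unrated item (an illustration only; the full example later in the article uses scikit-learn instead):

Python3

import numpy as np

# Toy utility matrix: rows are users, columns are items, 0 means "not rated".
R = np.array([
    [4.0, 0.0, 0.0, 5.0, 1.0],
    [5.0, 5.0, 4.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 2.0, 4.0],
])

def centered(r):
    # Subtract the user's mean rating from the rated entries (centered cosine / Pearson).
    rated = r != 0
    out = np.zeros_like(r)
    out[rated] = r[rated] - r[rated].mean()
    return out

def sim(a, b):
    ca, cb = centered(a), centered(b)
    denom = np.linalg.norm(ca) * np.linalg.norm(cb)
    return 0.0 if denom == 0 else float(ca @ cb) / denom

# Predict user x's rating for item i from the similar users who rated it.
x, i = 0, 1
neighbours = [y for y in range(len(R)) if y != x and R[y, i] != 0]
weights = np.array([sim(R[x], R[y]) for y in neighbours])
ratings = np.array([R[y, i] for y in neighbours])
# r(x, i) = sum(s(x, y) * r(y, i)) / sum(s(x, y)); abs() used for numerical robustness.
prediction = (weights @ ratings) / np.abs(weights).sum()
print(prediction)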
Advantages and Disadvantages:
Advantages:
No need for domain knowledge because embeddings are learned automatically.
Capture inherent subtle characteristics.
Disadvantages:
Cannot handle fresh items due to cold start problem.
Hard to add new features that may improve the quality of the model.
Python3
# code
import numpy as np
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns

import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

ratings = pd.read_csv("https://s3-us-west-2.amazonaws.com/recommender-tutorial/ratings.csv")
ratings.head()

movies = pd.read_csv("https://s3-us-west-2.amazonaws.com/recommender-tutorial/movies.csv")
movies.head()

n_ratings = len(ratings)
n_movies = len(ratings['movieId'].unique())
n_users = len(ratings['userId'].unique())

print(f"Number of ratings: {n_ratings}")
print(f"Number of unique movieId's: {n_movies}")
print(f"Number of unique users: {n_users}")
print(f"Average ratings per user: {round(n_ratings/n_users, 2)}")
print(f"Average ratings per movie: {round(n_ratings/n_movies, 2)}")

user_freq = ratings[['userId', 'movieId']].groupby('userId').count().reset_index()
user_freq.columns = ['userId', 'n_ratings']
user_freq.head()

# Find Lowest and Highest rated movies:
mean_rating = ratings.groupby('movieId')[['rating']].mean()
# Lowest rated movies
lowest_rated = mean_rating['rating'].idxmin()
movies.loc[movies['movieId'] == lowest_rated]
# Highest rated movies
highest_rated = mean_rating['rating'].idxmax()
movies.loc[movies['movieId'] == highest_rated]
# show number of people who rated movies rated movie highest
ratings[ratings['movieId']==highest_rated]
# show number of people who rated movies rated movie lowest
ratings[ratings['movieId']==lowest_rated]

## the above movies has very low dataset. We will use bayesian average
movie_stats = ratings.groupby('movieId')[['rating']].agg(['count', 'mean'])
movie_stats.columns = movie_stats.columns.droplevel()

# Now, we create user-item matrix using scipy csr matrix
from scipy.sparse import csr_matrix

def create_matrix(df):
    N = len(df['userId'].unique())
    M = len(df['movieId'].unique())

    # Map Ids to indices
    user_mapper = dict(zip(np.unique(df["userId"]), list(range(N))))
    movie_mapper = dict(zip(np.unique(df["movieId"]), list(range(M))))

    # Map indices to IDs
    user_inv_mapper = dict(zip(list(range(N)), np.unique(df["userId"])))
    movie_inv_mapper = dict(zip(list(range(M)), np.unique(df["movieId"])))

    user_index = [user_mapper[i] for i in df['userId']]
    movie_index = [movie_mapper[i] for i in df['movieId']]

    X = csr_matrix((df["rating"], (movie_index, user_index)), shape=(M, N))

    return X, user_mapper, movie_mapper, user_inv_mapper, movie_inv_mapper

X, user_mapper, movie_mapper, user_inv_mapper, movie_inv_mapper = create_matrix(ratings)

from sklearn.neighbors import NearestNeighbors
"""Find similar movies using KNN"""
def find_similar_movies(movie_id, X, k, metric='cosine', show_distance=False):
    neighbour_ids = []

    movie_ind = movie_mapper[movie_id]
    movie_vec = X[movie_ind]
    k += 1
    kNN = NearestNeighbors(n_neighbors=k, algorithm="brute", metric=metric)
    kNN.fit(X)
    movie_vec = movie_vec.reshape(1, -1)
    neighbour = kNN.kneighbors(movie_vec, return_distance=show_distance)
    for i in range(0, k):
        n = neighbour.item(i)
        neighbour_ids.append(movie_inv_mapper[n])
    neighbour_ids.pop(0)
    return neighbour_ids

movie_titles = dict(zip(movies['movieId'], movies['title']))

movie_id = 3

similar_ids = find_similar_movies(movie_id, X, k=10)
movie_title = movie_titles[movie_id]

print(f"Since you watched {movie_title}")
for i in similar_ids:
    print(movie_titles[i])
Output:
Number of ratings: 100836
Number of unique movieId's: 9724
Number of unique users: 610
Average number of ratings per user: 165.3
Average number of ratings per movie: 10.37
==========================================
# lowest rated
movieId title genres
2689 3604 Gypsy (1962) Musical
# highest rated
movieId title genres
48 53 Lamerica (1994) Adventure|Drama
# who rate highest rated movie
userId movieId rating timestamp
13368 85 53 5.0 889468268
96115 603 53 5.0 963180003
# who rate lowest rated movie
userId movieId rating timestamp
13633 89 3604 0.5 1520408880
Since you watched Grumpier Old Men (1995)
Grumpy Old Men (1993)
Striptease (1996)
Nutty Professor, The (1996)
Twister (1996)
Father of the Bride Part II (1995)
Broken Arrow (1996)
Bio-Dome (1996)
Truth About Cats & Dogs, The (1996)
Sabrina (1995)
Birdcage, The (1996
saurabh1990aror
Machine Learning
Python
Machine Learning
Introduction to Recurrent Neural Network
Support Vector Machine Algorithm
ML | Monte Carlo Tree Search (MCTS)
Markov Decision Process
DBSCAN Clustering in ML | Density based clustering
Read JSON file using Python
Python map() function
Adding new column to existing DataFrame in Pandas
Python Dictionary
How to get column names in Pandas dataframe | [
{
"code": null,
"e": 54,
"s": 26,
"text": "\n06 Sep, 2021"
},
{
"code": null,
"e": 366,
"s": 54,
"text": "There are a lot of applications where websites collect data from their users and use that data to predict the likes and dislikes of their users. This allows them to recommend the content that they like. Recommender systems are a way of suggesting or similar items and ideas to a user’s specific way of thinking."
},
{
"code": null,
"e": 405,
"s": 366,
"text": "Recommender System is different types:"
},
{
"code": null,
"e": 632,
"s": 405,
"text": "Collaborative Filtering: Collaborative Filtering recommends items based on similarity measures between users and/or items. The basic assumption behind the algorithm is that users with similar interests have common preferences."
},
{
"code": null,
"e": 798,
"s": 632,
"text": "Content-Based Recommendation: It is supervised machine learning used to induce a classifier to discriminate between interesting and uninteresting items for the user."
},
{
"code": null,
"e": 1069,
"s": 798,
"text": "Content-Based Recommendation System: Content-Based systems recommends items to the customer similar to previously high-rated items by the customer. It uses the features and properties of the item. From these properties, it can calculate the similarity between the items."
},
{
"code": null,
"e": 1349,
"s": 1069,
"text": "In a content-based recommendation system, first, we need to create a profile for each item, which represents the properties of those items. From the user profiles are inferred for a particular user. We use these user profiles to recommend the items to the users from the catalog."
},
{
"code": null,
"e": 1385,
"s": 1349,
"text": "Content-Based Recommendation System"
},
{
"code": null,
"e": 1399,
"s": 1385,
"text": "Item profile:"
},
{
"code": null,
"e": 1758,
"s": 1399,
"text": "In a content-based recommendation system, we need to build a profile for each item, which contains the important properties of each item. For Example, If the movie is an item, then its actors, director, release year, and genre are its important properties, and for the document, the important property is the type of content and set of important words in it."
},
{
"code": null,
"e": 2111,
"s": 1758,
"text": "Let’s have a look at how to create an item profile. First, we need to perform the TF-IDF vectorizer, here TF (term frequency) of a word is the number of times it appears in a document and The IDF (inverse document frequency) of a word is the measure of how significant that term is in the whole corpus. These can be calculated by the following formula:"
},
{
"code": null,
"e": 2152,
"s": 2111,
"text": "The term-frequency can be calculated by:"
},
{
"code": null,
"e": 2220,
"s": 2152,
"text": "where fij is the frequency of term(feature) i in document(item) j. "
},
{
"code": null,
"e": 2275,
"s": 2220,
"text": "The inverse-document frequency can be calculated with:"
},
{
"code": null,
"e": 2357,
"s": 2275,
"text": "where, ni number of documents that mention term i. N is the total number of docs."
},
{
"code": null,
"e": 2390,
"s": 2357,
"text": "Therefore, the total formula is:"
},
{
"code": null,
"e": 2433,
"s": 2390,
"text": "Here, doc profile is the set of words with"
},
{
"code": null,
"e": 2447,
"s": 2433,
"text": "User profile:"
},
{
"code": null,
"e": 2768,
"s": 2447,
"text": "The user profile is a vector that describes the user preference. During the creation of the user’s profile, we use a utility matrix that describes the relationship between user and item. From this information, the best estimate we can decide which item the user likes, is some aggregation of the profiles of those items."
},
{
"code": null,
"e": 2798,
"s": 2768,
"text": "Advantages and Disadvantages:"
},
{
"code": null,
"e": 2992,
"s": 2798,
"text": " Advantages:No need for data on other users when applying to similar users.Able to recommend to users with unique tastes.Able to recommend new & popular itemsExplanations for recommended items."
},
{
"code": null,
"e": 3056,
"s": 2992,
"text": "No need for data on other users when applying to similar users."
},
{
"code": null,
"e": 3103,
"s": 3056,
"text": "Able to recommend to users with unique tastes."
},
{
"code": null,
"e": 3141,
"s": 3103,
"text": "Able to recommend new & popular items"
},
{
"code": null,
"e": 3177,
"s": 3141,
"text": "Explanations for recommended items."
},
{
"code": null,
"e": 3281,
"s": 3177,
"text": "Disadvantages:Finding the appropriate feature is hard.Doesn’t recommend items outside the user profile."
},
{
"code": null,
"e": 3322,
"s": 3281,
"text": "Finding the appropriate feature is hard."
},
{
"code": null,
"e": 3372,
"s": 3322,
"text": "Doesn’t recommend items outside the user profile."
},
{
"code": null,
"e": 3616,
"s": 3372,
"text": "Collaborative Filtering: Collaborative filtering is based on the idea that similar people (based on the data) generally tend to like similar things. It predicts which item a user will like based on the item preferences of other similar users. "
},
{
"code": null,
"e": 3942,
"s": 3616,
"text": "Collaborative filtering uses a user-item matrix to generate recommendations. This matrix contains the values that indicate a user’s preference towards a given item. These values can represent either explicit feedback (direct user ratings) or implicit feedback (indirect user behavior such as listening, purchasing, watching)."
},
{
"code": null,
"e": 4194,
"s": 3942,
"text": "Explicit Feedback: The amount of data that is collected from the users when they choose to do so. Many of the times, users choose not to provide data for the user. So, this data is scarce and sometimes costs money. For example, ratings from the user."
},
{
"code": null,
"e": 4287,
"s": 4194,
"text": "Implicit Feedback: In implicit feedback, we track user behavior to predict their preference."
},
{
"code": null,
"e": 4296,
"s": 4287,
"text": "Example:"
},
{
"code": null,
"e": 4439,
"s": 4296,
"text": "Consider a user x, we need to find another user whose rating are similar to x’s rating, and then we estimate x’s rating based on another user."
},
{
"code": null,
"e": 4501,
"s": 4439,
"text": "Let’s create a matrix representing different user and movies:"
},
{
"code": null,
"e": 4866,
"s": 4501,
"text": "Consider two users x, y with rating vectors rx and ry. We need to decide a similarity matrix to calculate similarity b/w sim(x,y). THere are many methods to calculate similarity such as: Jaccard similarity, cosine similarity and pearson similarity. Here, we use centered cosine similarity/ pearson similarity, where we normalize the rating by subtracting the mean:"
},
{
"code": null,
"e": 4980,
"s": 4866,
"text": "Here, we can calculate similarity: For ex: sim(A,B) = cos(rA, rB) = 0.09 ; sim(A,C) = -0.56. sim(A,B) > sim(A,C)."
},
{
"code": null,
"e": 4999,
"s": 4980,
"text": "Rating Predictions"
},
{
"code": null,
"e": 5185,
"s": 4999,
"text": "Let rx be the vector of user x’s rating. Let N be the set of k similar users who also rated item i. Then we can calculate the prediction of user x and item i by using following formula:"
},
{
"code": null,
"e": 5215,
"s": 5185,
"text": "Advantages and Disadvantages:"
},
{
"code": null,
"e": 5345,
"s": 5215,
"text": " Advantages:No need for the domain knowledge because embedding are learned automatically.Capture inherent subtle characteristics."
},
{
"code": null,
"e": 5423,
"s": 5345,
"text": "No need for the domain knowledge because embedding are learned automatically."
},
{
"code": null,
"e": 5464,
"s": 5423,
"text": "Capture inherent subtle characteristics."
},
{
"code": null,
"e": 5593,
"s": 5464,
"text": "Disadvantages:Cannot handle fresh items due to cold start problem.Hard to add any new features that may improve quality of model"
},
{
"code": null,
"e": 5646,
"s": 5593,
"text": "Cannot handle fresh items due to cold start problem."
},
{
"code": null,
"e": 5709,
"s": 5646,
"text": "Hard to add any new features that may improve quality of model"
},
{
"code": null,
"e": 5717,
"s": 5709,
"text": "Python3"
},
{
"code": "# codeimport numpy as npimport pandas as pdimport sklearnimport matplotlib.pyplot as pltimport seaborn as sns import warningswarnings.simplefilter(action='ignore', category=FutureWarning) ratings = pd.read_csv(\"https://s3-us-west-2.amazonaws.com/recommender-tutorial/ratings.csv\")ratings.head() movies = pd.read_csv(\"https://s3-us-west-2.amazonaws.com/recommender-tutorial/movies.csv\")movies.head() n_ratings = len(ratings)n_movies = len(ratings['movieId'].unique())n_users = len(ratings['userId'].unique()) print(f\"Number of ratings: {n_ratings}\")print(f\"Number of unique movieId's: {n_movies}\")print(f\"Number of unique users: {n_users}\")print(f\"Average ratings per user: {round(n_ratings/n_users, 2)}\")print(f\"Average ratings per movie: {round(n_ratings/n_movies, 2)}\") user_freq = ratings[['userId', 'movieId']].groupby('userId').count().reset_index()user_freq.columns = ['userId', 'n_ratings']user_freq.head() # Find Lowest and Highest rated movies:mean_rating = ratings.groupby('movieId')[['rating']].mean()# Lowest rated movieslowest_rated = mean_rating['rating'].idxmin()movies.loc[movies['movieId'] == lowest_rated]# Highest rated movieshighest_rated = mean_rating['rating'].idxmax()movies.loc[movies['movieId'] == highest_rated]# show number of people who rated movies rated movie highestratings[ratings['movieId']==highest_rated]# show number of people who rated movies rated movie lowestratings[ratings['movieId']==lowest_rated] ## the above movies has very low dataset. We will use bayesian averagemovie_stats = ratings.groupby('movieId')[['rating']].agg(['count', 'mean'])movie_stats.columns = movie_stats.columns.droplevel() # Now, we create user-item matrix using scipy csr matrixfrom scipy.sparse import csr_matrix def create_matrix(df): N = len(df['userId'].unique()) M = len(df['movieId'].unique()) # Map Ids to indices user_mapper = dict(zip(np.unique(df[\"userId\"]), list(range(N)))) movie_mapper = dict(zip(np.unique(df[\"movieId\"]), list(range(M)))) # Map indices to IDs user_inv_mapper = dict(zip(list(range(N)), np.unique(df[\"userId\"]))) movie_inv_mapper = dict(zip(list(range(M)), np.unique(df[\"movieId\"]))) user_index = [user_mapper[i] for i in df['userId']] movie_index = [movie_mapper[i] for i in df['movieId']] X = csr_matrix((df[\"rating\"], (movie_index, user_index)), shape=(M, N)) return X, user_mapper, movie_mapper, user_inv_mapper, movie_inv_mapper X, user_mapper, movie_mapper, user_inv_mapper, movie_inv_mapper = create_matrix(ratings) from sklearn.neighbors import NearestNeighbors\"\"\"Find similar movies using KNN\"\"\"def find_similar_movies(movie_id, X, k, metric='cosine', show_distance=False): neighbour_ids = [] movie_ind = movie_mapper[movie_id] movie_vec = X[movie_ind] k+=1 kNN = NearestNeighbors(n_neighbors=k, algorithm=\"brute\", metric=metric) kNN.fit(X) movie_vec = movie_vec.reshape(1,-1) neighbour = kNN.kneighbors(movie_vec, return_distance=show_distance) for i in range(0,k): n = neighbour.item(i) neighbour_ids.append(movie_inv_mapper[n]) neighbour_ids.pop(0) return neighbour_ids movie_titles = dict(zip(movies['movieId'], movies['title'])) movie_id = 3 similar_ids = find_similar_movies(movie_id, X, k=10)movie_title = movie_titles[movie_id] print(f\"Since you watched {movie_title}\")for i in similar_ids: print(movie_titles[i])",
"e": 9145,
"s": 5717,
"text": null
},
{
"code": null,
"e": 9153,
"s": 9145,
"text": "Output:"
},
{
"code": null,
"e": 10081,
"s": 9153,
"text": "Number of ratings: 100836\nNumber of unique movieId's: 9724\nNumber of unique users: 610\nAverage number of ratings per user: 165.3\nAverage number of ratings per movie: 10.37\n==========================================\n# lowest rated\n movieId title genres\n2689 3604 Gypsy (1962) Musical\n\n# highest rated\n movieId title genres\n48 53 Lamerica (1994) Adventure|Drama\n\n# who rate highest rated movie\nuserId movieId rating timestamp\n13368 85 53 5.0 889468268\n96115 603 53 5.0 963180003\n\n# who rate lowest rated movie\nuserId movieId rating timestamp\n13633 89 3604 0.5 1520408880\n\n\nSince you watched Grumpier Old Men (1995)\nGrumpy Old Men (1993)\nStriptease (1996)\nNutty Professor, The (1996)\nTwister (1996)\nFather of the Bride Part II (1995)\nBroken Arrow (1996)\nBio-Dome (1996)\nTruth About Cats & Dogs, The (1996)\nSabrina (1995)\nBirdcage, The (1996"
},
{
"code": null,
"e": 10097,
"s": 10081,
"text": "saurabh1990aror"
},
{
"code": null,
"e": 10114,
"s": 10097,
"text": "Machine Learning"
},
{
"code": null,
"e": 10121,
"s": 10114,
"text": "Python"
},
{
"code": null,
"e": 10138,
"s": 10121,
"text": "Machine Learning"
},
{
"code": null,
"e": 10236,
"s": 10138,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 10277,
"s": 10236,
"text": "Introduction to Recurrent Neural Network"
},
{
"code": null,
"e": 10310,
"s": 10277,
"text": "Support Vector Machine Algorithm"
},
{
"code": null,
"e": 10346,
"s": 10310,
"text": "ML | Monte Carlo Tree Search (MCTS)"
},
{
"code": null,
"e": 10370,
"s": 10346,
"text": "Markov Decision Process"
},
{
"code": null,
"e": 10421,
"s": 10370,
"text": "DBSCAN Clustering in ML | Density based clustering"
},
{
"code": null,
"e": 10449,
"s": 10421,
"text": "Read JSON file using Python"
},
{
"code": null,
"e": 10471,
"s": 10449,
"text": "Python map() function"
},
{
"code": null,
"e": 10521,
"s": 10471,
"text": "Adding new column to existing DataFrame in Pandas"
},
{
"code": null,
"e": 10539,
"s": 10521,
"text": "Python Dictionary"
}
] |
Red Hawk – Information Gathering and Vulnerability Scanning Tool in Kali Linux | 28 Mar, 2021
Red Hawk is a free and open-source tool available on GitHub. Red Hawk is used to scan websites for information gathering and to find vulnerabilities. Red Hawk is written in PHP and uses PHP scripts to perform reconnaissance. Red Hawk is powerful enough to detect the content management system of a site while scanning, along with its IP address, webserver record, Cloudflare information, and robots.txt. Red Hawk can detect WordPress, Drupal, Joomla, and Magento CMS. Red Hawk looks for error-based SQL injections, sensitive WordPress files, and WordPress version-related vulnerabilities. Red Hawk uses different modules to perform all the scanning. WHOIS data collection gives us information about Geo-IP lookup, banner grabbing, DNS lookup, port scanning, sub-domain information, reverse IP, and MX records lookup. Overall, Red Hawk is a vulnerability scanner.
Red Hawk can be used as a vulnerability Scanner.
Red Hawk can be used to find IP Addresses of the target.
Red Hawk can be used to look for error-based SQL injections.
Red Hawk can be used to find sensitive files.
Red Hawk can be used to find information about Geo-IP lookup, Banner grabbing, DNS lookup, port scanning, sub-domain information, reverse IP using WHOIS lookup.
Red Hawk can be used to detect Content Management Systems (CMS) in use of a target web application,
Red Hawk can be used for WHOIS data collection, Geo-IP lookup, Banner grabbing, DNS lookup, port scanning, sub-domain information, reverse IP, and MX records lookup
Red Hawk is a complete package (tool) for information gathering. It is free and open source.
Step 1: Turn on your Kali Linux operating system and Move to the Desktop using the following command.
cd Desktop
Step 2: Create a new directory on Desktop and name it redhawk.
mkdir redhawk
Step 3: Now move to redhawk directory.
cd redhawk
Step 4: Now within this directory you have to download the RedHawk tool, or You have to simply git clone from Github.
git clone https://github.com/Tuhinshubhra/RED_HAWK
Step 5: You have now downloaded Red Hawk from GitHub using the git clone command. Move into the RED_HAWK directory using the following command.
cd RED_HAWK
Step 6: Now you are inside the RED_HAWK directory, where you have to run the tool. To list the contents of the tool, type the following command:
ls
Step 7: You can see many files here, such as config.php, Dockerfile, LICENSE, rhawk.php, and var.php; these are the main files of the tool.
Now, to run the tool, type the following command and press enter.
php rhawk.php
Step 8: Now you have to choose between HTTP and HTTPS.
Step 9: Now you will see a screen like this; this is the screen of the tool after setting the domain google.com.
Step 10: Now you can see scanning is completed.
Scanning is complete: we have scanned google.com and found the IP address 172.217.166.238 and the web server gws. Similarly, we can run the tool again and again to find many more vulnerabilities and options. This is the full approach for scanning using RED HAWK.
Step 11: Now choose an option according to your requirements; for example, if you want to choose option 0, type 0.
Now you can choose options from here according to your requirements. There are various options here, such as whois lookup, subdomain scanner, crawler, geo-ip lookup, nmap port scan, mx-lookup, etc. Choose an option, stay connected to the internet while running the tool, and you will get the result for the option you have chosen. For example, if you choose option 7, type 7 and you will get all the subdomains of the domain that you provided.
Cyber-security
Kali-Linux
Linux-Tools
Linux-Unix
tar command in Linux with examples
Conditional Statements | Shell Script
Tail command in Linux with examples
UDP Server-Client implementation in C
Docker - COPY Instruction
scp command in Linux with Examples
Cat command in Linux with examples
echo command in Linux with Examples
touch command in Linux with Examples
chown command in Linux with Examples | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n28 Mar, 2021"
},
{
"code": null,
"e": 916,
"s": 28,
"text": "Red Hawk is a free and open-source tool available on GitHub. Red Hawk is used to scanning websites for information gathering and finding vulnerabilities. Red Hawk is written in PHP. It uses PHP script to do reconnaissance. Red Hawk is so powerful that it can detect content management system while scanning, it can detect IP address, it can detect webserver record, it can detect Cloudflare information, and can detect robots.txt. Red Hawk can detect WordPress, Drupal, Joomla, and Magento CMS. Red Hawk looks for error-based SQL injections, WordPress sensitive files, and WordPress version-related vulnerabilities. RedHawk uses different modules for doing all the scannings. WHOIS data collection gives us information about Geo-IP lookup, Banner grabbing, DNS lookup, port scanning, sub-domain information, reverse IP, and MX records lookup. Overall RedHawk is a vulnerability Scanner."
},
{
"code": null,
"e": 965,
"s": 916,
"text": "Red Hawk can be used as a vulnerability Scanner."
},
{
"code": null,
"e": 1022,
"s": 965,
"text": "Red Hawk can be used to find IP Addresses of the target."
},
{
"code": null,
"e": 1082,
"s": 1022,
"text": "Red Hawk can be used to look for error based SQL injections"
},
{
"code": null,
"e": 1127,
"s": 1082,
"text": "Red Hawk can be used to find sensitive files"
},
{
"code": null,
"e": 1288,
"s": 1127,
"text": "Red Hawk can be used to find information about Geo-IP lookup, Banner grabbing, DNS lookup, port scanning, sub-domain information, reverse IP using WHOIS lookup."
},
{
"code": null,
"e": 1388,
"s": 1288,
"text": "Red Hawk can be used to detect Content Management Systems (CMS) in use of a target web application,"
},
{
"code": null,
"e": 1553,
"s": 1388,
"text": "Red Hawk can be used for WHOIS data collection, Geo-IP lookup, Banner grabbing, DNS lookup, port scanning, sub-domain information, reverse IP, and MX records lookup"
},
{
"code": null,
"e": 1644,
"s": 1553,
"text": "Red Hawk is a complete package (TOOL) for information gathering .its free and Open Source."
},
{
"code": null,
"e": 1746,
"s": 1644,
"text": "Step 1: Turn on your Kali Linux operating system and Move to the Desktop using the following command."
},
{
"code": null,
"e": 1757,
"s": 1746,
"text": "cd Desktop"
},
{
"code": null,
"e": 1820,
"s": 1757,
"text": "Step 2: Create a new directory on Desktop and name it redhawk."
},
{
"code": null,
"e": 1834,
"s": 1820,
"text": "mkdir redhawk"
},
{
"code": null,
"e": 1873,
"s": 1834,
"text": "Step 3: Now move to redhawk directory."
},
{
"code": null,
"e": 1884,
"s": 1873,
"text": "cd redhawk"
},
{
"code": null,
"e": 2002,
"s": 1884,
"text": "Step 4: Now within this directory you have to download the RedHawk tool, or You have to simply git clone from Github."
},
{
"code": null,
"e": 2053,
"s": 2002,
"text": "git clone https://github.com/Tuhinshubhra/RED_HAWK"
},
{
"code": null,
"e": 2221,
"s": 2053,
"text": "Step 5: As you can see Now you have downloaded Redhawk from GitHub using the git clone command. Now you have to move on RED_HAWK directory using the following command."
},
{
"code": null,
"e": 2233,
"s": 2221,
"text": "cd RED_HAWK"
},
{
"code": null,
"e": 2374,
"s": 2233,
"text": "Step 6: Now you are under RED_HAWK directory where you have to run the tool. Now to list out the content of the tool type following command "
},
{
"code": null,
"e": 2377,
"s": 2374,
"text": "ls"
},
{
"code": null,
"e": 2517,
"s": 2377,
"text": "Step 7: You can see many files here such as config.php, Dockerfile, LICENSE, rhawk.php, var.php these files are the main files of the tool."
},
{
"code": null,
"e": 2575,
"s": 2517,
"text": "Now run the tool type following command and press enter. "
},
{
"code": null,
"e": 2589,
"s": 2575,
"text": "php rhawk.php"
},
{
"code": null,
"e": 2644,
"s": 2589,
"text": "Step 8: Now you have to choose between HTTP and HTTPS."
},
{
"code": null,
"e": 2743,
"s": 2644,
"text": "Step 9: Now you will a screen like this is the screen of the tool after setting domain google.com."
},
{
"code": null,
"e": 2791,
"s": 2743,
"text": "Step 10: Now you can see scanning is completed."
},
{
"code": null,
"e": 3066,
"s": 2791,
"text": "Scanning is completed we have scanned google.com and we found IP address 172.217.166.238, and we found web server gws, similarly, we can run the tool again and again and can find out many vulnerabilities and options. So this is the full approach for scanning using RED HAWK."
},
{
"code": null,
"e": 3181,
"s": 3066,
"text": "Step 10: Now choose the options according to your requirements just like if you want to choose option 0 so type 0."
},
{
"code": null,
"e": 3672,
"s": 3181,
"text": "Now you can choose options from here according to your requirements. There are various options here such as whois lookup, subdomain scanner, crawler, geo-ip lookup, nmap port scan, mx-lookup, etc. choose options from here and stay connected with the internet while running the tool and you will get the desired result according to the option that you have chosen. For example, if you have chosen option 7. So type 7, and you will get all the subdomain of the domain that you have provided. "
},
{
"code": null,
"e": 3687,
"s": 3672,
"text": "Cyber-security"
},
{
"code": null,
"e": 3698,
"s": 3687,
"text": "Kali-Linux"
},
{
"code": null,
"e": 3710,
"s": 3698,
"text": "Linux-Tools"
},
{
"code": null,
"e": 3721,
"s": 3710,
"text": "Linux-Unix"
},
{
"code": null,
"e": 3819,
"s": 3721,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 3854,
"s": 3819,
"text": "tar command in Linux with examples"
},
{
"code": null,
"e": 3892,
"s": 3854,
"text": "Conditional Statements | Shell Script"
},
{
"code": null,
"e": 3928,
"s": 3892,
"text": "Tail command in Linux with examples"
},
{
"code": null,
"e": 3966,
"s": 3928,
"text": "UDP Server-Client implementation in C"
},
{
"code": null,
"e": 3992,
"s": 3966,
"text": "Docker - COPY Instruction"
},
{
"code": null,
"e": 4027,
"s": 3992,
"text": "scp command in Linux with Examples"
},
{
"code": null,
"e": 4062,
"s": 4027,
"text": "Cat command in Linux with examples"
},
{
"code": null,
"e": 4098,
"s": 4062,
"text": "echo command in Linux with Examples"
},
{
"code": null,
"e": 4135,
"s": 4098,
"text": "touch command in Linux with Examples"
}
] |
Python Pyforest Library - GeeksforGeeks | 19 Feb, 2020
Sometimes, it happens that we spent a huge amount of time importing some common libraries like NumPy, pandas, matplotlib, seaborn, nltk and many more. To remove this headache of importing such libraries manually, we have pyforest library.
Pyforest is a library that lets you work directly without importing other common libraries separately. It automatically adds some of the most widely used data science libraries as soon as you use them.
Functions of pyforest :
active_imports(): It will return all the libraries which have been used in the program.
lazy_imports(): It will return all the libraries available in pyforest.
Installing Library:
pip install pyforest
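A minimal usage sketch, assuming pyforest is installed (exact behavior can differ slightly between a notebook and a plain script):

from pyforest import *   # exposes lazy imports such as np, pd, plt, sns

print(lazy_imports())     # every library pyforest can import lazily
df = pd.DataFrame({'a': [1, 2, 3]})   # pandas is imported on first use
print(active_imports())   # libraries actually imported so far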
Let’s see the usage of pyforest with various libraries.
NumPy: NumPy is a general-purpose array-processing package. It provides a high-performance multidimensional array object, and tools for working with these arrays.
Example:
# here we have not imported
# 'numpy as np' explicitly
a = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]])

print(a)
Output:
[[1 2 3]
[4 5 6]
[7 8 9]]
Note: For more information, refer to NumPy in Python
Pandas: A Pandas DataFrame is a two-dimensional, size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). A DataFrame is a two-dimensional data structure, i.e., data is aligned in a tabular fashion in rows and columns. A Pandas DataFrame consists of three principal components: the data, rows, and columns.
Example:
d = {'A': [1, 2, 3],
     'B': [4, 5, 6],
     'C': [7, 8, 9]}

# here we have not imported
# 'pandas as pd' ourselves
df = pd.DataFrame(d)

print(df)
Output:
A B C
0 1 4 7
1 2 5 8
2 3 6 9
Note: For more information, refer to Python | Pandas DataFrame
NLTK: The NLTK module is a massive tool kit, aimed at helping you with the entire Natural Language Processing (NLP) methodology.
Example:
# here we do not import the
# NLTK library ourselves,
# but only the class of nltk
from nltk.tokenize import word_tokenize

data = "All apples are red in colour"

print(word_tokenize(data))
Output:
['All', 'apples', 'are', 'red', 'in', 'colour']
Note: For more information, refer to Tokenize text using NLTK in python
Matplotlib: Matplotlib is an amazing visualization library in Python for 2D plots of arrays. Matplotlib is a multi-platform data visualization library built on NumPy arrays and designed to work with the broader SciPy stack.
Example:
# here we have not imported
# 'matplotlib.pyplot as plt' ourselves
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

plt.plot(x, y)

plt.show()
Output:
Note: For more information, refer to Introduction to Matplotlib
python-modules
Technical Scripter 2019
Python
Technical Scripter
Python Dictionary
How to Install PIP on Windows ?
Enumerate() in Python
Different ways to create Pandas Dataframe
Reading and Writing to text files in Python
*args and **kwargs in Python
Create a Pandas DataFrame from Lists
How To Convert Python Dictionary To JSON?
Convert integer to string in Python
Check if element exists in list in Python | [
{
"code": null,
"e": 26213,
"s": 26185,
"text": "\n19 Feb, 2020"
},
{
"code": null,
"e": 26452,
"s": 26213,
"text": "Sometimes, it happens that we spent a huge amount of time importing some common libraries like NumPy, pandas, matplotlib, seaborn, nltk and many more. To remove this headache of importing such libraries manually, we have pyforest library."
},
{
"code": null,
"e": 26646,
"s": 26452,
"text": "It is that library which helps you to work directly without importing other libraries separately.It itself adds up some of the highly usable libraries used in DataScience while we are using it."
},
{
"code": null,
"e": 26670,
"s": 26646,
"text": "Functions of pyforest :"
},
{
"code": null,
"e": 26758,
"s": 26670,
"text": "active_imports(): It will return all the libraries which have been used in the program."
},
{
"code": null,
"e": 26830,
"s": 26758,
"text": "lazy_imports(): It will return all the libraries available in pyforest."
},
{
"code": null,
"e": 26850,
"s": 26830,
"text": "Installing Library:"
},
{
"code": null,
"e": 26871,
"s": 26850,
"text": "pip install pyforest"
},
{
"code": null,
"e": 26927,
"s": 26871,
"text": "Let’s see the usage of pyforest with various libraries."
},
{
"code": null,
"e": 27300,
"s": 26927,
"text": "Numpy: NumPy is a general-purpose array-processing package. It provides a high-performance multidimensional array object, and tools for working with these arrays.Example:# here we have not import # 'numpy as np' by explicitly a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print(a)Output:[[1 2 3]\n [4 5 6]\n [7 8 9]]\nNote: For more information, refer to NumPy in Python"
},
{
"code": null,
"e": 27309,
"s": 27300,
"text": "Example:"
},
{
"code": "# here we have not import # 'numpy as np' by explicitly a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) print(a)",
"e": 27425,
"s": 27309,
"text": null
},
{
"code": null,
"e": 27433,
"s": 27425,
"text": "Output:"
},
{
"code": null,
"e": 27462,
"s": 27433,
"text": "[[1 2 3]\n [4 5 6]\n [7 8 9]]\n"
},
{
"code": null,
"e": 27515,
"s": 27462,
"text": "Note: For more information, refer to NumPy in Python"
},
{
"code": null,
"e": 28119,
"s": 27515,
"text": "Pandas: Pandas DataFrame is two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). A Data frame is a two-dimensional data structure, i.e., data is aligned in a tabular fashion in rows and columns. Pandas DataFrame consists of three principal components, the data, rows, and columns.Example:d = {'A':[1, 2, 3], 'B':[4, 5, 6], 'C':[7, 8, 9]} # here we have not import# 'pandas as pd' by ourself .df = pd.DataFrame(d) print(df)Output: A B C\n0 1 4 7\n1 2 5 8\n2 3 6 9\nNote: For more information, refer to Python | Pandas DataFrame"
},
{
"code": null,
"e": 28128,
"s": 28119,
"text": "Example:"
},
{
"code": "d = {'A':[1, 2, 3], 'B':[4, 5, 6], 'C':[7, 8, 9]} # here we have not import# 'pandas as pd' by ourself .df = pd.DataFrame(d) print(df)",
"e": 28267,
"s": 28128,
"text": null
},
{
"code": null,
"e": 28275,
"s": 28267,
"text": "Output:"
},
{
"code": null,
"e": 28320,
"s": 28275,
"text": " A B C\n0 1 4 7\n1 2 5 8\n2 3 6 9\n"
},
{
"code": null,
"e": 28383,
"s": 28320,
"text": "Note: For more information, refer to Python | Pandas DataFrame"
},
{
"code": null,
"e": 28832,
"s": 28383,
"text": "NLTK: The NLTK module is a massive tool kit, aimed at helping you with the entire Natural Language Processing (NLP) methodology.Example:# here we do not import# ' Nltk library' by ourself# but only the class of nltk .from nltk.tokenize import word_tokenize data = \"All apples are red in colour\" print(word_tokenize(data))Output:['All', 'apples', 'are', 'red', 'in', 'colour']Note: For more information, refer to Tokenize text using NLTK in python"
},
{
"code": null,
"e": 28841,
"s": 28832,
"text": "Example:"
},
{
"code": "# here we do not import# ' Nltk library' by ourself# but only the class of nltk .from nltk.tokenize import word_tokenize data = \"All apples are red in colour\" print(word_tokenize(data))",
"e": 29029,
"s": 28841,
"text": null
},
{
"code": null,
"e": 29037,
"s": 29029,
"text": "Output:"
},
{
"code": null,
"e": 29085,
"s": 29037,
"text": "['All', 'apples', 'are', 'red', 'in', 'colour']"
},
{
"code": null,
"e": 29157,
"s": 29085,
"text": "Note: For more information, refer to Tokenize text using NLTK in python"
},
{
"code": null,
"e": 29628,
"s": 29157,
"text": "Matplotlib: Matplotlib is an amazing visualization library in Python for 2D plots of arrays. Matplotlib is a multi-platform data visualization library built on NumPy arrays and designed to work with the broader SciPy stack.Example:# here we have not imported # 'matplotlib.pyplot as plt' by ourself. x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] plt.plot(x, y) plt.show()Output:Note: For more information, refer to Introduction to Matplotlib"
},
{
"code": null,
"e": 29637,
"s": 29628,
"text": "Example:"
},
{
"code": "# here we have not imported # 'matplotlib.pyplot as plt' by ourself. x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] plt.plot(x, y) plt.show()",
"e": 29807,
"s": 29637,
"text": null
},
{
"code": null,
"e": 29815,
"s": 29807,
"text": "Output:"
},
{
"code": null,
"e": 29879,
"s": 29815,
"text": "Note: For more information, refer to Introduction to Matplotlib"
},
{
"code": null,
"e": 29894,
"s": 29879,
"text": "python-modules"
},
{
"code": null,
"e": 29918,
"s": 29894,
"text": "Technical Scripter 2019"
},
{
"code": null,
"e": 29925,
"s": 29918,
"text": "Python"
},
{
"code": null,
"e": 29944,
"s": 29925,
"text": "Technical Scripter"
},
{
"code": null,
"e": 30042,
"s": 29944,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 30060,
"s": 30042,
"text": "Python Dictionary"
},
{
"code": null,
"e": 30092,
"s": 30060,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 30114,
"s": 30092,
"text": "Enumerate() in Python"
},
{
"code": null,
"e": 30156,
"s": 30114,
"text": "Different ways to create Pandas Dataframe"
},
{
"code": null,
"e": 30200,
"s": 30156,
"text": "Reading and Writing to text files in Python"
},
{
"code": null,
"e": 30229,
"s": 30200,
"text": "*args and **kwargs in Python"
},
{
"code": null,
"e": 30266,
"s": 30229,
"text": "Create a Pandas DataFrame from Lists"
},
{
"code": null,
"e": 30308,
"s": 30266,
"text": "How To Convert Python Dictionary To JSON?"
},
{
"code": null,
"e": 30344,
"s": 30308,
"text": "Convert integer to string in Python"
}
] |
Data Viz using Python and Tableau: from API call to Hosted Dashboard | by Richard Peterson | Towards Data Science | Building an infographic often starts while you are first exploring the data. Matplotlib, Seaborn, and ggplot2 are the classic programming charts, while Plotly has also exploded in popularity. But for me, nothing matches the clean professional aesthetic of Tableau. In this article, we will leave behind the inline Jupyter Notebook charts and learn how to create hosted interactive charts using Tableau Public.
This article is comprised of three parts:
Research — what is your topic and how will you extract the data? In this article, we will check out the Bureau of Labor Statistics (BLS) API. We are able to retrieve data using an HTTP POST request with parameters including the query IDs for each BLS query. The Data we will request is Unemployment by State.
Data Cleaning — Using python, we will call the BLS API and clean the data using Pandas into a tidy vertical format. This can be done with a Jupyter Notebook or Python Script. Data will be saved to a CSV file and SQLite database.
Tableau — Create a choropleth (geographic) map and a tree map, and publish the charts to Tableau Public. We will be able to share the chart with an embedded link.
You can follow along with the querying and cleaning data section from my GitHub repo! This is a companion article to an analysis on unemployment from the COVID-19 pandemic.
github.com
The Bureau of Labor Statistics API is a REST API that serves data based on unique query IDs. BLS query IDs are based on a number of factors, such as state and industry. To find the query ID, you will need to browse to BLS Databases, Tables & Calculators and locate the category related to your data. Unemployment by State is found under Local Area Unemployment Statistics — One-Screen Search. You will notice that each query has a unique ID. If you can determine the pattern, you can programmatically generate these IDs rather than copying and pasting.
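For instance, a sketch of generating such IDs programmatically; the LASST pattern used here (a statewide unemployment-rate series built from state FIPS codes) is an assumption that you should verify against the BLS query tool:

# Hypothetical generation of LAUS series IDs for a few states.
state_fips = {"Alabama": "01", "Alaska": "02", "California": "06"}

series_ids = [
    f"LASST{fips}0000000000003"   # assumed pattern: LASST + state FIPS + area padding + measure code
    for fips in state_fips.values()
]
print(series_ids)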
In the GitHub repo, there is a module named blspandas.py which has wrapper functions for cleaning the data and performing the HTTP POST request that retrieves it (Credit: BD Economics). The script below calls these functions to create a list of query IDs, perform the HTTP request, and save the results as a pandas dataframe.
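The gist itself is not reproduced here, but a minimal sketch of the kind of request blspandas wraps might look like this (the series IDs and years are placeholders; the payload format follows the public BLS API v2):

import json
import requests
import pandas as pd

# Placeholder series IDs -- the real IDs come from the BLS query tool.
series_ids = ["LASST060000000000003", "LASST480000000000003"]

headers = {"Content-type": "application/json"}
payload = json.dumps({"seriesid": series_ids, "startyear": "2020", "endyear": "2020"})
response = requests.post(
    "https://api.bls.gov/publicAPI/v2/timeseries/data/", data=payload, headers=headers
)

# Flatten the JSON response into rows of (seriesID, date, value).
rows = []
for series in response.json()["Results"]["series"]:
    for item in series["data"]:
        rows.append({"seriesID": series["seriesID"],
                     "date": f"{item['year']}-{item['periodName']}",
                     "value": float(item["value"])})
df = pd.DataFrame(rows)
print(df.head())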
A key part of the blspandas.py module is the clean_bls_data(combined_queries) function. Before we ran df = blspandas.clean_bls_data(combined_queries) our data looked like this:
We need this data in a tidy vertical format like State | Date | Metric. Pandas melt is perfect for this transformation.
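As a generic illustration of that reshaping step (this is not the author's clean_bls_data function — the frame and numbers below are made up):
import pandas as pd

# Hypothetical wide frame: one column per state, one row per month
wide = pd.DataFrame({
    "Date": ["2020-01-01", "2020-06-01"],
    "Ohio": [4.1, 10.9],
    "Texas": [3.5, 8.6],
})

# melt() unpivots the state columns into the tidy State | Date | Metric layout
tidy = wide.melt(id_vars="Date", var_name="State", value_name="Percent Unemployed")
print(tidy)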
After running df = blspandas.clean_bls_data(combined_queries), the data looks like this:
For this, let’s think about what we ultimately need. Our goal here is to create a choropleth map and a tree map showing the change in unemployment from January 2020 to June 2020. This will describe the effects of the COVID-19 pandemic on the labor market, and show which states have not bounced back.
What we need to do is find the difference in unemployment between two dates for every state. We will do this by locating all the Date rows that contain our two dates, and subtract January unemployment from June Unemployment.
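A minimal sketch of that subtraction, reusing the hypothetical tidy frame from above (the dates and column names are illustrative, not the exact ones in the article's data):
jan = tidy[tidy["Date"] == "2020-01-01"].set_index("State")["Percent Unemployed"]
jun = tidy[tidy["Date"] == "2020-06-01"].set_index("State")["Percent Unemployed"]

chg_emp = (jun - jan).rename("Change in Unemployment").reset_index()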
The resulting chg_emp dataframe looks like this:
This is all we need to create our Tableau Visuals! Simply write the chg_emp dataframe to a CSV file by running:
chg_emp.to_csv('Change in unemployment by state.csv', index=False)
At this point, either sign up for Tableau Public, or purchase the software. As a student, I was able to sign up for free and received the full desktop version.
Load the data by importing your CSV file into your workbook. Click on new worksheet and configure it like the below:
For the Tree Map, we will display the same data, but in a way that allows for greater detail, including the state’s name and change in unemployment. Under Marks, you will need:
Label for State
Color for Percent Unemployed
Label for Percent Unemployed
You can create a dashboard by clicking on the ‘Dashboard 1’ and dragging over your two worksheets. Here I have arranged the Choropleth on top of the Tree Map.
Finally, we will push our Tableau workbook to Tableau Public.
To do this, we need to Extract our CSV data source. This will allow for the workbook to be uploaded as a single, bundled file to Tableau Public.
Once the extract is completed, we can push our workbook to Tableau Public.
After the upload to Tableau, it will open in your browser window. At this point, all you need to share your dashboard is to click the share button in the bottom right and send the link to your friends! Check out my version of the hosted dashboard here!
Embedded Dashboard
You have a couple of options:
Embed script — this can be embedded into html to serve a live version of your dashboard from Tableau Public. This article demonstrates how live dashboards can be embedded into a WordPress blog post.
Unfortunately, you cannot embed the live dashboard in a Medium article.
Another good option to check out is embedding the dashboard on a simple python web server.
Thanks for reading, and I hope you find the python+Tableau stack as compelling as I do!
Kotlin - Interface | In this chapter, we will learn about interfaces in Kotlin. In Kotlin, an interface works very much like a Java 8 interface, which means it can contain method implementations as well as abstract method declarations. An interface can be implemented by a class in order to use its defined functionality. We have already introduced an example with an interface in Chapter 6 - section “anonymous inner class”. In this chapter, we will learn more about it. The keyword “interface” is used to define an interface in Kotlin as shown in the following piece of code.
interface ExampleInterface {
var myVar: String // abstract property
fun absMethod() // abstract method
fun sayHello() = "Hello there" // method with default implementation
}
In the above example, we have created an interface named “ExampleInterface”, and inside it we have an abstract property, an abstract method, and a method with a default implementation. Look at the function named “sayHello()”, which is the implemented method.
In the following example, we will be implementing the above interface in a class.
interface ExampleInterface {
var myVar: Int // abstract property
fun absMethod():String // abstract method
fun hello() {
println("Hello there, Welcome to TutorialsPoint.Com!")
}
}
class InterfaceImp : ExampleInterface {
override var myVar: Int = 25
override fun absMethod() = "Happy Learning "
}
fun main(args: Array<String>) {
val obj = InterfaceImp()
println("My Variable Value is = ${obj.myVar}")
print("Calling hello(): ")
obj.hello()
print("Message from the Website-- ")
println(obj.absMethod())
}
The above piece of code will yield the following output in the browser.
My Variable Value is = 25
Calling hello(): Hello there, Welcome to TutorialsPoint.Com!
Message from the Website-- Happy Learning
As mentioned earlier, Kotlin doesn’t support multiple inheritance of classes; however, the same effect can be achieved by implementing two or more interfaces at a time.
In the following example, we will create two interfaces and later we will implement both the interfaces into a class.
interface A {
fun printMe() {
println(" method of interface A")
}
}
interface B {
fun printMeToo() {
println("I am another Method from interface B")
}
}
// implements two interfaces A and B
class multipleInterfaceExample: A, B
fun main(args: Array<String>) {
val obj = multipleInterfaceExample()
obj.printMe()
obj.printMeToo()
}
In the above example, we have created two sample interfaces A and B, and in the class named “multipleInterfaceExample” we have implemented both of the interfaces declared earlier. The above piece of code will yield the following output in the browser.
method of interface A
I am another Method from interface B
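One point worth noting when implementing several interfaces: if two interfaces declare a method with the same signature and both provide a body, Kotlin forces the class to override it explicitly and lets you delegate to a specific parent with super<Name>. A small additional sketch (not part of the original example above):
interface A {
   fun printMe() {
      println(" method of interface A")
   }
}
interface B {
   fun printMe() {
      println("method of interface B")
   }
}

// both parents supply a body, so an explicit override is required
class bothInterfacesExample: A, B {
   override fun printMe() {
      super<A>.printMe()
      super<B>.printMe()
   }
}

fun main(args: Array<String>) {
   bothInterfacesExample().printMe()
}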
Java Program to add integers and check for overflow | To check for integer overflow, we need to compare the result of adding the integers with Integer.MAX_VALUE. Here, Integer.MAX_VALUE is the maximum value of an integer in Java.
Let us see an example wherein integers are added and if the sum is more than the Integer.MAX_VALUE, then an exception is thrown.
public class Demo {
public static void main(String[] args) {
int a = 9897988;
int b = 8798798;
System.out.println("Value1: "+a);
System.out.println("Value2: "+b);
long sum = (long)a + (long)b;
if (sum > Integer.MAX_VALUE) {
throw new ArithmeticException("Integer Overflow!");
}
// displaying sum
System.out.println("Sum: "+(int)sum);
}
}
Value1: 9897988
Value2: 8798798
Sum: 18696786
In the above example, we have taken the following two integers.
int a = 9897988;
int b = 8798798;
Now we will cast them to long and add them.
long sum = (long)a + (long)b;
If the result is more than the maximum value, then an exception is thrown.
if (sum > Integer.MAX_VALUE) {
   throw new ArithmeticException("Overflow!");
}
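As a side note, since Java 8 the same check can be delegated to Math.addExact(), which throws an ArithmeticException automatically when the int addition overflows — a minimal sketch:
public class ExactSumDemo {
   public static void main(String[] args) {
      int a = 9897988;
      int b = 8798798;
      // throws ArithmeticException if a + b overflows the int range
      int sum = Math.addExact(a, b);
      System.out.println("Sum: " + sum);
   }
}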
SQL Tryit Editor v1.6 | ALTER TABLE Employees
ALTER COLUMN BirthDate year;
Edit the SQL Statement, and click "Run SQL" to see the result.
This SQL-Statement is not supported in the WebSQL Database.
The example still works, because it uses a modified version of SQL.
Your browser does not support WebSQL.
You are now using a light version of the Try-SQL Editor, with a read-only Database.
If you switch to a browser with WebSQL support, you can try any SQL statement, and play with the Database as much as you like. The Database can also be restored at any time.
Our Try-SQL Editor uses WebSQL to demonstrate SQL.
A Database-object is created in your browser, for testing purposes.
You can try any SQL statement, and play with the Database as much as you like. The Database can be restored at any time, simply by clicking the "Restore Database" button.
WebSQL stores a Database locally, on the user's computer. Each user gets their own Database object.
WebSQL is supported in Chrome, Safari, Opera, and Edge(79).
If you use another browser you will still be able to use our Try SQL Editor, but a different version, using a server-based ASP application, with a read-only Access Database, where users are not allowed to make any changes to the data.
Improving Performance of ML Model (Contd…) | As we know, ML models are parameterized in such a way that their behavior can be adjusted for a specific problem. Algorithm tuning means finding the best combination of these parameters so that the performance of the ML model can be improved. This process is sometimes called hyperparameter optimization; the parameters of the algorithm itself are called hyperparameters, and the coefficients found by the ML algorithm are called parameters.
Here, we are going to discuss some methods for algorithm parameter tuning provided by Python scikit-learn.
Grid search is a parameter tuning approach. The key point of this method is that it builds and evaluates the model methodically for every possible combination of the algorithm parameters specified in a grid. Hence, we can say that this algorithm performs an exhaustive search.
Example
In the following Python recipe, we are going to perform grid search by using GridSearchCV class of sklearn for evaluating various alpha values for the Ridge Regression algorithm on Pima Indians diabetes dataset.
First, import the required packages as follows −
import numpy
from pandas import read_csv
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
Now, we need to load the Pima diabetes dataset as did in previous examples −
path = r"C:\pima-indians-diabetes.csv"
headernames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(path, names=headernames)
array = data.values
X = array[:,0:8]
Y = array[:,8]
Next, evaluate the various alpha values as follows −
alphas = numpy.array([1,0.1,0.01,0.001,0.0001,0])
param_grid = dict(alpha=alphas)
Now, we need to apply grid search on our model −
model = Ridge()
grid = GridSearchCV(estimator=model, param_grid=param_grid)
grid.fit(X, Y)
Print the result with following script line −
print(grid.best_score_)
print(grid.best_estimator_.alpha)
Output
0.2796175593129722
1.0
The above output gives us the optimal score and the set of parameters in the grid that achieved that score. The alpha value in this case is 1.0.
Random search is a parameter tuning approach. The key point of this method is that it samples the algorithm parameters from a random distribution for a fixed number of iterations.
Example
In the following Python recipe, we are going to perform random search by using RandomizedSearchCV class of sklearn for evaluating different alpha values between 0 and 1 for the Ridge Regression algorithm on Pima Indians diabetes dataset.
First, import the required packages as follows −
import numpy
from pandas import read_csv
from scipy.stats import uniform
from sklearn.linear_model import Ridge
from sklearn.model_selection import RandomizedSearchCV
Now, we need to load the Pima diabetes dataset as did in previous examples −
path = r"C:\pima-indians-diabetes.csv"
headernames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(path, names=headernames)
array = data.values
X = array[:,0:8]
Y = array[:,8]
Next, evaluate the various alpha values on Ridge regression algorithm as follows −
param_grid = {'alpha': uniform()}
model = Ridge()
random_search = RandomizedSearchCV(estimator=model, param_distributions=param_grid,
                                   n_iter=50, random_state=7)
random_search.fit(X, Y)
Print the result with following script line −
print(random_search.best_score_)
print(random_search.best_estimator_.alpha)
Output
0.27961712703051084
0.9779895119966027
The above output gives us the optimal score just similar to the grid search.
Protobuf - Map | Map is one of the composite datatypes of Protobuf. Protobuf translates this to a java.util.Map interface in Java.
Continuing with our theater example, following is the syntax that we need to have to instruct Protobuf that we will be creating a map −
syntax = "proto3";
package theater;
option java_package = "com.tutorialspoint.theater";
message Theater {
map<string, int32> movieTicketPrice = 9;
}
Now our class/message contains a map of movies and their ticket prices. Note that although we have a "string -> int" map here, we could just as well use numeric, Boolean, and custom data types as values. However, note that we cannot have a nested map.
To use Protobuf, we will now have to use the protoc binary to create the required classes from this ".proto" file. Let us see how to do that −
protoc --java_out=java/src/main/java proto_files\theater.proto
The above command will create the required files and now we can use it in our Java code. First, we will create a writer to write the theater information −
package com.tutorialspoint.theater;
import java.util.List;
import java.util.Map;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import com.tutorialspoint.theater.TheaterOuterClass.Theater;
public class TheaterWriter{
public static void main(String[] args) throws IOException {
Map<String, Integer> ticketPrice = new HashMap<>();
ticketPrice.put("Avengers Endgame", 700);
ticketPrice.put("Captain America", 200);
ticketPrice.put("Wonder Woman 1984", 400);
Theater theater = Theater.newBuilder()
.putAllMovieTicketPrice(ticketPrice)
.build();
String filename = "theater_protobuf_output";
System.out.println("Saving theater information to file: " + filename);
try(FileOutputStream output = new FileOutputStream(filename)){
theater.writeTo(output);
}
System.out.println("Saved theater information with following data to disk: \n" + theater);
}
}
Next, we will have a reader to read the theater information −
package com.tutorialspoint.theater;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import com.tutorialspoint.greeting.Greeting.Greet;
import com.tutorialspoint.theater.TheaterOuterClass.Theater;
import com.tutorialspoint.theater.TheaterOuterClass.Theater.Builder;
public class TheaterReader{
public static void main(String[] args) throws IOException {
Builder theaterBuilder = Theater.newBuilder();
String filename = "theater_protobuf_output";
System.out.println("Reading from file " + filename);
try(FileInputStream input = new FileInputStream(filename)) {
Theater theater = theaterBuilder.mergeFrom(input).build();
System.out.println(theater);
}
}
}
Now, post compilation, let us execute the writer first −
> java -cp .\target\protobuf-tutorial-1.0.jar com.tutorialspoint.theater.TheaterWriter
Saving theater information to file: theater_protobuf_output
Saved theater information with following data to disk:
movieTicketPrice {
key: "Avengers Endgame"
value: 700
}
movieTicketPrice {
key: "Captain America"
value: 200
}
movieTicketPrice {
key: "Wonder Woman 1984"
value: 400
}
Now, let us execute the reader to read from the same file −
java -cp .\target\protobuf-tutorial-1.0.jar com.tutorialspoint.theater.TheaterReader
Reading from file theater_protobuf_output
movieTicketPrice {
key: "Avengers Endgame"
value: 700
}
movieTicketPrice {
key: "Captain America"
value: 200
}
movieTicketPrice {
key: "Wonder Woman 1984"
value: 400
}
So, as we see, we are able to read the serialized map by deserializing the binary data to Theater object. In the next chapter, we will see how to create a nested class in Protobuf.
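For completeness, the entries of the deserialized map can also be read individually. Assuming the standard accessors that protoc generates for proto3 map fields (the getXxxMap / getXxxCount / getXxxOrDefault family), a short snippet placed inside the reader's try block could look like this:
System.out.println("Movies listed: " + theater.getMovieTicketPriceCount());
System.out.println("Ticket for Captain America: "
   + theater.getMovieTicketPriceOrDefault("Captain America", 0));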
Data Preprocessing with Python Pandas — Part 2 Data Formatting | by Angelica Lo Duca | Towards Data Science | This tutorial explains how to preprocess data using the Pandas library. Preprocessing is the process of doing a pre-analysis of data, in order to transform them into a standard and normalized format. Preprocessing involves the following aspects:
missing values
data formatting
data normalization
data standardization
data binning
In this tutorial we deal only with data formatting. In my previous tutorial I dealt with missing values.
Data formatting is the process of transforming data into a common format, which helps users to perform comparisons. An example of unformatted data is the following: the same entity is referred to in the same column with different values, such as New York and NY.
You can download the source code of this tutorial as a Jupyter notebook from my Github Data Science Repository.
In this tutorial we will use the dataset related to Twitter, which can be downloaded from this link.
Firstly, import data using the pandas library and convert them into a dataframe. Through the head(5) method we print only the first 5 rows of the dataset.
import pandas as pd
df = pd.read_csv('tweets.csv')
df.head(5)
In this tutorial, we drop all the missing values through the dropna() function.
df.dropna(inplace=True)
First of all, we should make sure that every column is assigned to the correct data type. This can be checked through the property dtypes.
df.dtypes
which gives the following output:
Tweet Id                       object
Tweet URL                      object
Tweet Posted Time (UTC)        object
Tweet Content                  object
Tweet Type                     object
Client                         object
Retweets Received               int64
Likes Received                  int64
Tweet Location                 object
Tweet Language                 object
User Id                        object
Name                           object
Username                       object
User Bio                       object
Verified or Non-Verified       object
Profile URL                    object
Protected or Non-protected     object
User Followers                  int64
User Following                  int64
User Account Creation Date     object
Impressions                     int64
dtype: object
In our case we can convert the column Tweet Location to string by using the function astype() as follows:
df['Tweet Location'] = df['Tweet Location'].astype('string')
We can convert all the objects to strings by running the following statements:
import numpy as np
obj_columns = df.select_dtypes(include=np.object).columns.tolist()
df[obj_columns] = df[obj_columns].astype('string')
The astype() function supports all datatypes described at this link.
This aspect involves categorical and numeric data. Categorical data should have all the same formatting style, such as lower case. In order to format all categorical data to lower case, we can use the following statement:
df['Tweet Content'] = df['Tweet Content'].str.lower()
Other techniques to make homogeneous categorical data involve the following aspects:
remove white space everywhere: df['Tweet Content'] = df['Tweet Content'].str.replace(' ', '')
remove white space at the beginning of the string: df['Tweet Content'] = df['Tweet Content'].str.lstrip()
remove white space at the end of the string: df['Tweet Content'] = df['Tweet Content'].str.rstrip()
remove white space at both ends: df['Tweet Content'] = df['Tweet Content'].str.strip().
Numeric data should have for example the same number of digits after the point. For example, if we want 2 decimal points, we can run the following command: df['User Following'] = df['User Following'].round(2).
Other techniques to make homogeneous numeric data include:
Round up — Single DataFrame column — df['User Following'] = df['User Following'].apply(np.ceil)
Round down — Single DataFrame column — df['User Following'] = df['User Following'].apply(np.floor).
It may happen that the same concept is represented in different ways. For example, in our dataset, the column Twitter Location contains the values Columbus,OH and Columbus, OH to describe the same concept. We can use the unique() function to list all the values of a column.
df['Tweet Location'].unique()
which gives the following output:
<StringArray>
[ 'Brussels', 'Pill, Bristol', 'Ohio, USA', <NA>, 'Cincinnati, OH', 'WKRC TV',
  'Scottsdale, AZ', 'Columbus,OH', 'Columbus, OH', 'DK Diner, USA',
  ...
  'Kampala, Uganda', 'ilorin,kwara state', 'Nigeria, Lagos', 'Kigali',
  'Towcester, England', 'Heart of the EU (the clue is in the name)',
  'South West, England', 'Manchester', 'Seattle, WA', 'in my happy place']
Length: 106, dtype: string
In order to deal with different values representing the same concept, we should manipulate each type of error separately. For example, we can manipulate every string word,word in order to insert a space after the comma and have the following output word, word. We can define a function, called set_pattern() which searches for a specific pattern into a string and then it performs some replacement in the same string, if the pattern is found. In our case we search for all the patterns having the structure word,word and then we replace the , with , . Finally we return the result.
import re

def set_pattern(x):
    pattern = r'[(A-Z)]\w+,([A-Z])\w+'
    res = re.match(pattern, x)
    if res:
        x = x.replace(',', ', ')
    return x
Now we can apply the function to every value in the column Tweet Location. This can be achieved by using the function apply() combined with the operator lambda. We can specify that the function apply() must be applied to every row (through the parameter axis = 1) and then through the lambda operator we can select the specific row and apply it the function set_pattern().
df['Tweet Location'] = df.apply(lambda x: set_pattern(x['Tweet Location']), axis=1)
In this tutorial I have illustrated how to perform data formatting with Python Pandas. The following three steps are suggested:
put data in the correct format — this is required when further analysis is required, such as statistical analysis
make data homogeneous — this is useful especially for textual data which need further analysis, such as sentiment analysis
use a single value to represent the same concept — this is useful when data grouping is required.
If you would like to learn about the other aspects of data preprocessing, such as data standardization and data normalization, stay tuned...
If you want to be updated on my research and other activities, you can follow me on Twitter, Youtube and Github.
How to iterate the values of an enum using a for loop in Java? | Enumerations in Java represent a group of named constants. You can create an enumeration using the following syntax −
enum Days {
SUNDAY, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY
}
You can retrieve the contents of an enum using the values() method. This method returns an array containing all the values. Once you obtain the array, you can iterate over it using a for loop.
public class IterateEnum{
public static void main(String args[]) {
Days days[] = Days.values();
System.out.println("Contents of the enum are: ");
//Iterating enum using the for loop
for(Days day: days) {
System.out.println(day);
}
}
}
Contents of the enum are:
SUNDAY
MONDAY
TUESDAY
WEDNESDAY
THURSDAY
FRIDAY
SATURDAY
enum Vehicles {
//Declaring the constants of the enum
ACTIVA125, ACTIVA5G, ACCESS125, VESPA, TVSJUPITER;
int i; //Instance variable
Vehicles() { //constructor
}
public void enumMethod() { //method
System.out.println("Current value: "+Vehicles.this);
}
}
public class Sam{
public static void main(String args[]) {
Vehicles vehicles[] = Vehicles.values();
for(Vehicles veh: vehicles) {
System.out.println(veh);
}
vehicles[3].enumMethod();
}
}
ACTIVA125
ACTIVA5G
ACCESS125
VESPA
TVSJUPITER
Current value: VESPA | [
{
"code": null,
"e": 1181,
"s": 1062,
"text": "Enumerations in Java represents a group of named constants, you can create an enumeration using the following syntax −"
},
{
"code": null,
"e": 1261,
"s": 1181,
"text": "enum Days {\n SUNDAY, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY\n}"
},
{
"code": null,
"e": 1450,
"s": 1261,
"text": "You can retrieve the contents of an enum using the values() method. This method returns an array containing all the values. Once you obtain the array you can iterate it using the for loop."
},
{
"code": null,
"e": 1739,
"s": 1450,
"text": "public class IterateEnum{\n public static void main(String args[]) {\n Days days[] = Days.values();\n System.out.println(\"Contents of the enum are: \"); \n //Iterating enum using the for loop\n for(Days day: days) {\n System.out.println(day);\n }\n } \n}"
},
{
"code": null,
"e": 1822,
"s": 1739,
"text": "Contents of the enum are:\nSUNDAY\nMONDAY\nTUESDAY\nWEDNESDAY\nTHURSDAY\nFRIDAY\nSATURDAY"
},
{
"code": null,
"e": 1832,
"s": 1822,
"text": "Live Demo"
},
{
"code": null,
"e": 2351,
"s": 1832,
"text": "enum Vehicles {\n //Declaring the constants of the enum\n ACTIVA125, ACTIVA5G, ACCESS125, VESPA, TVSJUPITER;\n int i; //Instance variable\n Vehicles() { //constructor\n } \n public void enumMethod() { //method\n System.out.println(\"Current value: \"+Vehicles.this);\n }\n}\npublic class Sam{\n public static void main(String args[]) {\n Vehicles vehicles[] = Vehicles.values();\n for(Vehicles veh: vehicles) {\n System.out.println(veh);\n }\n vehicles[3].enumMethod(); \n } \n}"
},
{
"code": null,
"e": 2418,
"s": 2351,
"text": "ACTIVA125\nACTIVA5G\nACCESS125\nVESPA\nTVSJUPITER\nCurrent value: VESPA"
}
] |
Console readPassword() method in Java with Examples - GeeksforGeeks | 12 Jun, 2020
The readPassword() method of the Console class in Java has two variants:
1. The readPassword() method of Console class in Java is used to read a password or passphrase from the console with disabled echoing.
Syntax:
public char[] readPassword()
Parameters: This method does not accept any parameter.
Return value: This method returns a character array that contains the password or passphrase read from the console. It returns null if the stream is ended.
Exceptions: This method throws IOError if an I/O error occurs.
Note: System.console() returns null in an online IDE.
Below programs illustrate readPassword() method in Console class in IO package:
Program 1:
// Java program to illustrate
// Console readPassword() method

import java.io.*;

public class GFG {
    public static void main(String[] args)
    {
        // Create the console object
        Console cnsl = System.console();

        if (cnsl == null) {
            System.out.println("No console available");
            return;
        }

        // Read line
        String str = cnsl.readLine("Enter username : ");

        // Print username
        System.out.println("Username : " + str);

        // Read password into character array
        char[] ch = cnsl.readPassword("Enter password : ");

        // Print password (convert the char[] to a String so the
        // characters are printed rather than the array reference)
        System.out.println("Password : " + String.valueOf(ch));
    }
}
Program 2:
// Java program to illustrate
// Console readPassword() method

import java.io.*;

public class GFG {
    public static void main(String[] args)
    {
        // Create the console object
        Console cnsl = System.console();

        if (cnsl == null) {
            System.out.println("No console available");
            return;
        }

        // Read line
        String str = cnsl.readLine("Enter username : ");

        // Print username
        System.out.println("Username : " + str);

        // Read password into character array
        char[] ch = cnsl.readPassword("Enter password : ");

        // Print password (convert the char[] to a String so the
        // characters are printed rather than the array reference)
        System.out.println("Password : " + String.valueOf(ch));
    }
}
2. The readPassword(String, Object) method of Console class in Java is used to read a password or passphrase from the console by providing a formatted prompt. It returns the password in the character array.
Syntax:
public char[] readPassword(String fmt,
Object... args)
Parameters: This method accepts two parameters:
fmt – It represents the format of the string.
args – It represents the arguments that are referenced by the format specifiers in the string format.
Return value: This method returns the character array that contains the password or passphrase read from the console. It returns null if the stream is ended.
Exceptions:
IllegalFormatException – This method throws IllegalFormatException if string format contains an illegal syntax or a format specifier is not compatible with the given arguments or insufficient arguments given the format string or other conditions that are illegal.
IOError – This method throws IOError if an I/O error occurs.
Below programs illustrate readPassword(String, Object) method in Console class in IO package:
Program 1:
// Java program to illustrate
// Console readPassword(String, Object) method

import java.io.*;

public class GFG {
    public static void main(String[] args)
    {
        // Create the console object
        Console cnsl = System.console();

        if (cnsl == null) {
            System.out.println("No console available");
            return;
        }

        // Format string; %1$ and %2$ refer to the two
        // arguments passed to readLine()/readPassword() below
        String fmt = "%1$5s %2$10s%n";

        // Read line
        String un = cnsl.readLine(fmt, "Enter", "Username : ");

        // Print username
        System.out.println("Username : " + un);

        // Read password into character array
        char[] pwd = cnsl.readPassword(fmt, "Enter", "Password : ");

        // Print password (convert the char[] to a String so the
        // characters are printed rather than the array reference)
        System.out.println("Password : " + String.valueOf(pwd));
    }
}
Program 2:
// Java program to illustrate
// Console readPassword(String, Object) method

import java.io.*;

public class GFG {
    public static void main(String[] args)
    {
        // Create the console object
        Console cnsl = System.console();

        if (cnsl == null) {
            System.out.println("No console available");
            return;
        }

        // Format string; %1$ and %2$ refer to the two
        // arguments passed to readLine()/readPassword() below
        String fmt = "%1$5s %2$10s%n";

        // Read line
        String un = cnsl.readLine(fmt, "Enter", "Username : ");

        // Print username
        System.out.println("Username : " + un);

        // Read password into character array
        char[] pwd = cnsl.readPassword(fmt, "Enter", "Password : ");

        // Print password (convert the char[] to a String so the
        // characters are printed rather than the array reference)
        System.out.println("Password : " + String.valueOf(pwd));
    }
}
References:
1. https://docs.oracle.com/javase/10/docs/api/java/io/Console.html#readPassword()
2. https://docs.oracle.com/javase/10/docs/api/java/io/Console.html#readPassword(java.lang.String, java.lang.Object...)
| [
{
"code": null,
"e": 23557,
"s": 23529,
"text": "\n12 Jun, 2020"
},
{
"code": null,
"e": 23625,
"s": 23557,
"text": "The readPassword() method of Console class in Java is of two types:"
},
{
"code": null,
"e": 23760,
"s": 23625,
"text": "1. The readPassword() method of Console class in Java is used to read a password or passphrase from the console with disabled echoing."
},
{
"code": null,
"e": 23768,
"s": 23760,
"text": "Syntax:"
},
{
"code": null,
"e": 23798,
"s": 23768,
"text": "public char[] readPassword()\n"
},
{
"code": null,
"e": 23853,
"s": 23798,
"text": "Parameters: This method does not accept any parameter."
},
{
"code": null,
"e": 24009,
"s": 23853,
"text": "Return value: This method returns a character array that contains the password or passphrase read from the console. It returns null if the stream is ended."
},
{
"code": null,
"e": 24072,
"s": 24009,
"text": "Exceptions: This method throws IOError if an I/O error occurs."
},
{
"code": null,
"e": 24126,
"s": 24072,
"text": "Note: System.console() returns null in an online IDE."
},
{
"code": null,
"e": 24206,
"s": 24126,
"text": "Below programs illustrate readPassword() method in Console class in IO package:"
},
{
"code": null,
"e": 24217,
"s": 24206,
"text": "Program 1:"
},
{
"code": "// Java program to illustrate// Console readPassword() method import java.io.*; public class GFG { public static void main(String[] args) { // Create the console object Console cnsl = System.console(); if (cnsl == null) { System.out.println( \"No console available\"); return; } // Read line String str = cnsl.readLine( \"Enter username : \"); // Print username System.out.println( \"Username : \" + str); // Read password // into character array char[] ch = cnsl.readPassword( \"Enter password : \"); // Print password System.out.println( \"Password : \" + ch); }}",
"e": 24977,
"s": 24217,
"text": null
},
{
"code": null,
"e": 24988,
"s": 24977,
"text": "Program 2:"
},
{
"code": "// Java program to illustrate// Console readPassword() method import java.io.*; public class GFG { public static void main(String[] args) { // Create the console object Console cnsl = System.console(); if (cnsl == null) { System.out.println( \"No console available\"); return; } // Read line String str = cnsl.readLine( \"Enter username : \"); // Print username System.out.println( \"Username : \" + str); // Read password // into character array char[] ch = cnsl.readPassword( \"Enter password : \"); // Print password System.out.println( \"Password : \" + ch); }}",
"e": 25748,
"s": 24988,
"text": null
},
{
"code": null,
"e": 25955,
"s": 25748,
"text": "2. The readPassword(String, Object) method of Console class in Java is used to read a password or passphrase from the console by providing a formatted prompt. It returns the password in the character array."
},
{
"code": null,
"e": 25963,
"s": 25955,
"text": "Syntax:"
},
{
"code": null,
"e": 26041,
"s": 25963,
"text": "public char[] readPassword(String fmt,\n Object... args)\n"
},
{
"code": null,
"e": 26089,
"s": 26041,
"text": "Parameters: This method accepts two parameters:"
},
{
"code": null,
"e": 26135,
"s": 26089,
"text": "fmt – It represents the format of the string."
},
{
"code": null,
"e": 26237,
"s": 26135,
"text": "args – It represents the arguments that are referenced by the format specifiers in the string format."
},
{
"code": null,
"e": 26398,
"s": 26237,
"text": "Return value: This method returns the character array that contains the the password or passphrase read from the console.It returns null if the stream is ended."
},
{
"code": null,
"e": 26410,
"s": 26398,
"text": "Exceptions:"
},
{
"code": null,
"e": 26674,
"s": 26410,
"text": "IllegalFormatException – This method throws IllegalFormatException if string format contains an illegal syntax or a format specifier is not compatible with the given arguments or insufficient arguments given the format string or other conditions that are illegal."
},
{
"code": null,
"e": 26735,
"s": 26674,
"text": "IOError – This method throws IOError if an I/O error occurs."
},
{
"code": null,
"e": 26829,
"s": 26735,
"text": "Below programs illustrate readPassword(String, Object) method in Console class in IO package:"
},
{
"code": null,
"e": 26840,
"s": 26829,
"text": "Program 1:"
},
{
"code": "// Java program to illustrate// Console readPassword(String, Object) method import java.io.*; public class GFG { public static void main(String[] args) { // Create the console object Console cnsl = System.console(); if (cnsl == null) { System.out.println( \"No console available\"); return; } String fmt = \"%2$5s %3$10s%n\"; // Read line String un = cnsl.readLine( fmt, \"Enter\", \"Username : \"); // Print username System.out.println( \"Username : \" + un); // Read password // into character array char[] pwd = cnsl.readPassword( fmt, \"Enter\", \"Password : \"); // Print password System.out.println( \"Password : \" + pwd); }}",
"e": 27670,
"s": 26840,
"text": null
},
{
"code": null,
"e": 27681,
"s": 27670,
"text": "Program 2:"
},
{
"code": "// Java program to illustrate// Console readPassword(String, Object) method import java.io.*; public class GFG { public static void main(String[] args) { // Create the console object Console cnsl = System.console(); if (cnsl == null) { System.out.println( \"No console available\"); return; } String fmt = \"%2$5s %3$10s%n\"; // Read line String un = cnsl.readLine( fmt, \"Enter\", \"Username : \"); // Print username System.out.println( \"Username : \" + un); // Read password // into character array char[] pwd = cnsl.readPassword( fmt, \"Enter\", \"Password : \"); // Print password System.out.println( \"Password : \" + pwd); }}",
"e": 28511,
"s": 27681,
"text": null
},
{
"code": null,
"e": 28722,
"s": 28511,
"text": "References:1. https://docs.oracle.com/javase/10/docs/api/java/io/Console.html#readPassword()2. https://docs.oracle.com/javase/10/docs/api/java/io/Console.html#readPassword(java.lang.String, java.lang.Object...)"
},
{
"code": null,
"e": 28737,
"s": 28722,
"text": "Java-Functions"
},
{
"code": null,
"e": 28753,
"s": 28737,
"text": "Java-IO package"
},
{
"code": null,
"e": 28758,
"s": 28753,
"text": "Java"
},
{
"code": null,
"e": 28763,
"s": 28758,
"text": "Java"
},
{
"code": null,
"e": 28861,
"s": 28763,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28870,
"s": 28861,
"text": "Comments"
},
{
"code": null,
"e": 28883,
"s": 28870,
"text": "Old Comments"
},
{
"code": null,
"e": 28901,
"s": 28883,
"text": "Hashtable in Java"
},
{
"code": null,
"e": 28922,
"s": 28901,
"text": "Constructors in Java"
},
{
"code": null,
"e": 28968,
"s": 28922,
"text": "Different ways of Reading a text file in Java"
},
{
"code": null,
"e": 29011,
"s": 28968,
"text": "Comparator Interface in Java with Examples"
},
{
"code": null,
"e": 29051,
"s": 29011,
"text": "Java Math random() method with Examples"
},
{
"code": null,
"e": 29088,
"s": 29051,
"text": "HashMap containsKey() Method in Java"
},
{
"code": null,
"e": 29128,
"s": 29088,
"text": "How to Create Array of Objects in Java?"
},
{
"code": null,
"e": 29162,
"s": 29128,
"text": "Convert Double to Integer in Java"
},
{
"code": null,
"e": 29196,
"s": 29162,
"text": "Iterating over ArrayLists in Java"
}
] |
Ionic - Footer | Ionic footer is placed at the bottom of the app screen. Working with footers is almost the same as working with headers.
The main class for Ionic footers is bar (the same as for the header). When you want to add a footer to your screens, you need to add the bar-footer class to your element after the main bar class. Since we want to use our footer on every screen in the app, we will add it to the body of the index.html file. We will also add a title for our footer.
<div class = "bar bar-footer">
<h1 class = "title">Footer</h1>
</div>
The above code will produce the following screen −
If you want to style your footer, you just need to add the appropriate color class to it. When you style your elements, you need to add your main element class as a prefix to your color class. Since we are styling a footer bar, the prefix class will be bar, and the color class we want to use in this example is assertive (red).
<div class = "bar bar-footer bar-assertive">
<h1 class = "title">Footer</h1>
</div>
The above code will produce the following screen −
You can use any of the following nine classes to give a color of your choice to your app footer −
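The nine standard Ionic color classes are light, stable, positive, calm, balanced, energized, assertive, royal and dark; when applied to a footer they become bar-light, bar-stable, bar-positive, bar-calm, bar-balanced, bar-energized, bar-assertive, bar-royal and bar-dark.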
Footers can contain elements inside them. Most of the time, you will need to add buttons with icons inside a footer.
The first button added will always be in the left corner. The last one will be placed on the right. The buttons in between will be grouped next to the first one, on the left side of your footer. In the following example, you can also notice that we use the icon class to add icons on top of the buttons.
<div class = "bar bar-footer bar-assertive">
<button class = "button icon ion-navicon"></button>
<button class = "button icon ion-home"></button>
<button class = "button icon ion-star"></button>
<button class = "button icon ion-checkmark-round"></button>
</div>
The above code will produce the following screen −
If you want to move your button to the right, you can add the pull-right class.
<div class = "bar bar-footer bar-assertive">
<button class = "button icon ion-navicon pull-right"></button>
</div>
The above code will produce the following screen −
| [
{
"code": null,
"e": 2584,
"s": 2463,
"text": "Ionic footer is placed at the bottom of the app screen. Working with footers is almost the same as working with headers."
},
{
"code": null,
"e": 2916,
"s": 2584,
"text": "The main class for Ionic footers is bar (the same as header). When you want to add footer to your screens, you need to add bar-footer class to your element after the main bar class. Since we want to use our footer on every screen in the app, we will add it to the body of the index.html file. We will also add title for our footer."
},
{
"code": null,
"e": 2990,
"s": 2916,
"text": "<div class = \"bar bar-footer\">\n <h1 class = \"title\">Footer</h1>\n</div>\n"
},
{
"code": null,
"e": 3041,
"s": 2990,
"text": "The above code will produce the following screen −"
},
{
"code": null,
"e": 3376,
"s": 3041,
"text": "If you want to style your footer, you just need to add the appropriate color class to it. When you style your elements, you need to add your main element class as a prefix to your color class. Since we are styling a footer bar, the prefix class will be a bar and the color class that we want to use in this example is assertive (red)."
},
{
"code": null,
"e": 3464,
"s": 3376,
"text": "<div class = \"bar bar-footer bar-assertive\">\n <h1 class = \"title\">Footer</h1>\n</div>\n"
},
{
"code": null,
"e": 3515,
"s": 3464,
"text": "The above code will produce the following screen −"
},
{
"code": null,
"e": 3613,
"s": 3515,
"text": "You can use any of the following nine classes to give a color of your choice to your app footer −"
},
{
"code": null,
"e": 3727,
"s": 3613,
"text": "Footers can contain elements inside it. Most of the time you will need to add buttons with icons inside a footer."
},
{
"code": null,
"e": 4026,
"s": 3727,
"text": "The first button added will always be in the left corner. The last one will be placed on the right. The buttons in between will be grouped next to the first one on the left side of your footer. In following example, you can also notice that we use the icon class to add icons on top of the buttons."
},
{
"code": null,
"e": 4300,
"s": 4026,
"text": "<div class = \"bar bar-footer bar-assertive\">\n <button class = \"button icon ion-navicon\"></button>\n <button class = \"button icon ion-home\"></button>\n <button class = \"button icon ion-star\"></button>\n <button class = \"button icon ion-checkmark-round\"></button>\n</div>"
},
{
"code": null,
"e": 4351,
"s": 4300,
"text": "The above code will produce the following screen −"
},
{
"code": null,
"e": 4426,
"s": 4351,
"text": "If you want to move your button to the right you can add pull-right class."
},
{
"code": null,
"e": 4544,
"s": 4426,
"text": "<div class = \"bar bar-footer bar-assertive\">\n <button class = \"button icon ion-navicon pull-right\"></button>\n</div>"
},
{
"code": null,
"e": 4595,
"s": 4544,
"text": "The above code will produce the following screen −"
},
{
"code": null,
"e": 4630,
"s": 4595,
"text": "\n 16 Lectures \n 2.5 hours \n"
},
{
"code": null,
"e": 4647,
"s": 4630,
"text": " Frahaan Hussain"
},
{
"code": null,
"e": 4684,
"s": 4647,
"text": "\n 185 Lectures \n 46.5 hours \n"
},
{
"code": null,
"e": 4700,
"s": 4684,
"text": " Nikhil Agarwal"
},
{
"code": null,
"e": 4707,
"s": 4700,
"text": " Print"
},
{
"code": null,
"e": 4718,
"s": 4707,
"text": " Add Notes"
}
] |
Python Pandas DataFrame Join, Merge, and Concatenate | by Jiahui Wang | Towards Data Science | When using SQL, we have the join operation. In Python, pandas.DataFrame also provides similar table operations. However, pandas.DataFrame has join, merge, and concat. What’s the difference among them? In this post, I will summarize the code and illustration of these operations.
Among the three DataFrame operations, join allows the lowest level of control. It will combine all the columns from the two tables, with the common columns renamed with the defined lsuffix and rsuffix. The way that rows from the two tables are combined is defined by the how parameter.
DataFrame.join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False)
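As a minimal sketch (the frame and column names below are invented for illustration), two frames that share a column name can be joined on their index like this:

import pandas as pd

left = pd.DataFrame({'name': ['Alice', 'Bob']})
right = pd.DataFrame({'name': ['HR', 'IT']})

# join aligns rows on the index; the common column 'name'
# is disambiguated with lsuffix and rsuffix
joined = left.join(right, how='left', lsuffix='_emp', rsuffix='_dept')
print(joined)   # columns: name_emp, name_dept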
Similar to join, merge also combines all the columns from the two tables, with the common columns renamed with the defined suffixes. However, merge provides three ways of flexible control over row-wise alignment. The first way is to use on = COLUMN NAME; here the given column must be a common column in both tables. The second way is to use left_on = COLUMN NAME and right_on = COLUMN NAME, which allows aligning the two tables using two different columns. The third way is to use left_index = True and right_index = True, and the two tables are aligned based on their index.
DataFrame.merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None)
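As a rough sketch (frame and column names invented for illustration), the three alignment styles look like this:

import pandas as pd

employees = pd.DataFrame({'emp_id': [1, 2, 3], 'dept_id': [10, 10, 20]})
departments = pd.DataFrame({'dept_id': [10, 20], 'dept_name': ['HR', 'IT']})

# 1. Align on a common column
m1 = employees.merge(departments, on='dept_id')

# 2. Align on two differently named columns
renamed = departments.rename(columns={'dept_id': 'id'})
m2 = employees.merge(renamed, left_on='dept_id', right_on='id')

# 3. Align on the index of both frames
m3 = employees.set_index('dept_id').merge(
    departments.set_index('dept_id'), left_index=True, right_index=True)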
Different from join and merge, which by default operate on columns, concat can define whether to operate on columns or rows.
pandas.concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, sort=None, copy=True)
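A small sketch (with invented data) showing both directions:

import pandas as pd

a = pd.DataFrame({'x': [1, 2]})
b = pd.DataFrame({'x': [3, 4]})

# axis=0 (the default) stacks the rows of b under a
rows = pd.concat([a, b], axis=0, ignore_index=True)

# axis=1 places the frames side by side; keys adds a
# hierarchical level to the column index
cols = pd.concat([a, b], axis=1, keys=['first', 'second'])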
For pandas.DataFrame, both join and merge operate on columns and rename the common columns using the given suffixes. In terms of row-wise alignment, merge provides more flexible control.
Different from join and merge, concat can operate on columns or rows, depending on the given axis, and no renaming is performed. In addition, concat allows defining hierarchical structures by passing in keys and names. | [
{
"code": null,
"e": 450,
"s": 171,
"text": "When using sql, we have join operation. In Python, pandas.DataFrame also provides the similar table operations. However, pandas.DataFrame has join, merge, and concat. What’s the difference among them? In this post, I will summarize the code and illustration of these operations."
},
{
"code": null,
"e": 722,
"s": 450,
"text": "Among the three DataFrame operations, join allows the lowest level of control. It will combine all the columns from the two tables, with the common columns renamed with the defined lsuffix and rsuffix. The way that rows from the two tables are combined is defined by how."
},
{
"code": null,
"e": 807,
"s": 722,
"text": "DataFrame.join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False)"
},
{
"code": null,
"e": 1388,
"s": 807,
"text": "Similar to join, merge also combines all the columns from the two tables, with the common columns renamed with the defined suffixes. However, merge provides three ways of flexible control over row-wise alignment. The first way is to use on = COLUMN NAME, here the given column must be the common column in both tables. The second way is to use left_on = COLUMN NAME and right_on = COLUMN NAME , and it allows to align the two tables using two different columns. The third way is to use left_index = True and right_index = True, and the two tables are aligned based on their index."
},
{
"code": null,
"e": 1583,
"s": 1388,
"text": "DataFrame.merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None)"
},
{
"code": null,
"e": 1708,
"s": 1583,
"text": "Different from join and merge, which by default operate on columns, concat can define whether to operate on columns or rows."
},
{
"code": null,
"e": 1868,
"s": 1708,
"text": "pandas.concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, sort=None, copy=True)"
},
{
"code": null,
"e": 2054,
"s": 1868,
"text": "For pandas.DataFrame, both join and merge operates on columns and rename the common columns using the given suffix. In terms of row-wise alignment, merge provides more flexible control."
}
] |
JavaFX Effects - MotionBlur | Just like Gaussian Effect, Motion Blur is an effect to blur the nodes in JavaFX. It also uses a Gaussian Convolution Kernel that helps in producing the blurring effect. The only difference between Gaussian Effect and Motion Blur is that the Gaussian Convolution Kernel is used with a specified angle.
As the name indicates, when this effect is applied with a specified angle, the given input appears as if you are viewing it while it is in motion.
The class named MotionBlur of the package javafx.scene.effect represents the Motion Blur effect. This class contains three properties, which include −
input − This property is of the type Effect and it represents an input to the motion blur effect.
radius − This property is of double type representing the radius with which the Motion Blur Effect is to be applied.
Angle − This is a property of double type and it represents the angle of the motion effect in degrees.
The following program is an example demonstrating the Motion Blur Effect. In here, we are drawing the text “Welcome to Tutorialspoint” filled with DARKSEAGREEN color and applying Motion Blur Effect to it with an angle of 45 degrees.
Save this code in a file with the name MotionBlurEffectExample.java.
import javafx.application.Application;
import javafx.scene.Group;
import javafx.scene.Scene;
import javafx.scene.paint.Color;
import javafx.stage.Stage;
import javafx.scene.text.Font;
import javafx.scene.text.FontWeight;
import javafx.scene.text.Text;
import javafx.scene.effect.MotionBlur;
public class MotionBlurEffectExample extends Application {
@Override
public void start(Stage stage) {
//Creating a Text object
Text text = new Text();
//Setting font to the text
text.setFont(Font.font(null, FontWeight.BOLD, 40));
//setting the position of the text
text.setX(60);
text.setY(150);
//Setting the text to be added.
text.setText("Welcome to Tutorialspoint");
//Setting the color of the text
text.setFill(Color.DARKSEAGREEN);
//Instantiating the MotionBlur class
MotionBlur motionBlur = new MotionBlur();
//Setting the radius to the effect
motionBlur.setRadius(10.5);
//Setting angle to the effect
motionBlur.setAngle(45);
//Applying MotionBlur effect to text
text.setEffect(motionBlur);
//Creating a Group object
Group root = new Group(text);
//Creating a scene object
Scene scene = new Scene(root, 600, 300);
//Setting title to the Stage
stage.setTitle("Sample Application");
//Adding scene to the stage
stage.setScene(scene);
//Displaying the contents of the stage
stage.show();
}
public static void main(String args[]){
launch(args);
}
}
Compile and execute the saved java file from the command prompt using the following commands.
javac MotionBlurEffectExample.java
java MotionBlurEffectExample
On executing, the above program generates a JavaFX window as shown below.
| [
{
"code": null,
"e": 2201,
"s": 1900,
"text": "Just like Gaussian Effect, Motion Blur is an effect to blur the nodes in JavaFX. It also uses a Gaussian Convolution Kernel that helps in producing the blurring effect. The only difference between Gaussian Effect and Motion Blur is that the Gaussian Convolution Kernel is used with a specified angle."
},
{
"code": null,
"e": 2353,
"s": 2201,
"text": "As indicated by the name, on applying this effect by specifying some angle, the given input seems to you as if you are seeing it while it is in motion."
},
{
"code": null,
"e": 2504,
"s": 2353,
"text": "The class named MotionBlur of the package javafx.scene.effect represents the Motion Blur effect. This class contains three properties, which include −"
},
{
"code": null,
"e": 2599,
"s": 2504,
"text": "input − This property is of the type Effect and it represents an input to the box blur effect."
},
{
"code": null,
"e": 2694,
"s": 2599,
"text": "input − This property is of the type Effect and it represents an input to the box blur effect."
},
{
"code": null,
"e": 2811,
"s": 2694,
"text": "radius − This property is of double type representing the radius with which the Motion Blur Effect is to be applied."
},
{
"code": null,
"e": 2928,
"s": 2811,
"text": "radius − This property is of double type representing the radius with which the Motion Blur Effect is to be applied."
},
{
"code": null,
"e": 3031,
"s": 2928,
"text": "Angle − This is a property of double type and it represents the angle of the motion effect in degrees."
},
{
"code": null,
"e": 3134,
"s": 3031,
"text": "Angle − This is a property of double type and it represents the angle of the motion effect in degrees."
},
{
"code": null,
"e": 3367,
"s": 3134,
"text": "The following program is an example demonstrating the Motion Blur Effect. In here, we are drawing the text “Welcome to Tutorialspoint” filled with DARKSEAGREEN color and applying Motion Blur Effect to it with an angle of 45 degrees."
},
{
"code": null,
"e": 3436,
"s": 3367,
"text": "Save this code in a file with the name MotionBlurEffectExample.java."
},
{
"code": null,
"e": 5191,
"s": 3436,
"text": "import javafx.application.Application; \nimport javafx.scene.Group; \nimport javafx.scene.Scene; \nimport javafx.scene.paint.Color; \nimport javafx.stage.Stage; \nimport javafx.scene.text.Font; \nimport javafx.scene.text.FontWeight; \nimport javafx.scene.text.Text; \nimport javafx.scene.effect.MotionBlur; \n \npublic class MotionBlurEffectExample extends Application { \n @Override \n public void start(Stage stage) { \n //Creating a Text object \n Text text = new Text(); \n \n //Setting font to the text \n text.setFont(Font.font(null, FontWeight.BOLD, 40)); \n \n //setting the position of the text \n text.setX(60); \n text.setY(150); \n \n //Setting the text to be added. \n text.setText(\"Welcome to Tutorialspoint\"); \n \n //Setting the color of the text \n text.setFill(Color.DARKSEAGREEN); \n \n //Instantiating the MotionBlur class \n MotionBlur motionBlur = new MotionBlur(); \n \n //Setting the radius to the effect \n motionBlur.setRadius(10.5); \n \n //Setting angle to the effect \n motionBlur.setAngle(45); \n \n //Applying MotionBlur effect to text\n text.setEffect(motionBlur); \n \n //Creating a Group object \n Group root = new Group(text); \n \n //Creating a scene object \n Scene scene = new Scene(root, 600, 300); \n \n //Setting title to the Stage \n stage.setTitle(\"Sample Application\"); \n \n //Adding scene to the stage \n stage.setScene(scene); \n \n //Displaying the contents of the stage \n stage.show(); \n } \n public static void main(String args[]){ \n launch(args); \n } \n} "
},
{
"code": null,
"e": 5285,
"s": 5191,
"text": "Compile and execute the saved java file from the command prompt using the following commands."
},
{
"code": null,
"e": 5354,
"s": 5285,
"text": "javac MotionBlurEffectExample.java \njava MotionBlurEffectExample \n"
},
{
"code": null,
"e": 5428,
"s": 5354,
"text": "On executing, the above program generates a JavaFX window as shown below."
},
{
"code": null,
"e": 5463,
"s": 5428,
"text": "\n 33 Lectures \n 7.5 hours \n"
},
{
"code": null,
"e": 5474,
"s": 5463,
"text": " Syed Raza"
},
{
"code": null,
"e": 5510,
"s": 5474,
"text": "\n 64 Lectures \n 12.5 hours \n"
},
{
"code": null,
"e": 5546,
"s": 5510,
"text": " Emenwa Global, Ejike IfeanyiChukwu"
},
{
"code": null,
"e": 5579,
"s": 5546,
"text": "\n 20 Lectures \n 4 hours \n"
},
{
"code": null,
"e": 5615,
"s": 5579,
"text": " Emenwa Global, Ejike IfeanyiChukwu"
},
{
"code": null,
"e": 5622,
"s": 5615,
"text": " Print"
},
{
"code": null,
"e": 5633,
"s": 5622,
"text": " Add Notes"
}
] |
C++ Program to Implement Affine Cipher | The Affine cipher is a type of monoalphabetic substitution cipher in which each letter in an alphabet is mapped to its numeric equivalent. Encryption is done using a simple mathematical function, and the result is converted back to a letter.
In the Affine cipher, the letters of an alphabet of size m are first mapped to the integers in the range 0 ... m-1.
The ‘key’ for the Affine cipher consists of 2 numbers, a and b. a should be chosen to be relatively prime to m.
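For example, with the usual 26-letter English alphabet (m = 26), a can be any of 1, 3, 5, 7, 9, 11, 15, 17, 19, 21, 23 or 25, since these are the integers relatively prime to 26.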
Modular arithmetic is used to transform the integer that each plaintext letter corresponds to into another integer that corresponds to a ciphertext letter. The encryption function for a single letter is
E ( x ) = ( a x + b ) mod m
modulus m: size of the alphabet
a and b: key of the cipher.
In decryption, convert each of the cipher text letters into their integer values. The decryption function is
D ( x ) = a^-1 ( x - b ) mod m
a^-1 : modular multiplicative inverse of a modulo m. i.e., it satisfies the equation
1 = a^-1 mod m.
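As a worked example with the key used in the program below (a = 7, b = 6, m = 26): the letter 'T' maps to x = 19, so E(19) = (7 * 19 + 6) mod 26 = 139 mod 26 = 9, which is the letter 'J'. For decryption, a^-1 = 15 because (7 * 15) mod 26 = 1, so D(9) = 15 * (9 - 6) mod 26 = 45 mod 26 = 19, which maps back to 'T'.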
Here is a C++ program to implement this process.
Begin
Function encryption(string m)
for i = 0 to m.length()-1
if(m[i]!=' ')
c = c + (char) ((((a * (m[i]-'A') ) + b) % 26) + 'A')
else
c += m[i]
return c
End
Begin
Function decryption(string c)
Initialize a_inverse = 0
Initialize flag = 0
For i = 0 to 25
flag = (a * i) % 26
if (flag == 1)
a_inverse = i
done
done
For i = 0 to c.length() - 1
if(c[i]!=' ')
m = m + (char) (((a_inverse * ((c[i]+'A' - b)) % 26)) + 'A')
else
m = m+ c[i]
done
End
#include<bits/stdc++.h>
using namespace std;
static int a = 7;
static int b = 6;
string encryption(string m) {
//Cipher Text initially empty
string c = "";
for (int i = 0; i < m.length(); i++) {
// Avoid space to be encrypted
if(m[i]!=' ')
// added 'A' to bring it in range of ASCII alphabet [ 65-90 | A-Z ]
c = c + (char) ((((a * (m[i]-'A') ) + b) % 26) + 'A');
else
//else append space character
c += m[i];
}
return c;
}
string decryption(string c) {
string m = "";
int a_inverse = 0;
int flag = 0;
//Find a^-1 (the multiplicative inverse of a
//in the group of integers modulo m.)
for (int i = 0; i < 26; i++) {
flag = (a * i) % 26;
//Check if (a * i) % 26 == 1,
//then i will be the multiplicative inverse of a
if (flag == 1) {
a_inverse = i;
}
}
for (int i = 0; i < c.length(); i++) {
if(c[i] != ' ')
// added 'A' to bring it in range of ASCII alphabet [ 65-90 | A-Z ]
m = m + (char) (((a_inverse * ((c[i]+'A' - b)) % 26)) + 'A');
else
//else append space character
m += c[i];
}
return m;
}
int main(void) {
string msg = "TUTORIALSPOINT";
string c = encryption(msg);
cout << "Encrypted Message is : " << c<<endl;
cout << "Decrypted Message is: " << decryption(c);
return 0;
}
Encrypted Message is : JQJAVKGFCHAKTJ
Decrypted Message is: TUTORIALSPOINT | [
{
"code": null,
"e": 1285,
"s": 1062,
"text": "In the Affine cipher, each letter in an alphabet is mapped to its numeric equivalent, is a type of monoalphabetic substitution cipher. Encryption is done using a simple mathematical function and converted back to a letter."
},
{
"code": null,
"e": 1401,
"s": 1285,
"text": "The letters of an alphabet of size m are first mapped to the integers in the range 0 ... m-1, in the Affine cipher,"
},
{
"code": null,
"e": 1513,
"s": 1401,
"text": "The ‘key’ for the Affine cipher consists of 2 numbers, a and b. a should be chosen to be relatively prime to m."
},
{
"code": null,
"e": 1717,
"s": 1513,
"text": "To transform the integer, it uses modular arithmetic that each plaintext letter corresponds to into another integer that correspond to a cipher text letter. The encryption function for a single letter is"
},
{
"code": null,
"e": 1805,
"s": 1717,
"text": "E ( x ) = ( a x + b ) mod m\nmodulus m: size of the alphabet\na and b: key of the cipher."
},
{
"code": null,
"e": 1914,
"s": 1805,
"text": "In decryption, convert each of the cipher text letters into their integer values. The decryption function is"
},
{
"code": null,
"e": 2046,
"s": 1914,
"text": "D ( x ) = a^-1 ( x - b ) mod m\na^-1 : modular multiplicative inverse of a modulo m. i.e., it satisfies the equation\n1 = a^-1 mod m."
},
{
"code": null,
"e": 2095,
"s": 2046,
"text": "Here is a C++ program to implement this process."
},
{
"code": null,
"e": 2649,
"s": 2095,
"text": "Begin\nFunction encryption(string m)\n for i = 0 to m.length()-1\n if(m[i]!=' ')\n c = c + (char) ((((a * (m[i]-'A') ) + b) % 26) + 'A')\n else\n c += m[i]\n return c\nEnd\nBegin\nFunction decryption(string c)\n Initialize a_inverse = 0\n Initialize flag = 0\n For i = 0 to 25\n flag = (a * i) % 26\n if (flag == 1)\n a_inverse = i\n done\n done\n For i = 0 to c.length() - 1\n if(c[i]!=' ')\n m = m + (char) (((a_inverse * ((c[i]+'A' - b)) % 26)) + 'A')\n else\n m = m+ c[i]\n done\nEnd"
},
{
"code": null,
"e": 4031,
"s": 2649,
"text": "#include<bits/stdc++.h>\nusing namespace std;\nstatic int a = 7;\nstatic int b = 6;\nstring encryption(string m) {\n //Cipher Text initially empty\n string c = \"\";\n for (int i = 0; i < m.length(); i++) {\n // Avoid space to be encrypted\n if(m[i]!=' ')\n // added 'A' to bring it in range of ASCII alphabet [ 65-90 | A-Z ]\n c = c + (char) ((((a * (m[i]-'A') ) + b) % 26) + 'A');\n else\n //else append space character\n c += m[i];\n }\n return c;\n}\nstring decryption(string c) {\n string m = \"\";\n int a_inverse = 0;\n int flag = 0;\n //Find a^-1 (the multiplicative inverse of a\n //in the group of integers modulo m.)\n for (int i = 0; i < 26; i++) {\n flag = (a * i) % 26;\n //Check if (a * i) % 26 == 1,\n //then i will be the multiplicative inverse of a\n if (flag == 1) {\n a_inverse = i;\n }\n }\n for (int i = 0; i < c.length(); i++) {\n if(c[i] != ' ')\n // added 'A' to bring it in range of ASCII alphabet [ 65-90 | A-Z ]\n m = m + (char) (((a_inverse * ((c[i]+'A' - b)) % 26)) + 'A');\n else\n //else append space character\n m += c[i];\n }\n return m;\n}\nint main(void) {\n string msg = \"TUTORIALSPOINT\";\n string c = encryption(msg);\n cout << \"Encrypted Message is : \" << c<<endl;\n cout << \"Decrypted Message is: \" << decryption(c);\n return 0;\n}"
},
{
"code": null,
"e": 4106,
"s": 4031,
"text": "Encrypted Message is : JQJAVKGFCHAKTJ\nDecrypted Message is: TUTORIALSPOINT"
}
] |
Insert Data into MySQL Database | Data can be entered into MySQL tables by executing an SQL INSERT statement through the PHP function mysql_query. Below is a simple example to insert a record into the employee table.
Try out the following example to insert a record into the employee table.
<?php
$dbhost = 'localhost:3036';
$dbuser = 'root';
$dbpass = 'rootpassword';
$conn = mysql_connect($dbhost, $dbuser, $dbpass);
if(! $conn ) {
die('Could not connect: ' . mysql_error());
}
$sql = 'INSERT INTO employee '.
'(emp_name,emp_address, emp_salary, join_date) '.
'VALUES ( "guest", "XYZ", 2000, NOW() )';
mysql_select_db('test_db');
$retval = mysql_query( $sql, $conn );
if(! $retval ) {
die('Could not enter data: ' . mysql_error());
}
echo "Entered data successfully\n";
mysql_close($conn);
?>
In a real application, all the values will be taken using an HTML form, then those values will be captured using a PHP script, and finally they will be inserted into MySQL tables.
While inserting data, it is best practice to use the function get_magic_quotes_gpc() to check whether magic quotes are enabled in the current configuration. If this function returns false, then use the function addslashes() to add slashes before quotes.
Try out this example by putting this code into add_employee.php. It will take input using an HTML form and then create records in the database.
<html>
<head>
<title>Add New Record in MySQL Database</title>
</head>
<body>
<?php
if(isset($_POST['add'])) {
$dbhost = 'localhost:3036';
$dbuser = 'root';
$dbpass = 'rootpassword';
$conn = mysql_connect($dbhost, $dbuser, $dbpass);
if(! $conn ) {
die('Could not connect: ' . mysql_error());
}
if(! get_magic_quotes_gpc() ) {
$emp_name = addslashes ($_POST['emp_name']);
$emp_address = addslashes ($_POST['emp_address']);
}else {
$emp_name = $_POST['emp_name'];
$emp_address = $_POST['emp_address'];
}
$emp_salary = $_POST['emp_salary'];
$sql = "INSERT INTO employee ". "(emp_name,emp_address, emp_salary,
join_date) ". "VALUES('$emp_name','$emp_address',$emp_salary, NOW())";
mysql_select_db('test_db');
$retval = mysql_query( $sql, $conn );
if(! $retval ) {
die('Could not enter data: ' . mysql_error());
}
echo "Entered data successfully\n";
mysql_close($conn);
}else {
?>
<form method = "post" action = "<?php $_PHP_SELF ?>">
<table width = "400" border = "0" cellspacing = "1"
cellpadding = "2">
<tr>
<td width = "100">Employee Name</td>
<td><input name = "emp_name" type = "text"
id = "emp_name"></td>
</tr>
<tr>
<td width = "100">Employee Address</td>
<td><input name = "emp_address" type = "text"
id = "emp_address"></td>
</tr>
<tr>
<td width = "100">Employee Salary</td>
<td><input name = "emp_salary" type = "text"
id = "emp_salary"></td>
</tr>
<tr>
<td width = "100"> </td>
<td> </td>
</tr>
<tr>
<td width = "100"> </td>
<td>
<input name = "add" type = "submit" id = "add"
value = "Add Employee">
</td>
</tr>
</table>
</form>
<?php
}
?>
</body>
</html>
| [
{
"code": null,
"e": 2926,
"s": 2757,
"text": "Data can be entered into MySQL tables by executing SQL INSERT statement through PHP function mysql_query. Below a simple example to insert a record into employee table."
},
{
"code": null,
"e": 2990,
"s": 2926,
"text": "Try out following example to insert record into employee table."
},
{
"code": null,
"e": 3584,
"s": 2990,
"text": "<?php\n $dbhost = 'localhost:3036';\n $dbuser = 'root';\n $dbpass = 'rootpassword';\n $conn = mysql_connect($dbhost, $dbuser, $dbpass);\n \n if(! $conn ) {\n die('Could not connect: ' . mysql_error());\n }\n \n $sql = 'INSERT INTO employee '.\n '(emp_name,emp_address, emp_salary, join_date) '.\n 'VALUES ( \"guest\", \"XYZ\", 2000, NOW() )';\n \n mysql_select_db('test_db');\n $retval = mysql_query( $sql, $conn );\n \n if(! $retval ) {\n die('Could not enter data: ' . mysql_error());\n }\n \n echo \"Entered data successfully\\n\";\n \n mysql_close($conn);\n?>"
},
{
"code": null,
"e": 3759,
"s": 3584,
"text": "In real application, all the values will be taken using HTML form and then those values will be captured using PHP script and finally they will be inserted into MySQL tables."
},
{
"code": null,
"e": 3997,
"s": 3759,
"text": "While doing data insert its best practice to use function get_magic_quotes_gpc() to check if current configuration for magic quote is set or not. If this function returns false then use function addslashes() to add slashes before quotes."
},
{
"code": null,
"e": 4146,
"s": 3997,
"text": "Try out this example by putting this code into add_employee.php, this will take input using HTML Form and then it will create records into database."
},
{
"code": null,
"e": 7063,
"s": 4146,
"text": "<html>\n \n <head>\n <title>Add New Record in MySQL Database</title>\n </head>\n \n <body>\n <?php\n if(isset($_POST['add'])) {\n $dbhost = 'localhost:3036';\n $dbuser = 'root';\n $dbpass = 'rootpassword';\n $conn = mysql_connect($dbhost, $dbuser, $dbpass);\n \n if(! $conn ) {\n die('Could not connect: ' . mysql_error());\n }\n \n if(! get_magic_quotes_gpc() ) {\n $emp_name = addslashes ($_POST['emp_name']);\n $emp_address = addslashes ($_POST['emp_address']);\n }else {\n $emp_name = $_POST['emp_name'];\n $emp_address = $_POST['emp_address'];\n }\n \n $emp_salary = $_POST['emp_salary'];\n \n $sql = \"INSERT INTO employee \". \"(emp_name,emp_address, emp_salary, \n join_date) \". \"VALUES('$emp_name','$emp_address',$emp_salary, NOW())\";\n \n mysql_select_db('test_db');\n $retval = mysql_query( $sql, $conn );\n \n if(! $retval ) {\n die('Could not enter data: ' . mysql_error());\n }\n \n echo \"Entered data successfully\\n\";\n \n mysql_close($conn);\n }else {\n ?>\n \n <form method = \"post\" action = \"<?php $_PHP_SELF ?>\">\n <table width = \"400\" border = \"0\" cellspacing = \"1\" \n cellpadding = \"2\">\n \n <tr>\n <td width = \"100\">Employee Name</td>\n <td><input name = \"emp_name\" type = \"text\" \n id = \"emp_name\"></td>\n </tr>\n \n <tr>\n <td width = \"100\">Employee Address</td>\n <td><input name = \"emp_address\" type = \"text\" \n id = \"emp_address\"></td>\n </tr>\n \n <tr>\n <td width = \"100\">Employee Salary</td>\n <td><input name = \"emp_salary\" type = \"text\" \n id = \"emp_salary\"></td>\n </tr>\n \n <tr>\n <td width = \"100\"> </td>\n <td> </td>\n </tr>\n \n <tr>\n <td width = \"100\"> </td>\n <td>\n <input name = \"add\" type = \"submit\" id = \"add\" \n value = \"Add Employee\">\n </td>\n </tr>\n \n </table>\n </form>\n \n <?php\n }\n ?>\n \n </body>\n</html>"
},
{
"code": null,
"e": 7096,
"s": 7063,
"text": "\n 45 Lectures \n 9 hours \n"
},
{
"code": null,
"e": 7112,
"s": 7096,
"text": " Malhar Lathkar"
},
{
"code": null,
"e": 7145,
"s": 7112,
"text": "\n 34 Lectures \n 4 hours \n"
},
{
"code": null,
"e": 7156,
"s": 7145,
"text": " Syed Raza"
},
{
"code": null,
"e": 7191,
"s": 7156,
"text": "\n 84 Lectures \n 5.5 hours \n"
},
{
"code": null,
"e": 7208,
"s": 7191,
"text": " Frahaan Hussain"
},
{
"code": null,
"e": 7241,
"s": 7208,
"text": "\n 17 Lectures \n 1 hours \n"
},
{
"code": null,
"e": 7256,
"s": 7241,
"text": " Nivedita Jain"
},
{
"code": null,
"e": 7291,
"s": 7256,
"text": "\n 100 Lectures \n 34 hours \n"
},
{
"code": null,
"e": 7303,
"s": 7291,
"text": " Azaz Patel"
},
{
"code": null,
"e": 7338,
"s": 7303,
"text": "\n 43 Lectures \n 5.5 hours \n"
},
{
"code": null,
"e": 7366,
"s": 7338,
"text": " Vijay Kumar Parvatha Reddy"
},
{
"code": null,
"e": 7373,
"s": 7366,
"text": " Print"
},
{
"code": null,
"e": 7384,
"s": 7373,
"text": " Add Notes"
}
] |
Koa.js - RESTful APIs | To create mobile applications, single page applications, use AJAX calls and provide data to clients, you'll need an API. A popular architectural style of how to structure and name these APIs and the endpoints is called REST (Representational State Transfer). HTTP 1.1 was designed keeping REST principles in mind. REST was introduced by Roy Fielding in 2000 in his doctoral dissertation.
RESTful URIs and methods provide us with almost all information we need to process a request. The following table summarizes how the various verbs should be used and how URIs should be named. We'll be creating a movies API towards the end, so let’s discuss how it'll be structured.
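Based on the routes implemented below, the movies API will be structured as follows −

GET /movies − get the list of all movies
GET /movies/:id − get the details of the movie with the given id
POST /movies − create a new movie
PUT /movies/:id − update the movie with the given id, or create it if it does not exist
DELETE /movies/:id − delete the movie with the given id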
Now let’s create this API in Koa. We will be using JSON as our transport data format as it is easy to work with in JavaScript and has loads of other benefits. Replace your index.js file with the following −
var koa = require('koa');
var router = require('koa-router');
var bodyParser = require('koa-body');
var app = koa();
//Set up body parsing middleware
app.use(bodyParser({
formidable:{uploadDir: './uploads'},
multipart: true,
urlencoded: true
}));
//Require the Router we defined in movies.js
var movies = require('./movies.js');
//Use the Router on the sub route /movies
app.use(movies.routes());
app.listen(3000);
Now that we have our application set up, let us concentrate on creating the API. First set up the movies.js file. We are not using a database to store the movies but are storing them in memory, so every time the server restarts the movies added by us will vanish. This can easily be mimicked using a database or a file (using node fs module).
Import koa-router, create a Router and export it using module.exports.
var Router = require('koa-router');
var router = Router({
prefix: '/movies'
}); //Prefixed all routes with /movies
var movies = [
{id: 101, name: "Fight Club", year: 1999, rating: 8.1},
{id: 102, name: "Inception", year: 2010, rating: 8.7},
{id: 103, name: "The Dark Knight", year: 2008, rating: 9},
{id: 104, name: "12 Angry Men", year: 1957, rating: 8.9}
];
//Routes will go here
module.exports = router;
Define the GET route for getting all the movies.
router.get('/', sendMovies);
function *sendMovies(next){
this.body = movies;
yield next;
}
That's it. To test out if this is working fine, run your app, then open your terminal and enter −
curl -i -H "Accept: application/json" -H "Content-Type: application/json" -X GET localhost:3000/movies
You'll get the following response −
[{"id":101,"name":"Fight
Club","year":1999,"rating":8.1},{"id":102,"name":"Inception","year":2010,"rating":8.7},
{"id":103,"name":"The Dark Knight","year":2008,"rating":9},{"id":104,"name":"12 Angry
Men","year":1957,"rating":8.9}]
We have a route to get all the movies. Now let’s create a route to get a specific movie by its id.
router.get('/:id([0-9]{3,})', sendMovieWithId);
function *sendMovieWithId(next){
var ctx = this;
var currMovie = movies.filter(function(movie){
if(movie.id == ctx.params.id){
return true;
}
});
if(currMovie.length == 1){
this.body = currMovie[0];
} else {
this.response.status = 404;//Set status to 404 as movie was not found
this.body = {message: "Not Found"};
}
yield next;
}
This will get us the movies according to the id that we provide. To test this out, use the following command in your terminal.
curl -i -H "Accept: application/json" -H "Content-Type: application/json" -X GET localhost:3000/movies/101
You'll get the response as −
{"id":101,"name":"Fight Club","year":1999,"rating":8.1}
If you visit an invalid route, it'll produce a cannot GET error, while if you visit a valid route with an id that doesn’t exist, it'll produce a 404 error.
We are done with the GET routes. Now, let’s move on to POST route.
Use the following route to handle the POSTed data.
router.post('/', addNewMovie);
function *addNewMovie(next){
//Check if all fields are provided and are valid:
if(!this.request.body.name ||
!this.request.body.year.toString().match(/^[0-9]{4}$/g) ||
!this.request.body.rating.toString().match(/^[0-9]\.[0-9]$/g)){
this.response.status = 400;
this.body = {message: "Bad Request"};
} else {
var newId = movies[movies.length-1].id+1;
movies.push({
id: newId,
name: this.request.body.name,
year: this.request.body.year,
rating: this.request.body.rating
});
this.body = {message: "New movie created.", location: "/movies/" + newId};
}
yield next;
}
This will create a new movie and store it in the movies variable. To test this route out, enter the following in your terminal −
curl -X POST --data "name = Toy%20story&year = 1995&rating = 8.5"
https://localhost:3000/movies
You'll get the following response −
{"message":"New movie created.","location":"/movies/105"}
To test if this was added to the movies object, run the get request for /movies/105 again. You'll get the following response −
{"id":105,"name":"Toy story","year":"1995","rating":"8.5"}
Let’s move on to create the PUT and DELETE routes.
The PUT route is almost exactly the same as the POST route. We will be specifying the id for the object that'll be updated/created. Create the route in the following way −
router.put('/:id', updateMovieWithId);
function *updateMovieWithId(next){
//Check if all fields are provided and are valid:
if(!this.request.body.name ||
!this.request.body.year.toString().match(/^[0-9]{4}$/g) ||
!this.request.body.rating.toString().match(/^[0-9]\.[0-9]$/g) ||
!this.params.id.toString().match(/^[0-9]{3,}$/g)){
this.response.status = 400;
this.body = {message: "Bad Request"};
} else {
//Gets us the index of movie with given id.
var updateIndex = movies.map(function(movie){
return movie.id;
}).indexOf(parseInt(this.params.id));
if(updateIndex === -1){
      //Movie not found, create new
      movies.push({
id: this.params.id,
name: this.request.body.name,
year: this.request.body.year,
rating: this.request.body.rating
});
this.body = {message: "New movie created.", location: "/movies/" + this.params.id};
} else {
//Update existing movie
movies[updateIndex] = {
id: this.params.id,
name: this.request.body.name,
year: this.request.body.year,
rating: this.request.body.rating
};
this.body = {message: "Movie id " + this.params.id + " updated.", location: "/movies/" + this.params.id};
}
}
}
This route will perform the function we specified in the table above. It'll update the object with new details if it exists. If it doesn't exist, it'll create a new object. To test out this route, use the following curl command. This will update an existing movie. To create a new movie, just change the id to a non-existing id.
curl -X PUT --data "name = Toy%20story&year = 1995&rating = 8.5"
https://localhost:3000/movies/101
{"message":"Movie id 101 updated.","location":"/movies/101"}
Use the following code to create a delete route.
router.delete('/:id', deleteMovieWithId);
function *deleteMovieWithId(next){
var removeIndex = movies.map(function(movie){
return movie.id;
   }).indexOf(parseInt(this.params.id)); //Gets us the index of movie with given id (parseInt keeps the comparison consistent with the numeric ids).
if(removeIndex === -1){
this.body = {message: "Not found"};
} else {
movies.splice(removeIndex, 1);
this.body = {message: "Movie id " + this.params.id + " removed."};
}
}
Test the route in the same way we did for the others. On successful deletion (for example id 105), you will get −
{message: "Movie id 105 removed."}
Finally, our movies.js file looks like −
var Router = require('koa-router');
var router = Router({
prefix: '/movies'
}); //Prefixed all routes with /movies
var movies = [
{id: 101, name: "Fight Club", year: 1999, rating: 8.1},
{id: 102, name: "Inception", year: 2010, rating: 8.7},
{id: 103, name: "The Dark Knight", year: 2008, rating: 9},
{id: 104, name: "12 Angry Men", year: 1957, rating: 8.9}
];
//Routes will go here
router.get('/', sendMovies);
router.get('/:id([0-9]{3,})', sendMovieWithId);
router.post('/', addNewMovie);
router.put('/:id', updateMovieWithId);
router.delete('/:id', deleteMovieWithId);
function *deleteMovieWithId(next){
var removeIndex = movies.map(function(movie){
return movie.id;
   }).indexOf(parseInt(this.params.id)); //Gets us the index of movie with given id (parseInt keeps the comparison consistent with the numeric ids).
if(removeIndex === -1){
this.body = {message: "Not found"};
} else {
movies.splice(removeIndex, 1);
this.body = {message: "Movie id " + this.params.id + " removed."};
}
}
function *updateMovieWithId(next) {
//Check if all fields are provided and are valid:
if(!this.request.body.name ||
!this.request.body.year.toString().match(/^[0-9]{4}$/g) ||
!this.request.body.rating.toString().match(/^[0-9]\.[0-9]$/g) ||
!this.params.id.toString().match(/^[0-9]{3,}$/g)){
this.response.status = 400;
this.body = {message: "Bad Request"};
} else {
//Gets us the index of movie with given id.
var updateIndex = movies.map(function(movie){
return movie.id;
}).indexOf(parseInt(this.params.id));
if(updateIndex === -1){
//Movie not found, create new
movies.push({
id: this.params.id,
name: this.request.body.name,
year: this.request.body.year,
rating: this.request.body.rating
});
this.body = {message: "New movie created.", location: "/movies/" + this.params.id};
} else {
//Update existing movie
movies[updateIndex] = {
id: this.params.id,
name: this.request.body.name,
year: this.request.body.year,
rating: this.request.body.rating
};
this.body = {message: "Movie id " + this.params.id + " updated.",
location: "/movies/" + this.params.id};
}
}
}
function *addNewMovie(next){
//Check if all fields are provided and are valid:
if(!this.request.body.name ||
!this.request.body.year.toString().match(/^[0-9]{4}$/g) ||
!this.request.body.rating.toString().match(/^[0-9]\.[0-9]$/g)){
this.response.status = 400;
this.body = {message: "Bad Request"};
} else {
var newId = movies[movies.length-1].id+1;
movies.push({
id: newId,
name: this.request.body.name,
year: this.request.body.year,
rating: this.request.body.rating
});
this.body = {message: "New movie created.", location: "/movies/" + newId};
}
yield next;
}
function *sendMovies(next){
this.body = movies;
yield next;
}
function *sendMovieWithId(next){
var ctx = this
var currMovie = movies.filter(function(movie){
if(movie.id == ctx.params.id){
return true;
}
});
if(currMovie.length == 1){
this.body = currMovie[0];
} else {
this.response.status = 404;//Set status to 404 as movie was not found
this.body = {message: "Not Found"};
}
yield next;
}
module.exports = router;
This completes our REST API. Now you can create much more complex applications using this simple architectural style and Koa.
MySQL - LOOP Statement | Stored procedures are subroutines: segments of SQL statements that are stored in the SQL catalog. These procedures contain IN and OUT parameters, or both. They may return result sets in case you use SELECT statements; they can return multiple result sets. You can also create functions in MySQL.
Similar to other programming languages, MySQL provides support for flow control statements such as IF, CASE, ITERATE, LEAVE, LOOP, WHILE, and REPEAT. You can use these statements in stored programs (procedures), and RETURN in stored functions. You can use one flow control statement within another.
The LOOP is a compound MySQL statement which is used to execute a single or set of statements repeatedly.
Following is the syntax of the LOOP statement in MySQL −
begin_label: LOOP
statement_list
END LOOP end_label
Where, statement_list is a single or set of statements that are to be repeated. begin_label and end_label are the optional labels of the LOOP statement.
The statement(s) in the LOOP are executed repeatedly till the loop is terminated. You can terminate the LOOP using the LEAVE statement.
When used in a function, the LOOP can also be terminated using the RETURN statement. Each statement in the LOOP ends with a semicolon (or the current delimiter).
Following query demonstrates the usage of the LOOP statement with a procedure −
mysql> Delimiter //
mysql> CREATE procedure loopDemo()
label:BEGIN
DECLARE val INT ;
DECLARE result VARCHAR(255);
SET val =1;
SET result = '';
loop_label: LOOP
IF val > 10 THEN
LEAVE loop_label;
END IF;
SET result = CONCAT(result,val,',');
SET val = val + 1;
ITERATE loop_label;
END LOOP;
SELECT result;
END//
You can call the above procedure as follows −
mysql> call loopDemo;//
+-----------------------+
| result |
+-----------------------+
| 1,2,3,4,5,6,7,8,9,10, |
+-----------------------+
1 row in set (0.00 sec)
Query OK, 0 rows affected (0.07 sec)
Following query demonstrates how to use the LOOP statement within a function.
DELIMITER //
CREATE FUNCTION Sample (bonus INT)
RETURNS INT
BEGIN
DECLARE income INT;
SET income = 0;
myLabel: LOOP
SET income = income + bonus;
IF income < 10000 THEN
ITERATE myLabel;
END IF;
LEAVE myLabel;
END LOOP myLabel;
RETURN income;
END; //
Query OK, 0 rows affected (0.41 sec)
mysql> DELIMITER ;
You can call the above created function as shown below −
mysql> SELECT Sample(1000);
+--------------+
| Sample(1000) |
+--------------+
| 10000 |
+--------------+
1 row in set (0.15 sec)
6 hidden SQL mistakes to avoid. They fly under the radar and get you in... | by Skyler Dale | Towards Data Science | SQL is great. It’s human readable and efficient. It’s fairly easy to pick up.
Unfortunately, it’s also easy to make nasty SQL mistakes. Ones that don’t stand out, don’t get caught by your editor, and cause headaches down the line.
In this post, I’ll highlight some hidden errors to be mindful of when you’re cruising through your data analysis.
SQL has a large variety of common and useful operators for arithmetic (e.g., +, -), comparison (e.g., >, <) and logic (e.g., NOT, IN).
Use them, enjoy them, but don’t forget that they won’t do anything with null values. An operation on a null value will return a null value.
Below are a couple of examples:
a. Adding columns
Let’s say we have one column with 2019 sales and one with 2020 sales. And there’s a null value in one of our rows for 2019.
When we add them together, we get a null:
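As a rough sketch (the sales table and its sales_2019/sales_2020 column names are assumed here, since the original result screenshots are not reproduced):
select sales_2019, sales_2020, sales_2019 + sales_2020 as total_sales
from sales;
-- any row where sales_2019 is NULL comes back with total_sales = NULL, whatever sales_2020 holds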
This is bad news. We might know to treat the null as a 0, but SQL doesn’t.
b. “Not In” Operator
Now let’s say we have a table with survey results about a product. And we want to exclude only the very bad or very good reviews.
When we query for rating not in (‘Very good’, ‘Very Bad’) we get the following:
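A hedged sketch of that query (the reviews table name is assumed):
select *
from reviews
where rating not in ('Very good', 'Very Bad');
-- rows whose rating is NULL are filtered out too: NULL NOT IN (...) evaluates to NULL, not TRUE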
We may expect to return null values (after all, null is not in the list we provided) but SQL will exclude them in the result set.
The fix: Whenever we perform operations, we should make sure we are aware of any null values in our columns. If we want them treated as a different value, we can make that happen easily with a “coalesce” statement:
coalesce(rating, ‘’)
This is the opposite problem of the one above. When we run aggregation functions, SQL ignores nulls. In many cases this is desirable.
But what if null values are really 0s?
For example, let’s calculate the average sales in 2020:
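A sketch of that query, using the same assumed sales table (one of its five 2020 values is NULL):
select avg(sales_2020) from sales;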
In this case, SQL excludes the null value and calculates 410 / 4 = 102.5 instead of 410 / 5 = 82.
This is very easy to miss — 102.5 sounds like a real, perfectly reasonable value.
The fix: Same as #1. Let’s coalesce nulls if they really mean 0.
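A possible sketch of the fix, assuming the nulls really do mean 0:
select avg(coalesce(sales_2020, 0)) from sales;
-- the NULL row now counts as 0, so the average divides by all 5 rows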
Joins are a key SQL functionality that helps us combine data from two tables.
But when we use the wrong type of join, we can lose data that we need.
For example, let’s say we have a geography table and a sales table, and we want to send a list of all contacts’ state and sales to our marketing team.
A left join gives us all of our contacts and their sales, but an inner join drops all contacts that don’t have any sales:
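A rough sketch of the two variants (table and column names are assumed; the join key in particular is a guess):
-- left join: every contact is kept, sales is NULL where there is no match
select g.contact_id, g.state, s.sales
from geography g
left join sales s on s.contact_id = g.contact_id;

-- inner join: contacts without a sales row disappear from the result
select g.contact_id, g.state, s.sales
from geography g
inner join sales s on s.contact_id = g.contact_id;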
If we use the inner join in this case, we lose data that might be important (maybe marketing wants to target prospects!).
The fix: Be intentional about the type of join we use.
For more info, check out my intro post on joining data:
towardsdatascience.com
By the way, if we do go through with the left join, we should make sure to coalesce our sales column to 0, otherwise, we could run into problem #1 or #2 above!
When we join datasets and our right table has duplicates in the join key, we wind up with more records than we started with.
Sometimes this is what we want. But other times we may have surprise duplicates in our right table — potentially due to a data quality issue (real world data is messy, haven’t you heard?).
For example, let’s say we want to enrich our contact/geography table with more information in an “enhanced” city table.
If we join on state, we get contact duplicates:
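Sketched out (the enhanced_city table and its columns are assumed):
select g.contact_id, g.state, e.city
from geography g
left join enhanced_city e on e.state = g.state;
-- if a state appears twice in enhanced_city, every contact in that state is duplicated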
The fix: Make sure we know whether or not our right join key columns are distinct. An easy way to check is to run a count and a count distinct and ensure that they are the same:
#these will be equal if there are no duplicates
select count(state), count(distinct state) from mytable;
When creating complex conditional statements, it’s easy to make an operator precedence mistake.
The most common example: when we write SQL queries, AND gets processed before OR.
Let’s say we have a table with customer sales and segment and we want to return customers in segment A or segment B who also must have sales > 100.
If we write our query without paying attention to precedence, we wind up with:
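A sketch of the careless version (the customers table and its segment/sales columns are assumed):
select *
from customers
where segment = 'A' or segment = 'B' and sales > 100;
-- AND binds tighter, so this reads as: segment = 'A' OR (segment = 'B' AND sales > 100)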
Notice that the client 1 record gets returned even though it has sales below 100. That’s because the query resolves the “and” statement first.
When we add parentheses in the proper places, we get what we’re looking for:
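Continuing the same sketch:
select *
from customers
where (segment = 'A' or segment = 'B') and sales > 100;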
The fix: Always use parentheses when we’re using more than one operator. It prevents mistakes and makes our code more readable.
This one may vary based on the database management system we are using.
For Postgres, we don’t need to include the “as” keyword when naming columns.
This means that if we drop a comma from our query, SQL will rename the column before the comma with the name of the column after the comma:
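A sketch of the slip (same assumed sales table as before; note the missing comma):
select sales_2019 sales_2020
from sales;
-- Postgres treats this as: select sales_2019 as sales_2020, so we get one column carrying the wrong name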
This is a double whammy — not only do we lose one of the columns we were looking for, but one of our columns is also left with the incorrect name.
The fix: Do a careful audit of our select statement before running to make sure we haven’t dropped any commas.
In this post, we walked through several scenarios to watch out for when running SQL queries. When in doubt, it always helps to slow down, run tests, and make sure you haven’t fallen victim to one of these common mistakes.
COBOL - Conditional Statements | Conditional statements are used to change the execution flow depending on certain conditions specified by the programmer. Conditional statements will always evaluate to true or false. Conditions are used in IF, Evaluate, and Perform statements. The different types of conditions are as follows −
IF Condition Statement
Relation Condition
Sign Condition
Class Condition
Condition-Name Condition
Negated Condition
Combined Condition
IF statement checks for conditions. If a condition is true, the IF block is executed; and if the condition is false, the ELSE block is executed.
END-IF is used to end the IF block. To end the IF block, a period can be used instead of END-IF. But it is always preferable to use END-IF for multiple IF blocks.
Nested-IF − IF blocks appearing inside another IF block. There is no limit to the depth of nested IF statements.
Following is the syntax of IF condition statements −
IF [condition] THEN
[COBOL statements]
ELSE
[COBOL statements]
END-IF.
Example
IDENTIFICATION DIVISION.
PROGRAM-ID. HELLO.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 WS-NUM1 PIC 9(9).
01 WS-NUM2 PIC 9(9).
01 WS-NUM3 PIC 9(5).
01 WS-NUM4 PIC 9(6).
PROCEDURE DIVISION.
A000-FIRST-PARA.
MOVE 25 TO WS-NUM1 WS-NUM3.
MOVE 15 TO WS-NUM2 WS-NUM4.
IF WS-NUM1 > WS-NUM2 THEN
DISPLAY 'IN LOOP 1 - IF BLOCK'
IF WS-NUM3 = WS-NUM4 THEN
DISPLAY 'IN LOOP 2 - IF BLOCK'
ELSE
DISPLAY 'IN LOOP 2 - ELSE BLOCK'
END-IF
ELSE
DISPLAY 'IN LOOP 1 - ELSE BLOCK'
END-IF.
STOP RUN.
JCL to execute the above COBOL program −
//SAMPLE JOB(TESTJCL,XXXXXX),CLASS = A,MSGCLASS = C
//STEP1 EXEC PGM = HELLO
When you compile and execute the above program, it produces the following result −
IN LOOP 1 - IF BLOCK
IN LOOP 2 - ELSE BLOCK
Relation condition compares two operands, either of which can be an identifier, literal, or arithmetic expression. Algebraic comparison of numeric fields is done regardless of size and usage clause.
For non-numeric operands
If two non-numeric operands of equal size are compared, then the characters are compared from left with the corresponding positions till the end is reached. The operand containing greater number of characters is declared greater.
If two non-numeric operands of unequal size are compared, then the shorter data item is appended with spaces at the end till the size of the operands becomes equal and then compared according to the rules mentioned in the previous point.
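As a hedged illustration of the second rule (the data names and values below are invented for this sketch and are not part of the tutorial's own examples):
01 WS-SHORT PIC X(4) VALUE 'ABCD'.
01 WS-LONG  PIC X(6) VALUE 'ABCD'.  *> stored as 'ABCD  ', padded with trailing spaces

IF WS-SHORT = WS-LONG THEN
   DISPLAY 'OPERANDS ARE EQUAL'
END-IF.
Because the shorter operand is treated as if padded with spaces before the compare, the comparison above succeeds.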
Given below is the syntax of Relation condition statements −
[Data Name/Arithmetic Operation]
[IS] [NOT]
[Equal to (=),Greater than (>), Less than (<),
Greater than or Equal (>=), Less than or equal (<=) ]
[Data Name/Arithmetic Operation]
Example
IDENTIFICATION DIVISION.
PROGRAM-ID. HELLO.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 WS-NUM1 PIC 9(9).
01 WS-NUM2 PIC 9(9).
PROCEDURE DIVISION.
A000-FIRST-PARA.
MOVE 25 TO WS-NUM1.
MOVE 15 TO WS-NUM2.
IF WS-NUM1 IS GREATER THAN OR EQUAL TO WS-NUM2 THEN
DISPLAY 'WS-NUM1 IS GREATER THAN WS-NUM2'
ELSE
DISPLAY 'WS-NUM1 IS LESS THAN WS-NUM2'
END-IF.
STOP RUN.
JCL to execute the above COBOL program −
//SAMPLE JOB(TESTJCL,XXXXXX),CLASS = A,MSGCLASS = C
//STEP1 EXEC PGM = HELLO
When you compile and execute the above program it produces the following result −
WS-NUM1 IS GREATER THAN WS-NUM2
Sign condition is used to check the sign of a numeric operand. It determines whether a given numeric value is greater than, less than, or equal to ZERO.
Following is the syntax of Sign condition statements −
[Data Name/Arithmetic Operation]
[IS] [NOT]
[Positive, Negative or Zero]
[Data Name/Arithmetic Operation]
Example
IDENTIFICATION DIVISION.
PROGRAM-ID. HELLO.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 WS-NUM1 PIC S9(9) VALUE -1234.
01 WS-NUM2 PIC S9(9) VALUE 123456.
PROCEDURE DIVISION.
A000-FIRST-PARA.
IF WS-NUM1 IS POSITIVE THEN
DISPLAY 'WS-NUM1 IS POSITIVE'.
IF WS-NUM1 IS NEGATIVE THEN
DISPLAY 'WS-NUM1 IS NEGATIVE'.
IF WS-NUM1 IS ZERO THEN
DISPLAY 'WS-NUM1 IS ZERO'.
IF WS-NUM2 IS POSITIVE THEN
DISPLAY 'WS-NUM2 IS POSITIVE'.
STOP RUN.
JCL to execute the above COBOL program −
//SAMPLE JOB(TESTJCL,XXXXXX),CLASS = A,MSGCLASS = C
//STEP1 EXEC PGM = HELLO
When you compile and execute the above program it produces the following result −
WS-NUM1 IS NEGATIVE
WS-NUM2 IS POSITIVE
Class condition is used to check if an operand contains only alphabets or numeric data. Spaces are considered in ALPHABETIC, ALPHABETIC-LOWER, and ALPHABETIC-UPPER.
Following is the syntax of Class condition statements −
[Data Name/Arithmetic Operation>]
[IS] [NOT]
[NUMERIC, ALPHABETIC, ALPHABETIC-LOWER, ALPHABETIC-UPPER]
[Data Name/Arithmetic Operation]
Example
IDENTIFICATION DIVISION.
PROGRAM-ID. HELLO.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 WS-NUM1 PIC X(9) VALUE 'ABCD '.
01 WS-NUM2 PIC 9(9) VALUE 123456789.
PROCEDURE DIVISION.
A000-FIRST-PARA.
IF WS-NUM1 IS ALPHABETIC THEN
DISPLAY 'WS-NUM1 IS ALPHABETIC'.
IF WS-NUM1 IS NUMERIC THEN
DISPLAY 'WS-NUM1 IS NUMERIC'.
IF WS-NUM2 IS NUMERIC THEN
DISPLAY 'WS-NUM2 IS NUMERIC'.
STOP RUN.
JCL to execute the above COBOL program −
//SAMPLE JOB(TESTJCL,XXXXXX),CLASS = A,MSGCLASS = C
//STEP1 EXEC PGM = HELLO
When you compile and execute the above program, it produces the following result −
WS-NUM1 IS ALPHABETIC
WS-NUM2 IS NUMERIC
A condition-name is a user-defined name. It contains a set of values specified by the user. It behaves like a Boolean variable. Condition-names are defined with level number 88 and do not have a PIC clause.
Following is the syntax of user-defined condition statements −
88 [Condition-Name] VALUE [IS, ARE] [LITERAL] [THRU LITERAL].
Example
IDENTIFICATION DIVISION.
PROGRAM-ID. HELLO.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 WS-NUM PIC 9(3).
88 PASS VALUES ARE 041 THRU 100.
88 FAIL VALUES ARE 000 THRU 40.
PROCEDURE DIVISION.
A000-FIRST-PARA.
MOVE 65 TO WS-NUM.
IF PASS
DISPLAY 'Passed with ' WS-NUM ' marks'.
IF FAIL
DISPLAY 'FAILED with ' WS-NUM 'marks'.
STOP RUN.
JCL to execute the above COBOL program −
//SAMPLE JOB(TESTJCL,XXXXXX),CLASS = A,MSGCLASS = C
//STEP1 EXEC PGM = HELLO
When you compile and execute the above program, it produces the following result −
Passed with 065 marks
Negated condition is given by using the NOT keyword. If a condition is true and we have given NOT in front of it, then its final value will be false.
Following is the syntax of Negated condition statements −
IF NOT [CONDITION]
COBOL Statements
END-IF.
Example
IDENTIFICATION DIVISION.
PROGRAM-ID. HELLO.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 WS-NUM1 PIC 9(2) VALUE 20.
01 WS-NUM2 PIC 9(9) VALUE 25.
PROCEDURE DIVISION.
A000-FIRST-PARA.
IF NOT WS-NUM1 IS LESS THAN WS-NUM2 THEN
DISPLAY 'IF-BLOCK'
ELSE
DISPLAY 'ELSE-BLOCK'
END-IF.
STOP RUN.
JCL to execute the above COBOL program −
//SAMPLE JOB(TESTJCL,XXXXXX),CLASS = A,MSGCLASS = C
//STEP1 EXEC PGM = HELLO
When you compile and execute the above program, it produces the following result −
ELSE-BLOCK
A combined condition contains two or more conditions connected using logical operators AND or OR.
Following is the syntax of combined condition statements −
IF [CONDITION] AND [CONDITION]
COBOL Statements
END-IF.
Example
IDENTIFICATION DIVISION.
PROGRAM-ID. HELLO.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 WS-NUM1 PIC 9(2) VALUE 20.
01 WS-NUM2 PIC 9(2) VALUE 25.
01 WS-NUM3 PIC 9(2) VALUE 20.
PROCEDURE DIVISION.
A000-FIRST-PARA.
IF WS-NUM1 IS LESS THAN WS-NUM2 AND WS-NUM1=WS-NUM3 THEN
DISPLAY 'Both condition OK'
ELSE
DISPLAY 'Error'
END-IF.
STOP RUN.
JCL to execute the above COBOL program −
//SAMPLE JOB(TESTJCL,XXXXXX),CLASS = A,MSGCLASS = C
//STEP1 EXEC PGM = HELLO
When you compile and execute the above program, it produces the following result −
Both condition OK
The EVALUATE verb is a replacement for a series of IF-ELSE statements. It can be used to evaluate more than one condition. It is similar to the SWITCH statement in C programs.
Example
IDENTIFICATION DIVISION.
PROGRAM-ID. HELLO.
DATA DIVISION.
WORKING-STORAGE SECTION.
01 WS-A PIC 9 VALUE 0.
PROCEDURE DIVISION.
MOVE 3 TO WS-A.
EVALUATE TRUE
WHEN WS-A > 2
DISPLAY 'WS-A GREATER THAN 2'
WHEN WS-A < 0
DISPLAY 'WS-A LESS THAN 0'
WHEN OTHER
DISPLAY 'INVALID VALUE OF WS-A'
END-EVALUATE.
STOP RUN.
JCL to execute the above COBOL program −
//SAMPLE JOB(TESTJCL,XXXXXX),CLASS = A,MSGCLASS = C
//STEP1 EXEC PGM = HELLO
When you compile and execute the above program, it produces the following result −
WS-A GREATER THAN 2
Java Program to Calculate the Difference Between the Sum of the Odd Level and the Even Level Nodes of a Binary Tree - GeeksforGeeks | 04 Jan, 2021
Graph Traversal using DFS is an obvious way to traverse a tree with recursion. Below is an algorithm for traversing a binary tree using DFS.
Prerequisites
Graph Traversal using DFS
Java Basics(Array List)
Recursion Basics
Algorithm
Initialize the current node as root node and the parent as -1.
Traverse the Binary Tree in the general DFS fashion and keep increasing the level of the node as we traverse farther from the root node.
While traversing, check whether the level of the current node of the binary tree is even; if it is, add its value to the even sum, else add it to the odd sum.
Finally, print the absolute difference of the even sum and the odd sum.
Example
Java
import java.util.*;

public class GFG {

    // global variable declaration
    static ArrayList<ArrayList<Integer> > arr;
    static int val[];
    static int sum_odd = 0, sum_even = 0;

    // traverses the binary-tree/tree having parameters u,
    // par, level which denotes current node, current's
    // parent node, current level of the tree.
    static void dfs(int u, int par, int level)
    {
        // according to level adding the node
        if (level % 2 == 0)
            sum_even += val[u];
        else
            sum_odd += val[u];

        // exploring the child of the particular node u (2
        // in case of binary tree).
        for (int v : arr.get(u)) {
            if (v != par) {
                // recursively calling the current child
                // node to become parent of the next dfs
                // call.
                dfs(v, u, level + 1);
            }
        }
    }

    public static void main(String args[])
    {
        Scanner in = new Scanner(System.in);
        int n = 5;
        val = new int[] { 0, 2, 10, 5, 3, 2 };

        // declaration of the ArrayList size
        arr = new ArrayList<>();

        // initialization of each array element as ArrayList
        // class
        for (int i = 0; i <= n; i++)
            arr.add(new ArrayList<>());

        arr.get(1).add(2);
        arr.get(2).add(1);
        arr.get(1).add(4);
        arr.get(4).add(1);
        arr.get(2).add(5);
        arr.get(5).add(2);
        arr.get(3).add(4);
        arr.get(4).add(3);

        //        1(2)
        //       /    \
        //   2(10)    4(3)
        //    /        /
        //  5(2)     3(5)

        // initial call of recursion
        dfs(1, -1, 0);

        System.out.println(
            "Absolute difference of sum of odd and even nodes of a binary tree "
            + Math.abs(sum_odd - sum_even));
    }
}
Absolute difference of sum of odd and even nodes of a binary tree 4
Time Complexity: O(V + E), where V is the number of vertices and E is the number of edges.
Picked
Technical Scripter 2020
Java
Java Programs
Technical Scripter
Java
Functional Interfaces in Java
Stream In Java
Constructors in Java
Different ways of Reading a text file in Java
Exceptions in Java
Convert a String to Character array in Java
Java Programming Examples
Convert Double to Integer in Java
Implementing a Linked List in Java using Class
Factory method design pattern in Java | [
{
"code": null,
"e": 23557,
"s": 23529,
"text": "\n04 Jan, 2021"
},
{
"code": null,
"e": 23697,
"s": 23557,
"text": "Graph Traversal using DFS is an obvious way to traverse a tree with recursion. Below is an algorithm for traversing binary tree using DFS. "
},
{
"code": null,
"e": 23712,
"s": 23697,
"text": "Prerequisites "
},
{
"code": null,
"e": 23738,
"s": 23712,
"text": "Graph Traversal using DFS"
},
{
"code": null,
"e": 23762,
"s": 23738,
"text": "Java Basics(Array List)"
},
{
"code": null,
"e": 23779,
"s": 23762,
"text": "Recursion Basics"
},
{
"code": null,
"e": 23789,
"s": 23779,
"text": "Algorithm"
},
{
"code": null,
"e": 24199,
"s": 23789,
"text": "Initialize the current node as root node and the parent as -1.Traverse the Binary Tree as the in the general DFS fashion and keep of increasing the level of the node as we traverse farther from the root node.While traversing we check if the level of the current node of the binary tree is even then add in even sum else add in odd sum.Finally, print the Absolute difference of the of even sum and the odd sum."
},
{
"code": null,
"e": 24262,
"s": 24199,
"text": "Initialize the current node as root node and the parent as -1."
},
{
"code": null,
"e": 24409,
"s": 24262,
"text": "Traverse the Binary Tree as the in the general DFS fashion and keep of increasing the level of the node as we traverse farther from the root node."
},
{
"code": null,
"e": 24537,
"s": 24409,
"text": "While traversing we check if the level of the current node of the binary tree is even then add in even sum else add in odd sum."
},
{
"code": null,
"e": 24612,
"s": 24537,
"text": "Finally, print the Absolute difference of the of even sum and the odd sum."
},
{
"code": null,
"e": 24620,
"s": 24612,
"text": "Example"
},
{
"code": null,
"e": 24625,
"s": 24620,
"text": "Java"
},
{
"code": "import java.util.*;public class GFG { // global variable declaration static ArrayList<ArrayList<Integer> > arr; static int val[]; static int sum_odd = 0, sum_even = 0; // traverses the binary-tree/tree having parameters u, // par, level which denotes current node, current's // parent node, current level of the tree. static void dfs(int u, int par, int level) { // according to level adding the node if (level % 2 == 0) sum_even += val[u]; else sum_odd += val[u]; // exploring the child of the particular node u (2 // in case of binary tree). for (int v : arr.get(u)) { if (v != par) { // recursively calling the current child // node to become parent of the next dfs // call. dfs(v, u, level + 1); } } } public static void main(String args[]) { Scanner in = new Scanner(System.in); int n = 5; val = new int[] { 0, 2, 10, 5, 3, 2 }; // declaration of the ArrayList size arr = new ArrayList<>(); // initialization of each array element as ArrayList // class for (int i = 0; i <= n; i++) arr.add(new ArrayList<>()); arr.get(1).add(2); arr.get(2).add(1); arr.get(1).add(4); arr.get(4).add(1); arr.get(2).add(5); arr.get(5).add(2); arr.get(3).add(4); arr.get(4).add(3); // 1(2) // / \\ // 2(10) 4(3) // / / // 5(2) 3(5) // initial call of recurssion dfs(1, -1, 0); System.out.println( \"Absolute difference of sum of odd and even nodes of a binary tree \" + Math.abs(sum_odd - sum_even)); }}",
"e": 26492,
"s": 24625,
"text": null
},
{
"code": null,
"e": 26560,
"s": 26492,
"text": "Absolute difference of sum of odd and even nodes of a binary tree 4"
},
{
"code": null,
"e": 26630,
"s": 26560,
"text": "Time Complexity: O(V + E) where V is the vertices and E is the edges."
},
{
"code": null,
"e": 26637,
"s": 26630,
"text": "Picked"
},
{
"code": null,
"e": 26661,
"s": 26637,
"text": "Technical Scripter 2020"
},
{
"code": null,
"e": 26666,
"s": 26661,
"text": "Java"
},
{
"code": null,
"e": 26680,
"s": 26666,
"text": "Java Programs"
},
{
"code": null,
"e": 26699,
"s": 26680,
"text": "Technical Scripter"
},
{
"code": null,
"e": 26704,
"s": 26699,
"text": "Java"
},
{
"code": null,
"e": 26802,
"s": 26704,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 26811,
"s": 26802,
"text": "Comments"
},
{
"code": null,
"e": 26824,
"s": 26811,
"text": "Old Comments"
},
{
"code": null,
"e": 26854,
"s": 26824,
"text": "Functional Interfaces in Java"
},
{
"code": null,
"e": 26869,
"s": 26854,
"text": "Stream In Java"
},
{
"code": null,
"e": 26890,
"s": 26869,
"text": "Constructors in Java"
},
{
"code": null,
"e": 26936,
"s": 26890,
"text": "Different ways of Reading a text file in Java"
},
{
"code": null,
"e": 26955,
"s": 26936,
"text": "Exceptions in Java"
},
{
"code": null,
"e": 26999,
"s": 26955,
"text": "Convert a String to Character array in Java"
},
{
"code": null,
"e": 27025,
"s": 26999,
"text": "Java Programming Examples"
},
{
"code": null,
"e": 27059,
"s": 27025,
"text": "Convert Double to Integer in Java"
},
{
"code": null,
"e": 27106,
"s": 27059,
"text": "Implementing a Linked List in Java using Class"
}
] |
GATE | GATE-CS-2001 | Question 29 - GeeksforGeeks | 28 Jun, 2021
Consider the following relations:
R1(a,b) iff (a+b) is even over the set of integers
R2(a,b) iff (a+b) is odd over the set of integers
R3(a,b) iff a.b > 0 over the set of non-zero rational numbers
R4(a,b) iff |a - b| <= 2 over the set of natural numbers
Which of the following statements is correct?
(A) R1 and R2 are equivalence relations, R3 and R4 are not
(B) R1 and R3 are equivalence relations, R2 and R4 are not
(C) R1 and R4 are equivalence relations, R2 and R3 are not
(D) R1, R2, R3 and R4 are all equivalence relations
Answer: (B)
Explanation: So basically, we have to tell whether these relations are equivalence relations or not.
R1(a,b):
Reflexive: Yes, because (a+a) is even.
Symmetric: Yes, (a+b) is even ⟹ (b+a) is even.
Transitive: Yes, because (a+b) is even and (b+c) is even ⟹ (a+c) is even.
So R1 is an equivalence relation.
R2(a,b):
Reflexive: No, because (a+a) is always even, never odd.
So R2 is not an equivalence relation.
R3(a,b):
Reflexive: Yes, because a.a > 0.
Symmetric: Yes, a.b > 0 ⟹ b.a > 0.
Transitive: Yes, because a.b > 0 and b.c > 0 ⟹ a.c > 0.
So R3 is an equivalence relation.
R4(a,b):
Reflexive: Yes, because |a-a| ≤ 2.
Symmetric: Yes, |a-b| ≤ 2 ⟹ |b-a| ≤ 2.
Transitive: No, because |a-b| ≤ 2 and |b-c| ≤ 2 ⇏ |a-c| ≤ 2.
So R4 is not an equivalence relation.
So option (B) is correct.
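To make the argument concrete, here is a small Python sketch (not part of the original solution; the sample domains below are my own choice and only illustrate the reasoning over finite samples, they do not constitute a proof):

# Hypothetical verification sketch: check reflexivity, symmetry and transitivity
# of R1-R4 on small sample domains.
from itertools import product

def is_reflexive(rel, domain):
    return all(rel(a, a) for a in domain)

def is_symmetric(rel, domain):
    return all(rel(b, a) for a, b in product(domain, repeat=2) if rel(a, b))

def is_transitive(rel, domain):
    return all(rel(a, c) for a, b, c in product(domain, repeat=3)
               if rel(a, b) and rel(b, c))

relations = {
    "R1": (lambda a, b: (a + b) % 2 == 0, range(-5, 6)),                      # integers
    "R2": (lambda a, b: (a + b) % 2 == 1, range(-5, 6)),                      # integers
    "R3": (lambda a, b: a * b > 0, [x / 2 for x in range(-6, 7) if x != 0]),  # non-zero rationals
    "R4": (lambda a, b: abs(a - b) <= 2, range(1, 11)),                       # naturals
}

for name, (rel, dom) in relations.items():
    print(name, is_reflexive(rel, dom), is_symmetric(rel, dom), is_transitive(rel, dom))

Only R1 and R3 pass all three checks on these samples, which matches option (B).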
Source: http://www.cse.iitd.ac.in/~mittal/gate/gate_math_2001.html
Quiz of this Question
GATE-CS-2001
GATE-GATE-CS-2001
GATE
GATE | GATE-CS-2016 (Set 2) | Question 48
GATE | GATE-CS-2014-(Set-1) | Question 30
GATE | GATE-CS-2001 | Question 23
GATE | GATE-CS-2015 (Set 1) | Question 65
GATE | GATE CS 2010 | Question 45
GATE | GATE-CS-2015 (Set 3) | Question 65
GATE | GATE-CS-2015 (Set 1) | Question 42
GATE | GATE-CS-2014-(Set-1) | Question 65
C++ Program to count Vowels in a string using Pointer
GATE | GATE-CS-2004 | Question 3 | [
{
"code": null,
"e": 24171,
"s": 24143,
"text": "\n28 Jun, 2021"
},
{
"code": null,
"e": 24205,
"s": 24171,
"text": "Consider the following relations:"
},
{
"code": null,
"e": 24425,
"s": 24205,
"text": "R1(a,b) iff (a+b) is even over the set of integers\nR2(a,b) iff (a+b) is odd over the set of integers\nR3(a,b) iff a.b > 0 over the set of non-zero rational numbers\nR4(a,b) iff |a - b| <= 2 over the set of natural numbers"
},
{
"code": null,
"e": 24797,
"s": 24425,
"text": "Which of the following statements is correct?(A) R1 and R2 are equivalence relations, R3 and R4 are not(B) R1 and R3 are equivalence relations, R2 and R4 are not(C) R1 and R4 are equivalence relations, R2 and R3 are not(D) R1, R2, R3 and R4 are all equivalence relationsAnswer: (B)Explanation: So basically, we have to tell whether these relations are equivalence or not."
},
{
"code": null,
"e": 25416,
"s": 24797,
"text": "R1(a,b)Reflexive : Yes, because (a+a) is even.Symmetrix : Yes, (a+b) is even ⟹ (b+a) is even.Transitive : Yes, because (a+b) is even and (b+c) is even ⟹ (a+c) is even.So R1 is equivalence relation.R2(a,b)Reflexive : No, because (a+a) is even.So R2 is not equivalence relation.R3(a,b)Reflexive : Yes, because a.a > 0.Symmetrix : Yes, a.b > 0 ⟹ b.a > 0.Transitive : Yes, because a.b > 0 and b.c > 0 ⟹ a.c > 0.So R3 is equivalence relation.R4(a,b)Reflexive : Yes, because |a-a| ≤ 2.Symmetrix : Yes, |a-b| ≤ 2 ⟹ |b-a| ≤ 2.Transitive : No, because |a-b| ≤ 2 and |b-c| ≤ 2 ⇏ (a-c) is even.So R4 is not equivalence relation."
},
{
"code": null,
"e": 25614,
"s": 25416,
"text": "R1(a,b)Reflexive : Yes, because (a+a) is even.Symmetrix : Yes, (a+b) is even ⟹ (b+a) is even.Transitive : Yes, because (a+b) is even and (b+c) is even ⟹ (a+c) is even.So R1 is equivalence relation."
},
{
"code": null,
"e": 25654,
"s": 25614,
"text": "Reflexive : Yes, because (a+a) is even."
},
{
"code": null,
"e": 25702,
"s": 25654,
"text": "Symmetrix : Yes, (a+b) is even ⟹ (b+a) is even."
},
{
"code": null,
"e": 25777,
"s": 25702,
"text": "Transitive : Yes, because (a+b) is even and (b+c) is even ⟹ (a+c) is even."
},
{
"code": null,
"e": 25808,
"s": 25777,
"text": "So R1 is equivalence relation."
},
{
"code": null,
"e": 25888,
"s": 25808,
"text": "R2(a,b)Reflexive : No, because (a+a) is even.So R2 is not equivalence relation."
},
{
"code": null,
"e": 25927,
"s": 25888,
"text": "Reflexive : No, because (a+a) is even."
},
{
"code": null,
"e": 25962,
"s": 25927,
"text": "So R2 is not equivalence relation."
},
{
"code": null,
"e": 26124,
"s": 25962,
"text": "R3(a,b)Reflexive : Yes, because a.a > 0.Symmetrix : Yes, a.b > 0 ⟹ b.a > 0.Transitive : Yes, because a.b > 0 and b.c > 0 ⟹ a.c > 0.So R3 is equivalence relation."
},
{
"code": null,
"e": 26158,
"s": 26124,
"text": "Reflexive : Yes, because a.a > 0."
},
{
"code": null,
"e": 26194,
"s": 26158,
"text": "Symmetrix : Yes, a.b > 0 ⟹ b.a > 0."
},
{
"code": null,
"e": 26251,
"s": 26194,
"text": "Transitive : Yes, because a.b > 0 and b.c > 0 ⟹ a.c > 0."
},
{
"code": null,
"e": 26282,
"s": 26251,
"text": "So R3 is equivalence relation."
},
{
"code": null,
"e": 26464,
"s": 26282,
"text": "R4(a,b)Reflexive : Yes, because |a-a| ≤ 2.Symmetrix : Yes, |a-b| ≤ 2 ⟹ |b-a| ≤ 2.Transitive : No, because |a-b| ≤ 2 and |b-c| ≤ 2 ⇏ (a-c) is even.So R4 is not equivalence relation."
},
{
"code": null,
"e": 26500,
"s": 26464,
"text": "Reflexive : Yes, because |a-a| ≤ 2."
},
{
"code": null,
"e": 26540,
"s": 26500,
"text": "Symmetrix : Yes, |a-b| ≤ 2 ⟹ |b-a| ≤ 2."
},
{
"code": null,
"e": 26607,
"s": 26540,
"text": "Transitive : No, because |a-b| ≤ 2 and |b-c| ≤ 2 ⇏ (a-c) is even."
},
{
"code": null,
"e": 26642,
"s": 26607,
"text": "So R4 is not equivalence relation."
},
{
"code": null,
"e": 26669,
"s": 26642,
"text": "So option (b) is correct.."
},
{
"code": null,
"e": 26757,
"s": 26669,
"text": "Source: http://www.cse.iitd.ac.in/~mittal/gate/gate_math_2001.htmlQuiz of this Question"
},
{
"code": null,
"e": 26770,
"s": 26757,
"text": "GATE-CS-2001"
},
{
"code": null,
"e": 26788,
"s": 26770,
"text": "GATE-GATE-CS-2001"
},
{
"code": null,
"e": 26793,
"s": 26788,
"text": "GATE"
},
{
"code": null,
"e": 26891,
"s": 26793,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 26900,
"s": 26891,
"text": "Comments"
},
{
"code": null,
"e": 26913,
"s": 26900,
"text": "Old Comments"
},
{
"code": null,
"e": 26955,
"s": 26913,
"text": "GATE | GATE-CS-2016 (Set 2) | Question 48"
},
{
"code": null,
"e": 26997,
"s": 26955,
"text": "GATE | GATE-CS-2014-(Set-1) | Question 30"
},
{
"code": null,
"e": 27031,
"s": 26997,
"text": "GATE | GATE-CS-2001 | Question 23"
},
{
"code": null,
"e": 27073,
"s": 27031,
"text": "GATE | GATE-CS-2015 (Set 1) | Question 65"
},
{
"code": null,
"e": 27107,
"s": 27073,
"text": "GATE | GATE CS 2010 | Question 45"
},
{
"code": null,
"e": 27149,
"s": 27107,
"text": "GATE | GATE-CS-2015 (Set 3) | Question 65"
},
{
"code": null,
"e": 27191,
"s": 27149,
"text": "GATE | GATE-CS-2015 (Set 1) | Question 42"
},
{
"code": null,
"e": 27233,
"s": 27191,
"text": "GATE | GATE-CS-2014-(Set-1) | Question 65"
},
{
"code": null,
"e": 27287,
"s": 27233,
"text": "C++ Program to count Vowels in a string using Pointer"
}
] |
Add alpha to an existing Matplotlib colormap | To add alpha to an existing matplotlib colormap, we can take the following steps −
Create data with a 4×4 dimension array using numpy.
Get the colormap using plt.cm.RdBu.
Create a new colormap using numpy.
Set alpha value to the new colormap.
Generate a colormap object using the list of colors.
Create a new figure or activate an existing figure using figure() method.
Add a subplot to the current figure, nrows=1, ncols=2 at index=1.
Create a pseudocolor plot with a non-regular rectangular grid using pcolormesh() method.
Create a colorbar for scalar mappable instance.
Repeat steps 7 to 9, at index 2.
Use tight_layout() to adjust the padding between and around the subplots.
To display the figure, use show() method.
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
plt.rcParams["figure.figsize"] = [7.00, 3.50]
plt.rcParams["figure.autolayout"] = True
data1 = np.random.random((4, 4))
cmap = plt.cm.RdBu
my_cmap = cmap(np.arange(cmap.N))
my_cmap[:, -1] = np.linspace(0, 1, cmap.N)
my_cmap = ListedColormap(my_cmap)
plt.figure()
plt.subplot(121)
plt.pcolormesh(data1, cmap=plt.cm.RdBu)
plt.colorbar()
plt.subplot(122)
plt.pcolormesh(data1, cmap=my_cmap)
plt.colorbar()
plt.tight_layout()
plt.show() | [
{
"code": null,
"e": 1144,
"s": 1062,
"text": "To add apha to an existing matplotlib colormap, we can take the following steps −"
},
{
"code": null,
"e": 1196,
"s": 1144,
"text": "Create data with a 4×4 dimension array using numpy."
},
{
"code": null,
"e": 1248,
"s": 1196,
"text": "Create data with a 4×4 dimension array using numpy."
},
{
"code": null,
"e": 1284,
"s": 1248,
"text": "Get the colormap using plt.cm.RdBU."
},
{
"code": null,
"e": 1320,
"s": 1284,
"text": "Get the colormap using plt.cm.RdBU."
},
{
"code": null,
"e": 1355,
"s": 1320,
"text": "Create a new colormap using numpy."
},
{
"code": null,
"e": 1390,
"s": 1355,
"text": "Create a new colormap using numpy."
},
{
"code": null,
"e": 1427,
"s": 1390,
"text": "Set alpha value to the new colormap."
},
{
"code": null,
"e": 1464,
"s": 1427,
"text": "Set alpha value to the new colormap."
},
{
"code": null,
"e": 1517,
"s": 1464,
"text": "Generate a colormap object using the list of colors."
},
{
"code": null,
"e": 1570,
"s": 1517,
"text": "Generate a colormap object using the list of colors."
},
{
"code": null,
"e": 1644,
"s": 1570,
"text": "Create a new figure or activate an existing figure using figure() method."
},
{
"code": null,
"e": 1718,
"s": 1644,
"text": "Create a new figure or activate an existing figure using figure() method."
},
{
"code": null,
"e": 1784,
"s": 1718,
"text": "Add a subplot to the current figure, nrows=1, ncols=2 at index=1."
},
{
"code": null,
"e": 1850,
"s": 1784,
"text": "Add a subplot to the current figure, nrows=1, ncols=2 at index=1."
},
{
"code": null,
"e": 1939,
"s": 1850,
"text": "Create a pseudocolor plot with a non-regular rectangular grid using pcolormesh() method."
},
{
"code": null,
"e": 2028,
"s": 1939,
"text": "Create a pseudocolor plot with a non-regular rectangular grid using pcolormesh() method."
},
{
"code": null,
"e": 2076,
"s": 2028,
"text": "Create a colorbar for scalar mappable instance."
},
{
"code": null,
"e": 2124,
"s": 2076,
"text": "Create a colorbar for scalar mappable instance."
},
{
"code": null,
"e": 2157,
"s": 2124,
"text": "Repeat steps 7 to 9, at index 2."
},
{
"code": null,
"e": 2190,
"s": 2157,
"text": "Repeat steps 7 to 9, at index 2."
},
{
"code": null,
"e": 2264,
"s": 2190,
"text": "Use tight_layout() to adjust the padding between and around the subplots."
},
{
"code": null,
"e": 2338,
"s": 2264,
"text": "Use tight_layout() to adjust the padding between and around the subplots."
},
{
"code": null,
"e": 2380,
"s": 2338,
"text": "To display the figure, use show() method."
},
{
"code": null,
"e": 2422,
"s": 2380,
"text": "To display the figure, use show() method."
},
{
"code": null,
"e": 2956,
"s": 2422,
"text": "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import ListedColormap\nplt.rcParams[\"figure.figsize\"] = [7.00, 3.50]\nplt.rcParams[\"figure.autolayout\"] = True\ndata1 = np.random.random((4, 4))\ncmap = plt.cm.RdBu\nmy_cmap = cmap(np.arange(cmap.N))\nmy_cmap[:, -1] = np.linspace(0, 1, cmap.N)\nmy_cmap = ListedColormap(my_cmap)\nplt.figure()\nplt.subplot(121)\nplt.pcolormesh(data1, cmap=plt.cm.RdBu)\nplt.colorbar()\nplt.subplot(122)\nplt.pcolormesh(data1, cmap=my_cmap)\nplt.colorbar()\nplt.tight_layout()\nplt.show()"
}
] |
How can we capitalize only first letter of a string with the help of MySQL function/s? | Actually, there is no single function in MySQL to capitalize only first letter of the string. We need to use nesting of functions and for this case, we can use UPPER() and LOWER() with SUBSTRING() functions. To understand it, we are using data, given as below, from ‘emp_tbl’.
mysql> Select * from emp_tbl;
+----+----------------+
| Id | Name |
+----+----------------+
| 1 | rahul singh |
| 2 | gaurav kumar |
| 3 | yashpal sharma |
| 4 | krishan kumar |
| 5 | kuldeep rai |
| 6 | munish nayak |
+----+----------------+
6 rows in set (0.00 sec)
We can see from the above result set that the first character of name string is in small letters. The following query will capitalize the first letter of string −
mysql> Select CONCAT(UPPER(SUBSTRING(name,1,1)),LOWER(SUBSTRING(name,2))) AS Name from emp_tbl;
+----------------+
| Name |
+----------------+
| Rahul singh |
| Gaurav kumar |
| Yashpal sharma |
| Krishan kumar |
| Kuldeep rai |
| Munish nayak |
+----------------+
6 rows in set (0.00 sec) | [
{
"code": null,
"e": 1339,
"s": 1062,
"text": "Actually, there is no single function in MySQL to capitalize only first letter of the string. We need to use nesting of functions and for this case, we can use UPPER() and LOWER() with SUBSTRING() functions. To understand it, we are using data, given as below, from ‘emp_tbl’."
},
{
"code": null,
"e": 1634,
"s": 1339,
"text": "mysql> Select * from emp_tbl;\n+----+----------------+\n| Id | Name |\n+----+----------------+\n| 1 | rahul singh |\n| 2 | gaurav kumar |\n| 3 | yashpal sharma |\n| 4 | krishan kumar |\n| 5 | kuldeep rai |\n| 6 | munish nayak |\n+----+----------------+\n6 rows in set (0.00 sec)"
},
{
"code": null,
"e": 1797,
"s": 1634,
"text": "We can see from the above result set that the first character of name string is in small letters. The following query will capitalize the first letter of string −"
},
{
"code": null,
"e": 2108,
"s": 1797,
"text": "mysql> Select CONCAT(UPPER(SUBSTRING(name,1,1)),LOWER(SUBSTRING(name,2))) AS Name from emp_tbl;\n+----------------+\n| Name |\n+----------------+\n| Rahul singh |\n| Gaurav kumar |\n| Yashpal sharma |\n| Krishan kumar |\n| Kuldeep rai |\n| Munish nayak |\n+----------------+\n6 rows in set (0.00 sec)"
}
] |
Stock Market Data Collection & Feature Engineering Using Python | by Posey | Towards Data Science | Finding appropriate data and pre-processing that data is arguably the most important part of all modern machine learning problems. In an age of unlimited libraries, APIs, and guides for how to build and implement optimal models, the actual data is overlooked. The advice is usually as simple as, “just find some data” or even better yet “go collect some relevant data.” This is of course rubbish. Collecting data is hard; collecting useful data is even harder. Some datasets are the result of years of data collection. In the case of financial data sets it is the result of decades of data accumulation. Most examples you’ll find on the web include pre-cooked datasets from Kaggle, the infamous Iris data set, housing, etc. What separates a great AI company from just an okay company is not usually the quality of models but rather the quantity and quality of data.
It’s hard to make predictions, especially about the future...
Niels Bohr
It is overwhelming nowadays to try and compete with established giants in AI and machine learning. How on Earth can we as normal people with average budgets go about acquiring the data necessary to engineer useful systems? Our models can be of similar quality to the big players, but our data is usually lacking. In this article we examine how to level the playing field. This information can be utilized for all sorts of problems, not just machine learning for the stock market. But we approach the problem in this article from a stock selection perspective.
This is how the average workflow looks, more or less. Arguably the most important parts are 1–3. The performance of our model oftentimes is entirely dependent upon the quantity and quality of our data. #2 defines the initial quantity and quality of our data. #3 defines making our data machine-readable, i.e. usable by our model as well as cleaning the data and squeezing as much utility out of the data as possible. There are all sorts of slick methods we can use to perform this processing which we’ll discuss in more depth later on in the article.
Quite clearly the problem we are looking to approach is stock selection. How can one value a stock? We’ve discussed such valuations in the past so we will not go too far in depth. If we are trying to develop a model that is capable of providing a market view, it would help to have some understanding of the markets and what data will be valuable to the model. In the case of deep learning we can potentially avoid this problem. With deep learning, if we gather enough data it’s possible for our model to be very useful without much knowledge of what is “good data” if we gather enough quantity of data. Gathering the quantity of data to make this a reality can be very difficult. So we will be focusing on finding quality data and processing the data well.
This is the part where most people get stuck. How on Earth can I find useful data when the big firms are tapped directly into all of the world’s top data sources and are accessing data at unprecedented scale? How can I find and collect this data on my personal computer? I contend that the average individual has the resources to gather sufficient data to build useful models. Storage these days is relatively cheap. And there are tons of data sources available that most people just aren’t aware of. Some of these data sources come from academia, some from random APIs, amazing open source people who provide this data for free, governments, etc.
Here are some useful data resources:
Favorites: https://db.nomics.world/ This resource is especially helpful. It’s a compilation of massive amounts of economics datasets from all over the world. You can select as many data sets as you want → add them to your cart → download all datasets with one click
https://github.com/addisonlynch/iexfinance This resource is my go-to for gathering data on stock prices, balance sheets, analyst predictions, and all sorts of great information.
Census Data: https://www.census.gov/data.html
Useful Collection of Forecasts: http://www.forecasts.org/stpoor.htm
Massive Collection of Indicators: https://www.assetmacro.com/
Labor Statistics: https://www.bls.gov/
Data from the US Treasury: https://home.treasury.gov/
Since we’re using Python, our first step is probably to read in the data from our various CSV files. One should look to merge all of this data together into a single dataframe and perform some exploratory analysis.
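As a minimal sketch of that step (the file names and the join key below are placeholders of my own, not from the article), reading a few downloaded CSV files and merging them into one dataframe might look like this:

# Hypothetical sketch: load downloaded CSV files and merge them on a shared date column.
import pandas as pd

prices = pd.read_csv("prices.csv", parse_dates=["date"])
indicators = pd.read_csv("macro_indicators.csv", parse_dates=["date"])

# An inner join keeps only the dates present in both files.
df = prices.merge(indicators, on="date", how="inner")

print(df.shape)       # quick sanity check on the merged frame
print(df.describe())  # basic exploratory summary statistics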
Cleaning Data
Inevitably, some of our data will have holes in it, some will just be garbage and completely out of left field, and some can be made more effective by scaling or normalizing. We can also add artificial data to our dataset if we need more data to train the model. This is especially popular right now in image processing where companies like Nvidia are simulating environments to help train CNNs.
The simplest route to take when preparing data that includes missing data or invalid data is to use Pandas built-in functions like dropna().
df.dropna(inplace=True)
dropna() will remove all NaN entries from our dataframe.
df.fillna(df.mean(), inplace=True)
fillna() will replace NaN entries with the desired argument. In our example above we use the column mean as a replacement. There are tons of possible approaches. Another possible approach is to take the average of the nearest two entries to that NaN entry. Some of these approaches are well-defined in various material on information theory.
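As a sketch of that nearest-entries idea (my own example, not code from the article), pandas' interpolate() can fill each NaN from its neighbouring values:

# Hypothetical sketch: linear interpolation replaces each NaN with a value
# computed from the surrounding entries in the same column.
df.interpolate(method="linear", inplace=True)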
Feature Engineering
Oftentimes it can be useful to identify what feature combinations will be most useful for training our model. These are going to be features that are highly separable. To that end, a visualization can be especially useful.
We can use the library seaborn to automatically build a visualization with our features vs. our output.
sns.pairplot(data=df, hue="asset_price")
These distributions aren’t terribly separable. We can see the first feature is probably more separable than the second feature. In a deep learning model we probably just throw every bit of data into the mix. But in some non-deep learning applications we can get better performance out of just training on the data that is more linearly separable.
We can also score features under the same principle in a more quantitative fashion. We use Scikit-Learn’s SelectKBest to score our features.
Obviously the higher the score the more impact the feature has on our output according to the SelectKBest algorithm. In our case, we use the Chi-squared test for our scoring:
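The formula referenced here is not reproduced in the text; the standard chi-squared statistic is

\chi^{2} = \sum_{i} \frac{(O_{i} - E_{i})^{2}}{E_{i}}

where O_i is the observed frequency and E_i the expected frequency for category i.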
The details aren’t overly important, just know your score indicates “goodness of fit.” Here is the implementation:
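The implementation referenced above is also missing from the text; a minimal sketch using scikit-learn (the feature and target column names are assumptions on my part) would be:

# Hypothetical sketch: score features with SelectKBest and the chi2 scoring function.
# Note that chi2 requires non-negative feature values.
from sklearn.feature_selection import SelectKBest, chi2

X = df.drop(columns=["asset_price"])   # assumed feature columns
y = df["asset_price"]                  # assumed target column

selector = SelectKBest(score_func=chi2, k=5)   # keep the 5 best-scoring features
selector.fit(X, y)

for name, score in zip(X.columns, selector.scores_):
    print(name, score)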
Scaling Data
There are many approaches to this part of the processing game. One of the most useful functions provided by Scikit-Learn is scale(). This uses normalization to bring all of the data into a more digestible form. You can read more about scaling here. In this case we also flatten our output variable since 1-D is our desired dimension:
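The code for that step is not shown in the text; a minimal sketch of what it might look like (the variable names are assumptions) is:

# Hypothetical sketch: normalize the feature matrix and flatten the target to 1-D.
import numpy as np
from sklearn.preprocessing import scale

X_scaled = scale(X)   # zero mean, unit variance per column
y = np.ravel(y)       # flatten the output variable to one dimension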
Let’s continue the conversation on Twitter!
To support my writing and get full access to all articles on Medium, visit https://posey.medium.com/membership
I hope you learned something new, maybe a new method or a fresh approach. Many of these methods likely already exist in your workflow, but some may not and you should try adding them to your toolkit and seeing what works best. If you’re struggling to get very great results out of your models, I recommend spending more time on your data and less time trying to find a model that magically fits your data; you’re likely just overfitting. The majority of failing models occur because of a lack of data, low quality data, or just mismanagement of data. Data, data, data...
Note from Towards Data Science’s editors: While we allow independent authors to publish articles in accordance with our rules and guidelines, we do not endorse each author’s contribution. You should not rely on an author’s works without seeking professional advice. See our Reader Terms for details. | [
{
"code": null,
"e": 1038,
"s": 172,
"text": "Finding appropriate data and pre-processing that data is arguably the most important part of all modern machine learning problems. In an age of unlimited libraries, APIs, and guides for how to build and implement optimal models, the actual data is overlooked. The advice is usually as simple as, “just find some data” or even better yet “go collect some relevant data.” This is of course rubbish. Collecting data is hard; collecting useful data is even harder. Some datasets are the result of years of data collection. In the case of financial data sets it is the result of decades of data accumulation. Most examples you’ll find on the web include pre-cooked datasets from Kaggle, the infamous Iris data set, housing, etc. What separates a great AI company from just an okay company is not usually the quality of models but rather the quantity and quality of data."
},
{
"code": null,
"e": 1100,
"s": 1038,
"text": "It’s hard to make predictions, especially about the future..."
},
{
"code": null,
"e": 1111,
"s": 1100,
"text": "Niels Bohr"
},
{
"code": null,
"e": 1671,
"s": 1111,
"text": "It is overwhelming nowadays to try and compete with established giants in AI and machine learning. How on Earth can we as normal people with average budgets go about acquiring the data necessary to engineer useful systems? Our models can be of similar quality to the big players, but our data is usually lacking. In this article we examine how to level the playing field. This information can be utilized for all sorts of problems, not just machine learning for the stock market. But we approach the problem in this article from a stock selection perspective."
},
{
"code": null,
"e": 2222,
"s": 1671,
"text": "This is how the average workflow looks, more or less. Arguably the most important parts are 1–3. The performance of our model oftentimes is entirely dependent upon the quantity and quality of our data. #2 defines the initial quantity and quality of our data. #3 defines making our data machine-readable, i.e. usable by our model as well as cleaning the data and squeezing as much utility out of the data as possible. There are all sorts of slick methods we can use to perform this processing which we’ll discuss in more depth later on in the article."
},
{
"code": null,
"e": 2980,
"s": 2222,
"text": "Quite clearly the problem we are looking to approach is stock selection. How can one value a stock? We’ve discussed such valuations in the past so we will not go too far in depth. If we are trying to develop a model that is capable of providing a market view, it would help to have some understanding of the markets and what data will be valuable to the model. In the case of deep learning we can potentially avoid this problem. With deep learning, if we gather enough data it’s possible for our model to be very useful without much knowledge of what is “good data” if we gather enough quantity of data. Gathering the quantity of data to make this a reality can be very difficult. So we will be focusing on finding quality data and processing the data well."
},
{
"code": null,
"e": 3628,
"s": 2980,
"text": "This is the part where most people get stuck. How on Earth can I find useful data when the big firms are tapped directly into all of the world’s top data sources and are accessing data at unprecedented scale? How can I find and collect this data on my personal computer? I contend that the average individual has the resources to gather sufficient data to build useful models. Storage these days is relatively cheap. And there are tons of data sources available that most people just aren’t aware of. Some of these data sources come from academia, some from random APIs, amazing open source people who provide this data for free, governments, etc."
},
{
"code": null,
"e": 3665,
"s": 3628,
"text": "Here are some useful data resources:"
},
{
"code": null,
"e": 3931,
"s": 3665,
"text": "Favorites: https://db.nomics.world/ This resource is especially helpful. It’s a compilation of massive amounts of economics datasets from all over the world. You can select as many data sets as you want → add them to your cart → download all datasets with one click"
},
{
"code": null,
"e": 4109,
"s": 3931,
"text": "https://github.com/addisonlynch/iexfinance This resource is my go-to for gathering data on stock prices, balance sheets, analyst predictions, and all sorts of great information."
},
{
"code": null,
"e": 4155,
"s": 4109,
"text": "Census Data: https://www.census.gov/data.html"
},
{
"code": null,
"e": 4223,
"s": 4155,
"text": "Useful Collection of Forecasts: http://www.forecasts.org/stpoor.htm"
},
{
"code": null,
"e": 4285,
"s": 4223,
"text": "Massive Collection of Indicators: https://www.assetmacro.com/"
},
{
"code": null,
"e": 4324,
"s": 4285,
"text": "Labor Statistics: https://www.bls.gov/"
},
{
"code": null,
"e": 4378,
"s": 4324,
"text": "Data from the US Treasury: https://home.treasury.gov/"
},
{
"code": null,
"e": 4593,
"s": 4378,
"text": "Since we’re using Python, our first step is probably to read in the data from our various CSV files. One should look to merge all of this data together into a single dataframe and perform some exploratory analysis."
},
{
"code": null,
"e": 4607,
"s": 4593,
"text": "Cleaning Data"
},
{
"code": null,
"e": 5003,
"s": 4607,
"text": "Inevitably, some of our data will have holes in it, some will just be garbage and completely out of left field, and some can be made more effective by scaling or normalizing. We can also add artificial data to our dataset if we need more data to train the model. This is especially popular right now in image processing where companies like Nvidia are simulating environments to help train CNNs."
},
{
"code": null,
"e": 5144,
"s": 5003,
"text": "The simplest route to take when preparing data that includes missing data or invalid data is to use Pandas built-in functions like dropna()."
},
{
"code": null,
"e": 5168,
"s": 5144,
"text": "df.dropna(inplace=True)"
},
{
"code": null,
"e": 5225,
"s": 5168,
"text": "dropna() will remove all NaN entries from our dataframe."
},
{
"code": null,
"e": 5260,
"s": 5225,
"text": "df.fillna(df.mean(), inplace=True)"
},
{
"code": null,
"e": 5602,
"s": 5260,
"text": "fillna() will replace NaN entries with the desired argument. In our example above we use the column mean as a replacement. There are tons of possible approaches. Another possible approach is to take the average of the nearest two entries to that NaN entry. Some of these approaches are well-defined in various material on information theory."
},
{
"code": null,
"e": 5622,
"s": 5602,
"text": "Feature Engineering"
},
{
"code": null,
"e": 5845,
"s": 5622,
"text": "Oftentimes it can be useful to identify what feature combinations will be most useful for training our model. These are going to be features that are highly separable. To that end, a visualization can be especially useful."
},
{
"code": null,
"e": 5949,
"s": 5845,
"text": "We can use the library seaborn to automatically build a visualization with our features vs. our output."
},
{
"code": null,
"e": 5990,
"s": 5949,
"text": "sns.pairplot(data=df, hue=\"asset_price\")"
},
{
"code": null,
"e": 6337,
"s": 5990,
"text": "These distributions aren’t terribly separable. We can see the first feature is probably more separable than the second feature. In a deep learning model we probably just throw every bit of data into the mix. But in some non-deep learning applications we can get better performance out of just training on the data that is more linearly separable."
},
{
"code": null,
"e": 6478,
"s": 6337,
"text": "We can also score features under the same principle in a more quantitative fashion. We use Scikit-Learn’s SelectKBest to score our features."
},
{
"code": null,
"e": 6653,
"s": 6478,
"text": "Obviously the higher the score the more impact the feature has on our output according to the SelectKBest algorithm. In our case, we use the Chi-squared test for our scoring:"
},
{
"code": null,
"e": 6768,
"s": 6653,
"text": "The details aren’t overly important, just know your score indicates “goodness of fit.” Here is the implementation:"
},
{
"code": null,
"e": 6781,
"s": 6768,
"text": "Scaling Data"
},
{
"code": null,
"e": 7115,
"s": 6781,
"text": "There are many approaches to this part of the processing game. One of the most useful functions provided by Scikit-Learn is scale(). This uses normalization to bring all of the data into a more digestible form. You can read more about scaling here. In this case we also flatten our output variable since 1-D is our desired dimension:"
},
{
"code": null,
"e": 7159,
"s": 7115,
"text": "Let’s continue the conversation on Twitter!"
},
{
"code": null,
"e": 7270,
"s": 7159,
"text": "To support my writing and get full access to all articles on Medium, visit https://posey.medium.com/membership"
},
{
"code": null,
"e": 7841,
"s": 7270,
"text": "I hope you learned something new, maybe a new method or a fresh approach. Many of these methods likely already exist in your workflow, but some may not and you should try adding them to your toolkit and seeing what works best. If you’re struggling to get very great results out of your models, I recommend spending more time on your data and less time trying to find a model that magically fits your data; you’re likely just overfitting. The majority of failing models occur because of a lack of data, low quality data, or just mismanagement of data. Data, data, data..."
}
] |
What is boxing and unboxing in Java? | Wrapper classes are those whose objects wrap a primitive data type within them. In the java.lang package, Java provides a separate class for each of the primitive data types, namely Byte, Character, Double, Integer, Float, Long, Short.
Converting a primitive datatype to its wrapper object is called boxing.
Integer obj = new Integer(2526);
Whereas, converting an object into corresponding primitive datatype is known as unboxing.
Live Demo
public class Sample {
public static void main (String args[]){
Integer obj = new Integer("2526");
int i = obj.intValue();
System.out.println(i);
}
}
2526 | [
{
"code": null,
"e": 1296,
"s": 1062,
"text": "Wrapper classes are those whose objects wraps a primitive data type within them. In the java.lang package java provides a separate class for each of the primitive data type namely Byte, Character, Double, Integer, Float, Long, Short."
},
{
"code": null,
"e": 1354,
"s": 1296,
"text": "Converting primitive datatype to object is called boxing."
},
{
"code": null,
"e": 1390,
"s": 1354,
"text": "Integer obj = new Integer (\"2526\");"
},
{
"code": null,
"e": 1480,
"s": 1390,
"text": "Whereas, converting an object into corresponding primitive datatype is known as unboxing."
},
{
"code": null,
"e": 1490,
"s": 1480,
"text": "Live Demo"
},
{
"code": null,
"e": 1663,
"s": 1490,
"text": "public class Sample {\n public static void main (String args[]){\n Integer obj = new Integer(\"2526\");\n int i = obj.intValue();\n System.out.println(i);\n }\n}"
},
{
"code": null,
"e": 1668,
"s": 1663,
"text": "2526"
}
] |
Calendar function in Python | Python has an in-built module called calendar whose operations are related to the calendar. Below are some of the calendar functions in Python.
This function prints the calendar of the given year; its arguments are the year, the width of each date column, the number of lines per week and the spacing between the month columns.
print ("The calendar of 2014 is : ")
print (calendar.calendar(2014,3,1,4))
The calendar of year 2014 is :
2014
January February March
Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun
1 2 3 4 5 1 2 1 2
6 7 8 9 10 11 12 3 4 5 6 7 8 9 3 4 5 6 7 8 9
13 14 15 16 17 18 19 10 11 12 13 14 15 16 10 11 12 13 14 15 16
20 21 22 23 24 25 26 17 18 19 20 21 22 23 17 18 19 20 21 22 23
27 28 29 30 31 24 25 26 27 28 24 25 26 27 28 29 30
31
April May June
Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun
1 2 3 4 5 6 1 2 3 4 1
7 8 9 10 11 12 13 5 6 7 8 9 10 11 2 3 4 5 6 7 8
14 15 16 17 18 19 20 12 13 14 15 16 17 18 9 10 11 12 13 14 15
21 22 23 24 25 26 27 19 20 21 22 23 24 25 16 17 18 19 20 21 22
28 29 30 26 27 28 29 30 31 23 24 25 26 27 28 29
30
July August September
Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun
1 2 3 4 5 6 1 2 3 1 2 3 4 5 6 7
7 8 9 10 11 12 13 4 5 6 7 8 9 10 8 9 10 11 12 13 14
14 15 16 17 18 19 20 11 12 13 14 15 16 17 15 16 17 18 19 20 21
21 22 23 24 25 26 27 18 19 20 21 22 23 24 22 23 24 25 26 27 28
28 29 30 31 25 26 27 28 29 30 31 29 30
October November December
Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun
1 2 3 4 5 1 2 1 2 3 4 5 6 7
6 7 8 9 10 11 12 3 4 5 6 7 8 9 8 9 10 11 12 13 14
13 14 15 16 17 18 19 10 11 12 13 14 15 16 15 16 17 18 19 20 21
20 21 22 23 24 25 26 17 18 19 20 21 22 23 22 23 24 25 26 27 28
27 28 29 30 31 24 25 26 27 28 29 30 29 30 31
This function returns the weekday that is set as the first day of the week (0 means Monday).
print ("The starting day in calendar is : ",end="")
print (calendar.firstweekday())
The starting day in calendar is : 0
This function checks if mentioned year in argument is leap year or not.
if (calendar.isleap(2014)):
print ("The year is leap year")
else :
print ("The year is not leap year")
The year is not leap year.
This function calculates the number of leap days between the years specified in the arguments.
print ("The leap (year) days between 1950 and 2000 are : ",end="")
print (calendar.leapdays(1950, 2000))
The leap days between 1950 and 2000 are : 12
This function displays the month of the specified year. It takes four parameters: the year, the month, the width of each date column and the number of lines taken by a week.
print ("The month 6th of 2017 is :")
print (calendar.month(2017,6,3,1))
The month 6th of 2017 is :
June 2017
Mon Tue Wed Thu Fri Sat Sun
1 2 3 4
5 6 7 8 9 10 11
12 13 14 15 16 17 18
19 20 21 22 23 24 25
26 27 28 29 30 | [
{
"code": null,
"e": 1193,
"s": 1062,
"text": "Python has an in built module called calendar which operation is related to calendar. There are some calendar functions in Python."
},
{
"code": null,
"e": 1290,
"s": 1193,
"text": "This function shows the year, width of characters, no. of lines per week and column separations."
},
{
"code": null,
"e": 1366,
"s": 1290,
"text": "print (\"The calendar of 2014 is : \") \nprint (calendar.calendar(2014,3,1,4))"
},
{
"code": null,
"e": 3933,
"s": 1366,
"text": "The calendar of year 2014 is : \n 2014\n January \t\tFebruary March\nMon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun\n 1 2 3 4 5 1 2 1 2\n 6 7 8 9 10 11 12 3 4 5 6 7 8 9 3 4 5 6 7 8 9\n13 14 15 16 17 18 19 10 11 12 13 14 15 16 10 11 12 13 14 15 16\n20 21 22 23 24 25 26 17 18 19 20 21 22 23 17 18 19 20 21 22 23\n27 28 29 30 31 24 25 26 27 28 24 25 26 27 28 29 30\n\t\t\t\t 31\n\n April \tMay \tJune\nMon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun\n 1 2 3 4 5 6 1 2 3 4 1\n 7 8 9 10 11 12 13 5 6 7 8 9 10 11 2 3 4 5 6 7 8\n14 15 16 17 18 19 20 12 13 14 15 16 17 18 9 10 11 12 13 14 15\n21 22 23 24 25 26 27 19 20 21 22 23 24 25 16 17 18 19 20 21 22\n28 29 30 26 27 28 29 30 31 23 24 25 26 27 28 29\n 30\n\n July \tAugust \tSeptember\nMon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun\n\t 1 2 3 4 5 6 1 2 3 1 2 3 4 5 6 7\n 7 8 9 10 11 12 13 4 5 6 7 8 9 10 8 9 10 11 12 13 14\n14 15 16 17 18 19 20 11 12 13 14 15 16 17 \t15 16 17 18 19 20 21\n21 22 23 24 25 26 27 18 19 20 21 22 23 24 \t22 23 24 25 26 27 28\n28 29 30 31 25 26 27 28 29 30 31 \t29 30\n\n October \t\t November December\nMon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun Mon Tue Wed Thu Fri Sat Sun\n 1 2 3 4 5 \t 1 2 1 2 3 4 5 6 7\n6 7 8 9 10 11 12 3 4 5 6 7 8 9 8 9 10 11 12 13 14\n13 14 15 16 17 18 19 10 11 12 13 14 15 16 15 16 17 18 19 20 21\n20 21 22 23 24 25 26 17 18 19 20 21 22 23 22 23 24 25 26 27 28\n27 28 29 30 31 24 25 26 27 28 29 30 29 30 31\n"
},
{
"code": null,
"e": 3982,
"s": 3933,
"text": "This function returns the first day of the week."
},
{
"code": null,
"e": 4068,
"s": 3982,
"text": "print (\"The starting day in calendar is : \",end=\"\") \nprint (calendar.firstweekday()) "
},
{
"code": null,
"e": 4106,
"s": 4068,
"text": "The starting day in calendar is : 0\n"
},
{
"code": null,
"e": 4178,
"s": 4106,
"text": "This function checks if mentioned year in argument is leap year or not."
},
{
"code": null,
"e": 4290,
"s": 4178,
"text": "if (calendar.isleap(2014)): \n print (\"The year is leap year\") \nelse : \n print (\"The year is not leap year\")"
},
{
"code": null,
"e": 4318,
"s": 4290,
"text": "The year is not leap year.\n"
},
{
"code": null,
"e": 4414,
"s": 4318,
"text": "This function calculates the number of leap (year)days between the year specified in arguments."
},
{
"code": null,
"e": 4521,
"s": 4414,
"text": "print (\"The leap (year) days between 1950 and 2000 are : \",end=\"\") \nprint (calendar.leapdays(1950, 2000)) "
},
{
"code": null,
"e": 4567,
"s": 4521,
"text": "The leap days between 1950 and 2000 are : 12\n"
},
{
"code": null,
"e": 4737,
"s": 4567,
"text": "This function display the month of a specific mentioned year in arguments. It takes 4 parameters year, month, and width of characters and no. of lines taken by a week."
},
{
"code": null,
"e": 4811,
"s": 4737,
"text": "print (\"The month 6th of 2017 is :\") \nprint (calendar.month(2017,6,3,1)) "
},
{
"code": null,
"e": 5018,
"s": 4811,
"text": "The month 6th of 2017 is :\n June 2017\nMon Tue Wed Thu Fri Sat Sun\n 1 2 3 4\n 5 6 7 8 9 10 11\n 12 13 14 15 16 17 18\n 19 20 21 22 23 24 25\n 26 27 28 29 30\n"
}
] |
Unlocking the True Power of Support Vector Regression | by Ashwin Raj | Towards Data Science | SVMs or Support Vector Machines are one of the most popular and widely used algorithm for dealing with classification problems in machine learning. However, the use of SVMs in regression is not very well documented. This algorithm acknowledges the presence of non-linearity in the data and provides a proficient prediction model.
In this article, I will first try to give you an intuitive understanding of the algorithm by taking a deep-dive into the theory behind the algorithm. Then we will build our very own SVM Regressor model. And finally, we will look into some advantages of using Support Vector Regression.
The SVM regression algorithm is referred to as Support Vector Regression or SVR. Before getting started with the algorithm, it is necessary that we have an intuition of what a support vector machine actually is.
In machine learning, Support Vector Machines are supervised learning models with associated learning algorithms that analyze data used for classification and regression analysis. In Support Vector Regression, the straight line that is required to fit the data is referred to as hyperplane.
The objective of a support vector machine algorithm is to find a hyperplane in an n-dimensional space that distinctly classifies the data points. The data points on either side of the hyperplane that are closest to the hyperplane are called Support Vectors. These influence the position and orientation of the hyperplane and thus help build the SVM.
Now that we have an intuition of what a support vector machine is, we will take look into the various hyperparameters that are used in Support Vector Regression. Some of the key parameters used are as mentioned below:
Hyperplanes are decision boundaries that is used to predict the continuous output. The data points on either side of the hyperplane that are closest to the hyperplane are called Support Vectors. These are used to plot the required line that shows the predicted output of the algorithm.
A kernel is a set of mathematical functions that takes data as input and transforms it into the required form. These are generally used for finding a hyperplane in the higher dimensional space.
The most widely used kernels include Linear, Non-Linear, Polynomial, Radial Basis Function (RBF) and Sigmoid. By default, RBF is used as the kernel. Each of these kernels are used depending on the dataset.
These are the two lines that are drawn around the hyperplane at a distance of ε (epsilon). They are used to create a margin between the data points.
Support Vector Regression is a supervised learning algorithm that is used to predict discrete values. Support Vector Regression uses the same principle as the SVMs. The basic idea behind SVR is to find the best fit line. In SVR, the best fit line is the hyperplane that has the maximum number of points.
Unlike other Regression models that try to minimize the error between the real and predicted value, the SVR tries to fit the best line within a threshold value. The threshold value is the distance between the hyperplane and boundary line. The fit time complexity of SVR is more than quadratic with the number of samples which makes it hard to scale to datasets with more than a couple of 10000 samples.
For large datasets, Linear SVR or SGD Regressor is used. Linear SVR provides a faster implementation than SVR but only considers the linear kernel. The model produced by Support Vector Regression depends only on a subset of the training data, because the cost function ignores samples whose prediction is close to their target.
Now that we have a gist of what Support Vector Regression is, we shall try to build our very own SVR regressor. The code and other resources for building this regression model can be found here.
Our first step is to import the libraries required to build our model. It is not necessary to import all the libraries at just one place. Python gives us the flexibility to import libraries at any place. To get started we will be importing the Pandas, Numpy, Matplotlib and Seaborn libraries.
#Import the Libraries and read the data into a Pandas DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
test = pd.read_csv("california_housing_test.csv")
train = pd.read_csv("california_housing_train.csv")
Once these libraries have been imported our next step will be fetching the dataset and loading the data into our notebook. For this example I have used the California Housing dataset.
After successfully loading the data, our next step is to visualize this data. Seaborn is an excellent library that can be used to visualize the data.
#Visualise the data
plt.figure()
sns.heatmap(data.corr(), cmap='coolwarm')
plt.show()
sns.lmplot(x='median_income', y='median_house_value', data=train)
sns.lmplot(x='housing_median_age', y='median_house_value', data=train)
Feature engineering is the process of using domain knowledge to extract features from raw data via data mining techniques. For this model I have selected columns with only numerical values. For handling categorical values label encoding techniques are applied.
#Select appropriate features
data = data[['total_rooms', 'total_bedrooms', 'housing_median_age', 'median_income', 'population', 'households']]
data.info()
data['total_rooms'] = data['total_rooms'].fillna(data['total_rooms'].mean())
data['total_bedrooms'] = data['total_bedrooms'].fillna(data['total_bedrooms'].mean())
Feature Scaling basically helps to normalize the data within a particular range. Several common scaler classes are available that perform this scaling for us automatically.
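As a minimal sketch of this step (not code from the original article; the choice of StandardScaler is my own), the selected features could be scaled as follows:

# Hypothetical sketch: standardize the selected features to zero mean and unit variance.
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
data_scaled = scaler.fit_transform(data)   # returns a NumPy array of scaled features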
After selecting the desired parameters the next step is to import train_test_split from sklearn library which is used to split the dataset into training and testing data.
#Split the dataset into training and testing data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train, y, test_size = 0.2, random_state = 0)
y_train = y_train.reshape(-1,1)
y_test = y_test.reshape(-1,1)
After this SVR is imported from sklearn.svm and the model is fit over the training dataset.
# Fit the model over the training data
from sklearn.svm import SVR
regressor = SVR(kernel = 'rbf')
regressor.fit(X_train, y_train)
Here, in this particular example I have used the RBF kernel. Other parameters of the model are left to their default configurations. Once the model is fit over the training data, our model is ready for use.
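As a brief usage sketch (not from the original article; it assumes X_test and y_test were prepared in the same way as the training data), predictions and a simple error measure can be obtained like this:

# Hypothetical sketch: predict on the held-out data and compute the RMSE.
import numpy as np
from sklearn.metrics import mean_squared_error

y_pred = regressor.predict(X_test)
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print("Test RMSE:", rmse)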
Although Support Vector Regression is used rarely, it carries certain advantages, as mentioned below:
It is robust to outliers.
Decision model can be easily updated.
It has excellent generalization capability, with high prediction accuracy.
Its implementation is easy.
Some of the drawbacks faced by Support Vector Machines while handling regression problems are as mentioned below:
They are not suitable for large datasets.
In cases where the number of features for each data point exceeds the number of training data samples, the SVM will underperform.
The decision model does not perform very well when the data set has more noise, i.e. the target classes are overlapping.
With that, we have reached the end of this article. I hope this article would have helped you get a feel about the idea behind SVR algorithms. If you have any question or if you believe I have made any mistake, please contact me! You can get in touch with me via: Email or LinkedIn. | [
Amazon Interview Experience for SDE-II (Virtual Rounds) - GeeksforGeeks | 22 Aug, 2021
I got a call from an Amazon recruiter as I was referred by one of my friends for the role. She sent me the coding test link which I had to complete within a week. Once I completed the test, I got a call to schedule the interviews in two weeks. 3 interviews were held on Amazon Chime as per schedule. Post that, after 4-5 days, the HR informed me that they would like to proceed with the final bar raiser round, which happened in a couple of days. Following are the questions that were asked:
Online Round (Coding Test – 90 mins)
We are given the costs of a list of pants, shirts, shoes, skirts. We have a certain amount of cash with us, and we need to determine the total number of possible combinations which we can buy, given that we must buy one and only one of each type.
Eg: pants = [3, 5, 7], shirts = [4, 7, 8], skirts = [5, 8], shoes = [3], budget = 25
So in the above e.g., apart from the combination [7, 8, 8, 3], all others are possible.
Hint: Since we have to buy all, we can combine the first two lists and the last two lists, so we would have cost lists like pants_shirts = [...] and skirts_shoes = [...]; now we can just iterate over one list and binary search the remaining amount over the other list and add accordingly. A small sketch of this idea is given below.
It was quite trivial so I don't remember it exactly.
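A minimal Python sketch of the hint above (the function and variable names are my own; the original test's input format may have differed):

# Sketch: count outfit combinations within budget using pairwise sums and binary search.
from bisect import bisect_right
from itertools import product

def count_combinations(pants, shirts, skirts, shoes, budget):
    pants_shirts = sorted(p + s for p, s in product(pants, shirts))
    skirts_shoes = [sk + sh for sk, sh in product(skirts, shoes)]
    total = 0
    for cost in skirts_shoes:
        # All pants+shirts sums that still fit in the remaining budget.
        total += bisect_right(pants_shirts, budget - cost)
    return total

print(count_combinations([3, 5, 7], [4, 7, 8], [5, 8], [3], 25))  # prints 17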
Round 1:
The interviewer gave his introduction, asked me a bit on the kind of projects I’ve worked on. Then he started with a data structure problem.
Given a binary tree with the following TreeNode, create a copy of the tree without using any extra space.
TreeNode{
   left*, right*, random*, val
}
My solution: I first told a hashmap solution where I would maintain a mapping from the original node to the copy node in the new tree, and in the second traversal, I would be able to assign the random pointers as well. The interviewer agreed that this would work, but he wanted me to do this without the hashmap. It took me around 15-20 more mins to come up with the final code. I first appended the duplicate node to the left child of the original node, something like:
    A                  A
  B   C      ->      A'    C
                    B        C'
                   B'
This way, on iterating over the original nodes, we can assign the left and right pointers, and we need one more traversal to assign the random pointer. (A rough sketch of this approach is given below.)
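A rough Python sketch of that trick (the node layout mirrors the question; everything else, including the recursive traversals that still use the call stack, is my own illustration):

# Sketch: clone a binary tree with random pointers by interleaving copy nodes
# as left children, wiring the random pointers, then detaching the copies.
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = self.right = self.random = None

def clone(root):
    def insert_copies(node):
        if not node:
            return
        copy = TreeNode(node.val)
        copy.left, node.left = node.left, copy   # copy becomes node's left child
        insert_copies(copy.left)                 # original left subtree
        insert_copies(node.right)

    def set_random(node):
        if not node:
            return
        copy = node.left
        copy.random = node.random.left if node.random else None
        set_random(copy.left)
        set_random(node.right)

    def detach(node):
        if not node:
            return None
        copy = node.left
        node.left = copy.left                    # restore the original tree
        copy.left = detach(node.left)
        copy.right = detach(node.right)
        return copy

    if not root:
        return None
    insert_copies(root)
    set_random(root)
    return detach(root)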
We are given N solar systems, each solar system with M planets. We can move to any other planet of the same solar system in 1 light year. We can move from the Mth planet of the Kth solar system to the 1st planet of the (K + 1)th solar system in 1 light year. Apart from this we are also given a list of wormholes, where each wormhole specifies the entry planet and exit planet. Passing through a wormhole would also take 1 light year.
Now, given the X-starting planet and Y-destination planet, we need to find the minimum number of light-years that we would take to travel.
My Solution: I told him that I would create a graph (which was quite a ridiculous suggestion tbh :p) and then do bfs. He asked the time complexity of creating the graph and then applying my approach. The complexity was pretty bad, so I moved to a new solution. I suggested starting from X, adding all the neighbouring planets (and any wormhole exits from the planet) at a distance of 1, and doing the bfs on the fly without creating the graph. Since the time was less (as I had spent around 35-40 mins on the first question), I just coded a level-wise bfs and the interviewer seemed convinced. A rough sketch of that idea is given below.
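A rough Python sketch of the level-wise BFS (the planet numbering 0 .. N*M-1 and the wormhole input format are assumptions I am making for illustration):

# Sketch: on-the-fly level-wise BFS over planets; planet p belongs to solar system p // m.
from collections import defaultdict

def min_light_years(n, m, wormholes, start, dest):
    holes = defaultdict(list)
    for entry, exit_planet in wormholes:
        holes[entry].append(exit_planet)

    def neighbours(p):
        system = p // m
        # Any other planet of the same solar system costs 1 light year.
        for q in range(system * m, (system + 1) * m):
            if q != p:
                yield q
        # Mth planet of system K -> 1st planet of system K + 1.
        if p == (system + 1) * m - 1 and system + 1 < n:
            yield (system + 1) * m
        # Wormholes leaving this planet.
        yield from holes[p]

    visited = {start}
    frontier = [start]
    light_years = 0
    while frontier:
        if dest in frontier:
            return light_years
        next_frontier = []
        for p in frontier:
            for q in neighbours(p):
                if q not in visited:
                    visited.add(q)
                    next_frontier.append(q)
        frontier = next_frontier
        light_years += 1
    return -1  # destination not reachable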
Round 2:
This was taken by an engineering manager who asked me regarding my projects for around 10-15 mins and then we moved to a system design problem. He asked me to design Slack messenger.
I started by listing the functional and non-functional requirements (on which he questioned me a bit), then I moved to draw the high-level architecture. The components which I drew were the clients, gateway service (LB + authentication, etc.), Messaging Service, User Service, Web Socket Manager service, Fan Out service (I added this for the group messages thing, but he didn't interrogate much on that).
He asked me what would be the schema of my messages table and the scenarios in which the recipient user is online/offline. He also asked about the partitioning key and primary key of the 2-3 tables which I had made.
Round 3:
This was taken by an SDE III guy, who again asked me about my projects for like 10 mins and then moved on to a low level design question. He asked me to design the HackerRank platform.
Again I started with listing down the usecases which I would cover, and the interviewer asked me to write all the APIs which I would need to expose.
I made various classes like Question (subclassed into MCQ and CodingQuestion), Answer, Candidate, Test, QuestionBank, etc. A rough sketch of a few of these is given below.
Surprisingly (since this was an LLD round) he asked me the schema of the tables and which SQL/NoSQL I would choose and why. Then he asked me about the case when the question gets changed; I couldn't answer that, and later he mentioned that he was expecting something like an EditHistory inside each Test entity.
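For illustration, a tiny Python sketch of a few of those entities (the class and field names are my own guesses, not what was actually written in the round):

# Hypothetical sketch of some HackerRank-style entities for the LLD discussion.
from dataclasses import dataclass, field
from typing import List

@dataclass
class Question:
    question_id: str
    statement: str
    max_score: int

@dataclass
class MCQQuestion(Question):
    options: List[str] = field(default_factory=list)
    correct_option: int = 0

@dataclass
class CodingQuestion(Question):
    starter_code: str = ""
    test_cases: List[str] = field(default_factory=list)

@dataclass
class Test:
    test_id: str
    duration_minutes: int
    questions: List[Question] = field(default_factory=list)
    edit_history: List[str] = field(default_factory=list)  # per the interviewer's hint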
Round 4(Bar Raiser):
This was again taken by an engineering manager who discussed my projects in depth for around 20-25 mins. In the remaining time he asked me 2 dsa questions. (Yes I too was surprised that he didn’t ask anything regarding design).
Given a list of strings, group the anagrams together. (https://practice.geeksforgeeks.org/problems/print-anagrams-together/1)
Given two linked lists L1 and L2 where the head of the linked list points to the most significant digit, return the linked list created after subtracting these two lists. (https://practice.geeksforgeeks.org/problems/subtraction-in-linked-list/1)
NOTE:
In almost all the rounds, I was asked questions related to Amazon Leadership principles, so do make sure you go through those before sitting for the interview process. You can refer to this link(https://kraftshala.com/what-questions-to-expect-in-amazon-interview/) for practicing the same, I found it useful.
In the design rounds, the interviewer doesn’t expect the most ideal answer from you, and unless your choice of technology is outrageously wrong, he won’t pinpoint that.
Elasticsearch - Analysis | When a query is processed during a search operation, the content in any index is analyzed by the analysis module. This module consists of analyzer, tokenizer, tokenfilters and charfilters. If no analyzer is defined, then by default the built in analyzers, token, filters and tokenizers get registered with analysis module.
In the following example, we use a standard analyzer which is used when no other analyzer is specified. It will analyze the sentence based on the grammar and produce words used in the sentence.
POST _analyze
{
"analyzer": "standard",
"text": "Today's weather is beautiful"
}
On running the above code, we get the response as shown below −
{
"tokens" : [
{
"token" : "today's",
"start_offset" : 0,
"end_offset" : 7,
"type" : "",
"position" : 0
},
{
"token" : "weather",
"start_offset" : 8,
"end_offset" : 15,
"type" : "",
"position" : 1
},
{
"token" : "is",
"start_offset" : 16,
"end_offset" : 18,
"type" : "",
"position" : 2
},
{
"token" : "beautiful",
"start_offset" : 19,
"end_offset" : 28,
"type" : "",
"position" : 3
}
]
}
We can configure the standard analyzer with various parameters to meet our custom requirements.
In the following example, we configure the standard analyzer to have a max_token_length of 5.
For this, we first create an index with the analyzer having the max_token_length parameter.
PUT index_4_analysis
{
"settings": {
"analysis": {
"analyzer": {
"my_english_analyzer": {
"type": "standard",
"max_token_length": 5,
"stopwords": "_english_"
}
}
}
}
}
Next we apply the analyzer to a text as shown below. Note that the token "is" does not appear in the output: it is removed because the analyzer is configured with the English stopword list. Also, since max_token_length is set to 5, longer words such as "weather" and "beautiful" are broken into tokens of at most five characters.
POST index_4_analysis/_analyze
{
"analyzer": "my_english_analyzer",
"text": "Today's weather is beautiful"
}
On running the above code, we get the response as shown below −
{
"tokens" : [
{
"token" : "today",
"start_offset" : 0,
"end_offset" : 5,
"type" : "",
"position" : 0
},
{
"token" : "s",
"start_offset" : 6,
"end_offset" : 7,
"type" : "",
"position" : 1
},
{
"token" : "weath",
"start_offset" : 8,
"end_offset" : 13,
"type" : "",
"position" : 2
},
{
"token" : "er",
"start_offset" : 13,
"end_offset" : 15,
"type" : "",
"position" : 3
},
{
"token" : "beaut",
"start_offset" : 19,
"end_offset" : 24,
"type" : "",
"position" : 5
},
{
"token" : "iful",
"start_offset" : 24,
"end_offset" : 28,
"type" : "",
"position" : 6
}
]
}
The list of various analyzers and their description are given in the table shown below −
Standard analyzer (standard)
stopwords and max_token_length setting can be set for this analyzer. By default, stopwords list is empty and max_token_length is 255.
Simple analyzer (simple)
This analyzer is composed of lowercase tokenizer.
Whitespace analyzer (whitespace)
This analyzer is composed of whitespace tokenizer.
Stop analyzer (stop)
stopwords and stopwords_path can be configured. By default stopwords initialized to English stop words and stopwords_path contains path to a text file with stop words.
Tokenizers are used for generating tokens from a text in Elasticsearch. Text can be broken down into tokens by taking whitespace or other punctuations into account. Elasticsearch has plenty of built-in tokenizers, which can be used in custom analyzer.
An example of tokenizer that breaks text into terms whenever it encounters a character which is not a letter, but it also lowercases all terms, is shown below −
POST _analyze
{
"tokenizer": "lowercase",
"text": "It Was a Beautiful Weather 5 Days ago."
}
On running the above code, we get the response as shown below −
{
"tokens" : [
{
"token" : "it",
"start_offset" : 0,
"end_offset" : 2,
"type" : "word",
"position" : 0
},
{
"token" : "was",
"start_offset" : 3,
"end_offset" : 6,
"type" : "word",
"position" : 1
},
{
"token" : "a",
"start_offset" : 7,
"end_offset" : 8,
"type" : "word",
"position" : 2
},
{
"token" : "beautiful",
"start_offset" : 9,
"end_offset" : 18,
"type" : "word",
"position" : 3
},
{
"token" : "weather",
"start_offset" : 19,
"end_offset" : 26,
"type" : "word",
"position" : 4
},
{
"token" : "days",
"start_offset" : 29,
"end_offset" : 33,
"type" : "word",
"position" : 5
},
{
"token" : "ago",
"start_offset" : 34,
"end_offset" : 37,
"type" : "word",
"position" : 6
}
]
}
A list of Tokenizers and their descriptions are shown here in the table given below −
Standard tokenizer (standard)
This is built on a grammar-based tokenizer, and max_token_length can be configured for this tokenizer.
Edge NGram tokenizer (edgeNGram)
Settings like min_gram, max_gram, token_chars can be set for this tokenizer.
Keyword tokenizer (keyword)
This generates entire input as an output and buffer_size can be set for this.
Letter tokenizer (letter)
This captures the whole word until a non-letter is encountered.
Introduction to Sequelize in NodeJS | Sequelize follows a promise-based Node.js ORM structure for different databases like – Postgres, MySQL, MariaDB, SQLite and Microsoft SQL Server. Sequelize has multiple features which make it easy to implement and use.
Some of the main features of sequelize are as follows −
Sequelize is a third-party package.
It uses an Object-Relational Mapping to map objects. That's why it's called an ORM.
Sequelize supports solid transaction support along with eager and lazy loading concepts.
It can also perform read replication on databases.
Sequelize follows standardization, which means it has a single schema definition in the code. Standardization makes the schema easy to read and understand along with making changes.
In Sequelize, the queries are written in plain Javascript. Therefore, you don't need to learn SQL.
Before installing Sequelize, please check that the Node.js project is properly initialized on the system using the following command.
>> npm init -y
The MySQL dependency needs to be installed for Sequelize. You need to install mysql2, as Sequelize does not work with the mysql package. Therefore, install the mysql2 dependency by using the following command −
>> npm install mysql2
After installing mysql2, we will now install Sequelize by using the following command −
>> npm install sequelize
You need to add the following statement in every project to include sequelize.
const Sequelize = require('sequelize');
// Including the Sequelize module
const Sequelize = require('sequelize')
// Creating a sequelize object for DB connection
const sequelize = new Sequelize(
'YOUR_DB_NAME',
'YOUR_DB_USER_NAME',
'YOUR_DB_PASSWORD', {
dialect: 'mysql',
// Defining the default host
host: 'localhost'
}
);
// Exporting the sequelize object.
// To use it in other files as well.
module.exports = sequelize | [
{
"code": null,
"e": 1283,
"s": 1062,
"text": "Sequelize follows a promise-based Node.js ORM structure for different databases like – Postgres, MySQL, MariaDB, SQLite and Microsoft SQL Server. Sequelize have multiple features which makes it easy to implement and use."
},
{
"code": null,
"e": 1339,
"s": 1283,
"text": "Some of the main features of sequelize are as follows −"
},
{
"code": null,
"e": 1375,
"s": 1339,
"text": "Sequelize is a third-party package."
},
{
"code": null,
"e": 1411,
"s": 1375,
"text": "Sequelize is a third-party package."
},
{
"code": null,
"e": 1494,
"s": 1411,
"text": "It uses an Object-Relational Mapping to map objects. That's why its called an ORM."
},
{
"code": null,
"e": 1577,
"s": 1494,
"text": "It uses an Object-Relational Mapping to map objects. That's why its called an ORM."
},
{
"code": null,
"e": 1665,
"s": 1577,
"text": "Sequelize supports solid transaction support along with eager and lazy loading concept."
},
{
"code": null,
"e": 1753,
"s": 1665,
"text": "Sequelize supports solid transaction support along with eager and lazy loading concept."
},
{
"code": null,
"e": 1804,
"s": 1753,
"text": "It can also perform read replication on databases."
},
{
"code": null,
"e": 1855,
"s": 1804,
"text": "It can also perform read replication on databases."
},
{
"code": null,
"e": 2037,
"s": 1855,
"text": "Sequelize follows standardization, which means it has a single schema definition in the code. Standardization makes the schema easy to read and understand along with making changes."
},
{
"code": null,
"e": 2219,
"s": 2037,
"text": "Sequelize follows standardization, which means it has a single schema definition in the code. Standardization makes the schema easy to read and understand along with making changes."
},
{
"code": null,
"e": 2318,
"s": 2219,
"text": "In sequelize, the queries are written in plain Javascript. Therefore, you don't need to learn SQL."
},
{
"code": null,
"e": 2417,
"s": 2318,
"text": "In sequelize, the queries are written in plain Javascript. Therefore, you don't need to learn SQL."
},
{
"code": null,
"e": 2548,
"s": 2417,
"text": "Before installing sequelize, please check if the Node.js server is properly initialized on the system using the following command."
},
{
"code": null,
"e": 2679,
"s": 2548,
"text": "Before installing sequelize, please check if the Node.js server is properly initialized on the system using the following command."
},
{
"code": null,
"e": 2694,
"s": 2679,
"text": ">> npm init -y"
},
{
"code": null,
"e": 2896,
"s": 2694,
"text": "The MySQL dependency needs to be installed for sequelize. For Sequelize, you need to install mysql2 as it does not works on myql. Therefore install the mysql dependency by using the following command −"
},
{
"code": null,
"e": 3098,
"s": 2896,
"text": "The MySQL dependency needs to be installed for sequelize. For Sequelize, you need to install mysql2 as it does not works on myql. Therefore install the mysql dependency by using the following command −"
},
{
"code": null,
"e": 3120,
"s": 3098,
"text": ">> npm install mysql2"
},
{
"code": null,
"e": 3207,
"s": 3120,
"text": "After installing MySQL, we will now install Sequelize by using the following command −"
},
{
"code": null,
"e": 3294,
"s": 3207,
"text": "After installing MySQL, we will now install Sequelize by using the following command −"
},
{
"code": null,
"e": 3319,
"s": 3294,
"text": ">> npm install sequelize"
},
{
"code": null,
"e": 3398,
"s": 3319,
"text": "You need to add the following statement in every project to include sequelize."
},
{
"code": null,
"e": 3438,
"s": 3398,
"text": "const Sequelize = require('sequelize');"
},
{
"code": null,
"e": 3854,
"s": 3438,
"text": "// Including the Sequelize module\nconst Sequelize = require('sequelize')\n\n// Creating a sequelize object for DB connection\nconst sequelize = new Sequelize(\n 'YOUR_DB_NAME',\n 'YOUR_DB_USER_NAME',\n 'YOUR_DB_PASSWORD', {\n\n dialect: 'mysql',\n // Defining the default host\n host: 'localhost'\n }\n);\n\n// Exporting the sequelize object.\n// To use it in other files as well.\nmodule.exports = sequelize"
}
] |
CSS | writing-mode Property - GeeksforGeeks | 09 Nov, 2021
The writing-mode CSS property is used to signify whether the lines of text are laid out horizontally or vertically, and also the direction in which blocks progress.
Syntax:
writing-mode: horizontal-tb|vertical-rl|vertical-lr;
Default Value : Its default value is horizontal-tb.
Property values:
horizontal-tb: This mode lets the content flow horizontally from left to right, vertically from top to bottom. The next horizontal line is positioned below the previous line.
Syntax:
writing-mode: horizontal-tb;
Example:
<!DOCTYPE html>
<html>
<head>
    <title>writing-mode Property</title>
    <style>
        p.geek {
            width: 300px;
            height: 100px;
            border: 1px solid black;
            writing-mode: horizontal-tb;
            color: white;
            background: green;
        }
    </style>
</head>
<body style="text-align: center;">
    <h1 style="color:green;">GeeksforGeeks</h1>
    <p class="geek">
        Geeks Classes is a classroom program in Noida.
        This is a quick course to cover algorithms questions.
    </p>
</body>
</html>
Output:
vertical-rl: This mode lets the content flow vertically from top to bottom, horizontally from right to left. The next vertical line is positioned to the left of the previous line.
Syntax:
writing-mode: vertical-rl;
Example:
<!DOCTYPE html>
<html>
<head>
    <title>writing-mode Property</title>
    <style>
        p.geek {
            width: 200px;
            height: 200px;
            border: 1px solid black;
            writing-mode: vertical-rl;
            color: white;
            background: green;
        }
    </style>
</head>
<body style="text-align: center;">
    <h1 style="color:green;">GeeksforGeeks</h1>
    <p class="geek">
        Geeks Classes is a classroom program in Noida.
        This is a quick course to cover algorithms questions.
    </p>
</body>
</html>
Output:
vertical-lr: This mode lets the content flow vertically from top to bottom, horizontally from left to right. The next vertical line is positioned to the right of the previous line.
Syntax:
writing-mode: vertical-lr;
Example:
<!DOCTYPE html>
<html>
<head>
    <title>writing-mode Property</title>
    <style>
        p.geek {
            width: 200px;
            height: 200px;
            border: 1px solid black;
            writing-mode: vertical-lr;
            color: white;
            background: green;
        }
    </style>
</head>
<body style="text-align: center;">
    <h1 style="color:green;">GeeksforGeeks</h1>
    <p class="geek">
        Geeks Classes is a classroom program in Noida.
        This is a quick course to cover algorithms questions.
    </p>
</body>
</html>
Output:
Supported Browsers: The browsers that support the writing-mode property are listed below:
Google Chrome 48.0
Internet Explorer 12.0
Firefox 41.0
Opera 35.0
Apple Safari 11.0 | [
{
"code": null,
"e": 23666,
"s": 23638,
"text": "\n09 Nov, 2021"
},
{
"code": null,
"e": 23833,
"s": 23666,
"text": "The writing-mode CSS property is used to signify whether the lines of text are laid out horizontally or vertically and also the direction in which the block progress."
},
{
"code": null,
"e": 23841,
"s": 23833,
"text": "Syntax:"
},
{
"code": null,
"e": 23895,
"s": 23841,
"text": "writing-mode: horizontal-tb|vertical-rl|vertical-lr;\n"
},
{
"code": null,
"e": 23947,
"s": 23895,
"text": "Default Value : Its default value is horizontal-tb."
},
{
"code": null,
"e": 23964,
"s": 23947,
"text": "Property values:"
},
{
"code": null,
"e": 24820,
"s": 23964,
"text": "horizontal-tb:This mode lets the content flow horizontally from left to right, vertically from top to bottom. The next horizontal line is positioned below the previous line.Syntax:writing-mode: horizontal-tb;\nExample:<!DOCTYPE html><html> <head> <title>writing-mode Property</title> <style> p.geek { width: 300px; height: 100px; border: 1px solid black; writing-mode: horizontal-tb; color: white; background: green; } </style> </head> <body style = \"text-align: center;\"> <h1 style = \"color:green;\">GeeksforGeeks</h1> <p class=\"geek\"> Geeks Classes is a classroom program in Noida. This is a quick course to cover algorithms questions. </p> </body></html>Output:"
},
{
"code": null,
"e": 24828,
"s": 24820,
"text": "Syntax:"
},
{
"code": null,
"e": 24858,
"s": 24828,
"text": "writing-mode: horizontal-tb;\n"
},
{
"code": null,
"e": 24867,
"s": 24858,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>writing-mode Property</title> <style> p.geek { width: 300px; height: 100px; border: 1px solid black; writing-mode: horizontal-tb; color: white; background: green; } </style> </head> <body style = \"text-align: center;\"> <h1 style = \"color:green;\">GeeksforGeeks</h1> <p class=\"geek\"> Geeks Classes is a classroom program in Noida. This is a quick course to cover algorithms questions. </p> </body></html>",
"e": 25499,
"s": 24867,
"text": null
},
{
"code": null,
"e": 25507,
"s": 25499,
"text": "Output:"
},
{
"code": null,
"e": 26364,
"s": 25507,
"text": "vertical-rl:This mode lets the content flow vertically from top to bottom, horizontally from right to left. The next vertical line is positioned to the left of the previous line.Syntax:writing-mode: vertical-rl;\nExample:<!DOCTYPE html><html> <head> <title>writing-mode Property</title> <style> p.geek { width: 200px; height: 200px; border: 1px solid black; writing-mode: vertical-rl; color: white; background: green; } </style> </head> <body style = \"text-align: center;\"> <h1 style = \"color:green;\">GeeksforGeeks</h1> <p class=\"geek\"> Geeks Classes is a classroom program in Noida. This is a quick course to cover algorithms questions. </p> </body></html>Output:"
},
{
"code": null,
"e": 26372,
"s": 26364,
"text": "Syntax:"
},
{
"code": null,
"e": 26400,
"s": 26372,
"text": "writing-mode: vertical-rl;\n"
},
{
"code": null,
"e": 26409,
"s": 26400,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>writing-mode Property</title> <style> p.geek { width: 200px; height: 200px; border: 1px solid black; writing-mode: vertical-rl; color: white; background: green; } </style> </head> <body style = \"text-align: center;\"> <h1 style = \"color:green;\">GeeksforGeeks</h1> <p class=\"geek\"> Geeks Classes is a classroom program in Noida. This is a quick course to cover algorithms questions. </p> </body></html>",
"e": 27039,
"s": 26409,
"text": null
},
{
"code": null,
"e": 27047,
"s": 27039,
"text": "Output:"
},
{
"code": null,
"e": 27905,
"s": 27047,
"text": "vertical-lr:This mode lets the content flow vertically from top to bottom, horizontally from left to right. The next vertical line is positioned to the right of the previous line.Syntax:writing-mode: vertical-lr;\nExample:<!DOCTYPE html><html> <head> <title>writing-mode Property</title> <style> p.geek { width: 200px; height: 200px; border: 1px solid black; writing-mode: vertical-lr; color: white; background: green; } </style> </head> <body style = \"text-align: center;\"> <h1 style = \"color:green;\">GeeksforGeeks</h1> <p class=\"geek\"> Geeks Classes is a classroom program in Noida. This is a quick course to cover algorithms questions. </p> </body></html>Output:"
},
{
"code": null,
"e": 27913,
"s": 27905,
"text": "Syntax:"
},
{
"code": null,
"e": 27941,
"s": 27913,
"text": "writing-mode: vertical-lr;\n"
},
{
"code": null,
"e": 27950,
"s": 27941,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>writing-mode Property</title> <style> p.geek { width: 200px; height: 200px; border: 1px solid black; writing-mode: vertical-lr; color: white; background: green; } </style> </head> <body style = \"text-align: center;\"> <h1 style = \"color:green;\">GeeksforGeeks</h1> <p class=\"geek\"> Geeks Classes is a classroom program in Noida. This is a quick course to cover algorithms questions. </p> </body></html>",
"e": 28580,
"s": 27950,
"text": null
},
{
"code": null,
"e": 28588,
"s": 28580,
"text": "Output:"
},
{
"code": null,
"e": 28679,
"s": 28588,
"text": "Supported Browsers: The browsers that supports the writing-mode property are listed below:"
},
{
"code": null,
"e": 28698,
"s": 28679,
"text": "Google Chrome 48.0"
},
{
"code": null,
"e": 28721,
"s": 28698,
"text": "Internet Explorer 12.0"
},
{
"code": null,
"e": 28734,
"s": 28721,
"text": "Firefox 41.0"
},
{
"code": null,
"e": 28745,
"s": 28734,
"text": "Opera 35.0"
},
{
"code": null,
"e": 28763,
"s": 28745,
"text": "Apple Safari 11.0"
}
] |
Convert key-values list to flat dictionary in Python | When it is required to convert a dictionary that contains separate lists of keys and values into a flat dictionary, the ‘zip’ method can be used. It pairs the list of keys with the list of values element by element, and the result is converted back into a dictionary.
The zip method takes iterables, aggregates them into a tuple, and returns it as the result.
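For instance, here is a small standalone illustration of how zip pairs up two lists (a toy example added for clarity, not part of the original program):

nums = [1, 2, 3]
names = ['Jan', 'Feb', 'March']
# zip pairs the elements position by position
print(list(zip(nums, names)))   # [(1, 'Jan'), (2, 'Feb'), (3, 'March')]
print(dict(zip(nums, names)))   # {1: 'Jan', 2: 'Feb', 3: 'March'}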
Below is a demonstration of the same −
from itertools import product
my_dict = {'month_num' : [1, 2, 3, 4, 5, 6], 'name_of_month' : ['Jan', 'Feb', 'March', 'Apr', 'May', 'June']}
print("The dictionary is : ")
print(my_dict)
my_result = dict(zip(my_dict['month_num'], my_dict['name_of_month']))
print("The flattened dictionary is: ")
print(my_result)
The dictionary is :
{'month_num': [1, 2, 3, 4, 5, 6], 'name_of_month': ['Jan', 'Feb', 'March', 'Apr', 'May', 'June']}
The flattened dictionary is:
{1: 'Jan', 2: 'Feb', 3: 'March', 4: 'Apr', 5: 'May', 6: 'June'}
The required packages are imported into the environment.
A dictionary is defined, and is displayed on the console.
The ‘zip’ method is used to bind the keys and the values, and the result is converted back to a dictionary.
This is assigned to a variable.
It is displayed as output on the console. | [
{
"code": null,
"e": 1197,
"s": 1062,
"text": "When it is required to convert a dictionary, that contains pairs of key values into a flat list, dictionary comprehension can be used."
},
{
"code": null,
"e": 1270,
"s": 1197,
"text": "It iterates through the dictionary and zips them using the ‘zip’ method."
},
{
"code": null,
"e": 1362,
"s": 1270,
"text": "The zip method takes iterables, aggregates them into a tuple, and returns it as the result."
},
{
"code": null,
"e": 1401,
"s": 1362,
"text": "Below is a demonstration of the same −"
},
{
"code": null,
"e": 1412,
"s": 1401,
"text": " Live Demo"
},
{
"code": null,
"e": 1727,
"s": 1412,
"text": "from itertools import product\n\nmy_dict = {'month_num' : [1, 2, 3, 4, 5, 6], 'name_of_month' : ['Jan', 'Feb', 'March', 'Apr', 'May', 'June']}\n\nprint(\"The dictionary is : \")\nprint(my_dict)\n\nmy_result = dict(zip(my_dict['month_num'], my_dict['name_of_month']))\n\nprint(\"The flattened dictionary is: \")\nprint(my_result)"
},
{
"code": null,
"e": 1938,
"s": 1727,
"text": "The dictionary is :\n{'month_num': [1, 2, 3, 4, 5, 6], 'name_of_month': ['Jan', 'Feb', 'March', 'Apr', 'May', 'June']}\nThe flattened dictionary is:\n{1: 'Jan', 2: 'Feb', 3: 'March', 4: 'Apr', 5: 'May', 6: 'June'}"
},
{
"code": null,
"e": 1995,
"s": 1938,
"text": "The required packages are imported into the environment."
},
{
"code": null,
"e": 2052,
"s": 1995,
"text": "The required packages are imported into the environment."
},
{
"code": null,
"e": 2110,
"s": 2052,
"text": "A dictionary is defined, and is displayed on the console."
},
{
"code": null,
"e": 2168,
"s": 2110,
"text": "A dictionary is defined, and is displayed on the console."
},
{
"code": null,
"e": 2279,
"s": 2168,
"text": "The ‘zip’ method is used to bind the key and value of a dictionary, and it is again converted to a dictionary."
},
{
"code": null,
"e": 2390,
"s": 2279,
"text": "The ‘zip’ method is used to bind the key and value of a dictionary, and it is again converted to a dictionary."
},
{
"code": null,
"e": 2422,
"s": 2390,
"text": "This is assigned to a variable."
},
{
"code": null,
"e": 2454,
"s": 2422,
"text": "This is assigned to a variable."
},
{
"code": null,
"e": 2496,
"s": 2454,
"text": "It is displayed as output on the console."
},
{
"code": null,
"e": 2538,
"s": 2496,
"text": "It is displayed as output on the console."
}
] |
Ensemble learning: A case study from the 1994 US Census database | by Vinícius Campos | Towards Data Science | Before we start to understand what ensemble learning is and how it works, we need to know the dataset used in this case study.
The dataset used is a subset of the 1994 US Census database, which was provided by Barry Becker and is available at UCI Machine Learning Repository. The prediction task of this repository is to determine whether a person makes over 50K a year, for this the following attributes and values are provided:
high_income: target class.
age: continuous.
workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
fnlwgt: continuous.
education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
education-num: continuous.
marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
sex: Female, Male.
capital-gain: continuous.
capital-loss: continuous.
hours-per-week: continuous.
native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
Ensemble learning is a technique that combines other machine learning models to optimize and create a better one.
There are some different types of ensemble methods, such as boosting, stacking, pasting, bagging and random forest. In this case study the focus is on the last two methods.
Bootstrap Aggregating, or Bagging, is a technique which combines the bootstrapping and aggregating methods. The first method consists of dividing a dataset into n subsets with replacement; the idea of the second method is to create n models, one for each subset, and aggregate them to yield a final prediction.
The pasting method works similarly to bagging; the difference is in the bootstrapping step, where the split is made without replacement.
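As a small illustration of the difference, both behaviours can be obtained from scikit-learn's BaggingClassifier (a hypothetical sketch, not the grid-search code used later in this case study): bootstrap=True samples with replacement (bagging), while bootstrap=False samples without replacement (pasting).

from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

# bagging: every tree is trained on a bootstrap sample (drawn with replacement)
bagging = BaggingClassifier(DecisionTreeClassifier(), n_estimators=100, bootstrap=True)

# pasting: every tree is trained on a random subset drawn without replacement
pasting = BaggingClassifier(DecisionTreeClassifier(), n_estimators=100, bootstrap=False, max_samples=0.8)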
The random forest is a special type of ensemble algorithm that uses multiple decision trees to build its model. Each tree is trained with a different portion of the dataset. Usually, this split is made similarly to the bagging technique, and the final model is built of several decision tree models which, combined, produce the prediction of the model.
For this project we will use Python as the programming language, together with the pandas and scikit-learn libraries. Initially, let's get to know the “face” of our data:
# load the dataset
import pandas as pd

income = pd.read_csv("income.csv")
income.head()
Before we continue, we need to preprocess the data to change the categorical variables to numerical variables. After this process, this is a sample of the data:
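The preprocessing code itself is not shown in the article; a minimal sketch of one common way to do it (assuming the income DataFrame loaded above, and not necessarily identical to the code in the repo) is to label-encode every categorical column:

from sklearn.preprocessing import LabelEncoder

# turn every non-numeric column into integer codes
for col in income.select_dtypes(include="object").columns:
    income[col] = LabelEncoder().fit_transform(income[col])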
Now, we’ll use two great scikit-learn features, called Pipeline and GridSearchCV, which allow us to test automatically several hyperparameters of different models.
# split-out train/validation and test dataset
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    income.drop(labels="high_income", axis=1),
    income["high_income"],
    test_size=0.20,
    random_state=seed,
    shuffle=True,
    stratify=income["high_income"])
# The full preprocessing pipeline with an estimator as the final step.
# The feature-selection step ("fs") is included so that the fs__ parameters
# in the search space below are valid.
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier
from sklearn.model_selection import KFold, GridSearchCV

# seed, num_folds and scoring are assumed to be defined earlier,
# e.g. seed = 42, num_folds = 10 and a scoring dict that contains an "AUC" entry.
pipe = Pipeline(steps=[("fs", SelectKBest(chi2)), ("clf", RandomForestClassifier())])

# create a dictionary with the hyperparameters
search_space = [
    {"clf": [DecisionTreeClassifier()],
     "clf__criterion": ["gini", "entropy"],
     "clf__splitter": ["best", "random"],
     "clf__random_state": [seed],
     "fs__score_func": [chi2],
     "fs__k": [4, 6, 8]},
    {"clf": [RandomForestClassifier()],
     "clf__n_estimators": [200, 300],
     "clf__criterion": ["gini", "entropy"],
     "clf__max_leaf_nodes": [32, 64, 128],
     "clf__random_state": [seed],
     "fs__score_func": [chi2],
     "fs__k": [4, 6, 8]},
    {"clf": [BaggingClassifier(DecisionTreeClassifier(random_state=42))],
     "clf__base_estimator__criterion": ["gini", "entropy"],
     "clf__base_estimator__splitter": ["best", "random"],
     "clf__oob_score": [True],
     "clf__n_estimators": [200, 300],
     "clf__bootstrap": [True],
     "fs__score_func": [chi2],
     "fs__k": [4, 6, 8]},
    {"clf": [BaggingClassifier(DecisionTreeClassifier(random_state=42))],
     "clf__base_estimator__criterion": ["gini", "entropy"],
     "clf__base_estimator__splitter": ["best", "random"],
     "clf__oob_score": [False],
     "clf__n_estimators": [200, 300],
     "clf__bootstrap": [False],
     "fs__score_func": [chi2],
     "fs__k": [4, 6, 8]}]

# create grid search
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)  # shuffle=True so that random_state takes effect
grid = GridSearchCV(estimator=pipe, param_grid=search_space, cv=kfold,
                    scoring=scoring, return_train_score=True, n_jobs=-1,
                    refit="AUC", verbose=10)
# fit grid search
best_model = grid.fit(X_train, y_train)
This configuration allows the algorithm to use all the available cores, and it will test 960 different configurations of decision trees, random forests and bagging classifiers (with decision trees as the internal model) in a parallel way.
After this process, the best model produced by the GridSearchCV was a random forest model with the following configuration:
Before we decide if this is the best model, let's check the accuracy of the model on the train and test datasets. The purpose of this comparison is to verify whether the model is underfitted or overfitted.
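A minimal sketch of this check (assuming the fitted grid search object from above; the exact evaluation code in the original repo may differ):

from sklearn.metrics import accuracy_score

train_acc = accuracy_score(y_train, best_model.predict(X_train))
test_acc = accuracy_score(y_test, best_model.predict(X_test))
print("train accuracy:", train_acc)
print("test accuracy:", test_acc)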
As we can see above, the accuracy for both sets is good, and more than that, the values of train and test accuracy are very close. Thus, these results show us that the best model produced by GridSearchCV generalizes well.
In a random forest classifier we can set a hyperparameter called bootstrap, which defines whether samples will be drawn with replacement or not. Although the best model was selected with this parameter as false, which tries to help the model to minimize the chance of overfitting, several other models presented similar results when the parameter was set as true, as we can see in the image below.
So, for this dataset, we achieved good results regardless of the value of the bootstrap variable. However, the worst results due to possible overfitting came with bootstrap equal to true.
Now, let's check the importance of each feature of the dataset for our model. For this task, we used two tools: the feature importances from the random forest classifier, and the library SHAP (SHapley Additive exPlanations), which is a unified approach to explain the output of any machine learning model.
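A minimal sketch of how both views can be produced (rf and X are assumed names for the fitted random forest and the features it was trained on; the plotting code in the original repo may differ):

import shap

# impurity-based importances from the random forest itself
print(sorted(zip(rf.feature_importances_, X.columns), reverse=True))

# SHAP values for the same model
explainer = shap.TreeExplainer(rf)
shap_values = explainer.shap_values(X)
shap.summary_plot(shap_values, X)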
The left image was produced by feature_importances_ from scikit-learn and the right image was produced by SHAP. It is important to see that both tools presented similar results and the 4 most important features were the same, changing only the positions. With these results it is easy to know the information that most influences the model, which greatly helps understanding and solving the problem. | [
{
"code": null,
"e": 302,
"s": 172,
"text": "Before we start to understand what is ensemble learning and how is it works, we need to know the dataset used in this case study."
},
{
"code": null,
"e": 605,
"s": 302,
"text": "The dataset used is a subset of the 1994 US Census database, which was provided by Barry Becker and is available at UCI Machine Learning Repository. The prediction task of this repository is to determine whether a person makes over 50K a year, for this the following attributes and values are provided:"
},
{
"code": null,
"e": 632,
"s": 605,
"text": "high_income: target class."
},
{
"code": null,
"e": 649,
"s": 632,
"text": "age: continuous."
},
{
"code": null,
"e": 763,
"s": 649,
"text": "workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked."
},
{
"code": null,
"e": 783,
"s": 763,
"text": "fnlwgt: continuous."
},
{
"code": null,
"e": 945,
"s": 783,
"text": "education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool."
},
{
"code": null,
"e": 972,
"s": 945,
"text": "education-num: continuous."
},
{
"code": null,
"e": 1095,
"s": 972,
"text": "marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse."
},
{
"code": null,
"e": 1325,
"s": 1095,
"text": "occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces."
},
{
"code": null,
"e": 1407,
"s": 1325,
"text": "relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried."
},
{
"code": null,
"e": 1474,
"s": 1407,
"text": "race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black."
},
{
"code": null,
"e": 1493,
"s": 1474,
"text": "sex: Female, Male."
},
{
"code": null,
"e": 1519,
"s": 1493,
"text": "capital-gain: continuous."
},
{
"code": null,
"e": 1545,
"s": 1519,
"text": "capital-loss: continuous."
},
{
"code": null,
"e": 1573,
"s": 1545,
"text": "hours-per-week: continuous."
},
{
"code": null,
"e": 2005,
"s": 1573,
"text": "native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands."
},
{
"code": null,
"e": 2118,
"s": 2005,
"text": "Ensemble learning is a technique that combine other machine learning models to optimize and create a better one."
},
{
"code": null,
"e": 2287,
"s": 2118,
"text": "The are some different types of ensemble methods, such as: boosting, stacking, pasting, bagging and random forest. In this case study the focus is the last two methods."
},
{
"code": null,
"e": 2583,
"s": 2287,
"text": "Bootstrap Aggregating or Bagging is a technique which combines bootstrapping and aggregating methods. The first method consists in divide a dataset in n subsets with replacement, the idea of second method is create n models, one for each subset and aggregate them to yield in a final prediction."
},
{
"code": null,
"e": 2716,
"s": 2583,
"text": "Pasting method works similar to bagging, but the difference is that bootstrapping step, where the split is made without replacement."
},
{
"code": null,
"e": 3054,
"s": 2716,
"text": "The random forest is a special type of ensemble algorithm that uses multiple decision trees to built its model. Each tree is trained with different portion of dataset. Usually, this split is made similarly with bagging technique and the final model is built of several decision tree models which combined produce the prediction of model."
},
{
"code": null,
"e": 3205,
"s": 3054,
"text": "For this project we will use python as programming language, and the libraries: pandas and scikit-learn. Initially, let’s know the “face” of our data:"
},
{
"code": null,
"e": 3271,
"s": 3205,
"text": "# load the datasetincome = pd.read_csv(\"income.csv\")income.head()"
},
{
"code": null,
"e": 3432,
"s": 3271,
"text": "Before we continue, we need to preprocess the data to change the categorical variables to numerical variables. After this process, this is a sample of the data:"
},
{
"code": null,
"e": 3596,
"s": 3432,
"text": "Now, we’ll use two great scikit-learn features, called Pipeline and GridSearchCV, which allow us to test automatically several hyperparameters of different models."
},
{
"code": null,
"e": 3886,
"s": 3596,
"text": "# split-out train/validation and test datasetX_train, X_test, y_train, y_test = train_test_split(income.drop(labels=\"high_income\",axis=1), income[\"high_income\"],test_size=0.20,random_state=seed,shuffle=True,stratify=income[\"high_income\"])"
},
{
"code": null,
"e": 5870,
"s": 3886,
"text": "# The full pipeline as a step in another pipeline with an estimator as the final steppipe = Pipeline(steps = [(\"clf\",RandomForestClassifier())])# create a dictionary with the hyperparameterssearch_space = [{\"clf\":[DecisionTreeClassifier()], \"clf__criterion\": [\"gini\",\"entropy\"], \"clf__splitter\": [\"best\",\"random\"], \"clf__random_state\": [seed], \"fs__score_func\":[chi2], \"fs__k\":[4,6,8]}, {\"clf\":[RandomForestClassifier()], \"clf__n_estimators\": [200,300], \"clf__criterion\": [\"gini\",\"entropy\"], \"clf__max_leaf_nodes\": [32,64,128], \"clf__random_state\": [seed], \"fs__score_func\":[chi2], \"fs__k\":[4,6,8]}, {'clf' [BaggingClassifier(DecisionTreeClassifier(random_state=42))], \"clf__base_estimator__criterion\": ['gini','entropy'], \"clf__base_estimator__splitter\": ['best','random'], \"clf__oob_score\": [True], \"clf__n_estimators\": [200,300], \"clf__bootstrap\":[True], \"fs__score_func\":[chi2], \"fs__k\":[4,6,8]}, {'clf': [BaggingClassifier(DecisionTreeClassifier(random_state=42))], \"clf__base_estimator__criterion\": ['gini','entropy'], \"clf__base_estimator__splitter\": ['best','random'], \"clf__oob_score\": [False], \"clf__n_estimators\": [200,300], \"clf__bootstrap\":[False], \"fs__score_func\":[chi2], \"fs__k\":[4,6,8]}]# create grid searchkfold = KFold(n_splits=num_folds,random_state=seed)grid = GridSearchCV(estimator=pipe, param_grid=search_space, cv=kfold, scoring=scoring, return_train_score=True, n_jobs=-1, refit=\"AUC\", verbose=10)"
},
{
"code": null,
"e": 5926,
"s": 5870,
"text": "# fit grid searchbest_model = grid.fit(X_train,y_train)"
},
{
"code": null,
"e": 6155,
"s": 5926,
"text": "This configuration allow the algorithm uses all the available cores, which will test 960 different configuration of decision trees, random forests and bagging classifiers(with decision trees as internal model) in a parallel way."
},
{
"code": null,
"e": 6279,
"s": 6155,
"text": "After this process, the best model produced by the GridSearchCV was a random forest model with the following configuration:"
},
{
"code": null,
"e": 6473,
"s": 6279,
"text": "Before we define if this is the best model, let’s check the accuracy of the model to train and test datasets. The purpose of this comparison is verify if the model is underfitted or overfitted."
},
{
"code": null,
"e": 6699,
"s": 6473,
"text": "As we can see above, the accuracy for both sets are good, and more than that, the values of train and test accuracy are very close. Thus, this results show us that the best model produced by GridSearchCV was well generalized."
},
{
"code": null,
"e": 7097,
"s": 6699,
"text": "In a random forest classifier we can set a hyperparameter called bootstrap, which defines weather samples will be trained with replacement or not. Although the best model was selected with this parameter as false, which tries to help the model to minimize the chance of overfitting, several other models present similar results when the parameter was set as true, as we can see in the image below."
},
{
"code": null,
"e": 7285,
"s": 7097,
"text": "So, for this dataset, we achieved good results regardless of the value of the bootstrap variable. However, the worst results due to possible overfitting came with bootstrap equal to true."
},
{
"code": null,
"e": 7585,
"s": 7285,
"text": "Now, lets check the importance of each feature of the dataset for our model. For this task, we used two tools: The feature importance, from random forest classifier and the library SHAP (SHapley Additive exPlanations), which is a unified approach to explain the output of any machine learning model."
}
] |
Convert number to list of integers in Python | As part of data manipulation in Python we may sometimes need to convert a given number into a list which contains the digits from that number. In this article we'll see the approaches to achieve this.
In the below approach we apply the str function to the given number and then convert each character back to an integer using the int function. Finally, a list comprehension wraps the result into a list.
numA = 1342
# Given number
print("Given number : \n", numA)
res = [int(x) for x in str(numA)]
# Result
print("List of number: \n",res)
Running the above code gives us the following result −
Given number :
1342
List of number:
[1, 3, 4, 2]
We first apply the str function to the given number. Then we apply the int function to each character using map. Finally we wrap the result with the list function.
numA = 1342
# Given number
print("Given number : \n", numA)
res = list(map(int, str(numA)))
# Result
print("List of number: \n",res)
Running the above code gives us the following result −
Given number :
1342
List of number:
[1, 3, 4, 2] | [
{
"code": null,
"e": 1263,
"s": 1062,
"text": "As part of data manipulation in Python we may sometimes need to convert a given number into a list which contains the digits from that number. In this article we'll see the approaches to achieve this."
},
{
"code": null,
"e": 1428,
"s": 1263,
"text": "In the below approach we apply the str function to the given number and then convert into integer through identity function. Finally we wrap the result into a list."
},
{
"code": null,
"e": 1439,
"s": 1428,
"text": " Live Demo"
},
{
"code": null,
"e": 1574,
"s": 1439,
"text": "numA = 1342\n# Given number\nprint(\"Given number : \\n\", numA)\nres = [int(x) for x in str(numA)]\n# Result\nprint(\"List of number: \\n\",res)"
},
{
"code": null,
"e": 1629,
"s": 1574,
"text": "Running the above code gives us the following result −"
},
{
"code": null,
"e": 1678,
"s": 1629,
"text": "Given number :\n1342\nList of number:\n[1, 3, 4, 2]"
},
{
"code": null,
"e": 1827,
"s": 1678,
"text": "We fast apply the str function to the given number. Then apply the in function repeatedly using map. Finally keep the result inside a list function."
},
{
"code": null,
"e": 1838,
"s": 1827,
"text": " Live Demo"
},
{
"code": null,
"e": 1971,
"s": 1838,
"text": "numA = 1342\n# Given number\nprint(\"Given number : \\n\", numA)\nres = list(map(int, str(numA)))\n# Result\nprint(\"List of number: \\n\",res)"
},
{
"code": null,
"e": 2026,
"s": 1971,
"text": "Running the above code gives us the following result −"
},
{
"code": null,
"e": 2075,
"s": 2026,
"text": "Given number :\n1342\nList of number:\n[1, 3, 4, 2]"
}
] |
Sentiment Analysis with Deep Learning of Netflix Reviews | by Artem Oppermann | Towards Data Science | In this article, I will cover the topic of Sentiment Analysis and how to implement a Deep Learning model that can recognize and classify human emotions in Netflix reviews.
One of the most important elements for businesses is being in touch with their customer base. It is vital for these firms to know exactly what consumers or clients think of new and established products or services, recent initiatives, and customer service offerings.
Sentiment analysis is one way to accomplish this necessary task.
Sentiment Analysis is a field of Natural Language Processing (NLP) that builds models that try to identify and classify attributes of the expression e.g.:
Polarity: if the speaker expresses a positive or negative opinion,
Subject: the thing that is being talked about,
Opinion holder: the person, or entity that expresses the opinion.
In a world where we generate 2.5 quintillion bytes of data every day, sentiment analysis has become a key tool for making sense of that data. This has allowed companies to get key insights and automate all kind of processes.
Sentiment Analysis can help to automatically transform the unstructured information into structured data of public opinions about products, services, brands, politics or any other topic that people can express opinions about. This data can be very useful for commercial applications like marketing analysis, public relations, product reviews, net promoter scoring, product feedback, and customer service.
In the following, I will show you how to implement a Deep Learning model that can classify Netflix reviews as positive or negative. The model will take a whole review as an input (word after word) and provide percentage ratings for checking whether the review conveys a positive or negative sentiment.
I am using a dataset that contains roughly 5000 negative and 5000 positive reviews. Here are 5 sample reviews from the dataset that will be classified by the model at the end of the article:
"The film is a hoot and is just as good if not better than much of what s on saturday morning tv, especially the pseudo educational stuff we all can’t stand.” "The things this movie tries to get the audience to buy just won’t fly with most intelligent viewers.”"Although life or something like it is very much in the mold of feel good movies, the cast and director stephen herek’s polished direction pour delightfully piquant wine from aged bottles.” "This is the case of a pregnant premise being wasted by a script that takes few chances and manages to insult the intelligence of everyone in the audience.” "One of the finest most humane and important holocaust movies ever made."
The deep learning model + all necessary data can be found in my GitHub repo.
Let's begin with some theory.
Recurrent Neural Networks (RNNs) are popular models that have shown great promise in many NLP tasks.
RNN’s make use of sequential information such as text. In a “traditional” feedforward neural network we assume that all inputs are independent of each other. But for many tasks that’s a very bad idea. A sentence, for example, has a clear grammatical structure and order, where each word depends on the previous word. If you want your neural network to learn the meaning (or sentiment in our case) the network must know which words came in which order.
RNNs are called recurrent because they perform the same task for every element of a sequence, with the output being dependent on the previous computations. Another way to think about RNNs is that they have a “memory” which captures information about what has been calculated so far. Here is what a typical RNN looks like:
x(t-1), x(t), x(t+1) are sequential inputs that depend on each other (such as words in a sentence). y(t-1), y(t), y(t+1) are the outputs. Unique to an RNN is the fact that the calculation of the current hidden state h(t) of the neurons for the input x(t) depends on the previous hidden state h(t-1) for the previous input x(t-1). Wxh and Whh are weight matrices that connect the input x(t) with the hidden layer h(t), and h(t) with h(t-1) respectively. This way we introduce a recurrence to the neural network which can be considered as a memory of the previous inputs. In theory, vanilla RNNs can this way make use of information in arbitrarily long sequences, but in practice, they are limited to looking back only a few steps.
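To make the recurrence concrete, here is a minimal numpy sketch of a single vanilla RNN step (illustrative only, with made-up dimensions; this is not the model used later in the article):

import numpy as np

input_size, hidden_size = 8, 16
Wxh = np.random.randn(hidden_size, input_size)    # connects x(t) to h(t)
Whh = np.random.randn(hidden_size, hidden_size)   # connects h(t-1) to h(t)

def rnn_step(x_t, h_prev):
    # the new hidden state depends on the current input AND the previous hidden state
    return np.tanh(Wxh @ x_t + Whh @ h_prev)

h = np.zeros(hidden_size)
for x_t in np.random.randn(5, input_size):        # a "sentence" of 5 word vectors
    h = rnn_step(x_t, h)

In this plain form, the influence of early inputs fades quickly as the sequence grows, which is the practical limitation mentioned above.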
This is where LSTMs come in handy.
Long Short-Term Memory networks — usually just called “LSTMs” — are a special kind of RNN, capable of learning long-term dependencies. LSTMs don’t have a fundamentally different architecture from RNNs, but they incorporate additional components.
The key to LSTMs is the cell state C(t), the horizontal line running through the top of the diagram. A cell state is an additional way to store memory, besides just only using the hidden state h(t). However, C(t) makes it possible that LSTMs can work with much longer sequences in opposite to vanilla RNNs.
Furthermore, LSTMs have the ability to remove or add information to the cell state, carefully regulated by structures called gates. Gates are a way to optionally let information through. An LSTM has three of these gates, to protect and control the cell state.
Forget Gate: After getting the hidden state h(t-1) of the previous input x(t-1), the forget gate helps us to make decisions about what must be removed from the h(t-1) state, thus keeping only the relevant information.
Input Gate: In the input gate, we decide to add new stuff from the present input x(t) to our present cell state C(t).
Output Gate: The output gate as the name suggests, decides what to output from the current cell state C(t) to the next C(t+1). For the language model example, since it just saw a subject, it might want to output information relevant to a verb, in case that’s what is coming next. For example, it might output whether the subject is singular or plural so that we know what form a verb should be conjugated into if that’s what follows next.
Behind each of these states are separate neural networks. As you can imagine this makes LSTMs quite complex. At this point, I won't go much more into the detail about LSTMs.
Before we can use the reviews as inputs for the recurrent neural network it is required to do some preprocessing on the data. Our main purpose here is to shrink the observation space.
Consider the words such as ”Something” and “something”. For us humans these words have the same meaning, the only difference between them is that the first word is capitalized, because it may be the first word in a sentence. But for the neural network, these words will have (at the beginning at least) a different meaning because of their different spelling. Only during training, the neural network may or may not learn to recognize that these words mean the same. Our aim is to prevent such misconceptions.
Because of this, the first step of preprocessing is to make all words lowercase.
Special characters such as . , ! ? ‘ etc. do not contribute to the sentiment of a review and hence can be removed.
Consider the following unprocessed review sample:
"Although life or something like it is very much in the mold of feel good movies, the cast and director stephen herek’s polished direction pour delightfully piquant wine from aged bottles.”
After we do the mentioned preprocessing steps the review sample look as follows:
"although life or something like it is very much in the mold of feel good movies the cast and director stephen hereks polished direction pour delightfully piquant wine from aged bottles”
The preprocessing is applied to every review in the dataset.
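A minimal sketch of such a preprocessing helper (an assumed implementation for illustration, not necessarily identical to the one in the repo):

import re

def preprocess(review):
    review = review.lower()                       # make all words lowercase
    review = re.sub(r"[^a-z0-9\s]", "", review)   # remove special characters
    return review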
Another major step is to create a so-called Word-To-Index map, which assigns a unique integer value to each word in the dataset. The dataset I used in this project that contains all positive and negative reviews consists of 18339 unique words. Thus the word-to-index map has the same number of entries. This number is also called the vocabulary size.
The first and last entries in the word-to-index map that I have obtained look as follows:
This step of assigning a unique integer to words in the dataset is crucial because we can not feed in string data into a neural network. Instead, word-to-index allows us to use integers to represent whole sentences and reviews. Consider the following review:
"the things this movie tries to get the audience to buy just wont fly with most intelligent viewers”
Using word-to-index map, the review can be represented by an integer array, where each integer represents a word according to the map:
[0, 5094, 147, 81, 1269, 5, 532, 0, 1303, 5, 1835, 652, 236, 1101, 125, 188, 712, 855]
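A minimal sketch of how such a word-to-index map can be built and applied (hypothetical variable names; the implementation in the repo may differ):

reviews = ["the things this movie tries to get the audience to buy just wont fly with most intelligent viewers",
           "one of the finest most humane and important holocaust movies ever made"]

# assign a unique integer to every word in the dataset
vocab = sorted({word for review in reviews for word in review.split()})
word_to_index = {word: i for i, word in enumerate(vocab)}

# represent a review as an array of integers
encoded_review = [word_to_index[word] for word in reviews[0].split()]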
Of course, a neural network can take neither a string nor a single integer value as input. Instead, we must use Word Embeddings.
Word Embeddings are a distributed representation for text that is perhaps one of the key breakthroughs for the impressive performance of deep learning methods on challenging NLP problems. Word Embeddings are in fact a class of techniques where individual words are represented by a real-valued vector, often with tens or hundreds of dimensions. Each word is mapped to one specific vector and the vector values are learned by the neural network.
This is contrasted to the thousands or millions of dimensions required for sparse word representations, such as a one-hot encoding. For instance, we can embed the words “although” and “life” as small dense vectors, for example:
although = [0.8 1.0 4.2 7.5 3.6]
life     = [8.3 5.7 7.8 4.6 2.5]
Each vector that represents a word in the dataset is obtained from a large matrix, called embedding-matrix. The number of rows of this matrix represents the dimensionality of the word embedding, the number of columns represents the vocabulary size or number of unique words in the dataset. Thus each column of this matrix represents an embedding vector for a unique word in the dataset.
How do we know which column represents which word? This is where we use the word-to-index map. Consider you want to get the embedding vector for the word “although”, according to the word-to-index map this word is represented by the number 2511. In the next step, it is necessary to create a one-hot-encoded vector of size 18339 (number of words in the dataset), where each entry is 0 except for the 2511th entry which has the value of 1.
By doing a dot-product between the embedding matrix and the one-hot-encoded vector we obtain the 2511th column of the matrix, which is the embedding vector for the word “although”.
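In numpy terms, the lookup described above is just a matrix-vector product (a toy sketch; the embedding dimension of 100 is made up, while the vocabulary size and the word index come from the example above):

import numpy as np

vocab_size, embedding_dim = 18339, 100
embedding_matrix = np.random.randn(embedding_dim, vocab_size)

word_index = 2511                    # index of "although" in the word-to-index map
one_hot = np.zeros(vocab_size)
one_hot[word_index] = 1.0

# the dot product picks out the 2511th column: the embedding vector of "although"
embedding_vector = embedding_matrix @ one_hot
# (in practice one simply indexes the column: embedding_matrix[:, word_index])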
This way we can feed whole string-paragraphs or Netflix reviews into an LSTM. We just look up for each word the integer value in the word-to-index map, create the appropriate one-hot-encoded vector and perform a dot-product with the matrix. The review is then fed word by word (vector by vector) into the LSTM network.
So far, you have seen how to preprocess the data and how to feed in the reviews in the LSTM network. Now, let's discuss how we can finally get the sentiment of a given review.
For each time step t, the LSTM network receives an input vector x(t) which results in the output vector y(t). This process is repeated until x(n), n being the number of words in the review. Let's say n=20 words. Until x(n) the LSTM network produced y(n) output vectors. Each of these 20 vectors represents something, but not the sentiment we are looking for. Rather the vectors y are an encoded representation of features of the review that (according to the neural network) will be important in determining the sentiment.
y(8) represents the features the neural networks recognized for the first 8 words of the review. y(20), on the other hand, represents the features for the whole review. Although it is sufficient to use only the last output vector y(20) in practice, I have found that it leads to more accurate results if we use all vectors y(0) — y(20) for determining of the sentiment. This can be achieved by computing the mean value over all vectors. Let's call this mean value vector y_mean.
Finally, the feature representation of the review that is encoded in y_mean can be used to classify the review into the categories of being positive or being negative. In order to do so, it is required to add a final classification layer, which is nothing else than the dot product between y_mean and another weight matrix W.
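A minimal Keras-style sketch of this architecture (an illustration of the idea only, not the author's original code from the repo):

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=18339, output_dim=100),  # word embeddings
    tf.keras.layers.LSTM(64, return_sequences=True),             # one output vector y(t) per word
    tf.keras.layers.GlobalAveragePooling1D(),                    # y_mean: average over all y(t)
    tf.keras.layers.Dense(2, activation="softmax")               # positive / negative
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])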
The process of sentiment analysis I just described is implemented in a deep learning model in my GitHub repo. You are welcome to check it out and try it for yourself. After the model is trained, it can perform sentiment analysis on yet unseen reviews:
Test Samples:

Review: "the film is a hoot and is just as good if not better than much of whats on saturday morning tv especially the pseudo educational stuff we all cant stand"
pos. sentiment: 0.96 %
neg. sentiment: 0.04 %

Review: "the things this movie tries to get the audience to buy just wont fly with most intelligent viewers"
pos. sentiment: 0.11 %
neg. sentiment: 0.89 %

Review: "although life or something like it is very much in the mold of feel good movies the cast and director stephen hereks polished direction pour delightfully piquant wine from aged bottles"
pos. sentiment: 0.97 %
neg. sentiment: 0.03 %

Review: "this is the case of a pregnant premise being wasted by a script that takes few chances and manages to insult the intelligence of everyone in the audience"
pos. sentiment: 0.02 %
neg. sentiment: 0.98 % | [
{
"code": null,
"e": 344,
"s": 172,
"text": "In this article, I will cover the topic of Sentiment Analysis and how to implement a Deep Learning model that can recognize and classify human emotions in Netflix reviews."
},
{
"code": null,
"e": 609,
"s": 344,
"text": "One of the most important elements for businesses is being in touch with its customer base. It is vital for these firms to know exactly what consumers or clients think of new and established products or services, recent initiatives, and customer service offerings."
},
{
"code": null,
"e": 674,
"s": 609,
"text": "Sentiment analysis is one way to accomplish this necessary task."
},
{
"code": null,
"e": 829,
"s": 674,
"text": "Sentiment Analysis is a field of Natural Language Processing (NLP) that builds models that try to identify and classify attributes of the expression e.g.:"
},
{
"code": null,
"e": 896,
"s": 829,
"text": "Polarity: if the speaker expresses a positive or negative opinion,"
},
{
"code": null,
"e": 943,
"s": 896,
"text": "Subject: the thing that is being talked about,"
},
{
"code": null,
"e": 1009,
"s": 943,
"text": "Opinion holder: the person, or entity that expresses the opinion."
},
{
"code": null,
"e": 1234,
"s": 1009,
"text": "In a world where we generate 2.5 quintillion bytes of data every day, sentiment analysis has become a key tool for making sense of that data. This has allowed companies to get key insights and automate all kind of processes."
},
{
"code": null,
"e": 1639,
"s": 1234,
"text": "Sentiment Analysis can help to automatically transform the unstructured information into structured data of public opinions about products, services, brands, politics or any other topic that people can express opinions about. This data can be very useful for commercial applications like marketing analysis, public relations, product reviews, net promoter scoring, product feedback, and customer service."
},
{
"code": null,
"e": 1941,
"s": 1639,
"text": "In the following, I will show you how to implement a Deep Learning model that can classify Netflix reviews as positive or negative. The model will take a whole review as an input (word after word) and provide percentage ratings for checking whether the review conveys a positive or negative sentiment."
},
{
"code": null,
"e": 2134,
"s": 1941,
"text": "I am using a dataset that contains roughly 5000 negative and 5000 positive reviews. Here are 5 samples reviews from the dataset, that at the end of the article will be classified by the model:"
},
{
"code": null,
"e": 2817,
"s": 2134,
"text": "\"The film is a hoot and is just as good if not better than much of what s on saturday morning tv, especially the pseudo educational stuff we all can’t stand.” \"The things this movie tries to get the audience to buy just won’t fly with most intelligent viewers.”\"Although life or something like it is very much in the mold of feel good movies, the cast and director stephen herek’s polished direction pour delightfully piquant wine from aged bottles.” \"This is the case of a pregnant premise being wasted by a script that takes few chances and manages to insult the intelligence of everyone in the audience.” \"One of the finest most humane and important holocaust movies ever made.\""
},
{
"code": null,
"e": 2894,
"s": 2817,
"text": "The deep learning model + all necessary data can be found in my GitHub repo."
},
{
"code": null,
"e": 2924,
"s": 2894,
"text": "Let's begin with some theory."
},
{
"code": null,
"e": 3025,
"s": 2924,
"text": "Recurrent Neural Networks (RNNs) are popular models that have shown great promise in many NLP tasks."
},
{
"code": null,
"e": 3477,
"s": 3025,
"text": "RNN’s make use of sequential information such as text. In a “traditional” feedforward neural network we assume that all inputs are independent of each other. But for many tasks that’s a very bad idea. A sentence, for example, has a clear grammatical structure and order, where each word depends on the previous word. If you want your neural network to learn the meaning (or sentiment in our case) the network must know which words came in which order."
},
{
"code": null,
"e": 3799,
"s": 3477,
"text": "RNNs are called recurrent because they perform the same task for every element of a sequence, with the output being dependent on the previous computations. Another way to think about RNNs is that they have a “memory” which captures information about what has been calculated so far. Here is what a typical RNN looks like:"
},
{
"code": null,
"e": 4532,
"s": 3799,
"text": "x(t-1), x(t), x(t+1) are sequential inputs that depend on each other (such as words in a sentence). y(t_1), y(t), y(t+1) are the outputs. Unique for RNN is is the fact that the calculation of the current hidden state h(t) of the neurons for the input x(t) depends on the previous hidden state h(t-1) for the previous input x(t-1). Wxh and Whh are weight matrices that connect the input x(t) with the hidden layer h(t), and h(t) with h(t-1) respectively. This way we introduce a recurrence to the neural network which can be considered as a memory on the previous inputs. In theory, this way “vanilla” RNNs can make use of information in arbitrarily long sequences, but in practice, they are limited to looking back only a few steps."
},
{
"code": null,
"e": 4567,
"s": 4532,
"text": "This is where LSTMs come in handy."
},
{
"code": null,
"e": 4813,
"s": 4567,
"text": "Long Short-Term Memory networks — usually just called “LSTMs” — are a special kind of RNN, capable of learning long-term dependencies. LSTMs don’t have a fundamentally different architecture from RNNs, but they incorporate additional components."
},
{
"code": null,
"e": 5120,
"s": 4813,
"text": "The key to LSTMs is the cell state C(t), the horizontal line running through the top of the diagram. A cell state is an additional way to store memory, besides just only using the hidden state h(t). However, C(t) makes it possible that LSTMs can work with much longer sequences in opposite to vanilla RNNs."
},
{
"code": null,
"e": 5380,
"s": 5120,
"text": "Furthermore, LSTMs have the ability to remove or add information to the cell state, carefully regulated by structures called gates. Gates are a way to optionally let information through. An LSTM has three of these gates, to protect and control the cell state."
},
{
"code": null,
"e": 5584,
"s": 5380,
"text": "Forget Gate: After getting the hidden state h(t-1 ) of the previous input x(t-1), Forget gate helps us to make decisions about what must be removed from h(t-1) state and thus keeping only relevant stuff."
},
{
"code": null,
"e": 5702,
"s": 5584,
"text": "Input Gate: In the input gate, we decide to add new stuff from the present input x(t) to our present cell state C(t)."
},
{
"code": null,
"e": 6141,
"s": 5702,
"text": "Output Gate: The output gate as the name suggests, decides what to output from the current cell state C(t) to the next C(t+1). For the language model example, since it just saw a subject, it might want to output information relevant to a verb, in case that’s what is coming next. For example, it might output whether the subject is singular or plural so that we know what form a verb should be conjugated into if that’s what follows next."
},
{
"code": null,
"e": 6315,
"s": 6141,
"text": "Behind each of these states are separate neural networks. As you can imagine this makes LSTMs quite complex. At this point, I won't go much more into the detail about LSTMs."
},
{
"code": null,
"e": 6499,
"s": 6315,
"text": "Before we can use the reviews as inputs for the recurrent neural network it is required to do some preprocessing on the data. Our main purpose here is to shrink the observation space."
},
{
"code": null,
"e": 7009,
"s": 6499,
"text": "Consider the words such as ”Something” and “something”. For us humans these words have the same meaning, the only difference between them is that the first word is capitalized, because it may be the first word in a sentence. But for the neural network, these words will have (at the beginning at least) a different meaning because of their different spelling. Only during training, the neural network may or may not learn to recognize that these words mean the same. Our aim is to prevent such misconceptions."
},
{
"code": null,
"e": 7096,
"s": 7009,
"text": "Because of this, the first step of preprocessing is to make all words lowercase words."
},
{
"code": null,
"e": 7211,
"s": 7096,
"text": "Special characters such as . , ! ? ‘ etc. do not contribute to the sentiment of a review and hence can be removed."
},
{
"code": null,
"e": 7261,
"s": 7211,
"text": "Consider the following unprocessed review sample:"
},
{
"code": null,
"e": 7451,
"s": 7261,
"text": "\"Although life or something like it is very much in the mold of feel good movies, the cast and director stephen herek’s polished direction pour delightfully piquant wine from aged bottles.”"
},
{
"code": null,
"e": 7532,
"s": 7451,
"text": "After we do the mentioned preprocessing steps the review sample look as follows:"
},
{
"code": null,
"e": 7719,
"s": 7532,
"text": "\"although life or something like it is very much in the mold of feel good movies the cast and director stephen hereks polished direction pour delightfully piquant wine from aged bottles”"
},
{
"code": null,
"e": 7780,
"s": 7719,
"text": "The preprocessing is applied to every review in the dataset."
},
{
"code": null,
"e": 8131,
"s": 7780,
"text": "Another major step is to create a so-called Word-To-Index map, which assigns a unique integer value to each word in the dataset. The dataset I used in this project that contains all positive and negative reviews consists of 18339 unique words. Thus the word-to-index map has the same number of entries. This number is also called the vocabulary size."
},
{
"code": null,
"e": 8221,
"s": 8131,
"text": "The first and last entries in the word-to-index map that I have obtained look as follows:"
},
{
"code": null,
"e": 8480,
"s": 8221,
"text": "This step of assigning a unique integer to words in the dataset is crucial because we can not feed in string data into a neural network. Instead, word-to-index allows us to use integers to represent whole sentences and reviews. Consider the following review:"
},
{
"code": null,
"e": 8581,
"s": 8480,
"text": "\"the things this movie tries to get the audience to buy just wont fly with most intelligent viewers”"
},
{
"code": null,
"e": 8716,
"s": 8581,
"text": "Using word-to-index map, the review can be represented by an integer array, where each integer represents a word according to the map:"
},
{
"code": null,
"e": 8803,
"s": 8716,
"text": "[0, 5094, 147, 81, 1269, 5, 532, 0, 1303, 5, 1835, 652, 236, 1101, 125, 188, 712, 855]"
},
{
"code": null,
"e": 8931,
"s": 8803,
"text": "Of course, a neural network can neither take a string or a single integer value as input. Instead, we must use Word Embeddings."
},
{
"code": null,
"e": 9376,
"s": 8931,
"text": "Word Embeddings are a distributed representation for text that is perhaps one of the key breakthroughs for the impressive performance of deep learning methods on challenging NLP problems. Word Embeddings are in fact a class of techniques where individual words are represented by a real-valued vector, often with tens or hundreds of dimensions. Each word is mapped to one specific vector and the vector values are learned by the neural network."
},
{
"code": null,
"e": 9594,
"s": 9376,
"text": "This is contrasted to the thousands or millions of dimensions required for sparse word representations, such as a one-hot encoding. For instance, we can embed the words “although” and “life“ as 10-dimensional vectors:"
},
{
"code": null,
"e": 9656,
"s": 9594,
"text": "although = [0.8 1.0 4.2 7.5 3.6]life = [8.3 5.7 7.8 4.6 2.5 ]"
},
{
"code": null,
"e": 10043,
"s": 9656,
"text": "Each vector that represents a word in the dataset is obtained from a large matrix, called embedding-matrix. The number of rows of this matrix represents the dimensionality of the word embedding, the number of columns represents the vocabulary size or number of unique words in the dataset. Thus each column of this matrix represents an embedding vector for a unique word in the dataset."
},
{
"code": null,
"e": 10482,
"s": 10043,
"text": "How do we know which column represents which word? This is where we use the word-to-index map. Consider you want to get the embedding vector for the word “although”, according to the word-to-index map this word is represented by the number 2511. In the next step, it is necessary to create a one-hot-encoded vector of size 18339 (number of words in the dataset), where each entry is 0 except for the 2511th entry which has the value of 1."
},
{
"code": null,
"e": 10663,
"s": 10482,
"text": "By doing a dot-product between the embedding matrix and the one-hot-encoded vector we obtain the 2511th column of the matrix, which is the embedding vector for the word “although”."
},
{
"code": null,
"e": 10982,
"s": 10663,
"text": "This way we can feed whole string-paragraphs or Netflix reviews into an LSTM. We just look up for each word the integer value in the word-to-index map, create the appropriate one-hot-encoded vector and perform a dot-product with the matrix. The review is then fed word by word (vector by vector) into the LSTM network."
},
{
"code": null,
"e": 11158,
"s": 10982,
"text": "So far, you have seen how to preprocess the data and how to feed in the reviews in the LSTM network. Now, let's discuss how we can finally get the sentiment of a given review."
},
{
"code": null,
"e": 11681,
"s": 11158,
"text": "For each time step t, the LSTM network receives an input vector x(t) which results in the output vector y(t). This process is repeated until x(n), n being the number of words in the review. Let's say n=20 words. Until x(n) the LSTM network produced y(n) output vectors. Each of these 20 vectors represents something, but not the sentiment we are looking for. Rather the vectors y are an encoded representation of features of the review that (according to the neural network) will be important in determining the sentiment."
},
{
"code": null,
"e": 12160,
"s": 11681,
"text": "y(8) represents the features the neural networks recognized for the first 8 words of the review. y(20), on the other hand, represents the features for the whole review. Although it is sufficient to use only the last output vector y(20) in practice, I have found that it leads to more accurate results if we use all vectors y(0) — y(20) for determining of the sentiment. This can be achieved by computing the mean value over all vectors. Let's call this mean value vector y_mean."
},
{
"code": null,
"e": 12486,
"s": 12160,
"text": "Finally, the feature representation of the review that is encoded in y_mean can be used to classify the review into the categories of being positive or being negative. In order to do so, it is required to add a final classification layer, which is nothing else than the dot product between y_mean and another weight matrix W."
},
{
"code": null,
"e": 12743,
"s": 12486,
"text": "This process of sentiment analysis I just described is implemented in a deep learning model in my GitHub repo. You are welcome to check it out and try it for yourself. After the model is trained the can perform the sentiment analysis on yet unseen reviews:"
}
] |
Customizing Plots with Python Matplotlib | by Carolina Bento | Towards Data Science | A central part of Data Science and Data Analysis is how you visualize the data. How you make use of visualization tools has an important role in defining how you communicate insights.
My language of choice to explore and visualize data is Python.
In this article, I want to walk you through my framework for going from visualizing raw data to having a beautiful plot that is not just eye-catching but emphasizes the core insights you want to convey.
In this example I'm going to be using a dataset of workout sessions used in a previous article. It looks like this
A bare bones scatter plot would look like this
Which you can replicate with the following code
import pandas as pd
import matplotlib.pyplot as plt

#loading dataset
df = pd.read_csv('workout_log.csv')
df.columns = ['date', 'distance_km', 'duration_min', 'delta_last_workout', 'day_category']

def scatterplot(df, x_dim, y_dim):
    x = df[x_dim]
    y = df[y_dim]

    fig, ax = plt.subplots(figsize=(10, 5))
    ax.scatter(x, y)
    plt.show()

scatterplot(df, 'distance_km', 'duration_min')
The usual next step for me is to label the axes and add a title so each plot is appropriately labeled.
The code change is minimal, but definitely makes a difference.
import pandas as pd
import matplotlib.pyplot as plt

#loading dataset
df = pd.read_csv('workout_log.csv')
df.columns = ['date', 'distance_km', 'duration_min', 'delta_last_workout', 'day_category']

def scatterplot(df, x_dim, y_dim):
    x = df[x_dim]
    y = df[y_dim]

    fig, ax = plt.subplots(figsize=(10, 5))
    ax.scatter(x, y)

    #adds a title and axes labels
    ax.set_title('Distance vs Workout Duration')
    ax.set_xlabel('Distance (Km)')
    ax.set_ylabel('Workout Duration (min)')

    plt.show()

scatterplot(df, 'distance_km', 'duration_min')
In order to change the default box around the plot, we have to actually remove some of the plot's borders.
import pandas as pd
import matplotlib.pyplot as plt

#loading dataset
df = pd.read_csv('workout_log.csv')
df.columns = ['date', 'distance_km', 'duration_min', 'delta_last_workout', 'day_category']

def scatterplot(df, x_dim, y_dim):
    x = df[x_dim]
    y = df[y_dim]

    fig, ax = plt.subplots(figsize=(10, 5))
    ax.scatter(x, y)

    #adds a title and axes labels
    ax.set_title('Distance vs Workout Duration')
    ax.set_xlabel('Distance (Km)')
    ax.set_ylabel('Workout Duration (min)')

    #removing top and right borders
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)

    plt.show()

scatterplot(df, 'distance_km', 'duration_min')
Something that I usually like to add to my plots is major gridlines. They help with readability by reducing the amount of white background. You can play around with their width (linewidth) and transparency (alpha).
import pandas as pd
import matplotlib.pyplot as plt

#loading dataset
df = pd.read_csv('workout_log.csv')
df.columns = ['date', 'distance_km', 'duration_min', 'delta_last_workout', 'day_category']

def scatterplot(df, x_dim, y_dim):
    x = df[x_dim]
    y = df[y_dim]

    fig, ax = plt.subplots(figsize=(10, 5))
    ax.scatter(x, y)

    #adds a title and axes labels
    ax.set_title('Distance vs Workout Duration')
    ax.set_xlabel('Distance (Km)')
    ax.set_ylabel('Workout Duration (min)')

    #removing top and right borders
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)

    #adds major gridlines
    ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)

    plt.show()

scatterplot(df, 'distance_km', 'duration_min')
You can see that some of the dots in the plot overlap. To improve readability even more, we can adjust the dots' transparency — alpha.
import pandas as pd
import matplotlib.pyplot as plt

#loading dataset
df = pd.read_csv('workout_log.csv')
df.columns = ['date', 'distance_km', 'duration_min', 'delta_last_workout', 'day_category']

def scatterplot(df, x_dim, y_dim):
    x = df[x_dim]
    y = df[y_dim]

    fig, ax = plt.subplots(figsize=(10, 5))

    #customizes alpha for each dot in the scatter plot
    ax.scatter(x, y, alpha=0.70)

    #adds a title and axes labels
    ax.set_title('Distance vs Workout Duration')
    ax.set_xlabel('Distance (Km)')
    ax.set_ylabel('Workout Duration (min)')

    #removing top and right borders
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)

    #adds major gridlines
    ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)

    plt.show()

scatterplot(df, 'distance_km', 'duration_min')
There is still a bit of overlap, but at least the transparency improved the readability of the majority of the dots.
Since we have the day category we can also try identifying each dot in our plot with a different color.
For that you can choose from two different approaches:
Pick the colors yourself using tools like Adobe Kuler’s color wheel
Use Python's color maps
#1 Defining your own color palette
import pandas as pd
import matplotlib.pyplot as plt

#loading dataset
df = pd.read_csv('workout_log.csv')
df.columns = ['date', 'distance_km', 'duration_min', 'delta_last_workout', 'day_category']

def scatterplot(df, x_dim, y_dim):
    x = df[x_dim]
    y = df[y_dim]

    fig, ax = plt.subplots(figsize=(10, 5))

    #defining an array of colors
    colors = ['#2300A8', '#00A658']

    #assigns a color to each data point
    ax.scatter(x, y, alpha=0.70, color=colors)

    #adds a title and axes labels
    ax.set_title('Distance vs Workout Duration')
    ax.set_xlabel('Distance (Km)')
    ax.set_ylabel('Workout Duration (min)')

    #removing top and right borders
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)

    #adds major gridlines
    ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)

    plt.show()

scatterplot(df, 'distance_km', 'duration_min')
#2 Using Python Color Maps
To paint each dot according to its day category I need to introduce a few new components in the code
Import the color map library
Take the day category as a parameter, so the corresponding color can be mapped
Use parameter c from the scatter method to assign the color sequence
Use parameter cmap to assign the color map to be used. I'm going to use the brg color map
import pandas as pd
import matplotlib.cm as cm
import matplotlib.pyplot as plt

#loading dataset
df = pd.read_csv('workout_log.csv')
df.columns = ['date', 'distance_km', 'duration_min', 'delta_last_workout', 'day_category']

def scatterplot(df, x_dim, y_dim, category):
    x = df[x_dim]
    y = df[y_dim]

    fig, ax = plt.subplots(figsize=(10, 5))

    #applies the custom color map along with the color sequence
    ax.scatter(x, y, alpha=0.70, c=df[category], cmap=cm.brg)

    #adds a title and axes labels
    ax.set_title('Distance vs Workout Duration')
    ax.set_xlabel('Distance (Km)')
    ax.set_ylabel('Workout Duration (min)')

    #removing top and right borders
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)

    #adds major gridlines
    ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)

    plt.show()

scatterplot(df, 'distance_km', 'duration_min', 'day_category')
So far, we've been using the native scatter method to plot each data point. In order to add a legend, we'll have to change the code a little bit.
We'll have to
Take the day category as a parameter, so we have our labels
Convert the numerical (0,1) labels into categorical labels (weekday, weekend)
Iterate through the dataset in order to assign a label to each data point
import pandas as pd
import matplotlib.cm as cm
import matplotlib.pyplot as plt

#loading dataset
df = pd.read_csv('workout_log.csv')
df.columns = ['date', 'distance_km', 'duration_min', 'delta_last_workout', 'day_category']

def scatterplot(df, x_dim, y_dim, category):
    x = df[x_dim]
    y = df[y_dim]

    #converting original (numerical) labels into categorical labels
    categories = df[category].apply(lambda x: 'weekday' if x == 0 else 'weekend')

    fig, ax = plt.subplots(figsize=(10, 5))

    #assigns a color to each data point
    colors = ['#2300A8', '#00A658']

    #iterates through the dataset plotting each data point and
    #assigning it its corresponding color and label
    for i in range(len(df)):
        ax.scatter(x.ix[i], y.ix[i], alpha=0.70, color=colors[i % len(colors)], label=categories.ix[i])

    #adds title and axes labels
    ax.set_title('Distance vs Workout Duration')
    ax.set_xlabel('Distance (Km)')
    ax.set_ylabel('Workout Duration (min)')

    #removing top and right borders
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)

    #adds major gridlines
    ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)

    #adds legend
    ax.legend(categories.unique())

    plt.show()

scatterplot(df, 'distance_km', 'duration_min', 'day_category')
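One caveat about the snippet above: the .ix indexer has since been removed from pandas. If you run this with a current pandas version, the loop can be adapted (my adaptation, not part of the original article) to use positional .iloc lookups instead:

# adaptation for recent pandas releases, where Series .ix no longer exists
for i in range(len(df)):
    ax.scatter(x.iloc[i], y.iloc[i], alpha=0.70,
               color=colors[i % len(colors)],
               label=categories.iloc[i])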
And there you have it! A customized scatter plot from which it's now easier to understand the data and draw some insights.
Thanks for reading! | [
{
"code": null,
"e": 356,
"s": 171,
"text": "A central part of Data Science and Data Analysis is how you visualize the data. How you make use of visualizations tools has an important role in defining how you communicate insights."
},
{
"code": null,
"e": 419,
"s": 356,
"text": "My language of choice to explore and visualize data is Python."
},
{
"code": null,
"e": 620,
"s": 419,
"text": "In this article, I want to walk you through my framework for going from visualizing raw data to having a beautiful plot that is not just eye-catching but emphases the core insights you want to convey."
},
{
"code": null,
"e": 735,
"s": 620,
"text": "In this example I'm going to be using a dataset of workout sessions used in a previous article. It looks like this"
},
{
"code": null,
"e": 782,
"s": 735,
"text": "A bare bones scatter plot would look like this"
},
{
"code": null,
"e": 830,
"s": 782,
"text": "Which you can replicate with the following code"
},
{
"code": null,
"e": 1203,
"s": 830,
"text": "import pandas as pdimport matplotlib.pyplot as plt#loading datasetdf = pd.read_csv(‘workout_log.csv’)df.columns = [‘date’, ‘distance_km’, ‘duration_min’, ‘delta_last_workout’, ‘day_category’]def scatterplot(df, x_dim, y_dim): x = df[x_dim] y = df[y_dim] fig, ax = plt.subplots(figsize=(10, 5)) ax.scatter(x, y) plt.show()scatterplot(df, ‘distance_km’, ‘duration_min’)"
},
{
"code": null,
"e": 1306,
"s": 1203,
"text": "The usual next step for me is to label the axes and add a title so each plot is appropriately labeled."
},
{
"code": null,
"e": 1369,
"s": 1306,
"text": "The code change is minimal, but definitely makes a difference."
},
{
"code": null,
"e": 1893,
"s": 1369,
"text": "import pandas as pdimport matplotlib.pyplot as plt#loading datasetdf = pd.read_csv(‘workout_log.csv’)df.columns = [‘date’, ‘distance_km’, ‘duration_min’, ‘delta_last_workout’, ‘day_category’]def scatterplot(df, x_dim, y_dim): x = df[x_dim] y = df[y_dim] fig, ax = plt.subplots(figsize=(10, 5)) ax.scatter(x, y) #adds a title and axes labels ax.set_title('Distance vs Workout Duration') ax.set_xlabel('Distance (Km)') ax.set_ylabel('Workout Duration (min)') plt.show()scatterplot(df, ‘distance_km’, ‘duration_min’)"
},
{
"code": null,
"e": 2000,
"s": 1893,
"text": "In order to change the default box around the plot, we have to actually remove some of the plot's borders."
},
{
"code": null,
"e": 2634,
"s": 2000,
"text": "import pandas as pdimport matplotlib.pyplot as plt#loading datasetdf = pd.read_csv(‘workout_log.csv’)df.columns = [‘date’, ‘distance_km’, ‘duration_min’, ‘delta_last_workout’, ‘day_category’]def scatterplot(df, x_dim, y_dim): x = df[x_dim] y = df[y_dim] fig, ax = plt.subplots(figsize=(10, 5)) ax.scatter(x, y) #adds a title and axes labels ax.set_title('Distance vs Workout Duration') ax.set_xlabel('Distance (Km)') ax.set_ylabel('Workout Duration (min)') #removing top and right borders ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) plt.show()scatterplot(df, ‘distance_km’, ‘duration_min’)"
},
{
"code": null,
"e": 2847,
"s": 2634,
"text": "Something that I usually like to add to my plots are major gridlines. It helps with readability by reducing the amount of white background. You can play around with the its width linewidth and transparency alpha."
},
{
"code": null,
"e": 3569,
"s": 2847,
"text": "import pandas as pdimport matplotlib.pyplot as plt#loading datasetdf = pd.read_csv(‘workout_log.csv’)df.columns = [‘date’, ‘distance_km’, ‘duration_min’, ‘delta_last_workout’, ‘day_category’]def scatterplot(df, x_dim, y_dim): x = df[x_dim] y = df[y_dim] fig, ax = plt.subplots(figsize=(10, 5)) ax.scatter(x, y) #adds a title and axes labels ax.set_title('Distance vs Workout Duration') ax.set_xlabel('Distance (Km)') ax.set_ylabel('Workout Duration (min)') #removing top and right borders ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #adds major gridlines ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5) plt.show()scatterplot(df, ‘distance_km’, ‘duration_min’)"
},
{
"code": null,
"e": 3704,
"s": 3569,
"text": "You can see that some of the dots in the plot overlap. To improve readability even more, we can adjust the dots' transparency — alpha."
},
{
"code": null,
"e": 4490,
"s": 3704,
"text": "import pandas as pdimport matplotlib.pyplot as plt#loading datasetdf = pd.read_csv(‘workout_log.csv’)df.columns = [‘date’, ‘distance_km’, ‘duration_min’, ‘delta_last_workout’, ‘day_category’]def scatterplot(df, x_dim, y_dim): x = df[x_dim] y = df[y_dim] fig, ax = plt.subplots(figsize=(10, 5)) #customizes alpha for each dot in the scatter plot ax.scatter(x, y, alpha=0.70) #adds a title and axes labels ax.set_title('Distance vs Workout Duration') ax.set_xlabel('Distance (Km)') ax.set_ylabel('Workout Duration (min)') #removing top and right borders ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #adds major gridlines ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5) plt.show()scatterplot(df, ‘distance_km’, ‘duration_min’)"
},
{
"code": null,
"e": 4607,
"s": 4490,
"text": "There is still a bit of overlap, but at least the transparency improved the readability of the majority of the dots."
},
{
"code": null,
"e": 4711,
"s": 4607,
"text": "Since we have the day category we can also try identifying each dot in our plot with a different color."
},
{
"code": null,
"e": 4766,
"s": 4711,
"text": "For that you can choose from two different approaches:"
},
{
"code": null,
"e": 4834,
"s": 4766,
"text": "Pick the colors yourself using tools like Adobe Kuler’s color wheel"
},
{
"code": null,
"e": 4858,
"s": 4834,
"text": "Use Python's color maps"
},
{
"code": null,
"e": 4893,
"s": 4858,
"text": "#1 Defining your own color palette"
},
{
"code": null,
"e": 5738,
"s": 4893,
"text": "import pandas as pdimport matplotlib.pyplot as plt#loading datasetdf = pd.read_csv(‘workout_log.csv’)df.columns = [‘date’, ‘distance_km’, ‘duration_min’, ‘delta_last_workout’, ‘day_category’]def scatterplot(df, x_dim, y_dim): x = df[x_dim] y = df[y_dim]fig, ax = plt.subplots(figsize=(10, 5)) #defining an array of colors colors = ['#2300A8', '#00A658'] #assigns a color to each data point ax.scatter(x, y, alpha=0.70, color=colors) #adds a title and axes labels ax.set_title('Distance vs Workout Duration') ax.set_xlabel('Distance (Km)') ax.set_ylabel('Workout Duration (min)') #removing top and right borders ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False)#adds major gridlines ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5)plt.show()scatterplot(df, ‘distance_km’, ‘duration_min’)"
},
{
"code": null,
"e": 5765,
"s": 5738,
"text": "#2 Using Python Color Maps"
},
{
"code": null,
"e": 5866,
"s": 5765,
"text": "To paint each dot according to its day category I need to introduce a few new components in the code"
},
{
"code": null,
"e": 5895,
"s": 5866,
"text": "Import the color map library"
},
{
"code": null,
"e": 5974,
"s": 5895,
"text": "Take the day category as a parameter, so the corresponding color can be mapped"
},
{
"code": null,
"e": 6042,
"s": 5974,
"text": "Use parameterc from the scatter method to assign the color sequence"
},
{
"code": null,
"e": 6132,
"s": 6042,
"text": "Use parameter cmap to assign the color map to be used. I'm going to use the brg color map"
},
{
"code": null,
"e": 7009,
"s": 6132,
"text": "import pandas as pdimport matplotlib.cm as cmimport matplotlib.pyplot as plt#loading datasetdf = pd.read_csv(‘workout_log.csv’)df.columns = [‘date’, ‘distance_km’, ‘duration_min’, ‘delta_last_workout’, ‘day_category’]def scatterplot(df, x_dim, y_dim, category): x = df[x_dim] y = df[y_dim] fig, ax = plt.subplots(figsize=(10, 5)) #applies the custom color map along with the color sequence ax.scatter(x, y, alpha=0.70, c= df[category], cmap=cm.brg) #adds a title and axes labels ax.set_title('Distance vs Workout Duration') ax.set_xlabel('Distance (Km)') ax.set_ylabel('Workout Duration (min)') #removing top and right borders ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #adds major gridlines ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5) plt.show()scatterplot(df, ‘distance_km’, ‘duration_min’, ‘day_category’)"
},
{
"code": null,
"e": 7155,
"s": 7009,
"text": "So far, we've been using the native scatter method to plot each data point. In order to add a legend, we'll have to change the code a little bit."
},
{
"code": null,
"e": 7169,
"s": 7155,
"text": "We'll have to"
},
{
"code": null,
"e": 7229,
"s": 7169,
"text": "Take the day category as a parameter, so we have our labels"
},
{
"code": null,
"e": 7307,
"s": 7229,
"text": "Convert the numerical (0,1) labels into categorical labels (weekday, weekend)"
},
{
"code": null,
"e": 7381,
"s": 7307,
"text": "Iterate through the dataset in order to assign a label to each data point"
},
{
"code": null,
"e": 8647,
"s": 7381,
"text": "import pandas as pdimport matplotlib.cm as cmimport matplotlib.pyplot as plt#loading datasetdf = pd.read_csv(‘workout_log.csv’)df.columns = [‘date’, ‘distance_km’, ‘duration_min’, ‘delta_last_workout’, ‘day_category’]def scatterplot(df, x_dim, y_dim, category): x = df[x_dim] y = df[y_dim] #converting original (numerical) labels into categorical labels categories = df[category].apply(lambda x: 'weekday' if x == 0 else 'weekend') fig, ax = plt.subplots(figsize=(10, 5)) #assigns a color to each data point colors = ['#2300A8', '#00A658'] #iterates through the dataset plotting each data point and assigning it its corresponding color and label for i in range(len(df)): ax.scatter(x.ix[i], y.ix[i], alpha=0.70, color = colors[i%len(colors)], label=categories.ix[i]) #adds title and axes labels ax.set_title('Distance vs Workout Duration') ax.set_xlabel('Distance (Km)') ax.set_ylabel('Workout Duration (min)') #removing top and right borders ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #adds major gridlines ax.grid(color='grey', linestyle='-', linewidth=0.25, alpha=0.5) #adds legend ax.legend(categories.unique()) plt.show()scatterplot(df, 'distance_km', 'duration_min', 'day_category')"
},
{
"code": null,
"e": 8770,
"s": 8647,
"text": "And there you have it! A customized scatter plot from which it's now easier to understand the data and draw some insights."
}
] |
HTTP - Caching | HTTP is typically used for distributed information systems, where performance can be improved by the use of response caches. The HTTP/1.1 protocol includes a number of elements intended to make caching work.
The goal of caching in HTTP/1.1 is to eliminate the need to send requests in many cases, and to eliminate the need to send full responses in many other cases.
The basic cache mechanisms in HTTP/1.1 are implicit directives to caches, in which the server specifies expiration times and validators. We use the Cache-Control header for this purpose.
The Cache-Control header allows a client or server to transmit a variety of directives in either requests or responses. These directives typically override the default caching algorithms. The caching directives are specified in a comma-separated list. For example:
Cache-control: no-cache
The following cache request directives can be used by the client in its HTTP request:
no-cache: A cache must not use the response to satisfy a subsequent request without successful revalidation with the origin server.
no-store: The cache should not store anything about the client request or server response.
max-age = seconds: Indicates that the client is willing to accept a response whose age is not greater than the specified time in seconds.
max-stale [ = seconds ]: Indicates that the client is willing to accept a response that has exceeded its expiration time. If seconds are given, it must not be expired by more than that time.
min-fresh = seconds: Indicates that the client is willing to accept a response whose freshness lifetime is not less than its current age plus the specified time in seconds.
no-transform: Does not convert the entity-body.
only-if-cached: Does not retrieve new data. The cache can send a document only if it is in the cache, and should not contact the origin-server to see if a newer copy exists.
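As a small illustration (a sketch added here, not part of the original text), a client can combine several of these request directives in a single comma-separated Cache-Control header value. The URL below is a placeholder, and only Python's standard library is used:

import urllib.request

# placeholder URL; any HTTP resource would do
req = urllib.request.Request(
    "http://example.com/",
    headers={
        # accept a cached copy up to 60 seconds old, but refuse a transformed entity-body
        "Cache-Control": "max-age=60, no-transform"
    },
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.headers.get("Cache-Control"))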
The following cache response directives can be used by the server in its HTTP response:
public: Indicates that the response may be cached by any cache.
private: Indicates that all or part of the response message is intended for a single user and must not be cached by a shared cache.
no-cache: A cache must not use the response to satisfy a subsequent request without successful re-validation with the origin server.
no-store: The cache should not store anything about the client request or server response.
no-transform: Does not convert the entity-body.
must-revalidate: The cache must verify the status of stale documents before using them; expired ones should not be used.
proxy-revalidate: The proxy-revalidate directive has the same meaning as the must-revalidate directive, except that it does not apply to non-shared user agent caches.
max-age = seconds: Specifies the maximum time in seconds for which the response is considered fresh.
s-maxage = seconds: The maximum age specified by this directive overrides the maximum age specified by either the max-age directive or the Expires header. The s-maxage directive is always ignored by a private cache.
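On the response side (again an illustrative sketch rather than part of the original tutorial; the handler name and port are arbitrary), a server attaches these directives as an ordinary header field. For example, with Python's built-in http.server module:

from http.server import BaseHTTPRequestHandler, HTTPServer

class CachingHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        body = b"cacheable resource"
        self.send_response(200)
        # private (browser) caches may keep this for 300 s, shared caches for 600 s
        self.send_header("Cache-Control", "public, max-age=300, s-maxage=600")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

if __name__ == "__main__":
    # arbitrary port chosen for this sketch
    HTTPServer(("localhost", 8000), CachingHandler).serve_forever()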
Bookmark this page | [
{
"code": null,
"e": 1987,
"s": 1779,
"text": "HTTP is typically used for distributed information systems, where performance can be improved by the use of response caches. The HTTP/1.1 protocol includes a number of elements intended to make caching work."
},
{
"code": null,
"e": 2147,
"s": 1987,
"text": "The goal of caching in HTTP/1.1 is to eliminate the need to send requests in many cases, and to eliminate the need to send full responses in many other cases. "
},
{
"code": null,
"e": 2326,
"s": 2147,
"text": "The basic cache mechanisms in HTTP/1.1 are implicit directives to caches where server-specifies expiration times and validators. We use the Cache-Control header for this purpose."
},
{
"code": null,
"e": 2591,
"s": 2326,
"text": "The Cache-Control header allows a client or server to transmit a variety of directives in either requests or responses. These directives typically override the default caching algorithms. The caching directives are specified in a comma-separated list. For example:"
},
{
"code": null,
"e": 2616,
"s": 2591,
"text": "Cache-control: no-cache\n"
},
{
"code": null,
"e": 2702,
"s": 2616,
"text": "The following cache request directives can be used by the client in its HTTP request:"
},
{
"code": null,
"e": 2824,
"s": 2702,
"text": "A cache must not use the response to satisfy a subsequent request without successful revalidation with the origin server."
},
{
"code": null,
"e": 2905,
"s": 2824,
"text": "The cache should not store anything about the client request or server response."
},
{
"code": null,
"e": 3024,
"s": 2905,
"text": "Indicates that the client is willing to accept a response whose age is not greater than the specified time in seconds."
},
{
"code": null,
"e": 3190,
"s": 3024,
"text": "Indicates that the client is willing to accept a response that has exceeded its expiration time. If seconds are given, it must not be expired by more than that time."
},
{
"code": null,
"e": 3342,
"s": 3190,
"text": "Indicates that the client is willing to accept a response whose freshness lifetime is not less than its current age plus the specified time in seconds."
},
{
"code": null,
"e": 3376,
"s": 3342,
"text": "Does not convert the entity-body."
},
{
"code": null,
"e": 3534,
"s": 3376,
"text": "Does not retrieve new data. The cache can send a document only if it is in the cache, and should not contact the origin-server to see if a newer copy exists."
},
{
"code": null,
"e": 3622,
"s": 3534,
"text": "The following cache response directives can be used by the server in its HTTP response:"
},
{
"code": null,
"e": 3678,
"s": 3622,
"text": "Indicates that the response may be cached by any cache."
},
{
"code": null,
"e": 3802,
"s": 3678,
"text": "Indicates that all or part of the response message is intended for a single user and must not be cached by a shared cache. "
},
{
"code": null,
"e": 3925,
"s": 3802,
"text": "A cache must not use the response to satisfy a subsequent request without successful re-validation with the origin server."
},
{
"code": null,
"e": 4006,
"s": 3925,
"text": "The cache should not store anything about the client request or server response."
},
{
"code": null,
"e": 4040,
"s": 4006,
"text": "Does not convert the entity-body."
},
{
"code": null,
"e": 4145,
"s": 4040,
"text": "The cache must verify the status of stale documents before using it and expired ones should not be used."
},
{
"code": null,
"e": 4295,
"s": 4145,
"text": "The proxy-revalidate directive has the same meaning as the must- revalidate directive, except that it does not apply to non-shared user agent caches."
},
{
"code": null,
"e": 4414,
"s": 4295,
"text": "Indicates that the client is willing to accept a response whose age is not greater than the specified time in seconds."
},
{
"code": null,
"e": 4610,
"s": 4414,
"text": "The maximum age specified by this directive overrides the maximum age specified by either the max-age directive or the Expires header. The s-maxage directive is always ignored by a private cache."
},
{
"code": null,
"e": 4617,
"s": 4610,
"text": " Print"
},
{
"code": null,
"e": 4628,
"s": 4617,
"text": " Add Notes"
}
] |
Product of unique prime factors of a number - GeeksforGeeks | 04 May, 2021
Given a number n, we need to find the product of all of its unique prime factors. A prime factor is simply a factor of the number that is itself a prime number. Examples:
Input: num = 10
Output: Product is 10
Explanation:
Here, the input number is 10 having only 2 prime factors and they are 5 and 2.
And hence their product is 10.
Input : num = 25
Output: Product is 5
Explanation:
Here, for the input to be 25 we have only one unique prime factor i.e 5.
And hence the required product is 5.
Method 1 (Simple): Use a loop from i = 2 to n and check whether i is a factor of n; if it is, check whether i is a prime number itself. If both conditions hold, multiply the product variable by i, and continue this process till i = n.
CPP
Java
Python3
C#
PHP
Javascript
// C++ program to find product of// unique prime factors of a number#include <bits/stdc++.h>using namespace std; long long int productPrimeFactors(int n){ long long int product = 1; for (int i = 2; i <= n; i++) { // Checking if 'i' is factor of num if (n % i == 0) { // Checking if 'i' is a Prime number bool isPrime = true; for (int j = 2; j <= i / 2; j++) { if (i % j == 0) { isPrime = false; break; } } // condition if 'i' is Prime number // as well as factor of num if (isPrime) { product = product * i; } } } return product;} // driver functionint main(){ int n = 44; cout << productPrimeFactors(n); return 0;}
// Java program to find product of// unique prime factors of a number. class GFG { public static long productPrimeFactors(int n) { long product = 1; for (int i = 2; i <= n; i++) { // Checking if 'i' is factor of num if (n % i == 0) { // Checking if 'i' is a Prime number boolean isPrime = true; for (int j = 2; j <= i / 2; j++) { if (i % j == 0) { isPrime = false; break; } } // condition if 'i' is Prime number // as well as factor of num if (isPrime) { product = product * i; } } } return product; } // Driver Code public static void main(String[] args) { int n = 44; System.out.print(productPrimeFactors(n)); }} // This code is contributed by _omg
# Python program to find sum of given# series. def productPrimeFactors(n): product = 1 for i in range(2, n + 1): if (n % i == 0): isPrime = 1 for j in range(2, int(i / 2 + 1)): if (i % j == 0): isPrime = 0 break # condition if 'i' is Prime number # as well as factor of num if (isPrime): product = product * i return product # main()n = 44print (productPrimeFactors(n)) # Contributed by _omg
// C# program to find product of// unique prime factors of a number.using System; class GFG { // Function to find product of unique // prime factors of a number public static long productPrimeFactors(int n) { long product = 1; for (int i = 2; i <= n; i++) { // Checking if 'i' is factor of num if (n % i == 0) { // Checking if 'i' is a Prime number bool isPrime = true; for (int j = 2; j <= i / 2; j++) { if (i % j == 0) { isPrime = false; break; } } // condition if 'i' is Prime number // as well as factor of num if (isPrime) { product = product * i; } } } return product; } // Driver Code public static void Main() { int n = 44; Console.Write(productPrimeFactors(n)); }} // This code is contributed by nitin mittal
<?php// PHP program to find// product of unique// prime factors of a number function productPrimeFactors($n){ $product = 1; for ($i = 2; $i <= $n; $i++) { // Checking if 'i' is // factor of num if ($n % $i == 0) { // Checking if 'i' is // a Prime number $isPrime = true; for ($j = 2; $j <= $i / 2; $j++) { if ($i % $j == 0) { $isPrime = false; break; } } // condition if 'i' is // Prime number as well // as factor of num if ($isPrime) { $product = $product * $i; } } } return $product;} // Driver Code$n = 44;echo(productPrimeFactors($n)); // This code is contributed by Ajit.?>
<script> // JavaScript program to find product of// unique prime factors of a number. function productPrimeFactors(n) { let product = 1; for (let i = 2; i <= n; i++) { // Checking if 'i' is factor of num if (n % i == 0) { // Checking if 'i' is a Prime number let isPrime = true; for (let j = 2; j <= i / 2; j++) { if (i % j == 0) { isPrime = false; break; } } // condition if 'i' is Prime number // as well as factor of num if (isPrime) { product = product * i; } } } return product; } // Driver Code let n = 44; document.write(productPrimeFactors(n)); // This code is contributed by avijitmondal1998.</script>
22
Method 2 (Efficient): The idea is based on Efficient program to print all prime factors of a given number.
CPP
Java
Python3
C#
PHP
Javascript
// C++ program to find product of// unique prime factors of a number#include <bits/stdc++.h>using namespace std; // A function to print all prime// factors of a given number nlong long int productPrimeFactors(int n){ long long int product = 1; // Handle prime factor 2 explicitly // so that can optimally handle // other prime factors. if (n % 2 == 0) { product *= 2; while (n % 2 == 0) n = n / 2; } // n must be odd at this point. // So we can skip one element // (Note i = i + 2) for (int i = 3; i <= sqrt(n); i = i + 2) { // While i divides n, print // i and divide n if (n % i == 0) { product = product * i; while (n % i == 0) n = n / i; } } // This condition is to handle the // case when n is a prime number // greater than 2 if (n > 2) product = product * n; return product;} // Driver Codeint main(){ int n = 44; cout << productPrimeFactors(n); return 0;}
// Java program to find product of// unique prime factors of a number.import java.util.*;import java.lang.*; class GFG { public static long productPrimeFactors(int n) { long product = 1; // Handle prime factor 2 // explicitly so that can // optimally handle other // prime factors. if (n % 2 == 0) { product *= 2; while (n % 2 == 0) n = n / 2; } // n must be odd at this point. // So we can skip one element // (Note i = i +2) for (int i = 3; i <= Math.sqrt(n); i = i + 2) { // While i divides n, print // i and divide n if (n % i == 0) { product = product * i; while (n % i == 0) n = n / i; } } // This condition is to handle // the case when n is a prime // number greater than 2 if (n > 2) product = product * n; return product; } // Driver Code public static void main(String[] args) { int n = 44; System.out.print(productPrimeFactors(n)); }} // This code is contributed by _omg
# Python program to find product of# unique prime factors of a number import math def productPrimeFactors(n): product = 1 # Handle prime factor 2 explicitly so that # can optimally handle other prime factors. if (n % 2 == 0): product *= 2 while (n % 2 == 0): n = n / 2 # n must be odd at this point. So we can # skip one element (Note i = i + 2) for i in range (3, int(math.sqrt(n))+1, 2): # While i divides n, print i and # divide n if (n % i == 0): product = product * i while (n % i == 0): n = n / i # This condition is to handle the case when n # is a prime number greater than 2 if (n > 2): product = product * n return product # main()n = 44print (int(productPrimeFactors(n))) # Contributed by _omg
// C# program to find product// of unique prime factors// of a number.using System; public class GFG { // Function to find product // of prime factors public static long productPrimeFactors(int n) { long product = 1; // Handle prime factor 2 explicitly // so that can optimally handle // other prime factors. if (n % 2 == 0) { product *= 2; while (n % 2 == 0) n = n / 2; } // n must be odd at this point. // So we can skip one // element (Note i = i + 2) for (int i = 3; i <= Math.Sqrt(n); i = i + 2) { // While i divides n, print // i and divide n if (n % i == 0) { product = product * i; while (n % i == 0) n = n / i; } } // This condition is to handle // the case when n is a prime // number greater than 2 if (n > 2) product = product * n; return product; } // Driver Code public static void Main(String[] args) { int n = 44; Console.Write(productPrimeFactors(n)); }} // This code is contributed by parashar...
<?php// PHP program to find product of// unique prime factors of a number // A function to print all prime// factors of a given number nfunction productPrimeFactors( $n){ $product = 1; // Handle prime factor 2 // explicitly so that can // optimally handle other // prime factors. if ($n % 2 == 0) { $product *= 2; while ($n % 2 == 0) $n = $n / 2; } // n must be odd at this point. // So we can skip one element // (Note i = i + 2) for ($i = 3; $i <= sqrt($n); $i = $i + 2) { // While i divides n, print // i and divide n if ($n % $i == 0) { $product = $product * $i; while ($n % $i == 0) $n = $n / $i; } } // This condition is to handle the // case when n is a prime number // greater than 2 if ($n > 2) $product = $product * $n; return $product;} // Driver Code$n = 44;echo productPrimeFactors($n); // This code is contributed by ajit?>
<script> // Javascript program to find product of// unique prime factors of a number. function productPrimeFactors(n) { var product = 1; // Handle prime factor 2 // explicitly so that can // optimally handle other // prime factors. if (n % 2 == 0) { product *= 2; while (n % 2 == 0) n = n / 2; } // n must be odd at this point. // So we can skip one element // (Note i = i +2) for (i = 3; i <= Math.sqrt(n); i = i + 2) { // While i divides n, print // i and divide n if (n % i == 0) { product = product * i; while (n % i == 0) n = n / i; } } // This condition is to handle // the case when n is a prime // number greater than 2 if (n > 2) product = product * n; return product; } // Driver Code var n = 44; document.write(productPrimeFactors(n)); // This code contributed by aashish1995 </script>
22
parashar
nitin mittal
jit_t
_sachin_
avijitmondal1998
aashish1995
Prime Number
prime-factor
school-programming
Mathematical
Mathematical
Prime Number
Merge two sorted arrays
Modulo Operator (%) in C/C++ with Examples
Program to find sum of elements in a given array
Operators in C / C++
Program for factorial of a number
Algorithm to solve Rubik's Cube
Print all possible combinations of r elements in a given array of size n
The Knight's tour problem | Backtracking-1
Minimum number of jumps to reach end
Find minimum number of coins that make a given value | [
{
"code": null,
"e": 24510,
"s": 24482,
"text": "\n04 May, 2021"
},
{
"code": null,
"e": 24690,
"s": 24510,
"text": "Given a number n, we need to find the product of all of its unique prime factors. Prime factors: It is basically a factor of the number that is a prime number itself. Examples : "
},
{
"code": null,
"e": 25014,
"s": 24690,
"text": "Input: num = 10\nOutput: Product is 10\nExplanation:\nHere, the input number is 10 having only 2 prime factors and they are 5 and 2.\nAnd hence their product is 10.\n\nInput : num = 25\nOutput: Product is 5\nExplanation:\nHere, for the input to be 25 we have only one unique prime factor i.e 5.\nAnd hence the required product is 5."
},
{
"code": null,
"e": 25219,
"s": 25016,
"text": "Method 1 (Simple) Using a loop from i = 2 to n and check if i is a factor of n then check if i is prime number itself if yes then store product in product variable and continue this process till i = n. "
},
{
"code": null,
"e": 25223,
"s": 25219,
"text": "CPP"
},
{
"code": null,
"e": 25228,
"s": 25223,
"text": "Java"
},
{
"code": null,
"e": 25236,
"s": 25228,
"text": "Python3"
},
{
"code": null,
"e": 25239,
"s": 25236,
"text": "C#"
},
{
"code": null,
"e": 25243,
"s": 25239,
"text": "PHP"
},
{
"code": null,
"e": 25254,
"s": 25243,
"text": "Javascript"
},
{
"code": "// C++ program to find product of// unique prime factors of a number#include <bits/stdc++.h>using namespace std; long long int productPrimeFactors(int n){ long long int product = 1; for (int i = 2; i <= n; i++) { // Checking if 'i' is factor of num if (n % i == 0) { // Checking if 'i' is a Prime number bool isPrime = true; for (int j = 2; j <= i / 2; j++) { if (i % j == 0) { isPrime = false; break; } } // condition if 'i' is Prime number // as well as factor of num if (isPrime) { product = product * i; } } } return product;} // driver functionint main(){ int n = 44; cout << productPrimeFactors(n); return 0;}",
"e": 26088,
"s": 25254,
"text": null
},
{
"code": "// Java program to find product of// unique prime factors of a number. class GFG { public static long productPrimeFactors(int n) { long product = 1; for (int i = 2; i <= n; i++) { // Checking if 'i' is factor of num if (n % i == 0) { // Checking if 'i' is a Prime number boolean isPrime = true; for (int j = 2; j <= i / 2; j++) { if (i % j == 0) { isPrime = false; break; } } // condition if 'i' is Prime number // as well as factor of num if (isPrime) { product = product * i; } } } return product; } // Driver Code public static void main(String[] args) { int n = 44; System.out.print(productPrimeFactors(n)); }} // This code is contributed by _omg",
"e": 27062,
"s": 26088,
"text": null
},
{
"code": "# Python program to find sum of given# series. def productPrimeFactors(n): product = 1 for i in range(2, n + 1): if (n % i == 0): isPrime = 1 for j in range(2, int(i / 2 + 1)): if (i % j == 0): isPrime = 0 break # condition if 'i' is Prime number # as well as factor of num if (isPrime): product = product * i return product # main()n = 44print (productPrimeFactors(n)) # Contributed by _omg",
"e": 27660,
"s": 27062,
"text": null
},
{
"code": "// C# program to find product of// unique prime factors of a number.using System; class GFG { // Function to find product of unique // prime factors of a number public static long productPrimeFactors(int n) { long product = 1; for (int i = 2; i <= n; i++) { // Checking if 'i' is factor of num if (n % i == 0) { // Checking if 'i' is a Prime number bool isPrime = true; for (int j = 2; j <= i / 2; j++) { if (i % j == 0) { isPrime = false; break; } } // condition if 'i' is Prime number // as well as factor of num if (isPrime) { product = product * i; } } } return product; } // Driver Code public static void Main() { int n = 44; Console.Write(productPrimeFactors(n)); }} // This code is contributed by nitin mittal",
"e": 28709,
"s": 27660,
"text": null
},
{
"code": "<?php// PHP program to find// product of unique// prime factors of a number function productPrimeFactors($n){ $product = 1; for ($i = 2; $i <= $n; $i++) { // Checking if 'i' is // factor of num if ($n % $i == 0) { // Checking if 'i' is // a Prime number $isPrime = true; for ($j = 2; $j <= $i / 2; $j++) { if ($i % $j == 0) { $isPrime = false; break; } } // condition if 'i' is // Prime number as well // as factor of num if ($isPrime) { $product = $product * $i; } } } return $product;} // Driver Code$n = 44;echo(productPrimeFactors($n)); // This code is contributed by Ajit.?>",
"e": 29602,
"s": 28709,
"text": null
},
{
"code": "<script> // JavaScript program to find product of// unique prime factors of a number. function productPrimeFactors(n) { let product = 1; for (let i = 2; i <= n; i++) { // Checking if 'i' is factor of num if (n % i == 0) { // Checking if 'i' is a Prime number let isPrime = true; for (let j = 2; j <= i / 2; j++) { if (i % j == 0) { isPrime = false; break; } } // condition if 'i' is Prime number // as well as factor of num if (isPrime) { product = product * i; } } } return product; } // Driver Code let n = 44; document.write(productPrimeFactors(n)); // This code is contributed by avijitmondal1998.</script>",
"e": 30531,
"s": 29602,
"text": null
},
{
"code": null,
"e": 30534,
"s": 30531,
"text": "22"
},
{
"code": null,
"e": 30642,
"s": 30536,
"text": "Method 2 (Efficient) The idea is based on Efficient program to print all prime factors of a given number "
},
{
"code": null,
"e": 30646,
"s": 30642,
"text": "CPP"
},
{
"code": null,
"e": 30651,
"s": 30646,
"text": "Java"
},
{
"code": null,
"e": 30659,
"s": 30651,
"text": "Python3"
},
{
"code": null,
"e": 30662,
"s": 30659,
"text": "C#"
},
{
"code": null,
"e": 30666,
"s": 30662,
"text": "PHP"
},
{
"code": null,
"e": 30677,
"s": 30666,
"text": "Javascript"
},
{
"code": "// C++ program to find product of// unique prime factors of a number#include <bits/stdc++.h>using namespace std; // A function to print all prime// factors of a given number nlong long int productPrimeFactors(int n){ long long int product = 1; // Handle prime factor 2 explicitly // so that can optimally handle // other prime factors. if (n % 2 == 0) { product *= 2; while (n % 2 == 0) n = n / 2; } // n must be odd at this point. // So we can skip one element // (Note i = i + 2) for (int i = 3; i <= sqrt(n); i = i + 2) { // While i divides n, print // i and divide n if (n % i == 0) { product = product * i; while (n % i == 0) n = n / i; } } // This condition is to handle the // case when n is a prime number // greater than 2 if (n > 2) product = product * n; return product;} // Driver Codeint main(){ int n = 44; cout << productPrimeFactors(n); return 0;}",
"e": 31699,
"s": 30677,
"text": null
},
{
"code": "// Java program to find product of// unique prime factors of a number.import java.util.*;import java.lang.*; class GFG { public static long productPrimeFactors(int n) { long product = 1; // Handle prime factor 2 // explicitly so that can // optimally handle other // prime factors. if (n % 2 == 0) { product *= 2; while (n % 2 == 0) n = n / 2; } // n must be odd at this point. // So we can skip one element // (Note i = i +2) for (int i = 3; i <= Math.sqrt(n); i = i + 2) { // While i divides n, print // i and divide n if (n % i == 0) { product = product * i; while (n % i == 0) n = n / i; } } // This condition is to handle // the case when n is a prime // number greater than 2 if (n > 2) product = product * n; return product; } // Driver Code public static void main(String[] args) { int n = 44; System.out.print(productPrimeFactors(n)); }} // This code is contributed by _omg",
"e": 32881,
"s": 31699,
"text": null
},
{
"code": "# Python program to find product of# unique prime factors of a number import math def productPrimeFactors(n): product = 1 # Handle prime factor 2 explicitly so that # can optimally handle other prime factors. if (n % 2 == 0): product *= 2 while (n % 2 == 0): n = n / 2 # n must be odd at this point. So we can # skip one element (Note i = i + 2) for i in range (3, int(math.sqrt(n))+1, 2): # While i divides n, print i and # divide n if (n % i == 0): product = product * i while (n % i == 0): n = n / i # This condition is to handle the case when n # is a prime number greater than 2 if (n > 2): product = product * n return product # main()n = 44print (int(productPrimeFactors(n))) # Contributed by _omg",
"e": 33761,
"s": 32881,
"text": null
},
{
"code": "// C# program to find product// of unique prime factors// of a number.using System; public class GFG { // Function to find product // of prime factors public static long productPrimeFactors(int n) { long product = 1; // Handle prime factor 2 explicitly // so that can optimally handle // other prime factors. if (n % 2 == 0) { product *= 2; while (n % 2 == 0) n = n / 2; } // n must be odd at this point. // So we can skip one // element (Note i = i + 2) for (int i = 3; i <= Math.Sqrt(n); i = i + 2) { // While i divides n, print // i and divide n if (n % i == 0) { product = product * i; while (n % i == 0) n = n / i; } } // This condition is to handle // the case when n is a prime // number greater than 2 if (n > 2) product = product * n; return product; } // Driver Code public static void Main(String[] args) { int n = 44; Console.Write(productPrimeFactors(n)); }} // This code is contributed by parashar...",
"e": 34989,
"s": 33761,
"text": null
},
{
"code": "<?php// PHP program to find product of// unique prime factors of a number // A function to print all prime// factors of a given number nfunction productPrimeFactors( $n){ $product = 1; // Handle prime factor 2 // explicitly so that can // optimally handle other // prime factors. if ($n % 2 == 0) { $product *= 2; while ($n % 2 == 0) $n = $n / 2; } // n must be odd at this point. // So we can skip one element // (Note i = i + 2) for ($i = 3; $i <= sqrt($n); $i = $i + 2) { // While i divides n, print // i and divide n if ($n % $i == 0) { $product = $product * $i; while ($n % $i == 0) $n = $n / $i; } } // This condition is to handle the // case when n is a prime number // greater than 2 if ($n > 2) $product = $product * $n; return $product;} // Driver Code$n = 44;echo productPrimeFactors($n); // This code is contributed by ajit?>",
"e": 35993,
"s": 34989,
"text": null
},
{
"code": "<script> // Javascript program to find product of// unique prime factors of a number. function productPrimeFactors(n) { var product = 1; // Handle prime factor 2 // explicitly so that can // optimally handle other // prime factors. if (n % 2 == 0) { product *= 2; while (n % 2 == 0) n = n / 2; } // n must be odd at this point. // So we can skip one element // (Note i = i +2) for (i = 3; i <= Math.sqrt(n); i = i + 2) { // While i divides n, print // i and divide n if (n % i == 0) { product = product * i; while (n % i == 0) n = n / i; } } // This condition is to handle // the case when n is a prime // number greater than 2 if (n > 2) product = product * n; return product; } // Driver Code var n = 44; document.write(productPrimeFactors(n)); // This code contributed by aashish1995 </script>",
"e": 37093,
"s": 35993,
"text": null
},
{
"code": null,
"e": 37096,
"s": 37093,
"text": "22"
},
{
"code": null,
"e": 37107,
"s": 37098,
"text": "parashar"
},
{
"code": null,
"e": 37120,
"s": 37107,
"text": "nitin mittal"
},
{
"code": null,
"e": 37126,
"s": 37120,
"text": "jit_t"
},
{
"code": null,
"e": 37135,
"s": 37126,
"text": "_sachin_"
},
{
"code": null,
"e": 37152,
"s": 37135,
"text": "avijitmondal1998"
},
{
"code": null,
"e": 37164,
"s": 37152,
"text": "aashish1995"
},
{
"code": null,
"e": 37177,
"s": 37164,
"text": "Prime Number"
},
{
"code": null,
"e": 37190,
"s": 37177,
"text": "prime-factor"
},
{
"code": null,
"e": 37209,
"s": 37190,
"text": "school-programming"
},
{
"code": null,
"e": 37222,
"s": 37209,
"text": "Mathematical"
},
{
"code": null,
"e": 37235,
"s": 37222,
"text": "Mathematical"
},
{
"code": null,
"e": 37248,
"s": 37235,
"text": "Prime Number"
},
{
"code": null,
"e": 37346,
"s": 37248,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 37355,
"s": 37346,
"text": "Comments"
},
{
"code": null,
"e": 37368,
"s": 37355,
"text": "Old Comments"
},
{
"code": null,
"e": 37392,
"s": 37368,
"text": "Merge two sorted arrays"
},
{
"code": null,
"e": 37435,
"s": 37392,
"text": "Modulo Operator (%) in C/C++ with Examples"
},
{
"code": null,
"e": 37484,
"s": 37435,
"text": "Program to find sum of elements in a given array"
},
{
"code": null,
"e": 37505,
"s": 37484,
"text": "Operators in C / C++"
},
{
"code": null,
"e": 37539,
"s": 37505,
"text": "Program for factorial of a number"
},
{
"code": null,
"e": 37571,
"s": 37539,
"text": "Algorithm to solve Rubik's Cube"
},
{
"code": null,
"e": 37644,
"s": 37571,
"text": "Print all possible combinations of r elements in a given array of size n"
},
{
"code": null,
"e": 37687,
"s": 37644,
"text": "The Knight's tour problem | Backtracking-1"
},
{
"code": null,
"e": 37724,
"s": 37687,
"text": "Minimum number of jumps to reach end"
}
] |
LISP - Quick Guide
John McCarthy invented LISP in 1958, shortly after the development of FORTRAN. It was first implemented by Steve Russell on an IBM 704 computer.
It is particularly suitable for Artificial Intelligence programs, as it processes symbolic information effectively.
Common Lisp originated during the 1980s and 1990s as an attempt to unify the work of several implementation groups that were successors to Maclisp, such as ZetaLisp and NIL (New Implementation of Lisp).
It serves as a common language, which can be easily extended for specific implementation.
Programs written in Common LISP do not depend on machine-specific characteristics, such as word length etc.
It is machine-independent.
It uses an iterative design methodology and is easily extensible.
It allows updating the programs dynamically.
It provides high level debugging.
It provides advanced object-oriented programming.
It provides a convenient macro system.
It provides wide-ranging data types like objects, structures, lists, vectors, adjustable arrays, hash-tables, and symbols.
It is expression-based.
It provides an object-oriented condition system.
It provides a complete I/O library.
It provides extensive control structures.
Large successful applications built in Lisp include:
Emacs
G2
AutoCad
Igor Engraver
Yahoo Store
If you want to set up your environment for the Lisp programming language, you need the following two software packages available on your computer: (a) a text editor and (b) the Lisp executer.
This will be used to type your program. Examples of few editors include Windows Notepad, OS Edit command, Brief, Epsilon, EMACS, and vim or vi.
The name and version of the text editor can vary on different operating systems. For example, Notepad will be used on Windows, and vim or vi can be used on Windows as well as Linux or UNIX.
The files you create with your editor are called source files and contain program source code. The source files for Lisp programs are typically named with the extension ".lisp".
Before starting your programming, make sure you have one text editor in place and you have enough experience to write a computer program, save it in a file, finally execute it.
The source code written in source file is the human readable source for your program. It needs to be "executed", to turn into machine language so that your CPU can actually execute the program as per instructions given.
The Lisp executer will be used to execute your source code. I assume you have basic knowledge about a programming language.
CLISP is the GNU Common LISP multi-architectural compiler used for setting up LISP on Windows. The Windows version emulates a UNIX environment using MinGW under Windows. The installer takes care of this and automatically adds clisp to the Windows PATH variable.
You can get the latest CLISP for Windows from here - https://sourceforge.net/projects/clisp/files/latest/download
It creates a shortcut in the Start Menu by default, for the line-by-line interpreter.
During installation, clisp is automatically added to your PATH variable if you select that option (RECOMMENDED). This means that you can simply open a new Command Prompt window and type “clisp” to bring up the compiler.
To run a *.lisp or *.lsp file, simply use −
clisp hello.lisp
LISP expressions are called symbolic expressions or s-expressions. The s-expressions are composed of three valid objects, atoms, lists and strings.
Any s-expression is a valid program.
LISP programs run either on an interpreter or as compiled code.
The interpreter checks the source code in a repeated loop, which is also called the read-evaluate-print loop (REPL). It reads the program code, evaluates it, and prints the values returned by the program.
Let us write an s-expression to find the sum of three numbers 7, 9 and 11. To do this, we can type at the interpreter prompt.
(+ 7 9 11)
LISP returns the result −
27
If you would like to run the same program as a compiled code, then create a LISP source code file named myprog.lisp and type the following code in it.
(write (+ 7 9 11))
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −
27
You might have noted that LISP uses prefix notation.
In the above program the + symbol works as the function name for the process of summation of the numbers.
In prefix notation, operators are written before their operands. For example, the expression,
a * ( b + c ) / d
will be written as −
(/ (* a (+ b c) ) d)
Let us take another example; let us write code for converting a centigrade temperature of 60°C to the Fahrenheit scale −
The mathematical expression for this conversion will be −
(60 * 9 / 5) + 32
Create a source code file named main.lisp and type the following code in it.
(write(+ (* (/ 9 5) 60) 32))
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is−
140
Evaluation of LISP programs has two parts −
Translation of program text into Lisp objects by a reader program
Implementation of the semantics of the language in terms of these objects by an evaluator program
The evaluation process takes the following steps −
The reader translates the strings of characters to LISP objects or s-expressions.
The evaluator defines syntax of Lisp forms that are built from s-expressions. This second level of evaluation defines a syntax that determines which s-expressions are LISP forms.
The evaluator works as a function that takes a valid LISP form as an argument and returns a value. This is the reason why we put the LISP expression in parenthesis, because we are sending the entire expression/form to the evaluator as arguments.
Learning a new programming language doesn't really take off until you learn how to greet the entire world in that language, right!
So, please create new source code file named main.lisp and type the following code in it.
(write-line "Hello World")
(write-line "I am at 'Tutorials Point'! Learning LISP")
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −
Hello World
I am at 'Tutorials Point'! Learning LISP
LISP programs are made up of three basic building blocks −
atom
list
string
An atom is a number or string of contiguous characters. It includes numbers and special characters.
Following are examples of some valid atoms −
hello-from-tutorials-point
name
123008907
*hello*
Block#221
abc123
A list is a sequence of atoms and/or other lists enclosed in parentheses.
Following are examples of some valid lists −
( i am a list)
(a ( a b c) d e fgh)
(father tom ( susan bill joe))
(sun mon tue wed thur fri sat)
( )
A string is a group of characters enclosed in double quotation marks.
Following are examples of some valid strings −
" I am a string"
"a ba c d efg #$%^&!"
"Please enter the following details :"
"Hello from 'Tutorials Point'! "
The semicolon symbol (;) is used for indicating a comment line.
For Example,
(write-line "Hello World") ; greet the world
; tell them your whereabouts
(write-line "I am at 'Tutorials Point'! Learning LISP")
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −
Hello World
I am at 'Tutorials Point'! Learning LISP
Following are some of the important points to note −
The basic numeric operations in LISP are +, -, *, and /
LISP represents a function call f(x) as (f x); for example, cos(45) is written as (cos 45)
LISP expressions are case-insensitive; cos 45 or COS 45 are the same.
LISP tries to evaluate everything, including the arguments of a function. Only three types of elements are constants and always return their own value
Numbers
The letter t, that stands for logical true.
The value nil, that stands for logical false, as well as an empty list.
In the previous chapter, we mentioned that the evaluation process of LISP code takes the following steps.
The reader translates the strings of characters to LISP objects or s-expressions.
The evaluator defines syntax of Lisp forms that are built from s-expressions. This second level of evaluation defines a syntax that determines which s-expressions are LISP forms.
Now, a LISP form could be −
An Atom
An empty or non-empty list
Any list that has a symbol as its first element
The evaluator works as a function that takes a valid LISP form as an argument and returns a value. This is the reason why we put the LISP expression in parenthesis, because we are sending the entire expression/form to the evaluator as arguments.
Names or symbols can consist of any number of alphanumeric characters other than whitespace, open and closing parentheses, double and single quotes, backslash, comma, colon, semicolon and vertical bar. To use these characters in a name, you need to use the escape character (\).
A name can have digits but not entirely made of digits, because then it would be read as a number. Similarly a name can have periods, but can't be made entirely of periods.
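For instance, a minimal sketch (the name a b below is purely illustrative) showing how a backslash lets a symbol name contain a space:
(setq a\ b 10) ; the escaped space becomes part of the symbol's name
(write a\ b)   ; prints 10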
LISP evaluates everything including the function arguments and list members.
At times, we need to take atoms or lists literally and don't want them evaluated or treated as function calls.
To do this, we need to precede the atom or the list with a single quotation mark.
The following example demonstrates this.
Create a file named main.lisp and type the following code into it.
(write-line "single quote used, it inhibits evaluation")
(write '(* 2 3))
(write-line " ")
(write-line "single quote not used, so expression evaluated")
(write (* 2 3))
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −
single quote used, it inhibits evaluation
(* 2 3)
single quote not used, so expression evaluated
6
In LISP, variables are not typed, but data objects are.
LISP data types can be categorized as.
Scalar types − for example, number types, characters, symbols etc.
Data structures − for example, lists, vectors, bit-vectors, and strings.
Any variable can take any LISP object as its value, unless you have declared it explicitly.
Although, it is not necessary to specify a data type for a LISP variable, however, it helps in certain loop expansions, in method declarations and some other situations that we will discuss in later chapters.
The data types are arranged into a hierarchy. A data type is a set of LISP objects and many objects may belong to one such set.
The typep predicate is used for finding whether an object belongs to a specific type.
The type-of function returns the data type of a given object.
Type specifiers are system-defined symbols for data types.
Apart from these system-defined types, you can create your own data types. When a structure type is defined using defstruct function, the name of the structure type becomes a valid type symbol.
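For instance, a minimal sketch of the typep predicate with some built-in type specifiers:
(write (typep 10 'integer))   ; T
(terpri)
(write (typep 10.5 'integer)) ; NIL
(terpri)
(write (typep "hello" 'string)) ; T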
Create new source code file named main.lisp and type the following code in it.
(setq x 10)
(setq y 34.567)
(setq ch nil)
(setq n 123.78)
(setq bg 11.0e+4)
(setq r 124/2)
(print x)
(print y)
(print n)
(print ch)
(print bg)
(print r)
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −
10
34.567
123.78
NIL
110000.0
62
Next let's check the types of the variables used in the previous example. Create new source code file named main.lisp and type the following code in it.
(defvar x 10)
(defvar y 34.567)
(defvar ch nil)
(defvar n 123.78)
(defvar bg 11.0e+4)
(defvar r 124/2)
(print (type-of x))
(print (type-of y))
(print (type-of n))
(print (type-of ch))
(print (type-of bg))
(print (type-of r))
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −
(INTEGER 0 281474976710655)
SINGLE-FLOAT
SINGLE-FLOAT
NULL
SINGLE-FLOAT
(INTEGER 0 281474976710655)
Macros allow you to extend the syntax of standard LISP.
Technically, a macro is a function that takes an s-expression as arguments and returns a LISP form, which is then evaluated.
In LISP, a named macro is defined using another macro named defmacro. Syntax for defining a macro is −
(defmacro macro-name (parameter-list)
"Optional documentation string."
body-form)
The macro definition consists of the name of the macro, a parameter list, an optional documentation string, and a body of Lisp expressions that defines the job to be performed by the macro.
Let us write a simple macro named setTo10, which will take a number and set its value to 10.
Create new source code file named main.lisp and type the following code in it.
(defmacro setTo10(num)
(setq num 10)(print num))
(setq x 25)
(print x)
(setTo10 x)
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −
25
10
In LISP, each variable is represented by a symbol. The variable's name is the name of the symbol and it is stored in the storage cell of the symbol.
Global variables have permanent values throughout the LISP system and remain in effect until a new value is specified.
Global variables are generally declared using the defvar construct.
(defvar x 234)
(write x)
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is
234
Since there is no type declaration for variables in LISP, you directly specify a value for a symbol with the setq construct.
->(setq x 10)
The above expression assigns the value 10 to the variable x. You can refer to the variable using the symbol itself as an expression.
The symbol-value function allows you to extract the value stored at the symbol storage place.
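For example, a minimal sketch using symbol-value:
(setq x 10)
(write (symbol-value 'x)) ; prints 10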
Create new source code file named main.lisp and type the following code in it.
(setq x 10)
(setq y 20)
(format t "x = ~2d y = ~2d ~%" x y)
(setq x 100)
(setq y 200)
(format t "x = ~2d y = ~2d" x y)
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is.
x = 10 y = 20
x = 100 y = 200
Local variables are defined within a given procedure. The parameters named as arguments within a function definition are also local variables. Local variables are accessible only within the respective function.
Like the global variables, local variables can also be created using the setq construct.
There are two other constructs - let and prog for creating local variables.
The let construct has the following syntax.
(let ((var1 val1) (var2 val2).. (varn valn))<s-expressions>)
Where var1, var2, ..varn are variable names and val1, val2, .. valn are the initial values assigned to the respective variables.
When let is executed, each variable is assigned the respective value and lastly the s-expression is evaluated. The value of the last expression evaluated is returned.
If you don't include an initial value for a variable, it is assigned to nil.
Create new source code file named main.lisp and type the following code in it.
(let ((x 'a) (y 'b)(z 'c))
(format t "x = ~a y = ~a z = ~a" x y z))
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is.
x = A y = B z = C
The prog construct also has the list of local variables as its first argument, which is followed by the body of the prog, and any number of s-expressions.
The prog function executes the list of s-expressions in sequence and returns nil unless it encounters a function call named return. Then the argument of the return function is evaluated and returned.
Create new source code file named main.lisp and type the following code in it.
(prog ((x '(a b c))(y '(1 2 3))(z '(p q 10)))
(format t "x = ~a y = ~a z = ~a" x y z))
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is.
x = (A B C) y = (1 2 3) z = (P Q 10)
In LISP, constants are variables that never change their values during program execution. Constants are declared using the defconstant construct.
The following example shows declaring a global constant PI and later using this value inside a function named area-circle that calculates the area of a circle.
The defun construct is used for defining a function, we will look into it in the Functions chapter.
Create a new source code file named main.lisp and type the following code in it.
(defconstant PI 3.141592)
(defun area-circle(rad)
(terpri)
(format t "Radius: ~5f" rad)
(format t "~%Area: ~10f" (* PI rad rad)))
(area-circle 10)
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is.
Radius: 10.0
Area: 314.1592
An operator is a symbol that tells the compiler to perform specific mathematical or logical manipulations. LISP allows numerous operations on data, supported by various functions, macros and other constructs.
The operations allowed on data could be categorized as −
Arithmetic Operations
Comparison Operations
Logical Operations
Bitwise Operations
The following table shows all the arithmetic operators supported by LISP. Assume variable A holds 10 and variable B holds 20 then −
Show Examples
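For instance, with variables a and b holding 10 and 20 as assumed above, a minimal sketch of the basic arithmetic functions:
(setq a 10)
(setq b 20)
(write (+ a b))   ; 30
(terpri)
(write (- b a))   ; 10
(terpri)
(write (* a b))   ; 200
(terpri)
(write (/ b a))   ; 2
(terpri)
(write (mod b 3)) ; 2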
Following table shows all the relational operators supported by LISP that compares between numbers. However unlike relational operators in other languages, LISP comparison operators may take more than two operands and they work on numbers only.
Assume variable A holds 10 and variable B holds 20, then −
Show Examples
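For instance, with a holding 10 and b holding 20, a minimal sketch of the comparison functions (note that they can take more than two operands):
(setq a 10)
(setq b 20)
(write (= a b))     ; NIL
(terpri)
(write (/= a b))    ; T
(terpri)
(write (< a b))     ; T
(terpri)
(write (>= b a))    ; T
(terpri)
(write (< 1 2 3 4)) ; T, the operands are strictly increasing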
Common LISP provides three logical operators: and, or, and not that operates on Boolean values. Assume A has value nil and B has value 5, then −
Show Examples
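For instance, with A bound to nil and B bound to 5 as assumed above, a minimal sketch:
(setq a nil)
(setq b 5)
(write (and a b)) ; NIL
(terpri)
(write (or a b))  ; 5, the first non-nil value
(terpri)
(write (not a))   ; T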
Bitwise operators work on bits and perform bit-by-bit operation. The truth tables for bitwise and, or, and xor operations are as follows −
Show Examples
Assume A = 60 and B = 13; in binary format, they will be as follows:
A = 0011 1100
B = 0000 1101
-----------------
A and B = 0000 1100
A or B = 0011 1101
A xor B = 0011 0001
not A = 1100 0011
The Bitwise operators supported by LISP are listed in the following table. Assume variable A holds 60 and variable B holds 13, then −
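For instance, a minimal sketch using the bitwise functions logand, logior, logxor and lognot with A = 60 and B = 13, matching the bit patterns shown above:
(setq a 60)
(setq b 13)
(write (logand a b)) ; 12, i.e. 0000 1100
(terpri)
(write (logior a b)) ; 61, i.e. 0011 1101
(terpri)
(write (logxor a b)) ; 49, i.e. 0011 0001
(terpri)
(write (lognot a))   ; -61, the two's complement reading of 1100 0011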
Decision making structures require that the programmer specify one or more conditions to be evaluated or tested by the program, along with a statement or statements to be executed if the condition is determined to be true, and optionally, other statements to be executed if the condition is determined to be false.
LISP provides following types of decision making constructs. Click the following links to check their detail.
cond
This construct is used for checking multiple test-action clauses. It can be compared to the nested if statements in other programming languages.
if
The if construct has various forms. In its simplest form, it is followed by a test clause, a test action, and some other consequent action(s). If the test clause evaluates to true, then the test action is executed; otherwise, the consequent clause is evaluated.
when
In its simplest form, it is followed by a test clause and one or more test actions. If the test clause evaluates to true, then the test actions are executed; otherwise, nil is returned.
case
This construct implements multiple test-action clauses like the cond construct. However, it evaluates a key form and allows multiple action clauses based on the evaluation of that key form.
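A minimal sketch showing these constructs in use (the variable x is purely illustrative):
(setq x 10)
(cond ((> x 20) (write-line "x is greater than 20"))
      (t (write-line "x is not greater than 20")))
(if (evenp x)
   (write-line "x is even")
   (write-line "x is odd"))
(when (> x 5)
   (write-line "x is greater than 5"))
(case x
   (5 (write-line "x is five"))
   (10 (write-line "x is ten"))
   (otherwise (write-line "x is something else")))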
There may be a situation when you need to execute a block of code a number of times. A loop statement allows us to execute a statement or group of statements multiple times.
LISP provides the following types of constructs to handle looping requirements. Click the following links to check their detail.
The loop construct is the simplest form of iteration provided by LISP. In its simplest form, it allows you to execute some statement(s) repeatedly until it finds a return statement.
The loop for construct allows you to implement a for-loop like iteration as most common in other languages.
The do construct is also used for performing iteration using LISP. It provides a structured form of iteration.
The dotimes construct allows looping for some fixed number of iterations.
The dolist construct allows iteration through each element of a list.
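A minimal sketch of the loop, dotimes, and dolist constructs:
(setq i 0)
(loop
   (setq i (+ i 1))
   (print i)
   (when (> i 3) (return))) ; prints 1 2 3 4
(dotimes (n 3)
   (print n))               ; prints 0 1 2
(dolist (item '(a b c))
   (print item))            ; prints A B C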
The block and return-from constructs allow you to exit gracefully from any nested block in case of an error.
The block function allows you to create a named block with a body composed of zero or more statements. Syntax is −
(block block-name
...
...
)
The return-from function takes a block name and an optional (the default is nil) return value.
The following example demonstrates this −
Create a new source code file named main.lisp and type the following code in it −
(defun demo-function (flag)
(print 'entering-outer-block)
(block outer-block
(print 'entering-inner-block)
(print (block inner-block
(if flag
(return-from outer-block 3)
(return-from inner-block 5)
)
(print 'This-will-not-be-printed))
)
(print 'left-inner-block)
(print 'leaving-outer-block)
t)
)
(demo-function t)
(terpri)
(demo-function nil)
When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −
ENTERING-OUTER-BLOCK
ENTERING-INNER-BLOCK
ENTERING-OUTER-BLOCK
ENTERING-INNER-BLOCK
5
LEFT-INNER-BLOCK
LEAVING-OUTER-BLOCK
A function is a group of statements that together perform a task.
You can divide up your code into separate functions. How you divide up your code among different functions is up to you, but logically the division usually is so each function performs a specific task.
The macro named defun is used for defining functions. The defun macro needs three arguments −
Name of the function
Parameters of the function
Body of the function
Syntax for defun is −
(defun name (parameter-list) "Optional documentation string." body)
Let us illustrate the concept with simple examples.
Let's write a function named averagenum that will print the average of four numbers. We will send these numbers as parameters.
Create a new source code file named main.lisp and type the following code in it.
(defun averagenum (n1 n2 n3 n4)
(/ ( + n1 n2 n3 n4) 4)
)
(write(averagenum 10 20 30 40))
When you execute the code, it returns the following result −
25
Let's define and call a function that would calculate the area of a circle when the radius of the circle is given as an argument.
Create a new source code file named main.lisp and type the following code in it.
(defun area-circle(rad)
"Calculates area of a circle with given radius"
(terpri)
(format t "Radius: ~5f" rad)
(format t "~%Area: ~10f" (* 3.141592 rad rad))
)
(area-circle 10)
When you execute the code, it returns the following result −
Radius: 10.0
Area: 314.1592
Please note that −
You can provide an empty list as parameters, which means the function takes no arguments, the list is empty, written as ().
LISP also allows optional, multiple, and keyword arguments.
The documentation string describes the purpose of the function. It is associated with the name of the function and can be obtained using the documentation function.
The body of the function may consist of any number of Lisp expressions.
The value of the last expression in the body is returned as the value of the function.
You can also return a value from the function using the return-from special operator.
Let us discuss the above concepts in brief. Click following links to find details −
Optional Parameters
Rest Parameters
Keyword Parameters
Returning Values from a Function
Lambda Functions
Mapping Functions
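A minimal sketch of these parameter styles and function forms (the function names used here, such as show-list, collect, show-size and first-even, are purely illustrative):
(defun show-list (a &optional b) (write (list a b)))
(show-list 1 2)                 ; (1 2)
(terpri)
(show-list 1)                   ; (1 NIL)
(terpri)
(defun collect (&rest values) (write values))
(collect 1 2 3)                 ; (1 2 3)
(terpri)
(defun show-size (&key width height) (write (list width height)))
(show-size :width 10 :height 5) ; (10 5)
(terpri)
(defun first-even (lst)
   (dolist (n lst)
      (if (evenp n) (return-from first-even n))))
(write (first-even '(1 3 4 5))) ; 4
(terpri)
(write (mapcar (lambda (x) (* x x)) '(1 2 3 4))) ; (1 4 9 16)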
Predicates are functions that test their arguments for some specific conditions and return nil if the condition is false, or some non-nil value if the condition is true.
The following table shows some of the most commonly used predicates −
atom
It takes one argument and returns t if the argument is an atom or nil if otherwise.
equal
It takes two arguments and returns t if they are structurally equal or nil otherwise.
eq
It takes two arguments and returns t if they are same identical objects, sharing the same memory location or nil otherwise.
eql
It takes two arguments and returns t if the arguments are eq, or if they are numbers of the same type with the same value, or if they are character objects that represent the same character, or nil otherwise.
evenp
It takes one numeric argument and returns t if the argument is even number or nil if otherwise.
oddp
It takes one numeric argument and returns t if the argument is odd number or nil if otherwise.
zerop
It takes one numeric argument and returns t if the argument is zero or nil if otherwise.
null
It takes one argument and returns t if the argument evaluates to nil, otherwise it returns nil.
listp
It takes one argument and returns t if the argument evaluates to a list otherwise it returns nil.
greaterp
It takes one or more argument and returns t if either there is a single argument or the arguments are successively larger from left to right, or nil if otherwise.
lessp
It takes one or more argument and returns t if either there is a single argument or the arguments are successively smaller from left to right, or nil if otherwise.
numberp
It takes one argument and returns t if the argument is a number or nil if otherwise.
symbolp
It takes one argument and returns t if the argument is a symbol otherwise it returns nil.
integerp
It takes one argument and returns t if the argument is an integer otherwise it returns nil.
rationalp
It takes one argument and returns t if the argument is a rational number (either a ratio or an integer); otherwise it returns nil.
floatp
It takes one argument and returns t if the argument is a floating point number otherwise it returns nil.
realp
It takes one argument and returns t if the argument is a real number otherwise it returns nil.
complexp
It takes one argument and returns t if the argument is a complex number otherwise it returns nil.
characterp
It takes one argument and returns t if the argument is a character otherwise it returns nil.
stringp
It takes one argument and returns t if the argument is a string object otherwise it returns nil.
arrayp
It takes one argument and returns t if the argument is an array object otherwise it returns nil.
packagep
It takes one argument and returns t if the argument is a package otherwise it returns nil.
Create a new source code file named main.lisp and type the following code in it.
(write (atom 'abcd))
(terpri)
(write (equal 'a 'b))
(terpri)
(write (evenp 10))
(terpri)
(write (evenp 7 ))
(terpri)
(write (oddp 7 ))
(terpri)
(write (zerop 0.0000000001))
(terpri)
(write (eq 3 3.0 ))
(terpri)
(write (equal 3 3.0 ))
(terpri)
(write (null nil ))
When you execute the code, it returns the following result −
T
NIL
T
NIL
T
NIL
NIL
NIL
T
Create a new source code file named main.lisp and type the following code in it.
(defun factorial (num)
(cond ((zerop num) 1)
(t ( * num (factorial (- num 1))))
)
)
(setq n 6)
(format t "~% Factorial ~d is: ~d" n (factorial n))
When you execute the code, it returns the following result −
Factorial 6 is: 720
Common Lisp defines several kinds of numbers. The number data type includes various kinds of numbers supported by LISP.
The number types supported by LISP are −
Integers
Ratios
Floating-point numbers
Complex numbers
The following table describes various number type data available in LISP −
fixnum
This data type represents integers which are not too large, mostly in the range -2^15 to 2^15-1 (it is machine-dependent)
bignum
These are very large numbers with size limited by the amount of memory allocated for LISP, they are not fixnum numbers.
ratio
Represents the ratio of two numbers in the numerator/denominator form. The / function always produces the result in ratios when its arguments are integers.
float
It represents non-integer numbers. There are four float data types with increasing precision.
complex
It represents complex numbers, which are denoted by #c. The real and imaginary parts could be both either rational or floating point numbers.
Create a new source code file named main.lisp and type the following code in it.
(write (/ 1 2))
(terpri)
(write ( + (/ 1 2) (/ 3 4)))
(terpri)
(write ( + #c( 1 2) #c( 3 -4)))
When you execute the code, it returns the following result −
1/2
5/4
#C(4 -2)
The following table describes some commonly used numeric functions −
+, -, *, /
Respective arithmetic operations
sin, cos, tan, acos, asin, atan
Respective trigonometric functions.
sinh, cosh, tanh, acosh, asinh, atanh
Respective hyperbolic functions.
exp
Exponentiation function. Calculates e^x
expt
Exponentiation function, takes base and power both.
sqrt
It calculates the square root of a number.
log
Logarithmic function. If one parameter is given, then it calculates its natural logarithm; otherwise the second parameter is used as the base.
conjugate
It calculates the complex conjugate of a number. In case of a real number, it returns the number itself.
abs
It returns the absolute value (or magnitude) of a number.
gcd
It calculates the greatest common divisor of the given numbers.
lcm
It calculates the least common multiple of the given numbers.
isqrt
It gives the greatest integer less than or equal to the exact square root of a given natural number.
floor, ceiling, truncate, round
All these functions take two arguments, a number and a divisor, and return the quotient; floor returns the largest integer that is not greater than the ratio, ceiling chooses the smallest integer that is not less than the ratio, truncate chooses the integer of the same sign as the ratio with the largest absolute value that is less than the absolute value of the ratio, and round chooses the integer that is closest to the ratio.
ffloor, fceiling, ftruncate, fround
Does the same as above, but returns the quotient as a floating point number.
mod, rem
Returns the remainder in a division operation.
float
Converts a real number to a floating point number.
rational, rationalize
Converts a real number to rational number.
numerator, denominator
Returns the respective parts of a rational number.
realpart, imagpart
Returns the real and imaginary part of a complex number.
Create a new source code file named main.lisp and type the following code in it.
(write (/ 45 78))
(terpri)
(write (floor 45 78))
(terpri)
(write (/ 3456 75))
(terpri)
(write (floor 3456 75))
(terpri)
(write (ceiling 3456 75))
(terpri)
(write (truncate 3456 75))
(terpri)
(write (round 3456 75))
(terpri)
(write (ffloor 3456 75))
(terpri)
(write (fceiling 3456 75))
(terpri)
(write (ftruncate 3456 75))
(terpri)
(write (fround 3456 75))
(terpri)
(write (mod 3456 75))
(terpri)
(setq c (complex 6 7))
(write c)
(terpri)
(write (complex 5 -9))
(terpri)
(write (realpart c))
(terpri)
(write (imagpart c))
When you execute the code, it returns the following result −
15/26
0
1152/25
46
47
46
46
46.0
47.0
46.0
46.0
6
#C(6 7)
#C(5 -9)
6
7
In LISP, characters are represented as data objects of type character.
You can denote a character object by placing #\ before the character itself. For example, #\a means the character a.
Space and other special characters can be denoted by placing #\ before the name of the character. For example, #\SPACE represents the space character.
The following example demonstrates this −
Create a new source code file named main.lisp and type the following code in it.
(write 'a)
(terpri)
(write #\a)
(terpri)
(write-char #\a)
(terpri)
(write-char 'a)
When you execute the code, it returns the following result −
A
#\a
a
*** - WRITE-CHAR: argument A is not a character
Common LISP allows using the following special characters in your code. They are called the semi-standard characters.
#\Backspace
#\Tab
#\Linefeed
#\Page
#\Return
#\Rubout
Numeric comparison functions and operators, like < and >, do not work on characters. Common LISP provides two other sets of functions for comparing characters in your code.
One set is case-sensitive and the other case-insensitive.
The following table provides the functions −
Create a new source code file named main.lisp and type the following code in it.
; case-sensitive comparison
(write (char= #\a #\b))
(terpri)
(write (char= #\a #\a))
(terpri)
(write (char= #\a #\A))
(terpri)
;case-insensitive comparision
(write (char-equal #\a #\A))
(terpri)
(write (char-equal #\a #\b))
(terpri)
(write (char-lessp #\a #\b #\c))
(terpri)
(write (char-greaterp #\a #\b #\c))
When you execute the code, it returns the following result −
NIL
T
NIL
T
NIL
T
NIL
LISP allows you to define single or multiple-dimension arrays using the make-array function. An array can store any LISP object as its elements.
All arrays consist of contiguous memory locations. The lowest address corresponds to the first element and the highest address to the last element.
The number of dimensions of an array is called its rank.
In LISP, an array element is specified by a sequence of non-negative integer indices. The length of the sequence must equal the rank of the array. Indexing starts from zero.
For example, to create an array with 10 cells, named my-array, we can write −
(setf my-array (make-array '(10)))
The aref function allows accessing the contents of the cells. It takes two arguments, the name of the array and the index value.
For example, to access the content of the tenth cell, we write −
(aref my-array 9)
Create a new source code file named main.lisp and type the following code in it.
(write (setf my-array (make-array '(10))))
(terpri)
(setf (aref my-array 0) 25)
(setf (aref my-array 1) 23)
(setf (aref my-array 2) 45)
(setf (aref my-array 3) 10)
(setf (aref my-array 4) 20)
(setf (aref my-array 5) 17)
(setf (aref my-array 6) 25)
(setf (aref my-array 7) 19)
(setf (aref my-array 8) 67)
(setf (aref my-array 9) 30)
(write my-array)
When you execute the code, it returns the following result −
#(NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL)
#(25 23 45 10 20 17 25 19 67 30)
Let us create a 3-by-3 array.
Create a new source code file named main.lisp and type the following code in it.
(setf x (make-array '(3 3)
:initial-contents '((0 1 2 ) (3 4 5) (6 7 8)))
)
(write x)
When you execute the code, it returns the following result −
#2A((0 1 2) (3 4 5) (6 7 8))
Create a new source code file named main.lisp and type the following code in it.
(setq a (make-array '(4 3)))
(dotimes (i 4)
(dotimes (j 3)
(setf (aref a i j) (list i 'x j '= (* i j)))
)
)
(dotimes (i 4)
(dotimes (j 3)
(print (aref a i j))
)
)
When you execute the code, it returns the following result −
(0 X 0 = 0)
(0 X 1 = 0)
(0 X 2 = 0)
(1 X 0 = 0)
(1 X 1 = 1)
(1 X 2 = 2)
(2 X 0 = 0)
(2 X 1 = 2)
(2 X 2 = 4)
(3 X 0 = 0)
(3 X 1 = 3)
(3 X 2 = 6)
The make-array function takes many other arguments. Let us look at the complete syntax of this function −
make-array dimensions :element-type :initial-element :initial-contents :adjustable :fill-pointer :displaced-to :displaced-index-offset
Apart from the dimensions argument, all other arguments are keywords. The following table provides brief description of the arguments.
dimensions
It gives the dimensions of the array. It is a number for one-dimensional array, and a list for multi-dimensional array.
:element-type
It is the type specifier, default value is T, i.e. any type
:initial-element
Initial elements value. It will make an array with all the elements initialized to a particular value.
:initial-contents
Initial contents as an object.
:adjustable
It helps in creating a resizeable (or adjustable) vector whose underlying memory can be resized. The argument is a Boolean value indicating whether the array is adjustable or not, default value being NIL.
:fill-pointer
It keeps track of the number of elements actually stored in a resizeable vector.
:displaced-to
It helps in creating a displaced array or shared array that shares its contents with the specified array. Both the arrays should have same element type. The :displaced-to option may not be used with the :initial-element or :initial-contents option. This argument defaults to nil.
:displaced-index-offset
It gives the index-offset of the created shared array.
Create a new source code file named main.lisp and type the following code in it.
(setq myarray (make-array '(3 2 3)
:initial-contents
'(((a b c) (1 2 3))
((d e f) (4 5 6))
((g h i) (7 8 9))
))
)
(setq array2 (make-array 4 :displaced-to myarray :displaced-index-offset 2))
(write myarray)
(terpri)
(write array2)
When you execute the code, it returns the following result −
#3A(((A B C) (1 2 3)) ((D E F) (4 5 6)) ((G H I) (7 8 9)))
#(C 1 2 3)
If the displaced array is two dimensional −
(setq myarray (make-array '(3 2 3)
:initial-contents
'(((a b c) (1 2 3))
((d e f) (4 5 6))
((g h i) (7 8 9))
))
)
(setq array2 (make-array '(3 2) :displaced-to myarray :displaced-index-offset 2))
(write myarray)
(terpri)
(write array2)
When you execute the code, it returns the following result −
#3A(((A B C) (1 2 3)) ((D E F) (4 5 6)) ((G H I) (7 8 9)))
#2A((C 1) (2 3) (D E))
Let's change the displaced index offset to 5 −
(setq myarray (make-array '(3 2 3)
:initial-contents
'(((a b c) (1 2 3))
((d e f) (4 5 6))
((g h i) (7 8 9))
))
)
(setq array2 (make-array '(3 2) :displaced-to myarray :displaced-index-offset 5))
(write myarray)
(terpri)
(write array2)
When you execute the code, it returns the following result −
#3A(((A B C) (1 2 3)) ((D E F) (4 5 6)) ((G H I) (7 8 9)))
#2A((3 D) (E F) (4 5))
Create a new source code file named main.lisp and type the following code in it.
;a one dimensional array with 5 elements,
;initial value 5
(write (make-array 5 :initial-element 5))
(terpri)
;two dimensional array, with initial element a
(write (make-array '(2 3) :initial-element 'a))
(terpri)
;an array of capacity 14, but with fill pointer 5, so its length is 5
(write(length (make-array 14 :fill-pointer 5)))
(terpri)
;however its dimension is 14
(write (array-dimensions (make-array 14 :fill-pointer 5)))
(terpri)
; a bit array with all initial elements set to 1
(write(make-array 10 :element-type 'bit :initial-element 1))
(terpri)
; a character array with all initial elements set to a
; is a string actually
(write(make-array 10 :element-type 'character :initial-element #\a))
(terpri)
; a two dimensional array with initial values a
(setq myarray (make-array '(2 2) :initial-element 'a :adjustable t))
(write myarray)
(terpri)
;readjusting the array
(adjust-array myarray '(1 3) :initial-element 'b)
(write myarray)
When you execute the code, it returns the following result −
#(5 5 5 5 5)
#2A((A A A) (A A A))
5
(14)
#*1111111111
"aaaaaaaaaa"
#2A((A A) (A A))
#2A((A A B))
Strings in Common Lisp are vectors, i.e., one-dimensional array of characters.
String literals are enclosed in double quotes. Any character supported by the character set can be enclosed within double quotes to make a string, except the double quote character (") and the escape character (\). However, you can include these by escaping them with a backslash (\).
Create a new source code file named main.lisp and type the following code in it.
(write-line "Hello World")
(write-line "Welcome to Tutorials Point")
;escaping the double quote character
(write-line "Welcome to \"Tutorials Point\"")
When you execute the code, it returns the following result −
Hello World
Welcome to Tutorials Point
Welcome to "Tutorials Point"
Numeric comparison functions and operators, like < and >, do not work on strings. Common LISP provides two other sets of functions for comparing strings in your code. One set is case-sensitive and the other case-insensitive.
The following table provides the functions −
Create a new source code file named main.lisp and type the following code in it.
; case-sensitive comparison
(write (string= "this is test" "This is test"))
(terpri)
(write (string> "this is test" "This is test"))
(terpri)
(write (string< "this is test" "This is test"))
(terpri)
;case-insensitive comparision
(write (string-equal "this is test" "This is test"))
(terpri)
(write (string-greaterp "this is test" "This is test"))
(terpri)
(write (string-lessp "this is test" "This is test"))
(terpri)
;checking non-equal
(write (string/= "this is test" "this is Test"))
(terpri)
(write (string-not-equal "this is test" "This is test"))
(terpri)
(write (string/= "lisp" "lisping"))
(terpri)
(write (string/= "decent" "decency"))
When you execute the code, it returns the following result −
NIL
0
NIL
T
NIL
NIL
8
NIL
4
5
The following table describes the case controlling functions −
string-upcase
Converts the string to upper case
string-downcase
Converts the string to lower case
string-capitalize
Capitalizes each word in the string
Create a new source code file named main.lisp and type the following code in it.
(write-line (string-upcase "a big hello from tutorials point"))
(write-line (string-capitalize "a big hello from tutorials point"))
When you execute the code, it returns the following result −
A BIG HELLO FROM TUTORIALS POINT
A Big Hello From Tutorials Point
The following table describes the string trimming functions −
string-trim
It takes a string of character(s) as first argument and a string as the second argument and returns a substring where all characters that are in the first argument are removed off the argument string.
string-left-trim
It takes a string of character(s) as first argument and a string as the second argument and returns a substring where all characters that are in the first argument are removed off the beginning of the argument string.
string-right-trim
It takes a string of character(s) as first argument and a string as the second argument and returns a substring where all characters that are in the first argument are removed off the end of the argument string.
Create a new source code file named main.lisp and type the following code in it.
(write-line (string-trim " " " a big hello from tutorials point "))
(write-line (string-left-trim " " " a big hello from tutorials point "))
(write-line (string-right-trim " " " a big hello from tutorials point "))
(write-line (string-trim " a" " a big hello from tutorials point "))
When you execute the code, it returns the following result −
a big hello from tutorials point
a big hello from tutorials point
a big hello from tutorials point
big hello from tutorials point
Strings in LISP are arrays and thus also sequences. We will cover these data types in coming tutorials. All functions that are applicable to arrays and sequences also apply to strings. However, we will demonstrate some commonly used functions using various examples.
The length function calculates the length of a string.
The subseq function returns a sub-string (as a string is also a sequence) starting at a particular index and continuing to a particular ending index or the end of the string.
The char function allows accessing individual characters of a string.
Example
Create a new source code file named main.lisp and type the following code in it.
(write (length "Hello World"))
(terpri)
(write-line (subseq "Hello World" 6))
(write (char "Hello World" 6))
When you execute the code, it returns the following result −
11
World
#\W
The sort function allows sorting a string. It takes a sequence (vector or string) and a two-argument predicate and returns a sorted version of the sequence.
The merge function takes two sequences and a predicate and returns a sequence produced by merging the two sequences, according to the predicate.
Example
Create a new source code file named main.lisp and type the following code in it.
;sorting the strings
(write (sort (vector "Amal" "Akbar" "Anthony") #'string<))
(terpri)
;merging the strings
(write (merge 'vector (vector "Rishi" "Zara" "Priyanka")
(vector "Anju" "Anuj" "Avni") #'string<))
When you execute the code, it returns the following result −
#("Akbar" "Amal" "Anthony")
#("Anju" "Anuj" "Avni" "Rishi" "Zara" "Priyanka")
The reverse function reverses a string.
For example, Create a new source code file named main.lisp and type the following code in it.
(write-line (reverse "Are we not drawn onward, we few, drawn onward to new era"))
When you execute the code, it returns the following result −
are wen ot drawno nward ,wef ew ,drawno nward ton ew erA
The concatenate function concatenates two strings. This is a generic sequence function and you must provide the result type as the first argument.
For example, Create a new source code file named main.lisp and type the following code in it.
(write-line (concatenate 'string "Are we not drawn onward, " "we few, drawn onward to new era"))
When you execute the code, it returns the following result −
Are we not drawn onward, we few, drawn onward to new era
Sequence is an abstract data type in LISP. Vectors and lists are the two concrete subtypes of this data type. All the functionalities defined on sequence data type are actually applied on all vectors and list types.
In this section, we will discuss most commonly used functions on sequences.
Before starting on various ways of manipulating sequences (i.e., vectors and lists), let us have a look at the list of all available functions.
The function make-sequence allows you to create a sequence of any type. The syntax for this function is −
make-sequence sqtype sqsize &key :initial-element
It creates a sequence of type sqtype and of length sqsize.
You may optionally specify some value using the :initial-element argument, then each of the elements will be initialized to this value.
For example, Create a new source code file named main.lisp and type the following code in it.
(write (make-sequence '(vector float)
10
:initial-element 1.0))
When you execute the code, it returns the following result −
#(1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0)
elt
It allows access to individual elements through an integer index.
length
It returns the length of a sequence.
subseq
It returns a sub-sequence by extracting the subsequence starting at a particular index and continuing to a particular ending index or the end of the sequence.
copy-seq
It returns a sequence that contains the same elements as its argument.
fill
It is used to set multiple elements of a sequence to a single value.
replace
It takes two sequences and the first argument sequence is destructively modified by copying successive elements into it from the second argument sequence.
count
It takes an item and a sequence and returns the number of times the item appears in the sequence.
reverse
It returns a sequence that contains the same elements as the argument but in reverse order.
nreverse
It returns the same sequence containing the same elements as sequence but in reverse order.
concatenate
It creates a new sequence containing the concatenation of any number of sequences.
position
It takes an item and a sequence and returns the index of the item in the sequence or nil.
find
It takes an item and a sequence. It finds the item in the sequence and returns it, if not found then it returns nil.
sort
It takes a sequence and a two-argument predicate and returns a sorted version of the sequence.
merge
It takes two sequences and a predicate and returns a sequence produced by merging the two sequences, according to the predicate.
map
It takes an n-argument function and n sequences and returns a new sequence containing the result of applying the function to subsequent elements of the sequences.
some
It takes a predicate as an argument and iterates over the argument sequence, and returns the first non-NIL value returned by the predicate or returns false if the predicate is never satisfied.
every
It takes a predicate as an argument and iterates over the argument sequence; it terminates, returning false, as soon as the predicate fails. If the predicate is always satisfied, it returns true.
notany
It takes a predicate as an argument and iterates over the argument sequence, and returns false as soon as the predicate is satisfied, or true if it never is.
notevery
It takes a predicate as an argument and iterates over the argument sequence, and returns true as soon as the predicate fails, or false if the predicate is always satisfied.
reduce
It maps over a single sequence, applying a two-argument function first to the first two elements of the sequence and then to the value returned by the function and subsequent elements of the sequence.
search
It searches a sequence to locate one or more elements satisfying some test.
remove
It takes an item and a sequence and returns the sequence with instances of item removed.
delete
This also takes an item and a sequence and returns a sequence of the same kind as the argument sequence that has the same elements except the item.
substitute
It takes a new item, an existing item, and a sequence and returns a sequence with instances of the existing item replaced with the new item.
nsubstitute
It takes a new item, an existing item, and a sequence and returns the same sequence with instances of the existing item replaced with the new item.
mismatch
It takes two sequences and returns the index of the first pair of mismatched elements.
We have just discussed various functions and keywords that are used as arguments in these functions working on sequences. In the next sections, we will see how to use these functions using examples.
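For instance, a minimal sketch of reduce, search, and mismatch:
(write (reduce #'+ '(1 2 3 4 5)))   ; 15
(terpri)
(write (search "we" "are we not"))  ; 4
(terpri)
(write (mismatch "lisping" "lisp")) ; 4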
The length function returns the length of a sequence, and the elt function allows you to access individual elements using an integer index.
Create a new source code file named main.lisp and type the following code in it.
(setq x (vector 'a 'b 'c 'd 'e))
(write (length x))
(terpri)
(write (elt x 3))
When you execute the code, it returns the following result −
5
D
Some sequence functions allow iterating through the sequence and performing some operations like searching, removing, counting or filtering specific elements without writing explicit loops.
The following example demonstrates this −
Create a new source code file named main.lisp and type the following code in it.
(write (count 7 '(1 5 6 7 8 9 2 7 3 4 5)))
(terpri)
(write (remove 5 '(1 5 6 7 8 9 2 7 3 4 5)))
(terpri)
(write (delete 5 '(1 5 6 7 8 9 2 7 3 4 5)))
(terpri)
(write (substitute 10 7 '(1 5 6 7 8 9 2 7 3 4 5)))
(terpri)
(write (find 7 '(1 5 6 7 8 9 2 7 3 4 5)))
(terpri)
(write (position 5 '(1 5 6 7 8 9 2 7 3 4 5)))
When you execute the code, it returns the following result −
2
(1 6 7 8 9 2 7 3 4)
(1 6 7 8 9 2 7 3 4)
(1 5 6 10 8 9 2 10 3 4 5)
7
1
Create a new source code file named main.lisp and type the following code in it.
(write (delete-if #'oddp '(1 5 6 7 8 9 2 7 3 4 5)))
(terpri)
(write (delete-if #'evenp '(1 5 6 7 8 9 2 7 3 4 5)))
(terpri)
(write (remove-if #'evenp '(1 5 6 7 8 9 2 7 3 4 5) :count 1 :from-end t))
(terpri)
(setq x (vector 'a 'b 'c 'd 'e 'f 'g))
(fill x 'p :start 1 :end 4)
(write x)
When you execute the code, it returns the following result −
(6 8 2 4)
(1 5 7 9 7 3 5)
(1 5 6 7 8 9 2 7 3 5)
#(A P P P E F G)
The sorting functions take a sequence and a two-argument predicate and return a sorted version of the sequence.
Create a new source code file named main.lisp and type the following code in it.
(write (sort '(2 4 7 3 9 1 5 4 6 3 8) #'<))
(terpri)
(write (sort '(2 4 7 3 9 1 5 4 6 3 8) #'>))
(terpri)
When you execute the code, it returns the following result −
(1 2 3 3 4 4 5 6 7 8 9)
(9 8 7 6 5 4 4 3 3 2 1)
Create a new source code file named main.lisp and type the following code in it.
(write (merge 'vector #(1 3 5) #(2 4 6) #'<))
(terpri)
(write (merge 'list #(1 3 5) #(2 4 6) #'<))
(terpri)
When you execute the code, it returns the following result −
#(1 2 3 4 5 6)
(1 2 3 4 5 6)
The functions every, some, notany, and notevery are called the sequence predicates.
These functions iterate over sequences and test the Boolean predicate.
All these functions take a predicate as the first argument, and the remaining arguments are sequences.
Create a new source code file named main.lisp and type the following code in it.
(write (every #'evenp #(2 4 6 8 10)))
(terpri)
(write (some #'evenp #(2 4 6 8 10 13 14)))
(terpri)
(write (every #'evenp #(2 4 6 8 10 13 14)))
(terpri)
(write (notany #'evenp #(2 4 6 8 10)))
(terpri)
(write (notevery #'evenp #(2 4 6 8 10 13 14)))
(terpri)
When you execute the code, it returns the following result −
T
T
NIL
NIL
T
We have already discussed the mapping functions. Similarly, the map function allows you to apply a function to successive elements of one or more sequences.
The map function takes a result type, an n-argument function, and n sequences, and returns a new sequence of the given type obtained by applying the function to successive elements of the sequences.
Create a new source code file named main.lisp and type the following code in it.
(write (map 'vector #'* #(2 3 4 5) #(3 5 4 8)))
When you execute the code, it returns the following result −
#(6 15 16 40)
Lists were the most important and the primary composite data structure in traditional LISP. Present-day Common LISP provides other data structures such as vectors, hash tables, classes, and structures.
Lists are singly linked lists. In LISP, lists are constructed as chains of a simple record structure named cons linked together.
A cons is a record structure containing two components called the car and the cdr.
Cons cells, or conses, are objects that are pairs of values created using the function cons.
The cons function takes two arguments and returns a new cons cell containing the two values. These values can be references to any kind of object.
If the second value is neither nil nor another cons cell, then the values are printed as a dotted pair enclosed in parentheses.
The two values in a cons cell are called the car and the cdr. The car function is used to access the first value and the cdr function is used to access the second value.
Create a new source code file named main.lisp and type the following code in it.
(write (cons 1 2))
(terpri)
(write (cons 'a 'b))
(terpri)
(write (cons 1 nil))
(terpri)
(write (cons 1 (cons 2 nil)))
(terpri)
(write (cons 1 (cons 2 (cons 3 nil))))
(terpri)
(write (cons 'a (cons 'b (cons 'c nil))))
(terpri)
(write ( car (cons 'a (cons 'b (cons 'c nil)))))
(terpri)
(write ( cdr (cons 'a (cons 'b (cons 'c nil)))))
When you execute the code, it returns the following result −
(1 . 2)
(A . B)
(1)
(1 2)
(1 2 3)
(A B C)
A
(B C)
The above example shows how cons structures could be used to create a singly linked list, e.g., the list (A B C) consists of three cons cells linked together by their cdrs.
Diagrammatically, it could be expressed as −
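Each box below represents a cons cell, with its car on the left and its cdr on the right (a rough textual sketch):

[A | •]--->[B | •]--->[C | NIL]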
Although cons cells can be used to create lists, constructing a list out of nested cons function calls is not the most convenient approach. The list function is rather used for creating lists in LISP.
The list function can take any number of arguments and as it is a function, it evaluates its arguments.
The first and rest functions give the first element and the rest part of a list. The following examples demonstrate the concepts.
Create a new source code file named main.lisp and type the following code in it.
(write (list 1 2))
(terpri)
(write (list 'a 'b))
(terpri)
(write (list 1 nil))
(terpri)
(write (list 1 2 3))
(terpri)
(write (list 'a 'b 'c))
(terpri)
(write (list 3 4 'a (car '(b . c)) (* 4 -2)))
(terpri)
(write (list (list 'a 'b) (list 'c 'd 'e)))
When you execute the code, it returns the following result −
(1 2)
(A B)
(1 NIL)
(1 2 3)
(A B C)
(3 4 A B -8)
((A B) (C D E))
Create a new source code file named main.lisp and type the following code in it.
(defun my-library (title author rating availability)
(list :title title :author author :rating rating :availability availability)
)
(write (getf (my-library "Hunger Game" "Collins" 9 t) :title))
When you execute the code, it returns the following result −
"Hunger Game"
The following table provides some commonly used list manipulating functions.
car
It takes a list as argument, and returns its first element.
cdr
It takes a list as argument, and returns a list without the first element.
cons
It takes two arguments, an element and a list and returns a list with the element inserted at the first place.
list
It takes any number of arguments and returns a list with the arguments as member elements of the list.
append
It merges two or more lists into one.
last
It takes a list and returns a list containing the last element.
member
It takes two arguments, of which the second must be a list; if the first argument is a member of the second argument, it returns the remainder of the list beginning with the first argument.
reverse
It takes a list and returns a list with the top elements in reverse order.
Please note that all sequence functions are applicable to lists.
Create a new source code file named main.lisp and type the following code in it.
(write (car '(a b c d e f)))
(terpri)
(write (cdr '(a b c d e f)))
(terpri)
(write (cons 'a '(b c)))
(terpri)
(write (list 'a '(b c) '(e f)))
(terpri)
(write (append '(b c) '(e f) '(p q) '() '(g)))
(terpri)
(write (last '(a b c d (e f))))
(terpri)
(write (reverse '(a b c d (e f))))
When you execute the code, it returns the following result −
A
(B C D E F)
(A B C)
(A (B C) (E F))
(B C E F P Q G)
((E F))
((E F) D C B A)
The car and cdr functions and their combinations allow extracting any particular element/member of a list.
However, sequences of car and cdr functions could be abbreviated by concatenating the letter a for car and d for cdr within the letters c and r.
For example, we can write cadadr to abbreviate the sequence of function calls - car cdr car cdr.
Thus, (cadadr '(a (c d) (e f g))) will return d.
Create a new source code file named main.lisp and type the following code in it.
(write (cadadr '(a (c d) (e f g))))
(terpri)
(write (caar (list (list 'a 'b) 'c)))
(terpri)
(write (cadr (list (list 1 2) (list 3 4))))
(terpri)
When you execute the code, it returns the following result −
D
A
(3 4)
In LISP, a symbol is a name that represents data objects and interestingly it is also a data object.
What makes symbols special is that they have a component called the property list, or plist.
LISP allows you to assign properties to symbols. For example, let us have a 'person' object. We would like this 'person' object to have properties like name, sex, height, weight, address, profession etc. A property is like an attribute name.
A property list is implemented as a list with an even number (possibly zero) of elements. Each pair of elements in the list constitutes an entry; the first item is the indicator, and the second is the value.
When a symbol is created, its property list is initially empty. Properties are created by using get within a setf form.
For example, the following statements allow us to assign the properties title, author and publisher, and respective values, to a symbol named books.
Create a new source code file named main.lisp and type the following code in it.
(write (setf (get 'books 'title) '(Gone with the Wind)))
(terpri)
(write (setf (get 'books 'author) '(Margaret Michel)))
(terpri)
(write (setf (get 'books 'publisher) '(Warner Books)))
When you execute the code, it returns the following result −
(GONE WITH THE WIND)
(MARGARET MICHEL)
(WARNER BOOKS)
Various property list functions allow you to assign properties as well as retrieve, replace or remove the properties of a symbol.
The get function returns the value stored in the property list of a symbol for a given indicator. It has the following syntax −
get symbol indicator &optional default
The get function searches the property list of the given symbol for the specified indicator; if it is found, then the corresponding value is returned; otherwise default is returned (or nil, if a default value is not specified).
Create a new source code file named main.lisp and type the following code in it.
(setf (get 'books 'title) '(Gone with the Wind))
(setf (get 'books 'author) '(Margaret Micheal))
(setf (get 'books 'publisher) '(Warner Books))
(write (get 'books 'title))
(terpri)
(write (get 'books 'author))
(terpri)
(write (get 'books 'publisher))
When you execute the code, it returns the following result −
(GONE WITH THE WIND)
(MARGARET MICHEAL)
(WARNER BOOKS)
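If the indicator is not present in the property list, get returns the optional default value; for instance, assuming the same books symbol as above, the following line would print NOT-AVAILABLE:

(write (get 'books 'price 'not-available))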
The symbol-plist function allows you to see all the properties of a symbol.
Create a new source code file named main.lisp and type the following code in it.
(setf (get 'annie 'age) 43)
(setf (get 'annie 'job) 'accountant)
(setf (get 'annie 'sex) 'female)
(setf (get 'annie 'children) 3)
(terpri)
(write (symbol-plist 'annie))
When you execute the code, it returns the following result −
(CHILDREN 3 SEX FEMALE JOB ACCOUNTANT AGE 43)
The remprop function removes the specified property from a symbol.
Create a new source code file named main.lisp and type the following code in it.
(setf (get 'annie 'age) 43)
(setf (get 'annie 'job) 'accountant)
(setf (get 'annie 'sex) 'female)
(setf (get 'annie 'children) 3)
(terpri)
(write (symbol-plist 'annie))
(remprop 'annie 'age)
(terpri)
(write (symbol-plist 'annie))
When you execute the code, it returns the following result −
(CHILDREN 3 SEX FEMALE JOB ACCOUNTANT AGE 43)
(CHILDREN 3 SEX FEMALE JOB ACCOUNTANT)
Vectors are one-dimensional arrays, therefore a subtype of array. Vectors and lists are collectively called sequences. Therefore all sequence generic functions and array functions we have discussed so far, work on vectors.
The vector function allows you to make fixed-size vectors with specific values. It takes any number of arguments and returns a vector containing those arguments.
Create a new source code file named main.lisp and type the following code in it.
(setf v1 (vector 1 2 3 4 5))
(setf v2 #(a b c d e))
(setf v3 (vector 'p 'q 'r 's 't))
(write v1)
(terpri)
(write v2)
(terpri)
(write v3)
When you execute the code, it returns the following result −
#(1 2 3 4 5)
#(A B C D E)
#(P Q R S T)
Please note that LISP uses the #(...) syntax as the literal notation for vectors. You can use this #(...) syntax to create and include literal vectors in your code.
However, these are literal vectors, so modifying them is not defined in LISP. Therefore, for programming, you should always use the vector function, or the more general function make-array to create vectors you plan to modify.
The make-array function is the more generic way to create a vector. You can access the vector elements using the aref function.
Create a new source code file named main.lisp and type the following code in it.
(setq a (make-array 5 :initial-element 0))
(setq b (make-array 5 :initial-element 2))
(dotimes (i 5)
(setf (aref a i) i))
(write a)
(terpri)
(write b)
(terpri)
When you execute the code, it returns the following result −
#(0 1 2 3 4)
#(2 2 2 2 2)
The make-array function allows you to create a resizable vector.
The fill-pointer argument of the function keeps track of the number of elements actually stored in the vector. It's the index of the next position to be filled when you add an element to the vector.
The vector-push function allows you to add an element to the end of a resizable vector. It increases the fill-pointer by 1.
The vector-pop function returns the most recently pushed item and decrements the fill pointer by 1.
Create a new source code file named main.lisp and type the following code in it.
(setq a (make-array 5 :fill-pointer 0))
(write a)
(vector-push 'a a)
(vector-push 'b a)
(vector-push 'c a)
(terpri)
(write a)
(terpri)
(vector-push 'd a)
(vector-push 'e a)
;this will not be entered as the vector limit is 5
(vector-push 'f a)
(write a)
(terpri)
(vector-pop a)
(vector-pop a)
(vector-pop a)
(write a)
When you execute the code, it returns the following result −
#()
#(A B C)
#(A B C D E)
#(A B)
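To grow a vector beyond its initial size, the vector would also have to be created with the :adjustable t argument and extended with vector-push-extend; a minimal sketch (the expected output is shown in the comment):

(setq a (make-array 5 :fill-pointer 0 :adjustable t))
(dotimes (i 7)
   (vector-push-extend i a))
(write a)   ; prints #(0 1 2 3 4 5 6)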
Since vectors are sequences, all sequence functions are applicable to vectors. Please consult the sequences chapter for vector functions.
Common Lisp does not provide a set data type. However, it provides a number of functions that allow set operations to be performed on a list.
You can add, remove, and search for items in a list, based on various criteria. You can also perform various set operations such as union, intersection, and set difference.
Sets, like lists, are generally implemented in terms of cons cells. For this very reason, the set operations get less and less efficient the bigger the sets get.
The adjoin function allows you to build up a set. It takes an item and a list representing a set and returns a list representing the set containing the item and all the items in the original set.
The adjoin function first looks for the item in the given list, if it is found, then it returns the original list; otherwise it creates a new cons cell with its car as the item and cdr pointing to the original list and returns this new list.
The adjoin function also takes :key and :test keyword arguments. These arguments are used for checking whether the item is present in the original list.
Since the adjoin function does not modify the original list, to make a change in the list itself you must either assign the value returned by adjoin to the original list, or use the macro pushnew to add an item to the set.
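For instance, the default eql test compares objects by identity, so a freshly built list is never considered a duplicate of an equal-looking member; supplying :test #'equal changes that. A minimal sketch (expected results in the comments):

(write (adjoin (list 1) '((1) (2))))                ; prints ((1) (1) (2))
(terpri)
(write (adjoin (list 1) '((1) (2)) :test #'equal))  ; prints ((1) (2))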
Create a new source code file named main.lisp and type the following code in it.
; creating myset as an empty list
(defparameter *myset* ())
(adjoin 1 *myset*)
(adjoin 2 *myset*)
; adjoin did not change the original set
;so it remains same
(write *myset*)
(terpri)
(setf *myset* (adjoin 1 *myset*))
(setf *myset* (adjoin 2 *myset*))
;now the original set is changed
(write *myset*)
(terpri)
;adding an existing value
(pushnew 2 *myset*)
;no duplicate allowed
(write *myset*)
(terpri)
;pushing a new value
(pushnew 3 *myset*)
(write *myset*)
(terpri)
When you execute the code, it returns the following result −
NIL
(2 1)
(2 1)
(3 2 1)
The member group of functions allows you to check whether an element is member of a set or not.
The following are the syntaxes of these functions −
member item list &key :test :test-not :key
member-if predicate list &key :key
member-if-not predicate list &key :key
These functions search the given list for a given item that satisfies the test. If no such item is found, then the function returns nil. Otherwise, the tail of the list with the element as the first element is returned.
The search is conducted at the top level only.
These functions could be used as predicates.
Create a new source code file named main.lisp and type the following code in it.
(write (member 'zara '(ayan abdul zara riyan nuha)))
(terpri)
(write (member-if #'evenp '(3 7 2 5/3 'a)))
(terpri)
(write (member-if-not #'numberp '(3 7 2 5/3 'a 'b 'c)))
When you execute the code, it returns the following result −
(ZARA RIYAN NUHA)
(2 5/3 'A)
('A 'B 'C)
The union group of functions allows you to perform set union on two lists provided as arguments to these functions on the basis of a test.
The following are the syntaxes of these functions −
union list1 list2 &key :test :test-not :key
nunion list1 list2 &key :test :test-not :key
The union function takes two lists and returns a new list containing all the elements present in either of the lists. If there are duplications, then only one copy of the member is retained in the returned list.
The nunion function performs the same operation but may destroy the argument lists.
Create a new source code file named main.lisp and type the following code in it.
(setq set1 (union '(a b c) '(c d e)))
(setq set2 (union '(#(a b) #(5 6 7) #(f h))
'(#(5 6 7) #(a b) #(g h)) :test-not #'mismatch)
)
(setq set3 (union '(#(a b) #(5 6 7) #(f h))
'(#(5 6 7) #(a b) #(g h)))
)
(write set1)
(terpri)
(write set2)
(terpri)
(write set3)
When you execute the code, it returns the following result −
(A B C D E)
(#(F H) #(5 6 7) #(A B) #(G H))
(#(A B) #(5 6 7) #(F H) #(5 6 7) #(A B) #(G H))
The union function does not produce the expected result for the lists of three vectors unless the :test-not #'mismatch argument is supplied. This is because the default test, eql, compares objects by identity; the vectors in the two lists are distinct objects even though their contents look the same, so the LISP interpreter/compiler does not treat them as equal. This is also why implementing big sets using lists is not advised, though it works fine for small sets.
The intersection group of functions allows you to perform intersection on two lists provided as arguments to these functions on the basis of a test.
The following are the syntaxes of these functions −
intersection list1 list2 &key :test :test-not :key
nintersection list1 list2 &key :test :test-not :key
These functions take two lists and return a new list containing all the elements present in both argument lists. If either list has duplicate entries, the redundant entries may or may not appear in the result.
Create a new source code file named main.lisp and type the following code in it.
(setq set1 (intersection '(a b c) '(c d e)))
(setq set2 (intersection '(#(a b) #(5 6 7) #(f h))
'(#(5 6 7) #(a b) #(g h)) :test-not #'mismatch)
)
(setq set3 (intersection '(#(a b) #(5 6 7) #(f h))
'(#(5 6 7) #(a b) #(g h)))
)
(write set1)
(terpri)
(write set2)
(terpri)
(write set3)
When you execute the code, it returns the following result −
(C)
(#(A B) #(5 6 7))
NIL
The nintersection function is the destructive version of intersection, i.e., it may destroy the original lists.
The set-difference group of functions allows you to perform set difference on two lists provided as arguments to these functions on the basis of a test.
The following are the syntaxes of these functions −
set-difference list1 list2 &key :test :test-not :key
nset-difference list1 list2 &key :test :test-not :key
The set-difference function returns a list of elements of the first list that do not appear in the second list.
Create a new source code file named main.lisp and type the following code in it.
(setq set1 (set-difference '(a b c) '(c d e)))
(setq set2 (set-difference '(#(a b) #(5 6 7) #(f h))
'(#(5 6 7) #(a b) #(g h)) :test-not #'mismatch)
)
(setq set3 (set-difference '(#(a b) #(5 6 7) #(f h))
'(#(5 6 7) #(a b) #(g h)))
)
(write set1)
(terpri)
(write set2)
(terpri)
(write set3)
When you execute the code, it returns the following result −
(A B)
(#(F H))
(#(A B) #(5 6 7) #(F H))
You can build tree data structures from cons cells, as lists of lists.
To implement tree structures, you will have to design functionalities that traverse the cons cells in a specific order, for example, pre-order, in-order, and post-order for binary trees.
Let us consider a tree structure made up of cons cell that form the following list of lists −
((1 2) (3 4) (5 6)).
Diagrammatically, it could be expressed as −
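In the rough textual sketch below, the top row holds the three top-level cons cells, and each car points down to one of the sub-lists:

[• | •]--->[• | •]--->[• | NIL]
   |          |          |
 (1 2)      (3 4)      (5 6)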
Although mostly you will need to write your own tree-functionalities according to your specific need, LISP provides some tree functions that you can use.
Apart from all the list functions, the following functions work especially on tree structures −
copy-tree x & optional vecp
It returns a copy of the tree of cons cells x. It recursively copies both the car and the cdr directions. If x is not a cons cell, the function simply returns x unchanged. If the optional vecp argument is true, this function copies vectors (recursively) as well as cons cells.
tree-equal x y & key :test :test-not :key
It compares two trees of cons cells. If x and y are both cons cells, their cars and cdrs are compared recursively. If neither x nor y is a cons cell, they are compared by eql, or according to the specified test. The :key function, if specified, is applied to the elements of both trees.
subst new old tree & key :test :test-not :key
It substitutes occurrences of given old item with new item, in tree, which is a tree of cons cells.
nsubst new old tree & key :test :test-not :key
It works same as subst, but it destroys the original tree.
sublis alist tree & key :test :test-not :key
It works like subst, except that it takes an association list alist of old-new pairs. Each element of the tree (after applying the :key function, if any), is compared with the cars of alist; if it matches, it is replaced by the corresponding cdr.
nsublis alist tree & key :test :test-not :key
It works same as sublis, but a destructive version.
Create a new source code file named main.lisp and type the following code in it.
(setq lst (list '(1 2) '(3 4) '(5 6)))
(setq mylst (copy-list lst))
(setq tr (copy-tree lst))
(write lst)
(terpri)
(write mylst)
(terpri)
(write tr)
When you execute the code, it returns the following result −
((1 2) (3 4) (5 6))
((1 2) (3 4) (5 6))
((1 2) (3 4) (5 6))
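The difference between copy-list and copy-tree becomes visible when a shared sub-list is modified. If the following lines were appended to the program above, the expected output would be as shown in the comments, because copy-list copies only the top-level cons cells while copy-tree copies the sub-lists as well:

(setf (car (car lst)) 100)   ; modify the first sub-list in place
(write mylst)                ; prints ((100 2) (3 4) (5 6))
(terpri)
(write tr)                   ; prints ((1 2) (3 4) (5 6))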
Create a new source code file named main.lisp and type the following code in it.
(setq tr '((1 2 (3 4 5) ((7 8) (7 8 9)))))
(write tr)
(setq trs (subst 7 1 tr))
(terpri)
(write trs)
When you execute the code, it returns the following result −
((1 2 (3 4 5) ((7 8) (7 8 9))))
((7 2 (3 4 5) ((7 8) (7 8 9))))
Let us try to build our own tree, using the list functions available in LISP.
(defun make-tree (item)
"it creates a new node with item."
(cons (cons item nil) nil)
)
Next let us add a child node into the tree - it will take two tree nodes and add the second tree as the child of the first.
(defun add-child (tree child)
(setf (car tree) (append (car tree) child))
tree)
This function will return the first child of a given tree - it will take a tree node and return the first child of that node, or nil, if this node does not have any child node.
(defun first-child (tree)
(if (null tree)
nil
(cdr (car tree))
)
)
This function will return the next sibling of a given node - it takes a tree node as argument, and returns a reference to the next sibling node, or nil, if the node does not have any.
(defun next-sibling (tree)
(cdr tree)
)
Lastly we need a function to return the information in a node −
(defun data (tree)
(car (car tree))
)
This example uses the above functionalities −
Create a new source code file named main.lisp and type the following code in it.
(defun make-tree (item)
"it creates a new node with item."
(cons (cons item nil) nil)
)
(defun first-child (tree)
(if (null tree)
nil
(cdr (car tree))
)
)
(defun next-sibling (tree)
(cdr tree)
)
(defun data (tree)
(car (car tree))
)
(defun add-child (tree child)
(setf (car tree) (append (car tree) child))
tree
)
(setq tr '((1 2 (3 4 5) ((7 8) (7 8 9)))))
(setq mytree (make-tree 10))
(write (data mytree))
(terpri)
(write (first-child tr))
(terpri)
(setq newtree (add-child tr mytree))
(terpri)
(write newtree)
When you execute the code, it returns the following result −
10
(2 (3 4 5) ((7 8) (7 8 9)))
((1 2 (3 4 5) ((7 8) (7 8 9)) (10)))
The hash table data structure represents a collection of key-and-value pairs that are organized based on the hash code of the key. It uses the key to access the elements in the collection.
A hash table is used when you need to access elements by using a key, and you can identify a useful key value. Each item in the hash table has a key/value pair. The key is used to access the items in the collection.
In Common LISP, hash table is a general-purpose collection. You can use arbitrary objects as a key or indexes.
When you store a value in a hash table, you make a key-value pair, and store it under that key. Later you can retrieve the value from the hash table using the same key. Each key maps to a single value, although you can store a new value in a key.
Hash tables, in LISP, could be categorized into three types, based on the way the keys are compared - eq, eql or equal. If the hash table is hashed on LISP objects, then the keys are compared with eq or eql. If the hash table is hashed on tree structure, then it would be compared using equal.
The make-hash-table function is used for creating a hash table. Syntax for this function is −
make-hash-table &key :test :size :rehash-size :rehash-threshold
Where −
All of these arguments are optional keyword arguments −
The :test argument determines how keys are compared - it should have one of three values #'eq, #'eql, or #'equal, or one of the three symbols eq, eql, or equal. If not specified, eql is assumed.
The :size argument sets the initial size of the hash table. This should be an integer greater than zero.
The :rehash-size argument specifies how much to increase the size of the hash table when it becomes full. This can be an integer greater than zero, which is the number of entries to add, or it can be a floating-point number greater than 1, which is the ratio of the new size to the old size. The default value for this argument is implementation-dependent.
The :rehash-threshold argument specifies how full the hash table can get before it must grow. This can be an integer greater than zero and less than the :rehash-size (in which case it will be scaled whenever the table is grown), or it can be a floating-point number between zero and 1. The default value for this argument is implementation-dependent.
You can also call the make-hash-table function with no arguments.
The gethash function retrieves an item from the hash table by searching for its key. If it does not find the key, then it returns nil.
It has the following syntax −
gethash key hash-table &optional default
where −
key: is the associated key
hash-table: is the hash-table to be searched
default: is the value to be returned, if the entry is not found, which is nil, if not specified.
The gethash function actually returns two values, the second being a predicate value that is true if an entry was found, and false if no entry was found.
For adding an item to the hash table, you can use the setf function along with the gethash function.
Create a new source code file named main.lisp and type the following code in it.
(setq empList (make-hash-table))
(setf (gethash '001 empList) '(Charlie Brown))
(setf (gethash '002 empList) '(Freddie Seal))
(write (gethash '001 empList))
(terpri)
(write (gethash '002 empList))
When you execute the code, it returns the following result −
(CHARLIE BROWN)
(FREDDIE SEAL)
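The second return value mentioned above can be observed with multiple-value-bind. For instance, continuing with the empList table, the following sketch should print the lines given in the comments:

(multiple-value-bind (value present) (gethash '001 empList)
   (format t "~a ~a~%" value present))   ; prints (CHARLIE BROWN) T
(multiple-value-bind (value present) (gethash '009 empList)
   (format t "~a ~a~%" value present))   ; prints NIL NIL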
The remhash function removes any entry for a specific key in hash-table. This is a predicate that is true if there was an entry or false if there was not.
The syntax for this function is −
remhash key hash-table
Create a new source code file named main.lisp and type the following code in it.
(setq empList (make-hash-table))
(setf (gethash '001 empList) '(Charlie Brown))
(setf (gethash '002 empList) '(Freddie Seal))
(setf (gethash '003 empList) '(Mark Mongoose))
(write (gethash '001 empList))
(terpri)
(write (gethash '002 empList))
(terpri)
(write (gethash '003 empList))
(remhash '003 empList)
(terpri)
(write (gethash '003 empList))
When you execute the code, it returns the following result −
(CHARLIE BROWN)
(FREDDIE SEAL)
(MARK MONGOOSE)
NIL
The maphash function allows you to apply a specified function on each key-value pair on a hash table.
It takes two arguments - the function and a hash table and invokes the function once for each key/value pair in the hash table.
Create a new source code file named main.lisp and type the following code in it.
(setq empList (make-hash-table))
(setf (gethash '001 empList) '(Charlie Brown))
(setf (gethash '002 empList) '(Freddie Seal))
(setf (gethash '003 empList) '(Mark Mongoose))
(maphash #'(lambda (k v) (format t "~a => ~a~%" k v)) empList)
When you execute the code, it returns the following result −
3 => (MARK MONGOOSE)
2 => (FREDDIE SEAL)
1 => (CHARLIE BROWN)
Common LISP provides numerous input-output functions. We have already used the format function and the print function for output. In this section, we will look into some of the most commonly used input-output functions provided in LISP.
The following table provides the most commonly used input functions of LISP −
read & optional input-stream eof-error-p eof-value recursive-p
It reads in the printed representation of a Lisp object from input-stream, builds a corresponding Lisp object, and returns the object.
read-preserving-whitespace & optional in-stream eof-error-p eof-value recursive-p
It is used in some specialized situations where it is desirable to determine precisely what character terminated the extended token.
read-line & optional input-stream eof-error-p eof-value recursive-p
It reads in a line of text terminated by a newline.
read-char & optional input-stream eof-error-p eof-value recursive-p
It takes one character from input-stream and returns it as a character object.
unread-char character & optional input-stream
It puts the character most recently read from the input-stream, onto the front of input-stream.
peek-char & optional peek-type input-stream eof-error-p eof-value recursive-p
It returns the next character to be read from input-stream, without actually removing it from the input stream.
listen & optional input-stream
The predicate listen is true if there is a character immediately available from input-stream, and is false if not.
read-char-no-hang & optional input-stream eof-error-p eof-value recursive-p
It is similar to read-char, but if it does not get a character, it does not wait for a character, but returns nil immediately.
clear-input & optional input-stream
It clears any buffered input associated with input-stream.
read-from-string string & optional eof-error-p eof-value & key :start :end :preserve-whitespace
It takes the characters of the string successively and builds a LISP object and returns the object. It also returns the index of the first character in the string not read, or the length of the string (or, length +1), as the case may be.
parse-integer string & key :start :end :radix :junk-allowed
It examines the substring of string delimited by :start and :end (default to the beginning and end of the string). It skips over whitespace characters and then attempts to parse an integer.
read-byte binary-input-stream & optional eof-error-p eof-value
It reads one byte from the binary-input-stream and returns it in the form of an integer.
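Of these, read-from-string and parse-integer work on strings rather than streams; a minimal sketch of their use (the values they should print are shown in the comments):

(write (read-from-string "(1 2 3)"))             ; prints (1 2 3)
(terpri)
(write (parse-integer " 42 " :junk-allowed t))   ; prints 42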
The read function is used for taking input from the keyboard. It can be called without any arguments, in which case it reads from the standard input.
For example, consider the code snippet −
(write ( + 15.0 (read)))
Assume the user enters 10.2 from the STDIN Input; it returns −
25.2
The read function reads characters from an input stream and interprets them by parsing as representations of Lisp objects.
Create a new source code file named main.lisp and type the following code in it −
; the function AreaOfCircle
; calculates area of a circle
; when the radius is input from keyboard
(defun AreaOfCircle()
(terpri)
(princ "Enter Radius: ")
(setq radius (read))
(setq area (* 3.1416 radius radius))
(princ "Area: ")
(write area))
(AreaOfCircle)
When you execute the code, it returns the following result −
Enter Radius: 5 (STDIN Input)
Area: 78.53999
Create a new source code file named main.lisp and type the following code in it.
(with-input-from-string (stream "Welcome to Tutorials Point!")
(print (read-char stream))
(print (read-char stream))
(print (read-char stream))
(print (read-char stream))
(print (read-char stream))
(print (read-char stream))
(print (read-char stream))
(print (read-char stream))
(print (read-char stream))
(print (read-char stream))
(print (peek-char nil stream nil 'the-end))
(values)
)
When you execute the code, it returns the following result −
#\W
#\e
#\l
#\c
#\o
#\m
#\e
#\Space
#\t
#\o
#\Space
All output functions in LISP take an optional argument called output-stream, where the output is sent. If not mentioned or nil, output-stream defaults to the value of the variable *standard-output*.
The following table provides the most commonly used output functions of LISP −
write object & key :stream :escape :radix :base :circle :pretty :level :length :case :gensym :array
write object & key :stream :escape :radix :base :circle :pretty :level :length :case :gensym :array :readably :right-margin :miser-width :lines :pprint-dispatch
Both write the object to the output stream specified by :stream, which defaults to the value of *standard-output*. Other values default to the corresponding global variables set for printing.
prin1 object & optional output-stream
print object & optional output-stream
pprint object & optional output-stream
princ object & optional output-stream
All these functions output the printed representation of object to output-stream. However, they differ in the following ways −
prin1 returns the object as its value.
print prints the object with a preceding newline and followed by a space. It returns object.
pprint is just like print except that the trailing space is omitted.
princ is just like prin1 except that the output has no escape characters.
write-to-string object & key :escape :radix :base :circle :pretty :level :length :case :gensym :array
write-to-string object & key :escape :radix :base :circle :pretty :level :length :case :gensym :array :readably :right-margin :miser-width :lines :pprint-dispatch
prin1-to-string object
princ-to-string object
The object is effectively printed and the output characters are made into a string, which is returned.
write-char character & optional output-stream
It outputs the character to output-stream, and returns character.
write-string string & optional output-stream & key :start :end
It writes the characters of the specified substring of string to the output-stream.
write-line string & optional output-stream & key :start :end
It works the same way as write-string, but outputs a newline afterwards.
terpri & optional output-stream
It outputs a newline to output-stream.
fresh-line & optional output-stream
It outputs a newline only if the stream is not already at the start of a line.
finish-output & optional output-stream
force-output & optional output-stream
clear-output & optional output-stream
The function finish-output attempts to ensure that all output sent to output-stream has reached its destination, and only then returns nil.
The function force-output initiates the emptying of any internal buffers but returns nil without waiting for completion or acknowledgment.
The function clear-output attempts to abort any outstanding output operation in progress in order to allow as little output as possible to continue to the destination.
write-byte integer binary-output-stream
It writes one byte, the value of the integer.
Create a new source code file named main.lisp and type the following code in it.
; this program inputs a number and doubles it
(defun DoubleNumber()
(terpri)
(princ "Enter Number : ")
(setq n1 (read))
(setq doubled (* 2.0 n1))
(princ "The Number: ")
(write n1)
(terpri)
(princ "The Number Doubled: ")
(write doubled)
)
(DoubleNumber)
When you execute the code, it returns the following result −
Enter Number : 3456.78 (STDIN Input)
The Number: 3456.78
The Number Doubled: 6913.56
The function format is used for producing nicely formatted text. It has the following syntax −
format destination control-string &rest arguments
where,
destination determines where the output is sent; the value t stands for the standard output.
control-string holds the characters to be output and the printing directives.
A format directive consists of a tilde (~), optional prefix parameters separated by commas, optional colon (:) and at-sign (@) modifiers, and a single character indicating what kind of directive this is.
The prefix parameters are generally integers, notated as optionally signed decimal numbers.
The following table provides brief description of the commonly used directives −
~A
Is followed by ASCII arguments.
~S
Is followed by S-expressions.
~D
For decimal arguments.
~B
For binary arguments.
~O
For octal arguments.
~X
For hexadecimal arguments.
~C
For character arguments.
~F
For Fixed-format floating-point arguments.
~E
Exponential floating-point arguments.
~$
Dollar and floating point arguments.
~%
A new line is printed.
~*
Next argument is ignored.
~?
Indirection. The next argument must be a string, and the one after it a list.
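For instance, several of these directives could be combined in a single format call; the following sketch should print 10 1010 FF hi "hi" followed by a newline:

(format t "~D ~B ~X ~A ~S~%" 10 10 255 "hi" "hi")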
Let us rewrite the program calculating a circle's area −
Create a new source code file named main.lisp and type the following code in it.
(defun AreaOfCircle()
(terpri)
(princ "Enter Radius: ")
(setq radius (read))
(setq area (* 3.1416 radius radius))
(format t "Radius: = ~F~% Area = ~F" radius area)
)
(AreaOfCircle)
When you execute the code, it returns the following result −
Enter Radius: 10.234 (STDIN Input)
Radius: = 10.234
Area = 329.03473
We have discussed how standard input and output are handled by Common LISP. All these functions work for reading from and writing into text and binary files too. The only difference is that in this case the stream we use is not standard input or output, but a stream created for the specific purpose of writing into or reading from files.
In this chapter, we will see how LISP can create, open, and close text or binary files for data storage.
A file represents a sequence of bytes, no matter whether it is a text file or a binary file. This chapter will take you through the important functions/macros for file management.
You can use the open function to create a new file or to open an existing file. It is the most basic function for opening a file. However, the with-open-file is usually more convenient and more commonly used, as we will see later in this section.
When a file is opened, a stream object is constructed to represent it in the LISP environment. All operations on the stream are basically equivalent to operations on the file.
Syntax for the open function is −
open filename &key :direction :element-type :if-exists :if-does-not-exist :external-format
where,
The filename argument is the name of the file to be opened or created.
The keyword arguments specify the type of stream and the ways of handling errors.
The :direction keyword specifies whether the stream should handle input, output, or both, it takes the following values −
:input - for input streams (default value)
:output - for output streams
:io - for bidirectional streams
:probe - for just checking a file's existence; the stream is opened and then closed.
The :element-type specifies the type of the unit of transaction for the stream.
The :if-exists argument specifies the action to be taken if the :direction is :output or :io and a file of the specified name already exists. If the direction is :input or :probe, this argument is ignored. It takes the following values −
:error - it signals an error.
:new-version - it creates a new file with the same name but larger version number.
:rename - it renames the existing file.
:rename-and-delete - it renames the existing file and then deletes it.
:append - it appends to the existing file.
:supersede - it supersedes the existing file.
nil - it does not create a file or even a stream just returns nil to indicate failure.
The :if-does-not-exist argument specifies the action to be taken if a file of the specified name does not already exist. It takes the following values −
:error - it signals an error.
:create - it creates an empty file with the specified name and then uses it.
nil - it does not create a file or even a stream, but instead simply returns nil to indicate failure.
The :external-format argument specifies an implementation-recognized scheme for representing characters in files.
For example, you can open a file named myfile.txt stored in the /tmp folder as −
(open "/tmp/myfile.txt")
The with-open-file allows reading or writing into a file, using the stream variable associated with the read/write transaction. Once the job is done, it automatically closes the file. It is extremely convenient to use.
It has the following syntax −
with-open-file (stream filename {options}*)
{declaration}* {form}*
filename is the name of the file to be opened; it may be a string, a pathname, or a stream.
The options are the same as the keyword arguments to the function open.
Create a new source code file named main.lisp and type the following code in it.
(with-open-file (stream "/tmp/myfile.txt" :direction :output)
(format stream "Welcome to Tutorials Point!")
(terpri stream)
(format stream "This is a tutorials database")
(terpri stream)
(format stream "Submit your Tutorials, White Papers and Articles into our Tutorials Directory.")
)
Please note that all input-output functions discussed in the previous chapter, such as terpri and format, also work for writing into the file we created here.
When you execute the code, it does not return anything; however, our data is written into the file. The :direction :output keyword allows us to do this.
However, we can read from this file using the read-line function.
Create a new source code file named main.lisp and type the following code in it.
(let ((in (open "/tmp/myfile.txt" :if-does-not-exist nil)))
(when in
(loop for line = (read-line in nil)
while line do (format t "~a~%" line))
(close in)
)
)
When you execute the code, it returns the following result −
Welcome to Tutorials Point!
This is a tutorials database
Submit your Tutorials, White Papers and Articles into our Tutorials Directory.
The close function closes a stream.
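A minimal sketch using open and close directly, together with the :if-exists :append option described above (assuming the /tmp/myfile.txt file created earlier), would be:

(let ((stream (open "/tmp/myfile.txt" :direction :output :if-exists :append)))
   (write-line "One more line." stream)
   (close stream))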
Structures are one of the user-defined data types, which allow you to combine data items of different kinds.
Structures are used to represent a record. Suppose you want to keep track of your books in a library. You might want to track the following attributes about each book −
Title
Author
Subject
Book ID
The defstruct macro in LISP allows you to define an abstract record structure. The defstruct statement defines a new data type, with more than one member for your program.
To discuss the format of the defstruct macro, let us write the definition of the Book structure. We could define the book structure as −
(defstruct book
title
author
subject
book-id
)
The above declaration creates a book structure with four named components. So every book created will be an object of this structure.
It defines four functions named book-title, book-author, book-subject and book-book-id, which will take one argument, a book structure, and will return the fields title, author, subject and book-id of the book object. These functions are called the access functions.
The symbol book becomes a data type and you can check it using the typep predicate.
There will also be an implicit function named book-p, which is a predicate and will be true if its argument is a book and is false otherwise.
Another implicit function named make-book will be created, which is a constructor, which, when invoked, will create a data structure with four components, suitable for use with the access functions.
The #S syntax refers to a structure, and you can use it to read or print instances of a book.
An implicit function named copy-book of one argument is also defined. It takes a book object and creates another book object, which is a copy of the first one. This function is called the copier function.
You can use setf to alter the components of a book, for example
(setf (book-book-id book3) 100)
Create a new source code file named main.lisp and type the following code in it.
(defstruct book
title
author
subject
book-id
)
( setq book1 (make-book :title "C Programming"
:author "Nuha Ali"
:subject "C-Programming Tutorial"
:book-id "478")
)
( setq book2 (make-book :title "Telecom Billing"
:author "Zara Ali"
:subject "C-Programming Tutorial"
:book-id "501")
)
(write book1)
(terpri)
(write book2)
(setq book3( copy-book book1))
(setf (book-book-id book3) 100)
(terpri)
(write book3)
When you execute the code, it returns the following result −
#S(BOOK :TITLE "C Programming" :AUTHOR "Nuha Ali" :SUBJECT "C-Programming Tutorial" :BOOK-ID "478")
#S(BOOK :TITLE "Telecom Billing" :AUTHOR "Zara Ali" :SUBJECT "C-Programming Tutorial" :BOOK-ID "501")
#S(BOOK :TITLE "C Programming" :AUTHOR "Nuha Ali" :SUBJECT "C-Programming Tutorial" :BOOK-ID 100)
In general terms of programming languages, a package is designed for providing a way to keep one set of names separate from another. The symbols declared in one package will not conflict with the same symbols declared in another. This way packages reduce the naming conflicts between independent code modules.
The LISP reader maintains a table of all the symbols it has found. When it finds a new character sequence, it creates a new symbol and stores it in the symbol table. This table is called a package.
The current package is referred by the special variable *package*.
There are two predefined packages in LISP −
common-lisp − it contains symbols for all the functions and variables defined.
common-lisp-user − it uses the common-lisp package and all other packages with editing and debugging tools; it is called cl-user in short
The following table provides most commonly used functions used for creating, using and manipulating packages −
make-package package-name &key :nicknames :use
It creates and returns a new package with the specified package name.
in-package package-name &key :nicknames :use
Makes the package current.
in-package name
This macro causes *package* to be set to the package named name, which must be a symbol or string.
find-package name
It searches for a package. The package with that name or nickname is returned; if no such package exists, find-package returns nil.
rename-package package new-name &optional new-nicknames
It renames a package.
list-all-packages
This function returns a list of all packages that currently exist in the Lisp system.
delete-package package
It deletes a package.
The defpackage macro is used for creating a user-defined package. It has the following syntax −
(defpackage :package-name
(:use :common-lisp ...)
(:export :symbol1 :symbol2 ...)
)
Where,
package-name is the name of the package.
The :use keyword specifies the packages that this package needs, i.e., packages that define functions used by code in this package.
The :export keyword specifies the symbols that are external in this package.
The make-package function is also used for creating a package. The syntax for this function is −
make-package package-name &key :nicknames :use
The arguments and keywords have the same meaning as before.
Once you have created a package, you can use the code in this package, by making it the current package. The in-package macro makes a package current in the environment.
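For instance, a small package exporting a single symbol could be defined and used as follows; this is only a sketch, and the package name my-utils and the function square are illustrative:

(defpackage :my-utils
   (:use :common-lisp)
   (:export :square))
(in-package :my-utils)
(defun square (x) (* x x))
(in-package :common-lisp-user)
(write (my-utils:square 5))   ; prints 25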
Create a new source code file named main.lisp and type the following code in it.
(make-package :tom)
(make-package :dick)
(make-package :harry)
(in-package tom)
(defun hello ()
(write-line "Hello! This is Tom's Tutorials Point")
)
(hello)
(in-package dick)
(defun hello ()
(write-line "Hello! This is Dick's Tutorials Point")
)
(hello)
(in-package harry)
(defun hello ()
(write-line "Hello! This is Harry's Tutorials Point")
)
(hello)
(in-package tom)
(hello)
(in-package dick)
(hello)
(in-package harry)
(hello)
When you execute the code, it returns the following result −
Hello! This is Tom's Tutorials Point
Hello! This is Dick's Tutorials Point
Hello! This is Harry's Tutorials Point
The delete-package function allows you to delete a package. The following example demonstrates this −
Create a new source code file named main.lisp and type the following code in it.
(make-package :tom)
(make-package :dick)
(make-package :harry)
(in-package tom)
(defun hello ()
(write-line "Hello! This is Tom's Tutorials Point")
)
(in-package dick)
(defun hello ()
(write-line "Hello! This is Dick's Tutorials Point")
)
(in-package harry)
(defun hello ()
(write-line "Hello! This is Harry's Tutorials Point")
)
(in-package tom)
(hello)
(in-package dick)
(hello)
(in-package harry)
(hello)
(delete-package tom)
(in-package tom)
(hello)
When you execute the code, it returns the following result −
Hello! This is Tom's Tutorials Point
Hello! This is Dick's Tutorials Point
Hello! This is Harry's Tutorials Point
*** - EVAL: variable TOM has no value
In Common LISP terminology, exceptions are called conditions.
In fact, conditions are more general than exceptions in traditional programming languages, because a condition represents any occurrence, error, or not, which might affect various levels of function call stack.
The condition handling mechanism in LISP handles such situations in such a way that conditions are used to signal a warning (say, by printing a warning) while the upper-level code on the call stack can continue its work.
The condition handling system in LISP has three parts −
Signalling a condition
Handling the condition
Restarting the process
Let us take up an example of handling a condition arising out of divide by zero condition, to explain the concepts here.
You need to take the following steps for handling a condition −
Define the Condition − "A condition is an object whose class indicates the general nature of the condition and whose instance data carries information about the details of the particular circumstances that lead to the condition being signalled".
The define-condition macro is used for defining a condition, which has the following syntax −
(define-condition condition-name (error)
((text :initarg :text :reader text))
)
New condition objects are created with the make-condition function, which initializes the slots of the new condition based on its :initarg arguments.
In our example, the following code defines the condition −
(define-condition on-division-by-zero (error)
((message :initarg :message :reader message))
)
Writing the Handlers − a condition handler is code that is used for handling the condition signalled. It is generally written in one of the higher-level functions that call the erroring function. When a condition is signalled, the signalling mechanism searches for an appropriate handler based on the condition's class.
Each handler consists of −
A type specifier that indicates the type of condition it can handle
A function that takes a single argument, the condition
When a condition is signalled, the signalling mechanism finds the most recently established handler that is compatible with the condition type and calls its function.
The macro handler-case establishes a condition handler. The basic form of handler-case is −
(handler-case expression error-clause*)
Where, each error clause is of the form −
(condition-type ([var]) code)
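As a small illustrative sketch (independent of the example program further below, and using the standard division-by-zero condition signalled by the / function), handler-case could be used like this −
(handler-case
(/ 10 0)
(division-by-zero (c)
(format t "caught: ~a~%" c)
0
)
)
Here the handler prints the condition and the whole handler-case form evaluates to 0.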
Restarting Phase
This is the code that actually recovers your program from errors, and condition handlers can then handle a condition by invoking an appropriate restart. The restart code is generally placed in middle-level or low-level functions and the condition handlers are placed into the upper levels of the application.
The handler-bind macro allows you to provide a restart function, and allows you to continue at the lower level functions without unwinding the function call stack. In other words, the flow of control will still be in the lower level function.
The basic form of handler-bind is as follows −
(handler-bind (binding*) form*)
Where each binding is a list of the following −
a condition type
a handler function of one argument
The invoke-restart macro finds and invokes the most recently bound restart function with the specified name as argument.
You can have multiple restarts.
In this example, we demonstrate the above concepts by writing a function named division-function, which signals an error condition if the divisor argument is zero. The anonymous handler functions provide three ways to come out of it − by returning a value of 1, by supplying a divisor of 2 and recalculating, or by just continuing via the just-continue restart (which makes handle-infinity return nil).
Create a new source code file named main.lisp and type the following code in it.
(define-condition on-division-by-zero (error)
((message :initarg :message :reader message))
)
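; handle-infinity calls division-function and wraps the call in restart-case to provide the just-continue restart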
(defun handle-infinity ()
(restart-case
(let ((result 0))
(setf result (division-function 10 0))
(format t "Value: ~a~%" result)
)
(just-continue () nil)
)
)
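; division-function signals the on-division-by-zero condition when the divisor is zero and offers the return-zero, return-value and recalc-using restarts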
(defun division-function (value1 value2)
(restart-case
(if (/= value2 0)
(/ value1 value2)
(error 'on-division-by-zero :message "denominator is zero")
)
(return-zero () 0)
(return-value (r) r)
(recalc-using (d) (division-function value1 d))
)
)
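; high-level-code installs a handler via handler-bind that invokes the return-zero restart (it is defined here but not called below)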
(defun high-level-code ()
(handler-bind
(
(on-division-by-zero
#'(lambda (c)
(format t "error signaled: ~a~%" (message c))
(invoke-restart 'return-zero)
)
)
)
(handle-infinity)
)
)
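; the following three handler-bind forms call handle-infinity, each invoking a different restart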
(handler-bind
(
(on-division-by-zero
#'(lambda (c)
(format t "error signaled: ~a~%" (message c))
(invoke-restart 'return-value 1)
)
)
)
(handle-infinity)
)
(handler-bind
(
(on-division-by-zero
#'(lambda (c)
(format t "error signaled: ~a~%" (message c))
(invoke-restart 'recalc-using 2)
)
)
)
(handle-infinity)
)
(handler-bind
(
(on-division-by-zero
#'(lambda (c)
(format t "error signaled: ~a~%" (message c))
(invoke-restart 'just-continue)
)
)
)
(handle-infinity)
)
(format t "Done."))
When you execute the code, it returns the following result −
error signaled: denominator is zero
Value: 1
error signaled: denominator is zero
Value: 5
error signaled: denominator is zero
Done.
Apart from the 'Condition System' discussed above, Common LISP also provides various functions that may be called for signalling an error. How an error is handled once it is signalled is, however, implementation-dependent.
The following table provides commonly used functions signalling warnings, breaks, non-fatal and fatal errors.
The user program specifies an error message (a string). The functions process this message and may or may not display it to the user.
The error messages should be constructed by applying the format function, should not contain a newline character at either the beginning or end, and need not indicate that an error has occurred, as the LISP system will take care of these according to its preferred style.
error format-string &rest args
It signals a fatal error. It is impossible to continue from this kind of error; thus error will never return to its caller.
cerror continue-format-string error-format-string &rest args
It signals an error and enters the debugger. However, it allows the program to be continued from the debugger after resolving the error.
warn format-string &rest args
It prints an error message but normally doesn't go into the debugger.
break &optional format-string &rest args
It prints the message and goes directly into the debugger, without allowing any possibility of interception by programmed error-handling facilities.
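For instance (a minimal sketch, separate from the factorial example that follows), warn prints a warning message and lets execution continue −
(warn "the value ~d looks suspicious" 1000)
(write-line "execution continues after the warning")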
In this example, the factorial function calculates the factorial of a number; however, if the argument is negative or not an integer, it signals an error condition.
Create a new source code file named main.lisp and type the following code in it.
(defun factorial (x)
(cond ((or (not (typep x 'integer)) (minusp x))
(error "~S is a negative number." x))
((zerop x) 1)
(t (* x (factorial (- x 1))))
)
)
(write(factorial 5))
(terpri)
(write(factorial -1))
When you execute the code, it returns the following result −
120
*** - -1 is a negative number.
Common LISP predated the advance of object-oriented programming by a couple of decades. However, object-orientation was incorporated into it at a later stage.
The defclass macro allows creating user-defined classes. It establishes a class as a data type. It has the following syntax −
(defclass class-name (superclass-name*)
(slot-description*)
class-option*)
The slots are variables that store data; they are also called fields.
A slot-description has the form (slot-name slot-option*), where each option is a keyword followed by a name, expression and other options. Most commonly used slot options are −
:accessor function-name
:initform expression
:initarg symbol
For example, let us define a Box class, with three slots: length, breadth, and height.
(defclass Box ()
(length
breadth
height)
)
Unless the slots have values that can be accessed, read or written to, classes are pretty useless.
You can specify accessors for each slot when you define a class. For example, take our Box class (accessor names such as box-length are used so that they do not clash with built-in functions like length) −
(defclass Box ()
((length :accessor box-length)
(breadth :accessor box-breadth)
(height :accessor box-height)
)
)
You can also specify separate accessor names for reading and writing a slot.
(defclass Box ()
((length :reader get-length :writer set-length)
(breadth :reader get-breadth :writer set-breadth)
(height :reader get-height :writer set-height)
)
)
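As a sketch of how these could be used (b is an illustrative variable name, and make-instance is described in the next section), note that a :writer function takes the new value as its first argument and the object as its second −
(setf b (make-instance 'Box))
(set-length 10 b)
(write (get-length b))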
The generic function make-instance creates and returns a new instance of a class.
It has the following syntax −
(make-instance class {initarg value}*)
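The initarg/value pairs correspond to :initarg slot options. As an illustrative sketch (the point class below is hypothetical and not used elsewhere in this tutorial), slots declared with :initarg and :initform can be initialized at creation time −
(defclass point ()
((x :initarg :x :initform 0 :accessor point-x)
(y :initarg :y :initform 0 :accessor point-y)
)
)
(setf p (make-instance 'point :x 3))
(format t "x = ~a y = ~a~%" (point-x p) (point-y p))
Here x is set from the :x initarg, while y falls back to its :initform default, so this prints x = 3 y = 0.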
Let us create a Box class with three slots: length, breadth and height. We will use three slot accessors to set the values in these fields.
Create a new source code file named main.lisp and type the following code in it.
(defclass box ()
((length :accessor box-length)
(breadth :accessor box-breadth)
(height :accessor box-height)
)
)
(setf item (make-instance 'box))
(setf (box-length item) 10)
(setf (box-breadth item) 10)
(setf (box-height item) 5)
(format t "Length of the Box is ~d~%" (box-length item))
(format t "Breadth of the Box is ~d~%" (box-breadth item))
(format t "Height of the Box is ~d~%" (box-height item))
When you execute the code, it returns the following result −
Length of the Box is 10
Breadth of the Box is 10
Height of the Box is 5
The defmethod macro allows you to define a method specialized on a class. The following example extends our Box class to include a method named volume.
Create a new source code file named main.lisp and type the following code in it.
(defclass box ()
((length :accessor box-length)
(breadth :accessor box-breadth)
(height :accessor box-height)
(volume :reader volume)
)
)
; method calculating volume
(defmethod volume ((object box))
(* (box-length object) (box-breadth object)(box-height object))
)
;setting the values
(setf item (make-instance 'box))
(setf (box-length item) 10)
(setf (box-breadth item) 10)
(setf (box-height item) 5)
; displaying values
(format t "Length of the Box is ~d~%" (box-length item))
(format t "Breadth of the Box is ~d~%" (box-breadth item))
(format t "Height of the Box is ~d~%" (box-height item))
(format t "Volume of the Box is ~d~%" (volume item))
When you execute the code, it returns the following result −
Length of the Box is 10
Breadth of the Box is 10
Height of the Box is 5
Volume of the Box is 500
LISP allows you to define a class in terms of another class. This is called inheritance. You can create a derived class by adding features that are new or different. The derived class inherits the functionality of the parent class.
The following example explains this −
Create a new source code file named main.lisp and type the following code in it.
(defclass box ()
((length :accessor box-length)
(breadth :accessor box-breadth)
(height :accessor box-height)
(volume :reader volume)
)
)
; method calculating volume
(defmethod volume ((object box))
(* (box-length object) (box-breadth object)(box-height object))
)
;wooden-box class inherits the box class
(defclass wooden-box (box)
((price :accessor box-price)))
;setting the values
(setf item (make-instance 'wooden-box))
(setf (box-length item) 10)
(setf (box-breadth item) 10)
(setf (box-height item) 5)
(setf (box-price item) 1000)
; displaying values
(format t "Length of the Wooden Box is ~d~%" (box-length item))
(format t "Breadth of the Wooden Box is ~d~%" (box-breadth item))
(format t "Height of the Wooden Box is ~d~%" (box-height item))
(format t "Volume of the Wooden Box is ~d~%" (volume item))
(format t "Price of the Wooden Box is ~d~%" (box-price item))
When you execute the code, it returns the following result −
Length of the Wooden Box is 10
Breadth of the Wooden Box is 10
Height of the Wooden Box is 5
Volume of the Wooden Box is 500
Price of the Wooden Box is 1000
Bookmark this page | [
{
"code": null,
"e": 2205,
"s": 2060,
"text": "John McCarthy invented LISP in 1958, shortly after the development of FORTRAN. It was first implemented by Steve Russell on an IBM 704 computer."
},
{
"code": null,
"e": 2321,
"s": 2205,
"text": "It is particularly suitable for Artificial Intelligence programs, as it processes symbolic information effectively."
},
{
"code": null,
"e": 2527,
"s": 2321,
"text": "Common Lisp originated, during the 1980s and 1990s, in an attempt to unify the work of several implementation groups that were successors to Maclisp, like ZetaLisp and NIL (New Implementation of Lisp) etc."
},
{
"code": null,
"e": 2617,
"s": 2527,
"text": "It serves as a common language, which can be easily extended for specific implementation."
},
{
"code": null,
"e": 2725,
"s": 2617,
"text": "Programs written in Common LISP do not depend on machine-specific characteristics, such as word length etc."
},
{
"code": null,
"e": 2751,
"s": 2725,
"text": "It is machine-independent"
},
{
"code": null,
"e": 2777,
"s": 2751,
"text": "It is machine-independent"
},
{
"code": null,
"e": 2839,
"s": 2777,
"text": "It uses iterative design methodology, and easy extensibility."
},
{
"code": null,
"e": 2901,
"s": 2839,
"text": "It uses iterative design methodology, and easy extensibility."
},
{
"code": null,
"e": 2946,
"s": 2901,
"text": "It allows updating the programs dynamically."
},
{
"code": null,
"e": 2991,
"s": 2946,
"text": "It allows updating the programs dynamically."
},
{
"code": null,
"e": 3025,
"s": 2991,
"text": "It provides high level debugging."
},
{
"code": null,
"e": 3059,
"s": 3025,
"text": "It provides high level debugging."
},
{
"code": null,
"e": 3109,
"s": 3059,
"text": "It provides advanced object-oriented programming."
},
{
"code": null,
"e": 3159,
"s": 3109,
"text": "It provides advanced object-oriented programming."
},
{
"code": null,
"e": 3198,
"s": 3159,
"text": "It provides a convenient macro system."
},
{
"code": null,
"e": 3237,
"s": 3198,
"text": "It provides a convenient macro system."
},
{
"code": null,
"e": 3361,
"s": 3237,
"text": "It provides wide-ranging data types like, objects, structures, lists, vectors, adjustable arrays, hash-tables, and symbols."
},
{
"code": null,
"e": 3485,
"s": 3361,
"text": "It provides wide-ranging data types like, objects, structures, lists, vectors, adjustable arrays, hash-tables, and symbols."
},
{
"code": null,
"e": 3509,
"s": 3485,
"text": "It is expression-based."
},
{
"code": null,
"e": 3533,
"s": 3509,
"text": "It is expression-based."
},
{
"code": null,
"e": 3582,
"s": 3533,
"text": "It provides an object-oriented condition system."
},
{
"code": null,
"e": 3631,
"s": 3582,
"text": "It provides an object-oriented condition system."
},
{
"code": null,
"e": 3667,
"s": 3631,
"text": "It provides a complete I/O library."
},
{
"code": null,
"e": 3703,
"s": 3667,
"text": "It provides a complete I/O library."
},
{
"code": null,
"e": 3745,
"s": 3703,
"text": "It provides extensive control structures."
},
{
"code": null,
"e": 3787,
"s": 3745,
"text": "It provides extensive control structures."
},
{
"code": null,
"e": 3832,
"s": 3787,
"text": "Large successful applications built in Lisp."
},
{
"code": null,
"e": 3838,
"s": 3832,
"text": "Emacs"
},
{
"code": null,
"e": 3844,
"s": 3838,
"text": "Emacs"
},
{
"code": null,
"e": 3847,
"s": 3844,
"text": "G2"
},
{
"code": null,
"e": 3850,
"s": 3847,
"text": "G2"
},
{
"code": null,
"e": 3858,
"s": 3850,
"text": "AutoCad"
},
{
"code": null,
"e": 3866,
"s": 3858,
"text": "AutoCad"
},
{
"code": null,
"e": 3880,
"s": 3866,
"text": "Igor Engraver"
},
{
"code": null,
"e": 3894,
"s": 3880,
"text": "Igor Engraver"
},
{
"code": null,
"e": 3906,
"s": 3894,
"text": "Yahoo Store"
},
{
"code": null,
"e": 3918,
"s": 3906,
"text": "Yahoo Store"
},
{
"code": null,
"e": 4109,
"s": 3918,
"text": "If you are still willing to set up your environment for Lisp programming language, you need the following two softwares available on your computer, (a) Text Editor and (b) The Lisp Executer."
},
{
"code": null,
"e": 4253,
"s": 4109,
"text": "This will be used to type your program. Examples of few editors include Windows Notepad, OS Edit command, Brief, Epsilon, EMACS, and vim or vi."
},
{
"code": null,
"e": 4435,
"s": 4253,
"text": "Name and version of text editor can vary on different operating systems. For example, Notepad will be used on Windows, and vim or vi can be used on windows as well as Linux or UNIX."
},
{
"code": null,
"e": 4613,
"s": 4435,
"text": "The files you create with your editor are called source files and contain program source code. The source files for Lisp programs are typically named with the extension \".lisp\"."
},
{
"code": null,
"e": 4790,
"s": 4613,
"text": "Before starting your programming, make sure you have one text editor in place and you have enough experience to write a computer program, save it in a file, finally execute it."
},
{
"code": null,
"e": 5010,
"s": 4790,
"text": "The source code written in source file is the human readable source for your program. It needs to be \"executed\", to turn into machine language so that your CPU can actually execute the program as per instructions given."
},
{
"code": null,
"e": 5177,
"s": 5010,
"text": "This Lisp programming language will be used to execute your source code into final executable program. I assume you have basic knowledge about a programming language."
},
{
"code": null,
"e": 5440,
"s": 5177,
"text": "CLISP is the GNU Common LISP multi-architechtural compiler used for setting up LISP in Windows. The windows version emulates a unix environment using MingW under windows. The installer takes care of this and automatically adds clisp to the windows PATH variable."
},
{
"code": null,
"e": 5554,
"s": 5440,
"text": "You can get the latest CLISP for Windows from here - https://sourceforge.net/projects/clisp/files/latest/download"
},
{
"code": null,
"e": 5640,
"s": 5554,
"text": "It creates a shortcut in the Start Menu by default, for the line-by-line interpreter."
},
{
"code": null,
"e": 5859,
"s": 5640,
"text": "During installation, clisp is automatically added to your PATH variable if you select the option (RECOMMENDED) This means that you can simply open a new Command Prompt window and type “clisp” to bring up the compiler."
},
{
"code": null,
"e": 5903,
"s": 5859,
"text": "To run a *.lisp or *.lsp file, simply use −"
},
{
"code": null,
"e": 5920,
"s": 5903,
"text": "clisp hello.lisp"
},
{
"code": null,
"e": 6068,
"s": 5920,
"text": "LISP expressions are called symbolic expressions or s-expressions. The s-expressions are composed of three valid objects, atoms, lists and strings."
},
{
"code": null,
"e": 6105,
"s": 6068,
"text": "Any s-expression is a valid program."
},
{
"code": null,
"e": 6169,
"s": 6105,
"text": "LISP programs run either on an interpreter or as compiled code."
},
{
"code": null,
"e": 6374,
"s": 6169,
"text": "The interpreter checks the source code in a repeated loop, which is also called the read-evaluate-print loop (REPL). It reads the program code, evaluates it, and prints the values returned by the program."
},
{
"code": null,
"e": 6500,
"s": 6374,
"text": "Let us write an s-expression to find the sum of three numbers 7, 9 and 11. To do this, we can type at the interpreter prompt."
},
{
"code": null,
"e": 6511,
"s": 6500,
"text": "(+ 7 9 11)"
},
{
"code": null,
"e": 6537,
"s": 6511,
"text": "LISP returns the result −"
},
{
"code": null,
"e": 6541,
"s": 6537,
"text": "27\n"
},
{
"code": null,
"e": 6692,
"s": 6541,
"text": "If you would like to run the same program as a compiled code, then create a LISP source code file named myprog.lisp and type the following code in it."
},
{
"code": null,
"e": 6711,
"s": 6692,
"text": "(write (+ 7 9 11))"
},
{
"code": null,
"e": 6820,
"s": 6711,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −"
},
{
"code": null,
"e": 6824,
"s": 6820,
"text": "27\n"
},
{
"code": null,
"e": 6877,
"s": 6824,
"text": "You might have noted that LISP uses prefix notation."
},
{
"code": null,
"e": 6983,
"s": 6877,
"text": "In the above program the + symbol works as the function name for the process of summation of the numbers."
},
{
"code": null,
"e": 7077,
"s": 6983,
"text": "In prefix notation, operators are written before their operands. For example, the expression,"
},
{
"code": null,
"e": 7095,
"s": 7077,
"text": "a * ( b + c ) / d"
},
{
"code": null,
"e": 7116,
"s": 7095,
"text": "will be written as −"
},
{
"code": null,
"e": 7137,
"s": 7116,
"text": "(/ (* a (+ b c) ) d)"
},
{
"code": null,
"e": 7250,
"s": 7137,
"text": "Let us take another example, let us write code for converting Fahrenheit temp of 60o F to the centigrade scale −"
},
{
"code": null,
"e": 7308,
"s": 7250,
"text": "The mathematical expression for this conversion will be −"
},
{
"code": null,
"e": 7326,
"s": 7308,
"text": "(60 * 9 / 5) + 32"
},
{
"code": null,
"e": 7403,
"s": 7326,
"text": "Create a source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 7432,
"s": 7403,
"text": "(write(+ (* (/ 9 5) 60) 32))"
},
{
"code": null,
"e": 7540,
"s": 7432,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is−"
},
{
"code": null,
"e": 7545,
"s": 7540,
"text": "140\n"
},
{
"code": null,
"e": 7589,
"s": 7545,
"text": "Evaluation of LISP programs has two parts −"
},
{
"code": null,
"e": 7655,
"s": 7589,
"text": "Translation of program text into Lisp objects by a reader program"
},
{
"code": null,
"e": 7721,
"s": 7655,
"text": "Translation of program text into Lisp objects by a reader program"
},
{
"code": null,
"e": 7819,
"s": 7721,
"text": "Implementation of the semantics of the language in terms of these objects by an evaluator program"
},
{
"code": null,
"e": 7917,
"s": 7819,
"text": "Implementation of the semantics of the language in terms of these objects by an evaluator program"
},
{
"code": null,
"e": 7968,
"s": 7917,
"text": "The evaluation process takes the following steps −"
},
{
"code": null,
"e": 8050,
"s": 7968,
"text": "The reader translates the strings of characters to LISP objects or s-expressions."
},
{
"code": null,
"e": 8132,
"s": 8050,
"text": "The reader translates the strings of characters to LISP objects or s-expressions."
},
{
"code": null,
"e": 8311,
"s": 8132,
"text": "The evaluator defines syntax of Lisp forms that are built from s-expressions. This second level of evaluation defines a syntax that determines which s-expressions are LISP forms."
},
{
"code": null,
"e": 8490,
"s": 8311,
"text": "The evaluator defines syntax of Lisp forms that are built from s-expressions. This second level of evaluation defines a syntax that determines which s-expressions are LISP forms."
},
{
"code": null,
"e": 8736,
"s": 8490,
"text": "The evaluator works as a function that takes a valid LISP form as an argument and returns a value. This is the reason why we put the LISP expression in parenthesis, because we are sending the entire expression/form to the evaluator as arguments."
},
{
"code": null,
"e": 8982,
"s": 8736,
"text": "The evaluator works as a function that takes a valid LISP form as an argument and returns a value. This is the reason why we put the LISP expression in parenthesis, because we are sending the entire expression/form to the evaluator as arguments."
},
{
"code": null,
"e": 9113,
"s": 8982,
"text": "Learning a new programming language doesn't really take off until you learn how to greet the entire world in that language, right!"
},
{
"code": null,
"e": 9203,
"s": 9113,
"text": "So, please create new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 9287,
"s": 9203,
"text": "(write-line \"Hello World\")\n\n(write-line \"I am at 'Tutorials Point'! Learning LISP\")"
},
{
"code": null,
"e": 9396,
"s": 9287,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −"
},
{
"code": null,
"e": 9451,
"s": 9396,
"text": "Hello World\n\nI am at 'Tutorials Point'! Learning LISP\n"
},
{
"code": null,
"e": 9510,
"s": 9451,
"text": "LISP programs are made up of three basic building blocks −"
},
{
"code": null,
"e": 9515,
"s": 9510,
"text": "atom"
},
{
"code": null,
"e": 9520,
"s": 9515,
"text": "list"
},
{
"code": null,
"e": 9527,
"s": 9520,
"text": "string"
},
{
"code": null,
"e": 9627,
"s": 9527,
"text": "An atom is a number or string of contiguous characters. It includes numbers and special characters."
},
{
"code": null,
"e": 9672,
"s": 9627,
"text": "Following are examples of some valid atoms −"
},
{
"code": null,
"e": 9739,
"s": 9672,
"text": "hello-from-tutorials-point\nname\n123008907\n*hello*\nBlock#221\nabc123"
},
{
"code": null,
"e": 9813,
"s": 9739,
"text": "A list is a sequence of atoms and/or other lists enclosed in parentheses."
},
{
"code": null,
"e": 9858,
"s": 9813,
"text": "Following are examples of some valid lists −"
},
{
"code": null,
"e": 9960,
"s": 9858,
"text": "( i am a list)\n(a ( a b c) d e fgh)\n(father tom ( susan bill joe))\n(sun mon tue wed thur fri sat)\n( )"
},
{
"code": null,
"e": 10030,
"s": 9960,
"text": "A string is a group of characters enclosed in double quotation marks."
},
{
"code": null,
"e": 10077,
"s": 10030,
"text": "Following are examples of some valid strings −"
},
{
"code": null,
"e": 10188,
"s": 10077,
"text": "\" I am a string\"\n\"a ba c d efg #$%^&!\"\n\"Please enter the following details :\"\n\"Hello from 'Tutorials Point'! \""
},
{
"code": null,
"e": 10252,
"s": 10188,
"text": "The semicolon symbol (;) is used for indicating a comment line."
},
{
"code": null,
"e": 10265,
"s": 10252,
"text": "For Example,"
},
{
"code": null,
"e": 10397,
"s": 10265,
"text": "(write-line \"Hello World\") ; greet the world\n\n; tell them your whereabouts\n\n(write-line \"I am at 'Tutorials Point'! Learning LISP\")"
},
{
"code": null,
"e": 10506,
"s": 10397,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −"
},
{
"code": null,
"e": 10561,
"s": 10506,
"text": "Hello World\n\nI am at 'Tutorials Point'! Learning LISP\n"
},
{
"code": null,
"e": 10614,
"s": 10561,
"text": "Following are some of the important points to note −"
},
{
"code": null,
"e": 10670,
"s": 10614,
"text": "The basic numeric operations in LISP are +, -, *, and /"
},
{
"code": null,
"e": 10726,
"s": 10670,
"text": "The basic numeric operations in LISP are +, -, *, and /"
},
{
"code": null,
"e": 10814,
"s": 10726,
"text": "LISP represents a function call f(x) as (f x), for example cos(45) is written as cos 45"
},
{
"code": null,
"e": 10902,
"s": 10814,
"text": "LISP represents a function call f(x) as (f x), for example cos(45) is written as cos 45"
},
{
"code": null,
"e": 10968,
"s": 10902,
"text": "LISP expressions are case-insensitive, cos 45 or COS 45 are same."
},
{
"code": null,
"e": 11034,
"s": 10968,
"text": "LISP expressions are case-insensitive, cos 45 or COS 45 are same."
},
{
"code": null,
"e": 11312,
"s": 11034,
"text": "LISP tries to evaluate everything, including the arguments of a function. Only three types of elements are constants and always return their own value\n\nNumbers\nThe letter t, that stands for logical true.\nThe value nil, that stands for logical false, as well as an empty list.\n\n"
},
{
"code": null,
"e": 11463,
"s": 11312,
"text": "LISP tries to evaluate everything, including the arguments of a function. Only three types of elements are constants and always return their own value"
},
{
"code": null,
"e": 11471,
"s": 11463,
"text": "Numbers"
},
{
"code": null,
"e": 11479,
"s": 11471,
"text": "Numbers"
},
{
"code": null,
"e": 11523,
"s": 11479,
"text": "The letter t, that stands for logical true."
},
{
"code": null,
"e": 11567,
"s": 11523,
"text": "The letter t, that stands for logical true."
},
{
"code": null,
"e": 11639,
"s": 11567,
"text": "The value nil, that stands for logical false, as well as an empty list."
},
{
"code": null,
"e": 11711,
"s": 11639,
"text": "The value nil, that stands for logical false, as well as an empty list."
},
{
"code": null,
"e": 11817,
"s": 11711,
"text": "In the previous chapter, we mentioned that the evaluation process of LISP code takes the following steps."
},
{
"code": null,
"e": 11899,
"s": 11817,
"text": "The reader translates the strings of characters to LISP objects or s-expressions."
},
{
"code": null,
"e": 11981,
"s": 11899,
"text": "The reader translates the strings of characters to LISP objects or s-expressions."
},
{
"code": null,
"e": 12160,
"s": 11981,
"text": "The evaluator defines syntax of Lisp forms that are built from s-expressions. This second level of evaluation defines a syntax that determines which s-expressions are LISP forms."
},
{
"code": null,
"e": 12339,
"s": 12160,
"text": "The evaluator defines syntax of Lisp forms that are built from s-expressions. This second level of evaluation defines a syntax that determines which s-expressions are LISP forms."
},
{
"code": null,
"e": 12367,
"s": 12339,
"text": "Now, a LISP forms could be."
},
{
"code": null,
"e": 12375,
"s": 12367,
"text": "An Atom"
},
{
"code": null,
"e": 12396,
"s": 12375,
"text": "An empty or non-list"
},
{
"code": null,
"e": 12444,
"s": 12396,
"text": "Any list that has a symbol as its first element"
},
{
"code": null,
"e": 12690,
"s": 12444,
"text": "The evaluator works as a function that takes a valid LISP form as an argument and returns a value. This is the reason why we put the LISP expression in parenthesis, because we are sending the entire expression/form to the evaluator as arguments."
},
{
"code": null,
"e": 12964,
"s": 12690,
"text": "Name or symbols can consist of any number of alphanumeric characters other than whitespace, open and closing parentheses, double and single quotes, backslash, comma, colon, semicolon and vertical bar. To use these characters in a name, you need to use escape character (\\)."
},
{
"code": null,
"e": 13137,
"s": 12964,
"text": "A name can have digits but not entirely made of digits, because then it would be read as a number. Similarly a name can have periods, but can't be made entirely of periods."
},
{
"code": null,
"e": 13214,
"s": 13137,
"text": "LISP evaluates everything including the function arguments and list members."
},
{
"code": null,
"e": 13325,
"s": 13214,
"text": "At times, we need to take atoms or lists literally and don't want them evaluated or treated as function calls."
},
{
"code": null,
"e": 13407,
"s": 13325,
"text": "To do this, we need to precede the atom or the list with a single quotation mark."
},
{
"code": null,
"e": 13448,
"s": 13407,
"text": "The following example demonstrates this."
},
{
"code": null,
"e": 13515,
"s": 13448,
"text": "Create a file named main.lisp and type the following code into it."
},
{
"code": null,
"e": 13684,
"s": 13515,
"text": "(write-line \"single quote used, it inhibits evaluation\")\n(write '(* 2 3))\n(write-line \" \")\n(write-line \"single quote not used, so expression evaluated\")\n(write (* 2 3))"
},
{
"code": null,
"e": 13793,
"s": 13684,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −"
},
{
"code": null,
"e": 13894,
"s": 13793,
"text": "single quote used, it inhibits evaluation\n(* 2 3) \nsingle quote not used, so expression evaluated\n6\n"
},
{
"code": null,
"e": 13950,
"s": 13894,
"text": "In LISP, variables are not typed, but data objects are."
},
{
"code": null,
"e": 13989,
"s": 13950,
"text": "LISP data types can be categorized as."
},
{
"code": null,
"e": 14056,
"s": 13989,
"text": "Scalar types − for example, number types, characters, symbols etc."
},
{
"code": null,
"e": 14123,
"s": 14056,
"text": "Scalar types − for example, number types, characters, symbols etc."
},
{
"code": null,
"e": 14196,
"s": 14123,
"text": "Data structures − for example, lists, vectors, bit-vectors, and strings."
},
{
"code": null,
"e": 14269,
"s": 14196,
"text": "Data structures − for example, lists, vectors, bit-vectors, and strings."
},
{
"code": null,
"e": 14361,
"s": 14269,
"text": "Any variable can take any LISP object as its value, unless you have declared it explicitly."
},
{
"code": null,
"e": 14570,
"s": 14361,
"text": "Although, it is not necessary to specify a data type for a LISP variable, however, it helps in certain loop expansions, in method declarations and some other situations that we will discuss in later chapters."
},
{
"code": null,
"e": 14698,
"s": 14570,
"text": "The data types are arranged into a hierarchy. A data type is a set of LISP objects and many objects may belong to one such set."
},
{
"code": null,
"e": 14784,
"s": 14698,
"text": "The typep predicate is used for finding whether an object belongs to a specific type."
},
{
"code": null,
"e": 14846,
"s": 14784,
"text": "The type-of function returns the data type of a given object."
},
{
"code": null,
"e": 14905,
"s": 14846,
"text": "Type specifiers are system-defined symbols for data types."
},
{
"code": null,
"e": 15099,
"s": 14905,
"text": "Apart from these system-defined types, you can create your own data types. When a structure type is defined using defstruct function, the name of the structure type becomes a valid type symbol."
},
{
"code": null,
"e": 15178,
"s": 15099,
"text": "Create new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 15332,
"s": 15178,
"text": "(setq x 10)\n(setq y 34.567)\n(setq ch nil)\n(setq n 123.78)\n(setq bg 11.0e+4)\n(setq r 124/2)\n\n(print x)\n(print y)\n(print n)\n(print ch)\n(print bg)\n(print r)"
},
{
"code": null,
"e": 15441,
"s": 15332,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −"
},
{
"code": null,
"e": 15480,
"s": 15441,
"text": "10 \n34.567 \n123.78 \nNIL \n110000.0 \n62\n"
},
{
"code": null,
"e": 15634,
"s": 15480,
"text": "Next let's check the types of the variables used in the previous example. Create new source code file named main. lisp and type the following code in it."
},
{
"code": null,
"e": 15860,
"s": 15634,
"text": "(defvar x 10)\n(defvar y 34.567)\n(defvar ch nil)\n(defvar n 123.78)\n(defvar bg 11.0e+4)\n(defvar r 124/2)\n\n(print (type-of x))\n(print (type-of y))\n(print (type-of n))\n(print (type-of ch))\n(print (type-of bg))\n(print (type-of r))"
},
{
"code": null,
"e": 15969,
"s": 15860,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −"
},
{
"code": null,
"e": 16075,
"s": 15969,
"text": "(INTEGER 0 281474976710655) \nSINGLE-FLOAT \nSINGLE-FLOAT \nNULL \nSINGLE-FLOAT \n(INTEGER 0 281474976710655)\n"
},
{
"code": null,
"e": 16131,
"s": 16075,
"text": "Macros allow you to extend the syntax of standard LISP."
},
{
"code": null,
"e": 16256,
"s": 16131,
"text": "Technically, a macro is a function that takes an s-expression as arguments and returns a LISP form, which is then evaluated."
},
{
"code": null,
"e": 16359,
"s": 16256,
"text": "In LISP, a named macro is defined using another macro named defmacro. Syntax for defining a macro is −"
},
{
"code": null,
"e": 16441,
"s": 16359,
"text": "(defmacro macro-name (parameter-list))\n\"Optional documentation string.\"\nbody-form"
},
{
"code": null,
"e": 16631,
"s": 16441,
"text": "The macro definition consists of the name of the macro, a parameter list, an optional documentation string, and a body of Lisp expressions that defines the job to be performed by the macro."
},
{
"code": null,
"e": 16724,
"s": 16631,
"text": "Let us write a simple macro named setTo10, which will take a number and set its value to 10."
},
{
"code": null,
"e": 16803,
"s": 16724,
"text": "Create new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 16886,
"s": 16803,
"text": "(defmacro setTo10(num)\n(setq num 10)(print num))\n(setq x 25)\n(print x)\n(setTo10 x)"
},
{
"code": null,
"e": 16995,
"s": 16886,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −"
},
{
"code": null,
"e": 17002,
"s": 16995,
"text": "25\n10\n"
},
{
"code": null,
"e": 17151,
"s": 17002,
"text": "In LISP, each variable is represented by a symbol. The variable's name is the name of the symbol and it is stored in the storage cell of the symbol."
},
{
"code": null,
"e": 17270,
"s": 17151,
"text": "Global variables have permanent values throughout the LISP system and remain in effect until a new value is specified."
},
{
"code": null,
"e": 17338,
"s": 17270,
"text": "Global variables are generally declared using the defvar construct."
},
{
"code": null,
"e": 17363,
"s": 17338,
"text": "(defvar x 234)\n(write x)"
},
{
"code": null,
"e": 17470,
"s": 17363,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is"
},
{
"code": null,
"e": 17475,
"s": 17470,
"text": "234\n"
},
{
"code": null,
"e": 17600,
"s": 17475,
"text": "Since there is no type declaration for variables in LISP, you directly specify a value for a symbol with the setq construct."
},
{
"code": null,
"e": 17614,
"s": 17600,
"text": "->(setq x 10)"
},
{
"code": null,
"e": 17747,
"s": 17614,
"text": "The above expression assigns the value 10 to the variable x. You can refer to the variable using the symbol itself as an expression."
},
{
"code": null,
"e": 17841,
"s": 17747,
"text": "The symbol-value function allows you to extract the value stored at the symbol storage place."
},
{
"code": null,
"e": 17920,
"s": 17841,
"text": "Create new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 18040,
"s": 17920,
"text": "(setq x 10)\n(setq y 20)\n(format t \"x = ~2d y = ~2d ~%\" x y)\n\n(setq x 100)\n(setq y 200)\n(format t \"x = ~2d y = ~2d\" x y)"
},
{
"code": null,
"e": 18148,
"s": 18040,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is."
},
{
"code": null,
"e": 18180,
"s": 18148,
"text": "x = 10 y = 20 \nx = 100 y = 200\n"
},
{
"code": null,
"e": 18391,
"s": 18180,
"text": "Local variables are defined within a given procedure. The parameters named as arguments within a function definition are also local variables. Local variables are accessible only within the respective function."
},
{
"code": null,
"e": 18480,
"s": 18391,
"text": "Like the global variables, local variables can also be created using the setq construct."
},
{
"code": null,
"e": 18556,
"s": 18480,
"text": "There are two other constructs - let and prog for creating local variables."
},
{
"code": null,
"e": 18600,
"s": 18556,
"text": "The let construct has the following syntax."
},
{
"code": null,
"e": 18664,
"s": 18600,
"text": "(let ((var1 val1) (var2 val2).. (varn valn))<s-expressions>)"
},
{
"code": null,
"e": 18793,
"s": 18664,
"text": "Where var1, var2, ..varn are variable names and val1, val2, .. valn are the initial values assigned to the respective variables."
},
{
"code": null,
"e": 18960,
"s": 18793,
"text": "When let is executed, each variable is assigned the respective value and lastly the s-expression is evaluated. The value of the last expression evaluated is returned."
},
{
"code": null,
"e": 19037,
"s": 18960,
"text": "If you don't include an initial value for a variable, it is assigned to nil."
},
{
"code": null,
"e": 19116,
"s": 19037,
"text": "Create new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 19184,
"s": 19116,
"text": "(let ((x 'a) (y 'b)(z 'c))\n(format t \"x = ~a y = ~a z = ~a\" x y z))"
},
{
"code": null,
"e": 19292,
"s": 19184,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is."
},
{
"code": null,
"e": 19311,
"s": 19292,
"text": "x = A y = B z = C\n"
},
{
"code": null,
"e": 19466,
"s": 19311,
"text": "The prog construct also has the list of local variables as its first argument, which is followed by the body of the prog, and any number of s-expressions."
},
{
"code": null,
"e": 19666,
"s": 19466,
"text": "The prog function executes the list of s-expressions in sequence and returns nil unless it encounters a function call named return. Then the argument of the return function is evaluated and returned."
},
{
"code": null,
"e": 19745,
"s": 19666,
"text": "Create new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 19832,
"s": 19745,
"text": "(prog ((x '(a b c))(y '(1 2 3))(z '(p q 10)))\n(format t \"x = ~a y = ~a z = ~a\" x y z))"
},
{
"code": null,
"e": 19940,
"s": 19832,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is."
},
{
"code": null,
"e": 19978,
"s": 19940,
"text": "x = (A B C) y = (1 2 3) z = (P Q 10)\n"
},
{
"code": null,
"e": 20124,
"s": 19978,
"text": "In LISP, constants are variables that never change their values during program execution. Constants are declared using the defconstant construct."
},
{
"code": null,
"e": 20284,
"s": 20124,
"text": "The following example shows declaring a global constant PI and later using this value inside a function named area-circle that calculates the area of a circle."
},
{
"code": null,
"e": 20384,
"s": 20284,
"text": "The defun construct is used for defining a function, we will look into it in the Functions chapter."
},
{
"code": null,
"e": 20465,
"s": 20384,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 20621,
"s": 20465,
"text": "(defconstant PI 3.141592)\n(defun area-circle(rad)\n (terpri)\n (format t \"Radius: ~5f\" rad)\n (format t \"~%Area: ~10f\" (* PI rad rad)))\n(area-circle 10)"
},
{
"code": null,
"e": 20729,
"s": 20621,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is."
},
{
"code": null,
"e": 20761,
"s": 20729,
"text": "Radius: 10.0\nArea: 314.1592\n"
},
{
"code": null,
"e": 20970,
"s": 20761,
"text": "An operator is a symbol that tells the compiler to perform specific mathematical or logical manipulations. LISP allows numerous operations on data, supported by various functions, macros and other constructs."
},
{
"code": null,
"e": 21027,
"s": 20970,
"text": "The operations allowed on data could be categorized as −"
},
{
"code": null,
"e": 21049,
"s": 21027,
"text": "Arithmetic Operations"
},
{
"code": null,
"e": 21071,
"s": 21049,
"text": "Comparison Operations"
},
{
"code": null,
"e": 21090,
"s": 21071,
"text": "Logical Operations"
},
{
"code": null,
"e": 21109,
"s": 21090,
"text": "Bitwise Operations"
},
{
"code": null,
"e": 21241,
"s": 21109,
"text": "The following table shows all the arithmetic operators supported by LISP. Assume variable A holds 10 and variable B holds 20 then −"
},
{
"code": null,
"e": 21255,
"s": 21241,
"text": "Show Examples"
},
{
"code": null,
"e": 21500,
"s": 21255,
"text": "Following table shows all the relational operators supported by LISP that compares between numbers. However unlike relational operators in other languages, LISP comparison operators may take more than two operands and they work on numbers only."
},
{
"code": null,
"e": 21559,
"s": 21500,
"text": "Assume variable A holds 10 and variable B holds 20, then −"
},
{
"code": null,
"e": 21573,
"s": 21559,
"text": "Show Examples"
},
{
"code": null,
"e": 21718,
"s": 21573,
"text": "Common LISP provides three logical operators: and, or, and not that operates on Boolean values. Assume A has value nil and B has value 5, then −"
},
{
"code": null,
"e": 21732,
"s": 21718,
"text": "Show Examples"
},
{
"code": null,
"e": 21871,
"s": 21732,
"text": "Bitwise operators work on bits and perform bit-by-bit operation. The truth tables for bitwise and, or, and xor operations are as follows −"
},
{
"code": null,
"e": 21885,
"s": 21871,
"text": "Show Examples"
},
{
"code": null,
"e": 22085,
"s": 21885,
"text": "Assume if A = 60; and B = 13; now in binary format they will be as follows:\nA = 0011 1100\nB = 0000 1101\n-----------------\nA and B = 0000 1100\nA or B = 0011 1101\nA xor B = 0011 0001\nnot A = 1100 0011"
},
{
"code": null,
"e": 22219,
"s": 22085,
"text": "The Bitwise operators supported by LISP are listed in the following table. Assume variable A holds 60 and variable B holds 13, then −"
},
{
"code": null,
"e": 22534,
"s": 22219,
"text": "Decision making structures require that the programmer specify one or more conditions to be evaluated or tested by the program, along with a statement or statements to be executed if the condition is determined to be true, and optionally, other statements to be executed if the condition is determined to be false."
},
{
"code": null,
"e": 22648,
"s": 22534,
"text": "Following is the general form of a typical decision making structure found in most of the programming languages −"
},
{
"code": null,
"e": 22758,
"s": 22648,
"text": "LISP provides following types of decision making constructs. Click the following links to check their detail."
},
{
"code": null,
"e": 22912,
"s": 22758,
"text": "This construct is used for used for checking multiple test-action clauses. It can be compared to the nested if statements in other programming languages."
},
{
"code": null,
"e": 23167,
"s": 22912,
"text": "The if construct has various forms. In simplest form it is followed by a test clause, a test action and some other consequent action(s). If the test clause evaluates to true, then the test action is executed otherwise, the consequent clause is evaluated."
},
{
"code": null,
"e": 23354,
"s": 23167,
"text": "In simplest form it is followed by a test clause, and a test action. If the test clause evaluates to true, then the test action is executed otherwise, the consequent clause is evaluated."
},
{
"code": null,
"e": 23544,
"s": 23354,
"text": "This construct implements multiple test-action clauses like the cond construct. However, it evaluates a key form and allows multiple action clauses based on the evaluation of that key form."
},
{
"code": null,
"e": 23809,
"s": 23544,
"text": "There may be a situation, when you need to execute a block of code numbers of times. A loop statement allows us to execute a statement or group of statements multiple times and following is the general form of a loop statement in most of the programming languages."
},
{
"code": null,
"e": 23938,
"s": 23809,
"text": "LISP provides the following types of constructs to handle looping requirements. Click the following links to check their detail."
},
{
"code": null,
"e": 24120,
"s": 23938,
"text": "The loop construct is the simplest form of iteration provided by LISP. In its simplest form, it allows you to execute some statement(s) repeatedly until it finds a return statement."
},
{
"code": null,
"e": 24228,
"s": 24120,
"text": "The loop for construct allows you to implement a for-loop like iteration as most common in other languages."
},
{
"code": null,
"e": 24339,
"s": 24228,
"text": "The do construct is also used for performing iteration using LISP. It provides a structured form of iteration."
},
{
"code": null,
"e": 24413,
"s": 24339,
"text": "The dotimes construct allows looping for some fixed number of iterations."
},
{
"code": null,
"e": 24483,
"s": 24413,
"text": "The dolist construct allows iteration through each element of a list."
},
{
"code": null,
"e": 24584,
"s": 24483,
"text": "The block and return-from allows you to exit gracefully from any nested blocks in case of any error."
},
{
"code": null,
"e": 24699,
"s": 24584,
"text": "The block function allows you to create a named block with a body composed of zero or more statements. Syntax is −"
},
{
"code": null,
"e": 24730,
"s": 24699,
"text": "(block block-name(\n...\n...\n))\n"
},
{
"code": null,
"e": 24825,
"s": 24730,
"text": "The return-from function takes a block name and an optional (the default is nil) return value."
},
{
"code": null,
"e": 24867,
"s": 24825,
"text": "The following example demonstrates this −"
},
{
"code": null,
"e": 24949,
"s": 24867,
"text": "Create a new source code file named main.lisp and type the following code in it −"
},
{
"code": null,
"e": 25390,
"s": 24949,
"text": "(defun demo-function (flag)\n (print 'entering-outer-block)\n \n (block outer-block\n (print 'entering-inner-block)\n (print (block inner-block\n\n (if flag\n (return-from outer-block 3)\n (return-from inner-block 5)\n )\n\n (print 'This-wil--not-be-printed))\n )\n\n (print 'left-inner-block)\n (print 'leaving-outer-block)\n t)\n)\n(demo-function t)\n(terpri)\n(demo-function nil)"
},
{
"code": null,
"e": 25499,
"s": 25390,
"text": "When you click the Execute button, or type Ctrl+E, LISP executes it immediately and the result returned is −"
},
{
"code": null,
"e": 25630,
"s": 25499,
"text": "ENTERING-OUTER-BLOCK \nENTERING-INNER-BLOCK \n\nENTERING-OUTER-BLOCK \nENTERING-INNER-BLOCK \n5 \nLEFT-INNER-BLOCK \nLEAVING-OUTER-BLOCK\n"
},
{
"code": null,
"e": 25696,
"s": 25630,
"text": "A function is a group of statements that together perform a task."
},
{
"code": null,
"e": 25898,
"s": 25696,
"text": "You can divide up your code into separate functions. How you divide up your code among different functions is up to you, but logically the division usually is so each function performs a specific task."
},
{
"code": null,
"e": 25992,
"s": 25898,
"text": "The macro named defun is used for defining functions. The defun macro needs three arguments −"
},
{
"code": null,
"e": 26013,
"s": 25992,
"text": "Name of the function"
},
{
"code": null,
"e": 26040,
"s": 26013,
"text": "Parameters of the function"
},
{
"code": null,
"e": 26061,
"s": 26040,
"text": "Body of the function"
},
{
"code": null,
"e": 26083,
"s": 26061,
"text": "Syntax for defun is −"
},
{
"code": null,
"e": 26151,
"s": 26083,
"text": "(defun name (parameter-list) \"Optional documentation string.\" body)"
},
{
"code": null,
"e": 26203,
"s": 26151,
"text": "Let us illustrate the concept with simple examples."
},
{
"code": null,
"e": 26330,
"s": 26203,
"text": "Let's write a function named averagenum that will print the average of four numbers. We will send these numbers as parameters."
},
{
"code": null,
"e": 26411,
"s": 26330,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 26503,
"s": 26411,
"text": "(defun averagenum (n1 n2 n3 n4)\n (/ ( + n1 n2 n3 n4) 4)\n)\n(write(averagenum 10 20 30 40))"
},
{
"code": null,
"e": 26564,
"s": 26503,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 26568,
"s": 26564,
"text": "25\n"
},
{
"code": null,
"e": 26698,
"s": 26568,
"text": "Let's define and call a function that would calculate the area of a circle when the radius of the circle is given as an argument."
},
{
"code": null,
"e": 26779,
"s": 26698,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 26967,
"s": 26779,
"text": "(defun area-circle(rad)\n \"Calculates area of a circle with given radius\"\n (terpri)\n (format t \"Radius: ~5f\" rad)\n (format t \"~%Area: ~10f\" (* 3.141592 rad rad))\n)\n(area-circle 10)"
},
{
"code": null,
"e": 27028,
"s": 26967,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 27060,
"s": 27028,
"text": "Radius: 10.0\nArea: 314.1592\n"
},
{
"code": null,
"e": 27079,
"s": 27060,
"text": "Please note that −"
},
{
"code": null,
"e": 27203,
"s": 27079,
"text": "You can provide an empty list as parameters, which means the function takes no arguments, the list is empty, written as ()."
},
{
"code": null,
"e": 27327,
"s": 27203,
"text": "You can provide an empty list as parameters, which means the function takes no arguments, the list is empty, written as ()."
},
{
"code": null,
"e": 27387,
"s": 27327,
"text": "LISP also allows optional, multiple, and keyword arguments."
},
{
"code": null,
"e": 27447,
"s": 27387,
"text": "LISP also allows optional, multiple, and keyword arguments."
},
{
"code": null,
"e": 27612,
"s": 27447,
"text": "The documentation string describes the purpose of the function. It is associated with the name of the function and can be obtained using the documentation function."
},
{
"code": null,
"e": 27777,
"s": 27612,
"text": "The documentation string describes the purpose of the function. It is associated with the name of the function and can be obtained using the documentation function."
},
{
"code": null,
"e": 27849,
"s": 27777,
"text": "The body of the function may consist of any number of Lisp expressions."
},
{
"code": null,
"e": 27921,
"s": 27849,
"text": "The body of the function may consist of any number of Lisp expressions."
},
{
"code": null,
"e": 28008,
"s": 27921,
"text": "The value of the last expression in the body is returned as the value of the function."
},
{
"code": null,
"e": 28095,
"s": 28008,
"text": "The value of the last expression in the body is returned as the value of the function."
},
{
"code": null,
"e": 28181,
"s": 28095,
"text": "You can also return a value from the function using the return-from special operator."
},
{
"code": null,
"e": 28267,
"s": 28181,
"text": "You can also return a value from the function using the return-from special operator."
},
{
"code": null,
"e": 28351,
"s": 28267,
"text": "Let us discuss the above concepts in brief. Click following links to find details −"
},
{
"code": null,
"e": 28371,
"s": 28351,
"text": "Optional Parameters"
},
{
"code": null,
"e": 28391,
"s": 28371,
"text": "Optional Parameters"
},
{
"code": null,
"e": 28407,
"s": 28391,
"text": "Rest Parameters"
},
{
"code": null,
"e": 28423,
"s": 28407,
"text": "Rest Parameters"
},
{
"code": null,
"e": 28442,
"s": 28423,
"text": "Keyword Parameters"
},
{
"code": null,
"e": 28461,
"s": 28442,
"text": "Keyword Parameters"
},
{
"code": null,
"e": 28494,
"s": 28461,
"text": "Returning Values from a Function"
},
{
"code": null,
"e": 28527,
"s": 28494,
"text": "Returning Values from a Function"
},
{
"code": null,
"e": 28544,
"s": 28527,
"text": "Lambda Functions"
},
{
"code": null,
"e": 28561,
"s": 28544,
"text": "Lambda Functions"
},
{
"code": null,
"e": 28579,
"s": 28561,
"text": "Mapping Functions"
},
{
"code": null,
"e": 28597,
"s": 28579,
"text": "Mapping Functions"
},
{
"code": null,
"e": 28768,
"s": 28597,
"text": "Predicates are functions that test their arguments for some specific conditions and returns nil if the condition is false, or some non-nil value is the condition is true."
},
{
"code": null,
"e": 28838,
"s": 28768,
"text": "The following table shows some of the most commonly used predicates −"
},
{
"code": null,
"e": 28843,
"s": 28838,
"text": "atom"
},
{
"code": null,
"e": 28927,
"s": 28843,
"text": "It takes one argument and returns t if the argument is an atom or nil if otherwise."
},
{
"code": null,
"e": 28933,
"s": 28927,
"text": "equal"
},
{
"code": null,
"e": 29019,
"s": 28933,
"text": "It takes two arguments and returns t if they are structurally equal or nil otherwise."
},
{
"code": null,
"e": 29022,
"s": 29019,
"text": "eq"
},
{
"code": null,
"e": 29146,
"s": 29022,
"text": "It takes two arguments and returns t if they are same identical objects, sharing the same memory location or nil otherwise."
},
{
"code": null,
"e": 29150,
"s": 29146,
"text": "eql"
},
{
"code": null,
"e": 29359,
"s": 29150,
"text": "It takes two arguments and returns t if the arguments are eq, or if they are numbers of the same type with the same value, or if they are character objects that represent the same character, or nil otherwise."
},
{
"code": null,
"e": 29365,
"s": 29359,
"text": "evenp"
},
{
"code": null,
"e": 29461,
"s": 29365,
"text": "It takes one numeric argument and returns t if the argument is even number or nil if otherwise."
},
{
"code": null,
"e": 29466,
"s": 29461,
"text": "oddp"
},
{
"code": null,
"e": 29561,
"s": 29466,
"text": "It takes one numeric argument and returns t if the argument is odd number or nil if otherwise."
},
{
"code": null,
"e": 29567,
"s": 29561,
"text": "zerop"
},
{
"code": null,
"e": 29656,
"s": 29567,
"text": "It takes one numeric argument and returns t if the argument is zero or nil if otherwise."
},
{
"code": null,
"e": 29661,
"s": 29656,
"text": "null"
},
{
"code": null,
"e": 29757,
"s": 29661,
"text": "It takes one argument and returns t if the argument evaluates to nil, otherwise it returns nil."
},
{
"code": null,
"e": 29763,
"s": 29757,
"text": "listp"
},
{
"code": null,
"e": 29861,
"s": 29763,
"text": "It takes one argument and returns t if the argument evaluates to a list otherwise it returns nil."
},
{
"code": null,
"e": 29870,
"s": 29861,
"text": "greaterp"
},
{
"code": null,
"e": 30033,
"s": 29870,
"text": "It takes one or more argument and returns t if either there is a single argument or the arguments are successively larger from left to right, or nil if otherwise."
},
{
"code": null,
"e": 30039,
"s": 30033,
"text": "lessp"
},
{
"code": null,
"e": 30203,
"s": 30039,
"text": "It takes one or more argument and returns t if either there is a single argument or the arguments are successively smaller from left to right, or nil if otherwise."
},
{
"code": null,
"e": 30211,
"s": 30203,
"text": "numberp"
},
{
"code": null,
"e": 30296,
"s": 30211,
"text": "It takes one argument and returns t if the argument is a number or nil if otherwise."
},
{
"code": null,
"e": 30304,
"s": 30296,
"text": "symbolp"
},
{
"code": null,
"e": 30394,
"s": 30304,
"text": "It takes one argument and returns t if the argument is a symbol otherwise it returns nil."
},
{
"code": null,
"e": 30403,
"s": 30394,
"text": "integerp"
},
{
"code": null,
"e": 30495,
"s": 30403,
"text": "It takes one argument and returns t if the argument is an integer otherwise it returns nil."
},
{
"code": null,
"e": 30505,
"s": 30495,
"text": "rationalp"
},
{
"code": null,
"e": 30631,
"s": 30505,
"text": "It takes one argument and returns t if the argument is rational number, either a ratio or a number, otherwise it returns nil."
},
{
"code": null,
"e": 30638,
"s": 30631,
"text": "floatp"
},
{
"code": null,
"e": 30743,
"s": 30638,
"text": "It takes one argument and returns t if the argument is a floating point number otherwise it returns nil."
},
{
"code": null,
"e": 30749,
"s": 30743,
"text": "realp"
},
{
"code": null,
"e": 30844,
"s": 30749,
"text": "It takes one argument and returns t if the argument is a real number otherwise it returns nil."
},
{
"code": null,
"e": 30853,
"s": 30844,
"text": "complexp"
},
{
"code": null,
"e": 30951,
"s": 30853,
"text": "It takes one argument and returns t if the argument is a complex number otherwise it returns nil."
},
{
"code": null,
"e": 30962,
"s": 30951,
"text": "characterp"
},
{
"code": null,
"e": 31055,
"s": 30962,
"text": "It takes one argument and returns t if the argument is a character otherwise it returns nil."
},
{
"code": null,
"e": 31063,
"s": 31055,
"text": "stringp"
},
{
"code": null,
"e": 31160,
"s": 31063,
"text": "It takes one argument and returns t if the argument is a string object otherwise it returns nil."
},
{
"code": null,
"e": 31167,
"s": 31160,
"text": "arrayp"
},
{
"code": null,
"e": 31264,
"s": 31167,
"text": "It takes one argument and returns t if the argument is an array object otherwise it returns nil."
},
{
"code": null,
"e": 31273,
"s": 31264,
"text": "packagep"
},
{
"code": null,
"e": 31364,
"s": 31273,
"text": "It takes one argument and returns t if the argument is a package otherwise it returns nil."
},
{
"code": null,
"e": 31445,
"s": 31364,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 31708,
"s": 31445,
"text": "(write (atom 'abcd))\n(terpri)\n(write (equal 'a 'b))\n(terpri)\n(write (evenp 10))\n(terpri)\n(write (evenp 7 ))\n(terpri)\n(write (oddp 7 ))\n(terpri)\n(write (zerop 0.0000000001))\n(terpri)\n(write (eq 3 3.0 ))\n(terpri)\n(write (equal 3 3.0 ))\n(terpri)\n(write (null nil ))"
},
{
"code": null,
"e": 31769,
"s": 31708,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 31798,
"s": 31769,
"text": "T\nNIL\nT\nNIL\nT\nNIL\nNIL\nNIL\nT\n"
},
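{
"code": null,
"e": null,
"s": null,
"text": "Only a few of these predicates appear in the example above. The following short sketch, which uses only the standard type-checking predicates from the table, shows a few more of them; the expected results are noted in the comments."
},
{
"code": null,
"e": null,
"s": null,
"text": "(write (listp '(1 2 3))) ; T - the argument is a list\n(terpri)\n(write (listp 5)) ; NIL\n(terpri)\n(write (numberp 5)) ; T\n(terpri)\n(write (symbolp 'foo)) ; T\n(terpri)\n(write (stringp \"hello\")) ; T\n(terpri)\n(write (integerp 3.5)) ; NIL - 3.5 is a float, not an integer"
},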
{
"code": null,
"e": 31879,
"s": 31798,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 32038,
"s": 31879,
"text": "(defun factorial (num)\n (cond ((zerop num) 1)\n (t ( * num (factorial (- num 1))))\n )\n)\n(setq n 6)\n(format t \"~% Factorial ~d is: ~d\" n (factorial n))"
},
{
"code": null,
"e": 32099,
"s": 32038,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 32120,
"s": 32099,
"text": "Factorial 6 is: 720\n"
},
{
"code": null,
"e": 32240,
"s": 32120,
"text": "Common Lisp defines several kinds of numbers. The number data type includes various kinds of numbers supported by LISP."
},
{
"code": null,
"e": 32281,
"s": 32240,
"text": "The number types supported by LISP are −"
},
{
"code": null,
"e": 32290,
"s": 32281,
"text": "Integers"
},
{
"code": null,
"e": 32297,
"s": 32290,
"text": "Ratios"
},
{
"code": null,
"e": 32320,
"s": 32297,
"text": "Floating-point numbers"
},
{
"code": null,
"e": 32336,
"s": 32320,
"text": "Complex numbers"
},
{
"code": null,
"e": 32436,
"s": 32336,
"text": "The following diagram shows the number hierarchy and various numeric data types available in LISP −"
},
{
"code": null,
"e": 32511,
"s": 32436,
"text": "The following table describes various number type data available in LISP −"
},
{
"code": null,
"e": 32518,
"s": 32511,
"text": "fixnum"
},
{
"code": null,
"e": 32643,
"s": 32518,
"text": "This data type represents integers which are not too large and mostly in the range -215 to 215-1 (it is machine-dependent)"
},
{
"code": null,
"e": 32650,
"s": 32643,
"text": "bignum"
},
{
"code": null,
"e": 32770,
"s": 32650,
"text": "These are very large numbers with size limited by the amount of memory allocated for LISP, they are not fixnum numbers."
},
{
"code": null,
"e": 32776,
"s": 32770,
"text": "ratio"
},
{
"code": null,
"e": 32932,
"s": 32776,
"text": "Represents the ratio of two numbers in the numerator/denominator form. The / function always produce the result in ratios, when its arguments are integers."
},
{
"code": null,
"e": 32938,
"s": 32932,
"text": "float"
},
{
"code": null,
"e": 33032,
"s": 32938,
"text": "It represents non-integer numbers. There are four float data types with increasing precision."
},
{
"code": null,
"e": 33040,
"s": 33032,
"text": "complex"
},
{
"code": null,
"e": 33182,
"s": 33040,
"text": "It represents complex numbers, which are denoted by #c. The real and imaginary parts could be both either rational or floating point numbers."
},
{
"code": null,
"e": 33263,
"s": 33182,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 33358,
"s": 33263,
"text": "(write (/ 1 2))\n(terpri)\n(write ( + (/ 1 2) (/ 3 4)))\n(terpri)\n(write ( + #c( 1 2) #c( 3 -4)))"
},
{
"code": null,
"e": 33419,
"s": 33358,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 33437,
"s": 33419,
"text": "1/2\n5/4\n#C(4 -2)\n"
},
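{
"code": null,
"e": null,
"s": null,
"text": "As a further illustration of the table, integer results that grow past the fixnum range automatically become bignums, and the float function converts a ratio to a floating-point number. A short sketch follows; the expected values are noted in the comments."
},
{
"code": null,
"e": null,
"s": null,
"text": "; the result of expt here is far too large for a fixnum, so it is a bignum\n(write (expt 2 100))\n(terpri)\n; exact ratio arithmetic\n(write (* 2/3 3/4)) ; 1/2\n(terpri)\n; converting the ratio to a float\n(write (float (* 2/3 3/4))) ; 0.5"
},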
{
"code": null,
"e": 33506,
"s": 33437,
"text": "The following table describes some commonly used numeric functions −"
},
{
"code": null,
"e": 33517,
"s": 33506,
"text": "+, -, *, /"
},
{
"code": null,
"e": 33550,
"s": 33517,
"text": "Respective arithmetic operations"
},
{
"code": null,
"e": 33582,
"s": 33550,
"text": "sin, cos, tan, acos, asin, atan"
},
{
"code": null,
"e": 33618,
"s": 33582,
"text": "Respective trigonometric functions."
},
{
"code": null,
"e": 33656,
"s": 33618,
"text": "sinh, cosh, tanh, acosh, asinh, atanh"
},
{
"code": null,
"e": 33689,
"s": 33656,
"text": "Respective hyperbolic functions."
},
{
"code": null,
"e": 33693,
"s": 33689,
"text": "exp"
},
{
"code": null,
"e": 33732,
"s": 33693,
"text": "Exponentiation function. Calculates ex"
},
{
"code": null,
"e": 33737,
"s": 33732,
"text": "expt"
},
{
"code": null,
"e": 33789,
"s": 33737,
"text": "Exponentiation function, takes base and power both."
},
{
"code": null,
"e": 33794,
"s": 33789,
"text": "sqrt"
},
{
"code": null,
"e": 33837,
"s": 33794,
"text": "It calculates the square root of a number."
},
{
"code": null,
"e": 33841,
"s": 33837,
"text": "log"
},
{
"code": null,
"e": 33980,
"s": 33841,
"text": "Logarithmic function. It one parameter is given, then it calculates its natural logarithm, otherwise the second parameter is used as base."
},
{
"code": null,
"e": 33990,
"s": 33980,
"text": "conjugate"
},
{
"code": null,
"e": 34095,
"s": 33990,
"text": "It calculates the complex conjugate of a number. In case of a real number, it returns the number itself."
},
{
"code": null,
"e": 34099,
"s": 34095,
"text": "abs"
},
{
"code": null,
"e": 34157,
"s": 34099,
"text": "It returns the absolute value (or magnitude) of a number."
},
{
"code": null,
"e": 34161,
"s": 34157,
"text": "gcd"
},
{
"code": null,
"e": 34225,
"s": 34161,
"text": "It calculates the greatest common divisor of the given numbers."
},
{
"code": null,
"e": 34229,
"s": 34225,
"text": "lcm"
},
{
"code": null,
"e": 34291,
"s": 34229,
"text": "It calculates the least common multiple of the given numbers."
},
{
"code": null,
"e": 34297,
"s": 34291,
"text": "isqrt"
},
{
"code": null,
"e": 34398,
"s": 34297,
"text": "It gives the greatest integer less than or equal to the exact square root of a given natural number."
},
{
"code": null,
"e": 34430,
"s": 34398,
"text": "floor, ceiling, truncate, round"
},
{
"code": null,
"e": 34821,
"s": 34430,
"text": "All these functions take two arguments as a number and returns the quotient; floor returns the largest integer that is not greater than ratio, ceiling chooses the smaller integer that is larger than ratio, truncate chooses the integer of the same sign as ratio with the largest absolute value that is less than absolute value of ratio, and round chooses an integer that is closest to ratio."
},
{
"code": null,
"e": 34857,
"s": 34821,
"text": "ffloor, fceiling, ftruncate, fround"
},
{
"code": null,
"e": 34934,
"s": 34857,
"text": "Does the same as above, but returns the quotient as a floating point number."
},
{
"code": null,
"e": 34943,
"s": 34934,
"text": "mod, rem"
},
{
"code": null,
"e": 34990,
"s": 34943,
"text": "Returns the remainder in a division operation."
},
{
"code": null,
"e": 34996,
"s": 34990,
"text": "float"
},
{
"code": null,
"e": 35047,
"s": 34996,
"text": "Converts a real number to a floating point number."
},
{
"code": null,
"e": 35069,
"s": 35047,
"text": "rational, rationalize"
},
{
"code": null,
"e": 35112,
"s": 35069,
"text": "Converts a real number to rational number."
},
{
"code": null,
"e": 35135,
"s": 35112,
"text": "numerator, denominator"
},
{
"code": null,
"e": 35186,
"s": 35135,
"text": "Returns the respective parts of a rational number."
},
{
"code": null,
"e": 35205,
"s": 35186,
"text": "realpart, imagpart"
},
{
"code": null,
"e": 35262,
"s": 35205,
"text": "Returns the real and imaginary part of a complex number."
},
{
"code": null,
"e": 35343,
"s": 35262,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 35864,
"s": 35343,
"text": "(write (/ 45 78))\n(terpri)\n(write (floor 45 78))\n(terpri)\n(write (/ 3456 75))\n(terpri)\n(write (floor 3456 75))\n(terpri)\n(write (ceiling 3456 75))\n(terpri)\n(write (truncate 3456 75))\n(terpri)\n(write (round 3456 75))\n(terpri)\n(write (ffloor 3456 75))\n(terpri)\n(write (fceiling 3456 75))\n(terpri)\n(write (ftruncate 3456 75))\n(terpri)\n(write (fround 3456 75))\n(terpri)\n(write (mod 3456 75))\n(terpri)\n(setq c (complex 6 7))\n(write c)\n(terpri)\n(write (complex 5 -9))\n(terpri)\n(write (realpart c))\n(terpri)\n(write (imagpart c))"
},
{
"code": null,
"e": 35925,
"s": 35864,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 35997,
"s": 35925,
"text": "15/26\n0\n1152/25\n46\n47\n46\n46\n46.0\n47.0\n46.0\n46.0\n6\n#C(6 7)\n#C(5 -9)\n6\n7\n"
},
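{
"code": null,
"e": null,
"s": null,
"text": "The example above concentrates on division and rounding. A short sketch below touches a few more functions from the table (gcd, lcm, expt, isqrt and abs), with the expected values noted in the comments."
},
{
"code": null,
"e": null,
"s": null,
"text": "(write (gcd 24 36)) ; 12\n(terpri)\n(write (lcm 4 6)) ; 12\n(terpri)\n(write (expt 3 4)) ; 81\n(terpri)\n(write (isqrt 50)) ; 7, the greatest integer not exceeding the square root\n(terpri)\n(write (abs -7.5)) ; 7.5"
},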
{
"code": null,
"e": 36068,
"s": 35997,
"text": "In LISP, characters are represented as data objects of type character."
},
{
"code": null,
"e": 36184,
"s": 36068,
"text": "You can denote a character object preceding #\\ before the character itself. For example, #\\a means the character a."
},
{
"code": null,
"e": 36337,
"s": 36184,
"text": "Space and other special characters can be denoted by preceding #\\ before the name of the character. For example, #\\SPACE represents the space character."
},
{
"code": null,
"e": 36379,
"s": 36337,
"text": "The following example demonstrates this −"
},
{
"code": null,
"e": 36460,
"s": 36379,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 36543,
"s": 36460,
"text": "(write 'a)\n(terpri)\n(write #\\a)\n(terpri)\n(write-char #\\a)\n(terpri)\n(write-char 'a)"
},
{
"code": null,
"e": 36604,
"s": 36543,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 36661,
"s": 36604,
"text": "A\n#\\a\na\n*** - WRITE-CHAR: argument A is not a character\n"
},
{
"code": null,
"e": 36779,
"s": 36661,
"text": "Common LISP allows using the following special characters in your code. They are called the semi-standard characters."
},
{
"code": null,
"e": 36791,
"s": 36779,
"text": "#\\Backspace"
},
{
"code": null,
"e": 36797,
"s": 36791,
"text": "#\\Tab"
},
{
"code": null,
"e": 36808,
"s": 36797,
"text": "#\\Linefeed"
},
{
"code": null,
"e": 36815,
"s": 36808,
"text": "#\\Page"
},
{
"code": null,
"e": 36824,
"s": 36815,
"text": "#\\Return"
},
{
"code": null,
"e": 36833,
"s": 36824,
"text": "#\\Rubout"
},
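{
"code": null,
"e": null,
"s": null,
"text": "A minimal sketch, assuming an implementation (such as CLISP) that supports these semi-standard names, simply writes a few of them."
},
{
"code": null,
"e": null,
"s": null,
"text": "; each of these prints the character object, e.g. #\\Tab\n(write #\\Tab)\n(terpri)\n(write #\\Backspace)\n(terpri)\n(write #\\Return)"
},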
{
"code": null,
"e": 37006,
"s": 36833,
"text": "Numeric comparison functions and operators, like, < and > do not work on characters. Common LISP provides other two sets of functions for comparing characters in your code."
},
{
"code": null,
"e": 37064,
"s": 37006,
"text": "One set is case-sensitive and the other case-insensitive."
},
{
"code": null,
"e": 37109,
"s": 37064,
"text": "The following table provides the functions −"
},
{
"code": null,
"e": 37190,
"s": 37109,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 37505,
"s": 37190,
"text": "; case-sensitive comparison\n(write (char= #\\a #\\b))\n(terpri)\n(write (char= #\\a #\\a))\n(terpri)\n(write (char= #\\a #\\A))\n(terpri)\n \n;case-insensitive comparision\n(write (char-equal #\\a #\\A))\n(terpri)\n(write (char-equal #\\a #\\b))\n(terpri)\n(write (char-lessp #\\a #\\b #\\c))\n(terpri)\n(write (char-greaterp #\\a #\\b #\\c))"
},
{
"code": null,
"e": 37566,
"s": 37505,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 37589,
"s": 37566,
"text": "NIL\nT\nNIL\nT\nNIL\nT\nNIL\n"
},
{
"code": null,
"e": 37734,
"s": 37589,
"text": "LISP allows you to define single or multiple-dimension arrays using the make-array function. An array can store any LISP object as its elements."
},
{
"code": null,
"e": 37882,
"s": 37734,
"text": "All arrays consist of contiguous memory locations. The lowest address corresponds to the first element and the highest address to the last element."
},
{
"code": null,
"e": 37939,
"s": 37882,
"text": "The number of dimensions of an array is called its rank."
},
{
"code": null,
"e": 38113,
"s": 37939,
"text": "In LISP, an array element is specified by a sequence of non-negative integer indices. The length of the sequence must equal the rank of the array. Indexing starts from zero."
},
{
"code": null,
"e": 38192,
"s": 38113,
"text": "For example, to create an array with 10- cells, named my-array, we can write −"
},
{
"code": null,
"e": 38228,
"s": 38192,
"text": "(setf my-array (make-array '(10)))\n"
},
{
"code": null,
"e": 38357,
"s": 38228,
"text": "The aref function allows accessing the contents of the cells. It takes two arguments, the name of the array and the index value."
},
{
"code": null,
"e": 38422,
"s": 38357,
"text": "For example, to access the content of the tenth cell, we write −"
},
{
"code": null,
"e": 38441,
"s": 38422,
"text": "(aref my-array 9)\n"
},
{
"code": null,
"e": 38522,
"s": 38441,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 38871,
"s": 38522,
"text": "(write (setf my-array (make-array '(10))))\n(terpri)\n(setf (aref my-array 0) 25)\n(setf (aref my-array 1) 23)\n(setf (aref my-array 2) 45)\n(setf (aref my-array 3) 10)\n(setf (aref my-array 4) 20)\n(setf (aref my-array 5) 17)\n(setf (aref my-array 6) 25)\n(setf (aref my-array 7) 19)\n(setf (aref my-array 8) 67)\n(setf (aref my-array 9) 30)\n(write my-array)"
},
{
"code": null,
"e": 38932,
"s": 38871,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 39009,
"s": 38932,
"text": "#(NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL)\n#(25 23 45 10 20 17 25 19 67 30)\n"
},
{
"code": null,
"e": 39039,
"s": 39009,
"text": "Let us create a 3-by-3 array."
},
{
"code": null,
"e": 39120,
"s": 39039,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 39210,
"s": 39120,
"text": "(setf x (make-array '(3 3) \n :initial-contents '((0 1 2 ) (3 4 5) (6 7 8)))\n)\n(write x)"
},
{
"code": null,
"e": 39271,
"s": 39210,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 39301,
"s": 39271,
"text": "#2A((0 1 2) (3 4 5) (6 7 8))\n"
},
{
"code": null,
"e": 39382,
"s": 39301,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 39569,
"s": 39382,
"text": "(setq a (make-array '(4 3)))\n(dotimes (i 4)\n (dotimes (j 3)\n (setf (aref a i j) (list i 'x j '= (* i j)))\n )\n)\n(dotimes (i 4)\n (dotimes (j 3)\n (print (aref a i j))\n )\n)"
},
{
"code": null,
"e": 39630,
"s": 39569,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 39786,
"s": 39630,
"text": "(0 X 0 = 0) \n(0 X 1 = 0) \n(0 X 2 = 0) \n(1 X 0 = 0) \n(1 X 1 = 1) \n(1 X 2 = 2) \n(2 X 0 = 0) \n(2 X 1 = 2) \n(2 X 2 = 4) \n(3 X 0 = 0) \n(3 X 1 = 3) \n(3 X 2 = 6)\n"
},
{
"code": null,
"e": 39892,
"s": 39786,
"text": "The make-array function takes many other arguments. Let us look at the complete syntax of this function −"
},
{
"code": null,
"e": 40028,
"s": 39892,
"text": "make-array dimensions :element-type :initial-element :initial-contents :adjustable :fill-pointer :displaced-to :displaced-index-offset"
},
{
"code": null,
"e": 40163,
"s": 40028,
"text": "Apart from the dimensions argument, all other arguments are keywords. The following table provides brief description of the arguments."
},
{
"code": null,
"e": 40174,
"s": 40163,
"text": "dimensions"
},
{
"code": null,
"e": 40294,
"s": 40174,
"text": "It gives the dimensions of the array. It is a number for one-dimensional array, and a list for multi-dimensional array."
},
{
"code": null,
"e": 40308,
"s": 40294,
"text": ":element-type"
},
{
"code": null,
"e": 40368,
"s": 40308,
"text": "It is the type specifier, default value is T, i.e. any type"
},
{
"code": null,
"e": 40385,
"s": 40368,
"text": ":initial-element"
},
{
"code": null,
"e": 40488,
"s": 40385,
"text": "Initial elements value. It will make an array with all the elements initialized to a particular value."
},
{
"code": null,
"e": 40505,
"s": 40488,
"text": ":initial-content"
},
{
"code": null,
"e": 40532,
"s": 40505,
"text": "Initial content as object."
},
{
"code": null,
"e": 40544,
"s": 40532,
"text": ":adjustable"
},
{
"code": null,
"e": 40749,
"s": 40544,
"text": "It helps in creating a resizeable (or adjustable) vector whose underlying memory can be resized. The argument is a Boolean value indicating whether the array is adjustable or not, default value being NIL."
},
{
"code": null,
"e": 40763,
"s": 40749,
"text": ":fill-pointer"
},
{
"code": null,
"e": 40844,
"s": 40763,
"text": "It keeps track of the number of elements actually stored in a resizeable vector."
},
{
"code": null,
"e": 40858,
"s": 40844,
"text": ":displaced-to"
},
{
"code": null,
"e": 41138,
"s": 40858,
"text": "It helps in creating a displaced array or shared array that shares its contents with the specified array. Both the arrays should have same element type. The :displaced-to option may not be used with the :initial-element or :initial-contents option. This argument defaults to nil."
},
{
"code": null,
"e": 41162,
"s": 41138,
"text": ":displaced-index-offset"
},
{
"code": null,
"e": 41217,
"s": 41162,
"text": "It gives the index-offset of the created shared array."
},
{
"code": null,
"e": 41298,
"s": 41217,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 41557,
"s": 41298,
"text": "(setq myarray (make-array '(3 2 3) \n :initial-contents \n '(((a b c) (1 2 3)) \n ((d e f) (4 5 6)) \n ((g h i) (7 8 9)) \n ))\n) \n(setq array2 (make-array 4 :displaced-to myarray :displaced-index-offset 2)) \n(write myarray)\n(terpri)\n(write array2)"
},
{
"code": null,
"e": 41618,
"s": 41557,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 41689,
"s": 41618,
"text": "#3A(((A B C) (1 2 3)) ((D E F) (4 5 6)) ((G H I) (7 8 9)))\n#(C 1 2 3)\n"
},
{
"code": null,
"e": 41733,
"s": 41689,
"text": "If the displaced array is two dimensional −"
},
{
"code": null,
"e": 41997,
"s": 41733,
"text": "(setq myarray (make-array '(3 2 3) \n :initial-contents \n '(((a b c) (1 2 3)) \n ((d e f) (4 5 6)) \n ((g h i) (7 8 9)) \n ))\n) \n(setq array2 (make-array '(3 2) :displaced-to myarray :displaced-index-offset 2)) \n(write myarray)\n(terpri)\n(write array2)"
},
{
"code": null,
"e": 42058,
"s": 41997,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 42141,
"s": 42058,
"text": "#3A(((A B C) (1 2 3)) ((D E F) (4 5 6)) ((G H I) (7 8 9)))\n#2A((C 1) (2 3) (D E))\n"
},
{
"code": null,
"e": 42188,
"s": 42141,
"text": "Let's change the displaced index offset to 5 −"
},
{
"code": null,
"e": 42452,
"s": 42188,
"text": "(setq myarray (make-array '(3 2 3) \n :initial-contents \n '(((a b c) (1 2 3)) \n ((d e f) (4 5 6)) \n ((g h i) (7 8 9)) \n ))\n) \n(setq array2 (make-array '(3 2) :displaced-to myarray :displaced-index-offset 5)) \n(write myarray)\n(terpri)\n(write array2)"
},
{
"code": null,
"e": 42513,
"s": 42452,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 42596,
"s": 42513,
"text": "#3A(((A B C) (1 2 3)) ((D E F) (4 5 6)) ((G H I) (7 8 9)))\n#2A((3 D) (E F) (4 5))\n"
},
{
"code": null,
"e": 42677,
"s": 42596,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 43609,
"s": 42677,
"text": ";a one dimensional array with 5 elements, \n;initail value 5\n(write (make-array 5 :initial-element 5))\n(terpri)\n\n;two dimensional array, with initial element a\n(write (make-array '(2 3) :initial-element 'a))\n(terpri)\n\n;an array of capacity 14, but fill pointer 5, is 5\n(write(length (make-array 14 :fill-pointer 5)))\n(terpri)\n\n;however its length is 14\n(write (array-dimensions (make-array 14 :fill-pointer 5)))\n(terpri)\n\n; a bit array with all initial elements set to 1\n(write(make-array 10 :element-type 'bit :initial-element 1))\n(terpri)\n\n; a character array with all initial elements set to a\n; is a string actually\n(write(make-array 10 :element-type 'character :initial-element #\\a)) \n(terpri)\n\n; a two dimensional array with initial values a\n(setq myarray (make-array '(2 2) :initial-element 'a :adjustable t))\n(write myarray)\n(terpri)\n\n;readjusting the array\n(adjust-array myarray '(1 3) :initial-element 'b) \n(write myarray)"
},
{
"code": null,
"e": 43670,
"s": 43609,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 43768,
"s": 43670,
"text": "#(5 5 5 5 5)\n#2A((A A A) (A A A))\n5\n(14)\n#*1111111111\n\"aaaaaaaaaa\"\n#2A((A A) (A A))\n#2A((A A B))\n"
},
{
"code": null,
"e": 43847,
"s": 43768,
"text": "Strings in Common Lisp are vectors, i.e., one-dimensional array of characters."
},
{
"code": null,
"e": 44132,
"s": 43847,
"text": "String literals are enclosed in double quotes. Any character supported by the character set can be enclosed within double quotes to make a string, except the double quote character (\") and the escape character (\\). However, you can include these by escaping them with a backslash (\\)."
},
{
"code": null,
"e": 44213,
"s": 44132,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 44366,
"s": 44213,
"text": "(write-line \"Hello World\")\n(write-line \"Welcome to Tutorials Point\")\n\n;escaping the double quote character\n(write-line \"Welcome to \\\"Tutorials Point\\\"\")"
},
{
"code": null,
"e": 44427,
"s": 44366,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 44496,
"s": 44427,
"text": "Hello World\nWelcome to Tutorials Point\nWelcome to \"Tutorials Point\"\n"
},
{
"code": null,
"e": 44721,
"s": 44496,
"text": "Numeric comparison functions and operators, like, < and > do not work on strings. Common LISP provides other two sets of functions for comparing strings in your code. One set is case-sensitive and the other case-insensitive."
},
{
"code": null,
"e": 44766,
"s": 44721,
"text": "The following table provides the functions −"
},
{
"code": null,
"e": 44847,
"s": 44766,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 45494,
"s": 44847,
"text": "; case-sensitive comparison\n(write (string= \"this is test\" \"This is test\"))\n(terpri)\n(write (string> \"this is test\" \"This is test\"))\n(terpri)\n(write (string< \"this is test\" \"This is test\"))\n(terpri)\n\n;case-insensitive comparision\n(write (string-equal \"this is test\" \"This is test\"))\n(terpri)\n(write (string-greaterp \"this is test\" \"This is test\"))\n(terpri)\n(write (string-lessp \"this is test\" \"This is test\"))\n(terpri)\n\n;checking non-equal\n(write (string/= \"this is test\" \"this is Test\"))\n(terpri)\n(write (string-not-equal \"this is test\" \"This is test\"))\n(terpri)\n(write (string/= \"lisp\" \"lisping\"))\n(terpri)\n(write (string/= \"decent\" \"decency\"))"
},
{
"code": null,
"e": 45555,
"s": 45494,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 45586,
"s": 45555,
"text": "NIL\n0\nNIL\nT\nNIL\nNIL\n8\nNIL\n4\n5\n"
},
{
"code": null,
"e": 45649,
"s": 45586,
"text": "The following table describes the case controlling functions −"
},
{
"code": null,
"e": 45663,
"s": 45649,
"text": "string-upcase"
},
{
"code": null,
"e": 45697,
"s": 45663,
"text": "Converts the string to upper case"
},
{
"code": null,
"e": 45713,
"s": 45697,
"text": "string-downcase"
},
{
"code": null,
"e": 45747,
"s": 45713,
"text": "Converts the string to lower case"
},
{
"code": null,
"e": 45765,
"s": 45747,
"text": "string-capitalize"
},
{
"code": null,
"e": 45801,
"s": 45765,
"text": "Capitalizes each word in the string"
},
{
"code": null,
"e": 45882,
"s": 45801,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 46014,
"s": 45882,
"text": "(write-line (string-upcase \"a big hello from tutorials point\"))\n(write-line (string-capitalize \"a big hello from tutorials point\"))"
},
{
"code": null,
"e": 46075,
"s": 46014,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 46142,
"s": 46075,
"text": "A BIG HELLO FROM TUTORIALS POINT\nA Big Hello From Tutorials Point\n"
},
{
"code": null,
"e": 46204,
"s": 46142,
"text": "The following table describes the string trimming functions −"
},
{
"code": null,
"e": 46216,
"s": 46204,
"text": "string-trim"
},
{
"code": null,
"e": 46417,
"s": 46216,
"text": "It takes a string of character(s) as first argument and a string as the second argument and returns a substring where all characters that are in the first argument are removed off the argument string."
},
{
"code": null,
"e": 46434,
"s": 46417,
"text": "String-left-trim"
},
{
"code": null,
"e": 46652,
"s": 46434,
"text": "It takes a string of character(s) as first argument and a string as the second argument and returns a substring where all characters that are in the first argument are removed off the beginning of the argument string."
},
{
"code": null,
"e": 46670,
"s": 46652,
"text": "String-right-trim"
},
{
"code": null,
"e": 46879,
"s": 46670,
"text": "It takes a string character(s) as first argument and a string as the second argument and returns a substring where all characters that are in the first argument are removed off the end of the argument string."
},
{
"code": null,
"e": 46960,
"s": 46879,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 47260,
"s": 46960,
"text": "(write-line (string-trim \" \" \" a big hello from tutorials point \"))\n(write-line (string-left-trim \" \" \" a big hello from tutorials point \"))\n(write-line (string-right-trim \" \" \" a big hello from tutorials point \"))\n(write-line (string-trim \" a\" \" a big hello from tutorials point \"))"
},
{
"code": null,
"e": 47321,
"s": 47260,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 47458,
"s": 47321,
"text": "a big hello from tutorials point\na big hello from tutorials point \n a big hello from tutorials point\nbig hello from tutorials point\n"
},
{
"code": null,
"e": 47725,
"s": 47458,
"text": "Strings in LISP are arrays and thus also sequences. We will cover these data types in coming tutorials. All functions that are applicable to arrays and sequences also apply to strings. However, we will demonstrate some commonly used functions using various examples."
},
{
"code": null,
"e": 47780,
"s": 47725,
"text": "The length function calculates the length of a string."
},
{
"code": null,
"e": 47955,
"s": 47780,
"text": "The subseq function returns a sub-string (as a string is also a sequence) starting at a particular index and continuing to a particular ending index or the end of the string."
},
{
"code": null,
"e": 48025,
"s": 47955,
"text": "The char function allows accessing individual characters of a string."
},
{
"code": null,
"e": 48033,
"s": 48025,
"text": "Example"
},
{
"code": null,
"e": 48114,
"s": 48033,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 48223,
"s": 48114,
"text": "(write (length \"Hello World\"))\n(terpri)\n(write-line (subseq \"Hello World\" 6))\n(write (char \"Hello World\" 6))"
},
{
"code": null,
"e": 48284,
"s": 48223,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 48298,
"s": 48284,
"text": "11\nWorld\n#\\W\n"
},
{
"code": null,
"e": 48455,
"s": 48298,
"text": "The sort function allows sorting a string. It takes a sequence (vector or string) and a two-argument predicate and returns a sorted version of the sequence."
},
{
"code": null,
"e": 48600,
"s": 48455,
"text": "The merge function takes two sequences and a predicate and returns a sequence produced by merging the two sequences, according to the predicate."
},
{
"code": null,
"e": 48608,
"s": 48600,
"text": "Example"
},
{
"code": null,
"e": 48689,
"s": 48608,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 48903,
"s": 48689,
"text": ";sorting the strings\n(write (sort (vector \"Amal\" \"Akbar\" \"Anthony\") #'string<))\n(terpri)\n\n;merging the strings\n(write (merge 'vector (vector \"Rishi\" \"Zara\" \"Priyanka\") \n (vector \"Anju\" \"Anuj\" \"Avni\") #'string<))"
},
{
"code": null,
"e": 48964,
"s": 48903,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 49043,
"s": 48964,
"text": "#(\"Akbar\" \"Amal\" \"Anthony\")\n#(\"Anju\" \"Anuj\" \"Avni\" \"Rishi\" \"Zara\" \"Priyanka\")\n"
},
{
"code": null,
"e": 49083,
"s": 49043,
"text": "The reverse function reverses a string."
},
{
"code": null,
"e": 49177,
"s": 49083,
"text": "For example, Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 49259,
"s": 49177,
"text": "(write-line (reverse \"Are we not drawn onward, we few, drawn onward to new era\"))"
},
{
"code": null,
"e": 49320,
"s": 49259,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 49378,
"s": 49320,
"text": "are wen ot drawno nward ,wef ew ,drawno nward ton ew erA\n"
},
{
"code": null,
"e": 49523,
"s": 49378,
"text": "The concatenate function concatenates two strings. This is generic sequence function and you must provide the result type as the first argument."
},
{
"code": null,
"e": 49617,
"s": 49523,
"text": "For example, Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 49714,
"s": 49617,
"text": "(write-line (concatenate 'string \"Are we not drawn onward, \" \"we few, drawn onward to new era\"))"
},
{
"code": null,
"e": 49775,
"s": 49714,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 49833,
"s": 49775,
"text": "Are we not drawn onward, we few, drawn onward to new era\n"
},
{
"code": null,
"e": 50049,
"s": 49833,
"text": "Sequence is an abstract data type in LISP. Vectors and lists are the two concrete subtypes of this data type. All the functionalities defined on sequence data type are actually applied on all vectors and list types."
},
{
"code": null,
"e": 50125,
"s": 50049,
"text": "In this section, we will discuss most commonly used functions on sequences."
},
{
"code": null,
"e": 50269,
"s": 50125,
"text": "Before starting on various ways of manipulating sequences (i.e., vectors and lists), let us have a look at the list of all available functions."
},
{
"code": null,
"e": 50375,
"s": 50269,
"text": "The function make-sequence allows you to create a sequence of any type. The syntax for this function is −"
},
{
"code": null,
"e": 50426,
"s": 50375,
"text": "make-sequence sqtype sqsize &key :initial-element\n"
},
{
"code": null,
"e": 50485,
"s": 50426,
"text": "It creates a sequence of type sqtype and of length sqsize."
},
{
"code": null,
"e": 50621,
"s": 50485,
"text": "You may optionally specify some value using the :initial-element argument, then each of the elements will be initialized to this value."
},
{
"code": null,
"e": 50715,
"s": 50621,
"text": "For example, Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 50787,
"s": 50715,
"text": "(write (make-sequence '(vector float) \n 10 \n :initial-element 1.0))"
},
{
"code": null,
"e": 50848,
"s": 50787,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 50892,
"s": 50848,
"text": "#(1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0)\n"
},
{
"code": null,
"e": 50896,
"s": 50892,
"text": "elt"
},
{
"code": null,
"e": 50962,
"s": 50896,
"text": "It allows access to individual elements through an integer index."
},
{
"code": null,
"e": 50969,
"s": 50962,
"text": "length"
},
{
"code": null,
"e": 51006,
"s": 50969,
"text": "It returns the length of a sequence."
},
{
"code": null,
"e": 51013,
"s": 51006,
"text": "subseq"
},
{
"code": null,
"e": 51172,
"s": 51013,
"text": "It returns a sub-sequence by extracting the subsequence starting at a particular index and continuing to a particular ending index or the end of the sequence."
},
{
"code": null,
"e": 51181,
"s": 51172,
"text": "copy-seq"
},
{
"code": null,
"e": 51252,
"s": 51181,
"text": "It returns a sequence that contains the same elements as its argument."
},
{
"code": null,
"e": 51257,
"s": 51252,
"text": "fill"
},
{
"code": null,
"e": 51326,
"s": 51257,
"text": "It is used to set multiple elements of a sequence to a single value."
},
{
"code": null,
"e": 51334,
"s": 51326,
"text": "replace"
},
{
"code": null,
"e": 51489,
"s": 51334,
"text": "It takes two sequences and the first argument sequence is destructively modified by copying successive elements into it from the second argument sequence."
},
{
"code": null,
"e": 51495,
"s": 51489,
"text": "count"
},
{
"code": null,
"e": 51593,
"s": 51495,
"text": "It takes an item and a sequence and returns the number of times the item appears in the sequence."
},
{
"code": null,
"e": 51601,
"s": 51593,
"text": "reverse"
},
{
"code": null,
"e": 51688,
"s": 51601,
"text": "It returns a sequence contains the same elements of the argument but in reverse order."
},
{
"code": null,
"e": 51697,
"s": 51688,
"text": "nreverse"
},
{
"code": null,
"e": 51789,
"s": 51697,
"text": "It returns the same sequence containing the same elements as sequence but in reverse order."
},
{
"code": null,
"e": 51801,
"s": 51789,
"text": "concatenate"
},
{
"code": null,
"e": 51884,
"s": 51801,
"text": "It creates a new sequence containing the concatenation of any number of sequences."
},
{
"code": null,
"e": 51893,
"s": 51884,
"text": "position"
},
{
"code": null,
"e": 51983,
"s": 51893,
"text": "It takes an item and a sequence and returns the index of the item in the sequence or nil."
},
{
"code": null,
"e": 51988,
"s": 51983,
"text": "find"
},
{
"code": null,
"e": 52105,
"s": 51988,
"text": "It takes an item and a sequence. It finds the item in the sequence and returns it, if not found then it returns nil."
},
{
"code": null,
"e": 52110,
"s": 52105,
"text": "sort"
},
{
"code": null,
"e": 52205,
"s": 52110,
"text": "It takes a sequence and a two-argument predicate and returns a sorted version of the sequence."
},
{
"code": null,
"e": 52211,
"s": 52205,
"text": "merge"
},
{
"code": null,
"e": 52340,
"s": 52211,
"text": "It takes two sequences and a predicate and returns a sequence produced by merging the two sequences, according to the predicate."
},
{
"code": null,
"e": 52344,
"s": 52340,
"text": "map"
},
{
"code": null,
"e": 52507,
"s": 52344,
"text": "It takes an n-argument function and n sequences and returns a new sequence containing the result of applying the function to subsequent elements of the sequences."
},
{
"code": null,
"e": 52512,
"s": 52507,
"text": "some"
},
{
"code": null,
"e": 52705,
"s": 52512,
"text": "It takes a predicate as an argument and iterates over the argument sequence, and returns the first non-NIL value returned by the predicate or returns false if the predicate is never satisfied."
},
{
"code": null,
"e": 52711,
"s": 52705,
"text": "every"
},
{
"code": null,
"e": 52906,
"s": 52711,
"text": "It takes a predicate as an argument and iterate over the argument sequence, it terminates, returning false, as soon as the predicate fails. If the predicate is always satisfied, it returns true."
},
{
"code": null,
"e": 52913,
"s": 52906,
"text": "notany"
},
{
"code": null,
"e": 53069,
"s": 52913,
"text": "It takes a predicate as an argument and iterate over the argument sequence, and returns false as soon as the predicate is satisfied or true if it never is."
},
{
"code": null,
"e": 53078,
"s": 53069,
"text": "notevery"
},
{
"code": null,
"e": 53249,
"s": 53078,
"text": "It takes a predicate as an argument and iterate over the argument sequence, and returns true as soon as the predicate fails or false if the predicate is always satisfied."
},
{
"code": null,
"e": 53256,
"s": 53249,
"text": "reduce"
},
{
"code": null,
"e": 53457,
"s": 53256,
"text": "It maps over a single sequence, applying a two-argument function first to the first two elements of the sequence and then to the value returned by the function and subsequent elements of the sequence."
},
{
"code": null,
"e": 53464,
"s": 53457,
"text": "search"
},
{
"code": null,
"e": 53540,
"s": 53464,
"text": "It searches a sequence to locate one or more elements satisfying some test."
},
{
"code": null,
"e": 53547,
"s": 53540,
"text": "remove"
},
{
"code": null,
"e": 53636,
"s": 53547,
"text": "It takes an item and a sequence and returns the sequence with instances of item removed."
},
{
"code": null,
"e": 53643,
"s": 53636,
"text": "delete"
},
{
"code": null,
"e": 53791,
"s": 53643,
"text": "This also takes an item and a sequence and returns a sequence of the same kind as the argument sequence that has the same elements except the item."
},
{
"code": null,
"e": 53802,
"s": 53791,
"text": "substitute"
},
{
"code": null,
"e": 53943,
"s": 53802,
"text": "It takes a new item, an existing item, and a sequence and returns a sequence with instances of the existing item replaced with the new item."
},
{
"code": null,
"e": 53955,
"s": 53943,
"text": "nsubstitute"
},
{
"code": null,
"e": 54103,
"s": 53955,
"text": "It takes a new item, an existing item, and a sequence and returns the same sequence with instances of the existing item replaced with the new item."
},
{
"code": null,
"e": 54112,
"s": 54103,
"text": "mismatch"
},
{
"code": null,
"e": 54199,
"s": 54112,
"text": "It takes two sequences and returns the index of the first pair of mismatched elements."
},
{
"code": null,
"e": 54398,
"s": 54199,
"text": "We have just discussed various functions and keywords that are used as arguments in these functions working on sequences. In the next sections, we will see how to use these functions using examples."
},
{
"code": null,
"e": 54538,
"s": 54398,
"text": "The length function returns the length of a sequence, and the elt function allows you to access individual elements using an integer index."
},
{
"code": null,
"e": 54619,
"s": 54538,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 54698,
"s": 54619,
"text": "(setq x (vector 'a 'b 'c 'd 'e))\n(write (length x))\n(terpri)\n(write (elt x 3))"
},
{
"code": null,
"e": 54759,
"s": 54698,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 54764,
"s": 54759,
"text": "5\nD\n"
},
{
"code": null,
"e": 54953,
"s": 54764,
"text": "Some sequence functions allows iterating through the sequence and perform some operations like, searching, removing, counting or filtering specific elements without writing explicit loops."
},
{
"code": null,
"e": 54995,
"s": 54953,
"text": "The following example demonstrates this −"
},
{
"code": null,
"e": 55076,
"s": 54995,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 55391,
"s": 55076,
"text": "(write (count 7 '(1 5 6 7 8 9 2 7 3 4 5)))\n(terpri)\n(write (remove 5 '(1 5 6 7 8 9 2 7 3 4 5)))\n(terpri)\n(write (delete 5 '(1 5 6 7 8 9 2 7 3 4 5)))\n(terpri)\n(write (substitute 10 7 '(1 5 6 7 8 9 2 7 3 4 5)))\n(terpri)\n(write (find 7 '(1 5 6 7 8 9 2 7 3 4 5)))\n(terpri)\n(write (position 5 '(1 5 6 7 8 9 2 7 3 4 5)))"
},
{
"code": null,
"e": 55452,
"s": 55391,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 55525,
"s": 55452,
"text": "2\n(1 6 7 8 9 2 7 3 4)\n(1 6 7 8 9 2 7 3 4)\n(1 5 6 10 8 9 2 10 3 4 5)\n7\n1\n"
},
{
"code": null,
"e": 55606,
"s": 55525,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 55889,
"s": 55606,
"text": "(write (delete-if #'oddp '(1 5 6 7 8 9 2 7 3 4 5)))\n(terpri)\n(write (delete-if #'evenp '(1 5 6 7 8 9 2 7 3 4 5)))\n(terpri)\n(write (remove-if #'evenp '(1 5 6 7 8 9 2 7 3 4 5) :count 1 :from-end t))\n(terpri)\n(setq x (vector 'a 'b 'c 'd 'e 'f 'g))\n(fill x 'p :start 1 :end 4)\n(write x)"
},
{
"code": null,
"e": 55950,
"s": 55889,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 56016,
"s": 55950,
"text": "(6 8 2 4)\n(1 5 7 9 7 3 5)\n(1 5 6 7 8 9 2 7 3 5)\n#(A P P P E F G)\n"
},
{
"code": null,
"e": 56128,
"s": 56016,
"text": "The sorting functions take a sequence and a two-argument predicate and return a sorted version of the sequence."
},
{
"code": null,
"e": 56209,
"s": 56128,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 56315,
"s": 56209,
"text": "(write (sort '(2 4 7 3 9 1 5 4 6 3 8) #'<))\n(terpri)\n(write (sort '(2 4 7 3 9 1 5 4 6 3 8) #'>))\n(terpri)"
},
{
"code": null,
"e": 56376,
"s": 56315,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 56425,
"s": 56376,
"text": "(1 2 3 3 4 4 5 6 7 8 9)\n(9 8 7 6 5 4 4 3 3 2 1)\n"
},
{
"code": null,
"e": 56506,
"s": 56425,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 56614,
"s": 56506,
"text": "(write (merge 'vector #(1 3 5) #(2 4 6) #'<))\n(terpri)\n(write (merge 'list #(1 3 5) #(2 4 6) #'<))\n(terpri)"
},
{
"code": null,
"e": 56675,
"s": 56614,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 56705,
"s": 56675,
"text": "#(1 2 3 4 5 6)\n(1 2 3 4 5 6)\n"
},
{
"code": null,
"e": 56789,
"s": 56705,
"text": "The functions every, some, notany, and notevery are called the sequence predicates."
},
{
"code": null,
"e": 56860,
"s": 56789,
"text": "These functions iterate over sequences and test the Boolean predicate."
},
{
"code": null,
"e": 56963,
"s": 56860,
"text": "All these functions takes a predicate as the first argument and the remaining arguments are sequences."
},
{
"code": null,
"e": 57044,
"s": 56963,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 57300,
"s": 57044,
"text": "(write (every #'evenp #(2 4 6 8 10)))\n(terpri)\n(write (some #'evenp #(2 4 6 8 10 13 14)))\n(terpri)\n(write (every #'evenp #(2 4 6 8 10 13 14)))\n(terpri)\n(write (notany #'evenp #(2 4 6 8 10)))\n(terpri)\n(write (notevery #'evenp #(2 4 6 8 10 13 14)))\n(terpri)"
},
{
"code": null,
"e": 57361,
"s": 57300,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 57376,
"s": 57361,
"text": "T\nT\nNIL\nNIL\nT\n"
},
{
"code": null,
"e": 57535,
"s": 57376,
"text": "We have already discussed the mapping functions. Similarly the map function allows you to apply a function on to subsequent elements of one or more sequences."
},
{
"code": null,
"e": 57692,
"s": 57535,
"text": "The map function takes a n-argument function and n sequences and returns a new sequence after applying the function to subsequent elements of the sequences."
},
{
"code": null,
"e": 57773,
"s": 57692,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 57821,
"s": 57773,
"text": "(write (map 'vector #'* #(2 3 4 5) #(3 5 4 8)))"
},
{
"code": null,
"e": 57882,
"s": 57821,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 57897,
"s": 57882,
"text": "#(6 15 16 40)\n"
},
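{
"code": null,
"e": null,
"s": null,
"text": "A few functions from the earlier table, such as reduce, search, mismatch, copy-seq and replace, do not appear in the examples above. The following short sketch illustrates them, with expected results (based on their standard definitions) noted in the comments."
},
{
"code": null,
"e": null,
"s": null,
"text": "(write (reduce #'+ '(1 2 3 4 5))) ; 15\n(terpri)\n(write (search \"we\" \"are we not\")) ; 4, the starting index of the match\n(terpri)\n(write (mismatch '(1 2 3 4) '(1 2 5 4))) ; 2, the first index where they differ\n(terpri)\n(write (copy-seq #(1 2 3))) ; #(1 2 3), a fresh copy\n(terpri)\n; replace destructively copies elements of the second sequence into the first\n(write (replace (list 1 2 3 4 5) '(9 8))) ; (9 8 3 4 5)"
},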
{
"code": null,
"e": 58099,
"s": 57897,
"text": "Lists had been the most important and the primary composite data structure in traditional LISP. Present day's Common LISP provides other data structures like, vector, hash table, classes or structures."
},
{
"code": null,
"e": 58229,
"s": 58099,
"text": "Lists are single linked lists. In LISP, lists are constructed as a chain of a simple record structure named cons linked together."
},
{
"code": null,
"e": 58312,
"s": 58229,
"text": "A cons is a record structure containing two components called the car and the cdr."
},
{
"code": null,
"e": 58405,
"s": 58312,
"text": "Cons cells or cons are objects are pairs of values that are created using the function cons."
},
{
"code": null,
"e": 58552,
"s": 58405,
"text": "The cons function takes two arguments and returns a new cons cell containing the two values. These values can be references to any kind of object."
},
{
"code": null,
"e": 58676,
"s": 58552,
"text": "If the second value is not nil, or another cons cell, then the values are printed as a dotted pair enclosed by parentheses."
},
{
"code": null,
"e": 58846,
"s": 58676,
"text": "The two values in a cons cell are called the car and the cdr. The car function is used to access the first value and the cdr function is used to access the second value."
},
{
"code": null,
"e": 58927,
"s": 58846,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 59260,
"s": 58927,
"text": "(write (cons 1 2))\n(terpri)\n(write (cons 'a 'b))\n(terpri)\n(write (cons 1 nil))\n(terpri)\n(write (cons 1 (cons 2 nil)))\n(terpri)\n(write (cons 1 (cons 2 (cons 3 nil))))\n(terpri)\n(write (cons 'a (cons 'b (cons 'c nil))))\n(terpri)\n(write ( car (cons 'a (cons 'b (cons 'c nil)))))\n(terpri)\n(write ( cdr (cons 'a (cons 'b (cons 'c nil)))))"
},
{
"code": null,
"e": 59321,
"s": 59260,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 59372,
"s": 59321,
"text": "(1 . 2)\n(A . B)\n(1)\n(1 2)\n(1 2 3)\n(A B C)\nA\n(B C)\n"
},
{
"code": null,
"e": 59549,
"s": 59372,
"text": "The above example shows how the cons structures could be used to create a single linked list, e.g., the list (A B C) consists of three cons cells linked together by their cdrs."
},
{
"code": null,
"e": 59594,
"s": 59549,
"text": "Diagrammatically, it could be expressed as −"
},
{
"code": null,
"e": 59795,
"s": 59594,
"text": "Although cons cells can be used to create lists, however, constructing a list out of nested cons function calls can't be the best solution. The list function is rather used for creating lists in LISP."
},
{
"code": null,
"e": 59899,
"s": 59795,
"text": "The list function can take any number of arguments and as it is a function, it evaluates its arguments."
},
{
"code": null,
"e": 60029,
"s": 59899,
"text": "The first and rest functions give the first element and the rest part of a list. The following examples demonstrate the concepts."
},
{
"code": null,
"e": 60110,
"s": 60029,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 60360,
"s": 60110,
"text": "(write (list 1 2))\n(terpri)\n(write (list 'a 'b))\n(terpri)\n(write (list 1 nil))\n(terpri)\n(write (list 1 2 3))\n(terpri)\n(write (list 'a 'b 'c))\n(terpri)\n(write (list 3 4 'a (car '(b . c)) (* 4 -2)))\n(terpri)\n(write (list (list 'a 'b) (list 'c 'd 'e)))"
},
{
"code": null,
"e": 60421,
"s": 60360,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 60487,
"s": 60421,
"text": "(1 2)\n(A B)\n(1 NIL)\n(1 2 3)\n(A B C)\n(3 4 A B -8)\n((A B) (C D E))\n"
},
{
"code": null,
"e": 60568,
"s": 60487,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 60766,
"s": 60568,
"text": "(defun my-library (title author rating availability)\n (list :title title :author author :rating rating :availabilty availability)\n)\n\n(write (getf (my-library \"Hunger Game\" \"Collins\" 9 t) :title))"
},
{
"code": null,
"e": 60827,
"s": 60766,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 60842,
"s": 60827,
"text": "\"Hunger Game\"\n"
},
{
"code": null,
"e": 60919,
"s": 60842,
"text": "The following table provides some commonly used list manipulating functions."
},
{
"code": null,
"e": 60923,
"s": 60919,
"text": "car"
},
{
"code": null,
"e": 60983,
"s": 60923,
"text": "It takes a list as argument, and returns its first element."
},
{
"code": null,
"e": 60987,
"s": 60983,
"text": "cdr"
},
{
"code": null,
"e": 61061,
"s": 60987,
"text": "It takes a list as argument, and returns a list without the first element"
},
{
"code": null,
"e": 61066,
"s": 61061,
"text": "cons"
},
{
"code": null,
"e": 61177,
"s": 61066,
"text": "It takes two arguments, an element and a list and returns a list with the element inserted at the first place."
},
{
"code": null,
"e": 61182,
"s": 61177,
"text": "list"
},
{
"code": null,
"e": 61285,
"s": 61182,
"text": "It takes any number of arguments and returns a list with the arguments as member elements of the list."
},
{
"code": null,
"e": 61292,
"s": 61285,
"text": "append"
},
{
"code": null,
"e": 61329,
"s": 61292,
"text": "It merges two or more list into one."
},
{
"code": null,
"e": 61334,
"s": 61329,
"text": "last"
},
{
"code": null,
"e": 61398,
"s": 61334,
"text": "It takes a list and returns a list containing the last element."
},
{
"code": null,
"e": 61405,
"s": 61398,
"text": "member"
},
{
"code": null,
"e": 61603,
"s": 61405,
"text": "It takes two arguments of which the second must be a list, if the first argument is a member of the second argument, and then it returns the remainder of the list beginning with the first argument."
},
{
"code": null,
"e": 61611,
"s": 61603,
"text": "reverse"
},
{
"code": null,
"e": 61686,
"s": 61611,
"text": "It takes a list and returns a list with the top elements in reverse order."
},
{
"code": null,
"e": 61751,
"s": 61686,
"text": "Please note that all sequence functions are applicable to lists."
},
{
"code": null,
"e": 61832,
"s": 61751,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 62115,
"s": 61832,
"text": "(write (car '(a b c d e f)))\n(terpri)\n(write (cdr '(a b c d e f)))\n(terpri)\n(write (cons 'a '(b c)))\n(terpri)\n(write (list 'a '(b c) '(e f)))\n(terpri)\n(write (append '(b c) '(e f) '(p q) '() '(g)))\n(terpri)\n(write (last '(a b c d (e f))))\n(terpri)\n(write (reverse '(a b c d (e f))))"
},
{
"code": null,
"e": 62176,
"s": 62115,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 62255,
"s": 62176,
"text": "A\n(B C D E F)\n(A B C)\n(A (B C) (E F))\n(B C E F P Q G)\n((E F))\n((E F) D C B A)\n"
},
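{
"code": null,
"e": null,
"s": null,
"text": "The member function from the table above does not appear in the previous example; a short sketch of it, with the expected results noted in the comments."
},
{
"code": null,
"e": null,
"s": null,
"text": "(write (member 'c '(a b c d e))) ; (C D E)\n(terpri)\n(write (member 'x '(a b c d e))) ; NIL - x is not in the list"
},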
{
"code": null,
"e": 62363,
"s": 62255,
"text": "The car and cdr functions and their combination allows extracting any particular element/ member of a list."
},
{
"code": null,
"e": 62508,
"s": 62363,
"text": "However, sequences of car and cdr functions could be abbreviated by concatenating the letter a for car and d for cdr within the letters c and r."
},
{
"code": null,
"e": 62604,
"s": 62508,
"text": "For example we can write cadadr to abbreviate the sequence of function calls - car cdr car cdr."
},
{
"code": null,
"e": 62652,
"s": 62604,
"text": "Thus, (cadadr '(a (c d) (e f g))) will return d"
},
{
"code": null,
"e": 62733,
"s": 62652,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 62881,
"s": 62733,
"text": "(write (cadadr '(a (c d) (e f g))))\n(terpri)\n(write (caar (list (list 'a 'b) 'c))) \n(terpri)\n(write (cadr (list (list 1 2) (list 3 4))))\n(terpri)"
},
{
"code": null,
"e": 62942,
"s": 62881,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 62953,
"s": 62942,
"text": "D\nA\n(3 4)\n"
},
{
"code": null,
"e": 63054,
"s": 62953,
"text": "In LISP, a symbol is a name that represents data objects and interestingly it is also a data object."
},
{
"code": null,
"e": 63147,
"s": 63054,
"text": "What makes symbols special is that they have a component called the property list, or plist."
},
{
"code": null,
"e": 63389,
"s": 63147,
"text": "LISP allows you to assign properties to symbols. For example, let us have a 'person' object. We would like this 'person' object to have properties like name, sex, height, weight, address, profession etc. A property is like an attribute name."
},
{
"code": null,
"e": 63597,
"s": 63389,
"text": "A property list is implemented as a list with an even number (possibly zero) of elements. Each pair of elements in the list constitutes an entry; the first item is the indicator, and the second is the value."
},
{
"code": null,
"e": 63717,
"s": 63597,
"text": "When a symbol is created, its property list is initially empty. Properties are created by using get within a setf form."
},
{
"code": null,
"e": 63873,
"s": 63717,
"text": "For example, the following statements allow us to assign properties title, author and publisher, and respective values, to an object named (symbol) 'book'."
},
{
"code": null,
"e": 63954,
"s": 63873,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 64138,
"s": 63954,
"text": "(write (setf (get 'books'title) '(Gone with the Wind)))\n(terpri)\n(write (setf (get 'books 'author) '(Margaret Michel)))\n(terpri)\n(write (setf (get 'books 'publisher) '(Warner Books)))"
},
{
"code": null,
"e": 64199,
"s": 64138,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 64254,
"s": 64199,
"text": "(GONE WITH THE WIND)\n(MARGARET MICHEL)\n(WARNER BOOKS)\n"
},
{
"code": null,
"e": 64384,
"s": 64254,
"text": "Various property list functions allow you to assign properties as well as retrieve, replace or remove the properties of a symbol."
},
{
"code": null,
"e": 64490,
"s": 64384,
"text": "The get function returns the property list of symbol for a given indicator. It has the following syntax −"
},
{
"code": null,
"e": 64529,
"s": 64490,
"text": "get symbol indicator &optional default"
},
{
"code": null,
"e": 64750,
"s": 64529,
"text": "The get function looks for the property list of the given symbol for the specified indicator, if found then it returns the corresponding value; otherwise default is returned (or nil, if a default value is not specified)."
},
{
"code": null,
"e": 64831,
"s": 64750,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 65083,
"s": 64831,
"text": "(setf (get 'books 'title) '(Gone with the Wind))\n(setf (get 'books 'author) '(Margaret Micheal))\n(setf (get 'books 'publisher) '(Warner Books))\n\n(write (get 'books 'title))\n(terpri)\n(write (get 'books 'author))\n(terpri)\n(write (get 'books 'publisher))"
},
{
"code": null,
"e": 65144,
"s": 65083,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 65200,
"s": 65144,
"text": "(GONE WITH THE WIND)\n(MARGARET MICHEAL)\n(WARNER BOOKS)\n"
},
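{
"code": null,
"e": null,
"s": null,
"text": "The optional default argument of get can be illustrated with a short sketch; the property name price used here is only an example."
},
{
"code": null,
"e": null,
"s": null,
"text": "(setf (get 'books 'title) '(Gone with the Wind))\n(write (get 'books 'price)) ; NIL, no such property and no default given\n(terpri)\n(write (get 'books 'price 'not-known)) ; NOT-KNOWN, the supplied default"
},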
{
"code": null,
"e": 65276,
"s": 65200,
"text": "The symbol-plist function allows you to see all the properties of a symbol."
},
{
"code": null,
"e": 65357,
"s": 65276,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 65527,
"s": 65357,
"text": "(setf (get 'annie 'age) 43)\n(setf (get 'annie 'job) 'accountant)\n(setf (get 'annie 'sex) 'female)\n(setf (get 'annie 'children) 3)\n\n(terpri)\n(write (symbol-plist 'annie))"
},
{
"code": null,
"e": 65588,
"s": 65527,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 65635,
"s": 65588,
"text": "(CHILDREN 3 SEX FEMALE JOB ACCOUNTANT AGE 43)\n"
},
{
"code": null,
"e": 65702,
"s": 65635,
"text": "The remprop function removes the specified property from a symbol."
},
{
"code": null,
"e": 65783,
"s": 65702,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 66014,
"s": 65783,
"text": "(setf (get 'annie 'age) 43)\n(setf (get 'annie 'job) 'accountant)\n(setf (get 'annie 'sex) 'female)\n(setf (get 'annie 'children) 3)\n\n(terpri)\n(write (symbol-plist 'annie))\n(remprop 'annie 'age)\n(terpri)\n(write (symbol-plist 'annie))"
},
{
"code": null,
"e": 66075,
"s": 66014,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 66161,
"s": 66075,
"text": "(CHILDREN 3 SEX FEMALE JOB ACCOUNTANT AGE 43)\n(CHILDREN 3 SEX FEMALE JOB ACCOUNTANT)\n"
},
{
"code": null,
"e": 66384,
"s": 66161,
"text": "Vectors are one-dimensional arrays, therefore a subtype of array. Vectors and lists are collectively called sequences. Therefore all sequence generic functions and array functions we have discussed so far, work on vectors."
},
{
"code": null,
"e": 66546,
"s": 66384,
"text": "The vector function allows you to make fixed-size vectors with specific values. It takes any number of arguments and returns a vector containing those arguments."
},
{
"code": null,
"e": 66627,
"s": 66546,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 66765,
"s": 66627,
"text": "(setf v1 (vector 1 2 3 4 5))\n(setf v2 #(a b c d e))\n(setf v3 (vector 'p 'q 'r 's 't))\n\n(write v1)\n(terpri)\n(write v2)\n(terpri)\n(write v3)"
},
{
"code": null,
"e": 66826,
"s": 66765,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 66866,
"s": 66826,
"text": "#(1 2 3 4 5)\n#(A B C D E)\n#(P Q R S T)\n"
},
{
"code": null,
"e": 67032,
"s": 66866,
"text": "Please note that LISP uses the #(...) syntax as the literal notation for vectors. You can use this #(... ) syntax to create and include literal vectors in your code."
},
{
"code": null,
"e": 67259,
"s": 67032,
"text": "However, these are literal vectors, so modifying them is not defined in LISP. Therefore, for programming, you should always use the vector function, or the more general function make-array to create vectors you plan to modify."
},
{
"code": null,
"e": 67387,
"s": 67259,
"text": "The make-array function is the more generic way to create a vector. You can access the vector elements using the aref function."
},
{
"code": null,
"e": 67468,
"s": 67387,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 67636,
"s": 67468,
"text": "(setq a (make-array 5 :initial-element 0))\n(setq b (make-array 5 :initial-element 2))\n\n(dotimes (i 5)\n (setf (aref a i) i))\n \n(write a)\n(terpri)\n(write b)\n(terpri)"
},
{
"code": null,
"e": 67697,
"s": 67636,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 67724,
"s": 67697,
"text": "#(0 1 2 3 4)\n#(2 2 2 2 2)\n"
},
{
"code": null,
"e": 67789,
"s": 67724,
"text": "The make-array function allows you to create a resizable vector."
},
{
"code": null,
"e": 67988,
"s": 67789,
"text": "The fill-pointer argument of the function keeps track of the number of elements actually stored in the vector. It's the index of the next position to be filled when you add an element to the vector."
},
{
"code": null,
"e": 68112,
"s": 67988,
"text": "The vector-push function allows you to add an element to the end of a resizable vector. It increases the fill-pointer by 1."
},
{
"code": null,
"e": 68212,
"s": 68112,
"text": "The vector-pop function returns the most recently pushed item and decrements the fill pointer by 1."
},
{
"code": null,
"e": 68293,
"s": 68212,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 68617,
"s": 68293,
"text": "(setq a (make-array 5 :fill-pointer 0))\n(write a)\n\n(vector-push 'a a)\n(vector-push 'b a)\n(vector-push 'c a)\n\n(terpri)\n(write a)\n(terpri)\n\n(vector-push 'd a)\n(vector-push 'e a)\n\n;this will not be entered as the vector limit is 5\n(vector-push 'f a)\n\n(write a)\n(terpri)\n\n(vector-pop a)\n(vector-pop a)\n(vector-pop a)\n\n(write a)"
},
{
"code": null,
"e": 68678,
"s": 68617,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 68712,
"s": 68678,
"text": "#()\n#(A B C)\n#(A B C D E)\n#(A B)\n"
},
{
"code": null,
"e": 68848,
"s": 68712,
"text": "Vectors being sequences, all sequence functions are applicable for vectors. Please consult the sequences chapter, for vector functions."
},
{
"code": null,
"e": 68989,
"s": 68848,
"text": "Common Lisp does not provide a set data type. However, it provides number of functions that allows set operations to be performed on a list."
},
{
"code": null,
"e": 69160,
"s": 68989,
"text": "You can add, remove, and search for items in a list, based on various criteria. You can also perform various set operations like: union, intersection, and set difference."
},
{
"code": null,
"e": 69330,
"s": 69160,
"text": "Sets, like lists are generally implemented in terms of cons cells. However, for this very reason, the set operations get less and less efficient the bigger the sets get."
},
{
"code": null,
"e": 69526,
"s": 69330,
"text": "The adjoin function allows you to build up a set. It takes an item and a list representing a set and returns a list representing the set containing the item and all the items in the original set."
},
{
"code": null,
"e": 69768,
"s": 69526,
"text": "The adjoin function first looks for the item in the given list, if it is found, then it returns the original list; otherwise it creates a new cons cell with its car as the item and cdr pointing to the original list and returns this new list."
},
{
"code": null,
"e": 69921,
"s": 69768,
"text": "The adjoin function also takes :key and :test keyword arguments. These arguments are used for checking whether the item is present in the original list."
},
{
"code": null,
"e": 70154,
"s": 69921,
"text": "Since, the adjoin function does not modify the original list, to make a change in the list itself, you must either assign the value returned by adjoin to the original list or, you may use the macro pushnew to add an item to the set."
},
{
"code": null,
"e": 70235,
"s": 70154,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 70709,
"s": 70235,
"text": "; creating myset as an empty list\n(defparameter *myset* ())\n(adjoin 1 *myset*)\n(adjoin 2 *myset*)\n\n; adjoin did not change the original set\n;so it remains same\n(write *myset*)\n(terpri)\n(setf *myset* (adjoin 1 *myset*))\n(setf *myset* (adjoin 2 *myset*))\n\n;now the original set is changed\n(write *myset*)\n(terpri)\n\n;adding an existing value\n(pushnew 2 *myset*)\n\n;no duplicate allowed\n(write *myset*)\n(terpri)\n\n;pushing a new value\n(pushnew 3 *myset*)\n(write *myset*)\n(terpri)"
},
{
"code": null,
"e": 70770,
"s": 70709,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 70795,
"s": 70770,
"text": "NIL\n(2 1)\n(2 1)\n(3 2 1)\n"
},
{
"code": null,
"e": 70891,
"s": 70795,
"text": "The member group of functions allows you to check whether an element is member of a set or not."
},
{
"code": null,
"e": 70943,
"s": 70891,
"text": "The following are the syntaxes of these functions −"
},
{
"code": null,
"e": 71063,
"s": 70943,
"text": "member item list &key :test :test-not :key \nmember-if predicate list &key :key \nmember-if-not predicate list &key :key\n"
},
{
"code": null,
"e": 71284,
"s": 71063,
"text": "These functions search the given list for a given item that satisfies the test. If no such item is found, then the functions returns nil. Otherwise, the tail of the list with the element as the first element is returned."
},
{
"code": null,
"e": 71331,
"s": 71284,
"text": "The search is conducted at the top level only."
},
{
"code": null,
"e": 71376,
"s": 71331,
"text": "These functions could be used as predicates."
},
{
"code": null,
"e": 71457,
"s": 71376,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 71628,
"s": 71457,
"text": "(write (member 'zara '(ayan abdul zara riyan nuha)))\n(terpri)\n(write (member-if #'evenp '(3 7 2 5/3 'a)))\n(terpri)\n(write (member-if-not #'numberp '(3 7 2 5/3 'a 'b 'c)))"
},
{
"code": null,
"e": 71689,
"s": 71628,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 71730,
"s": 71689,
"text": "(ZARA RIYAN NUHA)\n(2 5/3 'A)\n('A 'B 'C)\n"
},
{
"code": null,
"e": 71869,
"s": 71730,
"text": "The union group of functions allows you to perform set union on two lists provided as arguments to these functions on the basis of a test."
},
{
"code": null,
"e": 71921,
"s": 71869,
"text": "The following are the syntaxes of these functions −"
},
{
"code": null,
"e": 72012,
"s": 71921,
"text": "union list1 list2 &key :test :test-not :key \nnunion list1 list2 &key :test :test-not :key\n"
},
{
"code": null,
"e": 72224,
"s": 72012,
"text": "The union function takes two lists and returns a new list containing all the elements present in either of the lists. If there are duplications, then only one copy of the member is retained in the returned list."
},
{
"code": null,
"e": 72308,
"s": 72224,
"text": "The nunion function performs the same operation but may destroy the argument lists."
},
{
"code": null,
"e": 72389,
"s": 72308,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 72667,
"s": 72389,
"text": "(setq set1 (union '(a b c) '(c d e)))\n(setq set2 (union '(#(a b) #(5 6 7) #(f h)) \n '(#(5 6 7) #(a b) #(g h)) :test-not #'mismatch)\n)\n \n(setq set3 (union '(#(a b) #(5 6 7) #(f h)) \n '(#(5 6 7) #(a b) #(g h)))\n)\n(write set1)\n(terpri)\n(write set2)\n(terpri)\n(write set3)"
},
{
"code": null,
"e": 72728,
"s": 72667,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 72821,
"s": 72728,
"text": "(A B C D E)\n(#(F H) #(5 6 7) #(A B) #(G H))\n(#(A B) #(5 6 7) #(F H) #(5 6 7) #(A B) #(G H))\n"
},
{
"code": null,
"e": 73241,
"s": 72821,
"text": "The union function does not work as expected without :test-not #'mismatch arguments for a list of three vectors. This is because, the lists are made of cons cells and although the values look same to us apparently, the cdr part of cells does not match, so they are not exactly same to LISP interpreter/compiler. This is the reason; implementing big sets are not advised using lists. It works fine for small sets though."
},
{
"code": null,
"e": 73390,
"s": 73241,
"text": "The intersection group of functions allows you to perform intersection on two lists provided as arguments to these functions on the basis of a test."
},
{
"code": null,
"e": 73442,
"s": 73390,
"text": "The following are the syntaxes of these functions −"
},
{
"code": null,
"e": 73547,
"s": 73442,
"text": "intersection list1 list2 &key :test :test-not :key \nnintersection list1 list2 &key :test :test-not :key\n"
},
{
"code": null,
"e": 73757,
"s": 73547,
"text": "These functions take two lists and return a new list containing all the elements present in both argument lists. If either list has duplicate entries, the redundant entries may or may not appear in the result."
},
{
"code": null,
"e": 73838,
"s": 73757,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 74137,
"s": 73838,
"text": "(setq set1 (intersection '(a b c) '(c d e)))\n(setq set2 (intersection '(#(a b) #(5 6 7) #(f h)) \n '(#(5 6 7) #(a b) #(g h)) :test-not #'mismatch)\n)\n \n(setq set3 (intersection '(#(a b) #(5 6 7) #(f h)) \n '(#(5 6 7) #(a b) #(g h)))\n)\n(write set1)\n(terpri)\n(write set2)\n(terpri)\n(write set3)"
},
{
"code": null,
"e": 74198,
"s": 74137,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 74225,
"s": 74198,
"text": "(C)\n(#(A B) #(5 6 7))\nNIL\n"
},
{
"code": null,
"e": 74336,
"s": 74225,
"text": "The intersection function is the destructive version of intersection, i.e., it may destroy the original lists."
},
{
"code": null,
"e": 74489,
"s": 74336,
"text": "The set-difference group of functions allows you to perform set difference on two lists provided as arguments to these functions on the basis of a test."
},
{
"code": null,
"e": 74541,
"s": 74489,
"text": "The following are the syntaxes of these functions −"
},
{
"code": null,
"e": 74650,
"s": 74541,
"text": "set-difference list1 list2 &key :test :test-not :key \nnset-difference list1 list2 &key :test :test-not :key\n"
},
{
"code": null,
"e": 74762,
"s": 74650,
"text": "The set-difference function returns a list of elements of the first list that do not appear in the second list."
},
{
"code": null,
"e": 74843,
"s": 74762,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 75140,
"s": 74843,
"text": "(setq set1 (set-difference '(a b c) '(c d e)))\n(setq set2 (set-difference '(#(a b) #(5 6 7) #(f h)) \n '(#(5 6 7) #(a b) #(g h)) :test-not #'mismatch)\n)\n(setq set3 (set-difference '(#(a b) #(5 6 7) #(f h)) \n '(#(5 6 7) #(a b) #(g h)))\n)\n(write set1)\n(terpri)\n(write set2)\n(terpri)\n(write set3)"
},
{
"code": null,
"e": 75201,
"s": 75140,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 75242,
"s": 75201,
"text": "(A B)\n(#(F H))\n(#(A B) #(5 6 7) #(F H))\n"
},
{
"code": null,
"e": 75313,
"s": 75242,
"text": "You can build tree data structures from cons cells, as lists of lists."
},
{
"code": null,
"e": 75513,
"s": 75313,
"text": "To implement tree structures, you will have to design functionalities that would traverse through the cons cells, in specific order, for example, pre-order, in-order, and post-order for binary trees."
},
{
"code": null,
"e": 75607,
"s": 75513,
"text": "Let us consider a tree structure made up of cons cell that form the following list of lists −"
},
{
"code": null,
"e": 75628,
"s": 75607,
"text": "((1 2) (3 4) (5 6))."
},
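{
"code": null,
"e": null,
"s": null,
"text": "As an illustrative sketch (not from the original tutorial), a simple depth-first traversal over such a structure can be written by recursing into the car and cdr of each cons cell −"
},
{
"code": null,
"e": null,
"s": null,
"text": "; walks the tree depth-first and prints every atom it finds\n(defun walk-tree (tree)\n   (cond ((null tree) nil)\n         ((atom tree) (print tree))\n         (t (walk-tree (car tree))\n            (walk-tree (cdr tree)))))\n\n(walk-tree '((1 2) (3 4) (5 6)))\n; prints 1 2 3 4 5 6, each on its own line"
},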
{
"code": null,
"e": 75827,
"s": 75673,
"text": "Although mostly you will need to write your own tree-functionalities according to your specific need, LISP provides some tree functions that you can use."
},
{
"code": null,
"e": 75923,
"s": 75827,
"text": "Apart from all the list functions, the following functions work especially on tree structures −"
},
{
"code": null,
"e": 75951,
"s": 75923,
"text": "copy-tree x & optional vecp"
},
{
"code": null,
"e": 76228,
"s": 75951,
"text": "It returns a copy of the tree of cons cells x. It recursively copies both the car and the cdr directions. If x is not a cons cell, the function simply returns x unchanged. If the optional vecp argument is true, this function copies vectors (recursively) as well as cons cells."
},
{
"code": null,
"e": 76270,
"s": 76228,
"text": "tree-equal x y & key :test :test-not :key"
},
{
"code": null,
"e": 76557,
"s": 76270,
"text": "It compares two trees of cons cells. If x and y are both cons cells, their cars and cdrs are compared recursively. If neither x nor y is a cons cell, they are compared by eql, or according to the specified test. The :key function, if specified, is applied to the elements of both trees."
},
{
"code": null,
"e": 76603,
"s": 76557,
"text": "subst new old tree & key :test :test-not :key"
},
{
"code": null,
"e": 76703,
"s": 76603,
"text": "It substitutes occurrences of given old item with new item, in tree, which is a tree of cons cells."
},
{
"code": null,
"e": 76750,
"s": 76703,
"text": "nsubst new old tree & key :test :test-not :key"
},
{
"code": null,
"e": 76809,
"s": 76750,
"text": "It works same as subst, but it destroys the original tree."
},
{
"code": null,
"e": 76854,
"s": 76809,
"text": "sublis alist tree & key :test :test-not :key"
},
{
"code": null,
"e": 77102,
"s": 76854,
"text": "It works like subst, except that it takes an association list alist of old-new pairs. Each element of the tree (after applying the :key function, if any), is compared with the cars of alist; if it matches, it is replaced by the corresponding cdr."
},
{
"code": null,
"e": 77148,
"s": 77102,
"text": "nsublis alist tree & key :test :test-not :key"
},
{
"code": null,
"e": 77200,
"s": 77148,
"text": "It works same as sublis, but a destructive version."
},
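{
"code": null,
"e": null,
"s": null,
"text": "The examples below cover copy-tree and subst; as a small additional sketch (not from the original), tree-equal and sublis can be used as follows −"
},
{
"code": null,
"e": null,
"s": null,
"text": "(write (tree-equal '(1 (2 3)) '(1 (2 3))))   ; prints T\n(terpri)\n(write (tree-equal '(1 (2 3)) '(1 (2 4))))   ; prints NIL\n(terpri)\n\n; replace 1 with one and 3 with three throughout the tree\n(write (sublis '((1 . one) (3 . three)) '(1 2 (3 4 5))))\n; prints (ONE 2 (THREE 4 5))"
},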
{
"code": null,
"e": 77281,
"s": 77200,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 77431,
"s": 77281,
"text": "(setq lst (list '(1 2) '(3 4) '(5 6)))\n(setq mylst (copy-list lst))\n(setq tr (copy-tree lst))\n\n(write lst)\n(terpri)\n(write mylst)\n(terpri)\n(write tr)"
},
{
"code": null,
"e": 77492,
"s": 77431,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 77553,
"s": 77492,
"text": "((1 2) (3 4) (5 6))\n((1 2) (3 4) (5 6))\n((1 2) (3 4) (5 6))\n"
},
{
"code": null,
"e": 77634,
"s": 77553,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 77735,
"s": 77634,
"text": "(setq tr '((1 2 (3 4 5) ((7 8) (7 8 9)))))\n(write tr)\n(setq trs (subst 7 1 tr))\n(terpri)\n(write trs)"
},
{
"code": null,
"e": 77796,
"s": 77735,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 77861,
"s": 77796,
"text": "((1 2 (3 4 5) ((7 8) (7 8 9))))\n((7 2 (3 4 5) ((7 8) (7 8 9))))\n"
},
{
"code": null,
"e": 77939,
"s": 77861,
"text": "Let us try to build our own tree, using the list functions available in LISP."
},
{
"code": null,
"e": 78033,
"s": 77939,
"text": "(defun make-tree (item)\n \"it creates a new node with item.\"\n (cons (cons item nil) nil)\n)"
},
{
"code": null,
"e": 78157,
"s": 78033,
"text": "Next let us add a child node into the tree - it will take two tree nodes and add the second tree as the child of the first."
},
{
"code": null,
"e": 78243,
"s": 78157,
"text": "(defun add-child (tree child)\n (setf (car tree) (append (car tree) child))\n tree)"
},
{
"code": null,
"e": 78417,
"s": 78243,
"text": "This function will return the first child a given tree - it will take a tree node and return the first child of that node, or nil, if this node does not have any child node."
},
{
"code": null,
"e": 78502,
"s": 78417,
"text": "(defun first-child (tree)\n (if (null tree)\n nil\n (cdr (car tree))\n )\n)"
},
{
"code": null,
"e": 78686,
"s": 78502,
"text": "This function will return the next sibling of a given node - it takes a tree node as argument, and returns a reference to the next sibling node, or nil, if the node does not have any."
},
{
"code": null,
"e": 78729,
"s": 78686,
"text": "(defun next-sibling (tree)\n (cdr tree)\n)"
},
{
"code": null,
"e": 78793,
"s": 78729,
"text": "Lastly we need a function to return the information in a node −"
},
{
"code": null,
"e": 78834,
"s": 78793,
"text": "(defun data (tree)\n (car (car tree))\n)"
},
{
"code": null,
"e": 78880,
"s": 78834,
"text": "This example uses the above functionalities −"
},
{
"code": null,
"e": 78961,
"s": 78880,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 79513,
"s": 78961,
"text": "(defun make-tree (item)\n \"it creates a new node with item.\"\n (cons (cons item nil) nil)\n)\n(defun first-child (tree)\n (if (null tree)\n nil\n (cdr (car tree))\n )\n)\n\n(defun next-sibling (tree)\n (cdr tree)\n)\n(defun data (tree)\n (car (car tree))\n)\n(defun add-child (tree child)\n (setf (car tree) (append (car tree) child))\n tree\n)\n\n(setq tr '((1 2 (3 4 5) ((7 8) (7 8 9)))))\n(setq mytree (make-tree 10))\n\n(write (data mytree))\n(terpri)\n(write (first-child tr))\n(terpri)\n(setq newtree (add-child tr mytree))\n(terpri)\n(write newtree)"
},
{
"code": null,
"e": 79574,
"s": 79513,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 79644,
"s": 79574,
"text": "10\n(2 (3 4 5) ((7 8) (7 8 9)))\n\n((1 2 (3 4 5) ((7 8) (7 8 9)) (10)))\n"
},
{
"code": null,
"e": 79833,
"s": 79644,
"text": "The hash table data structure represents a collection of key-and-value pairs that are organized based on the hash code of the key. It uses the key to access the elements in the collection."
},
{
"code": null,
"e": 80049,
"s": 79833,
"text": "A hash table is used when you need to access elements by using a key, and you can identify a useful key value. Each item in the hash table has a key/value pair. The key is used to access the items in the collection."
},
{
"code": null,
"e": 80160,
"s": 80049,
"text": "In Common LISP, hash table is a general-purpose collection. You can use arbitrary objects as a key or indexes."
},
{
"code": null,
"e": 80407,
"s": 80160,
"text": "When you store a value in a hash table, you make a key-value pair, and store it under that key. Later you can retrieve the value from the hash table using the same key. Each key maps to a single value, although you can store a new value in a key."
},
{
"code": null,
"e": 80700,
"s": 80407,
"text": "Hash tables, in LISP, could be categorized into three types, based on the way the keys could be compared - eq, eql or equal. If the hash table is hashed on LISP objects then the keys are compared with eq or eql. If the hash table hash on tree structure, then it would be compared using equal."
},
{
"code": null,
"e": 80794,
"s": 80700,
"text": "The make-hash-table function is used for creating a hash table. Syntax for this function is −"
},
{
"code": null,
"e": 80859,
"s": 80794,
"text": "make-hash-table &key :test :size :rehash-size :rehash-threshold\n"
},
{
"code": null,
"e": 80867,
"s": 80859,
"text": "Where −"
},
{
"code": null,
"e": 80902,
"s": 80867,
"text": "The key argument provides the key."
},
{
"code": null,
"e": 81132,
"s": 80937,
"text": "The :test argument determines how keys are compared - it should have one of three values #'eq, #'eql, or #'equal, or one of the three symbols eq, eql, or equal. If not specified, eql is assumed."
},
{
"code": null,
"e": 81432,
"s": 81327,
"text": "The :size argument sets the initial size of the hash table. This should be an integer greater than zero."
},
{
"code": null,
"e": 81894,
"s": 81537,
"text": "The :rehash-size argument specifies how much to increase the size of the hash table when it becomes full. This can be an integer greater than zero, which is the number of entries to add, or it can be a floating-point number greater than 1, which is the ratio of the new size to the old size. The default value for this argument is implementation-dependent."
},
{
"code": null,
"e": 82602,
"s": 82251,
"text": "The :rehash-threshold argument specifies how full the hash table can get before it must grow. This can be an integer greater than zero and less than the :rehash-size (in which case it will be scaled whenever the table is grown), or it can be a floating-point number between zero and 1. The default value for this argument is implementation-dependent."
},
{
"code": null,
"e": 83019,
"s": 82953,
"text": "You can also call the make-hash-table function with no arguments."
},
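{
"code": null,
"e": null,
"s": null,
"text": "As an illustrative sketch (not from the original), a hash table that compares its keys with equal can be created as follows, which allows strings to be used as keys −"
},
{
"code": null,
"e": null,
"s": null,
"text": "(setq table (make-hash-table :test #'equal :size 20))\n(setf (gethash \"apple\" table) 'fruit)\n\n(write (gethash \"apple\" table))   ; prints FRUIT\n(terpri)\n(write (gethash \"onion\" table))   ; prints NIL, the key is absent"
},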
{
"code": null,
"e": 83154,
"s": 83019,
"text": "The gethash function retrieves an item from the hash table by searching for its key. If it does not find the key, then it returns nil."
},
{
"code": null,
"e": 83184,
"s": 83154,
"text": "It has the following syntax −"
},
{
"code": null,
"e": 83226,
"s": 83184,
"text": "gethash key hash-table &optional default\n"
},
{
"code": null,
"e": 83234,
"s": 83226,
"text": "where −"
},
{
"code": null,
"e": 83261,
"s": 83234,
"text": "key: is the associated key"
},
{
"code": null,
"e": 83333,
"s": 83288,
"text": "hash-table: is the hash-table to be searched"
},
{
"code": null,
"e": 83475,
"s": 83378,
"text": "default: is the value to be returned, if the entry is not found, which is nil, if not specified."
},
{
"code": null,
"e": 83726,
"s": 83572,
"text": "The gethash function actually returns two values, the second being a predicate value that is true if an entry was found, and false if no entry was found."
},
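{
"code": null,
"e": null,
"s": null,
"text": "The second return value can be captured with multiple-value-bind, as in this small sketch (an illustrative addition); it distinguishes a missing key from a key whose stored value happens to be nil −"
},
{
"code": null,
"e": null,
"s": null,
"text": "(setq empList (make-hash-table))\n(setf (gethash '001 empList) '(Charlie Brown))\n\n(multiple-value-bind (value found) (gethash '001 empList)\n   (format t \"~a ~a~%\" value found))   ; prints (CHARLIE BROWN) T\n(multiple-value-bind (value found) (gethash '009 empList)\n   (format t \"~a ~a~%\" value found))   ; prints NIL NIL"
},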
{
"code": null,
"e": 83827,
"s": 83726,
"text": "For adding an item to the hash table, you can use the setf function along with the gethash function."
},
{
"code": null,
"e": 83908,
"s": 83827,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 84110,
"s": 83908,
"text": "(setq empList (make-hash-table)) \n(setf (gethash '001 empList) '(Charlie Brown))\n(setf (gethash '002 empList) '(Freddie Seal)) \n(write (gethash '001 empList)) \n(terpri)\n(write (gethash '002 empList)) "
},
{
"code": null,
"e": 84171,
"s": 84110,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 84203,
"s": 84171,
"text": "(CHARLIE BROWN)\n(FREDDIE SEAL)\n"
},
{
"code": null,
"e": 84358,
"s": 84203,
"text": "The remhash function removes any entry for a specific key in hash-table. This is a predicate that is true if there was an entry or false if there was not."
},
{
"code": null,
"e": 84392,
"s": 84358,
"text": "The syntax for this function is −"
},
{
"code": null,
"e": 84416,
"s": 84392,
"text": "remhash key hash-table\n"
},
{
"code": null,
"e": 84497,
"s": 84416,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 84854,
"s": 84497,
"text": "(setq empList (make-hash-table)) \n(setf (gethash '001 empList) '(Charlie Brown))\n(setf (gethash '002 empList) '(Freddie Seal)) \n(setf (gethash '003 empList) '(Mark Mongoose)) \n\n(write (gethash '001 empList)) \n(terpri)\n(write (gethash '002 empList)) \n(terpri)\n(write (gethash '003 empList)) \n(remhash '003 empList)\n(terpri)\n(write (gethash '003 empList)) "
},
{
"code": null,
"e": 84915,
"s": 84854,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 84967,
"s": 84915,
"text": "(CHARLIE BROWN)\n(FREDDIE SEAL)\n(MARK MONGOOSE)\nNIL\n"
},
{
"code": null,
"e": 85069,
"s": 84967,
"text": "The maphash function allows you to apply a specified function on each key-value pair on a hash table."
},
{
"code": null,
"e": 85197,
"s": 85069,
"text": "It takes two arguments - the function and a hash table and invokes the function once for each key/value pair in the hash table."
},
{
"code": null,
"e": 85278,
"s": 85197,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 85518,
"s": 85278,
"text": "(setq empList (make-hash-table)) \n(setf (gethash '001 empList) '(Charlie Brown))\n(setf (gethash '002 empList) '(Freddie Seal)) \n(setf (gethash '003 empList) '(Mark Mongoose)) \n\n(maphash #'(lambda (k v) (format t \"~a => ~a~%\" k v)) empList)"
},
{
"code": null,
"e": 85579,
"s": 85518,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 85642,
"s": 85579,
"text": "3 => (MARK MONGOOSE)\n2 => (FREDDIE SEAL)\n1 => (CHARLIE BROWN)\n"
},
{
"code": null,
"e": 85876,
"s": 85642,
"text": "Common LISP provides numerous input-output functions. We have already used the format function, and print function for output. In this section, we will look into some of the most commonly used input-output functions provided in LISP."
},
{
"code": null,
"e": 85954,
"s": 85876,
"text": "The following table provides the most commonly used input functions of LISP −"
},
{
"code": null,
"e": 86017,
"s": 85954,
"text": "read & optional input-stream eof-error-p eof-value recursive-p"
},
{
"code": null,
"e": 86152,
"s": 86017,
"text": "It reads in the printed representation of a Lisp object from input-stream, builds a corresponding Lisp object, and returns the object."
},
{
"code": null,
"e": 86234,
"s": 86152,
"text": "read-preserving-whitespace & optional in-stream eof-error-p eof-value recursive-p"
},
{
"code": null,
"e": 86367,
"s": 86234,
"text": "It is used in some specialized situations where it is desirable to determine precisely what character terminated the extended token."
},
{
"code": null,
"e": 86435,
"s": 86367,
"text": "read-line & optional input-stream eof-error-p eof-value recursive-p"
},
{
"code": null,
"e": 86487,
"s": 86435,
"text": "It reads in a line of text terminated by a newline."
},
{
"code": null,
"e": 86555,
"s": 86487,
"text": "read-char & optional input-stream eof-error-p eof-value recursive-p"
},
{
"code": null,
"e": 86634,
"s": 86555,
"text": "It takes one character from input-stream and returns it as a character object."
},
{
"code": null,
"e": 86680,
"s": 86634,
"text": "unread-char character & optional input-stream"
},
{
"code": null,
"e": 86776,
"s": 86680,
"text": "It puts the character most recently read from the input-stream, onto the front of input-stream."
},
{
"code": null,
"e": 86854,
"s": 86776,
"text": "peek-char & optional peek-type input-stream eof-error-p eof-value recursive-p"
},
{
"code": null,
"e": 86966,
"s": 86854,
"text": "It returns the next character to be read from input-stream, without actually removing it from the input stream."
},
{
"code": null,
"e": 86997,
"s": 86966,
"text": "listen & optional input-stream"
},
{
"code": null,
"e": 87112,
"s": 86997,
"text": "The predicate listen is true if there is a character immediately available from input-stream, and is false if not."
},
{
"code": null,
"e": 87188,
"s": 87112,
"text": "read-char-no-hang & optional input-stream eof-error-p eof-value recursive-p"
},
{
"code": null,
"e": 87315,
"s": 87188,
"text": "It is similar to read-char, but if it does not get a character, it does not wait for a character, but returns nil immediately."
},
{
"code": null,
"e": 87351,
"s": 87315,
"text": "clear-input & optional input-stream"
},
{
"code": null,
"e": 87410,
"s": 87351,
"text": "It clears any buffered input associated with input-stream."
},
{
"code": null,
"e": 87506,
"s": 87410,
"text": "read-from-string string & optional eof-error-p eof-value & key :start :end :preserve-whitespace"
},
{
"code": null,
"e": 87744,
"s": 87506,
"text": "It takes the characters of the string successively and builds a LISP object and returns the object. It also returns the index of the first character in the string not read, or the length of the string (or, length +1), as the case may be."
},
{
"code": null,
"e": 87804,
"s": 87744,
"text": "parse-integer string & key :start :end :radix :junk-allowed"
},
{
"code": null,
"e": 87994,
"s": 87804,
"text": "It examines the substring of string delimited by :start and :end (default to the beginning and end of the string). It skips over whitespace characters and then attempts to parse an integer."
},
{
"code": null,
"e": 88057,
"s": 87994,
"text": "read-byte binary-input-stream & optional eof-error-p eof-value"
},
{
"code": null,
"e": 88146,
"s": 88057,
"text": "It reads one byte from the binary-input-stream and returns it in the form of an integer."
},
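{
"code": null,
"e": null,
"s": null,
"text": "For instance, read-from-string and parse-integer from the table above can be used as in this short sketch (added for illustration) −"
},
{
"code": null,
"e": null,
"s": null,
"text": "(write (read-from-string \"(a b c)\"))            ; prints (A B C)\n(terpri)\n(write (parse-integer \"  42  \"))                ; prints 42\n(terpri)\n(write (parse-integer \"42abc\" :junk-allowed t)) ; prints 42, parsing stops at the junk"
},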
{
"code": null,
"e": 88238,
"s": 88146,
"text": "The read function is used for taking input from the keyboard. It may not take any argument."
},
{
"code": null,
"e": 88279,
"s": 88238,
"text": "For example, consider the code snippet −"
},
{
"code": null,
"e": 88305,
"s": 88279,
"text": "(write ( + 15.0 (read)))\n"
},
{
"code": null,
"e": 88367,
"s": 88305,
"text": "Assume the user enters 10.2 from the STDIN Input, it returns,"
},
{
"code": null,
"e": 88373,
"s": 88367,
"text": "25.2\n"
},
{
"code": null,
"e": 88496,
"s": 88373,
"text": "The read function reads characters from an input stream and interprets them by parsing as representations of Lisp objects."
},
{
"code": null,
"e": 88578,
"s": 88496,
"text": "Create a new source code file named main.lisp and type the following code in it −"
},
{
"code": null,
"e": 88838,
"s": 88578,
"text": "; the function AreaOfCircle\n; calculates area of a circle\n; when the radius is input from keyboard\n\n(defun AreaOfCircle()\n(terpri)\n(princ \"Enter Radius: \")\n(setq radius (read))\n(setq area (* 3.1416 radius radius))\n(princ \"Area: \")\n(write area))\n(AreaOfCircle)"
},
{
"code": null,
"e": 88899,
"s": 88838,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 88945,
"s": 88899,
"text": "Enter Radius: 5 (STDIN Input)\nArea: 78.53999\n"
},
{
"code": null,
"e": 89026,
"s": 88945,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 89450,
"s": 89026,
"text": "(with-input-from-string (stream \"Welcome to Tutorials Point!\")\n (print (read-char stream))\n (print (read-char stream))\n (print (read-char stream))\n (print (read-char stream))\n (print (read-char stream))\n (print (read-char stream))\n (print (read-char stream))\n (print (read-char stream))\n (print (read-char stream))\n (print (read-char stream))\n (print (peek-char nil stream nil 'the-end))\n (values)\n)"
},
{
"code": null,
"e": 89511,
"s": 89450,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 89575,
"s": 89511,
"text": "#\\W \n#\\e \n#\\l \n#\\c \n#\\o \n#\\m \n#\\e \n#\\Space \n#\\t \n#\\o \n#\\Space \n"
},
{
"code": null,
"e": 89774,
"s": 89575,
"text": "All output functions in LISP take an optional argument called output-stream, where the output is sent. If not mentioned or nil, output-stream defaults to the value of the variable *standard-output*."
},
{
"code": null,
"e": 89853,
"s": 89774,
"text": "The following table provides the most commonly used output functions of LISP −"
},
{
"code": null,
"e": 89953,
"s": 89853,
"text": "write object & key :stream :escape :radix :base :circle :pretty :level :length :case :gensym :array"
},
{
"code": null,
"e": 90114,
"s": 89953,
"text": "write object & key :stream :escape :radix :base :circle :pretty :level :length :case :gensym :array :readably :right-margin :miser-width :lines :pprint-dispatch"
},
{
"code": null,
"e": 90306,
"s": 90114,
"text": "Both write the object to the output stream specified by :stream, which defaults to the value of *standard-output*. Other values default to the corresponding global variables set for printing."
},
{
"code": null,
"e": 90344,
"s": 90306,
"text": "prin1 object & optional output-stream"
},
{
"code": null,
"e": 90382,
"s": 90344,
"text": "print object & optional output-stream"
},
{
"code": null,
"e": 90421,
"s": 90382,
"text": "pprint object & optional output-stream"
},
{
"code": null,
"e": 90459,
"s": 90421,
"text": "princ object & optional output-stream"
},
{
"code": null,
"e": 90589,
"s": 90459,
"text": "All these functions outputs the printed representation of object to output-stream. However, the following differences are there −"
},
{
"code": null,
"e": 90628,
"s": 90589,
"text": "prin1 returns the object as its value."
},
{
"code": null,
"e": 90760,
"s": 90667,
"text": "print prints the object with a preceding newline and followed by a space. It returns object."
},
{
"code": null,
"e": 90922,
"s": 90853,
"text": "pprint is just like print except that the trailing space is omitted."
},
{
"code": null,
"e": 91063,
"s": 90991,
"text": "princ is just like prin1 except that the output has no escape character"
},
{
"code": null,
"e": 91238,
"s": 91135,
"text": "write-to-string object & key :escape :radix :base :circle :pretty :level :length :case :gensym :array "
},
{
"code": null,
"e": 91401,
"s": 91238,
"text": "write-to-string object & key :escape :radix :base :circle :pretty :level :length :case :gensym :array :readably :right-margin :miser-width :lines :pprint-dispatch"
},
{
"code": null,
"e": 91424,
"s": 91401,
"text": "prin1-to-string object"
},
{
"code": null,
"e": 91447,
"s": 91424,
"text": "princ-to-string object"
},
{
"code": null,
"e": 91550,
"s": 91447,
"text": "The object is effectively printed and the output characters are made into a string, which is returned."
},
{
"code": null,
"e": 91596,
"s": 91550,
"text": "write-char character & optional output-stream"
},
{
"code": null,
"e": 91662,
"s": 91596,
"text": "It outputs the character to output-stream, and returns character."
},
{
"code": null,
"e": 91725,
"s": 91662,
"text": "write-string string & optional output-stream & key :start :end"
},
{
"code": null,
"e": 91809,
"s": 91725,
"text": "It writes the characters of the specified substring of string to the output-stream."
},
{
"code": null,
"e": 91870,
"s": 91809,
"text": "write-line string & optional output-stream & key :start :end"
},
{
"code": null,
"e": 91943,
"s": 91870,
"text": "It works the same way as write-string, but outputs a newline afterwards."
},
{
"code": null,
"e": 91975,
"s": 91943,
"text": "terpri & optional output-stream"
},
{
"code": null,
"e": 92014,
"s": 91975,
"text": "It outputs a newline to output-stream."
},
{
"code": null,
"e": 92050,
"s": 92014,
"text": "fresh-line & optional output-stream"
},
{
"code": null,
"e": 92129,
"s": 92050,
"text": "it outputs a newline only if the stream is not already at the start of a line."
},
{
"code": null,
"e": 92168,
"s": 92129,
"text": "finish-output & optional output-stream"
},
{
"code": null,
"e": 92206,
"s": 92168,
"text": "force-output & optional output-stream"
},
{
"code": null,
"e": 92244,
"s": 92206,
"text": "clear-output & optional output-stream"
},
{
"code": null,
"e": 92384,
"s": 92244,
"text": "The function finish-output attempts to ensure that all output sent to output-stream has reached its destination, and only then returns nil."
},
{
"code": null,
"e": 92663,
"s": 92524,
"text": "The function force-output initiates the emptying of any internal buffers but returns nil without waiting for completion or acknowledgment."
},
{
"code": null,
"e": 92970,
"s": 92802,
"text": "The function clear-output attempts to abort any outstanding output operation in progress in order to allow as little output as possible to continue to the destination."
},
{
"code": null,
"e": 93178,
"s": 93138,
"text": "write-byte integer binary-output-stream"
},
{
"code": null,
"e": 93224,
"s": 93178,
"text": "It writes one byte, the value of the integer."
},
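{
"code": null,
"e": null,
"s": null,
"text": "The difference between prin1 and princ described above can be seen in the following small sketch (added for illustration): prin1 keeps the escape characters, princ does not −"
},
{
"code": null,
"e": null,
"s": null,
"text": "(prin1 \"Hello World\")   ; prints \"Hello World\" including the quotes\n(terpri)\n(princ \"Hello World\")   ; prints Hello World without the quotes\n(terpri)\n(write (prin1-to-string '(a b c)))   ; prints \"(A B C)\", the printed form as a string"
},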
{
"code": null,
"e": 93305,
"s": 93224,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 93586,
"s": 93305,
"text": "; this program inputs a numbers and doubles it\n(defun DoubleNumber()\n (terpri)\n (princ \"Enter Number : \")\n (setq n1 (read))\n (setq doubled (* 2.0 n1))\n (princ \"The Number: \")\n (write n1)\n (terpri)\n (princ \"The Number Doubled: \")\n (write doubled)\n)\n(DoubleNumber)"
},
{
"code": null,
"e": 93647,
"s": 93586,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 93733,
"s": 93647,
"text": "Enter Number : 3456.78 (STDIN Input)\nThe Number: 3456.78\nThe Number Doubled: 6913.56\n"
},
{
"code": null,
"e": 93828,
"s": 93733,
"text": "The function format is used for producing nicely formatted text. It has the following syntax −"
},
{
"code": null,
"e": 93879,
"s": 93828,
"text": "format destination control-string &rest arguments\n"
},
{
"code": null,
"e": 93886,
"s": 93879,
"text": "where,"
},
{
"code": null,
"e": 93917,
"s": 93886,
"text": "destination is standard output"
},
{
"code": null,
"e": 93994,
"s": 93917,
"text": "control-string holds the characters to be output and the printing directive."
},
{
"code": null,
"e": 94198,
"s": 93994,
"text": "A format directive consists of a tilde (~), optional prefix parameters separated by commas, optional colon (:) and at-sign (@) modifiers, and a single character indicating what kind of directive this is."
},
{
"code": null,
"e": 94290,
"s": 94198,
"text": "The prefix parameters are generally integers, notated as optionally signed decimal numbers."
},
{
"code": null,
"e": 94371,
"s": 94290,
"text": "The following table provides brief description of the commonly used directives −"
},
{
"code": null,
"e": 94374,
"s": 94371,
"text": "~A"
},
{
"code": null,
"e": 94406,
"s": 94374,
"text": "Is followed by ASCII arguments."
},
{
"code": null,
"e": 94409,
"s": 94406,
"text": "~S"
},
{
"code": null,
"e": 94439,
"s": 94409,
"text": "Is followed by S-expressions."
},
{
"code": null,
"e": 94442,
"s": 94439,
"text": "~D"
},
{
"code": null,
"e": 94465,
"s": 94442,
"text": "For decimal arguments."
},
{
"code": null,
"e": 94468,
"s": 94465,
"text": "~B"
},
{
"code": null,
"e": 94490,
"s": 94468,
"text": "For binary arguments."
},
{
"code": null,
"e": 94493,
"s": 94490,
"text": "~O"
},
{
"code": null,
"e": 94514,
"s": 94493,
"text": "For octal arguments."
},
{
"code": null,
"e": 94517,
"s": 94514,
"text": "~X"
},
{
"code": null,
"e": 94544,
"s": 94517,
"text": "For hexadecimal arguments."
},
{
"code": null,
"e": 94547,
"s": 94544,
"text": "~C"
},
{
"code": null,
"e": 94572,
"s": 94547,
"text": "For character arguments."
},
{
"code": null,
"e": 94575,
"s": 94572,
"text": "~F"
},
{
"code": null,
"e": 94618,
"s": 94575,
"text": "For Fixed-format floating-point arguments."
},
{
"code": null,
"e": 94621,
"s": 94618,
"text": "~E"
},
{
"code": null,
"e": 94659,
"s": 94621,
"text": "Exponential floating-point arguments."
},
{
"code": null,
"e": 94662,
"s": 94659,
"text": "~$"
},
{
"code": null,
"e": 94699,
"s": 94662,
"text": "Dollar and floating point arguments."
},
{
"code": null,
"e": 94702,
"s": 94699,
"text": "~%"
},
{
"code": null,
"e": 94725,
"s": 94702,
"text": "A new line is printed."
},
{
"code": null,
"e": 94728,
"s": 94725,
"text": "~*"
},
{
"code": null,
"e": 94754,
"s": 94728,
"text": "Next argument is ignored."
},
{
"code": null,
"e": 94757,
"s": 94754,
"text": "~?"
},
{
"code": null,
"e": 94835,
"s": 94757,
"text": "Indirection. The next argument must be a string, and the one after it a list."
},
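{
"code": null,
"e": null,
"s": null,
"text": "Several of these directives, including prefix parameters, can be combined in a single control string, as in this short sketch (added for illustration) −"
},
{
"code": null,
"e": null,
"s": null,
"text": "(format t \"~D ~A ~X ~B~%\" 10 'hello 255 5)   ; prints: 10 HELLO FF 101\n(format t \"~5D|~%\" 42)                       ; prints:    42| (padded to width 5)\n(format t \"~,2F~%\" 3.14159)                  ; prints: 3.14"
},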
{
"code": null,
"e": 94892,
"s": 94835,
"text": "Let us rewrite the program calculating a circle's area −"
},
{
"code": null,
"e": 94973,
"s": 94892,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 95169,
"s": 94973,
"text": "(defun AreaOfCircle()\n (terpri)\n (princ \"Enter Radius: \")\n (setq radius (read))\n (setq area (* 3.1416 radius radius))\n (format t \"Radius: = ~F~% Area = ~F\" radius area)\n)\n(AreaOfCircle)"
},
{
"code": null,
"e": 95230,
"s": 95169,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 95300,
"s": 95230,
"text": "Enter Radius: 10.234 (STDIN Input)\nRadius: = 10.234\nArea = 329.03473\n"
},
{
"code": null,
"e": 95635,
"s": 95300,
"text": "We have discussed about how standard input and output is handled by common LISP. All these functions work for reading from and writing into text and binary files too. Only difference is in this case the stream we use is not standard input or output, but a stream created for the specific purpose of writing into or reading from files."
},
{
"code": null,
"e": 95741,
"s": 95635,
"text": "In this chapter we will see how LISP can create, open, close text or binary files for their data storage."
},
{
"code": null,
"e": 95920,
"s": 95741,
"text": "A file represents a sequence of bytes, does not matter if it is a text file or binary file. This chapter will take you through important functions/macros for the file management."
},
{
"code": null,
"e": 96167,
"s": 95920,
"text": "You can use the open function to create a new file or to open an existing file. It is the most basic function for opening a file. However, the with-open-file is usually more convenient and more commonly used, as we will see later in this section."
},
{
"code": null,
"e": 96343,
"s": 96167,
"text": "When a file is opened, a stream object is constructed to represent it in the LISP environment. All operations on the stream are basically equivalent to operations on the file."
},
{
"code": null,
"e": 96377,
"s": 96343,
"text": "Syntax for the open function is −"
},
{
"code": null,
"e": 96469,
"s": 96377,
"text": "open filename &key :direction :element-type :if-exists :if-does-not-exist :external-format\n"
},
{
"code": null,
"e": 96476,
"s": 96469,
"text": "where,"
},
{
"code": null,
"e": 96547,
"s": 96476,
"text": "The filename argument is the name of the file to be opened or created."
},
{
"code": null,
"e": 96692,
"s": 96618,
"text": "The keyword arguments specify the type of stream and error handling ways."
},
{
"code": null,
"e": 97079,
"s": 96766,
"text": "The :direction keyword specifies whether the stream should handle input, output, or both, it takes the following values −\n\n:input - for input streams (default value)\n:output - for output streams\n:io - for bidirectional streams\n:probe - for just checking a files existence; the stream is opened and then closed.\n\n"
},
{
"code": null,
"e": 97657,
"s": 97577,
"text": "The :element-type specifies the type of the unit of transaction for the stream."
},
{
"code": null,
"e": 98378,
"s": 97737,
"text": "The :if-exists argument specifies the action to be taken if the :direction is :output or :io and a file of the specified name already exists. If the direction is :input or :probe, this argument is ignored. It takes the following values −\n\n:error - it signals an error.\n:new-version - it creates a new file with the same name but larger version number.\n:rename - it renames the existing file.\n:rename-and-delete - it renames the existing file and then deletes it.\n:append - it appends to the existing file.\n:supersede - it supersedes the existing file.\nnil - it does not create a file or even a stream just returns nil to indicate failure.\n\n"
},
{
"code": null,
"e": 99781,
"s": 99416,
"text": "The :if-does-not-exist argument specifies the action to be taken if a file of the specified name does not already exist. It takes the following values −\n\n:error - it signals an error.\n:create - it creates an empty file with the specified name and then uses it.\nnil - it does not create a file or even a stream, but instead simply returns nil to indicate failure.\n\n"
},
{
"code": null,
"e": 100466,
"s": 100352,
"text": "The :external-format argument specifies an implementation-recognized scheme for representing characters in files."
},
{
"code": null,
"e": 100661,
"s": 100580,
"text": "For example, you can open a file named myfile.txt stored in the /tmp folder as −"
},
{
"code": null,
"e": 100687,
"s": 100661,
"text": "(open \"/tmp/myfile.txt\")\n"
},
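{
"code": null,
"e": null,
"s": null,
"text": "As an illustrative sketch (assuming the /tmp directory is writable), the keyword arguments described above can be combined, for example, to open a file for output, appending to it if it already exists and creating it if it does not −"
},
{
"code": null,
"e": null,
"s": null,
"text": "(let ((stream (open \"/tmp/myfile.txt\" :direction :output\n                                      :if-exists :append\n                                      :if-does-not-exist :create)))\n   (write-line \"one more line\" stream)\n   (close stream))"
},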
{
"code": null,
"e": 100906,
"s": 100687,
"text": "The with-open-file allows reading or writing into a file, using the stream variable associated with the read/write transaction. Once the job is done, it automatically closes the file. It is extremely convenient to use."
},
{
"code": null,
"e": 100936,
"s": 100906,
"text": "It has the following syntax −"
},
{
"code": null,
"e": 101007,
"s": 100936,
"text": "with-open-file (stream filename {options}*)\n {declaration}* {form}*\n"
},
{
"code": null,
"e": 101099,
"s": 101007,
"text": "filename is the name of the file to be opened; it may be a string, a pathname, or a stream."
},
{
"code": null,
"e": 101259,
"s": 101191,
"text": "The options are same as the keyword arguments to the function open."
},
{
"code": null,
"e": 101408,
"s": 101327,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 101711,
"s": 101408,
"text": "(with-open-file (stream \"/tmp/myfile.txt\" :direction :output)\n (format stream \"Welcome to Tutorials Point!\")\n (terpri stream)\n (format stream \"This is a tutorials database\")\n (terpri stream)\n (format stream \"Submit your Tutorials, White Papers and Articles into our Tutorials Directory.\")\n)"
},
{
"code": null,
"e": 101872,
"s": 101711,
"text": "Please note that all input-output functions discussed in the previous chapter, such as, terpri and format are working for writing into the file we created here."
},
{
"code": null,
"e": 102023,
"s": 101872,
"text": "When you execute the code, it does not return anything; however, our data is written into the file. The :direction :output keywords allows us do this."
},
{
"code": null,
"e": 102089,
"s": 102023,
"text": "However, we can read from this file using the read-line function."
},
{
"code": null,
"e": 102170,
"s": 102089,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 102359,
"s": 102170,
"text": "(let ((in (open \"/tmp/myfile.txt\" :if-does-not-exist nil)))\n (when in\n (loop for line = (read-line in nil)\n \n while line do (format t \"~a~%\" line))\n (close in)\n )\n)"
},
{
"code": null,
"e": 102420,
"s": 102359,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 102557,
"s": 102420,
"text": "Welcome to Tutorials Point!\nThis is a tutorials database\nSubmit your Tutorials, White Papers and Articles into our Tutorials Directory.\n"
},
{
"code": null,
"e": 102593,
"s": 102557,
"text": "The close function closes a stream."
},
{
"code": null,
"e": 102702,
"s": 102593,
"text": "Structures are one of the user-defined data type, which allows you to combine data items of different kinds."
},
{
"code": null,
"e": 102871,
"s": 102702,
"text": "Structures are used to represent a record. Suppose you want to keep track of your books in a library. You might want to track the following attributes about each book −"
},
{
"code": null,
"e": 102877,
"s": 102871,
"text": "Title"
},
{
"code": null,
"e": 102884,
"s": 102877,
"text": "Author"
},
{
"code": null,
"e": 102892,
"s": 102884,
"text": "Subject"
},
{
"code": null,
"e": 102900,
"s": 102892,
"text": "Book ID"
},
{
"code": null,
"e": 103072,
"s": 102900,
"text": "The defstruct macro in LISP allows you to define an abstract record structure. The defstruct statement defines a new data type, with more than one member for your program."
},
{
"code": null,
"e": 103209,
"s": 103072,
"text": "To discuss the format of the defstruct macro, let us write the definition of the Book structure. We could define the book structure as −"
},
{
"code": null,
"e": 103274,
"s": 103209,
"text": "(defstruct book \n title \n author \n subject \n book-id \n)\n"
},
{
"code": null,
"e": 103408,
"s": 103274,
"text": "The above declaration creates a book structure with four named components. So every book created will be an object of this structure."
},
{
"code": null,
"e": 103542,
"s": 103408,
"text": "The above declaration creates a book structure with four named components. So every book created will be an object of this structure."
},
{
"code": null,
"e": 103809,
"s": 103542,
"text": "It defines four functions named book-title, book-author, book-subject and book-book-id, which will take one argument, a book structure, and will return the fields title, author, subject and book-id of the book object. These functions are called the access functions."
},
{
"code": null,
"e": 104076,
"s": 103809,
"text": "It defines four functions named book-title, book-author, book-subject and book-book-id, which will take one argument, a book structure, and will return the fields title, author, subject and book-id of the book object. These functions are called the access functions."
},
{
"code": null,
"e": 104160,
"s": 104076,
"text": "The symbol book becomes a data type and you can check it using the typep predicate."
},
{
"code": null,
"e": 104244,
"s": 104160,
"text": "The symbol book becomes a data type and you can check it using the typep predicate."
},
{
"code": null,
"e": 104386,
"s": 104244,
"text": "There will also be an implicit function named book-p, which is a predicate and will be true if its argument is a book and is false otherwise."
},
{
"code": null,
"e": 104528,
"s": 104386,
"text": "There will also be an implicit function named book-p, which is a predicate and will be true if its argument is a book and is false otherwise."
},
{
"code": null,
"e": 104727,
"s": 104528,
"text": "Another implicit function named make-book will be created, which is a constructor, which, when invoked, will create a data structure with four components, suitable for use with the access functions."
},
{
"code": null,
"e": 104926,
"s": 104727,
"text": "Another implicit function named make-book will be created, which is a constructor, which, when invoked, will create a data structure with four components, suitable for use with the access functions."
},
{
"code": null,
"e": 105020,
"s": 104926,
"text": "The #S syntax refers to a structure, and you can use it to read or print instances of a book."
},
{
"code": null,
"e": 105114,
"s": 105020,
"text": "The #S syntax refers to a structure, and you can use it to read or print instances of a book."
},
{
"code": null,
"e": 105324,
"s": 105114,
"text": "An implicit function named copy-book of one argument is also defined that. It takes a book object and creates another book object, which is a copy of the first one. This function is called the copier function."
},
{
"code": null,
"e": 105534,
"s": 105324,
"text": "An implicit function named copy-book of one argument is also defined that. It takes a book object and creates another book object, which is a copy of the first one. This function is called the copier function."
},
{
"code": null,
"e": 105598,
"s": 105534,
"text": "You can use setf to alter the components of a book, for example"
},
{
"code": null,
"e": 105662,
"s": 105598,
"text": "You can use setf to alter the components of a book, for example"
},
{
"code": null,
"e": 105695,
"s": 105662,
"text": "(setf (book-book-id book3) 100)\n"
},
{
"code": null,
"e": 105776,
"s": 105695,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 106226,
"s": 105776,
"text": "(defstruct book \n title \n author \n subject \n book-id \n)\n\n( setq book1 (make-book :title \"C Programming\"\n :author \"Nuha Ali\" \n :subject \"C-Programming Tutorial\"\n :book-id \"478\")\n)\n\n( setq book2 (make-book :title \"Telecom Billing\"\n :author \"Zara Ali\" \n :subject \"C-Programming Tutorial\"\n :book-id \"501\")\n) \n\n(write book1)\n(terpri)\n(write book2)\n(setq book3( copy-book book1))\n(setf (book-book-id book3) 100) \n(terpri)\n(write book3)"
},
{
"code": null,
"e": 106287,
"s": 106226,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 106588,
"s": 106287,
"text": "#S(BOOK :TITLE \"C Programming\" :AUTHOR \"Nuha Ali\" :SUBJECT \"C-Programming Tutorial\" :BOOK-ID \"478\")\n#S(BOOK :TITLE \"Telecom Billing\" :AUTHOR \"Zara Ali\" :SUBJECT \"C-Programming Tutorial\" :BOOK-ID \"501\")\n#S(BOOK :TITLE \"C Programming\" :AUTHOR \"Nuha Ali\" :SUBJECT \"C-Programming Tutorial\" :BOOK-ID 100)\n"
},
{
"code": null,
"e": 106897,
"s": 106588,
"text": "In general term of programming languages, a package is designed for providing a way to keep one set of names separate from another. The symbols declared in one package will not conflict with the same symbols declared in another. This way packages reduce the naming conflicts between independent code modules."
},
{
"code": null,
"e": 107092,
"s": 106897,
"text": "The LISP reader maintains a table of all the symbols it has found. When it finds a new character sequence, it creates a new symbol and stores in the symbol table. This table is called a package."
},
{
"code": null,
"e": 107159,
"s": 107092,
"text": "The current package is referred by the special variable *package*."
},
{
"code": null,
"e": 107203,
"s": 107159,
"text": "There are two predefined packages in LISP −"
},
{
"code": null,
"e": 107282,
"s": 107203,
"text": "common-lisp − it contains symbols for all the functions and variables defined."
},
{
"code": null,
"e": 107361,
"s": 107282,
"text": "common-lisp − it contains symbols for all the functions and variables defined."
},
{
"code": null,
"e": 107499,
"s": 107361,
"text": "common-lisp-user − it uses the common-lisp package and all other packages with editing and debugging tools; it is called cl-user in short"
},
{
"code": null,
"e": 107637,
"s": 107499,
"text": "common-lisp-user − it uses the common-lisp package and all other packages with editing and debugging tools; it is called cl-user in short"
},
{
"code": null,
"e": 107748,
"s": 107637,
"text": "The following table provides most commonly used functions used for creating, using and manipulating packages −"
},
{
"code": null,
"e": 107795,
"s": 107748,
"text": "make-package package-name &key :nicknames :use"
},
{
"code": null,
"e": 107865,
"s": 107795,
"text": "It creates and returns a new package with the specified package name."
},
{
"code": null,
"e": 107910,
"s": 107865,
"text": "in-package package-name &key :nicknames :use"
},
{
"code": null,
"e": 107937,
"s": 107910,
"text": "Makes the package current."
},
{
"code": null,
"e": 107953,
"s": 107937,
"text": "in-package name"
},
{
"code": null,
"e": 108052,
"s": 107953,
"text": "This macro causes *package* to be set to the package named name, which must be a symbol or string."
},
{
"code": null,
"e": 108070,
"s": 108052,
"text": "find-package name"
},
{
"code": null,
"e": 108202,
"s": 108070,
"text": "It searches for a package. The package with that name or nickname is returned; if no such package exists, find-package returns nil."
},
{
"code": null,
"e": 108258,
"s": 108202,
"text": "rename-package package new-name &optional new-nicknames"
},
{
"code": null,
"e": 108280,
"s": 108258,
"text": "it renames a package."
},
{
"code": null,
"e": 108298,
"s": 108280,
"text": "list-all-packages"
},
{
"code": null,
"e": 108384,
"s": 108298,
"text": "This function returns a list of all packages that currently exist in the Lisp system."
},
{
"code": null,
"e": 108407,
"s": 108384,
"text": "delete-package package"
},
{
"code": null,
"e": 108429,
"s": 108407,
"text": "It deletes a package."
},
{
"code": null,
"e": 108529,
"s": 108429,
"text": "The defpackage function is used for creating an user defined package. It has the following syntax −"
},
{
"code": null,
"e": 108619,
"s": 108529,
"text": "(defpackage :package-name\n (:use :common-lisp ...)\n (:export :symbol1 :symbol2 ...)\n)"
},
{
"code": null,
"e": 108626,
"s": 108619,
"text": "Where,"
},
{
"code": null,
"e": 108667,
"s": 108626,
"text": "package-name is the name of the package."
},
{
"code": null,
"e": 108708,
"s": 108667,
"text": "package-name is the name of the package."
},
{
"code": null,
"e": 108840,
"s": 108708,
"text": "The :use keyword specifies the packages that this package needs, i.e., packages that define functions used by code in this package."
},
{
"code": null,
"e": 108972,
"s": 108840,
"text": "The :use keyword specifies the packages that this package needs, i.e., packages that define functions used by code in this package."
},
{
"code": null,
"e": 109049,
"s": 108972,
"text": "The :export keyword specifies the symbols that are external in this package."
},
{
"code": null,
"e": 109126,
"s": 109049,
"text": "The :export keyword specifies the symbols that are external in this package."
},
{
"code": null,
"e": 109223,
"s": 109126,
"text": "The make-package function is also used for creating a package. The syntax for this function is −"
},
{
"code": null,
"e": 109271,
"s": 109223,
"text": "make-package package-name &key :nicknames :use\n"
},
{
"code": null,
"e": 109326,
"s": 109271,
"text": "the arguments and keywords has same meaning as before."
},
{
"code": null,
"e": 109496,
"s": 109326,
"text": "Once you have created a package, you can use the code in this package, by making it the current package. The in-package macro makes a package current in the environment."
},
{
"code": null,
"e": 109577,
"s": 109496,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 110024,
"s": 109577,
"text": "(make-package :tom)\n(make-package :dick)\n(make-package :harry)\n(in-package tom)\n(defun hello () \n (write-line \"Hello! This is Tom's Tutorials Point\")\n)\n\n(hello)\n(in-package dick)\n(defun hello () \n (write-line \"Hello! This is Dick's Tutorials Point\")\n)\n\n(hello)\n(in-package harry)\n(defun hello () \n (write-line \"Hello! This is Harry's Tutorials Point\")\n)\n\n(hello)\n(in-package tom)\n(hello)\n(in-package dick)\n(hello)\n(in-package harry)\n(hello)"
},
{
"code": null,
"e": 110085,
"s": 110024,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 110200,
"s": 110085,
"text": "Hello! This is Tom's Tutorials Point\nHello! This is Dick's Tutorials Point\nHello! This is Harry's Tutorials Point\n"
},
{
"code": null,
"e": 110299,
"s": 110200,
"text": "The delete-package macro allows you to delete a package. The following example demonstrates this −"
},
{
"code": null,
"e": 110380,
"s": 110299,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 110849,
"s": 110380,
"text": "(make-package :tom)\n(make-package :dick)\n(make-package :harry)\n(in-package tom)\n(defun hello () \n (write-line \"Hello! This is Tom's Tutorials Point\")\n)\n\n(in-package dick)\n(defun hello () \n (write-line \"Hello! This is Dick's Tutorials Point\")\n)\n\n(in-package harry)\n(defun hello () \n (write-line \"Hello! This is Harry's Tutorials Point\")\n)\n\n(in-package tom)\n(hello)\n(in-package dick)\n(hello)\n(in-package harry)\n(hello)\n(delete-package tom)\n(in-package tom)\n(hello)"
},
{
"code": null,
"e": 110910,
"s": 110849,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 111063,
"s": 110910,
"text": "Hello! This is Tom's Tutorials Point\nHello! This is Dick's Tutorials Point\nHello! This is Harry's Tutorials Point\n*** - EVAL: variable TOM has no value\n"
},
{
"code": null,
"e": 111125,
"s": 111063,
"text": "In Common LISP terminology, exceptions are called conditions."
},
{
"code": null,
"e": 111336,
"s": 111125,
"text": "In fact, conditions are more general than exceptions in traditional programming languages, because a condition represents any occurrence, error, or not, which might affect various levels of function call stack."
},
{
"code": null,
"e": 111552,
"s": 111336,
"text": "Condition handling mechanism in LISP, handles such situations in such a way that conditions are used to signal warning (say by printing an warning) while the upper level code on the call stack can continue its work."
},
{
"code": null,
"e": 111608,
"s": 111552,
"text": "The condition handling system in LISP has three parts −"
},
{
"code": null,
"e": 111631,
"s": 111608,
"text": "Signalling a condition"
},
{
"code": null,
"e": 111654,
"s": 111631,
"text": "Handling the condition"
},
{
"code": null,
"e": 111674,
"s": 111654,
"text": "Restart the process"
},
{
"code": null,
"e": 111795,
"s": 111674,
"text": "Let us take up an example of handling a condition arising out of divide by zero condition, to explain the concepts here."
},
{
"code": null,
"e": 111859,
"s": 111795,
"text": "You need to take the following steps for handling a condition −"
},
{
"code": null,
"e": 112583,
"s": 111859,
"text": "Define the Condition − \"A condition is an object whose class indicates the general nature of the condition and whose instance data carries information about the details of the particular circumstances that lead to the condition being signalled\".\nThe define-condition macro is used for defining a condition, which has the following syntax −\n(define-condition condition-name (error)\n ((text :initarg :text :reader text))\n)\n\nNew condition objects are created with MAKE-CONDITION macro, which initializes the slots of the new condition based on the :initargs argument.\nIn our example, the following code defines the condition −\n(define-condition on-division-by-zero (error)\n ((message :initarg :message :reader message))\n)\n"
},
{
"code": null,
"e": 112829,
"s": 112583,
"text": "Define the Condition − \"A condition is an object whose class indicates the general nature of the condition and whose instance data carries information about the details of the particular circumstances that lead to the condition being signalled\"."
},
{
"code": null,
"e": 112923,
"s": 112829,
"text": "The define-condition macro is used for defining a condition, which has the following syntax −"
},
{
"code": null,
"e": 113007,
"s": 112923,
"text": "(define-condition condition-name (error)\n ((text :initarg :text :reader text))\n)\n"
},
{
"code": null,
"e": 113150,
"s": 113007,
"text": "New condition objects are created with MAKE-CONDITION macro, which initializes the slots of the new condition based on the :initargs argument."
},
{
"code": null,
"e": 113209,
"s": 113150,
"text": "In our example, the following code defines the condition −"
},
{
"code": null,
"e": 113306,
"s": 113209,
"text": "(define-condition on-division-by-zero (error)\n ((message :initarg :message :reader message))\n)"
},
{
"code": null,
"e": 114160,
"s": 113306,
"text": "Writing the Handlers − a condition handler is a code that are used for handling the condition signalled thereon. It is generally written in one of the higher level functions that call the erroring function. When a condition is signalled, the signalling mechanism searches for an appropriate handler based on the condition's class.\nEach handler consists of −\n\nType specifier, that indicates the type of condition it can handle\nA function that takes a single argument, the condition\n\nWhen a condition is signalled, the signalling mechanism finds the most recently established handler that is compatible with the condition type and calls its function.\nThe macro handler-case establishes a condition handler. The basic form of a handler-case −\n(handler-case expression error-clause*)\n\nWhere, each error clause is of the form −\ncondition-type ([var]) code)\n\n"
},
{
"code": null,
"e": 114491,
"s": 114160,
"text": "Writing the Handlers − a condition handler is a code that are used for handling the condition signalled thereon. It is generally written in one of the higher level functions that call the erroring function. When a condition is signalled, the signalling mechanism searches for an appropriate handler based on the condition's class."
},
{
"code": null,
"e": 114518,
"s": 114491,
"text": "Each handler consists of −"
},
{
"code": null,
"e": 114585,
"s": 114518,
"text": "Type specifier, that indicates the type of condition it can handle"
},
{
"code": null,
"e": 114640,
"s": 114585,
"text": "A function that takes a single argument, the condition"
},
{
"code": null,
"e": 114807,
"s": 114640,
"text": "When a condition is signalled, the signalling mechanism finds the most recently established handler that is compatible with the condition type and calls its function."
},
{
"code": null,
"e": 114898,
"s": 114807,
"text": "The macro handler-case establishes a condition handler. The basic form of a handler-case −"
},
{
"code": null,
"e": 114939,
"s": 114898,
"text": "(handler-case expression error-clause*)\n"
},
{
"code": null,
"e": 114981,
"s": 114939,
"text": "Where, each error clause is of the form −"
},
{
"code": null,
"e": 115011,
"s": 114981,
"text": "condition-type ([var]) code)\n"
},
{
"code": null,
"e": 115915,
"s": 115011,
"text": "Restarting Phase\nThis is the code that actually recovers your program from errors, and condition handlers can then handle a condition by invoking an appropriate restart. The restart code is generally place in middle-level or low-level functions and the condition handlers are placed into the upper levels of the application.\nThe handler-bind macro allows you to provide a restart function, and allows you to continue at the lower level functions without unwinding the function call stack. In other words, the flow of control will still be in the lower level function.\nThe basic form of handler-bind is as follows −\n(handler-bind (binding*) form*)\n\nWhere each binding is a list of the following −\n\na condition type\na handler function of one argument\n\nThe invoke-restart macro finds and invokes the most recently bound restart function with the specified name as argument.\nYou can have multiple restarts.\n"
},
{
"code": null,
"e": 115932,
"s": 115915,
"text": "Restarting Phase"
},
{
"code": null,
"e": 116240,
"s": 115932,
"text": "This is the code that actually recovers your program from errors, and condition handlers can then handle a condition by invoking an appropriate restart. The restart code is generally place in middle-level or low-level functions and the condition handlers are placed into the upper levels of the application."
},
{
"code": null,
"e": 116483,
"s": 116240,
"text": "The handler-bind macro allows you to provide a restart function, and allows you to continue at the lower level functions without unwinding the function call stack. In other words, the flow of control will still be in the lower level function."
},
{
"code": null,
"e": 116530,
"s": 116483,
"text": "The basic form of handler-bind is as follows −"
},
{
"code": null,
"e": 116563,
"s": 116530,
"text": "(handler-bind (binding*) form*)\n"
},
{
"code": null,
"e": 116611,
"s": 116563,
"text": "Where each binding is a list of the following −"
},
{
"code": null,
"e": 116628,
"s": 116611,
"text": "a condition type"
},
{
"code": null,
"e": 116663,
"s": 116628,
"text": "a handler function of one argument"
},
{
"code": null,
"e": 116784,
"s": 116663,
"text": "The invoke-restart macro finds and invokes the most recently bound restart function with the specified name as argument."
},
{
"code": null,
"e": 116816,
"s": 116784,
"text": "You can have multiple restarts."
},
{
"code": null,
"e": 117147,
"s": 116816,
"text": "In this example, we demonstrate the above concepts by writing a function named division-function, which will create an error condition if the divisor argument is zero. We have three anonymous functions that provide three ways to come out of it - by returning a value 1, by sending a divisor 2 and recalculating, or by returning 1."
},
{
"code": null,
"e": 117228,
"s": 117147,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 118796,
"s": 117228,
"text": "(define-condition on-division-by-zero (error)\n ((message :initarg :message :reader message))\n)\n \n(defun handle-infinity ()\n (restart-case\n (let ((result 0))\n (setf result (division-function 10 0))\n (format t \"Value: ~a~%\" result)\n )\n (just-continue () nil)\n )\n)\n \n(defun division-function (value1 value2)\n (restart-case\n (if (/= value2 0)\n (/ value1 value2)\n (error 'on-division-by-zero :message \"denominator is zero\")\n )\n\n (return-zero () 0)\n (return-value (r) r)\n (recalc-using (d) (division-function value1 d))\n )\n)\n\n(defun high-level-code ()\n (handler-bind\n (\n (on-division-by-zero\n #'(lambda (c)\n (format t \"error signaled: ~a~%\" (message c))\n (invoke-restart 'return-zero)\n )\n )\n (handle-infinity)\n )\n )\n)\n\n(handler-bind\n (\n (on-division-by-zero\n #'(lambda (c)\n (format t \"error signaled: ~a~%\" (message c))\n (invoke-restart 'return-value 1)\n )\n )\n )\n (handle-infinity)\n)\n\n(handler-bind\n (\n (on-division-by-zero\n #'(lambda (c)\n (format t \"error signaled: ~a~%\" (message c))\n (invoke-restart 'recalc-using 2)\n )\n )\n )\n (handle-infinity)\n)\n\n(handler-bind\n (\n (on-division-by-zero\n #'(lambda (c)\n (format t \"error signaled: ~a~%\" (message c))\n (invoke-restart 'just-continue)\n )\n )\n )\n (handle-infinity)\n)\n\n(format t \"Done.\"))"
},
{
"code": null,
"e": 118857,
"s": 118796,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 118990,
"s": 118857,
"text": "error signaled: denominator is zero\nValue: 1\nerror signaled: denominator is zero\nValue: 5\nerror signaled: denominator is zero\nDone.\n"
},
{
"code": null,
"e": 119209,
"s": 118990,
"text": "Apart from the 'Condition System', as discussed above, Common LISP also provides various functions that may be called for signalling an error. Handling of an error, when signalled, is however, implementation-dependent."
},
{
"code": null,
"e": 119319,
"s": 119209,
"text": "The following table provides commonly used functions signalling warnings, breaks, non-fatal and fatal errors."
},
{
"code": null,
"e": 119450,
"s": 119319,
"text": "The user program specifies an error message (a string). The functions process this message and may/may not display it to the user."
},
{
"code": null,
"e": 119701,
"s": 119450,
"text": "The error messages should be constructed by applying the format function, should not contain a newline character at either the beginning or end, and need not indicate error, as the LISP system will take care of these according to its preferred style."
},
{
"code": null,
"e": 119732,
"s": 119701,
"text": "error format-string &rest args"
},
{
"code": null,
"e": 119856,
"s": 119732,
"text": "It signals a fatal error. It is impossible to continue from this kind of error; thus error will never return to its caller."
},
{
"code": null,
"e": 119917,
"s": 119856,
"text": "cerror continue-format-string error-format-string &rest args"
},
{
"code": null,
"e": 120054,
"s": 119917,
"text": "It signals an error and enters the debugger. However, it allows the program to be continued from the debugger after resolving the error."
},
{
"code": null,
"e": 120084,
"s": 120054,
"text": "warn format-string &rest args"
},
{
"code": null,
"e": 120153,
"s": 120084,
"text": "it prints an error message but normally doesn't go into the debugger"
},
{
"code": null,
"e": 120194,
"s": 120153,
"text": "break &optional format-string &rest args"
},
{
"code": null,
"e": 120342,
"s": 120194,
"text": "It prints the message and goes directly into the debugger, without allowing any possibility of interception by programmed error-handling facilities"
},
{
"code": null,
"e": 120484,
"s": 120342,
"text": "In this example, the factorial function calculates factorial of a number; however, if the argument is negative, it raises an error condition."
},
{
"code": null,
"e": 120565,
"s": 120484,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 120797,
"s": 120565,
"text": "(defun factorial (x)\n (cond ((or (not (typep x 'integer)) (minusp x))\n (error \"~S is a negative number.\" x))\n ((zerop x) 1)\n (t (* x (factorial (- x 1))))\n )\n)\n\n(write(factorial 5))\n(terpri)\n(write(factorial -1))"
},
{
"code": null,
"e": 120858,
"s": 120797,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 120894,
"s": 120858,
"text": "120\n*** - -1 is a negative number.\n"
},
{
"code": null,
"e": 121054,
"s": 120894,
"text": "Common LISP predated the advance of object-oriented programming by couple of decades. However, it object-orientation was incorporated into it at a later stage."
},
{
"code": null,
"e": 121180,
"s": 121054,
"text": "The defclass macro allows creating user-defined classes. It establishes a class as a data type. It has the following syntax −"
},
{
"code": null,
"e": 121262,
"s": 121180,
"text": "(defclass class-name (superclass-name*)\n (slot-description*)\n class-option*))"
},
{
"code": null,
"e": 121314,
"s": 121262,
"text": "The slots are variables that store data, or fields."
},
{
"code": null,
"e": 121491,
"s": 121314,
"text": "A slot-description has the form (slot-name slot-option*), where each option is a keyword followed by a name, expression and other options. Most commonly used slot options are −"
},
{
"code": null,
"e": 121515,
"s": 121491,
"text": ":accessor function-name"
},
{
"code": null,
"e": 121539,
"s": 121515,
"text": ":accessor function-name"
},
{
"code": null,
"e": 121560,
"s": 121539,
"text": ":initform expression"
},
{
"code": null,
"e": 121581,
"s": 121560,
"text": ":initform expression"
},
{
"code": null,
"e": 121597,
"s": 121581,
"text": ":initarg symbol"
},
{
"code": null,
"e": 121613,
"s": 121597,
"text": ":initarg symbol"
},
{
"code": null,
"e": 121699,
"s": 121613,
"text": "For example, let us define a Box class, with three slots length, breadth, and height."
},
{
"code": null,
"e": 121755,
"s": 121699,
"text": "(defclass Box () \n (length \n breadth \n height)\n)\n"
},
{
"code": null,
"e": 121854,
"s": 121755,
"text": "Unless the slots have values that can be accessed, read or written to, classes are pretty useless."
},
{
"code": null,
"e": 121953,
"s": 121854,
"text": "You can specify accessors for each slot when you define a class. For example, take our Box class −"
},
{
"code": null,
"e": 122073,
"s": 121953,
"text": "(defclass Box ()\n ((length :accessor length)\n (breadth :accessor breadth)\n (height :accessor height)\n )\n)"
},
{
"code": null,
"e": 122150,
"s": 122073,
"text": "You can also specify separate accessor names for reading and writing a slot."
},
{
"code": null,
"e": 122334,
"s": 122150,
"text": "(defclass Box ()\n ((length :reader get-length :writer set-length)\n (breadth :reader get-breadth :writer set-breadth)\n (height :reader get-height :writer set-height)\n )\n)"
},
{
"code": null,
"e": 122416,
"s": 122334,
"text": "The generic function make-instance creates and returns a new instance of a class."
},
{
"code": null,
"e": 122446,
"s": 122416,
"text": "It has the following syntax −"
},
{
"code": null,
"e": 122486,
"s": 122446,
"text": "(make-instance class {initarg value}*)\n"
},
{
"code": null,
"e": 122627,
"s": 122486,
"text": "Let us create a Box class, with three slots, length, breadth and height. We will use three slot accessors to set the values in these fields."
},
{
"code": null,
"e": 122708,
"s": 122627,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 123130,
"s": 122708,
"text": "(defclass box ()\n ((length :accessor box-length)\n (breadth :accessor box-breadth)\n (height :accessor box-height)\n )\n)\n(setf item (make-instance 'box))\n(setf (box-length item) 10)\n(setf (box-breadth item) 10)\n(setf (box-height item) 5)\n(format t \"Length of the Box is ~d~%\" (box-length item))\n(format t \"Breadth of the Box is ~d~%\" (box-breadth item))\n(format t \"Height of the Box is ~d~%\" (box-height item))"
},
{
"code": null,
"e": 123191,
"s": 123130,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 123264,
"s": 123191,
"text": "Length of the Box is 10\nBreadth of the Box is 10\nHeight of the Box is 5\n"
},
{
"code": null,
"e": 123410,
"s": 123264,
"text": "The defmethod macro allows you to define a method inside the class. The following example extends our Box class to include a method named volume."
},
{
"code": null,
"e": 123491,
"s": 123410,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 124177,
"s": 123491,
"text": "(defclass box ()\n ((length :accessor box-length)\n (breadth :accessor box-breadth)\n (height :accessor box-height)\n (volume :reader volume)\n )\n)\n\n; method calculating volume \n\n(defmethod volume ((object box))\n (* (box-length object) (box-breadth object)(box-height object))\n)\n\n ;setting the values \n\n(setf item (make-instance 'box))\n(setf (box-length item) 10)\n(setf (box-breadth item) 10)\n(setf (box-height item) 5)\n\n; displaying values\n\n(format t \"Length of the Box is ~d~%\" (box-length item))\n(format t \"Breadth of the Box is ~d~%\" (box-breadth item))\n(format t \"Height of the Box is ~d~%\" (box-height item))\n(format t \"Volume of the Box is ~d~%\" (volume item))"
},
{
"code": null,
"e": 124238,
"s": 124177,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 124336,
"s": 124238,
"text": "Length of the Box is 10\nBreadth of the Box is 10\nHeight of the Box is 5\nVolume of the Box is 500\n"
},
{
"code": null,
"e": 124573,
"s": 124336,
"text": "LISP allows you to define an object in terms of another object. This is called inheritance. You can create a derived class by adding features that are new or different. The derived class inherits the functionalities of the parent class."
},
{
"code": null,
"e": 124611,
"s": 124573,
"text": "The following example explains this −"
},
{
"code": null,
"e": 124692,
"s": 124611,
"text": "Create a new source code file named main.lisp and type the following code in it."
},
{
"code": null,
"e": 125604,
"s": 124692,
"text": "(defclass box ()\n ((length :accessor box-length)\n (breadth :accessor box-breadth)\n (height :accessor box-height)\n (volume :reader volume)\n )\n)\n\n; method calculating volume \n(defmethod volume ((object box))\n (* (box-length object) (box-breadth object)(box-height object))\n)\n \n;wooden-box class inherits the box class \n(defclass wooden-box (box)\n((price :accessor box-price)))\n\n;setting the values \n(setf item (make-instance 'wooden-box))\n(setf (box-length item) 10)\n(setf (box-breadth item) 10)\n(setf (box-height item) 5)\n(setf (box-price item) 1000)\n\n; displaying values\n(format t \"Length of the Wooden Box is ~d~%\" (box-length item))\n(format t \"Breadth of the Wooden Box is ~d~%\" (box-breadth item))\n(format t \"Height of the Wooden Box is ~d~%\" (box-height item))\n(format t \"Volume of the Wooden Box is ~d~%\" (volume item))\n(format t \"Price of the Wooden Box is ~d~%\" (box-price item))"
},
{
"code": null,
"e": 125665,
"s": 125604,
"text": "When you execute the code, it returns the following result −"
},
{
"code": null,
"e": 125823,
"s": 125665,
"text": "Length of the Wooden Box is 10\nBreadth of the Wooden Box is 10\nHeight of the Wooden Box is 5\nVolume of the Wooden Box is 500\nPrice of the Wooden Box is 1000\n"
}
] |
Bootstrap 4 - Code | It is used to display inline and multiline blocks of code in the document.
You can display an inline code snippet with the help of the <code> tag, and to display multiple lines of code, use the <pre> tag.
The following example demonstrates displaying inline and multiline blocks of code in the document −
<html lang = "en">
<head>
<!-- Meta tags -->
<meta charset = "utf-8">
<meta name = "viewport" content = "width = device-width, initial-scale = 1, shrink-to-fit = no">
<!-- Bootstrap CSS -->
<link rel = "stylesheet"
href = "https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css"
integrity = "sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO"
crossorigin = "anonymous">
<title>Bootstrap 4 Example</title>
</head>
<body>
<div class = "container">
<h2>Inline Code</h2>
Hello World!!! Welcome to <code>tutorialspoint</code>...
<p></p>
<h2>Multiline Code Block</h2>
<pre>
<code><p>Welcome to tutorialspoint...</p>
<p>Best resource for Online Education...</p>
</code>
</pre>
</div>
<!-- jQuery first, then Popper.js, then Bootstrap JS -->
<script src = "https://code.jquery.com/jquery-3.3.1.slim.min.js"
integrity = "sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo"
crossorigin = "anonymous">
</script>
<script src = "https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js"
integrity = "sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49"
crossorigin = "anonymous">
</script>
<script src = "https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity = "sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin = "anonymous">
</script>
</body>
</html>
It will produce the following result −
Welcome to tutorialspoint...
Best resource for Online Education...
Welcome to tutorialspoint...
Best resource for Online Education...
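Note that if the snippet you display itself contains HTML tags, the angle brackets have to be escaped as &lt; and &gt;, otherwise the browser tries to render the tags instead of showing them. A minimal sketch of such an escaped snippet −
<pre>
   <code>&lt;p&gt;This paragraph tag is shown literally&lt;/p&gt;</code>
</pre>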
You can indicate keyboard input by using the <kbd> tag and display the sample output of a program by using the <samp> tag.
Let us see an example below −
<html lang = "en">
<head>
<!-- Meta tags -->
<meta charset = "utf-8">
<meta name = "viewport" content = "width = device-width, initial-scale = 1, shrink-to-fit = no">
<!-- Bootstrap CSS -->
<link rel = "stylesheet"
href = "https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css"
integrity = "sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO"
crossorigin = "anonymous">
<title>Bootstrap 4 Example</title>
</head>
<body>
<div class = "container">
<h2>Keyboard Input</h2>
<p>Take the print of a page by using <kbd>ctrl + p</kbd></p>
<p></p>
<h2>Sample Text</h2>
Welcome to tutorialspoint!!! <samp>Best resource for Online Education...</samp>
</div>
<!-- jQuery first, then Popper.js, then Bootstrap JS -->
<script src = "https://code.jquery.com/jquery-3.3.1.slim.min.js"
integrity = "sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo"
crossorigin = "anonymous">
</script>
<script src = "https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js"
integrity = "sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49"
crossorigin = "anonymous">
</script>
<script src = "https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js"
integrity = "sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy"
crossorigin = "anonymous">
</script>
</body>
</html>
It will produce the following result −
Take the print of a page by using ctrl + p | [
{
"code": null,
"e": 1891,
"s": 1816,
"text": "It is used to display inline and multiline blocks of code in the document."
},
{
"code": null,
"e": 2017,
"s": 1891,
"text": "You can display the inline code snippet with help of <code> tag and to display the multiple lines of code, use the <pre> tag."
},
{
"code": null,
"e": 2117,
"s": 2017,
"text": "The following example specifies displaying of inline and multiline blocks of code in the document −"
},
{
"code": null,
"e": 3856,
"s": 2117,
"text": "<html lang = \"en\">\n <head>\n <!-- Meta tags -->\n <meta charset = \"utf-8\">\n <meta name = \"viewport\" content = \"width = device-width, initial-scale = 1, shrink-to-fit = no\">\n \n <!-- Bootstrap CSS -->\n <link rel = \"stylesheet\" \n href = \"https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css\" \n integrity = \"sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO\" \n crossorigin = \"anonymous\">\n \n <title>Bootstrap 4 Example</title>\n </head>\n \n <body>\n <div class = \"container\">\n <h2>Inline Code</h2>\n Hello World!!! Welcome to <code>tutorialspoint</code>...\n <p></p>\n <h2>Multiline Code Block</h2>\n \n <pre>\n <code><p>Welcome to tutorialspoint...</p>\n <p>Best resource for Online Education...</p>\n </code>\n </pre>\n </div>\n \n <!-- jQuery first, then Popper.js, then Bootstrap JS -->\n <script src = \"https://code.jquery.com/jquery-3.3.1.slim.min.js\" \n integrity = \"sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo\" \n crossorigin = \"anonymous\">\n </script>\n \n <script src = \"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js\" \n integrity = \"sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49\" \n crossorigin = \"anonymous\">\n </script>\n \n <script src = \"https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js\" \n integrity = \"sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy\" \n crossorigin = \"anonymous\">\n </script>\n \n </body>\n</html>"
},
{
"code": null,
"e": 3895,
"s": 3856,
"text": "It will produce the following result −"
},
{
"code": null,
"e": 4009,
"s": 3895,
"text": " Welcome to tutorialspoint...\n Best resource for Online Education...\n \n "
},
{
"code": null,
"e": 4038,
"s": 4009,
"text": "Welcome to tutorialspoint..."
},
{
"code": null,
"e": 4076,
"s": 4038,
"text": "Best resource for Online Education..."
},
{
"code": null,
"e": 4199,
"s": 4076,
"text": "You can indicate the input via keyboard by using <kbd> tag and display the sample output of a program by using <samp> tag."
},
{
"code": null,
"e": 4229,
"s": 4199,
"text": "Let us see an example below −"
},
{
"code": null,
"e": 5883,
"s": 4229,
"text": "<html lang = \"en\">\n <head>\n <!-- Meta tags -->\n <meta charset = \"utf-8\">\n <meta name = \"viewport\" content = \"width = device-width, initial-scale = 1, shrink-to-fit = no\">\n \n <!-- Bootstrap CSS -->\n <link rel = \"stylesheet\" \n href = \"https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css\" \n integrity = \"sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO\" \n crossorigin = \"anonymous\">\n \n <title>Bootstrap 4 Example</title>\n </head>\n \n <body>\n <div class = \"container\">\n <h2>Keyboard Input</h2>\n <p>Take the print of a page by using <kbd>ctrl + p</kbd></p>\n <p></p>\n <h2>Sample Text</h2>\n Welcome to tutorialspoint!!! <samp>Best resource for Online Education...</samp>\n </div>\n \n <!-- jQuery first, then Popper.js, then Bootstrap JS -->\n <script src = \"https://code.jquery.com/jquery-3.3.1.slim.min.js\" \n integrity = \"sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo\" \n crossorigin = \"anonymous\">\n </script>\n \n <script src = \"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js\" \n integrity = \"sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49\" \n crossorigin = \"anonymous\">\n </script>\n \n <script src = \"https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js\" \n integrity = \"sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy\" \n crossorigin = \"anonymous\">\n </script>\n \n </body>\n</html>"
},
{
"code": null,
"e": 5922,
"s": 5883,
"text": "It will produce the following result −"
},
{
"code": null,
"e": 5965,
"s": 5922,
"text": "Take the print of a page by using ctrl + p"
}
] |
How to handle a python exception within a loop? | We can handle the exception in the code by rewriting it as follows
a = []
foo = 'redbullz'
try:
    for i in foo:
        a.append(i)
    print a[8]   # 'redbullz' has only indices 0-7, so this raises an IndexError
except Exception as e:
    print e
We get the following output
list index out of range
Process finished with exit code 0
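If you want the loop itself to keep running after a bad item instead of stopping at the first error, the try/except block can be placed inside the loop body. The following is a minimal sketch (the list of values is hypothetical, chosen only so that one iteration fails) −
values = ['10', 'oops', '30']   # hypothetical input; 'oops' cannot be converted
total = 0
for v in values:
    try:
        total += int(v)                     # may raise ValueError
    except ValueError as e:
        print('skipping %r: %s' % (v, e))   # report the error and continue the loop
print(total)                                # prints 40; the loop survived the bad item
Here only the failing iteration is skipped and the remaining items are still processed. | [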
{
"code": null,
"e": 1122,
"s": 1062,
"text": "We can the exception in the code by rewriting it as follows"
},
{
"code": null,
"e": 1217,
"s": 1122,
"text": "a=[]\nfoo = 'redbullz'\ntry:\nfor i in foo:\na.append(i)\nprint a[8]\nexcept Exception as e:\nprint e"
},
{
"code": null,
"e": 1241,
"s": 1217,
"text": "We get following output"
},
{
"code": null,
"e": 1299,
"s": 1241,
"text": "list index out of range\nProcess finished with exit code 0"
}
] |
Find array whose elements are XOR of adjacent elements in given array - GeeksforGeeks | 14 Sep, 2021
Given an array arr[] consisting of N integers, the task is to re-construct an array arr[] such that the values in arr[] are obtained by doing XOR of the adjacent elements in the array. Print the array elements.
Examples:
Input: arr[ ] = {10, 11, 1, 2, 3}
Output: 1 10 3 1 3
Explanation:
At index 0, arr[0] xor arr[1] = 1
At index 1, arr[1] xor arr[2] = 10
At index 2, arr[2] xor arr[3] = 3
...
At index 4, no element is left, so it will remain as it is.
New array will be {1, 10, 3, 1, 3}
Input: arr[ ] = {5, 9, 7, 6}
Output: 12 14 1 6
Explanation:
At index 0, arr[0] xor arr[1] = 12
At index 1, arr[1] xor arr[2] = 14
At index 2, arr[2] xor arr[3] = 1
At index 3, no element is left, so it will remain as it is.
New array will be {12, 14, 1, 6}
Approach: The main idea to solve the given problem is to perform the following steps:
Traverse the given array arr[] from the 0th index to (N – 2)th index.
For each element arr[i] at ith position calculate arr[i] ^ arr[i+1] and store it at position i.
Below is the implementation of the above approach:
C++
Java
Python3
C#
Javascript
// C++ implementation// of the above approach#include <iostream>using namespace std; // Function to reconstruct the array// arr[] with xor of adjacent elementsint* game_with_number(int arr[], int n){ // Iterate through each element for (int i = 0; i < n - 1; i++) { // Store the xor of current // and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1]; } return arr;} // Function to print the arrayvoid print(int arr[], int n){ for (int i = 0; i < n; i++) { cout << arr[i] << " "; }} // Driver Codeint main(){ // Inputs int arr[] = { 10, 11, 1, 2, 3 }; // Length of the array given int n = sizeof(arr) / sizeof(arr[0]); // Function call to reconstruct the arr[] int* new_arr = game_with_number(arr, n); // Function call to print arr[] print(new_arr, n);}
// Java implementation// of the above approachimport java.io.*; class GFG{ // Function to reconstruct the array// arr[] with xor of adjacent elementsstatic int[] game_with_number(int arr[], int n){ // Iterate through each element for(int i = 0; i < n - 1; i++) { // Store the xor of current // and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1]; } return arr;} // Function to print the arraystatic void print(int arr[], int n){ for(int i = 0; i < n; i++) { System.out.print(arr[i] + " "); }} // Driver Codepublic static void main(String[] args){ // Inputs int arr[] = { 10, 11, 1, 2, 3 }; // Length of the array given int n = arr.length; // Function call to reconstruct the arr[] int[] new_arr = game_with_number(arr, n); // Function call to print arr[] print(new_arr, n);}} // This code is contributed by subhammahato348
# Python3 implementation# of the above approach # Function to reconstruct the array# arr[] with xor of adjacent elementsdef game_with_number(arr, n): # Iterate through each element for i in range(n-1): # Store the xor of current #and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1] return arr # Function to print arraydef printt(arr, n): print(*arr) # Driver Codeif __name__ == '__main__': # Inputs arr= [10, 11, 1, 2, 3] # Length of the array given n = len(arr) # Function call to reconstruct the arr[] new_arr = game_with_number(arr, n); # Function call to prarr[] printt(new_arr, n) # This code is contributed by mohit kumar 29.
// C# program for the above approachusing System; class GFG{ // Function to reconstruct the array// arr[] with xor of adjacent elementsstatic int[] game_with_number(int[] arr, int n){ // Iterate through each element for(int i = 0; i < n - 1; i++) { // Store the xor of current // and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1]; } return arr;} // Function to print the arraystatic void print(int[] arr, int n){ for(int i = 0; i < n; i++) { Console.Write(arr[i] + " "); }} // Driver Codepublic static void Main(){ // Inputs int[] arr = { 10, 11, 1, 2, 3 }; // Length of the array given int n = arr.Length; // Function call to reconstruct the arr[] int[] new_arr = game_with_number(arr, n); // Function call to print arr[] print(new_arr, n);}} // This code is contributed by target_2.
<script> // Javascript program for the above approach // Function to reconstruct the array// arr[] with xor of adjacent elementsfunction game_with_number(arr,n){ // Iterate through each element for (let i = 0; i < n - 1; i++) { // Store the xor of current // and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1]; } return arr;} // Function to print the arrayfunction print(arr,n){ for (let i = 0; i < n; i++) { document.write(arr[i]+" "); }} // Driver Code //Inputs let arr = [10, 11, 1, 2, 3 ]; // Length of the array given let n = arr.length; // Function call to reconstruct the arr[] let new_arr = game_with_number(arr, n); // Function call to print arr[] print(new_arr, n); // This code is contributed by// Potta Lokesh </script>
1 10 3 1 3
Time Complexity: O(N)
Auxiliary Space: O(1) | [
{
"code": null,
"e": 24716,
"s": 24688,
"text": "\n14 Sep, 2021"
},
{
"code": null,
"e": 24927,
"s": 24716,
"text": "Given an array arr[] consisting of N integers, the task is to re-construct an array arr[] such that the values in arr[] are obtained by doing XOR of the adjacent elements in the array. Print the array elements."
},
{
"code": null,
"e": 24937,
"s": 24927,
"text": "Examples:"
},
{
"code": null,
"e": 25200,
"s": 24937,
"text": "Input: arr[ ] = {10, 11, 1, 2, 3} Output: 1 10 3 1 3 Explanation: At index 0, arr[0] xor arr[1] = 1At index 1, arr[1] xor arr[2] = 10At index 2, arr[2] xor arr[3] = 3...At index 4, No element is left So, it will remain as it is.New Array will be {1, 10, 3, 1, 3}"
},
{
"code": null,
"e": 25452,
"s": 25200,
"text": "Input: arr[ ] = {5, 9, 7, 6}Output: 12 14 1 6 Explanation: At index 0, arr[0] xor arr[1] = 12At index 1, arr[1] xor arr[2] = 14At index 2, arr[2] xor arr[3] = 1At index 3, No element is left So, it will remain as it is.New Array will be {12, 14, 1, 6}"
},
{
"code": null,
"e": 25538,
"s": 25452,
"text": "Approach: The main idea to solve the given problem is to perform the following steps:"
},
{
"code": null,
"e": 25703,
"s": 25538,
"text": "Traverse the given array arr[] from the 0th index to (N – 2)th index.For each element arr[i] at ith position calculate arr[i] ^ arr[i+1] and store it at position i."
},
{
"code": null,
"e": 25773,
"s": 25703,
"text": "Traverse the given array arr[] from the 0th index to (N – 2)th index."
},
{
"code": null,
"e": 25869,
"s": 25773,
"text": "For each element arr[i] at ith position calculate arr[i] ^ arr[i+1] and store it at position i."
},
{
"code": null,
"e": 25920,
"s": 25869,
"text": "Below is the implementation of the above approach:"
},
{
"code": null,
"e": 25924,
"s": 25920,
"text": "C++"
},
{
"code": null,
"e": 25929,
"s": 25924,
"text": "Java"
},
{
"code": null,
"e": 25937,
"s": 25929,
"text": "Python3"
},
{
"code": null,
"e": 25940,
"s": 25937,
"text": "C#"
},
{
"code": null,
"e": 25951,
"s": 25940,
"text": "Javascript"
},
{
"code": "// C++ implementation// of the above approach#include <iostream>using namespace std; // Function to reconstruct the array// arr[] with xor of adjacent elementsint* game_with_number(int arr[], int n){ // Iterate through each element for (int i = 0; i < n - 1; i++) { // Store the xor of current // and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1]; } return arr;} // Function to print the arrayvoid print(int arr[], int n){ for (int i = 0; i < n; i++) { cout << arr[i] << \" \"; }} // Driver Codeint main(){ // Inputs int arr[] = { 10, 11, 1, 2, 3 }; // Length of the array given int n = sizeof(arr) / sizeof(arr[0]); // Function call to reconstruct the arr[] int* new_arr = game_with_number(arr, n); // Function call to print arr[] print(new_arr, n);}",
"e": 26781,
"s": 25951,
"text": null
},
{
"code": "// Java implementation// of the above approachimport java.io.*; class GFG{ // Function to reconstruct the array// arr[] with xor of adjacent elementsstatic int[] game_with_number(int arr[], int n){ // Iterate through each element for(int i = 0; i < n - 1; i++) { // Store the xor of current // and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1]; } return arr;} // Function to print the arraystatic void print(int arr[], int n){ for(int i = 0; i < n; i++) { System.out.print(arr[i] + \" \"); }} // Driver Codepublic static void main(String[] args){ // Inputs int arr[] = { 10, 11, 1, 2, 3 }; // Length of the array given int n = arr.length; // Function call to reconstruct the arr[] int[] new_arr = game_with_number(arr, n); // Function call to print arr[] print(new_arr, n);}} // This code is contributed by subhammahato348",
"e": 27706,
"s": 26781,
"text": null
},
{
"code": "# Python3 implementation# of the above approach # Function to reconstruct the array# arr[] with xor of adjacent elementsdef game_with_number(arr, n): # Iterate through each element for i in range(n-1): # Store the xor of current #and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1] return arr # Function to print arraydef printt(arr, n): print(*arr) # Driver Codeif __name__ == '__main__': # Inputs arr= [10, 11, 1, 2, 3] # Length of the array given n = len(arr) # Function call to reconstruct the arr[] new_arr = game_with_number(arr, n); # Function call to prarr[] printt(new_arr, n) # This code is contributed by mohit kumar 29.",
"e": 28414,
"s": 27706,
"text": null
},
{
"code": "// C# program for the above approachusing System; class GFG{ // Function to reconstruct the array// arr[] with xor of adjacent elementsstatic int[] game_with_number(int[] arr, int n){ // Iterate through each element for(int i = 0; i < n - 1; i++) { // Store the xor of current // and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1]; } return arr;} // Function to print the arraystatic void print(int[] arr, int n){ for(int i = 0; i < n; i++) { Console.Write(arr[i] + \" \"); }} // Driver Codepublic static void Main(){ // Inputs int[] arr = { 10, 11, 1, 2, 3 }; // Length of the array given int n = arr.Length; // Function call to reconstruct the arr[] int[] new_arr = game_with_number(arr, n); // Function call to print arr[] print(new_arr, n);}} // This code is contributed by target_2.",
"e": 29298,
"s": 28414,
"text": null
},
{
"code": " <script> // Javascript program for the above approach // Function to reconstruct the array// arr[] with xor of adjacent elementsfunction game_with_number(arr,n){ // Iterate through each element for (let i = 0; i < n - 1; i++) { // Store the xor of current // and next element in arr[i] arr[i] = arr[i] ^ arr[i + 1]; } return arr;} // Function to print the arrayfunction print(arr,n){ for (let i = 0; i < n; i++) { document.write(arr[i]+\" \"); }} // Driver Code //Inputs let arr = [10, 11, 1, 2, 3 ]; // Length of the array given let n = arr.length; // Function call to reconstruct the arr[] let new_arr = game_with_number(arr, n); // Function call to print arr[] print(new_arr, n); // This code is contributed by// Potta Lokesh </script>",
"e": 30133,
"s": 29298,
"text": null
},
{
"code": null,
"e": 30145,
"s": 30133,
"text": "1 10 3 1 3 "
},
{
"code": null,
"e": 30188,
"s": 30145,
"text": "Time Complexity: O(N)Auxiliary Space: O(1)"
}
] |
pthread_cancel() in C | The pthread_cancel() function is used to cancel one particular thread by its thread id. This function sends a cancellation request to the thread for termination. The syntax of pthread_cancel() is as follows −
int pthread_cancel(pthread_t th);
Now, let us see how to cancel threads using this function.
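(A build note, given as an assumption rather than as part of the original example: if the source file is saved as cancel_demo.c, it can be compiled on a POSIX system with the command below, since the pthread functions require linking against the pthread library.)
gcc cancel_demo.c -o cancel_demo -lpthread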
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <pthread.h>
int count = 0;
pthread_t sample_thread;
void* thread_one_func(void* p) {
while (1) {
printf("This is thread 1\n");
      sleep(1); // wait for 1 second
count++;
if (count == 5) {
//if the counter is 5, then request to cancel thread two and exit from current thread
pthread_cancel(sample_thread);
pthread_exit(NULL);
}
}
}
void* thread_two_func(void* p) {
sample_thread = pthread_self(); //store the id of thread 2
while (1) {
printf("This is thread 2\n");
      sleep(2); // wait for 2 seconds
}
}
int main() {
pthread_t t1, t2;
//create two threads
pthread_create(&t1, NULL, thread_one_func, NULL);
pthread_create(&t2, NULL, thread_two_func, NULL);
//wait for completing threads
pthread_join(t1, NULL);
pthread_join(t2, NULL);
   return 0;
}
This is thread 2
This is thread 1
This is thread 1
This is thread 2
This is thread 1
This is thread 1
This is thread 1
This is thread 2
This is thread 2
This is thread 2
This is thread 2
This is thread 2
This is thread 2
This is thread 2
This is thread 2
This is thread 2
This is thread 2
This is thread 2
This is thread 2 | [
{
"code": null,
"e": 1266,
"s": 1062,
"text": "The threa_cancel() is used to cancel one particular thread by the thread id. This function sends one cancellation request to the thread for termination. The syntax of the pthread_cancel() is like below −"
},
{
"code": null,
"e": 1300,
"s": 1266,
"text": "int pthread_cancel(pthread_t th);"
},
{
"code": null,
"e": 1359,
"s": 1300,
"text": "Now, let us see how to cancel threads using this function."
},
{
"code": null,
"e": 2261,
"s": 1359,
"text": "#include <stdio.h>\n#include <unistd.h>\n#include <sys/types.h>\n#include <pthread.h>\nint count = 0;\npthread_t sample_thread;\nvoid* thread_one_func(void* p) {\n while (1) {\n printf(\"This is thread 1\\n\");\n sleep(1); // wait for 1 seconds\n count++;\n if (count == 5) {\n //if the counter is 5, then request to cancel thread two and exit from current thread\n pthread_cancel(sample_thread);\n pthread_exit(NULL);\n }\n }\n}\nvoid* thread_two_func(void* p) {\n sample_thread = pthread_self(); //store the id of thread 2\n while (1) {\n printf(\"This is thread 2\\n\");\n sleep(2); // wit for 2 seconds\n }\n}\nmain() {\n pthread_t t1, t2;\n //create two threads\n pthread_create(&t1, NULL, thread_one_func, NULL);\n pthread_create(&t2, NULL, thread_two_func, NULL);\n //wait for completing threads\n pthread_join(t1, NULL);\n pthread_join(t2, NULL);\n}"
},
{
"code": null,
"e": 2584,
"s": 2261,
"text": "This is thread 2\nThis is thread 1\nThis is thread 1\nThis is thread 2\nThis is thread 1\nThis is thread 1\nThis is thread 1\nThis is thread 2\nThis is thread 2\nThis is thread 2\nThis is thread 2\nThis is thread 2\nThis is thread 2\nThis is thread 2\nThis is thread 2\nThis is thread 2\nThis is thread 2\nThis is thread 2\nThis is thread 2"
}
] |
Octal literals in C - GeeksforGeeks | 19 Aug, 2018
When we initialize a value by putting ‘0’ before a number, the number is treated as octal. For instance ’10’ is read as 10 but ‘010’ is read as 8.
Examples:
Input : 0101
Output : 65
Input : 01010
Output : 520
#include<iostream>
using namespace std;
int main()
{
    int x = 0101;
    cout << x;
    return 0;
}
65
#include<iostream>
using namespace std;
int main()
{
    int x = 020;
    cout << x;
    return 0;
}
16
#include<iostream>
using namespace std;
int main()
{
    int x = 090;
    cout << x;
    return 0;
}
Output :
Compiler Error : 9 is not a valid digit in octal number.
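For completeness, the reverse direction, printing a value in octal, can be done with the std::oct stream manipulator. The following is a small illustrative sketch and is not part of the original examples:
#include<iostream>
using namespace std;
int main()
{
    int x = 65;
    // print 65 in octal; this outputs 101
    cout << oct << x;
    return 0;
}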
cpp-data-types
C Language
C++ Programs
rand() and srand() in C/C++
Left Shift and Right Shift Operators in C/C++
fork() in C
Command line arguments in C/C++
Substring in C++
Header files in C/C++ and its uses
cin in C++
Sorting a Map by value in C++ STL
C Program to Swap two Numbers
Program to print ASCII Value of a character | [
{
"code": null,
"e": 23935,
"s": 23907,
"text": "\n19 Aug, 2018"
},
{
"code": null,
"e": 24082,
"s": 23935,
"text": "When we initialize a value by putting ‘0’ before a number, the number is treated as octal. For instance ’10’ is read as 10 but ‘010’ is read as 8."
},
{
"code": null,
"e": 24092,
"s": 24082,
"text": "Examples:"
},
{
"code": null,
"e": 24146,
"s": 24092,
"text": "Input : 0101\nOutput : 65\n\nInput : 01010\nOutput : 520\n"
},
{
"code": "#include<iostream>using namespace std;int main(){ int x = 0101; cout << x; return 0;}",
"e": 24245,
"s": 24146,
"text": null
},
{
"code": null,
"e": 24249,
"s": 24245,
"text": "65\n"
},
{
"code": "#include<iostream>using namespace std;int main(){ int x = 020; cout << x; return 0;}",
"e": 24347,
"s": 24249,
"text": null
},
{
"code": null,
"e": 24351,
"s": 24347,
"text": "16\n"
},
{
"code": "#include<iostream>using namespace std;int main(){ int x = 090; cout << x; return 0;}",
"e": 24449,
"s": 24351,
"text": null
},
{
"code": null,
"e": 24458,
"s": 24449,
"text": "Output :"
},
{
"code": null,
"e": 24516,
"s": 24458,
"text": "Compiler Error : 9 is not a valid digit in octal number.\n"
},
{
"code": null,
"e": 24531,
"s": 24516,
"text": "cpp-data-types"
},
{
"code": null,
"e": 24542,
"s": 24531,
"text": "C Language"
},
{
"code": null,
"e": 24555,
"s": 24542,
"text": "C++ Programs"
},
{
"code": null,
"e": 24653,
"s": 24555,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 24662,
"s": 24653,
"text": "Comments"
},
{
"code": null,
"e": 24675,
"s": 24662,
"text": "Old Comments"
},
{
"code": null,
"e": 24703,
"s": 24675,
"text": "rand() and srand() in C/C++"
},
{
"code": null,
"e": 24749,
"s": 24703,
"text": "Left Shift and Right Shift Operators in C/C++"
},
{
"code": null,
"e": 24761,
"s": 24749,
"text": "fork() in C"
},
{
"code": null,
"e": 24793,
"s": 24761,
"text": "Command line arguments in C/C++"
},
{
"code": null,
"e": 24810,
"s": 24793,
"text": "Substring in C++"
},
{
"code": null,
"e": 24845,
"s": 24810,
"text": "Header files in C/C++ and its uses"
},
{
"code": null,
"e": 24856,
"s": 24845,
"text": "cin in C++"
},
{
"code": null,
"e": 24890,
"s": 24856,
"text": "Sorting a Map by value in C++ STL"
},
{
"code": null,
"e": 24920,
"s": 24890,
"text": "C Program to Swap two Numbers"
}
] |
Matplotlib Animations in Jupyter Notebook | by B. Chen | Towards Data Science | Matplotlib is one of the most popular plotting libraries for exploratory data analysis. It’s the default plotting backend in Pandas and other popular plotting libraries are based on it, for instance, seaborn. Plotting a static graph should work well in most cases, but when you are running simulations or doing time-series data analysis, basic plots may not always be enough. You may want to show an animation that helps you understand how the state changes over time.
In this article, you’ll learn how to create animations using matplotlib in Jupyter Notebook. This article is structured as follows:
Interactive Plot in Jupyter NotebookEmbedded HTML5 video in Jupyter Notebook
Interactive Plot in Jupyter Notebook
Embedded HTML5 video in Jupyter Notebook
Please check out the Notebook for source code.
For demonstration, we will be creating a moving sine wave, and below is one frame example:
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y)
plt.show()
In order to create an interactive plot in Jupyter Notebook, you first need to enable interactive plot as follows:
# Enable interactive plot
%matplotlib notebook
After that, we import the required libraries. Especially FuncAnimation class that can be used to create an animation for you.
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
Next, we need to create an initial state of the animation figure. We call subplots() without any arguments to create a Figure fig and a single Axes ax. We set the x range to (0, 2*Pi) and y range to (-1.1,1.1) to avoid them to be constantly changing.
fig, ax = plt.subplots()
line, = ax.plot([])   # A tuple unpacking to unpack the only plot
ax.set_xlim(0, 2*np.pi)
ax.set_ylim(-1.1, 1.1)
We then create a function animate() that’s going to be called by the FuncAnimation. The function takes one argument frame_num — the current frame number. What we want to do here is to change data for our line according to the frame number.
def animate(frame_num):
    y = np.sin(x + 2*np.pi * frame_num/100)
    line.set_data((x, y))
    return line
Finally, we create our animation object by calling FuncAnimation with 4 arguments
The first argument fig is the reference to the figure we created
The second argument animate is the function we created to call at each frame to update the plot.
The third argument is frames=100, and it defines the number of frames for “one round of animation”
Finally, the interval=20 argument sets the delay (in milliseconds) between frames. 20 is equivalent to 50 FPS (1000 ms / 20 = 50 FPS). If the interval is too large, the animation plays very slowly; if it is too small, frames change faster than your eyes can follow. In general, we need an FPS greater than 16 for smooth animation (human eyes can only perceive about 10–12 frames per second [1]).
anim = FuncAnimation(fig, animate, frames=100, interval=20)
plt.show()
Please check out Notebook for the source code
In the previous example, we have created a nice sine wave animation. However, the plot is animating only when the code is running. Of course, we could take a screen capture, but that’s not efficient when you want to share your Jupyter Notebook online.
What we can do is convert the animation into an HTML5 video and embed it in Jupyter Notebook. We will be using FFmpeg for conversion. If you don’t have it, you first need to follow the instruction to download FFmpeg and unzip it.
After that, we import the required libraries and set 'ffmpeg_path' to the path to your local ffmpeg executable:
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from IPython import display

plt.rcParams['animation.ffmpeg_path'] = '/path_to_your/ffmpeg'
Creating animation is the same as the previous example.
fig, ax = plt.subplots()
line, = ax.plot([])   # A tuple unpacking to unpack the only plot
ax.set_xlim(0, 2*np.pi)
ax.set_ylim(-1.1, 1.1)

def animate(frame_num):
    y = np.sin(x + 2*np.pi * frame_num/100)
    line.set_data((x, y))
    return line

anim = FuncAnimation(fig, animate, frames=100, interval=20)
But instead of plt.show() to plot it, we are calling anim.to_html5_video() method to convert the animation result into an HTML5 video. We then need to get the HTML code that does the embedding for this video and that is done by calling IPython display.HTML(video). Finally, we call display.display(html) to embed the HTML code in Jupyter Notebook.
video = anim.to_html5_video()
html = display.HTML(video)
display.display(html)
plt.close()  # avoid plotting a spare static plot
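Alternatively, if you just want a video file on disk instead of an embedded player, the animation can be saved directly with anim.save. This is a small sketch; the file name sine_wave.mp4 and the fps value are illustrative assumptions:
# Uses the same ffmpeg setup configured above
anim.save('sine_wave.mp4', writer='ffmpeg', fps=50)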
Please check out Notebook for source code
In this article, we have learned 2 approaches to create matplotlib animation in Jupyter Notebook. Creating an animation plot can help you running simulations and doing time-series data analysis.
I hope this article will help you to save time in learning matplotlib. I recommend you to check out the documentation for more options & setting and to know about other things you can do.
Thanks for reading. Please check out the notebook for the source code and stay tuned if you are interested in the practical aspect of machine learning.
Python Interactive Data Visualization with Altair
Interactive Data Visualization for exploring Coronavirus Spreads
10 tricks for converting numbers and strings to Datetime in Pandas
All Pandas json_normalize() you should know for flattening JSON
Using Pandas method chaining to improve code readability
How to do a Custom Sort on Pandas DataFrame
All the Pandas shift() you should know for data analysis
More tutorials can be found on my Github | [
{
"code": null,
"e": 516,
"s": 47,
"text": "Matplotlib is one of the most popular plotting libraries for exploratory data analysis. It’s the default plotting backend in Pandas and other popular plotting libraries are based on it, for instance, seaborn. Plotting a static graph should work well in most cases, but when you are running simulations or doing time-series data analysis, basic plots may not always be enough. You may want to show an animation that helps you understand how the state changes over time."
},
{
"code": null,
"e": 648,
"s": 516,
"text": "In this article, you’ll learn how to create animations using matplotlib in Jupyter Notebook. This article is structured as follows:"
},
{
"code": null,
"e": 725,
"s": 648,
"text": "Interactive Plot in Jupyter NotebookEmbedded HTML5 video in Jupyter Notebook"
},
{
"code": null,
"e": 762,
"s": 725,
"text": "Interactive Plot in Jupyter Notebook"
},
{
"code": null,
"e": 803,
"s": 762,
"text": "Embedded HTML5 video in Jupyter Notebook"
},
{
"code": null,
"e": 850,
"s": 803,
"text": "Please check out the Notebook for source code."
},
{
"code": null,
"e": 941,
"s": 850,
"text": "For demonstration, we will be creating a moving sine wave, and below is one frame example:"
},
{
"code": null,
"e": 1060,
"s": 941,
"text": "import matplotlib.pyplot as pltimport numpy as npx = np.linspace(0, 2*np.pi, 100)y = np.sin(x)plt.plot(x, y)plt.show()"
},
{
"code": null,
"e": 1174,
"s": 1060,
"text": "In order to create an interactive plot in Jupyter Notebook, you first need to enable interactive plot as follows:"
},
{
"code": null,
"e": 1220,
"s": 1174,
"text": "# Enable interactive plot%matplotlib notebook"
},
{
"code": null,
"e": 1346,
"s": 1220,
"text": "After that, we import the required libraries. Especially FuncAnimation class that can be used to create an animation for you."
},
{
"code": null,
"e": 1424,
"s": 1346,
"text": "import matplotlib.pyplot as pltfrom matplotlib.animation import FuncAnimation"
},
{
"code": null,
"e": 1675,
"s": 1424,
"text": "Next, we need to create an initial state of the animation figure. We call subplots() without any arguments to create a Figure fig and a single Axes ax. We set the x range to (0, 2*Pi) and y range to (-1.1,1.1) to avoid them to be constantly changing."
},
{
"code": null,
"e": 1812,
"s": 1675,
"text": "fig, ax = plt.subplots()line, = ax.plot([]) # A tuple unpacking to unpack the only plotax.set_xlim(0, 2*np.pi)ax.set_ylim(-1.1, 1.1)"
},
{
"code": null,
"e": 2052,
"s": 1812,
"text": "We then create a function animate() that’s going to be called by the FuncAnimation. The function takes one argument frame_num — the current frame number. What we want to do here is to change data for our line according to the frame number."
},
{
"code": null,
"e": 2159,
"s": 2052,
"text": "def animate(frame_num): y = np.sin(x + 2*np.pi * frame_num/100) line.set_data((x, y)) return line"
},
{
"code": null,
"e": 2241,
"s": 2159,
"text": "Finally, we create our animation object by calling FuncAnimation with 4 arguments"
},
{
"code": null,
"e": 2306,
"s": 2241,
"text": "The first argument fig is the reference to the figure we created"
},
{
"code": null,
"e": 2403,
"s": 2306,
"text": "The second argument animate is the function we created to call at each frame to update the plot."
},
{
"code": null,
"e": 2502,
"s": 2403,
"text": "The third argument is frames=100, and it defines the number of frames for “one round of animation”"
},
{
"code": null,
"e": 2873,
"s": 2502,
"text": "Finally, the interval=20 argument sets the delay (in milliseconds) between frames. 20 is equivalent to 50FPS (1000ms / 20 = 50 FPS). If the number is too large you wait a really long time, if the number is too small, it would be faster than your eyes could see. In general, we need FPS greater than 16 for smooth animation (Human eyes can only receive 10–12 frames [1])."
},
{
"code": null,
"e": 2943,
"s": 2873,
"text": "anim = FuncAnimation(fig, animate, frames=100, interval=20)plt.show()"
},
{
"code": null,
"e": 2989,
"s": 2943,
"text": "Please check out Notebook for the source code"
},
{
"code": null,
"e": 3241,
"s": 2989,
"text": "In the previous example, we have created a nice sine wave animation. However, the plot is animating only when the code is running. Of course, we could take a screen capture, but that’s not efficient when you want to share your Jupyter Notebook online."
},
{
"code": null,
"e": 3471,
"s": 3241,
"text": "What we can do is convert the animation into an HTML5 video and embed it in Jupyter Notebook. We will be using FFmpeg for conversion. If you don’t have it, you first need to follow the instruction to download FFmpeg and unzip it."
},
{
"code": null,
"e": 3583,
"s": 3471,
"text": "After that, we import the required libraries and set 'ffmpeg_path' to the path to your local ffmpeg executable:"
},
{
"code": null,
"e": 3750,
"s": 3583,
"text": "import matplotlib.pyplot as pltfrom matplotlib.animation import FuncAnimationfrom IPython import displayplt.rcParams['animation.ffmpeg_path'] = '/path_to_your/ffmpeg'"
},
{
"code": null,
"e": 3806,
"s": 3750,
"text": "Creating animation is the same as the previous example."
},
{
"code": null,
"e": 4106,
"s": 3806,
"text": "fig, ax = plt.subplots()line, = ax.plot([]) # A tuple unpacking to unpack the only plotax.set_xlim(0, 2*np.pi)ax.set_ylim(-1.1, 1.1)def animate(frame_num): y = np.sin(x + 2*np.pi * frame_num/100) line.set_data((x, y)) return lineanim = FuncAnimation(fig, animate, frames=100, interval=20)"
},
{
"code": null,
"e": 4454,
"s": 4106,
"text": "But instead of plt.show() to plot it, we are calling anim.to_html5_video() method to convert the animation result into an HTML5 video. We then need to get the HTML code that does the embedding for this video and that is done by calling IPython display.HTML(video). Finally, we call display.display(html) to embed the HTML code in Jupyter Notebook."
},
{
"code": null,
"e": 4597,
"s": 4454,
"text": "video = anim.to_html5_video()html = display.HTML(video)display.display(html)plt.close() # avoid plotting a spare static plot"
},
{
"code": null,
"e": 4639,
"s": 4597,
"text": "Please check out Notebook for source code"
},
{
"code": null,
"e": 4834,
"s": 4639,
"text": "In this article, we have learned 2 approaches to create matplotlib animation in Jupyter Notebook. Creating an animation plot can help you running simulations and doing time-series data analysis."
},
{
"code": null,
"e": 5022,
"s": 4834,
"text": "I hope this article will help you to save time in learning matplotlib. I recommend you to check out the documentation for more options & setting and to know about other things you can do."
},
{
"code": null,
"e": 5174,
"s": 5022,
"text": "Thanks for reading. Please check out the notebook for the source code and stay tuned if you are interested in the practical aspect of machine learning."
},
{
"code": null,
"e": 5224,
"s": 5174,
"text": "Python Interactive Data Visualization with Altair"
},
{
"code": null,
"e": 5289,
"s": 5224,
"text": "Interactive Data Visualization for exploring Coronavirus Spreads"
},
{
"code": null,
"e": 5356,
"s": 5289,
"text": "10 tricks for converting numbers and strings to Datetime in Pandas"
},
{
"code": null,
"e": 5420,
"s": 5356,
"text": "All Pandas json_normalize() you should know for flattening JSON"
},
{
"code": null,
"e": 5477,
"s": 5420,
"text": "Using Pandas method chaining to improve code readability"
},
{
"code": null,
"e": 5521,
"s": 5477,
"text": "How to do a Custom Sort on Pandas DataFrame"
},
{
"code": null,
"e": 5578,
"s": 5521,
"text": "All the Pandas shift() you should know for data analysis"
}
] |
Normal BST to Balanced BST | Practice | GeeksforGeeks | Given a Binary Search Tree, modify the given BST such that it is balanced and has minimum possible height.
Examples :
Input:
30
/
20
/
10
Output:
20
/ \
10 30
Input:
4
/
3
/
2
/
1
Output:
3 3 2
/ \ / \ / \
1 4 OR 2 4 OR 1 3 OR ..
\ / \
2 1 4
Your Task:
The task is to complete the function buildBalancedTree() which takes root as the input argument, and returns the root of tree after converting the given BST into a balanced BST that has minimum possible height. The driver code will print the height of the updated tree in output itself.
Expected Time Complexity: O(N)
Expected Auxiliary Space: O(N)
Here N denotes total number of nodes in given BST.
Constraints:
1<=N<=200
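A common approach (shown here only as a rough sketch, not as the hidden editorial) is to store the nodes of an inorder traversal, which visits a BST in sorted order, and then rebuild the tree by repeatedly picking the middle element as the root:
// Sketch: balance a BST in O(N) time and O(N) auxiliary space.
// Assumes the Node structure provided by the driver code.
void storeInorder(Node* root, vector<Node*>& nodes) {
    if (!root) return;
    storeInorder(root->left, nodes);
    nodes.push_back(root);
    storeInorder(root->right, nodes);
}

Node* buildFrom(vector<Node*>& nodes, int lo, int hi) {
    if (lo > hi) return nullptr;
    int mid = lo + (hi - lo) / 2;      // middle node becomes the root
    Node* root = nodes[mid];
    root->left = buildFrom(nodes, lo, mid - 1);
    root->right = buildFrom(nodes, mid + 1, hi);
    return root;
}

Node* buildBalancedTree(Node* root) {
    vector<Node*> nodes;
    storeInorder(root, nodes);
    return buildFrom(nodes, 0, (int)nodes.size() - 1);
}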
0
vikasanwla
This comment was deleted.
0
mishraanuyesh71 month ago
class GfG{
    static void inorder(Node root, ArrayList<Integer> list){
        if(root==null) return;
        inorder(root.left,list);
        list.add(root.data);
        inorder(root.right,list);
    }
    static Node makeTree(ArrayList<Integer> list, int l, int r){
        if(l>r) return null;
        int mid = l + (r-l)/2;   // fixed: was l + (r-1)/2, which can pick an index outside [l, r]
        int data = list.get(mid);
        Node lc=makeTree(list,l,mid-1);
        Node rc=makeTree(list,mid+1,r);
        Node node=new Node(data);
        node.left=lc;
        node.right=rc;
        return node;
    }
    Node buildBalancedTree(Node root)
    {
        if(root == null){
            return root;
        }
        ArrayList<Integer> list = new ArrayList<>();
        inorder(root,list);
        return makeTree(list,0,list.size()-1);
    }
}
+1
gaurabhkumarjha271020011 month ago
void inorder (Node* root, vector <Node* > &v){
if (!root) return;
inorder (root->left, v);
v.push_back (root);
inorder (root->right, v);
}
Node* cons (Node* root, vector <Node*> &common, int low, int high){
if (low > high) return nullptr;
int mid= low+ (high-low)/2;
root= common[mid];
root->left= cons (root, common, low, mid-1);
root->right= cons (root, common, mid+1, high);
return root;
}
Node* buildBalancedTree(Node* root)
{
if (!root) return root;
vector <Node*> v;
inorder(root, v);
return cons (root, v, 0, v.size()-1);
}
0
detroix071 month ago
Node* BST(vector<int>InOrder,int n,int InOrderStart,int InOrderEnd){ if(InOrderStart>InOrderEnd) return NULL; int mid=(InOrderStart+InOrderEnd)/2; Node* root=new Node(InOrder[mid]); root->left=BST(InOrder,n,InOrderStart,mid-1); root->right=BST(InOrder,n,mid+1,InOrderEnd); return root;}
void InOrderTraversal(Node* root,vector<int> &InOrder){ if(root==NULL) return; InOrderTraversal(root->left,InOrder); InOrder.push_back(root->data); InOrderTraversal(root->right,InOrder);}
Node* buildBalancedTree(Node* root){vector<int> InOrder;InOrderTraversal(root,InOrder);int n = InOrder.size();Node* ans = BST(InOrder,n,0,n-1);return ans;}
0
adityasingh1091 month ago
void inorder(Node* root, vector<int> &in){
if(root == nullptr)
return;
inorder(root->left, in);
in.push_back(root->data);
inorder(root->right, in);
}
Node* inorderToBst(int s , int e, vector<int> &in){
if(s>e)
return NULL;
int mid = (s+e) /2;
Node* root = new Node(in[mid]);
root->left = inorderToBst(s,mid-1,in);
    root->right = inorderToBst(mid +1, e, in);
    return root;
}
Node* buildBalancedTree(Node* root)
{
vector<int> bst;
inorder(root, bst);
return inorderToBst(0,bst.size()-1,bst);
}
+2
sanketbhagat2 months ago
SIMPLE JAVA SOLUTION
1→ Tree using same nodes.
class GfG{
void inorder(Node root, ArrayList<Node> list){
if(root==null) return;
inorder(root.left,list);
list.add(root);
inorder(root.right,list);
}
Node makeTree(ArrayList<Node> list, int l, int r){
if(l>r) return null;
int mid = l+(r-l)/2;
Node root = list.get(mid);
root.left = makeTree(list,l,mid-1);
root.right = makeTree(list,mid+1,r);
return root;
}
Node buildBalancedTree(Node root){
//Add your code here.
if(root==null) return root;
ArrayList<Node> list = new ArrayList<>();
inorder(root,list);
return makeTree(list,0,list.size()-1);
}
}
2→ Tree by creating new nodes.
class GfG{
void inorder(Node root, ArrayList<Integer> list){
if(root==null) return;
inorder(root.left,list);
list.add(root.data);
inorder(root.right,list);
}
Node makeTree(ArrayList<Integer> list, int l, int r){
if(l>r) return null;
int mid = l+(r-l)/2;
Node root = new Node(list.get(mid));
root.left = makeTree(list,l,mid-1);
root.right = makeTree(list,mid+1,r);
return root;
}
Node buildBalancedTree(Node root){
//Add your code here.
if(root==null) return root;
ArrayList<Integer> list = new ArrayList<>();
inorder(root,list);
return makeTree(list,0,list.size()-1);
}
}
0
codecrackerp2 months ago
void inorder(Node*root,vector<int>&v){
if(root==NULL)return;
inorder(root->left,v);
v.push_back(root->data);
inorder(root->right,v);
}
Node*construct(int mid,int l,int r,vector<int>&v){
if(l>r){
return NULL;
}
mid=(l+r)/2;
Node*newnode=new Node(v[mid]);
newnode->left=construct(mid,l,mid-1,v);
newnode->right=construct(mid,mid+1,r,v);
return newnode;
}
Node* buildBalancedTree(Node* root)
{
vector<int>v;
inorder(root,v);
int mid=-1,l=0,r=v.size()-1;
Node*root1= construct(mid,l,r,v);
return root1;
}
+1
aloksinghbais022 months ago
C++ solution having time complexity as O(N) and space complexity as O(N) is as follows :-
Execution Time :- 0.0 / 1.1 sec
void inorder(Node *node,vector<int> &in){ if(!node) return; inorder(node->left,in); in.push_back(node->data); inorder(node->right,in);}
Node* build(int s,int e,vector<int> &in){ if(s > e) return nullptr; int mid = (s+e)/2; Node *node = new Node(in[mid]); node->left = node->right = nullptr; node->left = build(s,mid-1,in); node->right = build(mid+1,e,in); return (node);}
Node* buildBalancedTree(Node* root){vector<int> in;inorder(root,in);return build(0,in.size()-1,in);}
0
shiva10903 months ago
void inorder(Node* root , vector<int> &v){
while(root){
if(root->left == NULL){
v.push_back(root->data);
root = root->right;
}
else
{
Node* prev = root->left;
while(prev->right != NULL && prev->right != root)
{
prev = prev->right;
}
if(prev->right == NULL){
prev->right = root;
root = root->left;
}
else{
prev->right = NULL;
v.push_back(root->data);
root = root->right;
}
}
}
}
Node* construct( vector<int> &v , int start , int end)
{
if(start > end)
{
return NULL;
}
if(start == end)
{
Node* root = new Node(v[start]);
return root;
}
int mid = (start+end)/2;
Node* root = new Node(v[mid]);
root->left = construct(v , start ,mid-1);
root->right = construct(v, mid+1 , end);
return root;
}
Node* buildBalancedTree(Node* root)
{
// Code here
vector<int> v;
inorder(root , v);
return construct(v,0,v.size()-1);
}
used morris traversal for reducing some space complexity , but spcae complexity is O(N) only... :'D
+5
arthurshelby3 months ago
void inorder(Node *root,vector<Node *>&v)
{
if(root!=NULL)
{
inorder(root->left,v);
v.push_back(root);
inorder(root->right,v);
}
}
Node *construct(vector<Node *>&v,int start,int end)
{
if(start>end)
return NULL;
int mid=(start+end)/2;
Node *temp=v[mid];
temp->left=construct(v,start,mid-1);
temp->right=construct(v,mid+1,end);
return temp;
}
Node* buildBalancedTree(Node* root)
{
// Code here
vector<Node *>v;
inorder(root,v);
int start=0,end=v.size();
return construct(v,start,end-1);
}
You can view the solutions submitted by other users from the submission tab. | [
{
"code": null,
"e": 344,
"s": 238,
"text": "Given a Binary Search Tree, modify the given BST such that itis balanced and has minimum possible height."
},
{
"code": null,
"e": 355,
"s": 344,
"text": "Examples :"
},
{
"code": null,
"e": 695,
"s": 355,
"text": "Input:\n 30\n /\n 20\n /\n 10\nOutput:\n 20\n / \\\n 10 30\n\nInput:\n 4\n /\n 3\n /\n 2\n /\n 1\nOutput:\n 3 3 2\n / \\ / \\ / \\\n 1 4 OR 2 4 OR 1 3 OR ..\n \\ / \\\n 2 1 4 \n"
},
{
"code": null,
"e": 997,
"s": 695,
"text": "\nYour Task:\nThe task is to complete the function buildBalancedTree() which takes root as the input argument, and returns the root of tree after converting the given BST into a balanced BST that has minimum possible height. The driver code will print the height of the updated tree in output itself. \n "
},
{
"code": null,
"e": 1110,
"s": 997,
"text": "Expected Time Complexity: O(N)\nExpected Auxiliary Space: O(N)\nHere N denotes total number of nodes in given BST."
},
{
"code": null,
"e": 1134,
"s": 1110,
"text": "\nConstraints:\n1<=N<=200"
},
{
"code": null,
"e": 1136,
"s": 1134,
"text": "0"
},
{
"code": null,
"e": 1147,
"s": 1136,
"text": "vikasanwla"
},
{
"code": null,
"e": 1173,
"s": 1147,
"text": "This comment was deleted."
},
{
"code": null,
"e": 1175,
"s": 1173,
"text": "0"
},
{
"code": null,
"e": 1201,
"s": 1175,
"text": "mishraanuyesh71 month ago"
},
{
"code": null,
"e": 1945,
"s": 1201,
"text": "class GfG{ static void inorder(Node root, ArrayList<Integer> list){ if(root==null) return; inorder(root.left,list); list.add(root.data); inorder(root.right,list); } static Node makeTree(ArrayList<Integer> list,int l, int r){ if(l>r) return null; int mid = l + (r-1)/2; int data = list.get(mid); Node lc=makeTree(list,l,mid-1); Node rc=makeTree(list,mid+1,r); Node node=new Node(data); node.left=lc; node.right=rc; return node; } Node buildBalancedTree(Node root) { if(root == null){ return root; } ArrayList<Integer> list = new ArrayList<>(); inorder(root,list); return makeTree(list,0,list.size()-1); //Add your code here. }}"
},
{
"code": null,
"e": 1948,
"s": 1945,
"text": "+1"
},
{
"code": null,
"e": 1983,
"s": 1948,
"text": "gaurabhkumarjha271020011 month ago"
},
{
"code": null,
"e": 2677,
"s": 1983,
"text": "void inorder (Node* root, vector <Node* > &v){\n \n if (!root) return;\n \n inorder (root->left, v);\n v.push_back (root);\n inorder (root->right, v);\n }\nNode* cons (Node* root, vector <Node*> &common, int low, int high){\n \n if (low > high) return nullptr;\n \n int mid= low+ (high-low)/2;\n root= common[mid];\n \n root->left= cons (root, common, low, mid-1);\n root->right= cons (root, common, mid+1, high);\n \n return root;\n }\nNode* buildBalancedTree(Node* root)\n{\n\n\t if (!root) return root;\n vector <Node*> v;\n inorder(root, v);\n return cons (root, v, 0, v.size()-1);\n \n}\n"
},
{
"code": null,
"e": 2679,
"s": 2677,
"text": "0"
},
{
"code": null,
"e": 2700,
"s": 2679,
"text": "detroix071 month ago"
},
{
"code": null,
"e": 2993,
"s": 2700,
"text": "Node* BST(vector<int>InOrder,int n,int InOrderStart,int InOrderEnd){ if(InOrderStart>InOrderEnd) return NULL; int mid=(InOrderStart+InOrderEnd)/2; Node* root=new Node(InOrder[mid]); root->left=BST(InOrder,n,InOrderStart,mid-1); root->right=BST(InOrder,n,mid+1,InOrderEnd); return root;}"
},
{
"code": null,
"e": 3191,
"s": 2995,
"text": "void InOrderTraversal(Node* root,vector<int> &InOrder){ if(root==NULL) return; InOrderTraversal(root->left,InOrder); InOrder.push_back(root->data); InOrderTraversal(root->right,InOrder);}"
},
{
"code": null,
"e": 3350,
"s": 3193,
"text": "Node* buildBalancedTree(Node* root){vector<int> InOrder;InOrderTraversal(root,InOrder);int n = InOrder.size();Node* ans = BST(InOrder,n,0,n-1);return ans;} "
},
{
"code": null,
"e": 3352,
"s": 3350,
"text": "0"
},
{
"code": null,
"e": 3378,
"s": 3352,
"text": "adityasingh1091 month ago"
},
{
"code": null,
"e": 3928,
"s": 3378,
"text": "void inorder(Node* root, vector<int> &in){\n if(root == nullptr)\n return;\n inorder(root->left, in);\n in.push_back(root->data);\n inorder(root->right, in);\n}\n\nNode* inorderToBst(int s , int e, vector<int> &in){\n if(s>e)\n return NULL;\n int mid = (s+e) /2;\n \n Node* root = new Node(in[mid]);\n root->left = inorderToBst(s,mid-1,in);\n root->right = inorderToBst(mid +1, e, in);\n}\n\nNode* buildBalancedTree(Node* root)\n{\n vector<int> bst;\n inorder(root, bst);\n return inorderToBst(0,bst.size()-1,bst);\n\t\n}"
},
{
"code": null,
"e": 3931,
"s": 3928,
"text": "+2"
},
{
"code": null,
"e": 3956,
"s": 3931,
"text": "sanketbhagat2 months ago"
},
{
"code": null,
"e": 3977,
"s": 3956,
"text": "SIMPLE JAVA SOLUTION"
},
{
"code": null,
"e": 4005,
"s": 3979,
"text": "1→ Tree using same nodes."
},
{
"code": null,
"e": 4712,
"s": 4005,
"text": "class GfG{\n \n void inorder(Node root, ArrayList<Node> list){\n if(root==null) return;\n inorder(root.left,list);\n list.add(root);\n inorder(root.right,list);\n }\n \n Node makeTree(ArrayList<Node> list, int l, int r){\n if(l>r) return null;\n int mid = l+(r-l)/2;\n Node root = list.get(mid);\n root.left = makeTree(list,l,mid-1);\n root.right = makeTree(list,mid+1,r);\n return root;\n }\n \n Node buildBalancedTree(Node root){\n //Add your code here.\n if(root==null) return root;\n ArrayList<Node> list = new ArrayList<>();\n inorder(root,list);\n return makeTree(list,0,list.size()-1);\n }\n}"
},
{
"code": null,
"e": 4745,
"s": 4714,
"text": "2→ Tree by creating new nodes."
},
{
"code": null,
"e": 5476,
"s": 4745,
"text": "class GfG{\n \n void inorder(Node root, ArrayList<Integer> list){\n if(root==null) return;\n inorder(root.left,list);\n list.add(root.data);\n inorder(root.right,list);\n }\n \n Node makeTree(ArrayList<Integer> list, int l, int r){\n if(l>r) return null;\n int mid = l+(r-l)/2;\n Node root = new Node(list.get(mid));\n root.left = makeTree(list,l,mid-1);\n root.right = makeTree(list,mid+1,r);\n return root;\n }\n \n Node buildBalancedTree(Node root){\n //Add your code here.\n if(root==null) return root;\n ArrayList<Integer> list = new ArrayList<>();\n inorder(root,list);\n return makeTree(list,0,list.size()-1);\n }\n}"
},
{
"code": null,
"e": 5478,
"s": 5476,
"text": "0"
},
{
"code": null,
"e": 5503,
"s": 5478,
"text": "codecrackerp2 months ago"
},
{
"code": null,
"e": 6049,
"s": 5503,
"text": "void inorder(Node*root,vector<int>&v){\n if(root==NULL)return;\n inorder(root->left,v);\n v.push_back(root->data);\n inorder(root->right,v);\n}\nNode*construct(int mid,int l,int r,vector<int>&v){\n if(l>r){\n return NULL;\n }\n mid=(l+r)/2;\n Node*newnode=new Node(v[mid]);\n newnode->left=construct(mid,l,mid-1,v);\n newnode->right=construct(mid,mid+1,r,v);\n return newnode;\n}\nNode* buildBalancedTree(Node* root)\n{\n vector<int>v;\n inorder(root,v);\n int mid=-1,l=0,r=v.size()-1;\n Node*root1= construct(mid,l,r,v);\n return root1;\n}"
},
{
"code": null,
"e": 6052,
"s": 6049,
"text": "+1"
},
{
"code": null,
"e": 6080,
"s": 6052,
"text": "aloksinghbais022 months ago"
},
{
"code": null,
"e": 6171,
"s": 6080,
"text": "C++ solution having time complexity as O(N) and space complexity as O(N) is as follows :- "
},
{
"code": null,
"e": 6205,
"s": 6173,
"text": "Execution Time :- 0.0 / 1.1 sec"
},
{
"code": null,
"e": 6351,
"s": 6207,
"text": "void inorder(Node *node,vector<int> &in){ if(!node) return; inorder(node->left,in); in.push_back(node->data); inorder(node->right,in);}"
},
{
"code": null,
"e": 6611,
"s": 6351,
"text": "Node* build(int s,int e,vector<int> &in){ if(s > e) return nullptr; int mid = (s+e)/2; Node *node = new Node(in[mid]); node->left = node->right = nullptr; node->left = build(s,mid-1,in); node->right = build(mid+1,e,in); return (node);}"
},
{
"code": null,
"e": 6713,
"s": 6611,
"text": "Node* buildBalancedTree(Node* root){vector<int> in;inorder(root,in);return build(0,in.size()-1,in);} "
},
{
"code": null,
"e": 6715,
"s": 6713,
"text": "0"
},
{
"code": null,
"e": 6737,
"s": 6715,
"text": "shiva10903 months ago"
},
{
"code": null,
"e": 7925,
"s": 6737,
"text": "void inorder(Node* root , vector<int> &v){\n \n while(root){\n if(root->left == NULL){\n v.push_back(root->data);\n root = root->right;\n }\n else\n {\n Node* prev = root->left;\n \n while(prev->right != NULL && prev->right != root)\n {\n prev = prev->right;\n }\n \n if(prev->right == NULL){\n prev->right = root;\n root = root->left;\n }\n else{\n prev->right = NULL;\n v.push_back(root->data);\n root = root->right;\n }\n }\n }\n}\n\nNode* construct( vector<int> &v , int start , int end)\n{\n if(start > end)\n {\n return NULL;\n }\n if(start == end)\n {\n Node* root = new Node(v[start]);\n return root;\n }\n int mid = (start+end)/2;\n Node* root = new Node(v[mid]);\n \n root->left = construct(v , start ,mid-1);\n root->right = construct(v, mid+1 , end);\n \n return root;\n}\nNode* buildBalancedTree(Node* root)\n{\n\t// Code here\n\tvector<int> v;\n\tinorder(root , v);\n\treturn construct(v,0,v.size()-1);\n}"
},
{
"code": null,
"e": 8025,
"s": 7925,
"text": "used morris traversal for reducing some space complexity , but spcae complexity is O(N) only... :'D"
},
{
"code": null,
"e": 8028,
"s": 8025,
"text": "+5"
},
{
"code": null,
"e": 8053,
"s": 8028,
"text": "arthurshelby3 months ago"
},
{
"code": null,
"e": 8612,
"s": 8053,
"text": "void inorder(Node *root,vector<Node *>&v)\n\n{\n if(root!=NULL)\n {\n inorder(root->left,v);\n v.push_back(root);\n inorder(root->right,v);\n }\n}\nNode *construct(vector<Node *>&v,int start,int end)\n{\n if(start>end)\n return NULL;\n int mid=(start+end)/2;\n Node *temp=v[mid];\n temp->left=construct(v,start,mid-1);\n temp->right=construct(v,mid+1,end);\n return temp;\n}\nNode* buildBalancedTree(Node* root)\n{\n\t// Code here\n\tvector<Node *>v;\n\tinorder(root,v);\n\tint start=0,end=v.size(); \n\treturn construct(v,start,end-1);\n}"
},
{
"code": null,
"e": 8758,
"s": 8612,
"text": "We strongly recommend solving this problem on your own before viewing its editorial. Do you still\n want to view the editorial?"
},
{
"code": null,
"e": 8794,
"s": 8758,
"text": " Login to access your submissions. "
},
{
"code": null,
"e": 8804,
"s": 8794,
"text": "\nProblem\n"
},
{
"code": null,
"e": 8814,
"s": 8804,
"text": "\nContest\n"
},
{
"code": null,
"e": 8877,
"s": 8814,
"text": "Reset the IDE using the second button on the top right corner."
},
{
"code": null,
"e": 9025,
"s": 8877,
"text": "Avoid using static/global variables in your code as your code is tested against multiple test cases and these tend to retain their previous values."
},
{
"code": null,
"e": 9233,
"s": 9025,
"text": "Passing the Sample/Custom Test cases does not guarantee the correctness of code. On submission, your code is tested against multiple test cases consisting of all possible corner cases and stress constraints."
},
{
"code": null,
"e": 9339,
"s": 9233,
"text": "You can access the hints to get an idea about what is expected of you as well as the final solution code."
}
] |
org.json - JSONObject | The JSONObject class is an unordered collection of key-value pairs. It provides methods to access values by key and to put values. The following types are supported −
Boolean
JSONArray
JSONObject
Number
String
JSONObject.NULL object
import org.json.JSONArray;
import org.json.JSONObject;
public class JSONDemo {
public static void main(String[] args) {
JSONObject jsonObject = new JSONObject();
jsonObject.put("Name", "Robert");
jsonObject.put("ID", 1);
jsonObject.put("Fees", new Double(1000.21));
jsonObject.put("Active", new Boolean(true));
jsonObject.put("Other Details", JSONObject.NULL);
JSONArray list = new JSONArray();
list.put("foo");
list.put(new Integer(100));
jsonObject.put("list",list);
System.out.println(jsonObject);
}
}
{"Active":true,"Other Details":null,"ID":1,"Fees":1000.21,"list":["foo",100],"Name":"Robert"}
Bookmark this page | [
{
"code": null,
"e": 2133,
"s": 1975,
"text": "JSONObject class is a unordered collection of key-value pairs. It provides methods to access values by key and to put values. Following types are supported −"
},
{
"code": null,
"e": 2141,
"s": 2133,
"text": "Boolean"
},
{
"code": null,
"e": 2149,
"s": 2141,
"text": "Boolean"
},
{
"code": null,
"e": 2159,
"s": 2149,
"text": "JSONArray"
},
{
"code": null,
"e": 2169,
"s": 2159,
"text": "JSONArray"
},
{
"code": null,
"e": 2180,
"s": 2169,
"text": "JSONObject"
},
{
"code": null,
"e": 2191,
"s": 2180,
"text": "JSONObject"
},
{
"code": null,
"e": 2198,
"s": 2191,
"text": "Number"
},
{
"code": null,
"e": 2205,
"s": 2198,
"text": "Number"
},
{
"code": null,
"e": 2212,
"s": 2205,
"text": "String"
},
{
"code": null,
"e": 2219,
"s": 2212,
"text": "String"
},
{
"code": null,
"e": 2242,
"s": 2219,
"text": "JSONObject.NULL object"
},
{
"code": null,
"e": 2265,
"s": 2242,
"text": "JSONObject.NULL object"
},
{
"code": null,
"e": 2845,
"s": 2265,
"text": "import org.json.JSONArray;\nimport org.json.JSONObject;\n\npublic class JSONDemo {\n public static void main(String[] args) { \n JSONObject jsonObject = new JSONObject();\n jsonObject.put(\"Name\", \"Robert\");\n jsonObject.put(\"ID\", 1);\n jsonObject.put(\"Fees\", new Double(1000.21));\n jsonObject.put(\"Active\", new Boolean(true));\n jsonObject.put(\"Other Details\", JSONObject.NULL);\n\n JSONArray list = new JSONArray();\n list.put(\"foo\");\n list.put(new Integer(100));\n jsonObject.put(\"list\",list);\n System.out.println(jsonObject);\n }\n}"
},
{
"code": null,
"e": 2940,
"s": 2845,
"text": "{\"Active\":true,\"Other Details\":null,\"ID\":1,\"Fees\":1000.21,\"list\":[\"foo\",100],\"Name\":\"Robert\"}\n"
},
{
"code": null,
"e": 2975,
"s": 2940,
"text": "\n 18 Lectures \n 1.5 hours \n"
},
{
"code": null,
"e": 2994,
"s": 2975,
"text": " Dr. Saatya Prasad"
},
{
"code": null,
"e": 3031,
"s": 2994,
"text": "\n 107 Lectures \n 13.5 hours \n"
},
{
"code": null,
"e": 3050,
"s": 3031,
"text": " Arnab Chakraborty"
},
{
"code": null,
"e": 3083,
"s": 3050,
"text": "\n 75 Lectures \n 5 hours \n"
},
{
"code": null,
"e": 3105,
"s": 3083,
"text": " Revathi Ramachandran"
},
{
"code": null,
"e": 3137,
"s": 3105,
"text": "\n 14 Lectures \n 44 mins\n"
},
{
"code": null,
"e": 3150,
"s": 3137,
"text": " Zach Miller"
},
{
"code": null,
"e": 3182,
"s": 3150,
"text": "\n 12 Lectures \n 54 mins\n"
},
{
"code": null,
"e": 3206,
"s": 3182,
"text": " Prof. Paul Cline, Ed.D"
},
{
"code": null,
"e": 3239,
"s": 3206,
"text": "\n 54 Lectures \n 4 hours \n"
},
{
"code": null,
"e": 3257,
"s": 3239,
"text": " Gilad James, PhD"
},
{
"code": null,
"e": 3264,
"s": 3257,
"text": " Print"
},
{
"code": null,
"e": 3275,
"s": 3264,
"text": " Add Notes"
}
] |
Ethical Hacking - TCP/IP Hijacking | TCP/IP hijacking is when an attacker takes over a genuine, already-authenticated network connection of another user. It is done in order to bypass the password authentication that normally happens at the start of a session.
In theory, a TCP/IP connection is established through a three-way handshake of SYN, SYN-ACK and ACK packets, each of which carries sequence (seq) and acknowledgement (ack) numbers.
To hijack this connection, there are two possibilities −
Find the seq, which is a number that increases by 1, but there is practically no chance to predict it.
The second possibility is to use the Man-in-the-Middle attack which, in simple words, is a type of network sniffing. For sniffing, we use tools like Wireshark or Ettercap.
An attacker monitors the data transmission over a network and discovers the IPs of the two devices that participate in a connection.
When the hacker discovers the IP of one of the users, he can put down the connection of the other user by DoS attack and then resume communication by spoofing the IP of the disconnected user.
In practice, one of the best TCP/IP hijack tools is Shijack. It is developed using Python language and you can download it from the following link − https://packetstormsecurity.com/sniffers/shijack.tgz
Here is an example of a Shijack command −
root:/home/root/hijack# ./shijack eth0 192.168.0.100 53517 192.168.0.200 23
Here, eth0 is the network interface, 192.168.0.100:53517 is the source IP and port, and 192.168.0.200:23 is the destination IP and port. Since port 23 is Telnet, we are trying to hijack a Telnet connection between the two hosts.
Hunt is another popular tool that you can use to hijack a TCP/IP connection. It can be downloaded from − https://packetstormsecurity.com/sniffers/hunt/
All unencrypted sessions are vulnerable to TCP/IP session hijacking, so you should be using encrypted protocols as much as possible. Or, you should use double authentication techniques to keep the session secured.
Bookmark this page | [
{
"code": null,
"e": 2685,
"s": 2479,
"text": "TCP/IP Hijacking is when an authorized user gains access to a genuine network connection of another user. It is done in order to bypass the password authentication which is normally the start of a session."
},
{
"code": null,
"e": 2748,
"s": 2685,
"text": "In theory, a TCP/IP connection is established as shown below −"
},
{
"code": null,
"e": 2805,
"s": 2748,
"text": "To hijack this connection, there are two possibilities −"
},
{
"code": null,
"e": 2895,
"s": 2805,
"text": "Find the seq which is a number that increases by 1, but there is no chance to predict it."
},
{
"code": null,
"e": 2985,
"s": 2895,
"text": "Find the seq which is a number that increases by 1, but there is no chance to predict it."
},
{
"code": null,
"e": 3157,
"s": 2985,
"text": "The second possibility is to use the Man-in-the-Middle attack which, in simple words, is a type of network sniffing. For sniffing, we use tools like Wireshark or Ethercap."
},
{
"code": null,
"e": 3329,
"s": 3157,
"text": "The second possibility is to use the Man-in-the-Middle attack which, in simple words, is a type of network sniffing. For sniffing, we use tools like Wireshark or Ethercap."
},
{
"code": null,
"e": 3459,
"s": 3329,
"text": "An attacker monitors the data transmission over a network and discovers the IP’s of two devices that participate in a connection."
},
{
"code": null,
"e": 3651,
"s": 3459,
"text": "When the hacker discovers the IP of one of the users, he can put down the connection of the other user by DoS attack and then resume communication by spoofing the IP of the disconnected user."
},
{
"code": null,
"e": 3853,
"s": 3651,
"text": "In practice, one of the best TCP/IP hijack tools is Shijack. It is developed using Python language and you can download it from the following link − https://packetstormsecurity.com/sniffers/shijack.tgz"
},
{
"code": null,
"e": 3895,
"s": 3853,
"text": "Here is an example of a Shijack command −"
},
{
"code": null,
"e": 3972,
"s": 3895,
"text": "root:/home/root/hijack# ./shijack eth0 192.168.0.100 53517 192.168.0.200 23\n"
},
{
"code": null,
"e": 4045,
"s": 3972,
"text": "Here, we are trying to hijack a Telnet connection between the two hosts."
},
{
"code": null,
"e": 4197,
"s": 4045,
"text": "Hunt is another popular tool that you can use to hijack a TCP/IP connection. It can be downloaded from − https://packetstormsecurity.com/sniffers/hunt/"
},
{
"code": null,
"e": 4411,
"s": 4197,
"text": "All unencrypted sessions are vulnerable to TCP/IP session hijacking, so you should be using encrypted protocols as much as possible. Or, you should use double authentication techniques to keep the session secured."
},
{
"code": null,
"e": 4444,
"s": 4411,
"text": "\n 36 Lectures \n 5 hours \n"
},
{
"code": null,
"e": 4458,
"s": 4444,
"text": " Sharad Kumar"
},
{
"code": null,
"e": 4493,
"s": 4458,
"text": "\n 31 Lectures \n 3.5 hours \n"
},
{
"code": null,
"e": 4510,
"s": 4493,
"text": " Abhilash Nelson"
},
{
"code": null,
"e": 4543,
"s": 4510,
"text": "\n 22 Lectures \n 3 hours \n"
},
{
"code": null,
"e": 4555,
"s": 4543,
"text": " Blair Cook"
},
{
"code": null,
"e": 4590,
"s": 4555,
"text": "\n 74 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 4602,
"s": 4590,
"text": " 199courses"
},
{
"code": null,
"e": 4637,
"s": 4602,
"text": "\n 75 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 4649,
"s": 4637,
"text": " 199courses"
},
{
"code": null,
"e": 4686,
"s": 4649,
"text": "\n 148 Lectures \n 28.5 hours \n"
},
{
"code": null,
"e": 4705,
"s": 4686,
"text": " Joseph Delgadillo"
},
{
"code": null,
"e": 4712,
"s": 4705,
"text": " Print"
},
{
"code": null,
"e": 4723,
"s": 4712,
"text": " Add Notes"
}
] |
How to plot a function with ggplot2 in R? | Plotting a function is very easy with the curve function, but we can do it with ggplot2 as well. Since ggplot2 produces better-looking plots, it is common to use it instead of the base plotting functions. To plot a function, we specify the function inside stat_function in ggplot.
Consider the below data frame −
> x<-1:10
> df<-data.frame(x)
Loading ggplot2 package −
> library(ggplot2)
Plotting of functions is as shown below:
> ggplot(df,aes(x))+
+ stat_function(fun=function(x) log(x))
> ggplot(df,aes(x))+
+ stat_function(fun=function(x) log(x)/x)
> ggplot(df,aes(x))+
+ stat_function(fun=function(x) log(x)/(x-3))
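stat_function also accepts extra controls such as the number of points at which the function is evaluated and the colour of the line. The following is a small illustrative sketch; the values chosen here are assumptions, not part of the original examples:
> ggplot(df,aes(x))+
+ stat_function(fun=function(x) sin(x), n=500, colour="blue")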
> ggplot(df,aes(x))+
+ stat_function(fun=function(x) (exp(x)^2)*2) | [
{
"code": null,
"e": 1355,
"s": 1062,
"text": "Plotting a function is very easy with curve function but we can do it with ggplot2 as well. Since ggplot2 provides a better-looking plot, it is common to use it for plotting instead of other plotting functions. To plot a function, we should specify the function under stat_function in ggplot."
},
{
"code": null,
"e": 1387,
"s": 1355,
"text": "Consider the below data frame −"
},
{
"code": null,
"e": 1417,
"s": 1387,
"text": "> x<-1:10\n> df<-data.frame(x)"
},
{
"code": null,
"e": 1443,
"s": 1417,
"text": "Loading ggplot2 package −"
},
{
"code": null,
"e": 1462,
"s": 1443,
"text": "> library(ggplot2)"
},
{
"code": null,
"e": 1503,
"s": 1462,
"text": "Plotting of functions is as shown below:"
},
{
"code": null,
"e": 1564,
"s": 1503,
"text": "> ggplot(df,aes(x))+\n+ stat_function(fun=function(x) log(x))"
},
{
"code": null,
"e": 1627,
"s": 1564,
"text": "> ggplot(df,aes(x))+\n+ stat_function(fun=function(x) log(x)/x)"
},
{
"code": null,
"e": 1694,
"s": 1627,
"text": "> ggplot(df,aes(x))+\n+ stat_function(fun=function(x) log(x)/(x-3))"
},
{
"code": null,
"e": 1761,
"s": 1694,
"text": "> ggplot(df,aes(x))+\n+ stat_function(fun=function(x) (exp(x)^2)*2)"
}
] |
C++ Program to Encode a Message Using Playfair Cipher | In this scheme, pairs of letters are encrypted, instead of single letters as in the case of simple substitution cipher.
In playfair cipher, initially a key table is created. The key table is a 5×5 grid of alphabets that acts as the key for encrypting the plaintext. Each of the 25 alphabets must be unique and one letter of the alphabet (usually J) is omitted from the table as we need only 25 alphabets instead of 26. If the plaintext contains J, then it is replaced by I.
The sender and the receiver decide on a particular key, say ‘tutorials’. In a key table, the first characters (going left to right) in the table are the letters of this phrase, excluding the duplicate letters. The rest of the table will be filled with the remaining letters of the alphabet, in natural order. The key table works out to be −
First, a plaintext message is split into pairs of two letters (digraphs). If there is an odd number of letters, a Z is added to the last letter. Let us consider that, we want to encrypt the message “hide money”. It will be written as −
HI DE MO NE YZ
The rules of encryption are −
If both the letters are in the same column, take the letter below each one (going back to the top if at the bottom). ‘H’ and ‘I’ are in the same column, hence take the letters below them to replace: HI → QC
If both letters are in the same row, take the letter to the right of each one (going back to the left if at the farthest right). ‘D’ and ‘E’ are in the same row, hence take the letters to the right of them to replace: DE → EF
If neither of the preceding two rules is true, form a rectangle with the two letters and take the letters on the horizontal opposite corners of the rectangle. In the example below, this rule turns MO into NU.
Using these rules, the result of the encryption of ‘hide money’ with the key of ‘tutorials’ would be −
QC EF NU MF ZV
Decrypting the Playfair cipher is as simple as doing the same process in reverse. Receiver has the same key and can create the same key table, and then decrypt any messages made using that key.
Here, a C++ program is given to encode a message using the Playfair Cipher.
Begin
Function void play( int dir )
For it = msg.begin() to it != msg.end()
If ( getPos( *it++, j, k ) )
If ( getPos( *it, p, q) )
If ( j == p )
nmsg+= getChar( j, k + dir )
nmsg += getChar( p, q + dir )
else if( k == q )
nmsg += getChar( j + dir, k )
nmsg += getChar( p + dir, q )
else
nmsg += getChar( p, k )
nmsg += getChar( j, q )
done
done
done
msg = nmsg
done
End
#include <iostream>
#include <string>
#include <algorithm>
#include <cctype>
#include <cstdlib>
using namespace std;
class playfair {
public:
string msg; char n[5][5];
void play( string k, string t, bool m, bool e ) {
createEncoder( k, m );
getText( t, m, e );
if( e )
play( 1 );
else
play( -1 );
print();
}
private:
void play( int dir ) {
int j,k,p,q;
string nmsg;
for( string::const_iterator it = msg.begin(); it != msg.end(); it++ ) {
if( getPos( *it++, j, k ) )
            if( getPos( *it, p, q ) ) {
//for same row
if( j == p ) {
nmsg+= getChar( j, k + dir );
nmsg += getChar( p, q + dir );
}
//for same column
else if( k == q ) {
nmsg += getChar( j + dir, k );
nmsg += getChar( p + dir, q );
} else {
nmsg += getChar( p, k );
nmsg += getChar( j, q );
}
}
}
msg = nmsg;
}
      void print() { //print the solution
cout << "\n\n Solution:" << endl;
string::iterator it = msg.begin(); int count = 0;
while( it != msg.end() ) {
cout << *it;
it++;
cout << *it << " ";
it++;
            if( ++count >= 26 ) {
               cout << endl;
               count = 0;
            }
}
cout << endl << endl;
}
char getChar( int a, int b ) { //get the characters
return n[ (b + 5) % 5 ][ (a + 5) % 5 ];
}
bool getPos( char l, int &c, int &d ) { //get the position
for( int y = 0; y < 5; y++ )
for( int x = 0; x < 5; x++ )
if( n[y][x] == l ) {
c = x;
d= y;
return true;
}
return false;
}
void getText( string t, bool m, bool e ) { //get the original message
for( string::iterator it = t.begin(); it != t.end(); it++ ) {
//to choose J = I or no Q in the alphabet.
*it = toupper( *it );
if( *it < 65 || *it > 90 )
continue;
if( *it == 'J' && m )
*it = 'I';
else if( *it == 'Q' && !m )
continue;
msg += *it;
}
if( e ) {
string nmsg = ""; size_t len = msg.length();
for( size_t x = 0; x < len; x += 2 ) {
nmsg += msg[x];
if( x + 1 < len ) {
if( msg[x] == msg[x + 1] ) nmsg += 'X';
nmsg += msg[x + 1];
}
}
msg = nmsg;
}
if( msg.length() & 1 )
msg += 'X';
}
void createEncoder( string key, bool m ) { //creation of the key table
if( key.length() < 1 )
key= "KEYWORD";
key += "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
string s= "";
for( string::iterator it = key.begin(); it != key.end(); it++ ) {
*it = toupper( *it );
if( *it < 65 || *it > 90 )
continue;
if( ( *it == 'J' && m ) || ( *it == 'Q' && !m ) )
continue;
         if( s.find( *it ) == string::npos )
s += *it;
}
copy( s.begin(), s.end(), &n[0][0] );
}
};
int main( int argc, char* argv[] ) {
string k, i, msg;
bool m, c;
cout << "Encrpty or Decypt? ";
getline( cin, i );
c = ( i[0] == 'e' || i[0] == 'E' );
cout << "Enter a key: ";
getline( cin, k);
cout << "I <-> J (Y/N): ";
getline( cin, i );
m = ( i[0] == 'y' || i[0] == 'Y' );
cout << "Enter the message: ";
getline( cin, msg );
playfair pf;
pf.play( k, msg,m, c );
return system( "pause" );
}
Encrypt or Decrypt? e
Enter a key: players
I <-> J (Y/N): y
Enter the message: This is tutorialspoint
Solution:
OK GC GC MZ MQ CF YA RL QH OM | [
{
"code": null,
"e": 1182,
"s": 1062,
"text": "In this scheme, pairs of letters are encrypted, instead of single letters as in the case of simple substitution cipher."
},
{
"code": null,
"e": 1536,
"s": 1182,
"text": "In playfair cipher, initially a key table is created. The key table is a 5×5 grid of alphabets that acts as the key for encrypting the plaintext. Each of the 25 alphabets must be unique and one letter of the alphabet (usually J) is omitted from the table as we need only 25 alphabets instead of 26. If the plaintext contains J, then it is replaced by I."
},
{
"code": null,
"e": 1861,
"s": 1536,
"text": "The sender and the receiver deicide on a particular key, say ‘tutorials’. In a key table, the first characters (going left to right) in the table is the phrase, excluding the duplicate letters. The rest of the table will be filled with the remaining letters of the alphabet, in natural order. The key table works out to be −"
},
{
"code": null,
"e": 2097,
"s": 1861,
"text": "First, a plaintext message is split into pairs of two letters (digraphs). If there is an odd number of letters, a Z is added to the last letter. Let us consider that, we want to encrypt the message “hide money”. It will be written as −"
},
{
"code": null,
"e": 2112,
"s": 2097,
"text": "HI DE MO NE YZ"
},
{
"code": null,
"e": 2142,
"s": 2112,
"text": "The rules of encryption are −"
},
{
"code": null,
"e": 2338,
"s": 2142,
"text": "If both the letters are in the same column, take the letter below each one (going back to the top if at the bottom)‘H’ and ‘I’ are in same column, hence take letter below them to replace. HI → QC"
},
{
"code": null,
"e": 2553,
"s": 2338,
"text": "If both letters are in the same row, take the letter to the right of each one (going back to the left if at the farthest right)‘D’ and ‘E’ are in same row, hence take letter to the right of them to replace. DE → EF"
},
{
"code": null,
"e": 2712,
"s": 2553,
"text": "If neither of the preceding two rules are true, form a rectangle with the two letters and take the letters on the horizontal opposite corner of the rectangle."
},
{
"code": null,
"e": 2815,
"s": 2712,
"text": "Using these rules, the result of the encryption of ‘hide money’ with the key of ‘tutorials’ would be −"
},
{
"code": null,
"e": 2830,
"s": 2815,
"text": "QC EF NU MF ZV"
},
{
"code": null,
"e": 3024,
"s": 2830,
"text": "Decrypting the Playfair cipher is as simple as doing the same process in reverse. Receiver has the same key and can create the same key table, and then decrypt any messages made using that key."
},
{
"code": null,
"e": 3098,
"s": 3024,
"text": "Here is a C++ program is given to encode a message using Playfair Cipher."
},
{
"code": null,
"e": 3597,
"s": 3098,
"text": "Begin\nFunction void play( int dir )\nFor it = msg.begin() to it != msg.end()\n If ( getPos( *it++, j, k ) )\n If ( getPos( *it, p, q) )\n If ( j == p )\n nmsg+= getChar( j, k + dir )\n nmsg += getChar( p, q + dir )\n else if( k == q )\n nmsg += getChar( j + dir, k )\n nmsg += getChar( p + dir, q )\n else\n nmsg += getChar( p, k )\n nmsg += getChar( j, q )\n done\n done\n done\n msg = nmsg\ndone\nEnd"
},
{
"code": null,
"e": 7104,
"s": 3597,
"text": "#include <iostream>\n#include <string>\nusing namespace std;\nclass playfair {\n public:\n string msg; char n[5][5];\n void play( string k, string t, bool m, bool e ) {\n createEncoder( k, m );\n getText( t, m, e );\n if( e )\n play( 1 );\n else\n play( -1 );\n print();\n }\n private:\n void play( int dir ) {\n int j,k,p,q;\n string nmsg;\n for( string::const_iterator it = msg.begin(); it != msg.end(); it++ ) {\n if( getPos( *it++, j, k ) )\n if( getPos( *it, p, q) {\n //for same row\n if( j == p ) {\n nmsg+= getChar( j, k + dir );\n nmsg += getChar( p, q + dir );\n }\n //for same column\n else if( k == q ) {\n nmsg += getChar( j + dir, k );\n nmsg += getChar( p + dir, q );\n } else {\n nmsg += getChar( p, k );\n nmsg += getChar( j, q );\n }\n }\n }\n msg = nmsg;\n }\n void print() //print the solution {\n cout << \"\\n\\n Solution:\" << endl;\n string::iterator it = msg.begin(); int count = 0;\n while( it != msg.end() ) {\n cout << *it;\n it++;\n cout << *it << \" \";\n it++;\n if( ++count >= 26 )\n cout << endl;\n count = 0;\n }\n cout << endl << endl;\n }\n char getChar( int a, int b ) { //get the characters\n return n[ (b + 5) % 5 ][ (a + 5) % 5 ];\n }\n bool getPos( char l, int &c, int &d ) { //get the position\n for( int y = 0; y < 5; y++ )\n for( int x = 0; x < 5; x++ )\n if( n[y][x] == l ) {\n c = x;\n d= y;\n return true;\n }\n return false;\n }\n void getText( string t, bool m, bool e ) { //get the original message\n for( string::iterator it = t.begin(); it != t.end(); it++ ) {\n //to choose J = I or no Q in the alphabet.\n *it = toupper( *it );\n if( *it < 65 || *it > 90 )\n continue;\n if( *it == 'J' && m )\n *it = 'I';\n else if( *it == 'Q' && !m )\n continue;\n msg += *it;\n } \n if( e ) {\n string nmsg = \"\"; size_t len = msg.length();\n for( size_t x = 0; x < len; x += 2 ) {\n nmsg += msg[x];\n if( x + 1 < len ) {\n if( msg[x] == msg[x + 1] ) nmsg += 'X';\n nmsg += msg[x + 1];\n }\n }\n msg = nmsg;\n }\n if( msg.length() & 1 )\n msg += 'X';\n }\n void createEncoder( string key, bool m ) { //creation of the key table\n if( key.length() < 1 )\n key= \"KEYWORD\";\n key += \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\";\n string s= \"\";\n for( string::iterator it = key.begin(); it != key.end(); it++ ) {\n *it = toupper( *it );\n if( *it < 65 || *it > 90 )\n continue;\n if( ( *it == 'J' && m ) || ( *it == 'Q' && !m ) )\n continue;\n if( s.find( *it ) == -1 )\n s += *it;\n }\n copy( s.begin(), s.end(), &n[0][0] );\n }\n};\nint main( int argc, char* argv[] ) {\n string k, i, msg;\n bool m, c;\n cout << \"Encrpty or Decypt? \";\n getline( cin, i );\n c = ( i[0] == 'e' || i[0] == 'E' );\n cout << \"Enter a key: \";\n getline( cin, k);\n cout << \"I <-> J (Y/N): \";\n getline( cin, i );\n m = ( i[0] == 'y' || i[0] == 'Y' );\n cout << \"Enter the message: \";\n getline( cin, msg );\n playfair pf;\n pf.play( k, msg,m, c );\n return system( \"pause\" );\n}"
},
{
"code": null,
"e": 7247,
"s": 7104,
"text": "Encrpty or Decypt? e\nEnter a key: players\nI <-> J (Y/N): y\nEnter the message: This is tutorialspoint\n\n\nSolution:\nOK GC GC MZ MQ CF YA RL QH OM"
}
] |
Ruby | Array intersection operation - GeeksforGeeks | 07 Jan, 2020
Array#&() is an Array class method that performs a set intersection on arrays and returns the elements common to both arrays.
Syntax: Array.&()
Parameter: Arrays for performing the intersection operation.
Return: Common elements from both the arrays.
Example #1 :
# Ruby code for &() method
# showing intersection operation

# declaration of array
a = [18, 22, 33, 4, 5, 6]

# declaration of array
b = [5, 4, 22, 1, 88, 9]

# declaration of array
c = [18, 22, 33, 40, 50, 6]

# a intersecting b
puts "intersection of a and b : #{a & b}\n\n"

# a intersecting c
puts "intersection of a and c : #{a & c}\n\n"

# b intersecting c
puts "intersection of b and c : #{b & c}\n\n"
Output :
intersection of a and b : [22, 4, 5]
intersection of a and c : [18, 22, 33, 6]
intersection of b and c : [22]
Example #2 :
# Ruby code for &() method
# showing intersection operation

# declaration of array
a = ["abc", "xyz", "dog"]

# declaration of array
b = ["cow", "cat", "dog"]

# declaration of array
c = ["cat", "1", "dog"]

# a intersecting b
puts "intersection of a and b : #{a & b}\n\n"

# a intersecting c
puts "intersection of a and c : #{a & c}\n\n"

# b intersecting c
puts "intersection of b and c : #{b & c}\n\n"
Output :
intersection of a and b : ["dog"]
intersection of a and c : ["dog"]
intersection of b and c : ["cat", "dog"]
Ruby Array-class
Ruby-Methods
Ruby
Ruby | Array count() operation
Include v/s Extend in Ruby
Ruby | Enumerator each_with_index function
Global Variable in Ruby
Ruby | Array select() function
Ruby | Case Statement
Ruby | Hash delete() function
Ruby | String gsub! Method
Ruby | Decision Making (if, if-else, if-else-if, ternary) | Set - 1
Ruby | Data Types | [
{
"code": null,
"e": 23752,
"s": 23724,
"text": "\n07 Jan, 2020"
},
{
"code": null,
"e": 23885,
"s": 23752,
"text": "Array#&() is a Array class method which performs set intersection operation on the arrays. And returns the common of the two arrays."
},
{
"code": null,
"e": 23903,
"s": 23885,
"text": "Syntax: Array.&()"
},
{
"code": null,
"e": 23964,
"s": 23903,
"text": "Parameter: Arrays for performing the intersection operation."
},
{
"code": null,
"e": 24010,
"s": 23964,
"text": "Return: Common elements from both the arrays."
},
{
"code": null,
"e": 24023,
"s": 24010,
"text": "Example #1 :"
},
{
"code": "# Ruby code for &() method# showing intersection operation # declaration of arraya = [18, 22, 33, 4, 5, 6] # declaration of arrayb = [5, 4, 22, 1, 88, 9] # declaration of arrayc = [18, 22, 33, 40, 50, 6] # a intersecting bputs \"intersection of a and b : #{a & b}\\n\\n\" # a intersecting cputs \"intersection of a and c : #{a & c}\\n\\n\" # b intersecting cputs \"intersection of b and c : #{b & c}\\n\\n\"",
"e": 24431,
"s": 24023,
"text": null
},
{
"code": null,
"e": 24440,
"s": 24431,
"text": "Output :"
},
{
"code": null,
"e": 24553,
"s": 24440,
"text": "intersection of a and b : [22, 4, 5]\n\nintersection of a and c : [18, 22, 33, 6]\n\nintersection of b and c : [22]\n"
},
{
"code": null,
"e": 24566,
"s": 24553,
"text": "Example #2 :"
},
{
"code": "# Ruby code for &() method# showing intersection operation # declaration of arraya = [\"abc\", \"xyz\", \"dog\"] # declaration of arrayb = [\"cow\", \"cat\", \"dog\"] # declaration of arrayc = [\"cat\", \"1\", \"dog\"] # a intersecting bputs \"intersection of a and b : #{a & b}\\n\\n\" # a intersecting cputs \"intersection of a and c : #{a & c}\\n\\n\" # b intersecting cputs \"intersection of b and c : #{b & c}\\n\\n\"",
"e": 24971,
"s": 24566,
"text": null
},
{
"code": null,
"e": 24980,
"s": 24971,
"text": "Output :"
},
{
"code": null,
"e": 25091,
"s": 24980,
"text": "intersection of a and b : [\"dog\"]\n\nintersection of a and c : [\"dog\"]\n\nintersection of b and c : [\"cat\", \"dog\"]"
},
{
"code": null,
"e": 25108,
"s": 25091,
"text": "Ruby Array-class"
},
{
"code": null,
"e": 25121,
"s": 25108,
"text": "Ruby-Methods"
},
{
"code": null,
"e": 25126,
"s": 25121,
"text": "Ruby"
},
{
"code": null,
"e": 25224,
"s": 25126,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 25255,
"s": 25224,
"text": "Ruby | Array count() operation"
},
{
"code": null,
"e": 25282,
"s": 25255,
"text": "Include v/s Extend in Ruby"
},
{
"code": null,
"e": 25325,
"s": 25282,
"text": "Ruby | Enumerator each_with_index function"
},
{
"code": null,
"e": 25349,
"s": 25325,
"text": "Global Variable in Ruby"
},
{
"code": null,
"e": 25380,
"s": 25349,
"text": "Ruby | Array select() function"
},
{
"code": null,
"e": 25402,
"s": 25380,
"text": "Ruby | Case Statement"
},
{
"code": null,
"e": 25432,
"s": 25402,
"text": "Ruby | Hash delete() function"
},
{
"code": null,
"e": 25459,
"s": 25432,
"text": "Ruby | String gsub! Method"
},
{
"code": null,
"e": 25527,
"s": 25459,
"text": "Ruby | Decision Making (if, if-else, if-else-if, ternary) | Set - 1"
}
] |
Java Examples - Merge two arrays | How to merge two arrays ?
This example shows how to merge two arrays into a single array by using the list.addAll(Arrays.asList(array2)) method of the List class and the Arrays.toString() method of the Arrays class.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class Main {
public static void main(String args[]) {
String a[] = { "A", "E", "I" };
String b[] = { "O", "U" };
List list = new ArrayList(Arrays.asList(a));
list.addAll(Arrays.asList(b));
Object[] c = list.toArray();
System.out.println(Arrays.toString(c));
}
}
The above code sample will produce the following result.
[A, E, I, O, U]
Another sample example of Arrays Merge.
public class HelloWorld {
public static void main(String[] args) {
int[]a = {1,2,3,4};
int[]b = {4,16,1,2,3,22};
int[]c = new int[a.length+b.length];
int count = 0;
for(int i = 0; i < a.length; i++) {
c[i] = a[i];
count++;
}
for(int j = 0; j < b.length;j++) {
c[count++] = b[j];
}
for(int i = 0;i < c.length;i++) System.out.print(c[i]+" ");
}
}
The above code sample will produce the following result.
1 2 3 4 4 16 1 2 3 22
Print
Add Notes
Bookmark this page | [
{
"code": null,
"e": 2094,
"s": 2068,
"text": "How to merge two arrays ?"
},
{
"code": null,
"e": 2272,
"s": 2094,
"text": "This example shows how to merge two arrays into a single array by the use of list.Addall(array1.asList(array2) method of List class and Arrays.toString () method of Array class."
},
{
"code": null,
"e": 2660,
"s": 2272,
"text": "import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\n\npublic class Main {\n public static void main(String args[]) {\n String a[] = { \"A\", \"E\", \"I\" };\n String b[] = { \"O\", \"U\" };\n List list = new ArrayList(Arrays.asList(a));\n list.addAll(Arrays.asList(b));\n Object[] c = list.toArray();\n System.out.println(Arrays.toString(c));\n }\n}"
},
{
"code": null,
"e": 2717,
"s": 2660,
"text": "The above code sample will produce the following result."
},
{
"code": null,
"e": 2734,
"s": 2717,
"text": "[A, E, I, O, U]\n"
},
{
"code": null,
"e": 2774,
"s": 2734,
"text": "Another sample example of Arrays Merge."
},
{
"code": null,
"e": 3218,
"s": 2774,
"text": "public class HelloWorld {\n public static void main(String[] args) {\n int[]a = {1,2,3,4};\n int[]b = {4,16,1,2,3,22};\n int[]c = new int[a.length+b.length];\n int count = 0;\n \n for(int i = 0; i < a.length; i++) { \n c[i] = a[i];\n count++;\n } \n for(int j = 0; j < b.length;j++) { \n c[count++] = b[j];\n } \n for(int i = 0;i < c.length;i++) System.out.print(c[i]+\" \");\n } \n}"
},
{
"code": null,
"e": 3275,
"s": 3218,
"text": "The above code sample will produce the following result."
},
{
"code": null,
"e": 3298,
"s": 3275,
"text": "1,2,3,4,4,16,1,2,3,22\n"
},
{
"code": null,
"e": 3305,
"s": 3298,
"text": " Print"
},
{
"code": null,
"e": 3316,
"s": 3305,
"text": " Add Notes"
}
] |
Create a sticky navbar with CSS | To create a sticky navbar, use the position: sticky; property. You can try to run the following code to create a sticky navbar −
Live Demo
<!DOCTYPE html>
<html>
<head>
<style>
ul {
list-style-type: none;
position: sticky;
overflow: hidden;
top: 0;
width: 100%;
}
li {
float: left;
border-right: 1px solid white;
}
li a {
display: block;
padding: 8px;
background-color: orange;
}
li:last-child {
border-right: none;
}
div {
padding:5px;
margin-top:5px;
background-color:white;
height:1000px;
}
</style>
</head>
<body>
<ul>
<li><a href = "#home">Home</a></li>
<li><a href = "#news">News</a></li>
<li><a href = "#contact">Contact</a></li>
<li><a href = "#about">About</a></li>
</ul>
<div>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
<p>Adding demo text to check fixed menu.</p>
</div>
</body>
</html> | [
{
"code": null,
"e": 1190,
"s": 1062,
"text": "To create a sticky navbar, use the position: sticky; property. You can try to run the following code to create a sticky navbar,"
},
{
"code": null,
"e": 1200,
"s": 1190,
"text": "Live Demo"
},
{
"code": null,
"e": 2882,
"s": 1200,
"text": "<!DOCTYPE html>\n<html>\n <head>\n <style>\n ul {\n list-style-type: none;\n position: sticky;\n overflow: hidden;\n top: 0;\n width: 100%;\n }\n li {\n float: left;\n border-right: 1px solid white;\n }\n li a {\n display: block;\n padding: 8px;\n background-color: orange;\n }\n li:last-child {\n border-right: none;\n }\n div {\n padding:5px;\n margin-top:5px;\n background-color:white;\n height:1000px;\n }\n </style>\n </head>\n\n <body>\n <ul>\n <li><a href = \"#home\">Home</a></li>\n <li><a href = \"#news\">News</a></li>\n <li><a href = \"#contact\">Contact</a></li>\n <li><a href = \"#about\">About</a></li>\n </ul>\n <div>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n <p>Adding demo text to check fixed menu.</p>\n </div>\n </body>\n</html>"
}
] |
Fundamentals of Supervised Sentiment Analysis | by Eunjoo Byeon | Towards Data Science | In this post, I will explain a few basic machine learning approaches in classifying tweet sentiment and how to run them in Python.
Sentiment analysis is used to identify the affect or emotion (positive, negative, or neutral) of the data. For a business, it is a simple way to determine customers’ reactions towards the product or service and to quickly pick up on any change of emotion that may require immediate attention. The most basic approach to this problem is to use supervised learning. We can have actual humans to determine and label the sentiment of our data and treat it like a text classification problem. This is exactly what I will go over in this post, and will revisit the topic in the later post to discuss unsupervised methods.
You may find some human-labeled tweet data on data.world. The data contains over 8000 tweets that have been labeled as positive, negative, neutral, or unknown (“I can’t tell”).
Sentiment140 provides training data by a team at Stanford. Labels for this dataset were automatically annotated.
After cleaning the data (removing observations with missing texts, removing ‘unknown’ class, removing RTs), and splitting our data into train, test, and validation sets, we need to preprocess texts so they are properly quantifiable for our analysis. I will go through them one by one.
Different encodings can result in some strange characters appearing on our data. So first we will make sure we are only keeping ASCII characters. The below code will filter out all characters that are not ASCII.
def ascii_only(str_):
    return str_.encode("ascii", "ignore").decode()
This one is straightforward.
def make_lower(str_):
    return str_.lower()
Due to mishaps in encodings, some tweets contain stray HTML entities. Before we remove punctuation, we will get rid of these words first so we are not left with gibberish after removing punctuation. Other alphabetical words we want to remove are user names (e.g. @stereopickle) and hyperlinks (written as {link} in this dataset). We will use regex to do this.
import re

def remove_nonwords(str_):
    return re.sub("[^A-Za-z0-9 ]\w+[^A-Za-z0-9]*", ' ', str_)
This expression means to substitute all words that do not start with the alphanumeric characters with space. These words may or may not end with a non-alphanumeric character.
This is a step specific to this dataset. Because our dataset contains tweets about products, there’s a lot of references to the actual brand and product names. We don’t want the overall distribution of sentiment per different brands or products to bias our model, so we will remove some of them.
def remove_brandwords(str_):
    p = '#?(iphone|ipad|sxsw|hcsm|google|apple|cisco|austin|atari|intel|mac|pc|blackberry|android|linux|ubuntu)[a-z0-9]*'
    return re.sub(p, ' ', str_)
Another straightforward cleaning step. Please note that I am removing all punctuation here for the sake of simplicity, but in reality many tweets use punctuation to express sentiment, such as :) and :(.
import string

punctuations = string.punctuation
punctuations = punctuations + '�' + string.digits

def remove_punctuations(str_, punctuations):
    table_ = str.maketrans('', '', punctuations)
    return str_.translate(table_)
If you are curious about the above method, please take a look at my other post on how to effectively remove punctuation.
towardsdatascience.com
Finally, we will lemmatize our vocabulary and remove stopwords using NLTK. In summary, the lemmatizer reduces each word to its basic form, the lemma.
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords

sw = stopwords.words('english')

def lemmatize(str_, sw):
    wnl = WordNetLemmatizer()
    return ' '.join([wnl.lemmatize(w) for w in str_.split() if w not in sw])
Read more about lemmatization or stemming here.
medium.com
After applying the above functions to our text data, we should have a pretty clean set of words to work with. But I like to add a feature selection step here, because NLP problems tend to end up with too many features once every word, bigram, or trigram is tokenized.
Of course, there are many ways to purge features, but I like to combine words that are similar using Spacy. The basic idea is to iterate through words that only occur a few times and to find an existing word with high similarity (closer in the vector space).
This custom function returns a dictionary with low-occurring words as keys and their replacements as values. This step often fixes typos and corrects cases that lemmatization might have missed. We can use this replacement dictionary to change words to their more frequently occurring counterparts and to remove words that occur fewer than a specified number of times.
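Here is a rough sketch of what such a helper could look like. It assumes a spaCy model with word vectors (e.g. en_core_web_md) and a word_counts dictionary mapping each token to its corpus frequency; the function name and thresholds are placeholders, not the exact code.

import spacy

nlp = spacy.load('en_core_web_md')  # assumption: a model that ships with word vectors

def build_replacement_dict(word_counts, min_count = 3, min_similarity = 0.8):
    """Map each rare word to the most similar frequent word, if one exists."""
    frequent = [w for w, c in word_counts.items() if c >= min_count]
    frequent_docs = {w: nlp(w) for w in frequent}
    replacements = {}
    for word, count in word_counts.items():
        if count >= min_count:
            continue
        doc = nlp(word)
        if not doc.has_vector:
            continue
        # find the closest frequent word in vector space
        best_word, best_sim = None, 0.0
        for cand, cand_doc in frequent_docs.items():
            if cand_doc.has_vector:
                sim = doc.similarity(cand_doc)
                if sim > best_sim:
                    best_word, best_sim = cand, sim
        if best_word is not None and best_sim >= min_similarity:
            replacements[word] = best_word
    return replacements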
Typically our data will have a high class-imbalance problem, as people are more likely to write neutral or positive tweets than negative tweets in most cases. But for most business problems, our model must detect these negative tweets. So we will keep an eye out for them by looking at the macro-averaged F1-score as well as the precision-recall curve, using a helper that plots the ROC curve and precision-recall curve and prints the key evaluation metrics.
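Here is a rough sketch of what this helper could look like; the signature follows how evaluating() is called in the later snippets, but the exact plots and layout are just one possible choice.

import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, roc_curve, auc, precision_recall_curve
from sklearn.preprocessing import label_binarize

def evaluating(y_true, y_pred, y_pred_p):
    """Print key metrics and plot one-vs-rest ROC and precision-recall curves."""
    print(classification_report(y_true, y_pred))
    classes = sorted(set(y_true))  # sklearn sorts classes_, so this matches predict_proba's columns
    y_bin = label_binarize(y_true, classes = classes)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (12, 5))
    for i, cls in enumerate(classes):
        fpr, tpr, _ = roc_curve(y_bin[:, i], y_pred_p[:, i])
        ax1.plot(fpr, tpr, label = f'{cls} (AUC = {auc(fpr, tpr):.2f})')
        prec, rec, _ = precision_recall_curve(y_bin[:, i], y_pred_p[:, i])
        ax2.plot(rec, prec, label = str(cls))
    ax1.plot([0, 1], [0, 1], 'k--')
    ax1.set_title('ROC curve'); ax1.set_xlabel('False positive rate'); ax1.set_ylabel('True positive rate'); ax1.legend()
    ax2.set_title('Precision-recall curve'); ax2.set_xlabel('Recall'); ax2.set_ylabel('Precision'); ax2.legend()
    plt.show()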
We can use a scikit-learn’s DummyClassifier to first see what our baseline measure would be if we were to just classify it based on how frequently each class occurs.
from sklearn.dummy import DummyClassifier

dummy_classifier = DummyClassifier()
dummy_classifier.fit(tweets_train, labels_train)

y_pred_p = dummy_classifier.predict_proba(tweets_validation)
y_pred = dummy_classifier.predict(tweets_validation)
One of the simplest ways to quantify text data is to just count the frequency of each word. scikit-learn's CountVectorizer can do that job easily.
from sklearn.feature_extraction.text import CountVectorizer

countvec = CountVectorizer(ngram_range = (1, 2), min_df = 2)
count_vectors = countvec.fit_transform(tweets_train)
This returns the count vectors of single words and bigrams that occur at least twice. Then we can use these count vectors to train different classification algorithms.
A problem with the count vector is that it only looks at the frequency of individual words and does not care about the context in which a word occurs. There is no way to assess how important a specific word is within a tweet. This is where the term frequency–inverse document frequency (TF-IDF) score comes in. The TF-IDF score weighs words that are uniquely frequent in one tweet more heavily than words that tend to be frequent across all tweets.
from sklearn.feature_extraction.text import TfidfVectorizer

tfvec = TfidfVectorizer(ngram_range = (1, 2), min_df = 2)
tf_vectors = tfvec.fit_transform(tweets_train)
Now that we have two vectorized texts, we can test different classifiers for each of these vectors.
Naive Bayes is one of the more popular choices for text classification. It is a simple application of Bayes' theorem to each class and the predictors, and it assumes that the individual features (words, in our case) are independent of each other.
So let’s say, we have a tweet that reads... “I love my new phone. It’s really fast, reliable and well-designed!”. This tweet clearly has a positive sentiment. In this case, the Naive-Bayes model assumes that the individual words like ‘love’, ‘new’, ‘really’, ‘fast’, ‘reliable’, all contribute independently to its positive class. In other words, likelihood of the tweet being positive when it uses the word ‘reliable’ does not change by other words. This does not mean that these words are independent in their appearances. Some words may tend to appear together more often than not, but that does not mean how much each word contributes to its class is dependent.
Naive-Bayes algorithm is simple to use and reliable when the above assumption holds. Since testing on our model requires vectorization, we can get the pipeline built into our model.
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

mn_nb = MultinomialNB()

# change countvec to tfvec for tf-idf
model = Pipeline([('vectorize', countvec), ('classify', mn_nb)])

# fitting training count vectors (change to tf_vectors for tf-idf)
model['classify'].fit(count_vectors, labels_train)

y_pred_p = model.predict_proba(tweets_validation)
y_pred = model.predict(tweets_validation)

evaluating(labels_validation, y_pred, y_pred_p)
Because of its assumption of independence between features, Naive Bayes overestimates how much confidence to place in each feature's contribution to the label, which makes it a poor probability estimator. So take the predicted probabilities with a grain of salt.
Another popular choice of text classification algorithm is the support vector machine (SVM). Simply put, SVM finds the hyperplane that divides the classes with the maximum margin between them. The main reason SVM is preferred in text classification is that we tend to end up with a lot of features. If we were to work in such a high-dimensional space using all our features directly, it would cause a problem known as the curse of dimensionality. Basically, our space becomes so big that our observations start to lose their meaning. But SVM is more robust when dealing with a large number of features because it uses the kernel trick. SVM does not actually work in the high dimensions; it just looks at the pairwise distances between observations as if they were in the high dimensions. It does take a long time to do the job, but it is robust.
from sklearn.svm import SVC

svm_classifier = SVC(class_weight = 'balanced', probability = True)
# don't forget to adjust the hyperparameters!

# change countvec to tfvec for tf-idf
svm_model = Pipeline([('vectorize', countvec), ('classify', svm_classifier)])

# fitting training count vectors (change to tf_vectors for tf-idf)
svm_model['classify'].fit(count_vectors, labels_train)

y_pred_p = svm_model.predict_proba(tweets_validation)
y_pred = svm_model.predict(tweets_validation)

evaluating(labels_validation, y_pred, y_pred_p)
When the SVM uses the kernel trick, things get into a bit of grey area, in terms of interpretability. But we can use the Shapley value to decipher how individual features are contributing to the classification. We’ll use SHAP’s friendly interface to visualize the Shapley values. For a detailed tutorial on this, I recommend reading the documentation on SHAP.
import shap

shap.initjs()

sample = shap.kmeans(count_vectors, 10)
e = shap.KernelExplainer(svm_model.predict_proba, sample, link = 'logit')
shap_vals = e.shap_values(X_val_tf, nsamples = 100)
shap.summary_plot(shap_vals, feature_names = countvec.get_feature_names(),
                  class_names = svm_model.classes_)
Let’s dive a little bit deeper (literally). So far we have worked with two different frequency measures to quantify our text data. But the frequency of each word tells only a little bit of the story. Understanding language and its meaning requires an understanding of syntax, or at the very least, the sequence of words. So we will look at a deep learning architecture that cares about the sequence of vocabulary: the long short-term memory (LSTM) architecture.
For the LSTM, we need to feed in the texts as sequences. The steps below outline how to run and evaluate the LSTM classifier; each step is explained in the code.
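Here is a condensed sketch of those steps using Keras; the layer sizes, number of epochs, and the one-hot label arrays (y_train_onehot, y_val_onehot) are placeholders rather than exact settings.

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# 1. turn each tweet into a sequence of integer word indices
tokenizer = Tokenizer()
tokenizer.fit_on_texts(tweets_train)
num_vocab = len(tokenizer.word_index) + 1

# 2. pad the sequences to a common length
max_len = max(len(s.split()) for s in tweets_train)
X_train = pad_sequences(tokenizer.texts_to_sequences(tweets_train), maxlen = max_len)
X_val = pad_sequences(tokenizer.texts_to_sequences(tweets_validation), maxlen = max_len)

# 3. build and train the LSTM classifier (3 classes: negative / neutral / positive)
model = Sequential()
model.add(Embedding(num_vocab, 200, input_length = max_len))
model.add(LSTM(64, dropout = 0.2, recurrent_dropout = 0.2))  # placeholder layer size
model.add(Dense(3, activation = 'softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
model.fit(X_train, y_train_onehot, validation_data = (X_val, y_val_onehot),
          epochs = 10, batch_size = 32)  # placeholder training settings

# 4. predicted class indices (map back to label names before evaluating)
y_pred_p = model.predict(X_val)
y_pred = y_pred_p.argmax(axis = 1)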
One downside of our LSTM model is that it only contains information that’s present within our training data, while vocabularies have semantic meanings outside of the tweets. Knowing how each vocabulary relates to each other in terms of semantic similarity may help our model. We can apply weights to our vocabularies based on the pre-trained word embedding algorithm.
For this, we will use GloVe, the vector representations trained by a team at Stanford. I'm using their 200-dimension word vectors trained on 2 billion tweets. You will need to download the vectors from their website.
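Here is a typical way to build the vector_matrix used below from the downloaded file; the file name glove.twitter.27B.200d.txt and the reuse of the tokenizer and num_vocab from the LSTM sketch above are assumptions.

import numpy as np

embedding_dim = 200
vector_matrix = np.zeros((num_vocab, embedding_dim))

# file name is an assumption based on the 200d Twitter vectors mentioned above
with open('glove.twitter.27B.200d.txt', encoding = 'utf8') as f:
    for line in f:
        parts = line.rstrip().split(' ')
        word, vector = parts[0], np.asarray(parts[1:], dtype = 'float32')
        idx = tokenizer.word_index.get(word)
        if idx is not None and idx < num_vocab:
            vector_matrix[idx] = vector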
Then you can add the obtained vector matrix as embedding weights to the Embedding layer of our LSTM architecture.
# adding the bolded part
model.add(Embedding(num_vocab, 200, weights = [vector_matrix], input_length = max_len))
By using the word embeddings and LSTM, my model showed a 20% increase in overall accuracy and a 16% increase in the macro-averaged F1-score.
We reviewed the fundamentals of building a sentiment analysis model using tweets data. I will end the post with some of the questions to think about and to expand upon.
How can we approach the same problem, if we didn’t have labels? (unsupervised learning)
What are other ways to reduce dimensions while keeping its interpretability? | [
{
"code": null,
"e": 302,
"s": 171,
"text": "In this post, I will explain a few basic machine learning approaches in classifying tweet sentiment and how to run them in Python."
},
{
"code": null,
"e": 918,
"s": 302,
"text": "Sentiment analysis is used to identify the affect or emotion (positive, negative, or neutral) of the data. For a business, it is a simple way to determine customers’ reactions towards the product or service and to quickly pick up on any change of emotion that may require immediate attention. The most basic approach to this problem is to use supervised learning. We can have actual humans to determine and label the sentiment of our data and treat it like a text classification problem. This is exactly what I will go over in this post, and will revisit the topic in the later post to discuss unsupervised methods."
},
{
"code": null,
"e": 1211,
"s": 918,
"text": "You may find some human-labeled tweets data on the data.world. Data contains over 8000 tweets that have been labeled to be positive, negative, neutral, or unknown (“I can’t tell’).Sentiment140 provides training data by a team at Stanford. Labels for this dataset were automatically annotated."
},
{
"code": null,
"e": 1392,
"s": 1211,
"text": "You may find some human-labeled tweets data on the data.world. Data contains over 8000 tweets that have been labeled to be positive, negative, neutral, or unknown (“I can’t tell’)."
},
{
"code": null,
"e": 1505,
"s": 1392,
"text": "Sentiment140 provides training data by a team at Stanford. Labels for this dataset were automatically annotated."
},
{
"code": null,
"e": 1790,
"s": 1505,
"text": "After cleaning the data (removing observations with missing texts, removing ‘unknown’ class, removing RTs), and splitting our data into train, test, and validation sets, we need to preprocess texts so they are properly quantifiable for our analysis. I will go through them one by one."
},
{
"code": null,
"e": 2002,
"s": 1790,
"text": "Different encodings can result in some strange characters appearing on our data. So first we will make sure we are only keeping ASCII characters. The below code will filter out all characters that are not ASCII."
},
{
"code": null,
"e": 2074,
"s": 2002,
"text": "def ascii_only(str_): return str_.encode(\"ascii\", \"ignore\").decode()"
},
{
"code": null,
"e": 2103,
"s": 2074,
"text": "This one is straightforward."
},
{
"code": null,
"e": 2148,
"s": 2103,
"text": "def make_lower(str_): return str_.lower()"
},
{
"code": null,
"e": 2526,
"s": 2148,
"text": "Due to mishaps in encodings, some tweets contain HTML symbols such as . Before we remove punctuations, we will get rid of these words first so we are not left with gibberish after we remove punctuations. Other alphabetical words we want to remove are the name of users (e.g. @stereopickle) and hyperlinks (written as {link} in this dataset). We will use regex to do this."
},
{
"code": null,
"e": 2624,
"s": 2526,
"text": "import re def remove_nonwords(str_): return re.sub(\"[^A-Za-z0-9 ]\\w+[^A-Za-z0-9]*\", ' ', str_)"
},
{
"code": null,
"e": 2799,
"s": 2624,
"text": "This expression means to substitute all words that do not start with the alphanumeric characters with space. These words may or may not end with a non-alphanumeric character."
},
{
"code": null,
"e": 3095,
"s": 2799,
"text": "This is a step specific to this dataset. Because our dataset contains tweets about products, there’s a lot of references to the actual brand and product names. We don’t want the overall distribution of sentiment per different brands or products to bias our model, so we will remove some of them."
},
{
"code": null,
"e": 3284,
"s": 3095,
"text": "def remove_brandwords(str_): p = '''#?(iphone|ipad|sxsw|hcsm|google|apple|cisco|austin| atari|intel|mac|pc|blackberry|android|linux|ubuntu)[a-z0-9]*''' return re.sub(p, ' ', str_)"
},
{
"code": null,
"e": 3493,
"s": 3284,
"text": "Another straightforward cleaning. Please note, that I am removing all punctuation here for the sake of simplicity, but in reality, many tweets may use punctuations to express the sentiment, such as :) and :(."
},
{
"code": null,
"e": 3714,
"s": 3493,
"text": "import stringpunctuations = string.punctuationpunctuations = punctuations + '�' + string.digitsdef remove_punctuations(str_, punctuations): table_ = str.maketrans('', '', punctuations) return str_.translate(table_)"
},
{
"code": null,
"e": 3838,
"s": 3714,
"text": "If you are curious about the above method, please take a look at my other post on how to effectively removing punctuations."
},
{
"code": null,
"e": 3861,
"s": 3838,
"text": "towardsdatascience.com"
},
{
"code": null,
"e": 4018,
"s": 3861,
"text": "Finally, we will lemmatize our vocabularies and remove stopwords using NLTK. In summary, lemmatizer will return the vocabularies into its basic form, lemma."
},
{
"code": null,
"e": 4248,
"s": 4018,
"text": "from nltk.stem import WordNetLemmatizerfrom nltk.corpus import stopwordssw = stopwords.words('english')def lemmatize(str_, sw): wnl = WordNetLemmatizer() return ' '.join([wnl.lemmatize(w) for w in x.split() if w not in sw])"
},
{
"code": null,
"e": 4296,
"s": 4248,
"text": "Read more about lemmatization or stemming here."
},
{
"code": null,
"e": 4307,
"s": 4296,
"text": "medium.com"
},
{
"code": null,
"e": 4585,
"s": 4307,
"text": "After applying the above functions to our text data, we should have a pretty clean set of words to work with. But I like to add a feature selection step here because NLP problems tend to end up with too many features when each vocabulary or bi-grams or tri-grams are tokenized."
},
{
"code": null,
"e": 4844,
"s": 4585,
"text": "Of course, there are many ways to purge features, but I like to combine words that are similar using Spacy. The basic idea is to iterate through words that only occur a few times and to find an existing word with high similarity (closer in the vector space)."
},
{
"code": null,
"e": 5203,
"s": 4844,
"text": "This custom function returns a dictionary with low occurring words as keys and their replacements as values. This step often fixes some typos and corrects where lemmatization might have missed. We can use this replacement dictionary to change words to their more frequently occurring counterparts and remove ones that are occurring less than specified times."
},
{
"code": null,
"e": 5666,
"s": 5203,
"text": "Typically our data will have a high class-imbalance problem as people are more likely to write about neutral or positive tweets than negative tweets in most cases. But for most business problems, our model must detect these negative tweets. So we will keep an eye out for these by looking at the macro-averaged f1-score as well as the precision-recall curve. Below function will plot the ROC curve and precision-recall curve and print the key evaluation metrics."
},
{
"code": null,
"e": 5832,
"s": 5666,
"text": "We can use a scikit-learn’s DummyClassifier to first see what our baseline measure would be if we were to just classify it based on how frequently each class occurs."
},
{
"code": null,
"e": 6074,
"s": 5832,
"text": "from sklearn.dummy import DummyClassifierdummy_classifier = DummyClassifier()dummy_classifier.fit(tweets_train, labels_train)y_pred_p = dummy_classifier.predict_proba(tweets_validation) y_pred = dummy_classifier.predict(tweets_validation)"
},
{
"code": null,
"e": 6224,
"s": 6074,
"text": "One of the simplest way to quantify text data is to just count the frequency of each word. The scikit-learn’s CountVectorizer can do that job easily."
},
{
"code": null,
"e": 6396,
"s": 6224,
"text": "from sklearn.feature_extraction.text import CountVectorizercountvec = CountVectorizer(ngram_range = (1, 2), min_df = 2)count_vectors = countvec.fit_transform(tweets_train)"
},
{
"code": null,
"e": 6571,
"s": 6396,
"text": "This returns the count vectors of single vocabularies and bigrams that occurs at least twice. Then we can use thes count vectors to train different classification algorithms."
},
{
"code": null,
"e": 7020,
"s": 6571,
"text": "A problem with the count vector is that it only looks at the frequency of individual word and it does not care about the context in which the word occurs. There’s no way to assess whether how important specific word in a tweet is. This is where the term frequency — inverse document frequency (TF-IDF) score comes in. TF-IDF score weighs words that are more uniquely frequent in one tweet more than words that tend to be frequent across all tweets."
},
{
"code": null,
"e": 7183,
"s": 7020,
"text": "from sklearn.feature_extraction.text import TfidfVectorizertfvec = TfidfVectorizer(ngram_range = (1, 2), min_df = 2)tf_vectors = tfvec.fit_transform(tweets_train)"
},
{
"code": null,
"e": 7283,
"s": 7183,
"text": "Now that we have two vectorized texts, we can test different classifiers for each of these vectors."
},
{
"code": null,
"e": 7528,
"s": 7283,
"text": "Naive-Bayes is one of the more popular choices for text classification. It’s a simple application of Bayes Theorem on each class and the predictors, and it assumes that each individual features (words in our case) are independent of each other."
},
{
"code": null,
"e": 8194,
"s": 7528,
"text": "So let’s say, we have a tweet that reads... “I love my new phone. It’s really fast, reliable and well-designed!”. This tweet clearly has a positive sentiment. In this case, the Naive-Bayes model assumes that the individual words like ‘love’, ‘new’, ‘really’, ‘fast’, ‘reliable’, all contribute independently to its positive class. In other words, likelihood of the tweet being positive when it uses the word ‘reliable’ does not change by other words. This does not mean that these words are independent in their appearances. Some words may tend to appear together more often than not, but that does not mean how much each word contributes to its class is dependent."
},
{
"code": null,
"e": 8376,
"s": 8194,
"text": "Naive-Bayes algorithm is simple to use and reliable when the above assumption holds. Since testing on our model requires vectorization, we can get the pipeline built into our model."
},
{
"code": null,
"e": 8841,
"s": 8376,
"text": "from sklearn.naive_bayes import MultinomialNBfrom sklearn.pipeline import Pipelinemn_nb = MultinomialNB()# change countvec to tfvec for tf-idf model = Pipeline([('vectorize', countvec), ('classify', mn_nb)])# fitting training count vectors (change to tf_vectors for tf-idf)model['classify'].fit(count_vectors, labels_train)y_pred_p = model.predict_proba(tweets_validation) y_pred = model.predict(tweets_validation)evaluating(labels_validation, y_pred, y_pred_p)"
},
{
"code": null,
"e": 9077,
"s": 8841,
"text": "Because of its assumption of independence between features, Naive Bayes overestimates confidence of how much each feature contributes to the label, making it a bad estimator. So take the probability of predictions with a grain of salt."
},
{
"code": null,
"e": 9923,
"s": 9077,
"text": "Another popular choice of the text classification algorithm is the support vector machine (SVM). Simply put, SVM finds the hyperplane that divides the classes with a maximum margin between them. The main reason SVM is preferred in text classification is that we tend to end up with a lot of features. If we were to work in such a high dimensional space that takes all our features, it would have caused a problem known as the curse of dimensionality. Basically, our space is too big that our observations start to lose their meanings. But SVM is more robust when dealing with a large number of features because it uses the kernel trick. SVM does not actually work in the high dimensions, it just looks at the pairwise distances between observations as if they are in the high dimensions. It does take a long time to do the job, but it is robust."
},
{
"code": null,
"e": 10447,
"s": 9923,
"text": "from sklearn.svm import SVCsvm_classifier = SVC(class_weight = 'balanced', probability= True)# don't forget to adjust the hyperparameters! # change countvec to tfvec for tf-idf svm_model = Pipeline([('vectorize', countvec), ('classify', svm_classifier)])# fitting training count vectors (change to tf_vectors for tf-idf)svm_model['classify'].fit(count_vectors, labels_train)y_pred_p = svm_model.predict_proba(tweets_validation) y_pred = svm_model.predict(tweets_validation)evaluating(labels_validation, y_pred, y_pred_p)"
},
{
"code": null,
"e": 10807,
"s": 10447,
"text": "When the SVM uses the kernel trick, things get into a bit of grey area, in terms of interpretability. But we can use the Shapley value to decipher how individual features are contributing to the classification. We’ll use SHAP’s friendly interface to visualize the Shapley values. For a detailed tutorial on this, I recommend reading the documentation on SHAP."
},
{
"code": null,
"e": 11139,
"s": 10807,
"text": "import shapshap.initjs() sample = shap.kmeans(count_vectors, 10)e = shap.KernelExplainer(svm_model.predict_proba, sample, link = 'logit')shap_vals = e.shap_values(X_val_tf, nsamples = 100)shap.summary_plot(shap_vals, feature_names = countvec.get_feature_names(), class_names = svm_model.classes_)"
},
{
"code": null,
"e": 11593,
"s": 11139,
"text": "Let’s dive a little bit deeper (literally). So far we worked with two different frequency measures to quantify our text data. But the frequency of each word tells only little bit of the story. Understanding language and its meaning requires understanding of syntax, or at the very least, the sequence of words. So we will look at a deep learning architecture that cares about the sequence of vocabularies: the long short-term memory (LSTM) architecture."
},
{
"code": null,
"e": 11753,
"s": 11593,
"text": "For the LSTM, we need to feed in texts as a sequence. Below steps outline the steps to run and evaluate the LSTM classifier. I explained each step in the code."
},
{
"code": null,
"e": 12121,
"s": 11753,
"text": "One downside of our LSTM model is that it only contains information that’s present within our training data, while vocabularies have semantic meanings outside of the tweets. Knowing how each vocabulary relates to each other in terms of semantic similarity may help our model. We can apply weights to our vocabularies based on the pre-trained word embedding algorithm."
},
{
"code": null,
"e": 12340,
"s": 12121,
"text": "For this, we will use GloVe, the vector representations obtained by a team at Stanford. I’m using their 200 dimensions word vectors trained on 2 Billion tweets. You will need to download the vectors from their website."
},
{
"code": null,
"e": 12454,
"s": 12340,
"text": "Then you can add the obtained vector matrix as embedding weights to the Embedding layer of our LSTM architecture."
},
{
"code": null,
"e": 12586,
"s": 12454,
"text": "# adding the bolded partmodel.add(Embedding(num_vocab, 200, weights = [vector_matrix], input_length = max_len))"
},
{
"code": null,
"e": 12722,
"s": 12586,
"text": "By using the word embedding and LSTM, my model showed 20% increase in overall accuracy and 16% increase in the macro-averaged F1-score."
},
{
"code": null,
"e": 12891,
"s": 12722,
"text": "We reviewed the fundamentals of building a sentiment analysis model using tweets data. I will end the post with some of the questions to think about and to expand upon."
},
{
"code": null,
"e": 13055,
"s": 12891,
"text": "How can we approach the same problem, if we didn’t have labels? (unsupervised learning)What are other ways to reduce dimensions while keeping its interpretability?"
},
{
"code": null,
"e": 13143,
"s": 13055,
"text": "How can we approach the same problem, if we didn’t have labels? (unsupervised learning)"
}
] |
Perl Quote-like Operators Example | The following quote-like operators are supported by the Perl language. In the following table, a {} represents any pair of delimiters you choose.
q{ }
Encloses a string with-in single quotes
Example − q{abcd} gives 'abcd'
qq{ }
Encloses a string with-in double quotes
Example − qq{abcd} gives "abcd"
qx{ }
Encloses a string with-in invert quotes
Example − qx{abcd} gives `abcd`
Try the following example to understand all the quote-like operators available in Perl. Copy and paste the following Perl program in test.pl file and execute this program.
#!/usr/local/bin/perl
$a = 10;
$b = q{a = $a};
print "Value of q{a = \$a} = $b\n";
$b = qq{a = $a};
print "Value of qq{a = \$a} = $b\n";
# unix command execution
$t = qx{date};
print "Value of qx{date} = $t\n";
When the above code is executed, it produces the following result −
Value of q{a = $a} = a = $a
Value of qq{a = $a} = a = 10
Value of qx{date} = Thu Feb 14 08:13:17 MST 2013
Print
Add Notes
Bookmark this page | [
{
"code": null,
"e": 2364,
"s": 2220,
"text": "There are following Quote-like operators supported by Perl language. In the following table, a {} represents any pair of delimiters you choose."
},
{
"code": null,
"e": 2369,
"s": 2364,
"text": "q{ }"
},
{
"code": null,
"e": 2409,
"s": 2369,
"text": "Encloses a string with-in single quotes"
},
{
"code": null,
"e": 2440,
"s": 2409,
"text": "Example − q{abcd} gives 'abcd'"
},
{
"code": null,
"e": 2446,
"s": 2440,
"text": "qq{ }"
},
{
"code": null,
"e": 2486,
"s": 2446,
"text": "Encloses a string with-in double quotes"
},
{
"code": null,
"e": 2518,
"s": 2486,
"text": "Example − qq{abcd} gives \"abcd\""
},
{
"code": null,
"e": 2524,
"s": 2518,
"text": "qx{ }"
},
{
"code": null,
"e": 2564,
"s": 2524,
"text": "Encloses a string with-in invert quotes"
},
{
"code": null,
"e": 2596,
"s": 2564,
"text": "Example − qx{abcd} gives `abcd`"
},
{
"code": null,
"e": 2768,
"s": 2596,
"text": "Try the following example to understand all the quote-like operators available in Perl. Copy and paste the following Perl program in test.pl file and execute this program."
},
{
"code": null,
"e": 2984,
"s": 2768,
"text": "#!/usr/local/bin/perl\n\n$a = 10;\n \n$b = q{a = $a};\nprint \"Value of q{a = \\$a} = $b\\n\";\n\n$b = qq{a = $a};\nprint \"Value of qq{a = \\$a} = $b\\n\";\n\n# unix command execution\n$t = qx{date};\nprint \"Value of qx{date} = $t\\n\";"
},
{
"code": null,
"e": 3052,
"s": 2984,
"text": "When the above code is executed, it produces the following result −"
},
{
"code": null,
"e": 3159,
"s": 3052,
"text": "Value of q{a = $a} = a = $a\nValue of qq{a = $a} = a = 10\nValue of qx{date} = Thu Feb 14 08:13:17 MST 2013\n"
},
{
"code": null,
"e": 3194,
"s": 3159,
"text": "\n 46 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 3208,
"s": 3194,
"text": " Devi Killada"
},
{
"code": null,
"e": 3243,
"s": 3208,
"text": "\n 11 Lectures \n 1.5 hours \n"
},
{
"code": null,
"e": 3263,
"s": 3243,
"text": " Harshit Srivastava"
},
{
"code": null,
"e": 3296,
"s": 3263,
"text": "\n 30 Lectures \n 6 hours \n"
},
{
"code": null,
"e": 3312,
"s": 3296,
"text": " TELCOMA Global"
},
{
"code": null,
"e": 3345,
"s": 3312,
"text": "\n 24 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 3362,
"s": 3345,
"text": " Mohammad Nauman"
},
{
"code": null,
"e": 3395,
"s": 3362,
"text": "\n 68 Lectures \n 7 hours \n"
},
{
"code": null,
"e": 3418,
"s": 3395,
"text": " Stone River ELearning"
},
{
"code": null,
"e": 3453,
"s": 3418,
"text": "\n 58 Lectures \n 6.5 hours \n"
},
{
"code": null,
"e": 3476,
"s": 3453,
"text": " Stone River ELearning"
},
{
"code": null,
"e": 3483,
"s": 3476,
"text": " Print"
},
{
"code": null,
"e": 3494,
"s": 3483,
"text": " Add Notes"
}
] |
QlikView - Aggregate Functions | QlikView Aggregate functions are used to produce aggregate data from the rows of the table. The functions are applied to the columns when creating the load script. Given below is a sample list of Aggregate functions. We also need to apply the Group by clause appropriately when applying the aggregate functions.
SUM gives the sum of the numeric values of the column.
AVG gives the average of the numeric values of the column.
MAX gives the maximum of the numeric values of the column.
MIN gives the minimum of the numeric values of the column.
Consider the following data stored as product_sales.csv in the local system. It represents the sales figures for different product lines and product category in a store.
Product_Line,Product_category,Quantity,Value
Sporting Goods,Outdoor Recreation,12,5642
Food, Beverages & Tobacco,38,2514
Apparel & Accessories,Clothing,54,2365
Apparel & Accessories,Costumes & Accessories,29,4487
Sporting Goods,Athletics,11,812
Health & Beauty,Personal Care,21,6912
Arts & Entertainment,Hobbies & Creative Arts,58,5201
Arts & Entertainment,Paintings,73,8451
Arts & Entertainment,Musical Instruments,41,1245
Hardware,Tool Accessories,2,456
Home & Garden,Bathroom Accessories,36,241
Food,Drinks,54,1247
Home & Garden,Lawn & Garden,29,5462
Office Supplies,Presentation Supplies,22,577
Hardware,Blocks,53,548
Baby & Toddler,Diapering,19,1247
We open the script editor in a new QlikView document using Control+E. The following script loads the required data. After creating this script, press Control+R to reload the data into the QlikView document.
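A minimal load script consistent with the CSV file above (the file path matches the one used in the later examples and is an assumption) could look like this −

LOAD Product_Line,
     Product_category,
     Quantity,
     Value
FROM
[E:\Qlikview\data\product_sales.csv]
(txt, codepage is 1252, embedded labels, delimiter is ',', msq);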
Let us create a Table Box sheet object to show the data generated by the Aggregate function. Go to the menu Layout → New Sheet Object → Table Box. The following window appears in which we mention the Title of the table and the select the required fields to be displayed. Clicking OK displays the data from the CSV file in the QlikView Table
Box as shown below.
Given below is the load script to find the sum of the sales quantity and sales value across the Product Lines and product categories.
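A script along the following lines, modeled on the average and max/min scripts further below (the exact grouping columns are an assumption), produces that result −

# Sum of sales Quantity and Value in each Product Line and category.
LOAD Product_Line,
     Product_category,
     sum(Quantity),
     sum(Value)
FROM
[E:\Qlikview\data\product_sales.csv]
(txt, codepage is 1252, embedded labels, delimiter is ',', msq)
Group by Product_Line, Product_category;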
Click OK and press Control+R to reload the data into QlikView document. Now follow the same steps as given above in − Creating Sheet Objects to create a QlikView Table Box for displaying the result of the script as shown below.
Given below is the load script to create the average of the sales quantity and sales value across each Product Line.
# Average sales of Quantity and value in each Product Line.
LOAD Product_Line,
avg(Quantity),
avg(Value)
FROM
[E:\Qlikview\data\product_sales.csv]
(txt, codepage is 1252, embedded labels, delimiter is ',', msq)
Group by Product_Line;
Click OK and press Control+R to reload the data into QlikView document. Now follow the same steps as given above in − Creating Sheet Objects to create a QlikView Table Box for displaying the result of the script as shown below.
Given below is the load script to create the maximum and minimum of the sales quantity across each Product Line.
# Maximum and Minimum sales in each product Line.
LOAD Product_Line,
max(Quantity) as MaxQuantity,
min(Quantity) as MinQuantity
FROM
[E:\Qlikview\data\product_sales.csv]
(txt, codepage is 1252, embedded labels, delimiter is ',', msq)
Group by Product_Line;
Click OK and Control+R to reload the data into QlikView document. Now follow the same steps as above in − Creating Sheet Objects to create a QlikView Table Box for displaying the result of the script as shown below.
Print
Add Notes
Bookmark this page | [
{
"code": null,
"e": 3232,
"s": 2920,
"text": "QlikView Aggregate functions are used to produce aggregate data from the rows of the table. The functions are applied to the columns when creating the load script. Given below is a sample list of Aggregate functions. We also need to apply the Group by clause appropriately when applying the aggregate functions."
},
{
"code": null,
"e": 3287,
"s": 3232,
"text": "SUM gives the sum of the numeric values of the column."
},
{
"code": null,
"e": 3346,
"s": 3287,
"text": "AVG gives the average of the numeric values of the column."
},
{
"code": null,
"e": 3405,
"s": 3346,
"text": "MAX gives the maximum of the numeric values of the column."
},
{
"code": null,
"e": 3464,
"s": 3405,
"text": "MIN gives the minimum of the numeric values of the column."
},
{
"code": null,
"e": 3634,
"s": 3464,
"text": "Consider the following data stored as product_sales.csv in the local system. It represents the sales figures for different product lines and product category in a store."
},
{
"code": null,
"e": 4290,
"s": 3634,
"text": "Product_Line,Product_category,Quantity,Value\nSporting Goods,Outdoor Recreation,12,5642\nFood, Beverages & Tobacco,38,2514\nApparel & Accessories,Clothing,54,2365\nApparel & Accessories,Costumes & Accessories,29,4487\nSporting Goods,Athletics,11,812\nHealth & Beauty,Personal Care,21,6912\nArts & Entertainment,Hobbies & Creative Arts,58,5201\nArts & Entertainment,Paintings,73,8451\nArts & Entertainment,Musical Instruments,41,1245\nHardware,Tool Accessories,2,456\nHome & Garden,Bathroom Accessories,36,241\nFood,Drinks,54,1247\nHome & Garden,Lawn & Garden,29,5462\nOffice Supplies,Presentation Supplies,22,577\nHardware,Blocks,53,548\nBaby & Toddler,Diapering,19,1247\n"
},
{
"code": null,
"e": 4513,
"s": 4290,
"text": "We open the script editor in a new QlikView document using Control+E. The following\ncode creates the required tables as inline data. After creating this script press control+R to reload the data into the QlikView document."
},
{
"code": null,
"e": 4874,
"s": 4513,
"text": "Let us create a Table Box sheet object to show the data generated by the Aggregate function. Go to the menu Layout → New Sheet Object → Table Box. The following window appears in which we mention the Title of the table and the select the required fields to be displayed. Clicking OK displays the data from the CSV file in the QlikView Table\nBox as shown below."
},
{
"code": null,
"e": 5008,
"s": 4874,
"text": "Given below is the load script to find the sum of the sales quantity and sales value across the Product Lines and product categories."
},
{
"code": null,
"e": 5236,
"s": 5008,
"text": "Click OK and press Control+R to reload the data into QlikView document. Now follow the same steps as given above in − Creating Sheet Objects to create a QlikView Table Box for displaying the result of the script as shown below."
},
{
"code": null,
"e": 5353,
"s": 5236,
"text": "Given below is the load script to create the average of the sales quantity and sales value across each Product Line."
},
{
"code": null,
"e": 5595,
"s": 5353,
"text": "# Average sales of Quantity and value in each Product Line.\nLOAD Product_Line, \n avg(Quantity),\n\t avg(Value)\nFROM\n[E:\\Qlikview\\data\\product_sales.csv]\n(txt, codepage is 1252, embedded labels, delimiter is ',', msq)\nGroup by Product_Line;"
},
{
"code": null,
"e": 5823,
"s": 5595,
"text": "Click OK and press Control+R to reload the data into QlikView document. Now follow the same steps as given above in − Creating Sheet Objects to create a QlikView Table Box for displaying the result of the script as shown below."
},
{
"code": null,
"e": 5936,
"s": 5823,
"text": "Given below is the load script to create the maximum and minimum of the sales quantity across each Product Line."
},
{
"code": null,
"e": 6203,
"s": 5936,
"text": "# Maximum and Minimum sales in each product Line.\nLOAD Product_Line,\n max(Quantity) as MaxQuantity,\n min(Quantity) as MinQuantity\nFROM\n[E:\\Qlikview\\data\\product_sales.csv]\n(txt, codepage is 1252, embedded labels, delimiter is ',', msq)\nGroup by Product_Line;"
},
{
"code": null,
"e": 6419,
"s": 6203,
"text": "Click OK and Control+R to reload the data into QlikView document. Now follow the same steps as above in − Creating Sheet Objects to create a QlikView Table Box for displaying the result of the script as shown below."
},
{
"code": null,
"e": 6452,
"s": 6419,
"text": "\n 70 Lectures \n 5 hours \n"
},
{
"code": null,
"e": 6465,
"s": 6452,
"text": " Arthur Fong"
},
{
"code": null,
"e": 6472,
"s": 6465,
"text": " Print"
},
{
"code": null,
"e": 6483,
"s": 6472,
"text": " Add Notes"
}
] |
HTML DOM Input DatetimeLocal step Property | The HTML DOM Input DatetimeLocal step property determines the legal intervals for seconds or milliseconds.
Following is the syntax −
Returning number value
inputDatetimeLocalObject.step
Setting step attribute to a number value
inputDatetimeLocalObject.step = number
Parameter number values − a whole number sets the legal interval in seconds (for example, 10), while a fractional value (for example, .10) allows millisecond precision.
Let us see an example of Input DatetimeLocal step property −
Live Demo
<!DOCTYPE html>
<html>
<head>
<title>Input DatetimeLocal step</title>
<style>
form {
width:70%;
margin: 0 auto;
text-align: center;
}
* {
padding: 2px;
margin:5px;
}
input[type="button"] {
border-radius: 10px;
}
</style>
</head>
<body>
<form>
<fieldset>
<legend>Datetime-Local-step</legend>
<label for="datetimeLocalSelect">Local Date Time :
<input type="datetime-local" id="datetimeLocalSelect" step="2">
</label>
<input type="button" onclick="changeStep('10')" value="Step Seconds to 10">
<input type="button" onclick="changeStep('.10')" value="Step milliseconds to .10">
<div id="divDisplay"></div>
</fieldset>
</form>
<script>
var divDisplay = document.getElementById("divDisplay");
var inputDatetimeLocal = document.getElementById("datetimeLocalSelect");
function changeStep(myStep) {
inputDatetimeLocal.step = myStep;
if(inputDatetimeLocal.step.indexOf('.') === -1)
divDisplay.textContent = 'Seconds step: '+inputDatetimeLocal.step;
else
divDisplay.textContent = 'Milli-seconds step: '+inputDatetimeLocal.step;
}
</script>
</body>
</html>
This will produce the following output −
Clicking “Step Seconds to 10” button
Clicking “Step milliseconds to .10” button − | [
{
"code": null,
"e": 1169,
"s": 1062,
"text": "The HTML DOM Input DatetimeLocal step property determines the legal intervals for seconds or milliseconds."
},
{
"code": null,
"e": 1195,
"s": 1169,
"text": "Following is the syntax −"
},
{
"code": null,
"e": 1218,
"s": 1195,
"text": "Returning number value"
},
{
"code": null,
"e": 1248,
"s": 1218,
"text": "inputDatetimeLocalObject.step"
},
{
"code": null,
"e": 1289,
"s": 1248,
"text": "Setting step attribute to a number value"
},
{
"code": null,
"e": 1328,
"s": 1289,
"text": "inputDatetimeLocalObject.step = number"
},
{
"code": null,
"e": 1354,
"s": 1328,
"text": "Parameter number values −"
},
{
"code": null,
"e": 1415,
"s": 1354,
"text": "Let us see an example of Input DatetimeLocal step property −"
},
{
"code": null,
"e": 1426,
"s": 1415,
"text": " Live Demo"
},
{
"code": null,
"e": 2573,
"s": 1426,
"text": "<!DOCTYPE html>\n<html>\n<head>\n<title>Input DatetimeLocal step</title>\n<style>\n form {\n width:70%;\n margin: 0 auto;\n text-align: center;\n }\n * {\n padding: 2px;\n margin:5px;\n }\n input[type=\"button\"] {\n border-radius: 10px;\n }\n</style>\n</head>\n<body>\n<form>\n<fieldset>\n<legend>Datetime-Local-step</legend>\n<label for=\"datetimeLocalSelect\">Local Date Time :\n<input type=\"datetime-local\" id=\"datetimeLocalSelect\" step=\"2\">\n</label>\n<input type=\"button\" onclick=\"changeStep('10')\" value=\"Step Seconds to 10\">\n<input type=\"button\" onclick=\"changeStep('.10')\" value=\"Step milliseconds to .10\">\n<div id=\"divDisplay\"></div>\n</fieldset>\n</form>\n<script>\n var divDisplay = document.getElementById(\"divDisplay\");\n var inputDatetimeLocal = document.getElementById(\"datetimeLocalSelect\");\n function changeStep(myStep) {\n inputDatetimeLocal.step = myStep;\n if(inputDatetimeLocal.step.indexOf('.') === -1)\n divDisplay.textContent = 'Seconds step: '+inputDatetimeLocal.step;\n else\n divDisplay.textContent = 'Milli-seconds step: '+inputDatetimeLocal.step;\n }\n</script>\n</body>\n</html>"
},
{
"code": null,
"e": 2614,
"s": 2573,
"text": "This will produce the following output −"
},
{
"code": null,
"e": 2651,
"s": 2614,
"text": "Clicking “Step Seconds to 10” button"
},
{
"code": null,
"e": 2695,
"s": 2651,
"text": "Clicking “Step millseconds to .10” button −"
}
] |
How to list all collections in the Mongo shell? | To list all collections in Mongo shell, you can use the function getCollectionNames().
The syntax is as follows −
db.getCollectionNames();
You can also use another command, show collections. The syntax is as follows −
show collections;
To list all collections in Mongo, use the above two functions. The query is as follows −
> db.getCollectionNames();
The following is the output −
[
"ConvertStringToDateDemo",
"IdUpdateDemo",
"ProductsInformation",
"addFieldDemo",
"addNewFieldToEveryDocument",
"arrayInnerElementsDemo",
"arrayLengthGreaterThanOne",
"arrayOfArraysDemo",
"caseInsensitiveDemo",
"changeDataType",
"changeType",
"charactersAllowedDemo",
"charactersDemo",
"checkFieldContainsStringDemo",
"checkSequenceDemo",
"combinationOfArrayDemo",
"conditionalSumDemo",
"convertStringToNumberDemo",
"copyThisCollectionToSampleDatabaseDemo",
"countDemo",
"createSequenceDemo",
"distinctCountValuesDemo",
"distinctRecordDemo",
"distinctWithMultipleKeysDemo",
"employeeInformation",
"filterArray",
"findAllDuplicateKeyDocumentDemo",
"findByMultipleArrayDemo",
"findDuplicateByKeyDemo",
"findDuplicateRecordsDemo",
"findSpecificValue",
"findValueInArrayWithMultipleCriteriaDemo",
"getLastNRecordsDemo",
"getParticularElementFromArrayDemo",
"getPartuclarElement",
"getSizeOfArray",
"groupByDateDemo",
"incrementValueInNestedArrayDemo",
"insertIfNotExistsDemo",
"nestedArrayDemo",
"notLikeOpeartorDemo",
"regExpOnIntegerDemo",
"removeArrayDemo",
"removeArrayElement",
"removeArrayElements",
"removeDuplicateDocumentDemo",
"removeElementFromDoublyNestedArrayDemo",
"removeFieldCompletlyDemo",
"removeObject",
"renameFieldDemo",
"reverseRegexDemo",
"searchArrayDemo",
"selectSingleFieldDemo",
"singleFieldDemo",
"sortDemo",
"sortInnerArrayDemo",
"sourceCollection",
"stringFieldLengthDemo",
"test.js",
"uniqueIndexOnArrayDemo",
"unwindOperatorDemo",
"updateExactField",
"updateIdDemo",
"updateObjects"
]
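Since getCollectionNames() returns an ordinary JavaScript array, the result can also be post-processed directly in the shell. The following is only a minimal sketch (the name prefix used for filtering is an illustration, not part of the original output) −
> db.getCollectionNames().filter(function (name) {
   // keep only the collections whose name starts with "remove"
   return name.indexOf("remove") === 0;
});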
Here is the second query to display all collections from Mongo. The query is as follows −
> show collections;
The following is the output −
ConvertStringToDateDemo
IdUpdateDemo
ProductsInformation
addFieldDemo
addNewFieldToEveryDocument
arrayInnerElementsDemo
arrayLengthGreaterThanOne
arrayOfArraysDemo
caseInsensitiveDemo
changeDataType
changeType
charactersAllowedDemo
charactersDemo
checkFieldContainsStringDemo
checkSequenceDemo
combinationOfArrayDemo
conditionalSumDemo
convertStringToNumberDemo
copyThisCollectionToSampleDatabaseDemo
countDemo
createSequenceDemo
distinctCountValuesDemo
distinctRecordDemo
distinctWithMultipleKeysDemo
employeeInformation
filterArray
findAllDuplicateKeyDocumentDemo
findByMultipleArrayDemo
findDuplicateByKeyDemo
findDuplicateRecordsDemo
findSpecificValue
findValueInArrayWithMultipleCriteriaDemo
getLastNRecordsDemo
getParticularElementFromArrayDemo
getPartuclarElement
getSizeOfArray
groupByDateDemo
incrementValueInNestedArrayDemo
insertIfNotExistsDemo
nestedArrayDemo
notLikeOpeartorDemo
regExpOnIntegerDemo
removeArrayDemo
removeArrayElement
removeArrayElements
removeDuplicateDocumentDemo
removeElementFromDoublyNestedArrayDemo
removeFieldCompletlyDemo
removeObject
renameFieldDemo
reverseRegexDemo
searchArrayDemo
selectSingleFieldDemo
singleFieldDemo
sortDemo
sortInnerArrayDemo
sourceCollection
stringFieldLengthDemo
test.js
uniqueIndexOnArrayDemo
unwindOperatorDemo
updateExactField
updateIdDemo
updateObjects | [
{
"code": null,
"e": 1149,
"s": 1062,
"text": "To list all collections in Mongo shell, you can use the function getCollectionNames()."
},
{
"code": null,
"e": 1176,
"s": 1149,
"text": "The syntax is as follows −"
},
{
"code": null,
"e": 1201,
"s": 1176,
"text": "db.getCollectionNames();"
},
{
"code": null,
"e": 1278,
"s": 1201,
"text": "You can use another command which is collections. The syntax is as follows −"
},
{
"code": null,
"e": 1296,
"s": 1278,
"text": "show collections;"
},
{
"code": null,
"e": 1385,
"s": 1296,
"text": "To list all collections in Mongo, use the above two functions. The query is as follows −"
},
{
"code": null,
"e": 1412,
"s": 1385,
"text": "> db.getCollectionNames();"
},
{
"code": null,
"e": 1442,
"s": 1412,
"text": "The following is the output −"
},
{
"code": null,
"e": 3149,
"s": 1442,
"text": "[\n \"ConvertStringToDateDemo\",\n \"IdUpdateDemo\",\n \"ProductsInformation\",\n \"addFieldDemo\",\n \"addNewFieldToEveryDocument\",\n \"arrayInnerElementsDemo\",\n \"arrayLengthGreaterThanOne\",\n \"arrayOfArraysDemo\",\n \"caseInsensitiveDemo\",\n \"changeDataType\",\n \"changeType\",\n \"charactersAllowedDemo\",\n \"charactersDemo\",\n \"checkFieldContainsStringDemo\",\n \"checkSequenceDemo\",\n \"combinationOfArrayDemo\",\n \"conditionalSumDemo\",\n \"convertStringToNumberDemo\",\n \"copyThisCollectionToSampleDatabaseDemo\",\n \"countDemo\",\n \"createSequenceDemo\",\n \"distinctCountValuesDemo\",\n \"distinctRecordDemo\",\n \"distinctWithMultipleKeysDemo\",\n \"employeeInformation\",\n \"filterArray\",\n \"findAllDuplicateKeyDocumentDemo\",\n \"findByMultipleArrayDemo\",\n \"findDuplicateByKeyDemo\",\n \"findDuplicateRecordsDemo\",\n \"findSpecificValue\",\n \"findValueInArrayWithMultipleCriteriaDemo\",\n \"getLastNRecordsDemo\",\n \"getParticularElementFromArrayDemo\",\n \"getPartuclarElement\",\n \"getSizeOfArray\",\n \"groupByDateDemo\",\n \"incrementValueInNestedArrayDemo\",\n \"insertIfNotExistsDemo\",\n \"nestedArrayDemo\",\n \"notLikeOpeartorDemo\",\n \"regExpOnIntegerDemo\",\n \"removeArrayDemo\",\n \"removeArrayElement\",\n \"removeArrayElements\",\n \"removeDuplicateDocumentDemo\",\n \"removeElementFromDoublyNestedArrayDemo\",\n \"removeFieldCompletlyDemo\",\n \"removeObject\",\n \"renameFieldDemo\",\n \"reverseRegexDemo\",\n \"searchArrayDemo\",\n \"selectSingleFieldDemo\",\n \"singleFieldDemo\",\n \"sortDemo\",\n \"sortInnerArrayDemo\",\n \"sourceCollection\",\n \"stringFieldLengthDemo\",\n \"test.js\",\n \"uniqueIndexOnArrayDemo\",\n \"unwindOperatorDemo\",\n \"updateExactField\",\n \"updateIdDemo\",\n \"updateObjects\"\n]"
},
{
"code": null,
"e": 3239,
"s": 3149,
"text": "Here is the second query to display all collections from Mongo. The query is as follows −"
},
{
"code": null,
"e": 3259,
"s": 3239,
"text": "> show collections;"
},
{
"code": null,
"e": 3289,
"s": 3259,
"text": "The following is the output −"
},
{
"code": null,
"e": 4609,
"s": 3289,
"text": "ConvertStringToDateDemo\nIdUpdateDemo\nProductsInformation\naddFieldDemo\naddNewFieldToEveryDocument\narrayInnerElementsDemo\narrayLengthGreaterThanOne\narrayOfArraysDemo\ncaseInsensitiveDemo\nchangeDataType\nchangeType\ncharactersAllowedDemo\ncharactersDemo\ncheckFieldContainsStringDemo\ncheckSequenceDemo\ncombinationOfArrayDemo\nconditionalSumDemo\nconvertStringToNumberDemo\ncopyThisCollectionToSampleDatabaseDemo\ncountDemo\ncreateSequenceDemo\ndistinctCountValuesDemo\ndistinctRecordDemo\ndistinctWithMultipleKeysDemo\nemployeeInformation\nfilterArray\nfindAllDuplicateKeyDocumentDemo\nfindByMultipleArrayDemo\nfindDuplicateByKeyDemo\nfindDuplicateRecordsDemo\nfindSpecificValue\nfindValueInArrayWithMultipleCriteriaDemo\ngetLastNRecordsDemo\ngetParticularElementFromArrayDemo\ngetPartuclarElement\ngetSizeOfArray\ngroupByDateDemo\nincrementValueInNestedArrayDemo\ninsertIfNotExistsDemo\nnestedArrayDemo\nnotLikeOpeartorDemo\nregExpOnIntegerDemo\nremoveArrayDemo\nremoveArrayElement\nremoveArrayElements\nremoveDuplicateDocumentDemo\nremoveElementFromDoublyNestedArrayDemo\nremoveFieldCompletlyDemo\nremoveObject\nrenameFieldDemo\nreverseRegexDemo\nsearchArrayDemo\nselectSingleFieldDemo\nsingleFieldDemo\nsortDemo\nsortInnerArrayDemo\nsourceCollection\nstringFieldLengthDemo\ntest.js\nuniqueIndexOnArrayDemo\nunwindOperatorDemo\nupdateExactField\nupdateIdDemo\nupdateObjects"
}
] |
Swift - Properties | Swift 4 language provides properties for class, enumeration or structure to associate values. Properties can be further classified into Stored properties and Computed properties.
Difference between Stored Properties and Computed Properties
Both Stored and Computed properties are associated with instances type. When the properties are associated with its type values then it is defined as 'Type Properties'. Stored and computed properties are usually associated with instances of a particular type. However, properties can also be associated with the type itself. Such properties are known as type properties. Property observers are also used
To observe the value of the stored properties
To observe the property of inherited subclass derived from superclass
Swift 4 introduces Stored Property concept to store the instances of constants and variables. Stored properties of constants are defined by the 'let' keyword and Stored properties of variables are defined by the 'var' keyword.
During definition, a stored property can provide a 'default value'
During initialization, the user can set and modify the initial value
struct Number {
var digits: Int
let pi = 3.1415
}
var n = Number(digits: 12345)
n.digits = 67
print("\(n.digits)")
print("\(n.pi)")
When we run the above program using playground, we get the following result −
67
3.1415
Consider the following line in the above code −
let pi = 3.1415
Here, the variable pi is initialized as a stored property with the default value 3.1415. So, whenever the instance is referred to, it will hold the value 3.1415.
Another way to use stored properties is to declare them as constants with 'let'; such a property becomes a 'Stored Property of a Constant' and cannot be modified after it is initialized.
struct Number {
var digits: Int
let numbers = 3.1415
}
var n = Number(digits: 12345)
n.digits = 67
print("\(n.digits)")
print("\(n.numbers)")
n.numbers = 8.7
When we run the above program using playground, we get the following result −
error: cannot assign to 'numbers' in 'n'
n.numbers = 8.7
Instead of reinitializing 'numbers' to 8.7, the compiler returns an error message indicating that 'numbers' is declared as a constant.
Swift 4 provides a flexible property called a 'Lazy Stored Property', whose initial value is not calculated until the property is used for the first time. The 'lazy' modifier is placed before the variable declaration to mark it as a lazy stored property.
Lazy Properties are used −
To delay object creation.
When the property is dependent on other parts of a class, that are not known yet
class sample {
lazy var no = number() // `var` declaration is required.
}
class number {
var name = "Swift 4"
}
var firstsample = sample()
print(firstsample.no.name)
When we run the above program using playground, we get the following result −
Swift 4
In Objective C, Stored properties also have instance variables for back up purposes to store the values declared in stored property.
Swift 4 integrates both these concepts into a single 'stored property' declaration. Instead of having a corresponding instance variable and back up value 'stored property' contains all integrated information defined in a single location about the variables property by variable name, data type and memory management functionalities.
Rather than storing values, computed properties provide a getter and an optional setter to retrieve and set other properties and values indirectly.
class sample {
var no1 = 0.0, no2 = 0.0
var length = 300.0, breadth = 150.0
var middle: (Double, Double) {
get {
return (length / 2, breadth / 2)
}
set(axis){
no1 = axis.0 - (length / 2)
no2 = axis.1 - (breadth / 2)
}
}
}
var result = sample()
print(result.middle)
result.middle = (0.0, 10.0)
print(result.no1)
print(result.no2)
When we run the above program using playground, we get the following result −
(150.0, 75.0)
-150.0
-65.0
When a computed property's setter does not define a name for the new value, the default name 'newValue' is used for that variable.
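For instance, the following is only a minimal sketch (the class and property names are made up for illustration) of a setter that relies on the default newValue name.
class Rectangle {
   var length = 10.0

   // Computed property: the setter omits a parameter name,
   // so Swift supplies the default name 'newValue'
   var half: Double {
      get {
         return length / 2
      }
      set {
         length = newValue * 2
      }
   }
}

var r = Rectangle()
r.half = 25.0
print(r.length) // prints 50.0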
A read-only property in computed property is defined as a property with getter but no setter. It is always used to return a value. The variables are further accessed through a '.' Syntax but cannot be set to another value.
class film {
var head = ""
var duration = 0.0
var metaInfo: [String:String] {
return [
"head": self.head,
"duration":"\(self.duration)"
]
}
}
var movie = film()
movie.head = "Swift 4 Properties"
movie.duration = 3.09
print(movie.metaInfo["head"]!)
print(movie.metaInfo["duration"]!)
When we run the above program using playground, we get the following result −
Swift 4 Properties
3.09
In Swift 4, Property Observers are used to observe and respond to changes in property values. Property observers are called every time a property's value is set. Except for lazy stored properties, we can also add property observers to an 'inherited' property by 'overriding' it.
Property Observers can be defined by either
Before Storing the value - willSet
After Storing the new value - didSet
willSet and didSet observers are not called when a property is set in an initializer.
class Samplepgm {
var counter: Int = 0 {
willSet(newTotal){
print("Total Counter is: \(newTotal)")
}
didSet {
if counter > oldValue {
print("Newly Added Counter \(counter - oldValue)")
}
}
}
}
let NewCounter = Samplepgm()
NewCounter.counter = 100
NewCounter.counter = 800
When we run the above program using playground, we get the following result −
Total Counter is: 100
Newly Added Counter 100
Total Counter is: 800
Newly Added Counter 700
Local and global variables can also be declared as computed variables or be given property observers.
Type properties are defined as part of the type's definition, within its curly braces {}, and their scope is limited to that type. Type properties of value types are defined with the 'static' keyword, and of class types with the 'class' keyword.
struct Structname {
static var storedTypeProperty = " "
static var computedTypeProperty: Int {
// return an Int value here
}
}
enum Enumname {
static var storedTypeProperty = " "
static var computedTypeProperty: Int {
// return an Int value here
}
}
class Classname {
class var computedTypeProperty: Int {
// return an Int value here
}
}
Just like instance properties, type properties are queried and set with the '.' syntax, but on the type itself rather than on an instance.
struct StudMarks {
static let markCount = 97
static var totalCount = 0
var InternalMarks: Int = 0 {
didSet {
if InternalMarks > StudMarks.markCount {
InternalMarks = StudMarks.markCount
}
if InternalMarks > StudMarks.totalCount {
StudMarks.totalCount = InternalMarks
}
}
}
}
var stud1Mark1 = StudMarks()
var stud1Mark2 = StudMarks()
stud1Mark1.InternalMarks = 98
print(stud1Mark1.InternalMarks)
stud1Mark2.InternalMarks = 87
print(stud1Mark2.InternalMarks)
When we run the above program using playground, we get the following result −
97
87 | [
{
"code": null,
"e": 2432,
"s": 2253,
"text": "Swift 4 language provides properties for class, enumeration or structure to associate values. Properties can be further classified into Stored properties and Computed properties."
},
{
"code": null,
"e": 2493,
"s": 2432,
"text": "Difference between Stored Properties and Computed Properties"
},
{
"code": null,
"e": 2897,
"s": 2493,
"text": "Both Stored and Computed properties are associated with instances type. When the properties are associated with its type values then it is defined as 'Type Properties'. Stored and computed properties are usually associated with instances of a particular type. However, properties can also be associated with the type itself. Such properties are known as type properties. Property observers are also used"
},
{
"code": null,
"e": 2943,
"s": 2897,
"text": "To observe the value of the stored properties"
},
{
"code": null,
"e": 3013,
"s": 2943,
"text": "To observe the property of inherited subclass derived from superclass"
},
{
"code": null,
"e": 3240,
"s": 3013,
"text": "Swift 4 introduces Stored Property concept to store the instances of constants and variables. Stored properties of constants are defined by the 'let' keyword and Stored properties of variables are defined by the 'var' keyword."
},
{
"code": null,
"e": 3299,
"s": 3240,
"text": "During definition Stored property provides 'default value'"
},
{
"code": null,
"e": 3375,
"s": 3299,
"text": "During Initialization the user can initialize and modify the initial values"
},
{
"code": null,
"e": 3515,
"s": 3375,
"text": "struct Number {\n var digits: Int\n let pi = 3.1415\n}\n\nvar n = Number(digits: 12345)\nn.digits = 67\n\nprint(\"\\(n.digits)\")\nprint(\"\\(n.pi)\")"
},
{
"code": null,
"e": 3593,
"s": 3515,
"text": "When we run the above program using playground, we get the following result −"
},
{
"code": null,
"e": 3604,
"s": 3593,
"text": "67\n3.1415\n"
},
{
"code": null,
"e": 3652,
"s": 3604,
"text": "Consider the following line in the above code −"
},
{
"code": null,
"e": 3669,
"s": 3652,
"text": "let pi = 3.1415\n"
},
{
"code": null,
"e": 3839,
"s": 3669,
"text": "Here, the variable pi is initialized as a stored property value with the instance pi = 3.1415. So, whenever the instance is referred it will hold the value 3.1415 alone."
},
{
"code": null,
"e": 4009,
"s": 3839,
"text": "Another method to have stored property is to have as constant structures. So the whole instance of the structures will be considered as 'Stored Properties of Constants'."
},
{
"code": null,
"e": 4175,
"s": 4009,
"text": "struct Number {\n var digits: Int\n let numbers = 3.1415\n}\n\nvar n = Number(digits: 12345)\nn.digits = 67\n\nprint(\"\\(n.digits)\")\nprint(\"\\(n.numbers)\")\nn.numbers = 8.7"
},
{
"code": null,
"e": 4253,
"s": 4175,
"text": "When we run the above program using playground, we get the following result −"
},
{
"code": null,
"e": 4311,
"s": 4253,
"text": "error: cannot assign to 'numbers' in 'n'\nn.numbers = 8.7\n"
},
{
"code": null,
"e": 4443,
"s": 4311,
"text": "Instead of reinitializing the 'number' to 8.7 it will return an error message indicating that the 'number' is declared as constant."
},
{
"code": null,
"e": 4701,
"s": 4443,
"text": "Swift 4 provides a flexible property called 'Lazy Stored Property' where it won't calculate the initial values when the variable is initialized for the first time. 'lazy' modifier is used before the variable declaration to have it as a lazy stored property."
},
{
"code": null,
"e": 4728,
"s": 4701,
"text": "Lazy Properties are used −"
},
{
"code": null,
"e": 4754,
"s": 4728,
"text": "To delay object creation."
},
{
"code": null,
"e": 4835,
"s": 4754,
"text": "When the property is dependent on other parts of a class, that are not known yet"
},
{
"code": null,
"e": 5012,
"s": 4835,
"text": "class sample {\n lazy var no = number() // `var` declaration is required.\n}\n\nclass number {\n var name = \"Swift 4\"\n}\n\nvar firstsample = sample()\nprint(firstsample.no.name)"
},
{
"code": null,
"e": 5090,
"s": 5012,
"text": "When we run the above program using playground, we get the following result −"
},
{
"code": null,
"e": 5099,
"s": 5090,
"text": "Swift 4\n"
},
{
"code": null,
"e": 5232,
"s": 5099,
"text": "In Objective C, Stored properties also have instance variables for back up purposes to store the values declared in stored property."
},
{
"code": null,
"e": 5565,
"s": 5232,
"text": "Swift 4 integrates both these concepts into a single 'stored property' declaration. Instead of having a corresponding instance variable and back up value 'stored property' contains all integrated information defined in a single location about the variables property by variable name, data type and memory management functionalities."
},
{
"code": null,
"e": 5716,
"s": 5565,
"text": "Rather than storing the values computed properties provide a getter and an optional setter to retrieve and set other properties and values indirectly."
},
{
"code": null,
"e": 6118,
"s": 5716,
"text": "class sample {\n var no1 = 0.0, no2 = 0.0\n var length = 300.0, breadth = 150.0\n\n var middle: (Double, Double) {\n get {\n return (length / 2, breadth / 2)\n }\n \n set(axis){\n no1 = axis.0 - (length / 2)\n no2 = axis.1 - (breadth / 2)\n }\n }\n}\n\nvar result = sample()\nprint(result.middle)\nresult.middle = (0.0, 10.0)\n\nprint(result.no1)\nprint(result.no2)"
},
{
"code": null,
"e": 6196,
"s": 6118,
"text": "When we run the above program using playground, we get the following result −"
},
{
"code": null,
"e": 6224,
"s": 6196,
"text": "(150.0, 75.0)\n-150.0\n-65.0\n"
},
{
"code": null,
"e": 6342,
"s": 6224,
"text": "When a computed property left the new value as undefined, the default value will be set for that particular variable."
},
{
"code": null,
"e": 6565,
"s": 6342,
"text": "A read-only property in computed property is defined as a property with getter but no setter. It is always used to return a value. The variables are further accessed through a '.' Syntax but cannot be set to another value."
},
{
"code": null,
"e": 6892,
"s": 6565,
"text": "class film {\n var head = \"\"\n var duration = 0.0\n var metaInfo: [String:String] {\n return [\n \"head\": self.head,\n \"duration\":\"\\(self.duration)\"\n ]\n }\n}\n\nvar movie = film()\nmovie.head = \"Swift 4 Properties\"\nmovie.duration = 3.09\n\nprint(movie.metaInfo[\"head\"]!)\nprint(movie.metaInfo[\"duration\"]!)"
},
{
"code": null,
"e": 6970,
"s": 6892,
"text": "When we run the above program using playground, we get the following result −"
},
{
"code": null,
"e": 6995,
"s": 6970,
"text": "Swift 4 Properties\n3.09\n"
},
{
"code": null,
"e": 7265,
"s": 6995,
"text": "In Swift 4 to observe and respond to property values Property Observers are used. Each and every time when property values are set property observers are called. Except lazy stored properties we can add property observers to 'inherited' property by method 'overriding'."
},
{
"code": null,
"e": 7309,
"s": 7265,
"text": "Property Observers can be defined by either"
},
{
"code": null,
"e": 7344,
"s": 7309,
"text": "Before Storing the value - willset"
},
{
"code": null,
"e": 7379,
"s": 7344,
"text": "Before Storing the value - willset"
},
{
"code": null,
"e": 7416,
"s": 7379,
"text": "After Storing the new value - didset"
},
{
"code": null,
"e": 7453,
"s": 7416,
"text": "After Storing the new value - didset"
},
{
"code": null,
"e": 7541,
"s": 7453,
"text": "When a property is set in an initializer willset and didset observers cannot be called."
},
{
"code": null,
"e": 7629,
"s": 7541,
"text": "When a property is set in an initializer willset and didset observers cannot be called."
},
{
"code": null,
"e": 7978,
"s": 7629,
"text": "class Samplepgm {\n var counter: Int = 0 {\n willSet(newTotal){\n print(\"Total Counter is: \\(newTotal)\")\n }\n \n didSet {\n if counter > oldValue {\n print(\"Newly Added Counter \\(counter - oldValue)\")\n }\n }\n }\n}\n\nlet NewCounter = Samplepgm()\nNewCounter.counter = 100\nNewCounter.counter = 800"
},
{
"code": null,
"e": 8056,
"s": 7978,
"text": "When we run the above program using playground, we get the following result −"
},
{
"code": null,
"e": 8149,
"s": 8056,
"text": "Total Counter is: 100\nNewly Added Counter 100\nTotal Counter is: 800\nNewly Added Counter 700\n"
},
{
"code": null,
"e": 8232,
"s": 8149,
"text": "Local and global variable are declared for computing and observing the properties."
},
{
"code": null,
"e": 8478,
"s": 8232,
"text": "Properties are defined in the Type definition section with curly braces {} and scope of the variables are also defined previously. For defining type properties for value types 'static' keyword is used and for class types 'class' keyword is used."
},
{
"code": null,
"e": 8861,
"s": 8478,
"text": "struct Structname {\n static var storedTypeProperty = \" \"\n static var computedTypeProperty: Int {\n // return an Int value here\n }\n}\n\nenum Enumname {\n static var storedTypeProperty = \" \"\n static var computedTypeProperty: Int {\n // return an Int value here\n }\n}\n\nclass Classname {\n class var computedTypeProperty: Int {\n // return an Int value here\n }\n}\n"
},
{
"code": null,
"e": 9003,
"s": 8861,
"text": "Just like instance properties Type properties are queried and set with '.' Syntax just on the type alone instead of pointing to the instance."
},
{
"code": null,
"e": 9551,
"s": 9003,
"text": "struct StudMarks {\n static let markCount = 97\n static var totalCount = 0\n \n var InternalMarks: Int = 0 {\n didSet {\n if InternalMarks > StudMarks.markCount {\n InternalMarks = StudMarks.markCount\n }\n if InternalMarks > StudMarks.totalCount {\n StudMarks.totalCount = InternalMarks\n }\n }\n }\n}\n\nvar stud1Mark1 = StudMarks()\nvar stud1Mark2 = StudMarks()\n\nstud1Mark1.InternalMarks = 98\nprint(stud1Mark1.InternalMarks)\n\nstud1Mark2.InternalMarks = 87\nprint(stud1Mark2.InternalMarks)"
},
{
"code": null,
"e": 9629,
"s": 9551,
"text": "When we run the above program using playground, we get the following result −"
},
{
"code": null,
"e": 9636,
"s": 9629,
"text": "97\n87\n"
},
{
"code": null,
"e": 9669,
"s": 9636,
"text": "\n 38 Lectures \n 1 hours \n"
},
{
"code": null,
"e": 9684,
"s": 9669,
"text": " Ashish Sharma"
},
{
"code": null,
"e": 9717,
"s": 9684,
"text": "\n 13 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 9736,
"s": 9717,
"text": " Three Millennials"
},
{
"code": null,
"e": 9768,
"s": 9736,
"text": "\n 7 Lectures \n 1 hours \n"
},
{
"code": null,
"e": 9787,
"s": 9768,
"text": " Three Millennials"
},
{
"code": null,
"e": 9820,
"s": 9787,
"text": "\n 22 Lectures \n 1 hours \n"
},
{
"code": null,
"e": 9837,
"s": 9820,
"text": " Frahaan Hussain"
},
{
"code": null,
"e": 9869,
"s": 9837,
"text": "\n 12 Lectures \n 39 mins\n"
},
{
"code": null,
"e": 9889,
"s": 9869,
"text": " Devasena Rajendran"
},
{
"code": null,
"e": 9924,
"s": 9889,
"text": "\n 40 Lectures \n 2.5 hours \n"
},
{
"code": null,
"e": 9941,
"s": 9924,
"text": " Grant Klimaytys"
},
{
"code": null,
"e": 9948,
"s": 9941,
"text": " Print"
},
{
"code": null,
"e": 9959,
"s": 9948,
"text": " Add Notes"
}
] |
Private and final methods in Java Programming | In Java, private methods are methods with the private access modifier; they can be accessed only in the defining class and are not visible in a child class, so they are not eligible for overriding. However, we can define a method with the same name in the child class, and the private method can still be accessed inside the parent class.
Final methods in Java are methods declared with the final (non-access) modifier. Unlike private methods, a final method is inherited by and visible in the child class, but it cannot be overridden there. In practice, the difference between private and final methods is that with a final method we cannot even define a method with the same signature in the child class, while with a private method we can.
Since neither a private nor a final method can be overridden, there is no use in applying both modifiers together to the same method.
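For contrast with the private-method demo below, here is a minimal sketch (the class names are made up for illustration) of the final case, where the child's same-signature method does not even compile.
class Parent {
   // final: inherited by Child and callable there, but not overridable
   public final void print() {
      System.out.println("in parent print");
   }
}
class Child extends Parent {
   // Compile-time error: print() in Child cannot override
   // print() in Parent; overridden method is final
   public void print() {
      System.out.println("in child print");
   }
}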
Live Demo
public class PrivateFinalMethods {
private void print() {
System.out.println("in parent print");
}
public static void main(String[] args) {
PrivateFinalMethods obj = new PrivateFinalMethodsChild();
obj.print();
PrivateFinalMethodsChild obj1 = new PrivateFinalMethodsChild();
obj1.print();
}
}
class PrivateFinalMethodsChild extends PrivateFinalMethods {
public void print(){
System.out.println("in child print method");
}
}
in parent print
in child print method | [
{
"code": null,
"e": 1383,
"s": 1062,
"text": "In Java private methods are the methods having private access modifier and are restricted to be access in the defining class only and are not visible in their child class due to which are not eligible for overridden. However, we can define a method with the same name in the child class and could access in parent class."
},
{
"code": null,
"e": 1849,
"s": 1383,
"text": "Like private methods final methods in Java are the methods having final non-access modifier instead of private and are again restricted to be accessed in the defining class only and are not visible in their child class due to which are not eligible for overridden. The only difference between private and final methods is that in case of final methods we even can't define a method with the same name in child class while in case of private methods we could define."
},
{
"code": null,
"e": 1994,
"s": 1849,
"text": "In Java as both private and final methods do not allow the overridden functionality so no use of using both modifiers together with same method."
},
{
"code": null,
"e": 2005,
"s": 1994,
"text": " Live Demo"
},
{
"code": null,
"e": 2484,
"s": 2005,
"text": "public class PrivateFinalMethods {\n private void print() {\n System.out.println(\"in parent print\");\n }\n public static void main(String[] args) {\n\n PrivateFinalMethods obj = new PrivateFinalMethodsChild();\n obj.print();\n PrivateFinalMethodsChild obj1 = new PrivateFinalMethodsChild();\n obj1.print();\n }\n}\nclass PrivateFinalMethodsChild extends PrivateFinalMethods {\n public void print(){\n System.out.println(\"in child print method\");\n }\n}"
},
{
"code": null,
"e": 2522,
"s": 2484,
"text": "in parent print\nin child print method"
}
] |
How to read a text file in Selenium with python? | We can read a text file in Selenium with Python by first creating a txt file and adding some content to it.
First of all, we need to open the file, passing the path of the text file as an argument. There are multiple reading methods to perform these operations.
read() – It reads the entire content of the file.
read(n) – It reads n characters of the text file.
readline() – It reads character line by line at a time. If we need to read the first two lines, the readline() method is to be used twice.
readlines() – it reads line by line and stores them in a list.
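These are plain Python file APIs; inside a Selenium script the content read from the file is simply used as ordinary data. The following is only a minimal sketch (the file name and its contents are made up for illustration, and it assumes the Chrome driver is available) −
from selenium import webdriver

# urls.txt (hypothetical) holds one URL per line
with open('urls.txt') as f:
   urls = [line.strip() for line in f.readlines()]

driver = webdriver.Chrome()
for url in urls:
   # open each URL read from the text file
   driver.get(url)
   # print the page title as a simple check
   print(driver.title)
driver.quit()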
Code Implementation with read()
#open the file for read operation
f = open('pythontext.txt')
#reads the entire file content and prints in console
print(f.read())
#close the file
f.close()
Code Implementation with read(n)
#open the file for read operation
f = open('pythontext.txt')
#reads 4 characters as passed as parameter and prints in console
print(f.read(4))
#close the file
f.close()
Code Implementation with readline()
#open the file for read operation
f = open('pythontext.txt')
# reads line by line
l = f.readline()
while l != "":
   print(l)
   l = f.readline()
#close the file
f.close()
Code Implementation with readlines()
#open the file for read operation
f = open('pythontext.txt')
# reads line by line and stores them in list
for l in f.readlines():
   print(l)
#close the file
f.close() | [
{
"code": null,
"e": 1167,
"s": 1062,
"text": "We can read a text file in Selenium with python by first creating a txt file and having a content on it."
},
{
"code": null,
"e": 1340,
"s": 1167,
"text": "First of all, we need to open the file and mention the path of the location of the text file as an argument. There are multiple reading methods to perform these operations."
},
{
"code": null,
"e": 1390,
"s": 1340,
"text": "read() – It reads the entire content of the file."
},
{
"code": null,
"e": 1440,
"s": 1390,
"text": "read() – It reads the entire content of the file."
},
{
"code": null,
"e": 1490,
"s": 1440,
"text": "read(n) – It reads n characters of the text file."
},
{
"code": null,
"e": 1540,
"s": 1490,
"text": "read(n) – It reads n characters of the text file."
},
{
"code": null,
"e": 1679,
"s": 1540,
"text": "readline() – It reads character line by line at a time. If we need to read the first two lines, the readline() method is to be used twice."
},
{
"code": null,
"e": 1818,
"s": 1679,
"text": "readline() – It reads character line by line at a time. If we need to read the first two lines, the readline() method is to be used twice."
},
{
"code": null,
"e": 1881,
"s": 1818,
"text": "readlines() – it reads line by line and stores them in a list."
},
{
"code": null,
"e": 1944,
"s": 1881,
"text": "readlines() – it reads line by line and stores them in a list."
},
{
"code": null,
"e": 1976,
"s": 1944,
"text": "Code Implementation with read()"
},
{
"code": null,
"e": 2132,
"s": 1976,
"text": "#open the file for read operation\nf = open('pythontext.txt')\n#reads the entire file content and prints in console\nprint(f.read())\n#close the file\nf.close()"
},
{
"code": null,
"e": 2165,
"s": 2132,
"text": "Code Implementation with read(n)"
},
{
"code": null,
"e": 2334,
"s": 2165,
"text": "#open the file for read operation\nf = open('pythontext.txt')\n#reads 4 characters as passed as parameter and prints in console\nprint(f.read(4))\n#close the file\nf.close()"
},
{
"code": null,
"e": 2370,
"s": 2334,
"text": "Code Implementation with readline()"
},
{
"code": null,
"e": 2535,
"s": 2370,
"text": "#open the file for read operation\nf = open('pythontext.txt')\n# reads line by line\nl = f.readline()\nwhile l!= \"\":\nprint(l)\nl = f.readline()\n#close the file\nf.close()"
},
{
"code": null,
"e": 2572,
"s": 2535,
"text": "Code Implementation with readlines()"
},
{
"code": null,
"e": 2737,
"s": 2572,
"text": "#open the file for read operation\nf = open('pythontext.txt')\n# reads line by line and stores them in list\nfor l in f.readlines():\nprint(l)\n#close the file\nf.close()"
}
] |
How to Use int as long long int for Competitive Programming? - GeeksforGeeks | 28 Dec, 2021
Most of the time, input constraints in competitive programming questions are bigger than the limits of int. Therefore, there is a need to use long int or even long long int. Here let us take two cases, so that a user who writes correct logic but whose submission is still not accepted on large inputs can see where the rectification is required.
Case 1: Big integer input without redefining int as long long int
Case 2: Big integer input with redefining int as long long int
Case 1: Big integer input without redefining int as long long int
Example:
C++
// C++ program to demonstrate Overflow
// in Implicit Conversion itself

// Importing input output libraries
#include <iostream>
using namespace std;

// Main driver method
int main()
{
    // 10 raised to the power of 10
    int x = 1e10;

    // Printing the number
    cout << x << endl;

    // As return type of main was integer type
    return 0;
}
Output:
prog.cpp: In function ‘int main()’:
prog.cpp:5:10: warning: overflow in implicit constant conversion [-overflow]
int x = 1e10;
^
Output Explanation:
This happens because the int data type is 4 bytes, so it can hold integer values only in the range -2,147,483,648 to 2,147,483,647. Here the value assigned exceeds the maximum an int can hold, so the compiler warns about an implicit constant conversion. We therefore need the 8-byte long long data type. To keep contest code short, int can simply be redefined as long long int with #define int long long int; note, however, that the macro would also change the return type of main, so main must be declared with a fixed-width type such as int32_t, as done in the next example.
Case 2: Big integer input with redefining int as long long int
Example:
C++
// C++ program to demonstrate longlongint approach

// Including all basic libraries
#include <bits/stdc++.h>
using namespace std;

// Main driver method with int32_t return type
int32_t main()
{
    // Calculating size of Integer data type
    // using sizeof() method
    cout << "size of int = " << sizeof(int) << '\n';

    // Defining int as long long int
#define int long long int

    // Calculating new size of Integer data type
    // again using standard sizeof() method
    cout << "new size of int = " << sizeof(int) << '\n';

    // Big custom input integer
    int x = 1e18;

    // Print and display this big integer value
    cout << "value of x = " << x << endl;

    return 0;
}
size of int = 4
new size of int = 8
value of x = 1000000000000000000
Note: This is generally used in competitive programming problems, as the same code can then handle inputs that exceed the int range.
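A related pitfall worth keeping in mind (not covered by the examples above): even when the final answer fits in long long, an intermediate product of two int values can overflow before it is stored, so at least one operand should be widened first. A minimal sketch:
#include <iostream>
using namespace std;

int main()
{
    int a = 100000, b = 100000;

    // Overflows: the multiplication is still performed in int
    long long wrong = a * b;

    // Correct: widen one operand so the multiplication happens in long long
    long long right = 1LL * a * b;

    cout << wrong << " " << right << endl;
    return 0;
}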
sagar0719kumar
sumitgumber28
rkbhola5
C++
Competitive Programming
CPP
Operator Overloading in C++
Polymorphism in C++
Friend class and function in C++
Sorting a vector in C++
std::string class in C++
Competitive Programming - A Complete Guide
Practice for cracking any coding interview
Arrow operator -> in C/C++ with Examples
Prefix Sum Array - Implementation and Applications in Competitive Programming
Top 10 Algorithms and Data Structures for Competitive Programming | [
{
"code": null,
"e": 25367,
"s": 25339,
"text": "\n28 Dec, 2021"
},
{
"code": null,
"e": 25702,
"s": 25367,
"text": "Most of the time, input constraints in Competitive programming questions are bigger than the limits of int. Therefore, there is a need to use long int or even long long int. Here let us take two cases so that if a naive user writes correct logic still input is not getting accepted can get to know where the rectification is required."
},
{
"code": null,
"e": 25830,
"s": 25702,
"text": "Case 1: Big integer input without redefining int as long long intCase 2: Big integer input with redefining int as long long int"
},
{
"code": null,
"e": 25896,
"s": 25830,
"text": "Case 1: Big integer input without redefining int as long long int"
},
{
"code": null,
"e": 25906,
"s": 25896,
"text": "Example: "
},
{
"code": null,
"e": 25910,
"s": 25906,
"text": "C++"
},
{
"code": "// C++ program to demonstrate Overflow in Implicit Conversion itself // Importing input output libraries#include <iostream> using namespace std; // Main driver methodint main(){ // 10 raised to the power of 10 int x = 1e10; // Printing the number cout << x << endl; // As return type of main was integer type return 0;}",
"e": 26250,
"s": 25910,
"text": null
},
{
"code": null,
"e": 26259,
"s": 26250,
"text": "Output: "
},
{
"code": null,
"e": 26399,
"s": 26259,
"text": "prog.cpp: In function ‘int main()’:\nprog.cpp:5:10: warning: overflow in implicit constant conversion [-overflow]\n int x = 1e10; \n ^"
},
{
"code": null,
"e": 26419,
"s": 26399,
"text": "Output Explanation:"
},
{
"code": null,
"e": 27011,
"s": 26419,
"text": "It is because the range of numbers integer data type can hold is 4 bytes meaning it can hold integer numbers ranging from -2,147,483,647 to 2,147,483,647. Here in our case, the output exceeds the maximum integer a variable can hold so do throw a warning of implicit constant conversion. So we do have to use a long datatype as it can hold 8 bytes. In order to rectify the same, we need to redefine int. However, the program will still throw an error as the datatype of the main also changes. So defining them to int so that our speed in contests can increase. (ie) #define int long long int."
},
{
"code": null,
"e": 27075,
"s": 27011,
"text": "Case 2: Big integer input with redefining int as long long int "
},
{
"code": null,
"e": 27084,
"s": 27075,
"text": "Example:"
},
{
"code": null,
"e": 27088,
"s": 27084,
"text": "C++"
},
{
"code": "// C++ program to demonstrate longlongint approach // Including all basic libraries#include <bits/stdc++.h>using namespace std; // Main driver method with int32_t return typeint32_t main(){ // Calculating size of Integer data type // using sizeof() method cout << \"size of int = \" << sizeof(int) << '\\n'; // Defining int as long long int#define int long long int // Calculating new size of Integer data type // again using standard sizeof() method cout << \"new size of int = \" << sizeof(int) << '\\n'; // Big custom input integer int x = 1e18; // Print and display this big integer value cout << \"value of x = \" << x << endl; return 0;}",
"e": 27761,
"s": 27088,
"text": null
},
{
"code": null,
"e": 27830,
"s": 27761,
"text": "size of int = 4\nnew size of int = 8\nvalue of x = 1000000000000000000"
},
{
"code": null,
"e": 27930,
"s": 27830,
"text": "Note: This is generally used in competitive programming problems as it will accept all input sizes."
},
{
"code": null,
"e": 27945,
"s": 27930,
"text": "sagar0719kumar"
},
{
"code": null,
"e": 27959,
"s": 27945,
"text": "sumitgumber28"
},
{
"code": null,
"e": 27968,
"s": 27959,
"text": "rkbhola5"
},
{
"code": null,
"e": 27972,
"s": 27968,
"text": "C++"
},
{
"code": null,
"e": 27996,
"s": 27972,
"text": "Competitive Programming"
},
{
"code": null,
"e": 28000,
"s": 27996,
"text": "CPP"
},
{
"code": null,
"e": 28098,
"s": 28000,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28126,
"s": 28098,
"text": "Operator Overloading in C++"
},
{
"code": null,
"e": 28146,
"s": 28126,
"text": "Polymorphism in C++"
},
{
"code": null,
"e": 28179,
"s": 28146,
"text": "Friend class and function in C++"
},
{
"code": null,
"e": 28203,
"s": 28179,
"text": "Sorting a vector in C++"
},
{
"code": null,
"e": 28228,
"s": 28203,
"text": "std::string class in C++"
},
{
"code": null,
"e": 28271,
"s": 28228,
"text": "Competitive Programming - A Complete Guide"
},
{
"code": null,
"e": 28314,
"s": 28271,
"text": "Practice for cracking any coding interview"
},
{
"code": null,
"e": 28355,
"s": 28314,
"text": "Arrow operator -> in C/C++ with Examples"
},
{
"code": null,
"e": 28433,
"s": 28355,
"text": "Prefix Sum Array - Implementation and Applications in Competitive Programming"
}
] |
How to disable zoom on a mobile web page using CSS? - GeeksforGeeks | 23 Aug, 2021
To disable zooming with the multi-touch gesture we can use the SureFox browser, but a user can still zoom in or out by double-tapping on the screen. Instead, we can use the <meta> tag to disable zoom in and out on a mobile web page. Syntax:
<meta name="viewport" content= "width=device-width, user-scalable=no">
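As an aside (this goes beyond the viewport approach shown above), many modern mobile browsers also honour the CSS touch-action property, which can suppress double-tap-to-zoom without changing the viewport settings; browser support varies, so treat this only as a sketch.
/* Disable double-tap-to-zoom gestures for the whole page;
   pinch-zoom and panning behaviour depends on browser support */
html {
  touch-action: manipulation;
}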
Steps to implement on mobile browser:
Download opera mobile emulator
Install the downloaded file, and run the emulator.
Choose your device preferred devices from the list.
Drag your file into that emulator to run the non_zoom-able file.
Non-zoomable Example:
Zoomable Example:
Example: This example uses user-scalable=no to disable zoom on mobile web page.
html
<!DOCTYPE html>
<html>

<head>
    <title>
        Disable Double-Tap to Zoom
    </title>

    <meta name="viewport" content=
        "width=device-width, user-scalable=no" />

    <style>
        body {
            height: 410px;
            width: 600px;
            border: 2px solid green;
        }

        p {
            font-size: 20px;
            padding: 5px;
            margin: 7px;
            width: 270px;
            height: 300px;
            border: 2px solid green;
        }
    </style>
</head>

<body>
    <center>
        <h1 style="color:green;text-shadow: 1px 3px 2px #000">
            GeeksforGeeks
        </h1>

        <div>
            <p style="float:left;">
                It is a good platform to learn programming.
                It is an educational website. Prepare for the
                Recruitment drive of product based companies like
                Microsoft, Amazon, Adobe etc with a free online
                placement preparation course. The course focuses
                on various MCQ's & Coding question likely to be
                asked in the interviews & make your upcoming
                placement season efficient and successful.
            </p>

            <p style="float:right;">
                Also, any geeks can help other geeks by writing
                articles on the GeeksforGeeks, publishing articles
                follow a few steps that are Articles that need
                little modification/improvement from reviewers is
                published first. To quickly get your articles
                reviewed, please refer existing articles, their
                formatting style, coding style, and try to make
                you are close to them.
            </p>
        </div>
    </center>
</body>

</html>
Output:
Example 2: This example uses user-scalable=no to disable zoom on mobile web page.
html
<!DOCTYPE html>
<html>

<head>
    <title>
        Disable Double-Tap to Zoom
    </title>

    <meta name="viewport" content=
        "width=device-width, user-scalable=no" />

    <style>
        body {
            height: 415px;
            width: 630px;
            border: 2px solid green;
        }
    </style>
</head>

<body>
    <center>
        <h1 style="color:green">
            GeeksforGeeks
        </h1>

        <img src="https://media.geeksforgeeks.org/wp-content/uploads/6-86.png"
            alt="" style="width:396px; border:2px solid black; float:left; margin:7px;" />

        <img src="https://media.geeksforgeeks.org/wp-content/uploads/5-113.png"
            alt="" style="width:196px; border:2px solid black; float:right; margin:7px;" />

        <img src="https://media.geeksforgeeks.org/wp-content/uploads/3-58.jpg"
            alt="" style="width:396px; border:2px solid black; float:left; margin:7px;" />

        <img src="https://media.geeksforgeeks.org/wp-content/uploads/2-528.png"
            alt="" style="width:196px; height:101px; border:2px solid black; float:right; margin:7px;" />

        <p><b>Note:</b> Not zoomable on mobile</p>
    </center>
</body>

</html>
rajeev0719singh
CSS-Misc
Picked
HTML
Web Technologies
Web technologies Questions
HTML
How to update Node.js and NPM to next version ?
REST API (Introduction)
How to Insert Form Data into Database using PHP ?
CSS to put icon inside an input element in a form
How to position a div at the bottom of its container using CSS?
Remove elements from a JavaScript Array
Installation of Node.js on Linux
Convert a string to an integer in JavaScript
How to fetch data from an API in ReactJS ?
Difference between var, let and const keywords in JavaScript | [
{
"code": null,
"e": 26737,
"s": 26709,
"text": "\n23 Aug, 2021"
},
{
"code": null,
"e": 26977,
"s": 26737,
"text": "To disable the zooming option with the multi-touch gesture we can use surefox browser but still, a user can zoom in or out by double tapping on the screen. We can use the <meta> tag to disable zoom in and out on a mobile web page.Syntax: "
},
{
"code": null,
"e": 27048,
"s": 26977,
"text": "<meta name=\"viewport\" content= \"width=device-width, user-scalable=no\">"
},
{
"code": null,
"e": 27088,
"s": 27048,
"text": "Steps to implement on mobile browser: "
},
{
"code": null,
"e": 27119,
"s": 27088,
"text": "Download opera mobile emulator"
},
{
"code": null,
"e": 27170,
"s": 27119,
"text": "Install the downloaded file, and run the emulator."
},
{
"code": null,
"e": 27222,
"s": 27170,
"text": "Choose your device preferred devices from the list."
},
{
"code": null,
"e": 27287,
"s": 27222,
"text": "Drag your file into that emulator to run the non_zoom-able file."
},
{
"code": null,
"e": 27311,
"s": 27287,
"text": "Non-zoomable Example: "
},
{
"code": null,
"e": 27331,
"s": 27311,
"text": "Zoomable Example: "
},
{
"code": null,
"e": 27413,
"s": 27331,
"text": "Example: This example uses user-scalable=no to disable zoom on mobile web page. "
},
{
"code": null,
"e": 27418,
"s": 27413,
"text": "html"
},
{
"code": "<!DOCTYPE html><html> <head> <title> Disable Double-Tap to Zoom </title> <meta meta name=\"viewport\" content= \"width=device-width, user-scalable=no\" /> <style> body { height:410px; width:600px; border: 2px solid green; } p { font-size:20px; padding:5px; margin:7px; width:270px; height:300px; border:2px solid green; } </style></head> <body> <center> <h1 style=\"color:green;text-shadow: 1px 3px 2px #000\"> GeeksforGeeks </h1> <div> <p style=\" float:left; \"> It is a good platform to learn programming. It is an educational website. Prepare for the Recruitment drive of product based companies like Microsoft, Amazon, Adobe etc with a free online placement preparation course. The course focuses on various MCQ's & Coding question likely to be asked in the interviews & make your upcoming placement season efficient and successful. </p> <p style=\"float:right;\"> Also, any geeks can help other geeks by writing articles on the GeeksforGeeks, publishing articles follow a few steps that are Articles that need little modification/improvement from reviewers is published first. To quickly get your articles reviewed, please refer existing articles, their formatting style, coding style, and try to make you are close to them. </p> </div> </center></body> </html> ",
"e": 29253,
"s": 27418,
"text": null
},
{
"code": null,
"e": 29263,
"s": 29253,
"text": "Output: "
},
{
"code": null,
"e": 29347,
"s": 29263,
"text": "Example 2: This example uses user-scalable=no to disable zoom on mobile web page. "
},
{
"code": null,
"e": 29352,
"s": 29347,
"text": "html"
},
{
"code": "<!DOCTYPE html><html> <head> <title> Disable Double-Tap to Zoom </title> <meta meta name=\"viewport\" content= \"width=device-width, user-scalable=no\" /> <style> body { height:415px; width:630px; border: 2px solid green; } </style></head> <body> <center> <h1 style=\"color:green\"> GeeksforGeeks </h1> <img src=\"https://media.geeksforgeeks.org/wp-content/uploads/6-86.png\" alt=\"\" style=\"width:396px; border:2px solid black; float:left; margin:7px;\"/> <img src=\"https://media.geeksforgeeks.org/wp-content/uploads/5-113.png\" alt=\"\"style=\"width:196px; border:2px solid black; float:right; margin:7px;\" /> <img src=\"https://media.geeksforgeeks.org/wp-content/uploads/3-58.jpg\" alt=\"\" style=\"width:396px; border:2px solid black; float:left; margin:7px;\"/> <img src=\"https://media.geeksforgeeks.org/wp-content/uploads/2-528.png\" alt=\"\" style=\"width:196px; height:101px; border:2px solid black; float:right;margin:7px;\"/> <p><b>Note:</b>Not zoomable on mobile</p> </center></body></html> ",
"e": 30713,
"s": 29352,
"text": null
},
{
"code": null,
"e": 30850,
"s": 30713,
"text": "Attention reader! Don’t stop learning now. Get hold of all the important HTML concepts with the Web Design for Beginners | HTML course."
},
{
"code": null,
"e": 30866,
"s": 30850,
"text": "rajeev0719singh"
},
{
"code": null,
"e": 30875,
"s": 30866,
"text": "CSS-Misc"
},
{
"code": null,
"e": 30882,
"s": 30875,
"text": "Picked"
},
{
"code": null,
"e": 30887,
"s": 30882,
"text": "HTML"
},
{
"code": null,
"e": 30904,
"s": 30887,
"text": "Web Technologies"
},
{
"code": null,
"e": 30931,
"s": 30904,
"text": "Web technologies Questions"
},
{
"code": null,
"e": 30936,
"s": 30931,
"text": "HTML"
},
{
"code": null,
"e": 31034,
"s": 30936,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 31082,
"s": 31034,
"text": "How to update Node.js and NPM to next version ?"
},
{
"code": null,
"e": 31106,
"s": 31082,
"text": "REST API (Introduction)"
},
{
"code": null,
"e": 31156,
"s": 31106,
"text": "How to Insert Form Data into Database using PHP ?"
},
{
"code": null,
"e": 31206,
"s": 31156,
"text": "CSS to put icon inside an input element in a form"
},
{
"code": null,
"e": 31270,
"s": 31206,
"text": "How to position a div at the bottom of its container using CSS?"
},
{
"code": null,
"e": 31310,
"s": 31270,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 31343,
"s": 31310,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 31388,
"s": 31343,
"text": "Convert a string to an integer in JavaScript"
},
{
"code": null,
"e": 31431,
"s": 31388,
"text": "How to fetch data from an API in ReactJS ?"
}
] |
How useEffect works in ReactJS ? - GeeksforGeeks | 26 May, 2021
When we want to perform something after each render of a component, we can use the useEffect() hook. By using this Hook, we tell React that our component needs to do something after render by passing it a function. React remembers the function we passed to the useEffect() hook and calls it later, after performing the DOM updates.
By default, the useEffect hook runs after the first render and after every update. React updates the DOM by the time it runs the effects.
Creating React Application:
Step 1: Create a React application using the following command:
npx create-react-app foldername
Step 2: After creating your project folder i.e. foldername, move to it using the following command:
cd foldername
Project Structure: It will look like the following.
Example: Now write down the following code in the App.js file. Here, App is our default component where we have written our code.
App.js
import React, { useState, useEffect } from 'react';

function App() {
    const [count, setCount] = useState(0);

    useEffect(() => {
        alert(`You clicked ${count} times`)
    });

    const handleUpdate = () => {
        setCount(count + 1)
    }

    return (
        <div>
            <div>You have clicked {count} times</div>
            <button onClick={handleUpdate}>
                Click me
            </button>
        </div>
    );
}

export default App;
Step to Run Application: Run the application using the following command from the root directory of the project.
npm start
Output: Now open your browser and go to http://localhost:3000/, you will see the following output.
Explanation: As we can see from the above example, whenever we update the state, React re-renders the component, and just after that the useEffect() hook calls the function that we have passed.
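As an aside (this goes beyond the article's example), useEffect() also accepts a dependency array as a second argument, which limits when the effect runs. A minimal sketch (the component name is made up for illustration):
import React, { useState, useEffect } from 'react';

function Counter() {
    const [count, setCount] = useState(0);

    useEffect(() => {
        // runs only after renders in which `count` changed
        document.title = `You clicked ${count} times`;
    }, [count]);

    return (
        <button onClick={() => setCount(count + 1)}>
            Click me
        </button>
    );
}

export default Counter;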
Picked
ReactJS-Basics
ReactJS
Web Technologies
ReactJS useNavigate() Hook
How to set background images in ReactJS ?
Axios in React: A Guide for Beginners
How to create a table in ReactJS ?
How to navigate on path by button click in react router ?
Remove elements from a JavaScript Array
Installation of Node.js on Linux
Convert a string to an integer in JavaScript
How to insert spaces/tabs in text using HTML/CSS?
Difference between var, let and const keywords in JavaScript | [
{
"code": null,
"e": 26071,
"s": 26043,
"text": "\n26 May, 2021"
},
{
"code": null,
"e": 26395,
"s": 26071,
"text": "When we want to perform something after each render of component then we can use the useEffect() hook. By using this Hook, we tell React that our component needs to do something after render by passing a function. React remember the function we passed in useEffect() hook and call it later after performing the DOM updates."
},
{
"code": null,
"e": 26533,
"s": 26395,
"text": "By default, the useEffect hook runs after the first render and after every update. React updates the DOM by the time it runs the effects."
},
{
"code": null,
"e": 26561,
"s": 26533,
"text": "Creating React Application:"
},
{
"code": null,
"e": 26656,
"s": 26561,
"text": "Step 1: Create a React application using the following command:npx create-react-app foldername"
},
{
"code": null,
"e": 26720,
"s": 26656,
"text": "Step 1: Create a React application using the following command:"
},
{
"code": null,
"e": 26752,
"s": 26720,
"text": "npx create-react-app foldername"
},
{
"code": null,
"e": 26865,
"s": 26752,
"text": "Step 2: After creating your project folder i.e. foldername, move to it using the following command:cd foldername"
},
{
"code": null,
"e": 26965,
"s": 26865,
"text": "Step 2: After creating your project folder i.e. foldername, move to it using the following command:"
},
{
"code": null,
"e": 26979,
"s": 26965,
"text": "cd foldername"
},
{
"code": null,
"e": 27031,
"s": 26979,
"text": "Project Structure: It will look like the following."
},
{
"code": null,
"e": 27162,
"s": 27031,
"text": "Example: Now write down the following code in the App.js file. Here, App is our default component where we have written our code. "
},
{
"code": null,
"e": 27169,
"s": 27162,
"text": "App.js"
},
{
"code": "import React, { useState, useEffect } from 'react'; function App() { const [count, setCount] = useState(0); useEffect(() => { alert(`You clicked ${count} times`) }); const handleUpdate = ()=> { setCount (count + 1) } return ( <div> <div>You have clicked {count} times</div> <button onClick={ handleUpdate} > Click me </button> </div> );} export default App;",
"e": 27584,
"s": 27169,
"text": null
},
{
"code": null,
"e": 27697,
"s": 27584,
"text": "Step to Run Application: Run the application using the following command from the root directory of the project."
},
{
"code": null,
"e": 27707,
"s": 27697,
"text": "npm start"
},
{
"code": null,
"e": 27806,
"s": 27707,
"text": "Output: Now open your browser and go to http://localhost:3000/, you will see the following output."
},
{
"code": null,
"e": 27985,
"s": 27806,
"text": "Explanation: As we can from the above example whenever we update the state, React re-render the component, and just after that useEffect() hook call function that we have passed."
},
{
"code": null,
"e": 27992,
"s": 27985,
"text": "Picked"
},
{
"code": null,
"e": 28007,
"s": 27992,
"text": "ReactJS-Basics"
},
{
"code": null,
"e": 28015,
"s": 28007,
"text": "ReactJS"
},
{
"code": null,
"e": 28032,
"s": 28015,
"text": "Web Technologies"
},
{
"code": null,
"e": 28130,
"s": 28032,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28157,
"s": 28130,
"text": "ReactJS useNavigate() Hook"
},
{
"code": null,
"e": 28199,
"s": 28157,
"text": "How to set background images in ReactJS ?"
},
{
"code": null,
"e": 28237,
"s": 28199,
"text": "Axios in React: A Guide for Beginners"
},
{
"code": null,
"e": 28272,
"s": 28237,
"text": "How to create a table in ReactJS ?"
},
{
"code": null,
"e": 28330,
"s": 28272,
"text": "How to navigate on path by button click in react router ?"
},
{
"code": null,
"e": 28370,
"s": 28330,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 28403,
"s": 28370,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 28448,
"s": 28403,
"text": "Convert a string to an integer in JavaScript"
},
{
"code": null,
"e": 28498,
"s": 28448,
"text": "How to insert spaces/tabs in text using HTML/CSS?"
}
] |
Node.js filehandle.writeFile() Method from class: FileHandle - GeeksforGeeks | 29 Jun, 2020
The filehandle.writeFile() method is defined in the File System module of Node.js. The File System module basically allows us to interact with the hard disk of the user’s computer. The filehandle.writeFile() method asynchronously writes data to a file, replacing the file if it already exists.
Syntax:
filehandle.writeFile(data, options)
Parameters: This method accepts two parameters as mentioned above and described below:
data: It is a String, Buffer or Uint8Array instance. It is the data that will be written to the file.
options: It is an optional parameter that affects the output, depending on whether or not we provide it in the function call. It supports the following field (a short usage sketch follows this list):
encoding: It is a string that specifies the encoding technique, default value is ‘utf8’.
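A minimal usage sketch (assuming a writable file named ./testFile.txt; the data string is arbitrary) that passes the optional options object with the encoding field described above:

const fs = require('fs');

(async () => {
    let filehandle = null;
    try {
        filehandle = await fs.promises.open('./testFile.txt', 'w');

        // data plus the optional options object
        await filehandle.writeFile('Hello, GeeksforGeeks!',
                                   { encoding: 'utf8' });
    } finally {
        // Always close the handle, even if the write fails
        if (filehandle) {
            await filehandle.close();
        }
    }
})();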
Example 1: This example demonstrates the write operation on a file that already exists.
// Node.js program to demonstrate the
// filehandle.writeFile() Method

// Importing File System and Utilities module
const fs = require('fs')

// The readFileSync() method reads the
// contents of the file and returns the
// buffer form of the data
const oldBuff = fs.readFileSync('./testFile.txt')
const oldContent = oldBuff.toString()
console.log(`\nOld content of the file :\n${oldContent}`)

const writeToFile = async (path, data) => {
    let filehandle = null
    try {
        filehandle = await fs.promises.open(path, 'w')

        // Write to file
        await filehandle.writeFile(data)
    } finally {
        if (filehandle) {

            // Close the file if it is opened.
            await filehandle.close();
        }
    }

    // New content after write operation
    const newBuff = fs.readFileSync('./testFile.txt')
    const newContent = newBuff.toString()
    console.log(`\nNew content of the file :\n${newContent}`)
}

writeToFile('./testFile.txt', "Hey, I am newly added!")
    .catch(err => {
        console.log(`Error Occurs, Error code -> ${err.code}, Error NO -> ${err.errno}`)
    })
Output:
Example 2: This example demonstrates the write operation on a file that did not exist earlier and is created at run time.
// Node.js program to demonstrate the
// filehandle.writeFile() Method

// Importing File System and Utilities module
const fs = require('fs')

const writeToFile = async (path, data) => {
    let filehandle = null
    try {
        filehandle = await fs
            .promises.open(path, 'w')

        // Write to file
        await filehandle.writeFile(data)
    } finally {
        if (filehandle) {

            // Close the file if it is opened.
            await filehandle.close();
        }
    }

    // The readFileSync() method reads
    // the contents of the file and
    // returns the buffer form of the data
    const buff = fs.readFileSync(path)
    const content = buff.toString()
    console.log(`\nContents of the file :\n${content}`)
}

var query = "Hey there, I am newly added " +
    "content of newly added file!";
writeToFile('./testFile.txt', query)
    .catch(err => {
        console.log(`Error Occurs, Error code -> ${err.code}, Error NO -> ${err.errno}`)
    })
Directory structure before running the program:
Directory structure after running the program:
Output:
Node.js-fs-module
Node.js
Web Technologies
How to install the previous version of node.js and npm ?
Difference between promise and async await in Node.js
How to use an ES6 import in Node.js?
Mongoose | findByIdAndUpdate() Function
Express.js res.render() Function
Remove elements from a JavaScript Array
Convert a string to an integer in JavaScript
How to fetch data from an API in ReactJS ?
How to insert spaces/tabs in text using HTML/CSS?
Difference between var, let and const keywords in JavaScript | [
{
"code": null,
"e": 25865,
"s": 25837,
"text": "\n29 Jun, 2020"
},
{
"code": null,
"e": 26152,
"s": 25865,
"text": "The filehandle.writeFile() method is used to defined in the File System module of Node.js. The File System module is basically to interact with the hard disk of the user’s computer. The fs.writeFile() method asynchronously writes data to a file, replacing the file if it already exists."
},
{
"code": null,
"e": 26160,
"s": 26152,
"text": "Syntax:"
},
{
"code": null,
"e": 26196,
"s": 26160,
"text": "filehandle.writeFile(data, options)"
},
{
"code": null,
"e": 26282,
"s": 26196,
"text": "Parameter: This method accepts two parameters as mentioned above and described below:"
},
{
"code": null,
"e": 26380,
"s": 26282,
"text": "data: It is a String, Buffer or Uint8Array instance. It is the data which will write to the file."
},
{
"code": null,
"e": 26595,
"s": 26380,
"text": "options: It is an optional parameter that affects the output in someway accordingly we provide it to the function call or not.encoding: It is a string that specifies the encoding technique, default value is ‘utf8’."
},
{
"code": null,
"e": 26684,
"s": 26595,
"text": "encoding: It is a string that specifies the encoding technique, default value is ‘utf8’."
},
{
"code": null,
"e": 26778,
"s": 26684,
"text": "Example 1: This example explains how to write operation done to the file that already exists."
},
{
"code": "// Node.js program to demonstrate the// filehandle.writeFile() Method // Importing File System and Utilities moduleconst fs = require('fs') // The readFileSync() method reads the// contents of the file and returns the// buffer form of the dataconst oldBuff = fs.readFileSync('./tesTfile.txt')const oldContent = oldBuff.toString()console.log(`\\nOld content of the file :\\n${oldContent}`) const writeToFile = async (path, data) => { let filehandle = null try { filehandle = await fs.promises.open(path, mode = 'w') // Write to file await filehandle.writeFile(data) } finally { if (filehandle) { // Close the file if it is opened. await filehandle.close(); } } // New content after write operation const newBuff = fs.readFileSync('./tesTfile.txt') const newContent = newBuff.toString() console.log(`\\nNew content of the file :\\n${newContent}`)} writeToFile('./testFile.txt', \"Hey, I am newly added!\") .catch(err => { console.log(`Error Occurs, Error code -> ${err.code}, Error NO -> ${err.errno}`) })",
"e": 27888,
"s": 26778,
"text": null
},
{
"code": null,
"e": 27896,
"s": 27888,
"text": "Output:"
},
{
"code": null,
"e": 28017,
"s": 27896,
"text": "Example 2: This example explains how to write operation done to the file that not exist earlier but created at run time."
},
{
"code": "// Node.js program to demonstrate the// filehandle.writeFile() Method // Importing File System and Utilities moduleconst fs = require('fs') const writeToFile = async (path, data) => { let filehandle = null try { filehandle = await fs .promises.open(path, mode = 'w') // Write to file await filehandle.writeFile(data) } finally { if (filehandle) { // Close the file if it is opened. await filehandle.close(); } } // The readFileSync() method reads // the contents of the file and // returns the buffer form of the data const buff = fs.readFileSync(path) const content = buff.toString() console.log(`\\nContents of the file :\\n${content}`)} var query = \"Hey there, I am newly added \" + \"content of newly added file!\";writeToFile('./testFile.txt', query) .catch(err => { console.log(`Error Occurs, Error code -> ${err.code}, Error NO -> ${err.errno}`) })",
"e": 29000,
"s": 28017,
"text": null
},
{
"code": null,
"e": 29048,
"s": 29000,
"text": "Directory structure before running the program:"
},
{
"code": null,
"e": 29095,
"s": 29048,
"text": "Directory structure after running the program:"
},
{
"code": null,
"e": 29103,
"s": 29095,
"text": "Output:"
},
{
"code": null,
"e": 29121,
"s": 29103,
"text": "Node.js-fs-module"
},
{
"code": null,
"e": 29129,
"s": 29121,
"text": "Node.js"
},
{
"code": null,
"e": 29146,
"s": 29129,
"text": "Web Technologies"
},
{
"code": null,
"e": 29244,
"s": 29146,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 29301,
"s": 29244,
"text": "How to install the previous version of node.js and npm ?"
},
{
"code": null,
"e": 29355,
"s": 29301,
"text": "Difference between promise and async await in Node.js"
},
{
"code": null,
"e": 29392,
"s": 29355,
"text": "How to use an ES6 import in Node.js?"
},
{
"code": null,
"e": 29432,
"s": 29392,
"text": "Mongoose | findByIdAndUpdate() Function"
},
{
"code": null,
"e": 29465,
"s": 29432,
"text": "Express.js res.render() Function"
},
{
"code": null,
"e": 29505,
"s": 29465,
"text": "Remove elements from a JavaScript Array"
},
{
"code": null,
"e": 29550,
"s": 29505,
"text": "Convert a string to an integer in JavaScript"
},
{
"code": null,
"e": 29593,
"s": 29550,
"text": "How to fetch data from an API in ReactJS ?"
},
{
"code": null,
"e": 29643,
"s": 29593,
"text": "How to insert spaces/tabs in text using HTML/CSS?"
}
] |
Array vs Matrix in R Programming - GeeksforGeeks | 11 Jun, 2021
The data structure is a particular way of organizing data in a computer so that it can be used effectively. The idea is to reduce the space and time complexities of different tasks. Data structures in R programming are tools for holding multiple values. The two most important data structures in R are Arrays and Matrices.
Arrays are data storage objects in R containing one or more dimensions. Arrays can contain only a single data type. The array() function is an in-built function which takes a vector as input and arranges it according to the dim argument. An array is an iterable object, whose elements can be indexed, accessed and modified individually. Operations can be performed on arrays with similar structures and dimensions. Uni-dimensional arrays are called vectors in R. Two-dimensional arrays are called matrices.
Syntax: array(array1, dim = c(r, c, m), dimnames = list(c.names, r.names, m.names))
Parameters:
array1: a vector of values
dim: the dimensions of the array, i.e. m matrices with the specified number of rows (r) and columns (c)
dimnames: the names for the dimensions
Example:
R
# R program to illustrate an array

# creating a vector
vector1 <- c("A", "B", "C")

# declaring a character array
uni_array <- array(vector1)
print("Uni-Dimensional Array")
print(uni_array)

# creating another vector
vector <- c(1:12)

# declaring 2 numeric multi-dimensional
# arrays with size 2x3
multi_array <- array(vector, dim = c(2, 3, 2))
print("Multi-Dimensional Array")
print(multi_array)
Output:
[1] "Uni-Dimensional Array"
[1] "A" "B" "C"
[1] "Multi-Dimensional Array"
, , 1
     [,1] [,2] [,3]
[1,]    1    3    5
[2,]    2    4    6
, , 2
     [,1] [,2] [,3]
[1,]    7    9   11
[2,]    8   10   12
A matrix in R is a table-like structure consisting of elements arranged in a fixed number of rows and columns. All the elements belong to a single data type. R contains an in-built function matrix() to create a matrix. Elements of a matrix can be accessed by providing the indexes of its rows and columns. Arithmetic operations such as addition, subtraction and multiplication can be performed on matrices with the same dimensions. Matrices can be easily converted to data frames or CSV files.
Syntax: matrix(data, nrow, ncol, byrow)
Parameters:
data: a vector of elements of a similar data type
nrow: number of rows
ncol: number of columns
byrow: by default matrices are filled in column-wise order, so this parameter decides how to arrange the matrix
Example:
R
# R program to illustrate a matrix

A = matrix(
    # Taking sequence of elements
    c(1, 2, 3, 4, 5, 6, 7, 8, 9),

    # No of rows and columns
    nrow = 3, ncol = 3,

    # By default matrices are
    # in column-wise order
    # So this parameter decides
    # how to arrange the matrix
    byrow = TRUE
)

print(A)
Output:
     [,1] [,2] [,3]
[1,]    1    2    3
[2,]    4    5    6
[3,]    7    8    9
sooda367
Picked
R-Arrays
R-Matrix
R Language
Filter data by multiple conditions in R using Dplyr
Loops in R (for, while, repeat)
How to change Row Names of DataFrame in R ?
Change Color of Bars in Barchart using ggplot2 in R
Group by function in R using Dplyr
How to Change Axis Scales in R Plots?
How to Split Column Into Multiple Columns in R DataFrame?
R Programming Language - Introduction
K-Means Clustering in R Programming
Replace Specific Characters in String in R | [
{
"code": null,
"e": 26188,
"s": 26160,
"text": "\n11 Jun, 2021"
},
{
"code": null,
"e": 26513,
"s": 26188,
"text": "The data structure is a particular way of organizing data in a computer so that it can be used effectively. The idea is to reduce the space and time complexities of different tasks. Data structures in R programming are tools for holding multiple values. The two most important data structures in R are Arrays and Matrices. "
},
{
"code": null,
"e": 27033,
"s": 26513,
"text": "Arrays are data storage objects in R containing more than or equal to 1 dimension. Arrays can contain only a single data type. The array() function is an in-built function which takes input as a vector and arranges them according to dim argument. Array is an iterable object, where the array elements are indexed, accessed and modified individually. Operations on array can be performed with similar structures and dimensions. Uni-dimensional arrays are called vectors in R. Two-dimensional arrays are called matrices. "
},
{
"code": null,
"e": 27287,
"s": 27033,
"text": "Syntax: array(array1, dim = c (r, c, m), dimnames = list(c.names, r.names, m.names))Parameters: array1: a vector of values dim: contains the number of matrices, m of the specified number of rows and columns dimnames: contain the names for the dimensions"
},
{
"code": null,
"e": 27297,
"s": 27287,
"text": "Example: "
},
{
"code": null,
"e": 27305,
"s": 27297,
"text": "Python3"
},
{
"code": "# R program to illustrate an array # creating a vectorvector1 <- c(\"A\", \"B\", \"C\")# declaring a character arrayuni_array <- array(vector1)print(\"Uni-Dimensional Array\")print(uni_array) # creating another vectorvector <- c(1:12)# declaring 2 numeric multi-dimensional# array with size 2x3multi_array <- array(vector, dim = c(2, 3, 2))print(\"Multi-Dimensional Array\")print(multi_array)",
"e": 27688,
"s": 27305,
"text": null
},
{
"code": null,
"e": 27698,
"s": 27688,
"text": "Output: "
},
{
"code": null,
"e": 27907,
"s": 27698,
"text": "[1] \"Uni-Dimensional Array\"\n[1] \"A\" \"B\" \"C\"\n[1] \"Multi-Dimensional Array\"\n, , 1\n\n [,1] [,2] [,3]\n[1,] 1 3 5\n[2,] 2 4 6\n\n, , 2\n\n [,1] [,2] [,3]\n[1,] 7 9 11\n[2,] 8 10 12"
},
{
"code": null,
"e": 28383,
"s": 27909,
"text": "Matrix in R is a table-like structure consisting of elements arranged in a fixed number of rows and columns. All the elements belong to a single data type. R contains an in-built function matrix() to create a matrix. Elements of a matrix can be accessed by providing indexes of rows and columns. The arithmetic operation, addition, subtraction, and multiplication can be performed on matrices with the same dimensions. Matrices can be easily converted to data frames CSVs. "
},
{
"code": null,
"e": 28640,
"s": 28383,
"text": "Syntax: matrix(data, nrow, ncol, byrow)Parameters: data: contain a vector of similar data type elements. nrow: number of rows. ncol: number of columns. byrow: By default matrices are in column-wise order. So this parameter decides how to arrange the matrix"
},
{
"code": null,
"e": 28651,
"s": 28640,
"text": "Example: "
},
{
"code": null,
"e": 28659,
"s": 28651,
"text": "Python3"
},
{
"code": "# R program to illustrate a matrix A = matrix( # Taking sequence of elements c(1, 2, 3, 4, 5, 6, 7, 8, 9), # No of rows and columns nrow = 3, ncol = 3, # By default matrices are # in column-wise order # So this parameter decides # how to arrange the matrix byrow = TRUE ) print(A)",
"e": 29010,
"s": 28659,
"text": null
},
{
"code": null,
"e": 29020,
"s": 29010,
"text": "Output: "
},
{
"code": null,
"e": 29100,
"s": 29020,
"text": " [,1] [,2] [,3]\n[1,] 1 2 3\n[2,] 4 5 6\n[3,] 7 8 9"
},
{
"code": null,
"e": 29115,
"s": 29106,
"text": "sooda367"
},
{
"code": null,
"e": 29122,
"s": 29115,
"text": "Picked"
},
{
"code": null,
"e": 29131,
"s": 29122,
"text": "R-Arrays"
},
{
"code": null,
"e": 29140,
"s": 29131,
"text": "R-Matrix"
},
{
"code": null,
"e": 29151,
"s": 29140,
"text": "R Language"
},
{
"code": null,
"e": 29249,
"s": 29151,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 29301,
"s": 29249,
"text": "Filter data by multiple conditions in R using Dplyr"
},
{
"code": null,
"e": 29333,
"s": 29301,
"text": "Loops in R (for, while, repeat)"
},
{
"code": null,
"e": 29377,
"s": 29333,
"text": "How to change Row Names of DataFrame in R ?"
},
{
"code": null,
"e": 29429,
"s": 29377,
"text": "Change Color of Bars in Barchart using ggplot2 in R"
},
{
"code": null,
"e": 29464,
"s": 29429,
"text": "Group by function in R using Dplyr"
},
{
"code": null,
"e": 29502,
"s": 29464,
"text": "How to Change Axis Scales in R Plots?"
},
{
"code": null,
"e": 29560,
"s": 29502,
"text": "How to Split Column Into Multiple Columns in R DataFrame?"
},
{
"code": null,
"e": 29598,
"s": 29560,
"text": "R Programming Language - Introduction"
},
{
"code": null,
"e": 29634,
"s": 29598,
"text": "K-Means Clustering in R Programming"
}
] |
How to Access <tr> element from Table using JavaScript ? - GeeksforGeeks | 11 May, 2020
Given an HTML table, the task is to access a <tr> element of the table using JavaScript and highlight any row that we want.
Approach: We will use basic DOM operations in JavaScript to access the table row element. We will add the highlight class to the row that we click; if the highlight class is already present, we will remove it to make the row normal again.
getElementById() Method: Used to select an HTML element by its ID; here we select the table on which to perform the above operation.
addEventListener() Method: After selecting this table, we add an event listener to listen for the click event.
path: When we click at any point on the window, path describes the complete chain of elements that the click passed through. For example, if we click on a td element of a table, then its path will be [td, tr, tbody, table, body, html, document, Window] (a more portable alternative is sketched after this list).
After selecting the row, we look for the highlight class in its classList; if it is found, we simply remove the class, otherwise we add it.
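Note: The path property used in the example below is a non-standard feature (historically available mainly in Chromium-based browsers). A more portable sketch of the same row lookup and highlight toggle, using the standard Element.closest() method and the same table id as the example, would be:

document.getElementById('table_to_highlight')
    .addEventListener('click', function (event) {

        // Walk up from the clicked cell to the
        // nearest enclosing <tr> element
        var row = event.target.closest('tr');
        if (!row) return;

        // Toggle the highlight class on that row
        row.classList.toggle('highlight');
    });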
Example:
<!DOCTYPE html>
<html>

<head>
    <title>
        How to Access tr element
        from Table using JavaScript ?
    </title>

    <style type="text/css">
        body {
            text-align: center;
        }

        h1 {
            color: green;
        }

        /* Basic CSS to design table */
        table {
            border-collapse: collapse;
            width: 100%;
        }

        th, td {
            padding: 8px;
            text-align: left;
            border-bottom: 1px solid #ddd;
        }

        /* CSS command for the row to highlight */
        .highlight {
            background-color: #b8b8b8;
        }
    </style>
</head>

<body>
    <h1>GeeksforGeeks</h1>
    <h3>
        Access tr element from
        Table using JavaScript
    </h3>

    <table id="table_to_highlight">
        <tr>
            <th>Name</th>
            <th>Email</th>
            <th>Position</th>
        </tr>
        <tr>
            <td>Shivam Singhal</td>
            <td>[email protected]</td>
            <td>Full Stack Developer</td>
        </tr>
        <tr>
            <td>Shashank Chugh</td>
            <td>[email protected]</td>
            <td>Software Developer</td>
        </tr>
        <tr>
            <td>Akash Kumar</td>
            <td>[email protected]</td>
            <td>ML Engineer</td>
        </tr>
        <tr>
            <td>Shivam Jaiswal</td>
            <td>[email protected]</td>
            <td>Ethical Hacker</td>
        </tr>
    </table>

    <script type="text/javascript">
        // JavaScript Code to Highlight any
        // row in the given table.
        document.getElementById('table_to_highlight')
            .addEventListener('click', function (item) {

                // To get tr tag
                // in the row where we click
                var row = item.path[1];
                var row_value = "";

                for (var j = 0; j < row.cells.length; j++) {
                    row_value += row.cells[j].innerHTML;
                    row_value += " | ";
                }
                alert(row_value);

                // Toggle the highlight
                if (row.classList.contains('highlight'))
                    row.classList.remove('highlight');
                else
                    row.classList.add('highlight');
            });
    </script>
</body>

</html>
Output:
CSS-Misc
HTML-Misc
JavaScript-Misc
Picked
CSS
HTML
JavaScript
Web Technologies
Web technologies Questions
HTML
Design a web page using HTML and CSS
How to set space between the flexbox ?
Form validation using jQuery
Search Bar using HTML, CSS and JavaScript
How to style a checkbox using CSS?
How to set the default value for an HTML <select> element ?
Hide or show elements in HTML using display property
How to set input type date in dd-mm-yyyy format using HTML ?
How to Insert Form Data into Database using PHP ?
REST API (Introduction) | [
{
"code": null,
"e": 26621,
"s": 26593,
"text": "\n11 May, 2020"
},
{
"code": null,
"e": 26741,
"s": 26621,
"text": "Given an HTML table and the task is to access the table element from the Controller and highlight any row that we want."
},
{
"code": null,
"e": 26980,
"s": 26741,
"text": "Approach: We will use a basic DOM operation in JavaScript to access table row element. We will be adding highlight class to the row that we click, if the highlight class is already present then we will remove this class to make it normal."
},
{
"code": null,
"e": 27105,
"s": 26980,
"text": "getElementById() Method: To select any element in HTML from its ID, we will select the table to perform the above operation."
},
{
"code": null,
"e": 27222,
"s": 27105,
"text": "addEventListener() Method: After selecting this table, we will add an Event Listener to listen from the click event."
},
{
"code": null,
"e": 27452,
"s": 27222,
"text": "path: When we click at any point on window then Path describes its complete path that it belongs to. For example, if we click to a td element of a table, then its Path will be [td, tr, tbody, table, body, html, document, Window]."
},
{
"code": null,
"e": 27600,
"s": 27452,
"text": "After selecting the row, we will look for highlight class in its classList, if we found it simply remove this class or add if it do not contain it."
},
{
"code": null,
"e": 27609,
"s": 27600,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title> How to Access tr element from Table using JavaScript ? </title> <style type=\"text/css\"> body { text-align: center; } h1 { color: green; } /* Basic CSS to design table */ table { border-collapse: collapse; width: 100%; } th, td { padding: 8px; text-align: left; border-bottom: 1px solid #ddd; } /* CSS command for the row to highlight */ .highlight { background-color: #b8b8b8; } </style></head> <body> <h1>GeeksforGeeks</h1> <h3> Access tr element from Table using JavaScript </h3> <table id=\"table_to_highlight\"> <tr> <th>Name</th> <th>Email</th> <th>Position</th> </tr> <tr> <td>Shivam Singhal</td> <td>[email protected]</td> <td>Full Stack Developer</td> </tr> <tr> <td>Shashank Chugh</td> <td>[email protected]</td> <td>Software Developer</td> </tr> <tr> <td>Akash Kumar</td> <td>[email protected]</td> <td>ML Engineer</td> </tr> <tr> <td>Shivam Jaiswal</td> <td>[email protected]</td> <td>Ethical Hacker</td> </tr> </table> <script type=\"text/javascript\"> // JavaScript Code to Highlight any // row in the given table. document.getElementById('table_to_highlight') .addEventListener('click', function (item) { // To get tr tag // In the row where we click var row = item.path[1]; var row_value = \"\"; for (var j = 0; j < row.cells.length; j++) { row_value += row.cells[j].innerHTML; row_value += \" | \"; } alert(row_value); // Toggle the highlight if (row.classList.contains('highlight')) row.classList.remove('highlight'); else row.classList.add('highlight'); }); </script></body> </html>",
"e": 29929,
"s": 27609,
"text": null
},
{
"code": null,
"e": 29937,
"s": 29929,
"text": "Output:"
},
{
"code": null,
"e": 29946,
"s": 29937,
"text": "CSS-Misc"
},
{
"code": null,
"e": 29956,
"s": 29946,
"text": "HTML-Misc"
},
{
"code": null,
"e": 29972,
"s": 29956,
"text": "JavaScript-Misc"
},
{
"code": null,
"e": 29979,
"s": 29972,
"text": "Picked"
},
{
"code": null,
"e": 29983,
"s": 29979,
"text": "CSS"
},
{
"code": null,
"e": 29988,
"s": 29983,
"text": "HTML"
},
{
"code": null,
"e": 29999,
"s": 29988,
"text": "JavaScript"
},
{
"code": null,
"e": 30016,
"s": 29999,
"text": "Web Technologies"
},
{
"code": null,
"e": 30043,
"s": 30016,
"text": "Web technologies Questions"
},
{
"code": null,
"e": 30048,
"s": 30043,
"text": "HTML"
},
{
"code": null,
"e": 30146,
"s": 30048,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 30183,
"s": 30146,
"text": "Design a web page using HTML and CSS"
},
{
"code": null,
"e": 30222,
"s": 30183,
"text": "How to set space between the flexbox ?"
},
{
"code": null,
"e": 30251,
"s": 30222,
"text": "Form validation using jQuery"
},
{
"code": null,
"e": 30293,
"s": 30251,
"text": "Search Bar using HTML, CSS and JavaScript"
},
{
"code": null,
"e": 30328,
"s": 30293,
"text": "How to style a checkbox using CSS?"
},
{
"code": null,
"e": 30388,
"s": 30328,
"text": "How to set the default value for an HTML <select> element ?"
},
{
"code": null,
"e": 30441,
"s": 30388,
"text": "Hide or show elements in HTML using display property"
},
{
"code": null,
"e": 30502,
"s": 30441,
"text": "How to set input type date in dd-mm-yyyy format using HTML ?"
},
{
"code": null,
"e": 30552,
"s": 30502,
"text": "How to Insert Form Data into Database using PHP ?"
}
] |
Remove odd indexed characters from a given string - GeeksforGeeks | 07 Sep, 2020
Given string str of size N, the task is to remove the characters present at odd indices (0-based indexing) of a given string.
Examples :
Input: str = “abcdef”Output: aceExplanation:The characters ‘b’, ‘d’ and ‘f’ are present at odd indices, i.e. 1, 3 and 5 respectively. Therefore, they are removed from the string.
Input: str = “geeks”Output: ges
Approach: Follow the steps below to solve the problem:
Initialize an empty string, say new_string, to store the result.
Traverse the given string and for every index, check if it is even or not.
If found to be true, append the characters at those indices to the string new_string.
Finally, after complete traversal of the entire string, return the new_string.
Below is the implementation of the above approach:
C++
Java
Python3
C#
// C++ program to implement
// the above approach
#include <bits/stdc++.h>
using namespace std;

// Function to remove the odd
// indexed characters from a given string
string removeOddIndexCharacters(string s)
{
    // Stores the resultant string
    string new_string = "";

    for (int i = 0; i < s.length(); i++) {

        // If current index is odd
        if (i % 2 == 1) {

            // Skip the character
            continue;
        }

        // Otherwise, append the
        // character
        new_string += s[i];
    }

    // Return the result
    return new_string;
}

// Driver Code
int main()
{
    string str = "abcdef";

    // Function call
    cout << removeOddIndexCharacters(str);
    return 0;
}
// Java program to implement
// the above approach
import java.util.*;

class GFG {

    // Function to remove odd indexed
    // characters from a given string
    static String removeOddIndexCharacters(String s)
    {
        // Stores the resultant string
        String new_string = "";

        for (int i = 0; i < s.length(); i++) {

            // If the current index is odd
            if (i % 2 == 1)

                // Skip the character
                continue;

            // Otherwise, append the
            // character
            new_string += s.charAt(i);
        }

        // Return the modified string
        return new_string;
    }

    // Driver Code
    public static void main(String[] args)
    {
        String str = "abcdef";

        // Remove the characters which
        // have odd index
        str = removeOddIndexCharacters(str);

        System.out.print(str);
    }
}
# Python3 program to implement
# the above approach

# Function to remove the odd
# indexed characters from a given string
def removeOddIndexCharacters(s):

    # Stores the resultant string
    new_s = ""

    i = 0
    while i < len(s):

        # If the current index is odd
        if (i % 2 == 1):

            # Skip the character
            i += 1
            continue

        # Otherwise, append the
        # character
        new_s += s[i]
        i += 1

    # Return the modified string
    return new_s

# Driver Code
if __name__ == '__main__':

    str = "abcdef"

    # Remove the characters which
    # have odd index
    str = removeOddIndexCharacters(str)

    print(str)
// C# program to implement
// the above approach
using System;

class GFG{

    // Function to remove odd indexed
    // characters from a given string
    static string removeOddIndexCharacters(string s)
    {
        // Stores the resultant string
        string new_string = "";

        for(int i = 0; i < s.Length; i++)
        {

            // If the current index is odd
            if (i % 2 == 1)

                // Skip the character
                continue;

            // Otherwise, append the
            // character
            new_string += s[i];
        }

        // Return the modified string
        return new_string;
    }

    // Driver Code
    public static void Main()
    {
        string str = "abcdef";

        // Remove the characters which
        // have odd index
        str = removeOddIndexCharacters(str);

        Console.Write(str);
    }
}

// This code is contributed by sanjoy_62
ace
Time Complexity: O(N)Auxiliary Space: O(N)
sanjoy_62
School Programming
Searching
Strings
Searching
Strings
Constructors in Java
Exceptions in Java
Data Types
Inline Functions in C++
Pure Virtual Functions and Abstract Classes in C++
Binary Search
Maximum and minimum of an array using minimum number of comparisons
Linear Search
Search an element in a sorted and rotated array
Find the Missing Number | [
{
"code": null,
"e": 25517,
"s": 25489,
"text": "\n07 Sep, 2020"
},
{
"code": null,
"e": 25643,
"s": 25517,
"text": "Given string str of size N, the task is to remove the characters present at odd indices (0-based indexing) of a given string."
},
{
"code": null,
"e": 25655,
"s": 25643,
"text": "Examples : "
},
{
"code": null,
"e": 25834,
"s": 25655,
"text": "Input: str = “abcdef”Output: aceExplanation:The characters ‘b’, ‘d’ and ‘f’ are present at odd indices, i.e. 1, 3 and 5 respectively. Therefore, they are removed from the string."
},
{
"code": null,
"e": 25866,
"s": 25834,
"text": "Input: str = “geeks”Output: ges"
},
{
"code": null,
"e": 25921,
"s": 25866,
"text": "Approach: Follow the steps below to solve the problem:"
},
{
"code": null,
"e": 25986,
"s": 25921,
"text": "Initialize an empty string, say new_string, to store the result."
},
{
"code": null,
"e": 26061,
"s": 25986,
"text": "Traverse the given string and for every index, check if it is even or not."
},
{
"code": null,
"e": 26147,
"s": 26061,
"text": "If found to be true, append the characters at those indices to the string new_string."
},
{
"code": null,
"e": 26226,
"s": 26147,
"text": "Finally, after complete traversal of the entire string, return the new_string."
},
{
"code": null,
"e": 26277,
"s": 26226,
"text": "Below is the implementation of the above approach:"
},
{
"code": null,
"e": 26281,
"s": 26277,
"text": "C++"
},
{
"code": null,
"e": 26286,
"s": 26281,
"text": "Java"
},
{
"code": null,
"e": 26294,
"s": 26286,
"text": "Python3"
},
{
"code": null,
"e": 26297,
"s": 26294,
"text": "C#"
},
{
"code": "// C++ program to implement// the above approach #include <bits/stdc++.h>using namespace std; // Function to remove the odd// indexed characters from a given stringstring removeOddIndexCharacters(string s){ // Stores the resultant string string new_string = \"\"; for (int i = 0; i < s.length(); i++) { // If current index is odd if (i % 2 == 1) { // Skip the character continue; } // Otherwise, append the // character new_string += s[i]; } // Return the result return new_string;} // Driver Codeint main(){ string str = \"abcdef\"; // Function call cout << removeOddIndexCharacters(str); return 0;}",
"e": 27007,
"s": 26297,
"text": null
},
{
"code": "// Java program to implement// the above approachimport java.util.*; class GFG { // Function to remove odd indexed // characters from a given string static String removeOddIndexCharacters( String s) { // Stores the resultant string String new_string = \"\"; for (int i = 0; i < s.length(); i++) { // If the current index is odd if (i % 2 == 1) // Skip the character continue; // Otherwise, append the // character new_string += s.charAt(i); } // Return the modified string return new_string; } // Driver Code public static void main(String[] args) { String str = \"abcdef\"; // Remove the characters which // have odd index str = removeOddIndexCharacters(str); System.out.print(str); }}",
"e": 27905,
"s": 27007,
"text": null
},
{
"code": "# Python3 program to implement# the above approach # Function to remove the odd# indexed characters from a given stringdef removeOddIndexCharacters(s): # Stores the resultant string new_s = \"\" i = 0 while i < len(s): # If the current index is odd if (i % 2 == 1): # Skip the character i+= 1 continue # Otherwise, append the # character new_s += s[i] i+= 1 # Return the modified string return new_s # Driver Code if __name__ == '__main__': str = \"abcdef\" # Remove the characters which # have odd index str = removeOddIndexCharacters(str) print(str)",
"e": 28639,
"s": 27905,
"text": null
},
{
"code": "// C# program to implement // the above approach using System; class GFG{ // Function to remove odd indexed // characters from a given string static string removeOddIndexCharacters(string s) { // Stores the resultant string string new_string = \"\"; for(int i = 0; i < s.Length; i++) { // If the current index is odd if (i % 2 == 1) // Skip the character continue; // Otherwise, append the // character new_string += s[i]; } // Return the modified string return new_string; } // Driver Code public static void Main() { string str = \"abcdef\"; // Remove the characters which // have odd index str = removeOddIndexCharacters(str); Console.Write(str); } } // This code is contributed by sanjoy_62",
"e": 29479,
"s": 28639,
"text": null
},
{
"code": null,
"e": 29484,
"s": 29479,
"text": "ace\n"
},
{
"code": null,
"e": 29527,
"s": 29484,
"text": "Time Complexity: O(N)Auxiliary Space: O(N)"
},
{
"code": null,
"e": 29537,
"s": 29527,
"text": "sanjoy_62"
},
{
"code": null,
"e": 29556,
"s": 29537,
"text": "School Programming"
},
{
"code": null,
"e": 29566,
"s": 29556,
"text": "Searching"
},
{
"code": null,
"e": 29574,
"s": 29566,
"text": "Strings"
},
{
"code": null,
"e": 29584,
"s": 29574,
"text": "Searching"
},
{
"code": null,
"e": 29592,
"s": 29584,
"text": "Strings"
},
{
"code": null,
"e": 29690,
"s": 29592,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 29711,
"s": 29690,
"text": "Constructors in Java"
},
{
"code": null,
"e": 29730,
"s": 29711,
"text": "Exceptions in Java"
},
{
"code": null,
"e": 29741,
"s": 29730,
"text": "Data Types"
},
{
"code": null,
"e": 29765,
"s": 29741,
"text": "Inline Functions in C++"
},
{
"code": null,
"e": 29816,
"s": 29765,
"text": "Pure Virtual Functions and Abstract Classes in C++"
},
{
"code": null,
"e": 29830,
"s": 29816,
"text": "Binary Search"
},
{
"code": null,
"e": 29898,
"s": 29830,
"text": "Maximum and minimum of an array using minimum number of comparisons"
},
{
"code": null,
"e": 29912,
"s": 29898,
"text": "Linear Search"
},
{
"code": null,
"e": 29960,
"s": 29912,
"text": "Search an element in a sorted and rotated array"
}
] |
How to render as emphasized text using HTML ? - GeeksforGeeks | 31 Dec, 2020
The <em> tag in HTML is a phrase tag used to emphasize text content. It is similar to the <i> tag. The main difference between these two tags is that the <em> tag semantically emphasizes an important word or section of words, while the <i> tag just offsets text, conventionally styled in italics, to show an alternative mood or voice.
Note: A similar visual effect can be achieved by using the CSS property font-style: italic, although that does not carry the semantic emphasis of the <em> tag.
Syntax:
<em> Contents... </em>
Example 1: This example uses <em> tag to create emphasized text.
HTML
<!DOCTYPE html>
<html>

<head>
    <title>
        How to render as emphasized
        text using HTML?
    </title>
</head>

<body style="text-align: center;">
    <h1 style="color:green;">
        GeeksforGeeks
    </h1>

    <h3>
        How to render as emphasized
        text using HTML?
    </h3>

    <em>Emphasized text content</em>
</body>

</html>
Output:
Example 2: This example uses <em> tag with title attribute to create emphasized text.
HTML
<!DOCTYPE html>
<html>

<head>
    <title>
        How to render as emphasized
        text using HTML?
    </title>
</head>

<body style="text-align: center;">
    <h1 style="color:green;">
        GeeksforGeeks
    </h1>

    <h3>
        How to render as emphasized
        text using HTML?
    </h3>

    <em title="Emphasized text">
        Emphasized text content
    </em>
</body>

</html>
Output:
Supported Browsers:
Google Chrome
Internet Explorer
Firefox
Safari
Opera
HTML-Misc
CSS
HTML
Web Technologies
HTML
Design a web page using HTML and CSS
How to set space between the flexbox ?
Form validation using jQuery
Search Bar using HTML, CSS and JavaScript
How to style a checkbox using CSS?
How to set the default value for an HTML <select> element ?
Hide or show elements in HTML using display property
How to set input type date in dd-mm-yyyy format using HTML ?
How to Insert Form Data into Database using PHP ?
REST API (Introduction) | [
{
"code": null,
"e": 26621,
"s": 26593,
"text": "\n31 Dec, 2020"
},
{
"code": null,
"e": 26954,
"s": 26621,
"text": "The <em> tag in HTML is a phrase tag and used to emphasize the text content. It is similar to <italic> tag. The main difference between these two tag is the <em> tag semantically emphasizes on the important word or section of words while <i> tag is just offset text conventionally styled in italic to show alternative mood or voice."
},
{
"code": null,
"e": 27011,
"s": 26954,
"text": "Note: This effect can be achieved by using CSS property."
},
{
"code": null,
"e": 27019,
"s": 27011,
"text": "Syntax:"
},
{
"code": null,
"e": 27042,
"s": 27019,
"text": "<em> Contents... </em>"
},
{
"code": null,
"e": 27107,
"s": 27042,
"text": "Example 1: This example uses <em> tag to create emphasized text."
},
{
"code": null,
"e": 27112,
"s": 27107,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html> <head> <title> How to render as emphasized text using HTML? </title></head> <body style=\"text-align: center;\"> <h1 style=\"color:green;\"> GeeksforGeeks </h1> <h3> How to render as emphasized text using HTML? </h3> <em>Emphasized text content</em></body> </html>",
"e": 27453,
"s": 27112,
"text": null
},
{
"code": null,
"e": 27461,
"s": 27453,
"text": "Output:"
},
{
"code": null,
"e": 27547,
"s": 27461,
"text": "Example 2: This example uses <em> tag with title attribute to create emphasized text."
},
{
"code": null,
"e": 27552,
"s": 27547,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html> <head> <title> How to render as emphasized text using HTML? </title></head> <body style=\"text-align: center;\"> <h1 style=\"color:green;\"> GeeksforGeeks </h1> <h3> How to render as emphasized text using HTML? </h3> <em title=\"Emphasized text\"> Emphasized text content </em></body> </html>",
"e": 27929,
"s": 27552,
"text": null
},
{
"code": null,
"e": 27937,
"s": 27929,
"text": "Output:"
},
{
"code": null,
"e": 27957,
"s": 27937,
"text": "Supported Browsers:"
},
{
"code": null,
"e": 27971,
"s": 27957,
"text": "Google Chrome"
},
{
"code": null,
"e": 27989,
"s": 27971,
"text": "Internet Explorer"
},
{
"code": null,
"e": 27997,
"s": 27989,
"text": "Firefox"
},
{
"code": null,
"e": 28004,
"s": 27997,
"text": "Safari"
},
{
"code": null,
"e": 28010,
"s": 28004,
"text": "Opera"
},
{
"code": null,
"e": 28147,
"s": 28010,
"text": "Attention reader! Don’t stop learning now. Get hold of all the important HTML concepts with the Web Design for Beginners | HTML course."
},
{
"code": null,
"e": 28157,
"s": 28147,
"text": "HTML-Misc"
},
{
"code": null,
"e": 28161,
"s": 28157,
"text": "CSS"
},
{
"code": null,
"e": 28166,
"s": 28161,
"text": "HTML"
},
{
"code": null,
"e": 28183,
"s": 28166,
"text": "Web Technologies"
},
{
"code": null,
"e": 28188,
"s": 28183,
"text": "HTML"
},
{
"code": null,
"e": 28286,
"s": 28188,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 28323,
"s": 28286,
"text": "Design a web page using HTML and CSS"
},
{
"code": null,
"e": 28362,
"s": 28323,
"text": "How to set space between the flexbox ?"
},
{
"code": null,
"e": 28391,
"s": 28362,
"text": "Form validation using jQuery"
},
{
"code": null,
"e": 28433,
"s": 28391,
"text": "Search Bar using HTML, CSS and JavaScript"
},
{
"code": null,
"e": 28468,
"s": 28433,
"text": "How to style a checkbox using CSS?"
},
{
"code": null,
"e": 28528,
"s": 28468,
"text": "How to set the default value for an HTML <select> element ?"
},
{
"code": null,
"e": 28581,
"s": 28528,
"text": "Hide or show elements in HTML using display property"
},
{
"code": null,
"e": 28642,
"s": 28581,
"text": "How to set input type date in dd-mm-yyyy format using HTML ?"
},
{
"code": null,
"e": 28692,
"s": 28642,
"text": "How to Insert Form Data into Database using PHP ?"
}
] |